Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(332)

Side by Side Diff: components/safe_browsing_db/v4_store.cc

Issue 2384893002: PVer4: Test checksum on startup outside the hotpath of DB load (Closed)
Patch Set: Verify that the checksum check happens async Created 4 years, 2 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 // Copyright 2016 The Chromium Authors. All rights reserved. 1 // Copyright 2016 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "base/base64.h" 5 #include "base/base64.h"
6 #include "base/bind.h" 6 #include "base/bind.h"
7 #include "base/files/file_util.h" 7 #include "base/files/file_util.h"
8 #include "base/memory/ptr_util.h" 8 #include "base/memory/ptr_util.h"
9 #include "base/metrics/histogram_macros.h" 9 #include "base/metrics/histogram_macros.h"
10 #include "base/metrics/sparse_histogram.h" 10 #include "base/metrics/sparse_histogram.h"
(...skipping 16 matching lines...) Expand all
27 const uint32_t kFileVersion = 9; 27 const uint32_t kFileVersion = 9;
28 28
29 std::string GetUmaSuffixForStore(const base::FilePath& file_path) { 29 std::string GetUmaSuffixForStore(const base::FilePath& file_path) {
30 return base::StringPrintf( 30 return base::StringPrintf(
31 ".%" PRIsFP, file_path.BaseName().RemoveExtension().value().c_str()); 31 ".%" PRIsFP, file_path.BaseName().RemoveExtension().value().c_str());
32 } 32 }
33 33
34 void RecordTimeWithAndWithoutStore(const std::string& metric, 34 void RecordTimeWithAndWithoutStore(const std::string& metric,
35 base::TimeDelta time, 35 base::TimeDelta time,
36 const base::FilePath& file_path) { 36 const base::FilePath& file_path) {
37 std::string suffix = GetUmaSuffixForStore(file_path);
38
39 // The histograms below are a modified expansion of the 37 // The histograms below are a modified expansion of the
40 // UMA_HISTOGRAM_LONG_TIMES macro adapted to allow for a dynamically suffixed 38 // UMA_HISTOGRAM_LONG_TIMES macro adapted to allow for a dynamically suffixed
41 // histogram name. 39 // histogram name.
42 // Note: The factory creates and owns the histogram. 40 // Note: The factory creates and owns the histogram.
43 const int kBucketCount = 100; 41 const int kBucketCount = 100;
44 base::HistogramBase* histogram = base::Histogram::FactoryTimeGet( 42 base::HistogramBase* histogram = base::Histogram::FactoryTimeGet(
45 metric, base::TimeDelta::FromMilliseconds(1), 43 metric, base::TimeDelta::FromMilliseconds(1),
46 base::TimeDelta::FromMinutes(1), kBucketCount, 44 base::TimeDelta::FromMinutes(1), kBucketCount,
47 base::HistogramBase::kUmaTargetedHistogramFlag); 45 base::HistogramBase::kUmaTargetedHistogramFlag);
48 if (histogram) { 46 if (histogram) {
49 histogram->AddTime(time); 47 histogram->AddTime(time);
50 } 48 }
51 49
50 std::string suffix = GetUmaSuffixForStore(file_path);
52 base::HistogramBase* histogram_suffix = base::Histogram::FactoryTimeGet( 51 base::HistogramBase* histogram_suffix = base::Histogram::FactoryTimeGet(
53 metric + suffix, base::TimeDelta::FromMilliseconds(1), 52 metric + suffix, base::TimeDelta::FromMilliseconds(1),
54 base::TimeDelta::FromMinutes(1), kBucketCount, 53 base::TimeDelta::FromMinutes(1), kBucketCount,
55 base::HistogramBase::kUmaTargetedHistogramFlag); 54 base::HistogramBase::kUmaTargetedHistogramFlag);
56 if (histogram_suffix) { 55 if (histogram_suffix) {
57 histogram_suffix->AddTime(time); 56 histogram_suffix->AddTime(time);
58 } 57 }
59 } 58 }
60 59
61 void RecordAddUnlumpedHashesTime(base::TimeDelta time) { 60 void RecordAddUnlumpedHashesTime(base::TimeDelta time) {
(...skipping 107 matching lines...) Expand 10 before | Expand all | Expand 10 after
169 V4Store::~V4Store() {} 168 V4Store::~V4Store() {}
170 169
171 std::string V4Store::DebugString() const { 170 std::string V4Store::DebugString() const {
172 std::string state_base64; 171 std::string state_base64;
173 base::Base64Encode(state_, &state_base64); 172 base::Base64Encode(state_, &state_base64);
174 173
175 return base::StringPrintf("path: %" PRIsFP "; state: %s", 174 return base::StringPrintf("path: %" PRIsFP "; state: %s",
176 store_path_.value().c_str(), state_base64.c_str()); 175 store_path_.value().c_str(), state_base64.c_str());
177 } 176 }
178 177
179 bool V4Store::Reset() { 178 void V4Store::Reset() {
180 // TODO(vakh): Implement skeleton. 179 expected_checksum_.clear();
180 hash_prefix_map_.clear();
181 state_ = ""; 181 state_ = "";
182 return true;
183 } 182 }
184 183
185 ApplyUpdateResult V4Store::ProcessPartialUpdateAndWriteToDisk( 184 ApplyUpdateResult V4Store::ProcessPartialUpdateAndWriteToDisk(
186 const HashPrefixMap& hash_prefix_map_old, 185 const HashPrefixMap& hash_prefix_map_old,
187 std::unique_ptr<ListUpdateResponse> response) { 186 std::unique_ptr<ListUpdateResponse> response) {
188 DCHECK(response->has_response_type()); 187 DCHECK(response->has_response_type());
189 DCHECK_EQ(ListUpdateResponse::PARTIAL_UPDATE, response->response_type()); 188 DCHECK_EQ(ListUpdateResponse::PARTIAL_UPDATE, response->response_type());
190 189
191 TimeTicks before = TimeTicks::Now(); 190 TimeTicks before = TimeTicks::Now();
192 ApplyUpdateResult result = ProcessUpdate(hash_prefix_map_old, response); 191 ApplyUpdateResult result = ProcessUpdate(hash_prefix_map_old, response);
193 if (result == APPLY_UPDATE_SUCCESS) { 192 if (result == APPLY_UPDATE_SUCCESS) {
194 RecordProcessPartialUpdateTime(TimeTicks::Now() - before, store_path_); 193 RecordProcessPartialUpdateTime(TimeTicks::Now() - before, store_path_);
195 // TODO(vakh): Create a ListUpdateResponse containing RICE encoded 194 // TODO(vakh): Create a ListUpdateResponse containing RICE encoded
196 // hash prefixes and response_type as FULL_UPDATE, and write that to disk. 195 // hash prefixes and response_type as FULL_UPDATE, and write that to disk.
197 } 196 }
198 return result; 197 return result;
199 } 198 }
200 199
201 ApplyUpdateResult V4Store::ProcessFullUpdateAndWriteToDisk( 200 ApplyUpdateResult V4Store::ProcessFullUpdateAndWriteToDisk(
202 std::unique_ptr<ListUpdateResponse> response) { 201 std::unique_ptr<ListUpdateResponse> response) {
203 TimeTicks before = TimeTicks::Now(); 202 TimeTicks before = TimeTicks::Now();
204 ApplyUpdateResult result = ProcessFullUpdate(response); 203 ApplyUpdateResult result = ProcessFullUpdate(response);
205 if (result == APPLY_UPDATE_SUCCESS) { 204 if (result == APPLY_UPDATE_SUCCESS) {
205 RecordProcessFullUpdateTime(TimeTicks::Now() - before, store_path_);
206 RecordStoreWriteResult(WriteToDisk(std::move(response))); 206 RecordStoreWriteResult(WriteToDisk(std::move(response)));
207 RecordProcessFullUpdateTime(TimeTicks::Now() - before, store_path_);
208 } 207 }
209 return result; 208 return result;
210 } 209 }
211 210
212 ApplyUpdateResult V4Store::ProcessFullUpdate( 211 ApplyUpdateResult V4Store::ProcessFullUpdate(
213 const std::unique_ptr<ListUpdateResponse>& response) { 212 const std::unique_ptr<ListUpdateResponse>& response) {
214 DCHECK(response->has_response_type()); 213 DCHECK(response->has_response_type());
215 DCHECK_EQ(ListUpdateResponse::FULL_UPDATE, response->response_type()); 214 DCHECK_EQ(ListUpdateResponse::FULL_UPDATE, response->response_type());
216 // TODO(vakh): For a full update, we don't need to process the update in 215 // TODO(vakh): For a full update, we don't need to process the update in
 217 // lexicographical order to store it, but we do need to do that for calculating 216
(...skipping 87 matching lines...) Expand 10 before | Expand all | Expand 10 after
305 } else { 304 } else {
306 DVLOG(1) << "Failure: ApplyUpdate: reason: " << apply_update_result 305 DVLOG(1) << "Failure: ApplyUpdate: reason: " << apply_update_result
307 << "; store: " << *this; 306 << "; store: " << *this;
308 // new_store failed updating. Pass a nullptr to the callback. 307 // new_store failed updating. Pass a nullptr to the callback.
309 callback_task_runner->PostTask(FROM_HERE, base::Bind(callback, nullptr)); 308 callback_task_runner->PostTask(FROM_HERE, base::Bind(callback, nullptr));
310 } 309 }
311 310
312 RecordApplyUpdateResult(apply_update_result); 311 RecordApplyUpdateResult(apply_update_result);
313 } 312 }
314 313
315 // static
316 ApplyUpdateResult V4Store::UpdateHashPrefixMapFromAdditions( 314 ApplyUpdateResult V4Store::UpdateHashPrefixMapFromAdditions(
317 const RepeatedPtrField<ThreatEntrySet>& additions, 315 const RepeatedPtrField<ThreatEntrySet>& additions,
318 HashPrefixMap* additions_map) { 316 HashPrefixMap* additions_map) {
319 for (const auto& addition : additions) { 317 for (const auto& addition : additions) {
320 ApplyUpdateResult apply_update_result = APPLY_UPDATE_SUCCESS; 318 ApplyUpdateResult apply_update_result = APPLY_UPDATE_SUCCESS;
321 const CompressionType compression_type = addition.compression_type(); 319 const CompressionType compression_type = addition.compression_type();
322 if (compression_type == RAW) { 320 if (compression_type == RAW) {
323 DCHECK(addition.has_raw_hashes()); 321 DCHECK(addition.has_raw_hashes());
324 DCHECK(addition.raw_hashes().has_raw_hashes()); 322 DCHECK(addition.raw_hashes().has_raw_hashes());
325 323
(...skipping 117 matching lines...) Expand 10 before | Expand all | Expand 10 after
443 441
444 (*prefix_map_to_update)[prefix_size].reserve(existing_capacity + 442 (*prefix_map_to_update)[prefix_size].reserve(existing_capacity +
445 prefix_length_to_add); 443 prefix_length_to_add);
446 } 444 }
447 } 445 }
448 446
449 ApplyUpdateResult V4Store::MergeUpdate(const HashPrefixMap& old_prefixes_map, 447 ApplyUpdateResult V4Store::MergeUpdate(const HashPrefixMap& old_prefixes_map,
450 const HashPrefixMap& additions_map, 448 const HashPrefixMap& additions_map,
451 const RepeatedField<int32>* raw_removals, 449 const RepeatedField<int32>* raw_removals,
452 const std::string& expected_checksum) { 450 const std::string& expected_checksum) {
451 DCHECK(task_runner_->RunsTasksOnCurrentThread());
453 DCHECK(hash_prefix_map_.empty()); 452 DCHECK(hash_prefix_map_.empty());
453
454 bool calculate_checksum = !expected_checksum.empty();
455 if (calculate_checksum &&
456 (expected_checksum.size() != crypto::kSHA256Length)) {
457 return CHECKSUM_MISMATCH_FAILURE;
458 }
459
460 if (old_prefixes_map.empty()) {
461 // If the old map is empty, which it is at startup, then just copy over the
462 // additions map.
463 DCHECK(!raw_removals);
464 hash_prefix_map_ = additions_map;
465
466 // Calculate the checksum asynchronously later and if it doesn't match,
467 // reset the store.
468 expected_checksum_ = expected_checksum;
469
470 return APPLY_UPDATE_SUCCESS;
471 }
472
454 hash_prefix_map_.clear(); 473 hash_prefix_map_.clear();
455 ReserveSpaceInPrefixMap(old_prefixes_map, &hash_prefix_map_); 474 ReserveSpaceInPrefixMap(old_prefixes_map, &hash_prefix_map_);
456 ReserveSpaceInPrefixMap(additions_map, &hash_prefix_map_); 475 ReserveSpaceInPrefixMap(additions_map, &hash_prefix_map_);
457 476
458 IteratorMap old_iterator_map; 477 IteratorMap old_iterator_map;
459 HashPrefix next_smallest_prefix_old; 478 HashPrefix next_smallest_prefix_old;
460 InitializeIteratorMap(old_prefixes_map, &old_iterator_map); 479 InitializeIteratorMap(old_prefixes_map, &old_iterator_map);
461 bool old_has_unmerged = GetNextSmallestUnmergedPrefix( 480 bool old_has_unmerged = GetNextSmallestUnmergedPrefix(
462 old_prefixes_map, old_iterator_map, &next_smallest_prefix_old); 481 old_prefixes_map, old_iterator_map, &next_smallest_prefix_old);
463 482
464 IteratorMap additions_iterator_map; 483 IteratorMap additions_iterator_map;
465 HashPrefix next_smallest_prefix_additions; 484 HashPrefix next_smallest_prefix_additions;
466 InitializeIteratorMap(additions_map, &additions_iterator_map); 485 InitializeIteratorMap(additions_map, &additions_iterator_map);
467 bool additions_has_unmerged = GetNextSmallestUnmergedPrefix( 486 bool additions_has_unmerged = GetNextSmallestUnmergedPrefix(
468 additions_map, additions_iterator_map, &next_smallest_prefix_additions); 487 additions_map, additions_iterator_map, &next_smallest_prefix_additions);
469 488
470 // Classical merge sort. 489 // Classical merge sort.
471 // The two constructs to merge are maps: old_prefixes_map, additions_map. 490 // The two constructs to merge are maps: old_prefixes_map, additions_map.
472 // At least one of the maps still has elements that need to be merged into the 491 // At least one of the maps still has elements that need to be merged into the
473 // new store. 492 // new store.
474 493
475 bool calculate_checksum = !expected_checksum.empty();
476 std::unique_ptr<crypto::SecureHash> checksum_ctx( 494 std::unique_ptr<crypto::SecureHash> checksum_ctx(
477 crypto::SecureHash::Create(crypto::SecureHash::SHA256)); 495 crypto::SecureHash::Create(crypto::SecureHash::SHA256));
478 496
479 // Keep track of the number of elements picked from the old map. This is used 497 // Keep track of the number of elements picked from the old map. This is used
480 // to determine which elements to drop based on the raw_removals. Note that 498 // to determine which elements to drop based on the raw_removals. Note that
481 // picked is not the same as merged. A picked element isn't merged if its 499 // picked is not the same as merged. A picked element isn't merged if its
482 // index is on the raw_removals list. 500 // index is on the raw_removals list.
483 int total_picked_from_old = 0; 501 int total_picked_from_old = 0;
484 const int* removals_iter = raw_removals ? raw_removals->begin() : nullptr; 502 const int* removals_iter = raw_removals ? raw_removals->begin() : nullptr;
485 while (old_has_unmerged || additions_has_unmerged) { 503 while (old_has_unmerged || additions_has_unmerged) {
486 // If the same hash prefix appears in the existing store and the additions 504 // If the same hash prefix appears in the existing store and the additions
487 // list, something is clearly wrong. Discard the update. 505 // list, something is clearly wrong. Discard the update.
488 if (old_has_unmerged && additions_has_unmerged && 506 if (old_has_unmerged && additions_has_unmerged &&
489 next_smallest_prefix_old == next_smallest_prefix_additions) { 507 next_smallest_prefix_old == next_smallest_prefix_additions) {
490 return ADDITIONS_HAS_EXISTING_PREFIX_FAILURE; 508 return ADDITIONS_HAS_EXISTING_PREFIX_FAILURE;
491 } 509 }
492 510
493 // Select which map to pick the next hash prefix from to keep the result in 511 // Select which map to pick the next hash prefix from to keep the result in
 494 // lexicographically sorted order. 512
495 bool pick_from_old = 513 bool pick_from_old =
496 old_has_unmerged && 514 old_has_unmerged &&
497 (!additions_has_unmerged || 515 (!additions_has_unmerged ||
498 (next_smallest_prefix_old < next_smallest_prefix_additions)); 516 (next_smallest_prefix_old < next_smallest_prefix_additions));
499 517
500 PrefixSize next_smallest_prefix_size; 518 PrefixSize next_smallest_prefix_size;
501 if (pick_from_old) { 519 if (pick_from_old) {
502 next_smallest_prefix_size = next_smallest_prefix_old.size(); 520 next_smallest_prefix_size = next_smallest_prefix_old.size();
503 521
504 // Update the iterator map, which means that we have merged one hash 522 // Update the iterator map, which means that we have merged one hash
505 // prefix of size |next_size_for_old| from the old store. 523 // prefix of size |next_smallest_prefix_size| from the old store.
506 old_iterator_map[next_smallest_prefix_size] += next_smallest_prefix_size; 524 old_iterator_map[next_smallest_prefix_size] += next_smallest_prefix_size;
507 525
508 if (!raw_removals || removals_iter == raw_removals->end() || 526 if (!raw_removals || removals_iter == raw_removals->end() ||
509 *removals_iter != total_picked_from_old) { 527 *removals_iter != total_picked_from_old) {
510 // Append the smallest hash to the appropriate list. 528 // Append the smallest hash to the appropriate list.
511 hash_prefix_map_[next_smallest_prefix_size] += next_smallest_prefix_old; 529 hash_prefix_map_[next_smallest_prefix_size] += next_smallest_prefix_old;
512 530
513 if (calculate_checksum) { 531 if (calculate_checksum) {
514 checksum_ctx->Update(base::string_as_array(&next_smallest_prefix_old), 532 checksum_ctx->Update(next_smallest_prefix_old.data(),
515 next_smallest_prefix_size); 533 next_smallest_prefix_size);
516 } 534 }
517 } else { 535 } else {
518 // Element not added to new map. Move the removals iterator forward. 536 // Element not added to new map. Move the removals iterator forward.
519 removals_iter++; 537 removals_iter++;
520 } 538 }
521 539
522 total_picked_from_old++; 540 total_picked_from_old++;
523 541
524 // Find the next smallest unmerged element in the old store's map. 542 // Find the next smallest unmerged element in the old store's map.
525 old_has_unmerged = GetNextSmallestUnmergedPrefix( 543 old_has_unmerged = GetNextSmallestUnmergedPrefix(
526 old_prefixes_map, old_iterator_map, &next_smallest_prefix_old); 544 old_prefixes_map, old_iterator_map, &next_smallest_prefix_old);
527 } else { 545 } else {
528 next_smallest_prefix_size = next_smallest_prefix_additions.size(); 546 next_smallest_prefix_size = next_smallest_prefix_additions.size();
529 547
530 // Append the smallest hash to the appropriate list. 548 // Append the smallest hash to the appropriate list.
531 hash_prefix_map_[next_smallest_prefix_size] += 549 hash_prefix_map_[next_smallest_prefix_size] +=
532 next_smallest_prefix_additions; 550 next_smallest_prefix_additions;
533 551
534 if (calculate_checksum) { 552 if (calculate_checksum) {
535 checksum_ctx->Update( 553 checksum_ctx->Update(next_smallest_prefix_additions.data(),
536 base::string_as_array(&next_smallest_prefix_additions), 554 next_smallest_prefix_size);
537 next_smallest_prefix_size);
538 } 555 }
539 556
540 // Update the iterator map, which means that we have merged one hash 557 // Update the iterator map, which means that we have merged one hash
541 // prefix of size |next_smallest_prefix_size| from the update. 558 // prefix of size |next_smallest_prefix_size| from the update.
542 additions_iterator_map[next_smallest_prefix_size] += 559 additions_iterator_map[next_smallest_prefix_size] +=
543 next_smallest_prefix_size; 560 next_smallest_prefix_size;
544 561
545 // Find the next smallest unmerged element in the additions map. 562 // Find the next smallest unmerged element in the additions map.
546 additions_has_unmerged = 563 additions_has_unmerged =
547 GetNextSmallestUnmergedPrefix(additions_map, additions_iterator_map, 564 GetNextSmallestUnmergedPrefix(additions_map, additions_iterator_map,
548 &next_smallest_prefix_additions); 565 &next_smallest_prefix_additions);
549 } 566 }
550 } 567 }
551 568
552 if (raw_removals && removals_iter != raw_removals->end()) { 569 if (raw_removals && removals_iter != raw_removals->end()) {
553 return REMOVALS_INDEX_TOO_LARGE_FAILURE; 570 return REMOVALS_INDEX_TOO_LARGE_FAILURE;
554 } 571 }
555 572
556 if (calculate_checksum) { 573 if (calculate_checksum) {
557 std::string checksum(crypto::kSHA256Length, 0); 574 char checksum[crypto::kSHA256Length];
558 checksum_ctx->Finish(base::string_as_array(&checksum), checksum.size()); 575 checksum_ctx->Finish(checksum, sizeof(checksum));
559 if (checksum != expected_checksum) { 576 for (size_t i = 0; i < crypto::kSHA256Length; i++) {
560 std::string checksum_base64, expected_checksum_base64; 577 if (checksum[i] != expected_checksum[i]) {
561 base::Base64Encode(checksum, &checksum_base64); 578 #if DCHECK_IS_ON()
562 base::Base64Encode(expected_checksum, &expected_checksum_base64); 579 std::string checksum_b64, expected_checksum_b64;
563 DVLOG(1) << "Failure: Checksum mismatch: calculated: " << checksum_base64 580 base::Base64Encode(base::StringPiece(checksum, arraysize(checksum)),
564 << " expected: " << expected_checksum_base64; 581 &checksum_b64);
565 return CHECKSUM_MISMATCH_FAILURE; 582 base::Base64Encode(expected_checksum, &expected_checksum_b64);
583 DVLOG(1) << "Failure: Checksum mismatch: calculated: " << checksum_b64
584 << "; expected: " << expected_checksum_b64
585 << "; store: " << *this;
586 #endif
587 return CHECKSUM_MISMATCH_FAILURE;
588 }
566 } 589 }
567 } 590 }
568 591
569 return APPLY_UPDATE_SUCCESS; 592 return APPLY_UPDATE_SUCCESS;
570 } 593 }
571 594
572 StoreReadResult V4Store::ReadFromDisk() { 595 StoreReadResult V4Store::ReadFromDisk() {
573 DCHECK(task_runner_->RunsTasksOnCurrentThread()); 596 DCHECK(task_runner_->RunsTasksOnCurrentThread());
574 597
575 TimeTicks before = TimeTicks::Now(); 598 TimeTicks before = TimeTicks::Now();
(...skipping 107 matching lines...) Expand 10 before | Expand all | Expand 10 after
683 int result = hash_prefix.compare(mid_prefix); 706 int result = hash_prefix.compare(mid_prefix);
684 if (result == 0) { 707 if (result == 0) {
685 return true; 708 return true;
686 } else if (result < 0) { 709 } else if (result < 0) {
687 return HashPrefixMatches(hash_prefix, begin, mid); 710 return HashPrefixMatches(hash_prefix, begin, mid);
688 } else { 711 } else {
689 return HashPrefixMatches(hash_prefix, mid + prefix_size, end); 712 return HashPrefixMatches(hash_prefix, mid + prefix_size, end);
690 } 713 }
691 } 714 }
692 715
716 bool V4Store::VerifyChecksum() {
717 DCHECK(task_runner_->RunsTasksOnCurrentThread());
718
719 if (expected_checksum_.empty()) {
720 // Nothing to check here folks!
721 // TODO(vakh): Do not allow empty checksums.
722 return true;
723 }
724
725 IteratorMap iterator_map;
726 HashPrefix next_smallest_prefix;
727 InitializeIteratorMap(hash_prefix_map_, &iterator_map);
728 bool has_unmerged = GetNextSmallestUnmergedPrefix(
729 hash_prefix_map_, iterator_map, &next_smallest_prefix);
730
731 std::unique_ptr<crypto::SecureHash> checksum_ctx(
732 crypto::SecureHash::Create(crypto::SecureHash::SHA256));
733 while (has_unmerged) {
734 PrefixSize next_smallest_prefix_size;
735 next_smallest_prefix_size = next_smallest_prefix.size();
736
737 // Update the iterator map, which means that we have read one hash
738 // prefix of size |next_smallest_prefix_size| from hash_prefix_map_.
739 iterator_map[next_smallest_prefix_size] += next_smallest_prefix_size;
740
741 checksum_ctx->Update(next_smallest_prefix.data(),
742 next_smallest_prefix_size);
743
744 // Find the next smallest unmerged element in the map.
745 has_unmerged = GetNextSmallestUnmergedPrefix(hash_prefix_map_, iterator_map,
746 &next_smallest_prefix);
747 }
748
749 char checksum[crypto::kSHA256Length];
750 checksum_ctx->Finish(checksum, sizeof(checksum));
751 for (size_t i = 0; i < crypto::kSHA256Length; i++) {
752 if (checksum[i] != expected_checksum_[i]) {
753 RecordApplyUpdateResultWhenReadingFromDisk(CHECKSUM_MISMATCH_FAILURE);
754 #if DCHECK_IS_ON()
755 std::string checksum_b64, expected_checksum_b64;
756 base::Base64Encode(base::StringPiece(checksum, arraysize(checksum)),
757 &checksum_b64);
758 base::Base64Encode(expected_checksum_, &expected_checksum_b64);
759 DVLOG(1) << "Failure: Checksum mismatch: calculated: " << checksum_b64
760 << "; expected: " << expected_checksum_b64
761 << "; store: " << *this;
762 #endif
763 return false;
764 }
765 }
766 return true;
767 }
768
693 } // namespace safe_browsing 769 } // namespace safe_browsing
OLDNEW
« no previous file with comments | « components/safe_browsing_db/v4_store.h ('k') | components/safe_browsing_db/v4_store_unittest.cc » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698