Chromium Code Reviews

Diff: components/safe_browsing_db/v4_store.cc

Issue 2384893002: PVer4: Test checksum on startup outside the hotpath of DB load (Closed)
Patch Set: go: design-doc-v4store-verifychecksum -- VerifyChecksum in a way that avoids race conditions betwee… Created 4 years, 2 months ago
 // Copyright 2016 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "base/base64.h"
 #include "base/bind.h"
 #include "base/files/file_util.h"
 #include "base/memory/ptr_util.h"
 #include "base/metrics/histogram_macros.h"
 #include "base/metrics/sparse_histogram.h"
 #include "base/stl_util.h"
 #include "base/strings/stringprintf.h"
 #include "components/safe_browsing_db/v4_rice.h"
 #include "components/safe_browsing_db/v4_store.h"
 #include "components/safe_browsing_db/v4_store.pb.h"
+#include "content/public/browser/browser_thread.h"
 #include "crypto/secure_hash.h"
 #include "crypto/sha2.h"

 using base::TimeTicks;

 namespace safe_browsing {

 namespace {

 const uint32_t kFileMagic = 0x600D71FE;

 const uint32_t kFileVersion = 9;

 std::string GetUmaSuffixForStore(const base::FilePath& file_path) {
   return base::StringPrintf(
       ".%" PRIsFP, file_path.BaseName().RemoveExtension().value().c_str());
 }

 void RecordTimeWithAndWithoutStore(const std::string& metric,
                                    base::TimeDelta time,
                                    const base::FilePath& file_path) {
-  std::string suffix = GetUmaSuffixForStore(file_path);
-
   // The histograms below are a modified expansion of the
   // UMA_HISTOGRAM_LONG_TIMES macro adapted to allow for a dynamically suffixed
   // histogram name.
   // Note: The factory creates and owns the histogram.
   const int kBucketCount = 100;
   base::HistogramBase* histogram = base::Histogram::FactoryTimeGet(
       metric, base::TimeDelta::FromMilliseconds(1),
       base::TimeDelta::FromMinutes(1), kBucketCount,
       base::HistogramBase::kUmaTargetedHistogramFlag);
   if (histogram) {
     histogram->AddTime(time);
   }

+  std::string suffix = GetUmaSuffixForStore(file_path);
   base::HistogramBase* histogram_suffix = base::Histogram::FactoryTimeGet(
       metric + suffix, base::TimeDelta::FromMilliseconds(1),
       base::TimeDelta::FromMinutes(1), kBucketCount,
       base::HistogramBase::kUmaTargetedHistogramFlag);
   if (histogram_suffix) {
     histogram_suffix->AddTime(time);
   }
 }

 void RecordAddUnlumpedHashesTime(base::TimeDelta time) {

(...skipping 107 matching lines...)
 V4Store::~V4Store() {}

 std::string V4Store::DebugString() const {
   std::string state_base64;
   base::Base64Encode(state_, &state_base64);

   return base::StringPrintf("path: %" PRIsFP "; state: %s",
                             store_path_.value().c_str(), state_base64.c_str());
 }

-bool V4Store::Reset() {
-  // TODO(vakh): Implement skeleton.
+void V4Store::Reset() {
+  expected_checksum_.clear();
+  hash_prefix_map_.clear();
   state_ = "";
-  return true;
 }

 ApplyUpdateResult V4Store::ProcessPartialUpdateAndWriteToDisk(
     const HashPrefixMap& hash_prefix_map_old,
     std::unique_ptr<ListUpdateResponse> response) {
   DCHECK(response->has_response_type());
   DCHECK_EQ(ListUpdateResponse::PARTIAL_UPDATE, response->response_type());

   TimeTicks before = TimeTicks::Now();
   ApplyUpdateResult result = ProcessUpdate(hash_prefix_map_old, response);
   if (result == APPLY_UPDATE_SUCCESS) {
     RecordProcessPartialUpdateTime(TimeTicks::Now() - before, store_path_);
     // TODO(vakh): Create a ListUpdateResponse containing RICE encoded
     // hash prefixes and response_type as FULL_UPDATE, and write that to disk.
   }
   return result;
 }

 ApplyUpdateResult V4Store::ProcessFullUpdateAndWriteToDisk(
     std::unique_ptr<ListUpdateResponse> response) {
   TimeTicks before = TimeTicks::Now();
   ApplyUpdateResult result = ProcessFullUpdate(response);
   if (result == APPLY_UPDATE_SUCCESS) {
+    RecordProcessFullUpdateTime(TimeTicks::Now() - before, store_path_);
     RecordStoreWriteResult(WriteToDisk(std::move(response)));
-    RecordProcessFullUpdateTime(TimeTicks::Now() - before, store_path_);
   }
   return result;
 }

 ApplyUpdateResult V4Store::ProcessFullUpdate(
     const std::unique_ptr<ListUpdateResponse>& response) {
   DCHECK(response->has_response_type());
   DCHECK_EQ(ListUpdateResponse::FULL_UPDATE, response->response_type());
   // TODO(vakh): For a full update, we don't need to process the update in
   // lexographical order to store it, but we do need to do that for calculating

(...skipping 87 matching lines...)
   } else {
     DVLOG(1) << "Failure: ApplyUpdate: reason: " << apply_update_result
              << "; store: " << *this;
     // new_store failed updating. Pass a nullptr to the callback.
     callback_task_runner->PostTask(FROM_HERE, base::Bind(callback, nullptr));
   }

   RecordApplyUpdateResult(apply_update_result);
 }

+// static
 ApplyUpdateResult V4Store::UpdateHashPrefixMapFromAdditions(
     const RepeatedPtrField<ThreatEntrySet>& additions,
     HashPrefixMap* additions_map) {
   for (const auto& addition : additions) {
     ApplyUpdateResult apply_update_result = APPLY_UPDATE_SUCCESS;
     const CompressionType compression_type = addition.compression_type();
     if (compression_type == RAW) {
       DCHECK(addition.has_raw_hashes());
       DCHECK(addition.raw_hashes().has_raw_hashes());

(...skipping 117 matching lines...)

     (*prefix_map_to_update)[prefix_size].reserve(existing_capacity +
                                                  prefix_length_to_add);
   }
 }

 ApplyUpdateResult V4Store::MergeUpdate(const HashPrefixMap& old_prefixes_map,
                                        const HashPrefixMap& additions_map,
                                        const RepeatedField<int32>* raw_removals,
                                        const std::string& expected_checksum) {
+  DCHECK(task_runner_->RunsTasksOnCurrentThread());
   DCHECK(hash_prefix_map_.empty());
+
+  bool calculate_checksum = !expected_checksum.empty();
+  if (old_prefixes_map.empty()) {
+    // If the old map is empty, which it is at startup, then just copy over the
+    // additions map.
+    DCHECK(!raw_removals);
+    hash_prefix_map_ = additions_map;
+
+    if (calculate_checksum) {
+      // Calculate the checksum asynchronously later and if it doesn't match,
+      // reset the store.
+      expected_checksum_ = expected_checksum;
+    }
+
+    return APPLY_UPDATE_SUCCESS;
+  }
+
   hash_prefix_map_.clear();
   ReserveSpaceInPrefixMap(old_prefixes_map, &hash_prefix_map_);
   ReserveSpaceInPrefixMap(additions_map, &hash_prefix_map_);

   IteratorMap old_iterator_map;
   HashPrefix next_smallest_prefix_old;
   InitializeIteratorMap(old_prefixes_map, &old_iterator_map);
   bool old_has_unmerged = GetNextSmallestUnmergedPrefix(
       old_prefixes_map, old_iterator_map, &next_smallest_prefix_old);

   IteratorMap additions_iterator_map;
   HashPrefix next_smallest_prefix_additions;
   InitializeIteratorMap(additions_map, &additions_iterator_map);
   bool additions_has_unmerged = GetNextSmallestUnmergedPrefix(
       additions_map, additions_iterator_map, &next_smallest_prefix_additions);

   // Classical merge sort.
   // The two constructs to merge are maps: old_prefixes_map, additions_map.
   // At least one of the maps still has elements that need to be merged into the
   // new store.

-  bool calculate_checksum = !expected_checksum.empty();
   std::unique_ptr<crypto::SecureHash> checksum_ctx(
       crypto::SecureHash::Create(crypto::SecureHash::SHA256));

   // Keep track of the number of elements picked from the old map. This is used
   // to determine which elements to drop based on the raw_removals. Note that
   // picked is not the same as merged. A picked element isn't merged if its
   // index is on the raw_removals list.
   int total_picked_from_old = 0;
   const int* removals_iter = raw_removals ? raw_removals->begin() : nullptr;
   while (old_has_unmerged || additions_has_unmerged) {
     // If the same hash prefix appears in the existing store and the additions
     // list, something is clearly wrong. Discard the update.
     if (old_has_unmerged && additions_has_unmerged &&
         next_smallest_prefix_old == next_smallest_prefix_additions) {
       return ADDITIONS_HAS_EXISTING_PREFIX_FAILURE;
     }

     // Select which map to pick the next hash prefix from to keep the result in
     // lexographically sorted order.
     bool pick_from_old =
         old_has_unmerged &&
         (!additions_has_unmerged ||
          (next_smallest_prefix_old < next_smallest_prefix_additions));

     PrefixSize next_smallest_prefix_size;
     if (pick_from_old) {
       next_smallest_prefix_size = next_smallest_prefix_old.size();

       // Update the iterator map, which means that we have merged one hash
-      // prefix of size |next_size_for_old| from the old store.
+      // prefix of size |next_smallest_prefix_size| from the old store.
       old_iterator_map[next_smallest_prefix_size] += next_smallest_prefix_size;

       if (!raw_removals || removals_iter == raw_removals->end() ||
           *removals_iter != total_picked_from_old) {
         // Append the smallest hash to the appropriate list.
         hash_prefix_map_[next_smallest_prefix_size] += next_smallest_prefix_old;

         if (calculate_checksum) {
           checksum_ctx->Update(base::string_as_array(&next_smallest_prefix_old),
                                next_smallest_prefix_size);

(...skipping 38 matching lines...)
   }

   if (calculate_checksum) {
     std::string checksum(crypto::kSHA256Length, 0);
     checksum_ctx->Finish(base::string_as_array(&checksum), checksum.size());
     if (checksum != expected_checksum) {
       std::string checksum_base64, expected_checksum_base64;
       base::Base64Encode(checksum, &checksum_base64);
       base::Base64Encode(expected_checksum, &expected_checksum_base64);
       DVLOG(1) << "Failure: Checksum mismatch: calculated: " << checksum_base64
-               << " expected: " << expected_checksum_base64;
+               << "; expected: " << expected_checksum_base64
+               << "; store: " << *this;
       return CHECKSUM_MISMATCH_FAILURE;
     }
   }

   return APPLY_UPDATE_SUCCESS;
 }

 StoreReadResult V4Store::ReadFromDisk() {
   DCHECK(task_runner_->RunsTasksOnCurrentThread());

(...skipping 69 matching lines...)

   DCHECK_EQ(file_format_string.size(), written);

   if (!base::Move(new_filename, store_path_)) {
     return UNABLE_TO_RENAME_FAILURE;
   }

   return WRITE_SUCCESS;
 }

 HashPrefix V4Store::GetMatchingHashPrefix(const FullHash& full_hash) {
+  DCHECK_CURRENTLY_ON(content::BrowserThread::IO);
   // It should never be the case that more than one hash prefixes match a given
   // full hash. However, if that happens, this method returns any one of them.
   // It does not guarantee which one of those will be returned.
   DCHECK_EQ(32u, full_hash.size());
   for (const auto& pair : hash_prefix_map_) {
     const PrefixSize& prefix_size = pair.first;
     const HashPrefixes& hash_prefixes = pair.second;
     HashPrefix hash_prefix = full_hash.substr(0, prefix_size);
     if (HashPrefixMatches(hash_prefix, hash_prefixes.begin(),
                           hash_prefixes.end())) {

(...skipping 19 matching lines...)

   int result = hash_prefix.compare(mid_prefix);
   if (result == 0) {
     return true;
   } else if (result < 0) {
     return HashPrefixMatches(hash_prefix, begin, mid);
   } else {
     return HashPrefixMatches(hash_prefix, mid + prefix_size, end);
   }
 }

+bool V4Store::VerifyChecksum() {
+  DCHECK(task_runner_->RunsTasksOnCurrentThread());
+
+  if (expected_checksum_.empty()) {
+    // If the |expected_checksum_| is empty, the file (or hash_prefix_map_)
+    // should also be empty.
+    return hash_prefix_map_.empty();
+  }
+
+  IteratorMap iterator_map;
+  HashPrefix next_smallest_prefix;
+  InitializeIteratorMap(hash_prefix_map_, &iterator_map);
+  bool has_unmerged = GetNextSmallestUnmergedPrefix(
+      hash_prefix_map_, iterator_map, &next_smallest_prefix);
+
+  std::unique_ptr<crypto::SecureHash> checksum_ctx(
+      crypto::SecureHash::Create(crypto::SecureHash::SHA256));
+  while (has_unmerged) {
+    PrefixSize next_smallest_prefix_size;
+    next_smallest_prefix_size = next_smallest_prefix.size();
+
+    // Update the iterator map, which means that we have read one hash
+    // prefix of size |next_smallest_prefix_size| from hash_prefix_map_.
+    iterator_map[next_smallest_prefix_size] += next_smallest_prefix_size;
+
+    checksum_ctx->Update(base::string_as_array(&next_smallest_prefix),
+                         next_smallest_prefix_size);
+
+    // Find the next smallest unmerged element in the map.
+    has_unmerged = GetNextSmallestUnmergedPrefix(hash_prefix_map_, iterator_map,
+                                                 &next_smallest_prefix);
+  }
+
+  std::string checksum(crypto::kSHA256Length, 0);
+  checksum_ctx->Finish(base::string_as_array(&checksum), checksum.size());
+  if (checksum == expected_checksum_) {
+    expected_checksum_.clear();
+    return true;
+  }
+
+  std::string checksum_base64, expected_checksum_base64;
+  base::Base64Encode(checksum, &checksum_base64);
+  base::Base64Encode(expected_checksum_, &expected_checksum_base64);
+  DVLOG(1) << "Failure: Checksum mismatch: calculated: " << checksum_base64
+           << "; expected: " << expected_checksum_base64
+           << "; store: " << *this;
+  RecordApplyUpdateResultWhenReadingFromDisk(CHECKSUM_MISMATCH_FAILURE);
+
+  return false;
+}
+
 }  // namespace safe_browsing
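
For readers skimming the patch, the shape of the change is: when the old prefix map is empty (the startup/DB-load case), MergeUpdate() no longer hashes every prefix inline; it only records the expected checksum, and VerifyChecksum() recomputes and compares it later on the store's task runner. Below is a minimal, self-contained sketch of that deferred-verification pattern. It is not Chromium code: ToyStore, Load(), and Digest() are hypothetical names, the FNV-style digest is a stand-in for the SHA-256 used above, and the caller at the end is assumed, since the code that resets the store on a mismatch lives outside this file.

#include <cstddef>
#include <cstdint>
#include <map>
#include <string>
#include <utility>

class ToyStore {
 public:
  // Fast path at load time: adopt the prefixes and remember the checksum to
  // verify later instead of hashing every prefix now.
  void Load(std::map<std::size_t, std::string> prefixes, std::string expected) {
    hash_prefix_map_ = std::move(prefixes);
    expected_checksum_ = std::move(expected);
  }

  // Run later, off the load hotpath (in Chromium, on the store's task runner).
  bool VerifyChecksum() {
    if (expected_checksum_.empty())
      return hash_prefix_map_.empty();  // Nothing recorded, nothing to check.
    if (Digest() != expected_checksum_)
      return false;                     // Caller is expected to Reset().
    expected_checksum_.clear();         // Verified once; don't re-verify.
    return true;
  }

  void Reset() {
    expected_checksum_.clear();
    hash_prefix_map_.clear();
  }

 private:
  // Stand-in for the SHA-256 digest over all prefixes in sorted order.
  std::string Digest() const {
    uint64_t h = 14695981039346656037ULL;  // FNV-1a; purely illustrative.
    for (const auto& entry : hash_prefix_map_)
      for (unsigned char c : entry.second) h = (h ^ c) * 1099511628211ULL;
    return std::to_string(h);
  }

  std::map<std::size_t, std::string> hash_prefix_map_;  // prefix size -> prefixes
  std::string expected_checksum_;
};

// Hypothetical caller, after loading the store from disk:
//   if (!store.VerifyChecksum())
//     store.Reset();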