Chromium Code Reviews

Unified diff: components/safe_browsing_db/v4_store.cc

Issue 2384893002: PVer4: Test checksum on startup outside the hotpath of DB load (Closed)
Patch Set: shess@ feedback Created 4 years, 2 months ago
 // Copyright 2016 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "base/base64.h"
 #include "base/bind.h"
 #include "base/files/file_util.h"
 #include "base/memory/ptr_util.h"
 #include "base/metrics/histogram_macros.h"
 #include "base/metrics/sparse_histogram.h"
(...skipping 16 matching lines...)
 const uint32_t kFileVersion = 9;

 std::string GetUmaSuffixForStore(const base::FilePath& file_path) {
   return base::StringPrintf(
       ".%" PRIsFP, file_path.BaseName().RemoveExtension().value().c_str());
 }

 void RecordTimeWithAndWithoutStore(const std::string& metric,
                                    base::TimeDelta time,
                                    const base::FilePath& file_path) {
-  std::string suffix = GetUmaSuffixForStore(file_path);
-
   // The histograms below are a modified expansion of the
   // UMA_HISTOGRAM_LONG_TIMES macro adapted to allow for a dynamically suffixed
   // histogram name.
   // Note: The factory creates and owns the histogram.
   const int kBucketCount = 100;
   base::HistogramBase* histogram = base::Histogram::FactoryTimeGet(
       metric, base::TimeDelta::FromMilliseconds(1),
       base::TimeDelta::FromMinutes(1), kBucketCount,
       base::HistogramBase::kUmaTargetedHistogramFlag);
   if (histogram) {
     histogram->AddTime(time);
   }

+  std::string suffix = GetUmaSuffixForStore(file_path);
   base::HistogramBase* histogram_suffix = base::Histogram::FactoryTimeGet(
       metric + suffix, base::TimeDelta::FromMilliseconds(1),
       base::TimeDelta::FromMinutes(1), kBucketCount,
       base::HistogramBase::kUmaTargetedHistogramFlag);
   if (histogram_suffix) {
     histogram_suffix->AddTime(time);
   }
 }

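For context, the helper above records every timing twice: once under the base metric name and once under a per-store name built from the store file's base name. A minimal sketch of a hypothetical call site; the metric name, the timed work, and the store file name here are illustrative, not taken from this CL:

// Hypothetical usage sketch (assumes this file's includes; names are
// illustrative). The elapsed time is recorded under both
// "SafeBrowsing.V4Store.ApplyUpdateTime" and
// "SafeBrowsing.V4Store.ApplyUpdateTime.UrlMalware", where the suffix comes
// from the store file's base name via GetUmaSuffixForStore().
base::TimeTicks before = base::TimeTicks::Now();
ApplySomeUpdate();  // Illustrative stand-in for the work being timed.
RecordTimeWithAndWithoutStore(
    "SafeBrowsing.V4Store.ApplyUpdateTime", base::TimeTicks::Now() - before,
    base::FilePath(FILE_PATH_LITERAL("UrlMalware.store")));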
 void RecordAddUnlumpedHashesTime(base::TimeDelta time) {
(...skipping 107 matching lines...)
 V4Store::~V4Store() {}

 std::string V4Store::DebugString() const {
   std::string state_base64;
   base::Base64Encode(state_, &state_base64);

   return base::StringPrintf("path: %" PRIsFP "; state: %s",
                             store_path_.value().c_str(), state_base64.c_str());
 }

-bool V4Store::Reset() {
-  // TODO(vakh): Implement skeleton.
+void V4Store::Reset() {
+  expected_checksum_.clear();
+  hash_prefix_map_.clear();
   state_ = "";
-  return true;
 }

 ApplyUpdateResult V4Store::ProcessPartialUpdateAndWriteToDisk(
     const HashPrefixMap& hash_prefix_map_old,
     std::unique_ptr<ListUpdateResponse> response) {
   DCHECK(response->has_response_type());
   DCHECK_EQ(ListUpdateResponse::PARTIAL_UPDATE, response->response_type());

   TimeTicks before = TimeTicks::Now();
   ApplyUpdateResult result = ProcessUpdate(hash_prefix_map_old, response);
   if (result == APPLY_UPDATE_SUCCESS) {
     RecordProcessPartialUpdateTime(TimeTicks::Now() - before, store_path_);
     // TODO(vakh): Create a ListUpdateResponse containing RICE encoded
     // hash prefixes and response_type as FULL_UPDATE, and write that to disk.
   }
   return result;
 }

 ApplyUpdateResult V4Store::ProcessFullUpdateAndWriteToDisk(
     std::unique_ptr<ListUpdateResponse> response) {
   TimeTicks before = TimeTicks::Now();
   ApplyUpdateResult result = ProcessFullUpdate(response);
   if (result == APPLY_UPDATE_SUCCESS) {
+    RecordProcessFullUpdateTime(TimeTicks::Now() - before, store_path_);
     RecordStoreWriteResult(WriteToDisk(std::move(response)));
-    RecordProcessFullUpdateTime(TimeTicks::Now() - before, store_path_);
   }
   return result;
 }

 ApplyUpdateResult V4Store::ProcessFullUpdate(
     const std::unique_ptr<ListUpdateResponse>& response) {
   DCHECK(response->has_response_type());
   DCHECK_EQ(ListUpdateResponse::FULL_UPDATE, response->response_type());
   // TODO(vakh): For a full update, we don't need to process the update in
   // lexicographical order to store it, but we do need to do that for
   // calculating
(...skipping 87 matching lines...)
   } else {
     DVLOG(1) << "Failure: ApplyUpdate: reason: " << apply_update_result
              << "; store: " << *this;
     // new_store failed updating. Pass a nullptr to the callback.
     callback_task_runner->PostTask(FROM_HERE, base::Bind(callback, nullptr));
   }

   RecordApplyUpdateResult(apply_update_result);
 }

-// static
 ApplyUpdateResult V4Store::UpdateHashPrefixMapFromAdditions(
     const RepeatedPtrField<ThreatEntrySet>& additions,
     HashPrefixMap* additions_map) {
   for (const auto& addition : additions) {
     ApplyUpdateResult apply_update_result = APPLY_UPDATE_SUCCESS;
     const CompressionType compression_type = addition.compression_type();
     if (compression_type == RAW) {
       DCHECK(addition.has_raw_hashes());
       DCHECK(addition.raw_hashes().has_raw_hashes());

(...skipping 117 matching lines...)

     (*prefix_map_to_update)[prefix_size].reserve(existing_capacity +
                                                  prefix_length_to_add);
   }
 }

 ApplyUpdateResult V4Store::MergeUpdate(const HashPrefixMap& old_prefixes_map,
                                        const HashPrefixMap& additions_map,
                                        const RepeatedField<int32>* raw_removals,
                                        const std::string& expected_checksum) {
+  DCHECK(task_runner_->RunsTasksOnCurrentThread());
   DCHECK(hash_prefix_map_.empty());
+
+  bool calculate_checksum = !expected_checksum.empty();
+  if (calculate_checksum &&
+      (expected_checksum.size() != crypto::kSHA256Length)) {
+    return CHECKSUM_MISMATCH_FAILURE;
+  }
+
+  if (old_prefixes_map.empty()) {
+    // If the old map is empty, which it is at startup, then just copy over the
+    // additions map.
+    DCHECK(!raw_removals);
+    hash_prefix_map_ = additions_map;
+
+    if (calculate_checksum) {
Nathan Parker 2016/10/07 23:24:29 nit: You could just copy it anyway, since it's eit
vakh (use Gerrit instead) 2016/10/10 17:42:33 Done.
+      // Calculate the checksum asynchronously later and if it doesn't match,
+      // reset the store.
+      expected_checksum_ = expected_checksum;
+    }
+
+    return APPLY_UPDATE_SUCCESS;
+  }
+
   hash_prefix_map_.clear();
   ReserveSpaceInPrefixMap(old_prefixes_map, &hash_prefix_map_);
   ReserveSpaceInPrefixMap(additions_map, &hash_prefix_map_);

   IteratorMap old_iterator_map;
   HashPrefix next_smallest_prefix_old;
   InitializeIteratorMap(old_prefixes_map, &old_iterator_map);
   bool old_has_unmerged = GetNextSmallestUnmergedPrefix(
       old_prefixes_map, old_iterator_map, &next_smallest_prefix_old);

   IteratorMap additions_iterator_map;
   HashPrefix next_smallest_prefix_additions;
   InitializeIteratorMap(additions_map, &additions_iterator_map);
   bool additions_has_unmerged = GetNextSmallestUnmergedPrefix(
       additions_map, additions_iterator_map, &next_smallest_prefix_additions);

   // Classical merge sort.
   // The two constructs to merge are maps: old_prefixes_map, additions_map.
   // At least one of the maps still has elements that need to be merged into
   // the new store.

-  bool calculate_checksum = !expected_checksum.empty();
   std::unique_ptr<crypto::SecureHash> checksum_ctx(
       crypto::SecureHash::Create(crypto::SecureHash::SHA256));

   // Keep track of the number of elements picked from the old map. This is
   // used to determine which elements to drop based on the raw_removals. Note
   // that picked is not the same as merged. A picked element isn't merged if
   // its index is on the raw_removals list.
   int total_picked_from_old = 0;
   const int* removals_iter = raw_removals ? raw_removals->begin() : nullptr;
   while (old_has_unmerged || additions_has_unmerged) {
     // If the same hash prefix appears in the existing store and the additions
     // list, something is clearly wrong. Discard the update.
     if (old_has_unmerged && additions_has_unmerged &&
         next_smallest_prefix_old == next_smallest_prefix_additions) {
       return ADDITIONS_HAS_EXISTING_PREFIX_FAILURE;
     }

     // Select which map to pick the next hash prefix from to keep the result
     // in lexicographically sorted order.
     bool pick_from_old =
         old_has_unmerged &&
         (!additions_has_unmerged ||
          (next_smallest_prefix_old < next_smallest_prefix_additions));

     PrefixSize next_smallest_prefix_size;
     if (pick_from_old) {
       next_smallest_prefix_size = next_smallest_prefix_old.size();

       // Update the iterator map, which means that we have merged one hash
-      // prefix of size |next_size_for_old| from the old store.
+      // prefix of size |next_smallest_prefix_size| from the old store.
       old_iterator_map[next_smallest_prefix_size] += next_smallest_prefix_size;

       if (!raw_removals || removals_iter == raw_removals->end() ||
           *removals_iter != total_picked_from_old) {
         // Append the smallest hash to the appropriate list.
         hash_prefix_map_[next_smallest_prefix_size] += next_smallest_prefix_old;

         if (calculate_checksum) {
-          checksum_ctx->Update(base::string_as_array(&next_smallest_prefix_old),
-                               next_smallest_prefix_size);
+          checksum_ctx->Update(next_smallest_prefix_old.data(),
+                               next_smallest_prefix_size);
         }
       } else {
         // Element not added to new map. Move the removals iterator forward.
         removals_iter++;
       }

       total_picked_from_old++;

       // Find the next smallest unmerged element in the old store's map.
       old_has_unmerged = GetNextSmallestUnmergedPrefix(
           old_prefixes_map, old_iterator_map, &next_smallest_prefix_old);
     } else {
       next_smallest_prefix_size = next_smallest_prefix_additions.size();

       // Append the smallest hash to the appropriate list.
       hash_prefix_map_[next_smallest_prefix_size] +=
           next_smallest_prefix_additions;

       if (calculate_checksum) {
-        checksum_ctx->Update(
-            base::string_as_array(&next_smallest_prefix_additions),
-            next_smallest_prefix_size);
+        checksum_ctx->Update(next_smallest_prefix_additions.data(),
+                             next_smallest_prefix_size);
       }

       // Update the iterator map, which means that we have merged one hash
       // prefix of size |next_smallest_prefix_size| from the update.
       additions_iterator_map[next_smallest_prefix_size] +=
           next_smallest_prefix_size;

       // Find the next smallest unmerged element in the additions map.
       additions_has_unmerged =
           GetNextSmallestUnmergedPrefix(additions_map, additions_iterator_map,
                                         &next_smallest_prefix_additions);
     }
   }

   if (raw_removals && removals_iter != raw_removals->end()) {
     return REMOVALS_INDEX_TOO_LARGE_FAILURE;
   }

   if (calculate_checksum) {
-    std::string checksum(crypto::kSHA256Length, 0);
-    checksum_ctx->Finish(base::string_as_array(&checksum), checksum.size());
-    if (checksum != expected_checksum) {
-      std::string checksum_base64, expected_checksum_base64;
-      base::Base64Encode(checksum, &checksum_base64);
-      base::Base64Encode(expected_checksum, &expected_checksum_base64);
-      DVLOG(1) << "Failure: Checksum mismatch: calculated: " << checksum_base64
-               << " expected: " << expected_checksum_base64;
-      return CHECKSUM_MISMATCH_FAILURE;
-    }
+    char checksum[crypto::kSHA256Length];
+    checksum_ctx->Finish(checksum, sizeof(checksum));
+    for (size_t i = 0; i < crypto::kSHA256Length; i++) {
+      if (checksum[i] != expected_checksum[i]) {
+#if DCHECK_IS_ON()
+        std::string checksum_b64, expected_checksum_b64;
+        base::Base64Encode(base::StringPiece(checksum, arraysize(checksum)),
+                           &checksum_b64);
+        base::Base64Encode(expected_checksum, &expected_checksum_b64);
+        DVLOG(1) << "Failure: Checksum mismatch: calculated: " << checksum_b64
+                 << "; expected: " << expected_checksum_b64
+                 << "; store: " << *this;
+#endif
+        return CHECKSUM_MISMATCH_FAILURE;
+      }
+    }
   }

   return APPLY_UPDATE_SUCCESS;
 }

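For reference, the merge above treats each map value as a sorted concatenation of fixed-size prefixes and repeatedly appends the globally smallest unconsumed prefix, which keeps the combined store lexicographically sorted across prefix sizes. A simplified standalone sketch of that selection loop, using plain std::map and std::string in place of the Chromium types, and omitting removals, duplicate detection, and checksumming:

#include <cstddef>
#include <map>
#include <string>

using PrefixSize = std::size_t;
// Maps a prefix size to a sorted concatenation of prefixes of that size.
using HashPrefixMap = std::map<PrefixSize, std::string>;
// Maps a prefix size to the byte offset consumed so far in that list.
using IteratorMap = std::map<PrefixSize, std::size_t>;

// Finds the lexicographically smallest prefix, across all prefix sizes, that
// has not been consumed yet. Returns false once every list is exhausted.
bool NextSmallestUnmerged(const HashPrefixMap& map, const IteratorMap& iters,
                          std::string* smallest) {
  bool found = false;
  for (const auto& entry : map) {
    const PrefixSize size = entry.first;
    const std::string& prefixes = entry.second;
    const std::size_t offset = iters.at(size);
    if (offset + size > prefixes.size())
      continue;  // This size's list is fully consumed.
    std::string candidate = prefixes.substr(offset, size);
    if (!found || candidate < *smallest) {
      *smallest = candidate;
      found = true;
    }
  }
  return found;
}

// Merges two maps of sorted prefix lists so the result stays sorted
// globally, not just within each per-size list.
HashPrefixMap Merge(const HashPrefixMap& old_map,
                    const HashPrefixMap& additions) {
  HashPrefixMap merged;
  IteratorMap old_iters, add_iters;
  for (const auto& entry : old_map) old_iters[entry.first] = 0;
  for (const auto& entry : additions) add_iters[entry.first] = 0;

  std::string next_old, next_add;
  bool old_left = NextSmallestUnmerged(old_map, old_iters, &next_old);
  bool add_left = NextSmallestUnmerged(additions, add_iters, &next_add);
  while (old_left || add_left) {
    const bool pick_old = old_left && (!add_left || next_old < next_add);
    const std::string& picked = pick_old ? next_old : next_add;
    merged[picked.size()] += picked;  // Append to the per-size list.
    if (pick_old) {
      old_iters[picked.size()] += picked.size();
      old_left = NextSmallestUnmerged(old_map, old_iters, &next_old);
    } else {
      add_iters[picked.size()] += picked.size();
      add_left = NextSmallestUnmerged(additions, add_iters, &next_add);
    }
  }
  return merged;
}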
 StoreReadResult V4Store::ReadFromDisk() {
   DCHECK(task_runner_->RunsTasksOnCurrentThread());

   TimeTicks before = TimeTicks::Now();
(...skipping 107 matching lines...)
   int result = hash_prefix.compare(mid_prefix);
   if (result == 0) {
     return true;
   } else if (result < 0) {
     return HashPrefixMatches(hash_prefix, begin, mid);
   } else {
     return HashPrefixMatches(hash_prefix, mid + prefix_size, end);
   }
 }

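The recursive search above probes a flat string of fixed-size prefixes, rounding each midpoint down to a prefix boundary so every comparison looks at exactly one stored prefix. An equivalent iterative standalone sketch, with plain std::string standing in for the store's types:

#include <string>

// Binary search for |prefix| in |prefixes|, a sorted concatenation of
// fixed-size hash prefixes. Precondition: prefix.size() > 0 and
// prefixes.size() is a multiple of prefix.size().
bool HashPrefixMatchesSketch(const std::string& prefix,
                             const std::string& prefixes) {
  const size_t prefix_size = prefix.size();
  size_t begin = 0, end = prefixes.size();
  while (begin < end) {
    const size_t count = (end - begin) / prefix_size;  // Prefixes in range.
    const size_t mid = begin + (count / 2) * prefix_size;  // Boundary-aligned.
    const int result = prefixes.compare(mid, prefix_size, prefix);
    if (result == 0)
      return true;
    if (result < 0)
      begin = mid + prefix_size;  // Stored prefix < target: search right half.
    else
      end = mid;  // Stored prefix > target: search left half.
  }
  return false;
}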
+bool V4Store::VerifyChecksum() {
+  DCHECK(task_runner_->RunsTasksOnCurrentThread());
+
+  if (expected_checksum_.empty()) {
Nathan Parker 2016/10/07 23:24:29 Doesn't an empty expected_checksum mean there's no
vakh (use Gerrit instead) 2016/10/10 17:42:33 Done. Also added a TODO to not allow empty checksu
+    // If the |expected_checksum_| is empty, the file (or hash_prefix_map_)
+    // should also be empty.
+    return hash_prefix_map_.empty();
+  }
+
+  IteratorMap iterator_map;
+  HashPrefix next_smallest_prefix;
+  InitializeIteratorMap(hash_prefix_map_, &iterator_map);
+  bool has_unmerged = GetNextSmallestUnmergedPrefix(
+      hash_prefix_map_, iterator_map, &next_smallest_prefix);
+
+  std::unique_ptr<crypto::SecureHash> checksum_ctx(
+      crypto::SecureHash::Create(crypto::SecureHash::SHA256));
+  while (has_unmerged) {
+    PrefixSize next_smallest_prefix_size = next_smallest_prefix.size();
+
+    // Update the iterator map, which means that we have read one hash
+    // prefix of size |next_smallest_prefix_size| from hash_prefix_map_.
+    iterator_map[next_smallest_prefix_size] += next_smallest_prefix_size;
+
+    checksum_ctx->Update(next_smallest_prefix.data(),
+                         next_smallest_prefix_size);
+
+    // Find the next smallest unmerged element in the map.
+    has_unmerged = GetNextSmallestUnmergedPrefix(hash_prefix_map_, iterator_map,
+                                                 &next_smallest_prefix);
+  }
+
+  char checksum[crypto::kSHA256Length];
+  checksum_ctx->Finish(checksum, sizeof(checksum));
+  for (size_t i = 0; i < crypto::kSHA256Length; i++) {
+    if (checksum[i] != expected_checksum_[i]) {
+      RecordApplyUpdateResultWhenReadingFromDisk(CHECKSUM_MISMATCH_FAILURE);
+#if DCHECK_IS_ON()
+      std::string checksum_b64, expected_checksum_b64;
+      base::Base64Encode(base::StringPiece(checksum, arraysize(checksum)),
+                         &checksum_b64);
+      base::Base64Encode(expected_checksum_, &expected_checksum_b64);
+      DVLOG(1) << "Failure: Checksum mismatch: calculated: " << checksum_b64
+               << "; expected: " << expected_checksum_b64
+               << "; store: " << *this;
+#endif
+      return false;
+    }
+  }
+  return true;
+}
+
 }  // namespace safe_browsing
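VerifyChecksum recomputes SHA-256 over the prefixes in the same global lexicographic order that MergeUpdate hashed them in, then compares byte-by-byte against the checksum saved from the update response; this is what lets the startup checksum test run outside the hot path of DB load. A standalone sketch of the same idea, using OpenSSL's SHA256 as a stand-in for Chromium's crypto::SecureHash and reusing the HashPrefixMap, IteratorMap, and NextSmallestUnmerged helpers from the merge sketch above:

#include <openssl/sha.h>  // Stand-in for Chromium's crypto::SecureHash.

#include <cstring>
#include <string>

// Recomputes SHA-256 over every prefix in global lexicographic order and
// compares the digest byte-by-byte with |expected|, mirroring the shape of
// V4Store::VerifyChecksum.
bool VerifyChecksumSketch(const HashPrefixMap& map,
                          const std::string& expected) {
  if (expected.empty())
    return map.empty();  // No stored checksum is only valid for an empty store.
  if (expected.size() != SHA256_DIGEST_LENGTH)
    return false;

  IteratorMap iters;
  for (const auto& entry : map) iters[entry.first] = 0;

  SHA256_CTX ctx;
  SHA256_Init(&ctx);
  std::string next;
  while (NextSmallestUnmerged(map, iters, &next)) {
    SHA256_Update(&ctx, next.data(), next.size());
    iters[next.size()] += next.size();  // Mark this prefix as consumed.
  }

  unsigned char digest[SHA256_DIGEST_LENGTH];
  SHA256_Final(digest, &ctx);
  return std::memcmp(digest, expected.data(), SHA256_DIGEST_LENGTH) == 0;
}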