OLD | NEW |
---|---|
1 // Copyright 2016 The Chromium Authors. All rights reserved. | 1 // Copyright 2016 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "base/base64.h" | 5 #include "base/base64.h" |
6 #include "base/bind.h" | 6 #include "base/bind.h" |
7 #include "base/files/file_util.h" | 7 #include "base/files/file_util.h" |
8 #include "base/memory/ptr_util.h" | 8 #include "base/memory/ptr_util.h" |
9 #include "base/metrics/histogram_macros.h" | 9 #include "base/metrics/histogram_macros.h" |
10 #include "base/metrics/sparse_histogram.h" | 10 #include "base/metrics/sparse_histogram.h" |
(...skipping 112 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
123 | 123 |
124 std::string V4Store::DebugString() const { | 124 std::string V4Store::DebugString() const { |
125 std::string state_base64; | 125 std::string state_base64; |
126 base::Base64Encode(state_, &state_base64); | 126 base::Base64Encode(state_, &state_base64); |
127 | 127 |
128 return base::StringPrintf("path: %" PRIsFP "; state: %s", | 128 return base::StringPrintf("path: %" PRIsFP "; state: %s", |
129 store_path_.value().c_str(), state_base64.c_str()); | 129 store_path_.value().c_str(), state_base64.c_str()); |
130 } | 130 } |
131 | 131 |
132 bool V4Store::Reset() { | 132 bool V4Store::Reset() { |
133 // TODO(vakh): Implement skeleton. | 133 hash_prefix_map_.clear(); |
Nathan Parker
2016/10/03 02:26:49
This should delete the file too, according to the
vakh (use Gerrit instead)
2016/10/04 07:14:39
Done. Don't need to delete the file since it would
| |
134 state_ = ""; | 134 state_ = ""; |
135 return true; | 135 return true; |
136 } | 136 } |
137 | 137 |
138 ApplyUpdateResult V4Store::ProcessPartialUpdateAndWriteToDisk( | 138 ApplyUpdateResult V4Store::ProcessPartialUpdateAndWriteToDisk( |
139 const HashPrefixMap& hash_prefix_map_old, | 139 const HashPrefixMap& hash_prefix_map_old, |
140 std::unique_ptr<ListUpdateResponse> response) { | 140 std::unique_ptr<ListUpdateResponse> response) { |
141 DCHECK(response->has_response_type()); | 141 DCHECK(response->has_response_type()); |
142 DCHECK_EQ(ListUpdateResponse::PARTIAL_UPDATE, response->response_type()); | 142 DCHECK_EQ(ListUpdateResponse::PARTIAL_UPDATE, response->response_type()); |
143 | 143 |
(...skipping 79 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
223 expected_checksum = response->checksum().sha256(); | 223 expected_checksum = response->checksum().sha256(); |
224 } | 224 } |
225 | 225 |
226 base::TimeTicks before = base::TimeTicks::Now(); | 226 base::TimeTicks before = base::TimeTicks::Now(); |
227 apply_update_result = MergeUpdate(hash_prefix_map_old, hash_prefix_map, | 227 apply_update_result = MergeUpdate(hash_prefix_map_old, hash_prefix_map, |
228 raw_removals, expected_checksum); | 228 raw_removals, expected_checksum); |
229 base::TimeTicks after = base::TimeTicks::Now(); | 229 base::TimeTicks after = base::TimeTicks::Now(); |
230 if (apply_update_result != APPLY_UPDATE_SUCCESS) { | 230 if (apply_update_result != APPLY_UPDATE_SUCCESS) { |
231 return apply_update_result; | 231 return apply_update_result; |
232 } | 232 } |
233 RecordMergeUpdateTime(after - before); | 233 RecordMergeUpdateTime(after - before); |
Nathan Parker
2016/10/03 02:26:48
This mergeupdatetime entry will be for a different
vakh (use Gerrit instead)
2016/10/04 07:14:39
Done. With the latest patch, the time metrics are
vakh (use Gerrit instead)
2016/10/04 20:34:50
Actually, I removed that change to keep this CL si
| |
234 | 234 |
235 state_ = response->new_client_state(); | 235 state_ = response->new_client_state(); |
236 return APPLY_UPDATE_SUCCESS; | 236 return APPLY_UPDATE_SUCCESS; |
237 } | 237 } |
238 | 238 |
239 void V4Store::ApplyUpdate( | 239 void V4Store::ApplyUpdate( |
240 std::unique_ptr<ListUpdateResponse> response, | 240 std::unique_ptr<ListUpdateResponse> response, |
241 const scoped_refptr<base::SingleThreadTaskRunner>& callback_task_runner, | 241 const scoped_refptr<base::SingleThreadTaskRunner>& callback_task_runner, |
242 UpdatedStoreReadyCallback callback) { | 242 UpdatedStoreReadyCallback callback) { |
243 std::unique_ptr<V4Store> new_store( | 243 std::unique_ptr<V4Store> new_store( |
(...skipping 158 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
402 | 402 |
403 (*prefix_map_to_update)[prefix_size].reserve(existing_capacity + | 403 (*prefix_map_to_update)[prefix_size].reserve(existing_capacity + |
404 prefix_length_to_add); | 404 prefix_length_to_add); |
405 } | 405 } |
406 } | 406 } |
407 | 407 |
408 ApplyUpdateResult V4Store::MergeUpdate(const HashPrefixMap& old_prefixes_map, | 408 ApplyUpdateResult V4Store::MergeUpdate(const HashPrefixMap& old_prefixes_map, |
409 const HashPrefixMap& additions_map, | 409 const HashPrefixMap& additions_map, |
410 const RepeatedField<int32>* raw_removals, | 410 const RepeatedField<int32>* raw_removals, |
411 const std::string& expected_checksum) { | 411 const std::string& expected_checksum) { |
412 DCHECK(task_runner_->RunsTasksOnCurrentThread()); | |
412 DCHECK(hash_prefix_map_.empty()); | 413 DCHECK(hash_prefix_map_.empty()); |
414 | |
415 bool calculate_checksum = !expected_checksum.empty(); | |
416 if (old_prefixes_map.empty()) { | |
417 // If the old map is empty, which it is at startup, then just copy over the | |
418 // additions map. | |
419 DCHECK(!raw_removals); | |
420 hash_prefix_map_ = additions_map; | |
421 | |
422 if (calculate_checksum) { | |
423 task_runner_->PostTask( | |
Nathan Parker
2016/10/03 02:26:49
Add a comment to why this is not run synchronously
vakh (use Gerrit instead)
2016/10/04 07:14:39
Done.
| |
424 FROM_HERE, base::Bind(&V4Store::VerifyChecksum, | |
Nathan Parker
2016/10/03 02:26:49
How many CPU seconds will this take, when it does
vakh (use Gerrit instead)
2016/10/04 07:14:39
It takes 2-3 seconds on my machine.
| |
425 base::Unretained(this), expected_checksum)); | |
426 } | |
427 | |
428 return APPLY_UPDATE_SUCCESS; | |
429 } | |
430 | |
413 hash_prefix_map_.clear(); | 431 hash_prefix_map_.clear(); |
414 ReserveSpaceInPrefixMap(old_prefixes_map, &hash_prefix_map_); | 432 ReserveSpaceInPrefixMap(old_prefixes_map, &hash_prefix_map_); |
415 ReserveSpaceInPrefixMap(additions_map, &hash_prefix_map_); | 433 ReserveSpaceInPrefixMap(additions_map, &hash_prefix_map_); |
416 | 434 |
417 IteratorMap old_iterator_map; | 435 IteratorMap old_iterator_map; |
418 HashPrefix next_smallest_prefix_old; | 436 HashPrefix next_smallest_prefix_old; |
419 InitializeIteratorMap(old_prefixes_map, &old_iterator_map); | 437 InitializeIteratorMap(old_prefixes_map, &old_iterator_map); |
420 bool old_has_unmerged = GetNextSmallestUnmergedPrefix( | 438 bool old_has_unmerged = GetNextSmallestUnmergedPrefix( |
421 old_prefixes_map, old_iterator_map, &next_smallest_prefix_old); | 439 old_prefixes_map, old_iterator_map, &next_smallest_prefix_old); |
422 | 440 |
423 IteratorMap additions_iterator_map; | 441 IteratorMap additions_iterator_map; |
424 HashPrefix next_smallest_prefix_additions; | 442 HashPrefix next_smallest_prefix_additions; |
425 InitializeIteratorMap(additions_map, &additions_iterator_map); | 443 InitializeIteratorMap(additions_map, &additions_iterator_map); |
426 bool additions_has_unmerged = GetNextSmallestUnmergedPrefix( | 444 bool additions_has_unmerged = GetNextSmallestUnmergedPrefix( |
427 additions_map, additions_iterator_map, &next_smallest_prefix_additions); | 445 additions_map, additions_iterator_map, &next_smallest_prefix_additions); |
428 | 446 |
429 // Classical merge sort. | 447 // Classical merge sort. |
430 // The two constructs to merge are maps: old_prefixes_map, additions_map. | 448 // The two constructs to merge are maps: old_prefixes_map, additions_map. |
431 // At least one of the maps still has elements that need to be merged into the | 449 // At least one of the maps still has elements that need to be merged into the |
432 // new store. | 450 // new store. |
433 | 451 |
434 bool calculate_checksum = !expected_checksum.empty(); | |
435 std::unique_ptr<crypto::SecureHash> checksum_ctx( | 452 std::unique_ptr<crypto::SecureHash> checksum_ctx( |
436 crypto::SecureHash::Create(crypto::SecureHash::SHA256)); | 453 crypto::SecureHash::Create(crypto::SecureHash::SHA256)); |
437 | 454 |
438 // Keep track of the number of elements picked from the old map. This is used | 455 // Keep track of the number of elements picked from the old map. This is used |
439 // to determine which elements to drop based on the raw_removals. Note that | 456 // to determine which elements to drop based on the raw_removals. Note that |
440 // picked is not the same as merged. A picked element isn't merged if its | 457 // picked is not the same as merged. A picked element isn't merged if its |
441 // index is on the raw_removals list. | 458 // index is on the raw_removals list. |
442 int total_picked_from_old = 0; | 459 int total_picked_from_old = 0; |
443 const int* removals_iter = raw_removals ? raw_removals->begin() : nullptr; | 460 const int* removals_iter = raw_removals ? raw_removals->begin() : nullptr; |
444 while (old_has_unmerged || additions_has_unmerged) { | 461 while (old_has_unmerged || additions_has_unmerged) { |
445 // If the same hash prefix appears in the existing store and the additions | 462 // If the same hash prefix appears in the existing store and the additions |
446 // list, something is clearly wrong. Discard the update. | 463 // list, something is clearly wrong. Discard the update. |
447 if (old_has_unmerged && additions_has_unmerged && | 464 if (old_has_unmerged && additions_has_unmerged && |
448 next_smallest_prefix_old == next_smallest_prefix_additions) { | 465 next_smallest_prefix_old == next_smallest_prefix_additions) { |
449 return ADDITIONS_HAS_EXISTING_PREFIX_FAILURE; | 466 return ADDITIONS_HAS_EXISTING_PREFIX_FAILURE; |
450 } | 467 } |
451 | 468 |
452 // Select which map to pick the next hash prefix from to keep the result in | 469 // Select which map to pick the next hash prefix from to keep the result in |
453 // lexicographically sorted order. | 470 // lexicographically sorted order. |
454 bool pick_from_old = | 471 bool pick_from_old = |
455 old_has_unmerged && | 472 old_has_unmerged && |
456 (!additions_has_unmerged || | 473 (!additions_has_unmerged || |
457 (next_smallest_prefix_old < next_smallest_prefix_additions)); | 474 (next_smallest_prefix_old < next_smallest_prefix_additions)); |
458 | 475 |
459 PrefixSize next_smallest_prefix_size; | 476 PrefixSize next_smallest_prefix_size; |
460 if (pick_from_old) { | 477 if (pick_from_old) { |
461 next_smallest_prefix_size = next_smallest_prefix_old.size(); | 478 next_smallest_prefix_size = next_smallest_prefix_old.size(); |
462 | 479 |
463 // Update the iterator map, which means that we have merged one hash | 480 // Update the iterator map, which means that we have merged one hash |
464 // prefix of size |next_size_for_old| from the old store. | 481 // prefix of size |next_smallest_prefix_size| from the old store. |
465 old_iterator_map[next_smallest_prefix_size] += next_smallest_prefix_size; | 482 old_iterator_map[next_smallest_prefix_size] += next_smallest_prefix_size; |
466 | 483 |
467 if (!raw_removals || removals_iter == raw_removals->end() || | 484 if (!raw_removals || removals_iter == raw_removals->end() || |
468 *removals_iter != total_picked_from_old) { | 485 *removals_iter != total_picked_from_old) { |
469 // Append the smallest hash to the appropriate list. | 486 // Append the smallest hash to the appropriate list. |
470 hash_prefix_map_[next_smallest_prefix_size] += next_smallest_prefix_old; | 487 hash_prefix_map_[next_smallest_prefix_size] += next_smallest_prefix_old; |
471 | 488 |
472 if (calculate_checksum) { | 489 if (calculate_checksum) { |
473 checksum_ctx->Update(base::string_as_array(&next_smallest_prefix_old), | 490 checksum_ctx->Update(base::string_as_array(&next_smallest_prefix_old), |
474 next_smallest_prefix_size); | 491 next_smallest_prefix_size); |
(...skipping 38 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
513 } | 530 } |
514 | 531 |
515 if (calculate_checksum) { | 532 if (calculate_checksum) { |
516 std::string checksum(crypto::kSHA256Length, 0); | 533 std::string checksum(crypto::kSHA256Length, 0); |
517 checksum_ctx->Finish(base::string_as_array(&checksum), checksum.size()); | 534 checksum_ctx->Finish(base::string_as_array(&checksum), checksum.size()); |
518 if (checksum != expected_checksum) { | 535 if (checksum != expected_checksum) { |
519 std::string checksum_base64, expected_checksum_base64; | 536 std::string checksum_base64, expected_checksum_base64; |
520 base::Base64Encode(checksum, &checksum_base64); | 537 base::Base64Encode(checksum, &checksum_base64); |
521 base::Base64Encode(expected_checksum, &expected_checksum_base64); | 538 base::Base64Encode(expected_checksum, &expected_checksum_base64); |
522 DVLOG(1) << "Failure: Checksum mismatch: calculated: " << checksum_base64 | 539 DVLOG(1) << "Failure: Checksum mismatch: calculated: " << checksum_base64 |
523 << " expected: " << expected_checksum_base64; | 540 << "; expected: " << expected_checksum_base64 |
541 << "; store: " << *this; |
542 |
524 return CHECKSUM_MISMATCH_FAILURE; | 543 return CHECKSUM_MISMATCH_FAILURE; |
525 } | 544 } |
526 } | 545 } |
527 | 546 |
528 return APPLY_UPDATE_SUCCESS; | 547 return APPLY_UPDATE_SUCCESS; |
529 } | 548 } |
530 | 549 |
531 StoreReadResult V4Store::ReadFromDisk() { | 550 StoreReadResult V4Store::ReadFromDisk() { |
532 DCHECK(task_runner_->RunsTasksOnCurrentThread()); | 551 DCHECK(task_runner_->RunsTasksOnCurrentThread()); |
533 | 552 |
(...skipping 109 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
643 int result = hash_prefix.compare(mid_prefix); | 662 int result = hash_prefix.compare(mid_prefix); |
644 if (result == 0) { | 663 if (result == 0) { |
645 return true; | 664 return true; |
646 } else if (result < 0) { | 665 } else if (result < 0) { |
647 return HashPrefixMatches(hash_prefix, begin, mid); | 666 return HashPrefixMatches(hash_prefix, begin, mid); |
648 } else { | 667 } else { |
649 return HashPrefixMatches(hash_prefix, mid + prefix_size, end); | 668 return HashPrefixMatches(hash_prefix, mid + prefix_size, end); |
650 } | 669 } |
651 } | 670 } |
652 | 671 |
672 void V4Store::VerifyChecksum(const std::string& expected_checksum) { | |
Nathan Parker
2016/10/03 02:26:49
Maybe call this VerifyChecksumDelayed, so as to di
vakh (use Gerrit instead)
2016/10/04 07:14:39
Done.
| |
673 DCHECK(task_runner_->RunsTasksOnCurrentThread()); | |
674 | |
675 IteratorMap iterator_map; | |
676 HashPrefix next_smallest_prefix; | |
677 InitializeIteratorMap(hash_prefix_map_, &iterator_map); | |
678 bool has_unmerged = GetNextSmallestUnmergedPrefix( | |
679 hash_prefix_map_, iterator_map, &next_smallest_prefix); | |
680 | |
681 std::unique_ptr<crypto::SecureHash> checksum_ctx( | |
682 crypto::SecureHash::Create(crypto::SecureHash::SHA256)); | |
683 while (has_unmerged) { | |
684 PrefixSize next_smallest_prefix_size; | |
685 next_smallest_prefix_size = next_smallest_prefix.size(); | |
686 | |
687 // Update the iterator map, which means that we have read one hash | |
688 // prefix of size |next_smallest_prefix_size| from hash_prefix_map_. | |
689 iterator_map[next_smallest_prefix_size] += next_smallest_prefix_size; | |
690 | |
691 checksum_ctx->Update(base::string_as_array(&next_smallest_prefix), | |
692 next_smallest_prefix_size); | |
693 | |
694 // Find the next smallest unmerged element in the map. | |
695 has_unmerged = GetNextSmallestUnmergedPrefix(hash_prefix_map_, iterator_map, | |
696 &next_smallest_prefix); | |
697 } | |
698 | |
699 std::string checksum(crypto::kSHA256Length, 0); | |
700 checksum_ctx->Finish(base::string_as_array(&checksum), checksum.size()); | |
701 if (checksum == expected_checksum) { | |
702 return; | |
703 } | |
704 | |
705 std::string checksum_base64, expected_checksum_base64; | |
706 base::Base64Encode(checksum, &checksum_base64); | |
707 base::Base64Encode(expected_checksum, &expected_checksum_base64); | |
708 DVLOG(1) << "Failure: Checksum mismatch: calculated: " << checksum_base64 | |
709 << "; expected: " << expected_checksum_base64 | |
710 << "; store: " << *this; | |
711 RecordApplyUpdateResultWhenReadingFromDisk(CHECKSUM_MISMATCH_FAILURE); | |
Nathan Parker
2016/10/03 02:26:49
This should probably be a different enum, since ch
vakh (use Gerrit instead)
2016/10/04 07:14:39
Yes, but it is different. This one is: RecordApply
| |
712 | |
713 Reset(); | |
Nathan Parker
2016/10/03 02:26:49
This is a race if someone is accessing the store o
vakh (use Gerrit instead)
2016/10/04 07:14:39
Done.
| |
714 } | |
715 | |
653 } // namespace safe_browsing | 716 } // namespace safe_browsing |
OLD | NEW |