Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 // Copyright 2016 The Chromium Authors. All rights reserved. | 1 // Copyright 2016 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "base/base64.h" | 5 #include "base/base64.h" |
| 6 #include "base/bind.h" | 6 #include "base/bind.h" |
| 7 #include "base/files/file_util.h" | 7 #include "base/files/file_util.h" |
| 8 #include "base/memory/ptr_util.h" | 8 #include "base/memory/ptr_util.h" |
| 9 #include "base/metrics/histogram_macros.h" | 9 #include "base/metrics/histogram_macros.h" |
| 10 #include "base/metrics/sparse_histogram.h" | 10 #include "base/metrics/sparse_histogram.h" |
| 11 #include "base/stl_util.h" | 11 #include "base/stl_util.h" |
| 12 #include "base/strings/stringprintf.h" | 12 #include "base/strings/stringprintf.h" |
| 13 #include "components/safe_browsing_db/v4_rice.h" | 13 #include "components/safe_browsing_db/v4_rice.h" |
| 14 #include "components/safe_browsing_db/v4_store.h" | 14 #include "components/safe_browsing_db/v4_store.h" |
| 15 #include "components/safe_browsing_db/v4_store.pb.h" | 15 #include "components/safe_browsing_db/v4_store.pb.h" |
| 16 #include "content/public/browser/browser_thread.h" | |
| 16 #include "crypto/secure_hash.h" | 17 #include "crypto/secure_hash.h" |
| 17 #include "crypto/sha2.h" | 18 #include "crypto/sha2.h" |
| 18 | 19 |
| 19 using base::TimeTicks; | 20 using base::TimeTicks; |
| 20 | 21 |
| 21 namespace safe_browsing { | 22 namespace safe_browsing { |
| 22 | 23 |
| 23 namespace { | 24 namespace { |
| 24 | 25 |
| 25 const uint32_t kFileMagic = 0x600D71FE; | 26 const uint32_t kFileMagic = 0x600D71FE; |
| 26 | 27 |
| 27 const uint32_t kFileVersion = 9; | 28 const uint32_t kFileVersion = 9; |
| 28 | 29 |
| 29 std::string GetUmaSuffixForStore(const base::FilePath& file_path) { | 30 std::string GetUmaSuffixForStore(const base::FilePath& file_path) { |
| 30 return base::StringPrintf( | 31 return base::StringPrintf( |
| 31 ".%" PRIsFP, file_path.BaseName().RemoveExtension().value().c_str()); | 32 ".%" PRIsFP, file_path.BaseName().RemoveExtension().value().c_str()); |
| 32 } | 33 } |
| 33 | 34 |
| 34 void RecordTimeWithAndWithoutStore(const std::string& metric, | 35 void RecordTimeWithAndWithoutStore(const std::string& metric, |
| 35 base::TimeDelta time, | 36 base::TimeDelta time, |
| 36 const base::FilePath& file_path) { | 37 const base::FilePath& file_path) { |
| 37 std::string suffix = GetUmaSuffixForStore(file_path); | |
| 38 | |
| 39 // The histograms below are an expansion of the UMA_HISTOGRAM_LONG_TIMES | 38 // The histograms below are an expansion of the UMA_HISTOGRAM_LONG_TIMES |
| 40 // macro adapted to allow for a dynamically suffixed histogram name. | 39 // macro adapted to allow for a dynamically suffixed histogram name. |
| 41 // Note: The factory creates and owns the histogram. | 40 // Note: The factory creates and owns the histogram. |
| 42 base::HistogramBase* histogram = base::Histogram::FactoryTimeGet( | 41 base::HistogramBase* histogram = base::Histogram::FactoryTimeGet( |
| 43 metric, base::TimeDelta::FromMilliseconds(1), | 42 metric, base::TimeDelta::FromMilliseconds(1), |
| 44 base::TimeDelta::FromHours(1), 50, | 43 base::TimeDelta::FromHours(1), 50, |
| 45 base::HistogramBase::kUmaTargetedHistogramFlag); | 44 base::HistogramBase::kUmaTargetedHistogramFlag); |
| 46 if (histogram) { | 45 if (histogram) { |
| 47 histogram->AddTime(time); | 46 histogram->AddTime(time); |
| 48 } | 47 } |
| 49 | 48 |
| 49 std::string suffix = GetUmaSuffixForStore(file_path); | |
| 50 base::HistogramBase* histogram_suffix = base::Histogram::FactoryTimeGet( | 50 base::HistogramBase* histogram_suffix = base::Histogram::FactoryTimeGet( |
| 51 metric + suffix, base::TimeDelta::FromMilliseconds(1), | 51 metric + suffix, base::TimeDelta::FromMilliseconds(1), |
| 52 base::TimeDelta::FromHours(1), 50, | 52 base::TimeDelta::FromHours(1), 50, |
| 53 base::HistogramBase::kUmaTargetedHistogramFlag); | 53 base::HistogramBase::kUmaTargetedHistogramFlag); |
| 54 if (histogram_suffix) { | 54 if (histogram_suffix) { |
| 55 histogram_suffix->AddTime(time); | 55 histogram_suffix->AddTime(time); |
| 56 } | 56 } |
| 57 } | 57 } |
| 58 | 58 |
| 59 void RecordAddUnlumpedHashesTime(base::TimeDelta time) { | 59 void RecordAddUnlumpedHashesTime(base::TimeDelta time) { |
| (...skipping 107 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 167 V4Store::~V4Store() {} | 167 V4Store::~V4Store() {} |
| 168 | 168 |
| 169 std::string V4Store::DebugString() const { | 169 std::string V4Store::DebugString() const { |
| 170 std::string state_base64; | 170 std::string state_base64; |
| 171 base::Base64Encode(state_, &state_base64); | 171 base::Base64Encode(state_, &state_base64); |
| 172 | 172 |
| 173 return base::StringPrintf("path: %" PRIsFP "; state: %s", | 173 return base::StringPrintf("path: %" PRIsFP "; state: %s", |
| 174 store_path_.value().c_str(), state_base64.c_str()); | 174 store_path_.value().c_str(), state_base64.c_str()); |
| 175 } | 175 } |
| 176 | 176 |
| 177 bool V4Store::Reset() { | 177 void V4Store::Reset() { |
| 178 // TODO(vakh): Implement skeleton. | 178 DCHECK(task_runner_->RunsTasksOnCurrentThread()); |
| 179 | |
| 180 // Posted to IO thread to avoid the race condition between | |
| 181 // |GetMatchingHashPrefix()|, which runs on the IO thread, and |Reset()|, | |
| 182 // which runs on task runner. | |
|
Scott Hess - ex-Googler
2016/10/05 04:42:13
I may be reading things wrong, but I think now wha
vakh (use Gerrit instead)
2016/10/05 23:49:32
I've deleted the Reset code in the V4Database sinc
| |
| 183 content::BrowserThread::PostTask( | |
| 184 content::BrowserThread::IO, FROM_HERE, | |
| 185 base::Bind(&V4Store::ResetOnIOThread, base::Unretained(this))); | |
| 186 } | |
| 187 | |
| 188 void V4Store::ResetOnIOThread() { | |
| 189 DCHECK_CURRENTLY_ON(content::BrowserThread::IO); | |
| 190 hash_prefix_map_.clear(); | |
| 179 state_ = ""; | 191 state_ = ""; |
| 180 return true; | |
| 181 } | 192 } |
| 182 | 193 |
| 183 ApplyUpdateResult V4Store::ProcessPartialUpdateAndWriteToDisk( | 194 ApplyUpdateResult V4Store::ProcessPartialUpdateAndWriteToDisk( |
| 184 const HashPrefixMap& hash_prefix_map_old, | 195 const HashPrefixMap& hash_prefix_map_old, |
| 185 std::unique_ptr<ListUpdateResponse> response) { | 196 std::unique_ptr<ListUpdateResponse> response) { |
| 186 DCHECK(response->has_response_type()); | 197 DCHECK(response->has_response_type()); |
| 187 DCHECK_EQ(ListUpdateResponse::PARTIAL_UPDATE, response->response_type()); | 198 DCHECK_EQ(ListUpdateResponse::PARTIAL_UPDATE, response->response_type()); |
| 188 | 199 |
| 189 TimeTicks before = TimeTicks::Now(); | 200 TimeTicks before = TimeTicks::Now(); |
| 190 ApplyUpdateResult result = ProcessUpdate(hash_prefix_map_old, response); | 201 ApplyUpdateResult result = ProcessUpdate(hash_prefix_map_old, response); |
| 191 if (result == APPLY_UPDATE_SUCCESS) { | 202 if (result == APPLY_UPDATE_SUCCESS) { |
| 192 RecordProcessPartialUpdateTime(TimeTicks::Now() - before, store_path_); | 203 RecordProcessPartialUpdateTime(TimeTicks::Now() - before, store_path_); |
| 193 // TODO(vakh): Create a ListUpdateResponse containing RICE encoded | 204 // TODO(vakh): Create a ListUpdateResponse containing RICE encoded |
| 194 // hash prefixes and response_type as FULL_UPDATE, and write that to disk. | 205 // hash prefixes and response_type as FULL_UPDATE, and write that to disk. |
| 195 } | 206 } |
| 196 return result; | 207 return result; |
| 197 } | 208 } |
| 198 | 209 |
| 199 ApplyUpdateResult V4Store::ProcessFullUpdateAndWriteToDisk( | 210 ApplyUpdateResult V4Store::ProcessFullUpdateAndWriteToDisk( |
| 200 std::unique_ptr<ListUpdateResponse> response) { | 211 std::unique_ptr<ListUpdateResponse> response) { |
| 201 TimeTicks before = TimeTicks::Now(); | 212 TimeTicks before = TimeTicks::Now(); |
| 202 ApplyUpdateResult result = ProcessFullUpdate(response); | 213 ApplyUpdateResult result = ProcessFullUpdate(response); |
| 203 if (result == APPLY_UPDATE_SUCCESS) { | 214 if (result == APPLY_UPDATE_SUCCESS) { |
| 215 RecordProcessFullUpdateTime(TimeTicks::Now() - before, store_path_); | |
| 204 RecordStoreWriteResult(WriteToDisk(std::move(response))); | 216 RecordStoreWriteResult(WriteToDisk(std::move(response))); |
| 205 RecordProcessFullUpdateTime(TimeTicks::Now() - before, store_path_); | |
| 206 } | 217 } |
| 207 return result; | 218 return result; |
| 208 } | 219 } |
| 209 | 220 |
| 210 ApplyUpdateResult V4Store::ProcessFullUpdate( | 221 ApplyUpdateResult V4Store::ProcessFullUpdate( |
| 211 const std::unique_ptr<ListUpdateResponse>& response) { | 222 const std::unique_ptr<ListUpdateResponse>& response) { |
| 212 DCHECK(response->has_response_type()); | 223 DCHECK(response->has_response_type()); |
| 213 DCHECK_EQ(ListUpdateResponse::FULL_UPDATE, response->response_type()); | 224 DCHECK_EQ(ListUpdateResponse::FULL_UPDATE, response->response_type()); |
| 214 // TODO(vakh): For a full update, we don't need to process the update in | 225 // TODO(vakh): For a full update, we don't need to process the update in |
| 215 // lexicographical order to store it, but we do need to do that for calculating | 226 // lexicographical order to store it, but we do need to do that for calculating |
| (...skipping 87 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 303 } else { | 314 } else { |
| 304 DVLOG(1) << "Failure: ApplyUpdate: reason: " << apply_update_result | 315 DVLOG(1) << "Failure: ApplyUpdate: reason: " << apply_update_result |
| 305 << "; store: " << *this; | 316 << "; store: " << *this; |
| 306 // new_store failed updating. Pass a nullptr to the callback. | 317 // new_store failed updating. Pass a nullptr to the callback. |
| 307 callback_task_runner->PostTask(FROM_HERE, base::Bind(callback, nullptr)); | 318 callback_task_runner->PostTask(FROM_HERE, base::Bind(callback, nullptr)); |
| 308 } | 319 } |
| 309 | 320 |
| 310 RecordApplyUpdateResult(apply_update_result); | 321 RecordApplyUpdateResult(apply_update_result); |
| 311 } | 322 } |
| 312 | 323 |
| 313 // static | |
| 314 ApplyUpdateResult V4Store::UpdateHashPrefixMapFromAdditions( | 324 ApplyUpdateResult V4Store::UpdateHashPrefixMapFromAdditions( |
| 315 const RepeatedPtrField<ThreatEntrySet>& additions, | 325 const RepeatedPtrField<ThreatEntrySet>& additions, |
| 316 HashPrefixMap* additions_map) { | 326 HashPrefixMap* additions_map) { |
| 317 for (const auto& addition : additions) { | 327 for (const auto& addition : additions) { |
| 318 ApplyUpdateResult apply_update_result = APPLY_UPDATE_SUCCESS; | 328 ApplyUpdateResult apply_update_result = APPLY_UPDATE_SUCCESS; |
| 319 const CompressionType compression_type = addition.compression_type(); | 329 const CompressionType compression_type = addition.compression_type(); |
| 320 if (compression_type == RAW) { | 330 if (compression_type == RAW) { |
| 321 DCHECK(addition.has_raw_hashes()); | 331 DCHECK(addition.has_raw_hashes()); |
| 322 DCHECK(addition.raw_hashes().has_raw_hashes()); | 332 DCHECK(addition.raw_hashes().has_raw_hashes()); |
| 323 | 333 |
| (...skipping 117 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 441 | 451 |
| 442 (*prefix_map_to_update)[prefix_size].reserve(existing_capacity + | 452 (*prefix_map_to_update)[prefix_size].reserve(existing_capacity + |
| 443 prefix_length_to_add); | 453 prefix_length_to_add); |
| 444 } | 454 } |
| 445 } | 455 } |
| 446 | 456 |
| 447 ApplyUpdateResult V4Store::MergeUpdate(const HashPrefixMap& old_prefixes_map, | 457 ApplyUpdateResult V4Store::MergeUpdate(const HashPrefixMap& old_prefixes_map, |
| 448 const HashPrefixMap& additions_map, | 458 const HashPrefixMap& additions_map, |
| 449 const RepeatedField<int32>* raw_removals, | 459 const RepeatedField<int32>* raw_removals, |
| 450 const std::string& expected_checksum) { | 460 const std::string& expected_checksum) { |
| 461 DCHECK(task_runner_->RunsTasksOnCurrentThread()); | |
| 451 DCHECK(hash_prefix_map_.empty()); | 462 DCHECK(hash_prefix_map_.empty()); |
| 463 | |
| 464 bool calculate_checksum = !expected_checksum.empty(); | |
| 465 if (old_prefixes_map.empty()) { | |
| 466 // If the old map is empty, which it is at startup, then just copy over the | |
| 467 // additions map. | |
| 468 DCHECK(!raw_removals); | |
| 469 hash_prefix_map_ = additions_map; | |
| 470 | |
| 471 if (calculate_checksum) { | |
| 472 // Calculate the checksum asynchronously later and if it doesn't match, | |
| 473 // reset the store. This is done so that the SafeBrowsing DB loads | |
| 474 // quickly, because navigation operations are not allowed until the DB | |
| 475 // loads. | |
| 476 task_runner_->PostTask( | |
| 477 FROM_HERE, base::Bind(&V4Store::VerifyChecksumDelayed, | |
| 478 base::Unretained(this), expected_checksum)); | |
| 479 } | |
| 480 | |
| 481 return APPLY_UPDATE_SUCCESS; | |
| 482 } | |
| 483 | |
| 452 hash_prefix_map_.clear(); | 484 hash_prefix_map_.clear(); |
| 453 ReserveSpaceInPrefixMap(old_prefixes_map, &hash_prefix_map_); | 485 ReserveSpaceInPrefixMap(old_prefixes_map, &hash_prefix_map_); |
| 454 ReserveSpaceInPrefixMap(additions_map, &hash_prefix_map_); | 486 ReserveSpaceInPrefixMap(additions_map, &hash_prefix_map_); |
| 455 | 487 |
| 456 IteratorMap old_iterator_map; | 488 IteratorMap old_iterator_map; |
| 457 HashPrefix next_smallest_prefix_old; | 489 HashPrefix next_smallest_prefix_old; |
| 458 InitializeIteratorMap(old_prefixes_map, &old_iterator_map); | 490 InitializeIteratorMap(old_prefixes_map, &old_iterator_map); |
| 459 bool old_has_unmerged = GetNextSmallestUnmergedPrefix( | 491 bool old_has_unmerged = GetNextSmallestUnmergedPrefix( |
| 460 old_prefixes_map, old_iterator_map, &next_smallest_prefix_old); | 492 old_prefixes_map, old_iterator_map, &next_smallest_prefix_old); |
| 461 | 493 |
| 462 IteratorMap additions_iterator_map; | 494 IteratorMap additions_iterator_map; |
| 463 HashPrefix next_smallest_prefix_additions; | 495 HashPrefix next_smallest_prefix_additions; |
| 464 InitializeIteratorMap(additions_map, &additions_iterator_map); | 496 InitializeIteratorMap(additions_map, &additions_iterator_map); |
| 465 bool additions_has_unmerged = GetNextSmallestUnmergedPrefix( | 497 bool additions_has_unmerged = GetNextSmallestUnmergedPrefix( |
| 466 additions_map, additions_iterator_map, &next_smallest_prefix_additions); | 498 additions_map, additions_iterator_map, &next_smallest_prefix_additions); |
| 467 | 499 |
| 468 // Classical merge sort. | 500 // Classical merge sort. |
| 469 // The two constructs to merge are maps: old_prefixes_map, additions_map. | 501 // The two constructs to merge are maps: old_prefixes_map, additions_map. |
| 470 // At least one of the maps still has elements that need to be merged into the | 502 // At least one of the maps still has elements that need to be merged into the |
| 471 // new store. | 503 // new store. |
| 472 | 504 |
| 473 bool calculate_checksum = !expected_checksum.empty(); | |
| 474 std::unique_ptr<crypto::SecureHash> checksum_ctx( | 505 std::unique_ptr<crypto::SecureHash> checksum_ctx( |
| 475 crypto::SecureHash::Create(crypto::SecureHash::SHA256)); | 506 crypto::SecureHash::Create(crypto::SecureHash::SHA256)); |
| 476 | 507 |
| 477 // Keep track of the number of elements picked from the old map. This is used | 508 // Keep track of the number of elements picked from the old map. This is used |
| 478 // to determine which elements to drop based on the raw_removals. Note that | 509 // to determine which elements to drop based on the raw_removals. Note that |
| 479 // picked is not the same as merged. A picked element isn't merged if its | 510 // picked is not the same as merged. A picked element isn't merged if its |
| 480 // index is on the raw_removals list. | 511 // index is on the raw_removals list. |
| 481 int total_picked_from_old = 0; | 512 int total_picked_from_old = 0; |
| 482 const int* removals_iter = raw_removals ? raw_removals->begin() : nullptr; | 513 const int* removals_iter = raw_removals ? raw_removals->begin() : nullptr; |
| 483 while (old_has_unmerged || additions_has_unmerged) { | 514 while (old_has_unmerged || additions_has_unmerged) { |
| 484 // If the same hash prefix appears in the existing store and the additions | 515 // If the same hash prefix appears in the existing store and the additions |
| 485 // list, something is clearly wrong. Discard the update. | 516 // list, something is clearly wrong. Discard the update. |
| 486 if (old_has_unmerged && additions_has_unmerged && | 517 if (old_has_unmerged && additions_has_unmerged && |
| 487 next_smallest_prefix_old == next_smallest_prefix_additions) { | 518 next_smallest_prefix_old == next_smallest_prefix_additions) { |
| 488 return ADDITIONS_HAS_EXISTING_PREFIX_FAILURE; | 519 return ADDITIONS_HAS_EXISTING_PREFIX_FAILURE; |
| 489 } | 520 } |
| 490 | 521 |
| 491 // Select which map to pick the next hash prefix from to keep the result in | 522 // Select which map to pick the next hash prefix from to keep the result in |
| 492 // lexicographically sorted order. | 523 // lexicographically sorted order. |
| 493 bool pick_from_old = | 524 bool pick_from_old = |
| 494 old_has_unmerged && | 525 old_has_unmerged && |
| 495 (!additions_has_unmerged || | 526 (!additions_has_unmerged || |
| 496 (next_smallest_prefix_old < next_smallest_prefix_additions)); | 527 (next_smallest_prefix_old < next_smallest_prefix_additions)); |
| 497 | 528 |
| 498 PrefixSize next_smallest_prefix_size; | 529 PrefixSize next_smallest_prefix_size; |
| 499 if (pick_from_old) { | 530 if (pick_from_old) { |
| 500 next_smallest_prefix_size = next_smallest_prefix_old.size(); | 531 next_smallest_prefix_size = next_smallest_prefix_old.size(); |
| 501 | 532 |
| 502 // Update the iterator map, which means that we have merged one hash | 533 // Update the iterator map, which means that we have merged one hash |
| 503 // prefix of size |next_size_for_old| from the old store. | 534 // prefix of size |next_smallest_prefix_size| from the old store. |
| 504 old_iterator_map[next_smallest_prefix_size] += next_smallest_prefix_size; | 535 old_iterator_map[next_smallest_prefix_size] += next_smallest_prefix_size; |
| 505 | 536 |
| 506 if (!raw_removals || removals_iter == raw_removals->end() || | 537 if (!raw_removals || removals_iter == raw_removals->end() || |
| 507 *removals_iter != total_picked_from_old) { | 538 *removals_iter != total_picked_from_old) { |
| 508 // Append the smallest hash to the appropriate list. | 539 // Append the smallest hash to the appropriate list. |
| 509 hash_prefix_map_[next_smallest_prefix_size] += next_smallest_prefix_old; | 540 hash_prefix_map_[next_smallest_prefix_size] += next_smallest_prefix_old; |
| 510 | 541 |
| 511 if (calculate_checksum) { | 542 if (calculate_checksum) { |
| 512 checksum_ctx->Update(base::string_as_array(&next_smallest_prefix_old), | 543 checksum_ctx->Update(base::string_as_array(&next_smallest_prefix_old), |
| 513 next_smallest_prefix_size); | 544 next_smallest_prefix_size); |
| (...skipping 38 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 552 } | 583 } |
| 553 | 584 |
| 554 if (calculate_checksum) { | 585 if (calculate_checksum) { |
| 555 std::string checksum(crypto::kSHA256Length, 0); | 586 std::string checksum(crypto::kSHA256Length, 0); |
| 556 checksum_ctx->Finish(base::string_as_array(&checksum), checksum.size()); | 587 checksum_ctx->Finish(base::string_as_array(&checksum), checksum.size()); |
| 557 if (checksum != expected_checksum) { | 588 if (checksum != expected_checksum) { |
| 558 std::string checksum_base64, expected_checksum_base64; | 589 std::string checksum_base64, expected_checksum_base64; |
| 559 base::Base64Encode(checksum, &checksum_base64); | 590 base::Base64Encode(checksum, &checksum_base64); |
| 560 base::Base64Encode(expected_checksum, &expected_checksum_base64); | 591 base::Base64Encode(expected_checksum, &expected_checksum_base64); |
| 561 DVLOG(1) << "Failure: Checksum mismatch: calculated: " << checksum_base64 | 592 DVLOG(1) << "Failure: Checksum mismatch: calculated: " << checksum_base64 |
| 562 << " expected: " << expected_checksum_base64; | 593 << "; expected: " << expected_checksum_base64 |
| 594 << "; store: " << *this; | |
| 595 ; | |
| 563 return CHECKSUM_MISMATCH_FAILURE; | 596 return CHECKSUM_MISMATCH_FAILURE; |
| 564 } | 597 } |
| 565 } | 598 } |
| 566 | 599 |
| 567 return APPLY_UPDATE_SUCCESS; | 600 return APPLY_UPDATE_SUCCESS; |
| 568 } | 601 } |
| 569 | 602 |
| 570 StoreReadResult V4Store::ReadFromDisk() { | 603 StoreReadResult V4Store::ReadFromDisk() { |
| 571 DCHECK(task_runner_->RunsTasksOnCurrentThread()); | 604 DCHECK(task_runner_->RunsTasksOnCurrentThread()); |
| 572 | 605 |
| (...skipping 69 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 642 DCHECK_EQ(file_format_string.size(), written); | 675 DCHECK_EQ(file_format_string.size(), written); |
| 643 | 676 |
| 644 if (!base::Move(new_filename, store_path_)) { | 677 if (!base::Move(new_filename, store_path_)) { |
| 645 return UNABLE_TO_RENAME_FAILURE; | 678 return UNABLE_TO_RENAME_FAILURE; |
| 646 } | 679 } |
| 647 | 680 |
| 648 return WRITE_SUCCESS; | 681 return WRITE_SUCCESS; |
| 649 } | 682 } |
| 650 | 683 |
| 651 HashPrefix V4Store::GetMatchingHashPrefix(const FullHash& full_hash) { | 684 HashPrefix V4Store::GetMatchingHashPrefix(const FullHash& full_hash) { |
| 685 DCHECK_CURRENTLY_ON(content::BrowserThread::IO); | |
| 652 // It should never be the case that more than one hash prefix matches a given | 686 // It should never be the case that more than one hash prefix matches a given |
| 653 // full hash. However, if that happens, this method returns any one of them. | 687 // full hash. However, if that happens, this method returns any one of them. |
| 654 // It does not guarantee which one of those will be returned. | 688 // It does not guarantee which one of those will be returned. |
| 655 DCHECK_EQ(32u, full_hash.size()); | 689 DCHECK_EQ(32u, full_hash.size()); |
| 656 for (const auto& pair : hash_prefix_map_) { | 690 for (const auto& pair : hash_prefix_map_) { |
| 657 const PrefixSize& prefix_size = pair.first; | 691 const PrefixSize& prefix_size = pair.first; |
| 658 const HashPrefixes& hash_prefixes = pair.second; | 692 const HashPrefixes& hash_prefixes = pair.second; |
| 659 HashPrefix hash_prefix = full_hash.substr(0, prefix_size); | 693 HashPrefix hash_prefix = full_hash.substr(0, prefix_size); |
| 660 if (HashPrefixMatches(hash_prefix, hash_prefixes.begin(), | 694 if (HashPrefixMatches(hash_prefix, hash_prefixes.begin(), |
| 661 hash_prefixes.end())) { | 695 hash_prefixes.end())) { |
| (...skipping 19 matching lines...) Expand all Loading... | |
| 681 int result = hash_prefix.compare(mid_prefix); | 715 int result = hash_prefix.compare(mid_prefix); |
| 682 if (result == 0) { | 716 if (result == 0) { |
| 683 return true; | 717 return true; |
| 684 } else if (result < 0) { | 718 } else if (result < 0) { |
| 685 return HashPrefixMatches(hash_prefix, begin, mid); | 719 return HashPrefixMatches(hash_prefix, begin, mid); |
| 686 } else { | 720 } else { |
| 687 return HashPrefixMatches(hash_prefix, mid + prefix_size, end); | 721 return HashPrefixMatches(hash_prefix, mid + prefix_size, end); |
| 688 } | 722 } |
| 689 } | 723 } |
| 690 | 724 |
| 725 void V4Store::VerifyChecksumDelayed(const std::string& expected_checksum) { | |
| 726 DCHECK(task_runner_->RunsTasksOnCurrentThread()); | |
| 727 | |
| 728 IteratorMap iterator_map; | |
| 729 HashPrefix next_smallest_prefix; | |
| 730 InitializeIteratorMap(hash_prefix_map_, &iterator_map); | |
| 731 bool has_unmerged = GetNextSmallestUnmergedPrefix( | |
| 732 hash_prefix_map_, iterator_map, &next_smallest_prefix); | |
| 733 | |
| 734 std::unique_ptr<crypto::SecureHash> checksum_ctx( | |
| 735 crypto::SecureHash::Create(crypto::SecureHash::SHA256)); | |
| 736 while (has_unmerged) { | |
| 737 PrefixSize next_smallest_prefix_size; | |
| 738 next_smallest_prefix_size = next_smallest_prefix.size(); | |
| 739 | |
| 740 // Update the iterator map, which means that we have read one hash | |
| 741 // prefix of size |next_smallest_prefix_size| from hash_prefix_map_. | |
| 742 iterator_map[next_smallest_prefix_size] += next_smallest_prefix_size; | |
| 743 | |
| 744 checksum_ctx->Update(base::string_as_array(&next_smallest_prefix), | |
| 745 next_smallest_prefix_size); | |
| 746 | |
| 747 // Find the next smallest unmerged element in the map. | |
| 748 has_unmerged = GetNextSmallestUnmergedPrefix(hash_prefix_map_, iterator_map, | |
| 749 &next_smallest_prefix); | |
| 750 } | |
| 751 | |
| 752 std::string checksum(crypto::kSHA256Length, 0); | |
| 753 checksum_ctx->Finish(base::string_as_array(&checksum), checksum.size()); | |
| 754 if (checksum == expected_checksum) { | |
| 755 return; | |
| 756 } | |
| 757 | |
| 758 std::string checksum_base64, expected_checksum_base64; | |
| 759 base::Base64Encode(checksum, &checksum_base64); | |
| 760 base::Base64Encode(expected_checksum, &expected_checksum_base64); | |
| 761 DVLOG(1) << "Failure: Checksum mismatch: calculated: " << checksum_base64 | |
| 762 << "; expected: " << expected_checksum_base64 | |
| 763 << "; store: " << *this; | |
| 764 RecordApplyUpdateResultWhenReadingFromDisk(CHECKSUM_MISMATCH_FAILURE); | |
| 765 | |
| 766 Reset(); | |
| 767 } | |
| 768 | |
| 691 } // namespace safe_browsing | 769 } // namespace safe_browsing |
| OLD | NEW |