Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 // Copyright 2016 The Chromium Authors. All rights reserved. | 1 // Copyright 2016 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "base/base64.h" | 5 #include "base/base64.h" |
| 6 #include "base/bind.h" | 6 #include "base/bind.h" |
| 7 #include "base/files/file_util.h" | 7 #include "base/files/file_util.h" |
| 8 #include "base/memory/ptr_util.h" | 8 #include "base/memory/ptr_util.h" |
| 9 #include "base/metrics/histogram_macros.h" | 9 #include "base/metrics/histogram_macros.h" |
| 10 #include "base/metrics/sparse_histogram.h" | 10 #include "base/metrics/sparse_histogram.h" |
| (...skipping 184 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 195 expected_checksum = response->checksum().sha256(); | 195 expected_checksum = response->checksum().sha256(); |
| 196 } | 196 } |
| 197 | 197 |
| 198 apply_update_result = MergeUpdate(hash_prefix_map_old, hash_prefix_map, | 198 apply_update_result = MergeUpdate(hash_prefix_map_old, hash_prefix_map, |
| 199 raw_removals, expected_checksum); | 199 raw_removals, expected_checksum); |
| 200 if (apply_update_result != APPLY_UPDATE_SUCCESS) { | 200 if (apply_update_result != APPLY_UPDATE_SUCCESS) { |
| 201 return apply_update_result; | 201 return apply_update_result; |
| 202 } | 202 } |
| 203 | 203 |
| 204 state_ = response->new_client_state(); | 204 state_ = response->new_client_state(); |
| 205 DVLOG(1) << "Update successful: " << *this; | |
|
Nathan Parker
2016/08/12 23:01:49
These are fairly verbose -- you should plan to remove them.
vakh (use Gerrit instead)
2016/08/13 00:15:14
Ack. Done.
Also: http://crbug.com/637468
| |
| 205 return APPLY_UPDATE_SUCCESS; | 206 return APPLY_UPDATE_SUCCESS; |
| 206 } | 207 } |
| 207 | 208 |
| 208 void V4Store::ApplyUpdate( | 209 void V4Store::ApplyUpdate( |
| 209 std::unique_ptr<ListUpdateResponse> response, | 210 std::unique_ptr<ListUpdateResponse> response, |
| 210 const scoped_refptr<base::SingleThreadTaskRunner>& callback_task_runner, | 211 const scoped_refptr<base::SingleThreadTaskRunner>& callback_task_runner, |
| 211 UpdatedStoreReadyCallback callback) { | 212 UpdatedStoreReadyCallback callback) { |
| 213 DVLOG(1) << "ApplyUpdate: response_size: " << response->ByteSize() << " : " | |
| 214 << *this; | |
| 215 | |
| 212 std::unique_ptr<V4Store> new_store( | 216 std::unique_ptr<V4Store> new_store( |
| 213 new V4Store(this->task_runner_, this->store_path_)); | 217 new V4Store(this->task_runner_, this->store_path_)); |
| 214 | 218 |
| 215 ApplyUpdateResult apply_update_result; | 219 ApplyUpdateResult apply_update_result; |
| 216 if (response->response_type() == ListUpdateResponse::PARTIAL_UPDATE) { | 220 if (response->response_type() == ListUpdateResponse::PARTIAL_UPDATE) { |
| 217 apply_update_result = new_store->ProcessPartialUpdateAndWriteToDisk( | 221 apply_update_result = new_store->ProcessPartialUpdateAndWriteToDisk( |
| 218 hash_prefix_map_, std::move(response)); | 222 hash_prefix_map_, std::move(response)); |
| 219 } else if (response->response_type() == ListUpdateResponse::FULL_UPDATE) { | 223 } else if (response->response_type() == ListUpdateResponse::FULL_UPDATE) { |
| 220 apply_update_result = | 224 apply_update_result = |
| 221 new_store->ProcessFullUpdateAndWriteToDisk(std::move(response)); | 225 new_store->ProcessFullUpdateAndWriteToDisk(std::move(response)); |
| 222 } else { | 226 } else { |
| 223 apply_update_result = UNEXPECTED_RESPONSE_TYPE_FAILURE; | 227 apply_update_result = UNEXPECTED_RESPONSE_TYPE_FAILURE; |
| 224 NOTREACHED() << "Unexpected response type: " << response->response_type(); | 228 NOTREACHED() << "Failure: Unexpected response type: " |
| 229 << response->response_type(); | |
| 225 } | 230 } |
| 226 | 231 |
| 227 if (apply_update_result == APPLY_UPDATE_SUCCESS) { | 232 if (apply_update_result == APPLY_UPDATE_SUCCESS) { |
| 228 // new_store is done updating, pass it to the callback. | 233 // new_store is done updating, pass it to the callback. |
| 229 callback_task_runner->PostTask( | 234 callback_task_runner->PostTask( |
| 230 FROM_HERE, base::Bind(callback, base::Passed(&new_store))); | 235 FROM_HERE, base::Bind(callback, base::Passed(&new_store))); |
| 231 } else { | 236 } else { |
| 232 DVLOG(1) << "ApplyUpdate failed: reason: " << apply_update_result | 237 DVLOG(1) << "Failure: ApplyUpdate: reason: " << apply_update_result |
| 233 << "; store: " << *this; | 238 << "; store: " << *this; |
| 234 // new_store failed updating. Pass a nullptr to the callback. | 239 // new_store failed updating. Pass a nullptr to the callback. |
| 235 callback_task_runner->PostTask(FROM_HERE, base::Bind(callback, nullptr)); | 240 callback_task_runner->PostTask(FROM_HERE, base::Bind(callback, nullptr)); |
| 236 } | 241 } |
| 237 | 242 |
| 238 RecordApplyUpdateResult(apply_update_result); | 243 RecordApplyUpdateResult(apply_update_result); |
| 239 } | 244 } |
| 240 | 245 |
| 241 // static | 246 // static |
| 242 ApplyUpdateResult V4Store::UpdateHashPrefixMapFromAdditions( | 247 ApplyUpdateResult V4Store::UpdateHashPrefixMapFromAdditions( |
| (...skipping 51 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 294 NOTREACHED(); | 299 NOTREACHED(); |
| 295 return PREFIX_SIZE_TOO_SMALL_FAILURE; | 300 return PREFIX_SIZE_TOO_SMALL_FAILURE; |
| 296 } | 301 } |
| 297 if (prefix_size > kMaxHashPrefixLength) { | 302 if (prefix_size > kMaxHashPrefixLength) { |
| 298 NOTREACHED(); | 303 NOTREACHED(); |
| 299 return PREFIX_SIZE_TOO_LARGE_FAILURE; | 304 return PREFIX_SIZE_TOO_LARGE_FAILURE; |
| 300 } | 305 } |
| 301 if (lumped_hashes.size() % prefix_size != 0) { | 306 if (lumped_hashes.size() % prefix_size != 0) { |
| 302 return ADDITIONS_SIZE_UNEXPECTED_FAILURE; | 307 return ADDITIONS_SIZE_UNEXPECTED_FAILURE; |
| 303 } | 308 } |
| 309 | |
| 304 // TODO(vakh): Figure out a way to avoid the following copy operation. | 310 // TODO(vakh): Figure out a way to avoid the following copy operation. |
| 305 (*additions_map)[prefix_size] = lumped_hashes; | 311 (*additions_map)[prefix_size] = lumped_hashes; |
| 306 return APPLY_UPDATE_SUCCESS; | 312 return APPLY_UPDATE_SUCCESS; |
| 307 } | 313 } |
| 308 | 314 |
| 309 // static | 315 // static |
| 310 bool V4Store::GetNextSmallestUnmergedPrefix( | 316 bool V4Store::GetNextSmallestUnmergedPrefix( |
| 311 const HashPrefixMap& hash_prefix_map, | 317 const HashPrefixMap& hash_prefix_map, |
| 312 const IteratorMap& iterator_map, | 318 const IteratorMap& iterator_map, |
| 313 HashPrefix* smallest_hash_prefix) { | 319 HashPrefix* smallest_hash_prefix) { |
| (...skipping 74 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 388 // to determine which elements to drop based on the raw_removals. Note that | 394 // to determine which elements to drop based on the raw_removals. Note that |
| 389 // picked is not the same as merged. A picked element isn't merged if its | 395 // picked is not the same as merged. A picked element isn't merged if its |
| 390 // index is on the raw_removals list. | 396 // index is on the raw_removals list. |
| 391 int total_picked_from_old = 0; | 397 int total_picked_from_old = 0; |
| 392 const int* removals_iter = raw_removals ? raw_removals->begin() : nullptr; | 398 const int* removals_iter = raw_removals ? raw_removals->begin() : nullptr; |
| 393 while (old_has_unmerged || additions_has_unmerged) { | 399 while (old_has_unmerged || additions_has_unmerged) { |
| 394 // If the same hash prefix appears in the existing store and the additions | 400 // If the same hash prefix appears in the existing store and the additions |
| 395 // list, something is clearly wrong. Discard the update. | 401 // list, something is clearly wrong. Discard the update. |
| 396 if (old_has_unmerged && additions_has_unmerged && | 402 if (old_has_unmerged && additions_has_unmerged && |
| 397 next_smallest_prefix_old == next_smallest_prefix_additions) { | 403 next_smallest_prefix_old == next_smallest_prefix_additions) { |
| 404 DVLOG(1) << "Failure: ADDITIONS_HAS_EXISTING_PREFIX_FAILURE"; | |
|
Nathan Parker
2016/08/12 23:01:49
Can you log this further up instead? Just logging the failure name at the return site adds little context.
vakh (use Gerrit instead)
2016/08/13 00:15:14
Done.
| |
| 398 return ADDITIONS_HAS_EXISTING_PREFIX_FAILURE; | 405 return ADDITIONS_HAS_EXISTING_PREFIX_FAILURE; |
| 399 } | 406 } |
| 400 | 407 |
| 401 // Select which map to pick the next hash prefix from to keep the result in | 408 // Select which map to pick the next hash prefix from to keep the result in |
| 402 // lexographically sorted order. | 409 // lexographically sorted order. |
| 403 bool pick_from_old = | 410 bool pick_from_old = |
| 404 old_has_unmerged && | 411 old_has_unmerged && |
| 405 (!additions_has_unmerged || | 412 (!additions_has_unmerged || |
| 406 (next_smallest_prefix_old < next_smallest_prefix_additions)); | 413 (next_smallest_prefix_old < next_smallest_prefix_additions)); |
| 407 | 414 |
| (...skipping 42 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 450 next_smallest_prefix_size; | 457 next_smallest_prefix_size; |
| 451 | 458 |
| 452 // Find the next smallest unmerged element in the additions map. | 459 // Find the next smallest unmerged element in the additions map. |
| 453 additions_has_unmerged = | 460 additions_has_unmerged = |
| 454 GetNextSmallestUnmergedPrefix(additions_map, additions_iterator_map, | 461 GetNextSmallestUnmergedPrefix(additions_map, additions_iterator_map, |
| 455 &next_smallest_prefix_additions); | 462 &next_smallest_prefix_additions); |
| 456 } | 463 } |
| 457 } | 464 } |
| 458 | 465 |
| 459 if (raw_removals && removals_iter != raw_removals->end()) { | 466 if (raw_removals && removals_iter != raw_removals->end()) { |
| 467 DVLOG(1) << "Failure: REMOVALS_INDEX_TOO_LARGE_FAILURE"; | |
| 460 return REMOVALS_INDEX_TOO_LARGE_FAILURE; | 468 return REMOVALS_INDEX_TOO_LARGE_FAILURE; |
| 461 } | 469 } |
| 462 | 470 |
| 463 if (calculate_checksum) { | 471 if (calculate_checksum) { |
| 464 std::string checksum(crypto::kSHA256Length, 0); | 472 std::string checksum(crypto::kSHA256Length, 0); |
| 465 checksum_ctx->Finish(string_as_array(&checksum), checksum.size()); | 473 checksum_ctx->Finish(string_as_array(&checksum), checksum.size()); |
| 466 if (checksum != expected_checksum) { | 474 if (checksum != expected_checksum) { |
| 467 std::string checksum_base64, expected_checksum_base64; | 475 std::string checksum_base64, expected_checksum_base64; |
| 468 base::Base64Encode(checksum, &checksum_base64); | 476 base::Base64Encode(checksum, &checksum_base64); |
| 469 base::Base64Encode(expected_checksum, &expected_checksum_base64); | 477 base::Base64Encode(expected_checksum, &expected_checksum_base64); |
| 470 DVLOG(1) << "Checksum failed: calculated: " << checksum_base64 | 478 DVLOG(1) << "Failure: Checksum mismatch: calculated: " << checksum_base64 |
| 471 << "expected: " << expected_checksum_base64; | 479 << " expected: " << expected_checksum_base64; |
| 472 return CHECKSUM_MISMATCH_FAILURE; | 480 return CHECKSUM_MISMATCH_FAILURE; |
| 473 } | 481 } |
| 474 } | 482 } |
| 475 | 483 |
| 476 return APPLY_UPDATE_SUCCESS; | 484 return APPLY_UPDATE_SUCCESS; |
| 477 } | 485 } |
| 478 | 486 |
| 479 StoreReadResult V4Store::ReadFromDisk() { | 487 StoreReadResult V4Store::ReadFromDisk() { |
| 480 DCHECK(task_runner_->RunsTasksOnCurrentThread()); | 488 DCHECK(task_runner_->RunsTasksOnCurrentThread()); |
| 481 | 489 |
| 482 std::string contents; | 490 std::string contents; |
| 483 bool read_success = base::ReadFileToString(store_path_, &contents); | 491 bool read_success = base::ReadFileToString(store_path_, &contents); |
| 484 if (!read_success) { | 492 if (!read_success) { |
| 485 return FILE_UNREADABLE_FAILURE; | 493 return FILE_UNREADABLE_FAILURE; |
| 486 } | 494 } |
| 487 | 495 |
| 488 if (contents.empty()) { | 496 if (contents.empty()) { |
| 489 return FILE_EMPTY_FAILURE; | 497 return FILE_EMPTY_FAILURE; |
| 490 } | 498 } |
| 491 | 499 |
| 492 V4StoreFileFormat file_format; | 500 V4StoreFileFormat file_format; |
| 493 if (!file_format.ParseFromString(contents)) { | 501 if (!file_format.ParseFromString(contents)) { |
| 494 return PROTO_PARSING_FAILURE; | 502 return PROTO_PARSING_FAILURE; |
| 495 } | 503 } |
| 496 | 504 |
| 497 if (file_format.magic_number() != kFileMagic) { | 505 if (file_format.magic_number() != kFileMagic) { |
| 498 DVLOG(1) << "Unexpected magic number found in file: " | 506 DVLOG(1) << "Failure: Unexpected magic number found in file: " |
|
Nathan Parker
2016/08/12 23:01:49
These DVLOGs actually seem like more clutter than
vakh (use Gerrit instead)
2016/08/13 00:15:14
Done.
| |
| 499 << file_format.magic_number(); | 507 << file_format.magic_number(); |
| 500 return UNEXPECTED_MAGIC_NUMBER_FAILURE; | 508 return UNEXPECTED_MAGIC_NUMBER_FAILURE; |
| 501 } | 509 } |
| 502 | 510 |
| 503 UMA_HISTOGRAM_SPARSE_SLOWLY("SafeBrowsing.V4StoreVersionRead", | 511 UMA_HISTOGRAM_SPARSE_SLOWLY("SafeBrowsing.V4StoreVersionRead", |
| 504 file_format.version_number()); | 512 file_format.version_number()); |
| 505 if (file_format.version_number() != kFileVersion) { | 513 if (file_format.version_number() != kFileVersion) { |
| 506 DVLOG(1) << "File version incompatible: " << file_format.version_number() | 514 DVLOG(1) << "Failure: File version incompatible: " |
| 507 << "; expected: " << kFileVersion; | 515 << file_format.version_number() << "; expected: " << kFileVersion; |
| 508 return FILE_VERSION_INCOMPATIBLE_FAILURE; | 516 return FILE_VERSION_INCOMPATIBLE_FAILURE; |
| 509 } | 517 } |
| 510 | 518 |
| 511 if (!file_format.has_list_update_response()) { | 519 if (!file_format.has_list_update_response()) { |
| 512 return HASH_PREFIX_INFO_MISSING_FAILURE; | 520 return HASH_PREFIX_INFO_MISSING_FAILURE; |
| 513 } | 521 } |
| 514 | 522 |
| 515 std::unique_ptr<ListUpdateResponse> response(new ListUpdateResponse); | 523 std::unique_ptr<ListUpdateResponse> response(new ListUpdateResponse); |
| 516 response->Swap(file_format.mutable_list_update_response()); | 524 response->Swap(file_format.mutable_list_update_response()); |
| 517 ApplyUpdateResult apply_update_result = ProcessFullUpdate(response); | 525 ApplyUpdateResult apply_update_result = ProcessFullUpdate(response); |
| 518 RecordApplyUpdateResultWhenReadingFromDisk(apply_update_result); | 526 RecordApplyUpdateResultWhenReadingFromDisk(apply_update_result); |
| 519 if (apply_update_result != APPLY_UPDATE_SUCCESS) { | 527 if (apply_update_result != APPLY_UPDATE_SUCCESS) { |
| 520 hash_prefix_map_.clear(); | 528 hash_prefix_map_.clear(); |
| 521 return HASH_PREFIX_MAP_GENERATION_FAILURE; | 529 return HASH_PREFIX_MAP_GENERATION_FAILURE; |
| 522 } | 530 } |
| 523 | 531 |
| 524 return READ_SUCCESS; | 532 return READ_SUCCESS; |
| 525 } | 533 } |
| 526 | 534 |
| 527 StoreWriteResult V4Store::WriteToDisk( | 535 StoreWriteResult V4Store::WriteToDisk( |
| 528 std::unique_ptr<ListUpdateResponse> response) const { | 536 std::unique_ptr<ListUpdateResponse> response) const { |
| 529 // Do not write partial updates to the disk. | 537 // Do not write partial updates to the disk. |
| 530 // After merging the updates, the ListUpdateResponse passed to this method | 538 // After merging the updates, the ListUpdateResponse passed to this method |
| 531 // should be a FULL_UPDATE. | 539 // should be a FULL_UPDATE. |
| 532 if (!response->has_response_type() || | 540 if (!response->has_response_type() || |
| 533 response->response_type() != ListUpdateResponse::FULL_UPDATE) { | 541 response->response_type() != ListUpdateResponse::FULL_UPDATE) { |
| 534 DVLOG(1) << "response->has_response_type(): " | 542 DVLOG(1) << "Failure: response->has_response_type(): " |
| 535 << response->has_response_type(); | 543 << response->has_response_type() |
| 536 DVLOG(1) << "response->response_type(): " << response->response_type(); | 544 << " : response->response_type(): " << response->response_type(); |
| 537 return INVALID_RESPONSE_TYPE_FAILURE; | 545 return INVALID_RESPONSE_TYPE_FAILURE; |
| 538 } | 546 } |
| 539 | 547 |
| 540 // Attempt writing to a temporary file first and at the end, swap the files. | 548 // Attempt writing to a temporary file first and at the end, swap the files. |
| 541 const base::FilePath new_filename = TemporaryFileForFilename(store_path_); | 549 const base::FilePath new_filename = TemporaryFileForFilename(store_path_); |
| 542 | 550 |
| 543 V4StoreFileFormat file_format; | 551 V4StoreFileFormat file_format; |
| 544 file_format.set_magic_number(kFileMagic); | 552 file_format.set_magic_number(kFileMagic); |
| 545 file_format.set_version_number(kFileVersion); | 553 file_format.set_version_number(kFileVersion); |
| 546 ListUpdateResponse* response_to_write = | 554 ListUpdateResponse* response_to_write = |
| (...skipping 46 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 593 if (result == 0) { | 601 if (result == 0) { |
| 594 return true; | 602 return true; |
| 595 } else if (result < 0) { | 603 } else if (result < 0) { |
| 596 return HashPrefixMatches(hash_prefix, begin, mid); | 604 return HashPrefixMatches(hash_prefix, begin, mid); |
| 597 } else { | 605 } else { |
| 598 return HashPrefixMatches(hash_prefix, mid + prefix_size, end); | 606 return HashPrefixMatches(hash_prefix, mid + prefix_size, end); |
| 599 } | 607 } |
| 600 } | 608 } |
| 601 | 609 |
| 602 } // namespace safe_browsing | 610 } // namespace safe_browsing |
| OLD | NEW |