Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 // Copyright 2016 The Chromium Authors. All rights reserved. | 1 // Copyright 2016 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "base/base64.h" | 5 #include "base/base64.h" |
| 6 #include "base/bind.h" | 6 #include "base/bind.h" |
| 7 #include "base/files/file_util.h" | 7 #include "base/files/file_util.h" |
| 8 #include "base/memory/ptr_util.h" | 8 #include "base/memory/ptr_util.h" |
| 9 #include "base/metrics/histogram_macros.h" | 9 #include "base/metrics/histogram_macros.h" |
| 10 #include "base/metrics/sparse_histogram.h" | 10 #include "base/metrics/sparse_histogram.h" |
| 11 #include "base/stl_util.h" | |
| 11 #include "base/strings/stringprintf.h" | 12 #include "base/strings/stringprintf.h" |
| 12 #include "components/safe_browsing_db/v4_rice.h" | 13 #include "components/safe_browsing_db/v4_rice.h" |
| 13 #include "components/safe_browsing_db/v4_store.h" | 14 #include "components/safe_browsing_db/v4_store.h" |
| 14 #include "components/safe_browsing_db/v4_store.pb.h" | 15 #include "components/safe_browsing_db/v4_store.pb.h" |
| 16 #include "crypto/secure_hash.h" | |
| 17 #include "crypto/sha2.h" | |
| 15 | 18 |
| 16 namespace safe_browsing { | 19 namespace safe_browsing { |
| 17 | 20 |
| 18 namespace { | 21 namespace { |
| 19 const uint32_t kFileMagic = 0x600D71FE; | 22 const uint32_t kFileMagic = 0x600D71FE; |
| 20 | 23 |
| 21 const uint32_t kFileVersion = 9; | 24 const uint32_t kFileVersion = 9; |
| 22 | 25 |
| 23 // The minimum expected size (in bytes) of a hash-prefix. | 26 // The minimum expected size (in bytes) of a hash-prefix. |
| 24 const uint32_t kMinHashPrefixLength = 4; | 27 const uint32_t kMinHashPrefixLength = 4; |
| (...skipping 81 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 106 return base::StringPrintf("path: %" PRIsFP "; state: %s", | 109 return base::StringPrintf("path: %" PRIsFP "; state: %s", |
| 107 store_path_.value().c_str(), state_base64.c_str()); | 110 store_path_.value().c_str(), state_base64.c_str()); |
| 108 } | 111 } |
| 109 | 112 |
| 110 bool V4Store::Reset() { | 113 bool V4Store::Reset() { |
| 111 // TODO(vakh): Implement skeleton. | 114 // TODO(vakh): Implement skeleton. |
| 112 state_ = ""; | 115 state_ = ""; |
| 113 return true; | 116 return true; |
| 114 } | 117 } |
| 115 | 118 |
| 116 ApplyUpdateResult V4Store::ProcessFullUpdate( | 119 ApplyUpdateResult V4Store::ProcessPartialUpdateAndWriteToDisk( |
| 117 std::unique_ptr<ListUpdateResponse> response, | 120 const HashPrefixMap& hash_prefix_map_old, |
| 118 const std::unique_ptr<V4Store>& new_store) { | 121 std::unique_ptr<ListUpdateResponse> response) { |
| 119 HashPrefixMap hash_prefix_map; | 122 DCHECK(response->has_response_type()); |
| 120 ApplyUpdateResult apply_update_result = | 123 DCHECK_EQ(ListUpdateResponse::PARTIAL_UPDATE, response->response_type()); |
| 121 UpdateHashPrefixMapFromAdditions(response->additions(), &hash_prefix_map); | 124 |
| 122 if (apply_update_result == APPLY_UPDATE_SUCCESS) { | 125 ApplyUpdateResult result = ProcessUpdate(hash_prefix_map_old, response); |
| 123 new_store->hash_prefix_map_ = hash_prefix_map; | 126 if (result == APPLY_UPDATE_SUCCESS) { |
| 124 RecordStoreWriteResult(new_store->WriteToDisk(std::move(response))); | 127 // TODO(vakh): Create a ListUpdateResponse containing RICE encoded |
| 128 // hash prefixes and response_type as FULL_UPDATE, and write that to disk. | |
| 125 } | 129 } |
| 126 return apply_update_result; | 130 return result; |
| 127 } | 131 } |
| 128 | 132 |
| 129 ApplyUpdateResult V4Store::ProcessPartialUpdate( | 133 ApplyUpdateResult V4Store::ProcessFullUpdateAndWriteToDisk( |
| 130 std::unique_ptr<ListUpdateResponse> response, | 134 std::unique_ptr<ListUpdateResponse> response) { |
| 131 const std::unique_ptr<V4Store>& new_store) { | 135 ApplyUpdateResult result = ProcessFullUpdate(response); |
| 132 // TODO(vakh): | 136 if (result == APPLY_UPDATE_SUCCESS) { |
| 133 // 1. Done: Merge the old store and the new update in new_store. | 137 RecordStoreWriteResult(WriteToDisk(std::move(response))); |
| 134 // 2. Create a ListUpdateResponse containing RICE encoded hash-prefixes and | 138 } |
| 135 // response_type as FULL_UPDATE, and write that to disk. | 139 return result; |
| 136 // 3. Remove this if condition after completing 1. and 2. | 140 } |
| 137 | 141 |
| 142 ApplyUpdateResult V4Store::ProcessFullUpdate( | |
| 143 const std::unique_ptr<ListUpdateResponse>& response) { | |
| 144 DCHECK(response->has_response_type()); | |
| 145 DCHECK_EQ(ListUpdateResponse::FULL_UPDATE, response->response_type()); | |
| 146 // TODO(vakh): For a full update, we don't need to process the update in | |
| 147 // lexographical order to store it, but we do need to do that for calculating | |
|
noé
2016/08/11 08:15:25
I believe the right spelling is lexicographical :)
vakh (use Gerrit instead)
2016/08/11 08:31:20
Thanks. I'll fix that in the next CL.
| |
| 148 // checksum. It might save some CPU cycles to store the full update as-is and | |
| 149 // walk the list of hash prefixes in lexographical order only for checksum | |
| 150 // calculation. | |
| 151 return ProcessUpdate(HashPrefixMap(), response); | |
| 152 } | |
| 153 | |
| 154 ApplyUpdateResult V4Store::ProcessUpdate( | |
| 155 const HashPrefixMap& hash_prefix_map_old, | |
| 156 const std::unique_ptr<ListUpdateResponse>& response) { | |
| 138 const RepeatedField<int32>* raw_removals = nullptr; | 157 const RepeatedField<int32>* raw_removals = nullptr; |
| 139 RepeatedField<int32> rice_removals; | 158 RepeatedField<int32> rice_removals; |
| 140 size_t removals_size = response->removals_size(); | 159 size_t removals_size = response->removals_size(); |
| 141 DCHECK_LE(removals_size, 1u); | 160 DCHECK_LE(removals_size, 1u); |
| 142 if (removals_size == 1) { | 161 if (removals_size == 1) { |
| 143 const ThreatEntrySet& removal = response->removals().Get(0); | 162 const ThreatEntrySet& removal = response->removals().Get(0); |
| 144 const CompressionType compression_type = removal.compression_type(); | 163 const CompressionType compression_type = removal.compression_type(); |
| 145 if (compression_type == RAW) { | 164 if (compression_type == RAW) { |
| 146 raw_removals = &removal.raw_indices().indices(); | 165 raw_removals = &removal.raw_indices().indices(); |
| 147 } else if (compression_type == RICE) { | 166 } else if (compression_type == RICE) { |
| (...skipping 12 matching lines...) Expand all Loading... | |
| 160 } | 179 } |
| 161 } else { | 180 } else { |
| 162 NOTREACHED() << "Unexpected compression_type type: " << compression_type; | 181 NOTREACHED() << "Unexpected compression_type type: " << compression_type; |
| 163 return UNEXPECTED_COMPRESSION_TYPE_REMOVALS_FAILURE; | 182 return UNEXPECTED_COMPRESSION_TYPE_REMOVALS_FAILURE; |
| 164 } | 183 } |
| 165 } | 184 } |
| 166 | 185 |
| 167 HashPrefixMap hash_prefix_map; | 186 HashPrefixMap hash_prefix_map; |
| 168 ApplyUpdateResult apply_update_result = | 187 ApplyUpdateResult apply_update_result = |
| 169 UpdateHashPrefixMapFromAdditions(response->additions(), &hash_prefix_map); | 188 UpdateHashPrefixMapFromAdditions(response->additions(), &hash_prefix_map); |
| 189 if (apply_update_result != APPLY_UPDATE_SUCCESS) { | |
| 190 return apply_update_result; | |
| 191 } | |
| 170 | 192 |
| 171 if (apply_update_result == APPLY_UPDATE_SUCCESS) { | 193 std::string expected_checksum; |
| 172 apply_update_result = | 194 if (response->has_checksum() && response->checksum().has_sha256()) { |
| 173 new_store->MergeUpdate(hash_prefix_map_, hash_prefix_map, raw_removals); | 195 expected_checksum = response->checksum().sha256(); |
| 174 } | 196 } |
| 175 return apply_update_result; | 197 |
| 198 apply_update_result = MergeUpdate(hash_prefix_map_old, hash_prefix_map, | |
| 199 raw_removals, expected_checksum); | |
| 200 if (apply_update_result != APPLY_UPDATE_SUCCESS) { | |
| 201 return apply_update_result; | |
| 202 } | |
| 203 | |
| 204 state_ = response->new_client_state(); | |
| 205 return APPLY_UPDATE_SUCCESS; | |
| 176 } | 206 } |
| 177 | 207 |
| 178 void V4Store::ApplyUpdate( | 208 void V4Store::ApplyUpdate( |
| 179 std::unique_ptr<ListUpdateResponse> response, | 209 std::unique_ptr<ListUpdateResponse> response, |
| 180 const scoped_refptr<base::SingleThreadTaskRunner>& callback_task_runner, | 210 const scoped_refptr<base::SingleThreadTaskRunner>& callback_task_runner, |
| 181 UpdatedStoreReadyCallback callback) { | 211 UpdatedStoreReadyCallback callback) { |
| 182 std::unique_ptr<V4Store> new_store( | 212 std::unique_ptr<V4Store> new_store( |
| 183 new V4Store(this->task_runner_, this->store_path_)); | 213 new V4Store(this->task_runner_, this->store_path_)); |
| 184 new_store->state_ = response->new_client_state(); | |
| 185 | 214 |
| 186 ApplyUpdateResult apply_update_result; | 215 ApplyUpdateResult apply_update_result; |
| 187 if (response->response_type() == ListUpdateResponse::PARTIAL_UPDATE) { | 216 if (response->response_type() == ListUpdateResponse::PARTIAL_UPDATE) { |
| 188 apply_update_result = ProcessPartialUpdate(std::move(response), new_store); | 217 apply_update_result = new_store->ProcessPartialUpdateAndWriteToDisk( |
| 218 hash_prefix_map_, std::move(response)); | |
| 189 } else if (response->response_type() == ListUpdateResponse::FULL_UPDATE) { | 219 } else if (response->response_type() == ListUpdateResponse::FULL_UPDATE) { |
| 190 apply_update_result = ProcessFullUpdate(std::move(response), new_store); | 220 apply_update_result = |
| 221 new_store->ProcessFullUpdateAndWriteToDisk(std::move(response)); | |
| 191 } else { | 222 } else { |
| 192 apply_update_result = UNEXPECTED_RESPONSE_TYPE_FAILURE; | 223 apply_update_result = UNEXPECTED_RESPONSE_TYPE_FAILURE; |
| 193 NOTREACHED() << "Unexpected response type: " << response->response_type(); | 224 NOTREACHED() << "Unexpected response type: " << response->response_type(); |
| 194 } | 225 } |
| 195 | 226 |
| 196 if (apply_update_result == APPLY_UPDATE_SUCCESS) { | 227 if (apply_update_result == APPLY_UPDATE_SUCCESS) { |
| 197 // new_store is done updating, pass it to the callback. | 228 // new_store is done updating, pass it to the callback. |
| 198 callback_task_runner->PostTask( | 229 callback_task_runner->PostTask( |
| 199 FROM_HERE, base::Bind(callback, base::Passed(&new_store))); | 230 FROM_HERE, base::Bind(callback, base::Passed(&new_store))); |
| 200 } else { | 231 } else { |
| 232 DVLOG(1) << "ApplyUpdate failed: reason: " << apply_update_result | |
| 233 << "; store: " << *this; | |
| 201 // new_store failed updating. Pass a nullptr to the callback. | 234 // new_store failed updating. Pass a nullptr to the callback. |
| 202 callback_task_runner->PostTask(FROM_HERE, base::Bind(callback, nullptr)); | 235 callback_task_runner->PostTask(FROM_HERE, base::Bind(callback, nullptr)); |
| 203 } | 236 } |
| 204 | 237 |
| 205 RecordApplyUpdateResult(apply_update_result); | 238 RecordApplyUpdateResult(apply_update_result); |
| 206 } | 239 } |
| 207 | 240 |
| 208 // static | 241 // static |
| 209 ApplyUpdateResult V4Store::UpdateHashPrefixMapFromAdditions( | 242 ApplyUpdateResult V4Store::UpdateHashPrefixMapFromAdditions( |
| 210 const RepeatedPtrField<ThreatEntrySet>& additions, | 243 const RepeatedPtrField<ThreatEntrySet>& additions, |
| (...skipping 103 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 314 | 347 |
| 315 const HashPrefixes& existing_prefixes = | 348 const HashPrefixes& existing_prefixes = |
| 316 (*prefix_map_to_update)[prefix_size]; | 349 (*prefix_map_to_update)[prefix_size]; |
| 317 size_t existing_capacity = existing_prefixes.capacity(); | 350 size_t existing_capacity = existing_prefixes.capacity(); |
| 318 | 351 |
| 319 (*prefix_map_to_update)[prefix_size].reserve(existing_capacity + | 352 (*prefix_map_to_update)[prefix_size].reserve(existing_capacity + |
| 320 prefix_length_to_add); | 353 prefix_length_to_add); |
| 321 } | 354 } |
| 322 } | 355 } |
| 323 | 356 |
| 324 ApplyUpdateResult V4Store::MergeUpdate( | 357 ApplyUpdateResult V4Store::MergeUpdate(const HashPrefixMap& old_prefixes_map, |
| 325 const HashPrefixMap& old_prefixes_map, | 358 const HashPrefixMap& additions_map, |
| 326 const HashPrefixMap& additions_map, | 359 const RepeatedField<int32>* raw_removals, |
| 327 const RepeatedField<int32>* raw_removals) { | 360 const std::string& expected_checksum) { |
| 328 DCHECK(hash_prefix_map_.empty()); | 361 DCHECK(hash_prefix_map_.empty()); |
| 329 hash_prefix_map_.clear(); | 362 hash_prefix_map_.clear(); |
| 330 ReserveSpaceInPrefixMap(old_prefixes_map, &hash_prefix_map_); | 363 ReserveSpaceInPrefixMap(old_prefixes_map, &hash_prefix_map_); |
| 331 ReserveSpaceInPrefixMap(additions_map, &hash_prefix_map_); | 364 ReserveSpaceInPrefixMap(additions_map, &hash_prefix_map_); |
| 332 | 365 |
| 333 IteratorMap old_iterator_map; | 366 IteratorMap old_iterator_map; |
| 334 HashPrefix next_smallest_prefix_old; | 367 HashPrefix next_smallest_prefix_old; |
| 335 InitializeIteratorMap(old_prefixes_map, &old_iterator_map); | 368 InitializeIteratorMap(old_prefixes_map, &old_iterator_map); |
| 336 bool old_has_unmerged = GetNextSmallestUnmergedPrefix( | 369 bool old_has_unmerged = GetNextSmallestUnmergedPrefix( |
| 337 old_prefixes_map, old_iterator_map, &next_smallest_prefix_old); | 370 old_prefixes_map, old_iterator_map, &next_smallest_prefix_old); |
| 338 | 371 |
| 339 IteratorMap additions_iterator_map; | 372 IteratorMap additions_iterator_map; |
| 340 HashPrefix next_smallest_prefix_additions; | 373 HashPrefix next_smallest_prefix_additions; |
| 341 InitializeIteratorMap(additions_map, &additions_iterator_map); | 374 InitializeIteratorMap(additions_map, &additions_iterator_map); |
| 342 bool additions_has_unmerged = GetNextSmallestUnmergedPrefix( | 375 bool additions_has_unmerged = GetNextSmallestUnmergedPrefix( |
| 343 additions_map, additions_iterator_map, &next_smallest_prefix_additions); | 376 additions_map, additions_iterator_map, &next_smallest_prefix_additions); |
| 344 | 377 |
| 345 // Classical merge sort. | 378 // Classical merge sort. |
| 346 // The two constructs to merge are maps: old_prefixes_map, additions_map. | 379 // The two constructs to merge are maps: old_prefixes_map, additions_map. |
| 347 // At least one of the maps still has elements that need to be merged into the | 380 // At least one of the maps still has elements that need to be merged into the |
| 348 // new store. | 381 // new store. |
| 349 | 382 |
| 383 bool calculate_checksum = !expected_checksum.empty(); | |
| 384 std::unique_ptr<crypto::SecureHash> checksum_ctx( | |
| 385 crypto::SecureHash::Create(crypto::SecureHash::SHA256)); | |
| 386 | |
| 350 // Keep track of the number of elements picked from the old map. This is used | 387 // Keep track of the number of elements picked from the old map. This is used |
| 351 // to determine which elements to drop based on the raw_removals. Note that | 388 // to determine which elements to drop based on the raw_removals. Note that |
| 352 // picked is not the same as merged. A picked element isn't merged if its | 389 // picked is not the same as merged. A picked element isn't merged if its |
| 353 // index is on the raw_removals list. | 390 // index is on the raw_removals list. |
| 354 int total_picked_from_old = 0; | 391 int total_picked_from_old = 0; |
| 355 const int* removals_iter = raw_removals ? raw_removals->begin() : nullptr; | 392 const int* removals_iter = raw_removals ? raw_removals->begin() : nullptr; |
| 356 while (old_has_unmerged || additions_has_unmerged) { | 393 while (old_has_unmerged || additions_has_unmerged) { |
|
noé
2016/08/11 08:15:25
This is quite complicated code. Have you considered [comment truncated in extraction]
vakh (use Gerrit instead)
2016/08/11 08:31:20
Yes, it would simplify the code but at the cost of
vakh (use Gerrit instead)
2016/08/11 17:05:59
Sorry, I take that back. I think the time complexity [comment truncated in extraction]
| |
| 357 // If the same hash prefix appears in the existing store and the additions | 394 // If the same hash prefix appears in the existing store and the additions |
| 358 // list, something is clearly wrong. Discard the update. | 395 // list, something is clearly wrong. Discard the update. |
| 359 if (old_has_unmerged && additions_has_unmerged && | 396 if (old_has_unmerged && additions_has_unmerged && |
| 360 next_smallest_prefix_old == next_smallest_prefix_additions) { | 397 next_smallest_prefix_old == next_smallest_prefix_additions) { |
| 361 return ADDITIONS_HAS_EXISTING_PREFIX_FAILURE; | 398 return ADDITIONS_HAS_EXISTING_PREFIX_FAILURE; |
| 362 } | 399 } |
| 363 | 400 |
| 364 // Select which map to pick the next hash prefix from to keep the result in | 401 // Select which map to pick the next hash prefix from to keep the result in |
| 365 // lexographically sorted order. | 402 // lexographically sorted order. |
| 366 bool pick_from_old = | 403 bool pick_from_old = |
| 367 old_has_unmerged && | 404 old_has_unmerged && |
| 368 (!additions_has_unmerged || | 405 (!additions_has_unmerged || |
| 369 (next_smallest_prefix_old < next_smallest_prefix_additions)); | 406 (next_smallest_prefix_old < next_smallest_prefix_additions)); |
| 370 | 407 |
| 371 PrefixSize next_smallest_prefix_size; | 408 PrefixSize next_smallest_prefix_size; |
| 372 if (pick_from_old) { | 409 if (pick_from_old) { |
| 373 next_smallest_prefix_size = next_smallest_prefix_old.size(); | 410 next_smallest_prefix_size = next_smallest_prefix_old.size(); |
| 374 | 411 |
| 375 // Update the iterator map, which means that we have merged one hash | 412 // Update the iterator map, which means that we have merged one hash |
| 376 // prefix of size |next_size_for_old| from the old store. | 413 // prefix of size |next_size_for_old| from the old store. |
| 377 old_iterator_map[next_smallest_prefix_size] += next_smallest_prefix_size; | 414 old_iterator_map[next_smallest_prefix_size] += next_smallest_prefix_size; |
| 378 | 415 |
| 379 if (!raw_removals || removals_iter == raw_removals->end() || | 416 if (!raw_removals || removals_iter == raw_removals->end() || |
| 380 *removals_iter != total_picked_from_old) { | 417 *removals_iter != total_picked_from_old) { |
| 381 // Append the smallest hash to the appropriate list. | 418 // Append the smallest hash to the appropriate list. |
| 382 hash_prefix_map_[next_smallest_prefix_size] += next_smallest_prefix_old; | 419 hash_prefix_map_[next_smallest_prefix_size] += next_smallest_prefix_old; |
| 420 | |
| 421 if (calculate_checksum) { | |
| 422 checksum_ctx->Update(string_as_array(&next_smallest_prefix_old), | |
| 423 next_smallest_prefix_size); | |
| 424 } | |
| 383 } else { | 425 } else { |
| 384 // Element not added to new map. Move the removals iterator forward. | 426 // Element not added to new map. Move the removals iterator forward. |
| 385 removals_iter++; | 427 removals_iter++; |
| 386 } | 428 } |
| 387 | 429 |
| 388 total_picked_from_old++; | 430 total_picked_from_old++; |
| 389 | 431 |
| 390 // Find the next smallest unmerged element in the old store's map. | 432 // Find the next smallest unmerged element in the old store's map. |
| 391 old_has_unmerged = GetNextSmallestUnmergedPrefix( | 433 old_has_unmerged = GetNextSmallestUnmergedPrefix( |
| 392 old_prefixes_map, old_iterator_map, &next_smallest_prefix_old); | 434 old_prefixes_map, old_iterator_map, &next_smallest_prefix_old); |
| 393 } else { | 435 } else { |
| 394 next_smallest_prefix_size = next_smallest_prefix_additions.size(); | 436 next_smallest_prefix_size = next_smallest_prefix_additions.size(); |
| 395 | 437 |
| 396 // Append the smallest hash to the appropriate list. | 438 // Append the smallest hash to the appropriate list. |
| 397 hash_prefix_map_[next_smallest_prefix_size] += | 439 hash_prefix_map_[next_smallest_prefix_size] += |
| 398 next_smallest_prefix_additions; | 440 next_smallest_prefix_additions; |
| 399 | 441 |
| 442 if (calculate_checksum) { | |
| 443 checksum_ctx->Update(string_as_array(&next_smallest_prefix_additions), | |
| 444 next_smallest_prefix_size); | |
| 445 } | |
| 446 | |
| 400 // Update the iterator map, which means that we have merged one hash | 447 // Update the iterator map, which means that we have merged one hash |
| 401 // prefix of size |next_smallest_prefix_size| from the update. | 448 // prefix of size |next_smallest_prefix_size| from the update. |
| 402 additions_iterator_map[next_smallest_prefix_size] += | 449 additions_iterator_map[next_smallest_prefix_size] += |
| 403 next_smallest_prefix_size; | 450 next_smallest_prefix_size; |
| 404 | 451 |
| 405 // Find the next smallest unmerged element in the additions map. | 452 // Find the next smallest unmerged element in the additions map. |
| 406 additions_has_unmerged = | 453 additions_has_unmerged = |
| 407 GetNextSmallestUnmergedPrefix(additions_map, additions_iterator_map, | 454 GetNextSmallestUnmergedPrefix(additions_map, additions_iterator_map, |
| 408 &next_smallest_prefix_additions); | 455 &next_smallest_prefix_additions); |
| 409 } | 456 } |
| 410 } | 457 } |
| 411 | 458 |
| 412 return (!raw_removals || removals_iter == raw_removals->end()) | 459 if (raw_removals && removals_iter != raw_removals->end()) { |
| 413 ? APPLY_UPDATE_SUCCESS | 460 return REMOVALS_INDEX_TOO_LARGE_FAILURE; |
| 414 : REMOVALS_INDEX_TOO_LARGE_FAILURE; | 461 } |
| 462 | |
| 463 if (calculate_checksum) { | |
| 464 std::string checksum(crypto::kSHA256Length, 0); | |
| 465 checksum_ctx->Finish(string_as_array(&checksum), checksum.size()); | |
| 466 if (checksum != expected_checksum) { | |
| 467 std::string checksum_base64, expected_checksum_base64; | |
| 468 base::Base64Encode(checksum, &checksum_base64); | |
| 469 base::Base64Encode(expected_checksum, &expected_checksum_base64); | |
| 470 DVLOG(1) << "Checksum failed: calculated: " << checksum_base64 | |
| 471 << "expected: " << expected_checksum_base64; | |
| 472 return CHECKSUM_MISMATCH_FAILURE; | |
| 473 } | |
| 474 } | |
| 475 | |
| 476 return APPLY_UPDATE_SUCCESS; | |
| 415 } | 477 } |
| 416 | 478 |
| 417 StoreReadResult V4Store::ReadFromDisk() { | 479 StoreReadResult V4Store::ReadFromDisk() { |
| 418 DCHECK(task_runner_->RunsTasksOnCurrentThread()); | 480 DCHECK(task_runner_->RunsTasksOnCurrentThread()); |
| 419 | 481 |
| 420 std::string contents; | 482 std::string contents; |
| 421 bool read_success = base::ReadFileToString(store_path_, &contents); | 483 bool read_success = base::ReadFileToString(store_path_, &contents); |
| 422 if (!read_success) { | 484 if (!read_success) { |
| 423 return FILE_UNREADABLE_FAILURE; | 485 return FILE_UNREADABLE_FAILURE; |
| 424 } | 486 } |
| (...skipping 18 matching lines...) Expand all Loading... | |
| 443 if (file_format.version_number() != kFileVersion) { | 505 if (file_format.version_number() != kFileVersion) { |
| 444 DVLOG(1) << "File version incompatible: " << file_format.version_number() | 506 DVLOG(1) << "File version incompatible: " << file_format.version_number() |
| 445 << "; expected: " << kFileVersion; | 507 << "; expected: " << kFileVersion; |
| 446 return FILE_VERSION_INCOMPATIBLE_FAILURE; | 508 return FILE_VERSION_INCOMPATIBLE_FAILURE; |
| 447 } | 509 } |
| 448 | 510 |
| 449 if (!file_format.has_list_update_response()) { | 511 if (!file_format.has_list_update_response()) { |
| 450 return HASH_PREFIX_INFO_MISSING_FAILURE; | 512 return HASH_PREFIX_INFO_MISSING_FAILURE; |
| 451 } | 513 } |
| 452 | 514 |
| 453 const ListUpdateResponse& response = file_format.list_update_response(); | 515 std::unique_ptr<ListUpdateResponse> response(new ListUpdateResponse); |
| 454 ApplyUpdateResult apply_update_result = UpdateHashPrefixMapFromAdditions( | 516 response->Swap(file_format.mutable_list_update_response()); |
| 455 response.additions(), &hash_prefix_map_); | 517 ApplyUpdateResult apply_update_result = ProcessFullUpdate(response); |
| 456 RecordApplyUpdateResultWhenReadingFromDisk(apply_update_result); | 518 RecordApplyUpdateResultWhenReadingFromDisk(apply_update_result); |
| 457 if (apply_update_result != APPLY_UPDATE_SUCCESS) { | 519 if (apply_update_result != APPLY_UPDATE_SUCCESS) { |
| 458 hash_prefix_map_.clear(); | 520 hash_prefix_map_.clear(); |
| 459 return HASH_PREFIX_MAP_GENERATION_FAILURE; | 521 return HASH_PREFIX_MAP_GENERATION_FAILURE; |
| 460 } | 522 } |
| 461 | 523 |
| 462 state_ = response.new_client_state(); | |
| 463 return READ_SUCCESS; | 524 return READ_SUCCESS; |
| 464 } | 525 } |
| 465 | 526 |
| 466 StoreWriteResult V4Store::WriteToDisk( | 527 StoreWriteResult V4Store::WriteToDisk( |
| 467 std::unique_ptr<ListUpdateResponse> response) const { | 528 std::unique_ptr<ListUpdateResponse> response) const { |
| 468 // Do not write partial updates to the disk. | 529 // Do not write partial updates to the disk. |
| 469 // After merging the updates, the ListUpdateResponse passed to this method | 530 // After merging the updates, the ListUpdateResponse passed to this method |
| 470 // should be a FULL_UPDATE. | 531 // should be a FULL_UPDATE. |
| 471 if (!response->has_response_type() || | 532 if (!response->has_response_type() || |
| 472 response->response_type() != ListUpdateResponse::FULL_UPDATE) { | 533 response->response_type() != ListUpdateResponse::FULL_UPDATE) { |
| (...skipping 59 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 532 if (result == 0) { | 593 if (result == 0) { |
| 533 return true; | 594 return true; |
| 534 } else if (result < 0) { | 595 } else if (result < 0) { |
| 535 return HashPrefixMatches(hash_prefix, begin, mid); | 596 return HashPrefixMatches(hash_prefix, begin, mid); |
| 536 } else { | 597 } else { |
| 537 return HashPrefixMatches(hash_prefix, mid + prefix_size, end); | 598 return HashPrefixMatches(hash_prefix, mid + prefix_size, end); |
| 538 } | 599 } |
| 539 } | 600 } |
| 540 | 601 |
| 541 } // namespace safe_browsing | 602 } // namespace safe_browsing |
| OLD | NEW |