Chromium Code Reviews

Unified Diff: components/safe_browsing_db/v4_store.cc

Issue 2206733002: PVer4: Verify checksum for downloaded updates (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@01_v4_rice_store
Patch Set: XXS: Added a DCHECK for PARTIAL_UPDATE in ProcessPartialUpdate Created 4 years, 4 months ago
  // Copyright 2016 The Chromium Authors. All rights reserved.
  // Use of this source code is governed by a BSD-style license that can be
  // found in the LICENSE file.

  #include "base/base64.h"
  #include "base/bind.h"
  #include "base/files/file_util.h"
  #include "base/memory/ptr_util.h"
  #include "base/metrics/histogram_macros.h"
  #include "base/metrics/sparse_histogram.h"
  #include "base/strings/stringprintf.h"
  #include "components/safe_browsing_db/v4_rice.h"
  #include "components/safe_browsing_db/v4_store.h"
  #include "components/safe_browsing_db/v4_store.pb.h"
+ #include "crypto/sha2.h"

  namespace safe_browsing {

  namespace {
  const uint32_t kFileMagic = 0x600D71FE;

  const uint32_t kFileVersion = 9;

  // The minimum expected size (in bytes) of a hash-prefix.
  const uint32_t kMinHashPrefixLength = 4;

(...skipping 81 matching lines...)
    return base::StringPrintf("path: %" PRIsFP "; state: %s",
                              store_path_.value().c_str(), state_base64.c_str());
  }

  bool V4Store::Reset() {
    // TODO(vakh): Implement skeleton.
    state_ = "";
    return true;
  }

- ApplyUpdateResult V4Store::ProcessFullUpdate(
-     std::unique_ptr<ListUpdateResponse> response,
-     const std::unique_ptr<V4Store>& new_store) {
-   HashPrefixMap hash_prefix_map;
-   ApplyUpdateResult apply_update_result =
-       UpdateHashPrefixMapFromAdditions(response->additions(), &hash_prefix_map);
-   if (apply_update_result == APPLY_UPDATE_SUCCESS) {
-     new_store->hash_prefix_map_ = hash_prefix_map;
-     RecordStoreWriteResult(new_store->WriteToDisk(std::move(response)));
-   }
-   return apply_update_result;
- }
-
- ApplyUpdateResult V4Store::ProcessPartialUpdate(
-     std::unique_ptr<ListUpdateResponse> response,
-     const std::unique_ptr<V4Store>& new_store) {
-   // TODO(vakh):
-   // 1. Done: Merge the old store and the new update in new_store.
-   // 2. Create a ListUpdateResponse containing RICE encoded hash-prefixes and
-   //    response_type as FULL_UPDATE, and write that to disk.
-   // 3. Remove this if condition after completing 1. and 2.
+ // static
+ ApplyUpdateResult V4Store::ProcessPartialUpdateAndWriteToDisk(
+     const HashPrefixMap& hash_prefix_map_old,
+     std::unique_ptr<ListUpdateResponse> response,
+     const std::unique_ptr<V4Store>& new_store) {
+   DCHECK(response->has_response_type());
+   DCHECK_EQ(ListUpdateResponse::PARTIAL_UPDATE, response->response_type());
+
+   ApplyUpdateResult result =
+       ProcessUpdate(hash_prefix_map_old, response, new_store.get());

Nathan Parker 2016/08/08 21:17:19: This change removes the nicely optimized short-cut
vakh (use Gerrit instead) 2016/08/08 22:46:47: Done.

+   if (result == APPLY_UPDATE_SUCCESS) {
+     // TODO(vakh): Create a ListUpdateResponse containing RICE encoded
+     // hash prefixes and response_type as FULL_UPDATE, and write that to disk.
+   }
+   return result;
+ }
+
+ // static
+ ApplyUpdateResult V4Store::ProcessFullUpdateAndWriteToDisk(
+     std::unique_ptr<ListUpdateResponse> response,
+     const std::unique_ptr<V4Store>& new_store) {
+   ApplyUpdateResult result = ProcessFullUpdate(response, new_store.get());
+   if (result == APPLY_UPDATE_SUCCESS) {
+     RecordStoreWriteResult(new_store->WriteToDisk(std::move(response)));
+   }
+   return result;
+ }
+
+ // static
+ ApplyUpdateResult V4Store::ProcessFullUpdate(
+     const std::unique_ptr<ListUpdateResponse>& response,
+     V4Store* new_store) {
+   DCHECK(response->has_response_type());
+   DCHECK_EQ(ListUpdateResponse::FULL_UPDATE, response->response_type());
+   return ProcessUpdate(HashPrefixMap(), response, new_store);
+ }
+
+ // static
+ ApplyUpdateResult V4Store::ProcessUpdate(
+     const HashPrefixMap& hash_prefix_map_old,
+     const std::unique_ptr<ListUpdateResponse>& response,
+     V4Store* new_store) {
    const RepeatedField<int32>* raw_removals = nullptr;
    RepeatedField<int32> rice_removals;
    size_t removals_size = response->removals_size();
    DCHECK_LE(removals_size, 1u);
    if (removals_size == 1) {
      const ThreatEntrySet& removal = response->removals().Get(0);
      const CompressionType compression_type = removal.compression_type();
      if (compression_type == RAW) {
        raw_removals = &removal.raw_indices().indices();
      } else if (compression_type == RICE) {

(...skipping 12 matching lines...)

        }
      } else {
        NOTREACHED() << "Unexpected compression_type type: " << compression_type;
        return UNEXPECTED_COMPRESSION_TYPE_REMOVALS_FAILURE;
      }
    }

    HashPrefixMap hash_prefix_map;
    ApplyUpdateResult apply_update_result =
        UpdateHashPrefixMapFromAdditions(response->additions(), &hash_prefix_map);
+   if (apply_update_result != APPLY_UPDATE_SUCCESS) {
+     return apply_update_result;
+   }

-   if (apply_update_result == APPLY_UPDATE_SUCCESS) {
-     apply_update_result =
-         new_store->MergeUpdate(hash_prefix_map_, hash_prefix_map, raw_removals);
-   }
-   return apply_update_result;
+   std::string expected_checksum;
+   if (response->has_checksum() && response->checksum().has_sha256()) {
+     expected_checksum = response->checksum().sha256();
+   }
+
+   apply_update_result = new_store->MergeUpdate(
+       hash_prefix_map_old, hash_prefix_map, raw_removals, expected_checksum);
+   if (apply_update_result != APPLY_UPDATE_SUCCESS) {
+     return apply_update_result;
+   }
+
+   new_store->state_ = response->new_client_state();
+   return APPLY_UPDATE_SUCCESS;
  }
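When the server sends the removals RICE-compressed, the elided lines above decode them with the Rice decoder declared in v4_rice.h before the indices can be applied. As a rough illustration of what Golomb-Rice decoding of delta-encoded indices involves, here is a minimal standalone sketch; the names, the LSB-first bit order, and the ones-terminated-by-zero unary convention are assumptions made for illustration, not the API or wire format Chromium actually uses.

#include <cstddef>
#include <cstdint>
#include <vector>

// Minimal bit reader; no bounds checking, purely illustrative.
class BitReader {
 public:
  explicit BitReader(const std::vector<uint8_t>& data) : data_(data) {}

  // Reads one bit, least-significant bit first within each byte (assumed).
  int ReadBit() {
    int bit = (data_[pos_ / 8] >> (pos_ % 8)) & 1;
    ++pos_;
    return bit;
  }

  // Reads an n-bit little-endian value.
  uint32_t ReadBits(int n) {
    uint32_t value = 0;
    for (int i = 0; i < n; ++i)
      value |= static_cast<uint32_t>(ReadBit()) << i;
    return value;
  }

 private:
  const std::vector<uint8_t>& data_;
  size_t pos_ = 0;
};

// Decodes |num_deltas| Rice-coded deltas with parameter |k| and accumulates
// them onto |first_value| to recover the absolute removal indices.
std::vector<uint32_t> DecodeRiceIndices(uint32_t first_value,
                                        int k,
                                        int num_deltas,
                                        const std::vector<uint8_t>& data) {
  std::vector<uint32_t> indices = {first_value};
  BitReader reader(data);
  for (int i = 0; i < num_deltas; ++i) {
    uint32_t quotient = 0;
    while (reader.ReadBit() == 1)  // Unary quotient: 1s terminated by a 0.
      ++quotient;
    uint32_t remainder = reader.ReadBits(k);  // k-bit binary remainder.
    indices.push_back(indices.back() + ((quotient << k) | remainder));
  }
  return indices;
}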

  void V4Store::ApplyUpdate(
      std::unique_ptr<ListUpdateResponse> response,
      const scoped_refptr<base::SingleThreadTaskRunner>& callback_task_runner,
      UpdatedStoreReadyCallback callback) {
    std::unique_ptr<V4Store> new_store(
        new V4Store(this->task_runner_, this->store_path_));
-   new_store->state_ = response->new_client_state();

    ApplyUpdateResult apply_update_result;
    if (response->response_type() == ListUpdateResponse::PARTIAL_UPDATE) {
-     apply_update_result = ProcessPartialUpdate(std::move(response), new_store);
+     apply_update_result = ProcessPartialUpdateAndWriteToDisk(
+         hash_prefix_map_, std::move(response), new_store);
    } else if (response->response_type() == ListUpdateResponse::FULL_UPDATE) {
-     apply_update_result = ProcessFullUpdate(std::move(response), new_store);
+     apply_update_result =
+         ProcessFullUpdateAndWriteToDisk(std::move(response), new_store);
    } else {
      apply_update_result = UNEXPECTED_RESPONSE_TYPE_FAILURE;
      NOTREACHED() << "Unexpected response type: " << response->response_type();
    }

    if (apply_update_result == APPLY_UPDATE_SUCCESS) {
      // new_store is done updating, pass it to the callback.
      callback_task_runner->PostTask(
          FROM_HERE, base::Bind(callback, base::Passed(&new_store)));
    } else {
+     DVLOG(1) << "ApplyUpdate failed: " << *this;

Nathan Parker 2016/08/08 21:17:19: Make it a single DVLOG.
vakh (use Gerrit instead) 2016/08/08 22:46:47: Done.

+     DVLOG(1) << "reason: " << apply_update_result;
      // new_store failed updating. Pass a nullptr to the callback.
      callback_task_runner->PostTask(FROM_HERE, base::Bind(callback, nullptr));
    }

    RecordApplyUpdateResult(apply_update_result);
  }
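ApplyUpdate does its work on the store's task runner and reports back asynchronously: on success the callback receives the freshly built store, on failure it receives nullptr. A hypothetical caller might look like the following sketch; OnStoreUpdated and UpdateStore are invented names, and the base include paths are as of roughly this era of Chromium.

#include "base/bind.h"
#include "base/threading/thread_task_runner_handle.h"

// Invoked on the caller's thread once the update has been applied.
void OnStoreUpdated(std::unique_ptr<V4Store> new_store) {
  if (!new_store) {
    // The update failed; keep serving lookups from the existing store.
    return;
  }
  // Otherwise, swap |new_store| in as the active store.
}

void UpdateStore(V4Store* store, std::unique_ptr<ListUpdateResponse> response) {
  // Reply on the current thread once the new store has been built.
  store->ApplyUpdate(std::move(response),
                     base::ThreadTaskRunnerHandle::Get(),
                     base::Bind(&OnStoreUpdated));
}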

  // static
  ApplyUpdateResult V4Store::UpdateHashPrefixMapFromAdditions(
      const RepeatedPtrField<ThreatEntrySet>& additions,

(...skipping 103 matching lines...)

      const HashPrefixes& existing_prefixes =
          (*prefix_map_to_update)[prefix_size];
      size_t existing_capacity = existing_prefixes.capacity();

      (*prefix_map_to_update)[prefix_size].reserve(existing_capacity +
                                                   prefix_length_to_add);
    }
  }
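ReserveSpaceInPrefixMap (its head is elided above) exists so that the merge loop's repeated string appends do not keep reallocating. A minimal sketch of the idea, assuming, as the rest of this file does, that a HashPrefixMap maps each prefix size to a single concatenated string of sorted prefixes; all names here are invented stand-ins.

#include <cstddef>
#include <map>
#include <string>

using PrefixSize = size_t;
using HashPrefixes = std::string;  // All prefixes of one size, concatenated.
using HashPrefixMap = std::map<PrefixSize, HashPrefixes>;

// Grows each per-size string in |to_update| so it can absorb the prefixes
// coming from |to_add| without reallocating during the merge.
void ReserveSpace(const HashPrefixMap& to_add, HashPrefixMap* to_update) {
  for (const auto& entry : to_add) {
    const PrefixSize prefix_size = entry.first;
    const size_t bytes_to_add = entry.second.length();
    HashPrefixes& existing = (*to_update)[prefix_size];
    existing.reserve(existing.capacity() + bytes_to_add);
  }
}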

- ApplyUpdateResult V4Store::MergeUpdate(
-     const HashPrefixMap& old_prefixes_map,
-     const HashPrefixMap& additions_map,
-     const RepeatedField<int32>* raw_removals) {
+ ApplyUpdateResult V4Store::MergeUpdate(const HashPrefixMap& old_prefixes_map,
+                                        const HashPrefixMap& additions_map,
+                                        const RepeatedField<int32>* raw_removals,
+                                        const std::string& expected_checksum) {
    DCHECK(hash_prefix_map_.empty());
    hash_prefix_map_.clear();
    ReserveSpaceInPrefixMap(old_prefixes_map, &hash_prefix_map_);
    ReserveSpaceInPrefixMap(additions_map, &hash_prefix_map_);

    IteratorMap old_iterator_map;
    HashPrefix next_smallest_prefix_old;
    InitializeIteratorMap(old_prefixes_map, &old_iterator_map);
    bool old_has_unmerged = GetNextSmallestUnmergedPrefix(
        old_prefixes_map, old_iterator_map, &next_smallest_prefix_old);

    IteratorMap additions_iterator_map;
    HashPrefix next_smallest_prefix_additions;
    InitializeIteratorMap(additions_map, &additions_iterator_map);
    bool additions_has_unmerged = GetNextSmallestUnmergedPrefix(
        additions_map, additions_iterator_map, &next_smallest_prefix_additions);

    // Classical merge sort.
    // The two constructs to merge are maps: old_prefixes_map, additions_map.
    // At least one of the maps still has elements that need to be merged into
    // the new store.

+   // |all_prefixes_concatenated| stores the concatenated list of hash prefixes
+   // in lexicographically sorted order. It is used to calculate the |checksum|
+   // at the end. This checksum is matched against the expected checksum sent
+   // by the server.
+   HashPrefixes all_prefixes_concatenated;
+   bool calculate_checksum = !expected_checksum.empty();
+
    // Keep track of the number of elements picked from the old map. This is used
    // to determine which elements to drop based on the raw_removals. Note that
    // picked is not the same as merged. A picked element isn't merged if its
    // index is on the raw_removals list.
    int total_picked_from_old = 0;
    const int* removals_iter = raw_removals ? raw_removals->begin() : nullptr;
    while (old_has_unmerged || additions_has_unmerged) {
      // If the same hash prefix appears in the existing store and the additions
      // list, something is clearly wrong. Discard the update.
      if (old_has_unmerged && additions_has_unmerged &&

(...skipping 13 matching lines...)

        next_smallest_prefix_size = next_smallest_prefix_old.size();

        // Update the iterator map, which means that we have merged one hash
        // prefix of size |next_size_for_old| from the old store.
        old_iterator_map[next_smallest_prefix_size] += next_smallest_prefix_size;

        if (!raw_removals || removals_iter == raw_removals->end() ||
            *removals_iter != total_picked_from_old) {
          // Append the smallest hash to the appropriate list.
          hash_prefix_map_[next_smallest_prefix_size] += next_smallest_prefix_old;
+
+         if (calculate_checksum) {
+           all_prefixes_concatenated += next_smallest_prefix_old;

Nathan Parker 2016/08/08 21:17:19: Rather than building up another whole copy of the
vakh (use Gerrit instead) 2016/08/08 22:46:47: Done.

+         }
        } else {
          // Element not added to new map. Move the removals iterator forward.
          removals_iter++;
        }

        total_picked_from_old++;

        // Find the next smallest unmerged element in the old store's map.
        old_has_unmerged = GetNextSmallestUnmergedPrefix(
            old_prefixes_map, old_iterator_map, &next_smallest_prefix_old);
      } else {
        next_smallest_prefix_size = next_smallest_prefix_additions.size();

        // Append the smallest hash to the appropriate list.
        hash_prefix_map_[next_smallest_prefix_size] +=
            next_smallest_prefix_additions;

+       if (calculate_checksum) {
+         all_prefixes_concatenated += next_smallest_prefix_additions;
+       }
+
        // Update the iterator map, which means that we have merged one hash
        // prefix of size |next_smallest_prefix_size| from the update.
        additions_iterator_map[next_smallest_prefix_size] +=
            next_smallest_prefix_size;

        // Find the next smallest unmerged element in the additions map.
        additions_has_unmerged =
            GetNextSmallestUnmergedPrefix(additions_map, additions_iterator_map,
                                          &next_smallest_prefix_additions);
      }
    }

+   if (calculate_checksum) {
+     std::string checksum = crypto::SHA256HashString(all_prefixes_concatenated);
+     if (checksum != expected_checksum) {
+       std::string checksum_base64, expected_checksum_base64;
+       base::Base64Encode(checksum, &checksum_base64);
+       base::Base64Encode(expected_checksum, &expected_checksum_base64);
+       DVLOG(1) << "calculated checksum: " << checksum_base64;

Nathan Parker 2016/08/08 21:17:19: How about making it a single DVLOG, with an error
vakh (use Gerrit instead) 2016/08/08 22:46:47: Done.

+       DVLOG(1) << "expected checksum: " << expected_checksum_base64;
+       return CHECKSUM_MISMATCH_FAILURE;
+     }
+   }
+
    return (!raw_removals || removals_iter == raw_removals->end())

Nathan Parker 2016/08/08 21:17:19: It might be useful to check (!raw_removals || remo
vakh (use Gerrit instead) 2016/08/08 22:46:47: Done.

               ? APPLY_UPDATE_SUCCESS
               : REMOVALS_INDEX_TOO_LARGE_FAILURE;
  }
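Stripped of the per-size iterator machinery, the loop above is an ordinary two-way merge of two sorted lists with an index-based skip list, and the checksum step is a SHA-256 over the merged prefixes in sorted order, compared with the digest the server sent. A simplified, self-contained sketch follows: flat vectors stand in for the per-size map, OpenSSL's SHA256 stands in for crypto::SHA256HashString, and all names are invented.

// Build: g++ -std=c++11 merge_sketch.cc -lcrypto
#include <openssl/sha.h>

#include <cstddef>
#include <string>
#include <vector>

// Merges two sorted prefix lists, skipping the old-list positions named in
// |removals| (sorted indices into the old list), and returns the merged list.
std::vector<std::string> MergeWithRemovals(
    const std::vector<std::string>& old_prefixes,
    const std::vector<std::string>& additions,
    const std::vector<int>& removals) {
  std::vector<std::string> merged;
  size_t i = 0, j = 0, removals_index = 0;
  int picked_from_old = 0;
  while (i < old_prefixes.size() || j < additions.size()) {
    // Pick from the old list when the additions are exhausted or the old
    // element is smaller. (The real code treats equal prefixes on both
    // sides as corruption and discards the update.)
    bool pick_old = j == additions.size() ||
                    (i < old_prefixes.size() && old_prefixes[i] < additions[j]);
    if (pick_old) {
      if (removals_index < removals.size() &&
          removals[removals_index] == picked_from_old) {
        ++removals_index;  // This index is being removed; skip the element.
      } else {
        merged.push_back(old_prefixes[i]);
      }
      ++picked_from_old;
      ++i;
    } else {
      merged.push_back(additions[j]);
      ++j;
    }
  }
  return merged;
}

// SHA-256 over the concatenation of the merged, sorted prefixes; this is
// what gets compared against the checksum sent alongside the update.
std::string Sha256(const std::string& data) {
  unsigned char digest[SHA256_DIGEST_LENGTH];
  SHA256(reinterpret_cast<const unsigned char*>(data.data()), data.size(),
         digest);
  return std::string(reinterpret_cast<const char*>(digest),
                     SHA256_DIGEST_LENGTH);
}

bool ChecksumMatches(const std::vector<std::string>& merged,
                     const std::string& expected_checksum) {
  std::string concatenated;
  for (const std::string& prefix : merged)
    concatenated += prefix;
  return Sha256(concatenated) == expected_checksum;
}

A mismatch at this point (CHECKSUM_MISMATCH_FAILURE above) discards the whole update rather than risk serving a corrupted prefix list.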

  StoreReadResult V4Store::ReadFromDisk() {
    DCHECK(task_runner_->RunsTasksOnCurrentThread());

    std::string contents;
    bool read_success = base::ReadFileToString(store_path_, &contents);
    if (!read_success) {

(...skipping 20 matching lines...)

    if (file_format.version_number() != kFileVersion) {
      DVLOG(1) << "File version incompatible: " << file_format.version_number()
               << "; expected: " << kFileVersion;
      return FILE_VERSION_INCOMPATIBLE_FAILURE;
    }

    if (!file_format.has_list_update_response()) {
      return HASH_PREFIX_INFO_MISSING_FAILURE;
    }

-   const ListUpdateResponse& response = file_format.list_update_response();
-   ApplyUpdateResult apply_update_result = UpdateHashPrefixMapFromAdditions(
-       response.additions(), &hash_prefix_map_);
+   std::unique_ptr<ListUpdateResponse> response(new ListUpdateResponse);
+   response->Swap(file_format.mutable_list_update_response());
+   ApplyUpdateResult apply_update_result = ProcessFullUpdate(response, this);
    RecordApplyUpdateResultWhenReadingFromDisk(apply_update_result);
    if (apply_update_result != APPLY_UPDATE_SUCCESS) {
      hash_prefix_map_.clear();
      return HASH_PREFIX_MAP_GENERATION_FAILURE;
    }

-   state_ = response.new_client_state();
    return READ_SUCCESS;
  }
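The on-disk store is a V4StoreFileFormat protobuf whose magic_number and version_number fields gate the rest of the parse, as the version check above shows. Purely to illustrate that gating, here is a proto-free sketch that validates a plain binary header; the layout and names are invented, since the real file is a serialized protobuf rather than a raw struct.

#include <cstdint>
#include <fstream>
#include <string>

// Invented constants mirroring kFileMagic/kFileVersion above.
const uint32_t kMagic = 0x600D71FE;
const uint32_t kVersion = 9;

enum ReadResult { READ_OK, MAGIC_MISMATCH, VERSION_INCOMPATIBLE, IO_ERROR };

// Reads a <magic, version> header (host byte order; little-endian assumed)
// and refuses to parse the rest of the file unless both fields match what
// this build expects.
ReadResult ValidateHeader(const std::string& path) {
  std::ifstream file(path, std::ios::binary);
  if (!file)
    return IO_ERROR;
  uint32_t magic = 0, version = 0;
  file.read(reinterpret_cast<char*>(&magic), sizeof(magic));
  file.read(reinterpret_cast<char*>(&version), sizeof(version));
  if (!file)
    return IO_ERROR;
  if (magic != kMagic)
    return MAGIC_MISMATCH;
  if (version != kVersion)
    return VERSION_INCOMPATIBLE;
  return READ_OK;
}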

  StoreWriteResult V4Store::WriteToDisk(
      std::unique_ptr<ListUpdateResponse> response) const {
    // Do not write partial updates to the disk.
    // After merging the updates, the ListUpdateResponse passed to this method
    // should be a FULL_UPDATE.
    if (!response->has_response_type() ||
        response->response_type() != ListUpdateResponse::FULL_UPDATE) {

(...skipping 59 matching lines...)

    if (result == 0) {
      return true;
    } else if (result < 0) {
      return HashPrefixMatches(hash_prefix, begin, mid);
    } else {
      return HashPrefixMatches(hash_prefix, mid + prefix_size, end);
    }
  }

  }  // namespace safe_browsing
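HashPrefixMatches, whose tail is shown above, is a recursive binary search over byte offsets into one sorted, concatenated string of equal-length prefixes. A self-contained sketch of the same idea, with an invented name and plain std::string in place of HashPrefixes:

#include <cstddef>
#include <string>

// Binary search for |hash_prefix|'s first |prefix_size| bytes within the
// sorted concatenation |prefixes|. |begin| and |end| are byte offsets, both
// multiples of |prefix_size|, delimiting the half-open search range.
bool PrefixMatches(const std::string& hash_prefix, const std::string& prefixes,
                   size_t prefix_size, size_t begin, size_t end) {
  if (begin >= end)
    return false;
  // Pick the middle element, aligned to a prefix boundary.
  size_t num_elements = (end - begin) / prefix_size;
  size_t mid = begin + (num_elements / 2) * prefix_size;
  int result = hash_prefix.compare(0, prefix_size, prefixes, mid, prefix_size);
  if (result == 0)
    return true;
  if (result < 0)
    return PrefixMatches(hash_prefix, prefixes, prefix_size, begin, mid);
  return PrefixMatches(hash_prefix, prefixes, prefix_size, mid + prefix_size,
                       end);
}

Storing each size's prefixes as one flat sorted string keeps the search cache-friendly and avoids a per-prefix allocation; the prefix size is simply the stride of the search.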