Chromium Code Reviews

Index: components/safe_browsing_db/v4_store.cc

diff --git a/components/safe_browsing_db/v4_store.cc b/components/safe_browsing_db/v4_store.cc
index bf401c5e75884f5837236a884f5a857f0308bcfa..2876ef747f24137b50c7dd4aab93fc8f72776142 100644
--- a/components/safe_browsing_db/v4_store.cc
+++ b/components/safe_browsing_db/v4_store.cc
@@ -54,6 +54,10 @@ const base::FilePath TemporaryFileForFilename(const base::FilePath& filename) {
 }  // namespace
+using ::google::protobuf::RepeatedField;
+using ::google::protobuf::RepeatedPtrField;
+using ::google::protobuf::int32;
Nathan Parker
2016/07/15 16:49:38
seems like this might conflict with an existing int32 definition

vakh (use Gerrit instead)
2016/07/18 18:43:17
Can't find any definitions: https://cs.chromium.or
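These using-declarations are what let the signatures later in this file drop the fully qualified protobuf names; compare the UpdateHashPrefixMapFromAdditions hunk below, where "const ::google::protobuf::RepeatedPtrField<ThreatEntrySet>&" shortens to "const RepeatedPtrField<ThreatEntrySet>&".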
+
 std::ostream& operator<<(std::ostream& os, const V4Store& store) {
   os << store.DebugString();
   return os;
@@ -110,17 +114,22 @@ void V4Store::ApplyUpdate(
   HashPrefixMap hash_prefix_map;
   ApplyUpdateResult apply_update_result;
   if (response->response_type() == ListUpdateResponse::PARTIAL_UPDATE) {
+    int removals_size = response->removals_size();
Nathan Parker
2016/07/15 16:49:38
no need for the temp removals_size if you're not reusing it

vakh (use Gerrit instead)
2016/07/18 18:43:17
Done.
+    DCHECK(removals_size <= 1);
+    RepeatedField<int32> raw_removals;
     for (const auto& removal : response->removals()) {
Nathan Parker
2016/07/15 16:49:38
You could simplify this since you expect 0 or 1 removals

vakh (use Gerrit instead)
2016/07/18 18:43:17
Done.
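A minimal sketch of the suggested simplification, assuming at most one removal entry as the DCHECK above enforces (illustrative, not necessarily the exact code that landed):

    // With 0 or 1 removal entries guaranteed, the loop collapses to a
    // direct access of the single entry.
    if (response->removals_size() == 1) {
      const ThreatEntrySet& removal = response->removals(0);
      DCHECK_EQ(RAW, removal.compression_type());
      raw_removals = removal.raw_indices().indices();
    }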
       // TODO(vakh): Allow other compression types.
       // See: https://bugs.chromium.org/p/chromium/issues/detail?id=624567
       DCHECK_EQ(RAW, removal.compression_type());
+      const RawIndices& raw_indices = removal.raw_indices();
+      raw_removals = raw_indices.indices();
Nathan Parker
2016/07/15 16:49:38
This does a copy

vakh (use Gerrit instead)
2016/07/18 18:43:17
Done.
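One way to avoid the copy, sketched on the assumption that the removal data in |response| outlives the merge (illustrative; the landed fix may differ): hold a pointer to the protobuf-owned field instead of assigning into a local RepeatedField.

    // Pointing at the protobuf-owned RepeatedField avoids copying the
    // indices; |response| must stay alive until MergeUpdate has run.
    // Callers then handle the no-removals case, e.g. via nullptr.
    const RepeatedField<int32>* raw_removals = nullptr;
    if (response->removals_size() == 1) {
      const ThreatEntrySet& removal = response->removals(0);
      DCHECK_EQ(RAW, removal.compression_type());
      raw_removals = &removal.raw_indices().indices();
    }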
     }
     apply_update_result = UpdateHashPrefixMapFromAdditions(
         response->additions(), &hash_prefix_map);
     if (apply_update_result == APPLY_UPDATE_SUCCESS) {
-      apply_update_result =
-          new_store->MergeUpdate(hash_prefix_map_, hash_prefix_map);
+      apply_update_result = new_store->MergeUpdate(
+          hash_prefix_map_, hash_prefix_map, raw_removals);
     }
     // TODO(vakh): Generate the updated ListUpdateResponse to write to disk.
   } else if (response->response_type() == ListUpdateResponse::FULL_UPDATE) {
@@ -149,7 +158,7 @@ void V4Store::ApplyUpdate(
 // static
 ApplyUpdateResult V4Store::UpdateHashPrefixMapFromAdditions(
-    const ::google::protobuf::RepeatedPtrField<ThreatEntrySet>& additions,
+    const RepeatedPtrField<ThreatEntrySet>& additions,
     HashPrefixMap* additions_map) {
   for (const auto& addition : additions) {
     // TODO(vakh): Allow other compression types.
@@ -239,8 +248,12 @@ void V4Store::ReserveSpaceInPrefixMap(const HashPrefixMap& other_prefixes_map,
   }
 }
-ApplyUpdateResult V4Store::MergeUpdate(const HashPrefixMap& old_prefixes_map,
-                                       const HashPrefixMap& additions_map) {
+ApplyUpdateResult V4Store::MergeUpdate(
+    const HashPrefixMap& old_prefixes_map,
+    const HashPrefixMap& additions_map,
+    const RepeatedField<int32>& raw_removals) {
+  const int* removals_iter = raw_removals.begin();
Nathan Parker
2016/07/15 16:49:38
Put this down by total_picked_from_old, since you don't use it until then

vakh (use Gerrit instead)
2016/07/18 18:43:17
Done.
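A sketch of the suggested placement (names from the patch; assumes nothing before the merge loop touches the iterator):

    // Declare the removals iterator next to the counter it is compared
    // against, immediately before the merge loop that uses both.
    int total_picked_from_old = 0;
    const int* removals_iter = raw_removals.begin();
    // ... merge loop (below) consumes both variables ...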
+
   DCHECK(hash_prefix_map_.empty());
   hash_prefix_map_.clear();
   ReserveSpaceInPrefixMap(old_prefixes_map, &hash_prefix_map_);
@@ -262,6 +275,12 @@ ApplyUpdateResult V4Store::MergeUpdate(const HashPrefixMap& old_prefixes_map,
   // The two constructs to merge are maps: old_prefixes_map, additions_map.
   // At least one of the maps still has elements that need to be merged into the
   // new store.
+
+  // Keep track of the number of elements picked from the old map. This is used
+  // to determine which elements to drop based on the raw_removals. Note that
+  // picked is not the same as merged. A picked element isn't merged if its
+  // index is on the raw_removals list.
+  int total_picked_from_old = 0;
   while (old_has_unmerged || additions_has_unmerged) {
     // If the same hash prefix appears in the existing store and the additions
     // list, something is clearly wrong. Discard the update.
@@ -281,13 +300,21 @@ ApplyUpdateResult V4Store::MergeUpdate(const HashPrefixMap& old_prefixes_map,
     if (pick_from_old) {
       next_smallest_prefix_size = next_smallest_prefix_old.size();
-      // Append the smallest hash to the appropriate list.
-      hash_prefix_map_[next_smallest_prefix_size] += next_smallest_prefix_old;
-
       // Update the iterator map, which means that we have merged one hash
       // prefix of size |next_size_for_old| from the old store.
       old_iterator_map[next_smallest_prefix_size] += next_smallest_prefix_size;
+      if (removals_iter == raw_removals.end() ||
+          *removals_iter != total_picked_from_old) {
+        // Append the smallest hash to the appropriate list.
+        hash_prefix_map_[next_smallest_prefix_size] += next_smallest_prefix_old;
+      } else {
+        // Element not added to new map. Move the removals iterator forward.
+        removals_iter++;
+      }
+
+      total_picked_from_old++;
+
       // Find the next smallest unmerged element in the old store's map.
       old_has_unmerged = GetNextSmallestUnmergedPrefix(
           old_prefixes_map, old_iterator_map, &next_smallest_prefix_old);
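To make the index bookkeeping concrete, here is a minimal self-contained model of the skip-by-index logic (a hypothetical helper, not the V4Store API; the real code walks the old store's prefixes in lexicographic order across prefix sizes, which the pre-sorted vector below stands in for):

    #include <string>
    #include <vector>

    // Model of MergeUpdate's removal handling: elements of |old_list| are
    // picked in order; a picked element is dropped when its pick index is
    // the next entry in the sorted |raw_removals| list, and kept otherwise.
    std::vector<std::string> MergeWithRemovals(
        const std::vector<std::string>& old_list,
        const std::vector<int>& raw_removals) {
      std::vector<std::string> merged;
      auto removals_iter = raw_removals.begin();
      int total_picked_from_old = 0;
      for (const std::string& prefix : old_list) {
        if (removals_iter == raw_removals.end() ||
            *removals_iter != total_picked_from_old) {
          merged.push_back(prefix);  // Index not on the removals list: keep.
        } else {
          ++removals_iter;  // Drop this element and consume the index.
        }
        ++total_picked_from_old;
      }
      return merged;
    }

For example, old_list = {"aaaa", "bbbb", "cccc", "dddd"} with raw_removals = {1, 3} yields {"aaaa", "cccc"}: the elements picked at indices 1 and 3 are dropped.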
@@ -310,7 +337,8 @@ ApplyUpdateResult V4Store::MergeUpdate(const HashPrefixMap& old_prefixes_map,
     }
   }
-  return APPLY_UPDATE_SUCCESS;
+  return (removals_iter == raw_removals.end()) ? APPLY_UPDATE_SUCCESS
+                                               : REMOVALS_INDEX_TOO_LARGE;
 }
 StoreReadResult V4Store::ReadFromDisk() {
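Note on the new return value: in the expected case of a sorted removals list, any index still unconsumed once every old element has been picked must be greater than or equal to the number of entries in the old store, so MergeUpdate reports REMOVALS_INDEX_TOO_LARGE rather than silently ignoring the out-of-range index.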