Index: components/safe_browsing_db/v4_store.cc |
diff --git a/components/safe_browsing_db/v4_store.cc b/components/safe_browsing_db/v4_store.cc |
index 69d49a258452f55a79260ba95c1a72f3a6b98356..87412dee3019a3c86545f26ead0f6209444d087e 100644 |
--- a/components/safe_browsing_db/v4_store.cc |
+++ b/components/safe_browsing_db/v4_store.cc |
@@ -12,6 +12,7 @@ |
#include "components/safe_browsing_db/v4_rice.h" |
#include "components/safe_browsing_db/v4_store.h" |
#include "components/safe_browsing_db/v4_store.pb.h" |
+#include "crypto/sha2.h" |
namespace safe_browsing { |
@@ -113,28 +114,48 @@ bool V4Store::Reset() { |
return true; |
} |
-ApplyUpdateResult V4Store::ProcessFullUpdate( |
+// static |
+ApplyUpdateResult V4Store::ProcessPartialUpdateAndWriteToDisk( |
+ const HashPrefixMap& hash_prefix_map_old, |
std::unique_ptr<ListUpdateResponse> response, |
const std::unique_ptr<V4Store>& new_store) { |
- HashPrefixMap hash_prefix_map; |
- ApplyUpdateResult apply_update_result = |
- UpdateHashPrefixMapFromAdditions(response->additions(), &hash_prefix_map); |
- if (apply_update_result == APPLY_UPDATE_SUCCESS) { |
- new_store->hash_prefix_map_ = hash_prefix_map; |
- RecordStoreWriteResult(new_store->WriteToDisk(std::move(response))); |
+ DCHECK(response->has_response_type()); |
+ DCHECK_EQ(ListUpdateResponse::PARTIAL_UPDATE, response->response_type()); |
+ |
+ ApplyUpdateResult result = |
+ ProcessUpdate(hash_prefix_map_old, response, new_store.get()); |
Nathan Parker
2016/08/08 21:17:19
This change removes the nicely optimized short-cut
vakh (use Gerrit instead)
2016/08/08 22:46:47
Done.
|
+ if (result == APPLY_UPDATE_SUCCESS) { |
+ // TODO(vakh): Create a ListUpdateResponse containing RICE encoded |
+ // hash prefixes and response_type as FULL_UPDATE, and write that to disk. |
} |
- return apply_update_result; |
+ return result; |
} |
-ApplyUpdateResult V4Store::ProcessPartialUpdate( |
+// static |
+ApplyUpdateResult V4Store::ProcessFullUpdateAndWriteToDisk( |
std::unique_ptr<ListUpdateResponse> response, |
const std::unique_ptr<V4Store>& new_store) { |
- // TODO(vakh): |
- // 1. Done: Merge the old store and the new update in new_store. |
- // 2. Create a ListUpdateResponse containing RICE encoded hash-prefixes and |
- // response_type as FULL_UPDATE, and write that to disk. |
- // 3. Remove this if condition after completing 1. and 2. |
+ ApplyUpdateResult result = ProcessFullUpdate(response, new_store.get()); |
+ if (result == APPLY_UPDATE_SUCCESS) { |
+ RecordStoreWriteResult(new_store->WriteToDisk(std::move(response))); |
+ } |
+ return result; |
+} |
+// static |
+ApplyUpdateResult V4Store::ProcessFullUpdate( |
+ const std::unique_ptr<ListUpdateResponse>& response, |
+ V4Store* new_store) { |
+ DCHECK(response->has_response_type()); |
+ DCHECK_EQ(ListUpdateResponse::FULL_UPDATE, response->response_type()); |
+ return ProcessUpdate(HashPrefixMap(), response, new_store); |
+} |
+ |
+// static |
+ApplyUpdateResult V4Store::ProcessUpdate( |
+ const HashPrefixMap& hash_prefix_map_old, |
+ const std::unique_ptr<ListUpdateResponse>& response, |
+ V4Store* new_store) { |
const RepeatedField<int32>* raw_removals = nullptr; |
RepeatedField<int32> rice_removals; |
size_t removals_size = response->removals_size(); |
@@ -167,12 +188,23 @@ ApplyUpdateResult V4Store::ProcessPartialUpdate( |
HashPrefixMap hash_prefix_map; |
ApplyUpdateResult apply_update_result = |
UpdateHashPrefixMapFromAdditions(response->additions(), &hash_prefix_map); |
+ if (apply_update_result != APPLY_UPDATE_SUCCESS) { |
+ return apply_update_result; |
+ } |
- if (apply_update_result == APPLY_UPDATE_SUCCESS) { |
- apply_update_result = |
- new_store->MergeUpdate(hash_prefix_map_, hash_prefix_map, raw_removals); |
+ std::string expected_checksum; |
+ if (response->has_checksum() && response->checksum().has_sha256()) { |
+ expected_checksum = response->checksum().sha256(); |
+ } |
+ |
+ apply_update_result = new_store->MergeUpdate( |
+ hash_prefix_map_old, hash_prefix_map, raw_removals, expected_checksum); |
+ if (apply_update_result != APPLY_UPDATE_SUCCESS) { |
+ return apply_update_result; |
} |
- return apply_update_result; |
+ |
+ new_store->state_ = response->new_client_state(); |
+ return APPLY_UPDATE_SUCCESS; |
} |
void V4Store::ApplyUpdate( |
@@ -181,13 +213,14 @@ void V4Store::ApplyUpdate( |
UpdatedStoreReadyCallback callback) { |
std::unique_ptr<V4Store> new_store( |
new V4Store(this->task_runner_, this->store_path_)); |
- new_store->state_ = response->new_client_state(); |
ApplyUpdateResult apply_update_result; |
if (response->response_type() == ListUpdateResponse::PARTIAL_UPDATE) { |
- apply_update_result = ProcessPartialUpdate(std::move(response), new_store); |
+ apply_update_result = ProcessPartialUpdateAndWriteToDisk( |
+ hash_prefix_map_, std::move(response), new_store); |
} else if (response->response_type() == ListUpdateResponse::FULL_UPDATE) { |
- apply_update_result = ProcessFullUpdate(std::move(response), new_store); |
+ apply_update_result = |
+ ProcessFullUpdateAndWriteToDisk(std::move(response), new_store); |
} else { |
apply_update_result = UNEXPECTED_RESPONSE_TYPE_FAILURE; |
NOTREACHED() << "Unexpected response type: " << response->response_type(); |
@@ -198,6 +231,8 @@ void V4Store::ApplyUpdate( |
callback_task_runner->PostTask( |
FROM_HERE, base::Bind(callback, base::Passed(&new_store))); |
} else { |
+ DVLOG(1) << "ApplyUpdate failed: " << *this; |
Nathan Parker
2016/08/08 21:17:19
Make it a single DVLOG.
vakh (use Gerrit instead)
2016/08/08 22:46:47
Done.
|
+ DVLOG(1) << "reason: " << apply_update_result; |
// new_store failed updating. Pass a nullptr to the callback. |
callback_task_runner->PostTask(FROM_HERE, base::Bind(callback, nullptr)); |
} |
@@ -321,10 +356,10 @@ void V4Store::ReserveSpaceInPrefixMap(const HashPrefixMap& other_prefixes_map, |
} |
} |
-ApplyUpdateResult V4Store::MergeUpdate( |
- const HashPrefixMap& old_prefixes_map, |
- const HashPrefixMap& additions_map, |
- const RepeatedField<int32>* raw_removals) { |
+ApplyUpdateResult V4Store::MergeUpdate(const HashPrefixMap& old_prefixes_map, |
+ const HashPrefixMap& additions_map, |
+ const RepeatedField<int32>* raw_removals, |
+ const std::string& expected_checksum) { |
DCHECK(hash_prefix_map_.empty()); |
hash_prefix_map_.clear(); |
ReserveSpaceInPrefixMap(old_prefixes_map, &hash_prefix_map_); |
@@ -347,6 +382,13 @@ ApplyUpdateResult V4Store::MergeUpdate( |
// At least one of the maps still has elements that need to be merged into the |
// new store. |
+ // |all_prefixes_concatenated| stores the concatenated list of hash prefixes |
+ // in lexicographically sorted order. It is used to calculate the |checksum| at |
+ // the end. This checksum is matched against the expected checksum sent by |
+ // the server. |
+ HashPrefixes all_prefixes_concatenated; |
+ bool calculate_checksum = !expected_checksum.empty(); |
+ |
// Keep track of the number of elements picked from the old map. This is used |
// to determine which elements to drop based on the raw_removals. Note that |
// picked is not the same as merged. A picked element isn't merged if its |
@@ -380,6 +422,10 @@ ApplyUpdateResult V4Store::MergeUpdate( |
*removals_iter != total_picked_from_old) { |
// Append the smallest hash to the appropriate list. |
hash_prefix_map_[next_smallest_prefix_size] += next_smallest_prefix_old; |
+ |
+ if (calculate_checksum) { |
+ all_prefixes_concatenated += next_smallest_prefix_old; |
Nathan Parker
2016/08/08 21:17:19
Rather than building up another whole copy of the
vakh (use Gerrit instead)
2016/08/08 22:46:47
Done.
|
+ } |
} else { |
// Element not added to new map. Move the removals iterator forward. |
removals_iter++; |
@@ -397,6 +443,10 @@ ApplyUpdateResult V4Store::MergeUpdate( |
hash_prefix_map_[next_smallest_prefix_size] += |
next_smallest_prefix_additions; |
+ if (calculate_checksum) { |
+ all_prefixes_concatenated += next_smallest_prefix_additions; |
+ } |
+ |
// Update the iterator map, which means that we have merged one hash |
// prefix of size |next_smallest_prefix_size| from the update. |
additions_iterator_map[next_smallest_prefix_size] += |
@@ -409,6 +459,18 @@ ApplyUpdateResult V4Store::MergeUpdate( |
} |
} |
+ if (calculate_checksum) { |
+ std::string checksum = crypto::SHA256HashString(all_prefixes_concatenated); |
+ if (checksum != expected_checksum) { |
+ std::string checksum_base64, expected_checksum_base64; |
+ base::Base64Encode(checksum, &checksum_base64); |
+ base::Base64Encode(expected_checksum, &expected_checksum_base64); |
+ DVLOG(1) << "calculated checksum: " << checksum_base64; |
Nathan Parker
2016/08/08 21:17:19
How about making it a single DVLOG, with an error
vakh (use Gerrit instead)
2016/08/08 22:46:47
Done.
|
+ DVLOG(1) << "expected checksum: " << expected_checksum_base64; |
+ return CHECKSUM_MISMATCH_FAILURE; |
+ } |
+ } |
+ |
return (!raw_removals || removals_iter == raw_removals->end()) |
Nathan Parker
2016/08/08 21:17:19
It might be useful to check (!raw_removals || removals_iter == raw_removals->end())
vakh (use Gerrit instead)
2016/08/08 22:46:47
Done.
|
? APPLY_UPDATE_SUCCESS |
: REMOVALS_INDEX_TOO_LARGE_FAILURE; |
@@ -450,16 +512,15 @@ StoreReadResult V4Store::ReadFromDisk() { |
return HASH_PREFIX_INFO_MISSING_FAILURE; |
} |
- const ListUpdateResponse& response = file_format.list_update_response(); |
- ApplyUpdateResult apply_update_result = UpdateHashPrefixMapFromAdditions( |
- response.additions(), &hash_prefix_map_); |
+ std::unique_ptr<ListUpdateResponse> response(new ListUpdateResponse); |
+ response->Swap(file_format.mutable_list_update_response()); |
+ ApplyUpdateResult apply_update_result = ProcessFullUpdate(response, this); |
RecordApplyUpdateResultWhenReadingFromDisk(apply_update_result); |
if (apply_update_result != APPLY_UPDATE_SUCCESS) { |
hash_prefix_map_.clear(); |
return HASH_PREFIX_MAP_GENERATION_FAILURE; |
} |
- state_ = response.new_client_state(); |
return READ_SUCCESS; |
} |