Chromium Code Reviews

Index: components/safe_browsing_db/v4_store.cc

diff --git a/components/safe_browsing_db/v4_store.cc b/components/safe_browsing_db/v4_store.cc
index e34cb6f47e9dbf6754ea45f1a26d2f223e6148db..476c0588702ceb07dc33df31304f16925ed589e2 100644
--- a/components/safe_browsing_db/v4_store.cc
+++ b/components/safe_browsing_db/v4_store.cc
@@ -23,14 +23,8 @@ const uint32_t kFileMagic = 0x600D71FE;
 const uint32_t kFileVersion = 9;
-void RecordStoreReadResult(StoreReadResult result) {
-  UMA_HISTOGRAM_ENUMERATION("SafeBrowsing.V4StoreReadResult", result,
-                            STORE_READ_RESULT_MAX);
-}
-
-void RecordStoreWriteResult(StoreWriteResult result) {
-  UMA_HISTOGRAM_ENUMERATION("SafeBrowsing.V4StoreWriteResult", result,
-                            STORE_WRITE_RESULT_MAX);
+void RecordAddUnlumpedHashesTime(base::TimeDelta time) {

    Nathan Parker, 2016/09/30 22:32:05:
    style nit: Personally I think it's easier to follow…

    vakh (use Gerrit instead), 2016/10/03 21:30:36:
    Done. Added the store specific histograms for Time…

+  UMA_HISTOGRAM_LONG_TIMES("SafeBrowsing.V4AddUnlumpedHashesTime", time);
 }
 void RecordApplyUpdateResult(ApplyUpdateResult result) {
@@ -38,22 +32,54 @@ void RecordApplyUpdateResult(ApplyUpdateResult result) {
                             APPLY_UPDATE_RESULT_MAX);
 }
+void RecordApplyUpdateResultWhenReadingFromDisk(ApplyUpdateResult result) {
+  UMA_HISTOGRAM_ENUMERATION(
+      "SafeBrowsing.V4ApplyUpdateResultWhenReadingFromDisk", result,
+      APPLY_UPDATE_RESULT_MAX);
+}
+
 void RecordDecodeAdditionsResult(V4DecodeResult result) {
   UMA_HISTOGRAM_ENUMERATION("SafeBrowsing.V4DecodeAdditionsResult", result,
                             DECODE_RESULT_MAX);
 }
+void RecordDecodeAdditionsTime(base::TimeDelta time) {
+  UMA_HISTOGRAM_LONG_TIMES("SafeBrowsing.V4DecodeAdditionsTime", time);
+}
+
 void RecordDecodeRemovalsResult(V4DecodeResult result) {
   UMA_HISTOGRAM_ENUMERATION("SafeBrowsing.V4DecodeRemovalsResult", result,
                             DECODE_RESULT_MAX);
 }
-// TODO(vakh): Collect and record the metrics for time taken to process updates.
+void RecordDecodeRemovalsTime(base::TimeDelta time) {
+  UMA_HISTOGRAM_LONG_TIMES("SafeBrowsing.V4DecodeRemovalsTime", time);
+}
-void RecordApplyUpdateResultWhenReadingFromDisk(ApplyUpdateResult result) {
-  UMA_HISTOGRAM_ENUMERATION(
-      "SafeBrowsing.V4ApplyUpdateResultWhenReadingFromDisk", result,
-      APPLY_UPDATE_RESULT_MAX);
+void RecordMergeUpdateTime(base::TimeDelta time) {
+  UMA_HISTOGRAM_LONG_TIMES("SafeBrowsing.V4MergeUpdateTime", time);
+}
+
+void RecordProcessFullUpdateTime(base::TimeDelta time) {
+  UMA_HISTOGRAM_LONG_TIMES("SafeBrowsing.V4ProcessFullUpdateTime", time);
+}
+
+void RecordProcessPartialUpdateTime(base::TimeDelta time) {
+  UMA_HISTOGRAM_LONG_TIMES("SafeBrowsing.V4ProcessPartialUpdateTime", time);
+}
+
+void RecordReadFromDiskTime(base::TimeDelta time) {
+  UMA_HISTOGRAM_LONG_TIMES("SafeBrowsing.V4ReadFromDiskTime", time);
+}
+
+void RecordStoreReadResult(StoreReadResult result) {
+  UMA_HISTOGRAM_ENUMERATION("SafeBrowsing.V4StoreReadResult", result,
+                            STORE_READ_RESULT_MAX);
+}
+
+void RecordStoreWriteResult(StoreWriteResult result) {
+  UMA_HISTOGRAM_ENUMERATION("SafeBrowsing.V4StoreWriteResult", result,
+                            STORE_WRITE_RESULT_MAX);
 }
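
    A sketch for reference: each of the new timing histograms can be
    verified in a unit test with base::HistogramTester. The test name and
    the update-driving step below are hypothetical placeholders, not code
    from this CL:

        #include "base/test/histogram_tester.h"

        TEST_F(V4StoreTest, RecordsProcessFullUpdateTime) {
          base::HistogramTester histogram_tester;
          // ... apply a full update to the V4Store under test ...
          // Exactly one sample should land in the new timing histogram.
          histogram_tester.ExpectTotalCount(
              "SafeBrowsing.V4ProcessFullUpdateTime", 1);
        }
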
 // Returns the name of the temporary file used to buffer data for
@@ -115,8 +141,11 @@ ApplyUpdateResult V4Store::ProcessPartialUpdateAndWriteToDisk(
   DCHECK(response->has_response_type());
   DCHECK_EQ(ListUpdateResponse::PARTIAL_UPDATE, response->response_type());
+  base::TimeTicks before = base::TimeTicks::Now();

    Nathan Parker, 2016/09/30 22:32:05:
    I'd add a
        using base::TimeTicks
    to simplify all of…

    vakh (use Gerrit instead), 2016/10/03 21:30:36:
    Done.

   ApplyUpdateResult result = ProcessUpdate(hash_prefix_map_old, response);
+  base::TimeTicks after = base::TimeTicks::Now();
   if (result == APPLY_UPDATE_SUCCESS) {
+    RecordProcessPartialUpdateTime(after - before);

    Nathan Parker, 2016/09/30 22:32:05:
    nit: You can skip the |after| and just do TimeTicks…

    vakh (use Gerrit instead), 2016/10/03 21:30:36:
    Done.

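    Combining the two nits above, the resulting pattern would look roughly
    like this (an illustrative sketch assuming a file-level
    |using base::TimeTicks;|, not verbatim from the final patch set):

        using base::TimeTicks;

        const TimeTicks before = TimeTicks::Now();
        ApplyUpdateResult result = ProcessUpdate(hash_prefix_map_old, response);
        if (result == APPLY_UPDATE_SUCCESS) {
          // The |after| temporary is gone; elapsed wall time is computed
          // inline, and only successful updates are recorded.
          RecordProcessPartialUpdateTime(TimeTicks::Now() - before);
        }
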
     // TODO(vakh): Create a ListUpdateResponse containing RICE encoded
     // hash prefixes and response_type as FULL_UPDATE, and write that to disk.
   }
@@ -125,9 +154,12 @@ ApplyUpdateResult V4Store::ProcessPartialUpdateAndWriteToDisk(
 ApplyUpdateResult V4Store::ProcessFullUpdateAndWriteToDisk(
     std::unique_ptr<ListUpdateResponse> response) {
+  base::TimeTicks before = base::TimeTicks::Now();
   ApplyUpdateResult result = ProcessFullUpdate(response);
+  base::TimeTicks after = base::TimeTicks::Now();
   if (result == APPLY_UPDATE_SUCCESS) {
     RecordStoreWriteResult(WriteToDisk(std::move(response)));
+    RecordProcessFullUpdateTime(after - before);
   }
   return result;
 }
@@ -160,16 +192,19 @@ ApplyUpdateResult V4Store::ProcessUpdate(
       DCHECK(removal.has_rice_indices());
       const RiceDeltaEncoding& rice_indices = removal.rice_indices();
+      base::TimeTicks before = base::TimeTicks::Now();
       V4DecodeResult decode_result = V4RiceDecoder::DecodeIntegers(
           rice_indices.first_value(), rice_indices.rice_parameter(),
           rice_indices.num_entries(), rice_indices.encoded_data(),
           &rice_removals);
+      base::TimeTicks after = base::TimeTicks::Now();
+
       RecordDecodeRemovalsResult(decode_result);
       if (decode_result != DECODE_SUCCESS) {
         return RICE_DECODING_FAILURE;
-      } else {
-        raw_removals = &rice_removals;
       }
+      RecordDecodeRemovalsTime(after - before);
+      raw_removals = &rice_removals;
     } else {
       NOTREACHED() << "Unexpected compression_type type: " << compression_type;
       return UNEXPECTED_COMPRESSION_TYPE_REMOVALS_FAILURE;
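
    Note the restructuring in the removals path above: the |else| branch is
    gone, so the success path reads straight down and the decode time is
    recorded only when decoding succeeds. Condensed, the flow is now
    (illustrative, not verbatim):

        const TimeTicks before = TimeTicks::Now();
        V4DecodeResult decode_result = V4RiceDecoder::DecodeIntegers(...);
        const TimeTicks after = TimeTicks::Now();

        RecordDecodeRemovalsResult(decode_result);
        if (decode_result != DECODE_SUCCESS) {
          return RICE_DECODING_FAILURE;  // Early return: no time recorded.
        }
        RecordDecodeRemovalsTime(after - before);
        raw_removals = &rice_removals;
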
@@ -188,11 +223,14 @@ ApplyUpdateResult V4Store::ProcessUpdate(
     expected_checksum = response->checksum().sha256();
   }
+  base::TimeTicks before = base::TimeTicks::Now();
   apply_update_result = MergeUpdate(hash_prefix_map_old, hash_prefix_map,
                                     raw_removals, expected_checksum);
+  base::TimeTicks after = base::TimeTicks::Now();
   if (apply_update_result != APPLY_UPDATE_SUCCESS) {
     return apply_update_result;
   }
+  RecordMergeUpdateTime(after - before);
   state_ = response->new_client_state();
   return APPLY_UPDATE_SUCCESS;
@@ -250,13 +288,16 @@ ApplyUpdateResult V4Store::UpdateHashPrefixMapFromAdditions(
       const RiceDeltaEncoding& rice_hashes = addition.rice_hashes();
       std::vector<uint32_t> raw_hashes;
+      base::TimeTicks before = base::TimeTicks::Now();
       V4DecodeResult decode_result = V4RiceDecoder::DecodePrefixes(
           rice_hashes.first_value(), rice_hashes.rice_parameter(),
           rice_hashes.num_entries(), rice_hashes.encoded_data(), &raw_hashes);
+      base::TimeTicks after = base::TimeTicks::Now();
       RecordDecodeAdditionsResult(decode_result);
       if (decode_result != DECODE_SUCCESS) {
         return RICE_DECODING_FAILURE;
       } else {
+        RecordDecodeAdditionsTime(after - before);
         char* raw_hashes_start = reinterpret_cast<char*>(raw_hashes.data());
         size_t raw_hashes_size = sizeof(uint32_t) * raw_hashes.size();
@@ -307,9 +348,12 @@ ApplyUpdateResult V4Store::AddUnlumpedHashes(PrefixSize prefix_size,
     return ADDITIONS_SIZE_UNEXPECTED_FAILURE;
   }
+  base::TimeTicks before = base::TimeTicks::Now();

    Nathan Parker, 2016/09/30 22:32:05:
    What is this recording? The copying of the string…

    vakh (use Gerrit instead), 2016/10/03 21:30:36:
    My goal is to remove the TODO. If UMA shows that t…

   // TODO(vakh): Figure out a way to avoid the following copy operation.
   (*additions_map)[prefix_size] =
       std::string(raw_hashes_begin, raw_hashes_begin + raw_hashes_length);
+  base::TimeTicks after = base::TimeTicks::Now();
+  RecordAddUnlumpedHashesTime(after - before);
   return APPLY_UPDATE_SUCCESS;
 }
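
    The timer here brackets exactly the std::string copy that the TODO
    flags. If UMA shows the copy is expensive, one illustrative direction
    (an assumption, not part of this CL; it presumes the map values remain
    std::string) is to assign into the existing slot instead of building a
    temporary:

        // The bytes are still copied, but std::string::assign reuses the
        // slot's existing capacity on repeated updates instead of
        // constructing and move-assigning a fresh temporary string.
        (*additions_map)[prefix_size].assign(raw_hashes_begin,
                                             raw_hashes_length);
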
@@ -487,6 +531,7 @@ ApplyUpdateResult V4Store::MergeUpdate(const HashPrefixMap& old_prefixes_map,
 StoreReadResult V4Store::ReadFromDisk() {
   DCHECK(task_runner_->RunsTasksOnCurrentThread());
+  base::TimeTicks before = base::TimeTicks::Now();
   std::string contents;
   bool read_success = base::ReadFileToString(store_path_, &contents);
   if (!read_success) {
@@ -519,11 +564,13 @@ StoreReadResult V4Store::ReadFromDisk() {
   std::unique_ptr<ListUpdateResponse> response(new ListUpdateResponse);
   response->Swap(file_format.mutable_list_update_response());
   ApplyUpdateResult apply_update_result = ProcessFullUpdate(response);
+  base::TimeTicks after = base::TimeTicks::Now();
   RecordApplyUpdateResultWhenReadingFromDisk(apply_update_result);
   if (apply_update_result != APPLY_UPDATE_SUCCESS) {
     hash_prefix_map_.clear();
     return HASH_PREFIX_MAP_GENERATION_FAILURE;
   }
+  RecordReadFromDiskTime(after - before);
   return READ_SUCCESS;
 }
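
    Worth noting: for the simpler cases, base/metrics/histogram_macros.h
    also offers SCOPED_UMA_HISTOGRAM_LONG_TIMER, which records elapsed time
    on scope exit. A sketch only: it fires on every return path, so it would
    not keep the record-only-on-success behavior this CL deliberately
    chooses:

        StoreReadResult V4Store::ReadFromDisk() {
          // Records SafeBrowsing.V4ReadFromDiskTime whenever the function
          // returns, including the failure paths filtered out above.
          SCOPED_UMA_HISTOGRAM_LONG_TIMER("SafeBrowsing.V4ReadFromDiskTime");
          // ...
        }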