Chromium Code Reviews

Unified Diff: components/safe_browsing_db/v4_store.cc

Issue 2383063003: Add UMA metrics for the time it takes to read store from disk and apply update (Closed)
Patch Set: Use base::StringPrintf to generate the partial histogram name. Thanks Windows! (Created 4 years, 2 months ago)
 // Copyright 2016 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "base/base64.h"
 #include "base/bind.h"
 #include "base/files/file_util.h"
 #include "base/memory/ptr_util.h"
 #include "base/metrics/histogram_macros.h"
 #include "base/metrics/sparse_histogram.h"
 #include "base/stl_util.h"
 #include "base/strings/stringprintf.h"
 #include "components/safe_browsing_db/v4_rice.h"
 #include "components/safe_browsing_db/v4_store.h"
 #include "components/safe_browsing_db/v4_store.pb.h"
 #include "crypto/secure_hash.h"
 #include "crypto/sha2.h"

+using base::TimeTicks;
+
 namespace safe_browsing {

 namespace {
+
 const uint32_t kFileMagic = 0x600D71FE;

 const uint32_t kFileVersion = 9;

-void RecordStoreReadResult(StoreReadResult result) {
-  UMA_HISTOGRAM_ENUMERATION("SafeBrowsing.V4StoreReadResult", result,
-                            STORE_READ_RESULT_MAX);
+std::string GetUmaSuffixForStore(const base::FilePath& file_path) {
+  return base::StringPrintf(
+      ".%" PRIsFP, file_path.BaseName().RemoveExtension().value().c_str());
Nathan Parker 2016/10/04 20:30:43 [no action required] Is this just a way to convert
vakh (use Gerrit instead) 2016/10/04 21:05:15 The file is always ASCII since we chose the name f
 }

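An illustrative aside, not part of this CL (the store file name below is hypothetical): the suffix is simply the store's base file name, and PRIsFP is the printf conversion for base::FilePath::StringType, which is a wide string on Windows; that is why the name is built with base::StringPrintf, per the patch set description.

  // Sketch only: deriving the per-store histogram suffix.
  base::FilePath store_path(FILE_PATH_LITERAL("/profile/UrlMalware.store"));
  std::string suffix = GetUmaSuffixForStore(store_path);  // ".UrlMalware"
  // A metric such as "SafeBrowsing.V4MergeUpdateTime" is then also recorded
  // under "SafeBrowsing.V4MergeUpdateTime.UrlMalware".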
-void RecordStoreWriteResult(StoreWriteResult result) {
-  UMA_HISTOGRAM_ENUMERATION("SafeBrowsing.V4StoreWriteResult", result,
-                            STORE_WRITE_RESULT_MAX);
+void RecordTimeWithAndWithoutStore(const std::string& metric,
+                                   base::TimeDelta time,
+                                   const base::FilePath& file_path) {
+  std::string suffix = GetUmaSuffixForStore(file_path);
+
+  // The histograms below are an expansion of the UMA_HISTOGRAM_LONG_TIMES
+  // macro adapted to allow for a dynamically suffixed histogram name.
+  // Note: The factory creates and owns the histogram.
+  base::HistogramBase* histogram = base::Histogram::FactoryTimeGet(
+      metric, base::TimeDelta::FromMilliseconds(1),
+      base::TimeDelta::FromHours(1), 50,
Nathan Parker 2016/10/04 20:30:44 What bucket values does this produce? I'm guessing
vakh (use Gerrit instead) 2016/10/04 21:05:15 The bucket count is 50. It's the same as UMA_HISTO
+      base::HistogramBase::kUmaTargetedHistogramFlag);
+  if (histogram) {
+    histogram->AddTime(time);
+  }
+
+  base::HistogramBase* histogram_suffix = base::Histogram::FactoryTimeGet(
+      metric + suffix, base::TimeDelta::FromMilliseconds(1),
+      base::TimeDelta::FromHours(1), 50,
+      base::HistogramBase::kUmaTargetedHistogramFlag);
+  if (histogram_suffix) {
+    histogram_suffix->AddTime(time);
+  }
+}
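On the bucket question above, an illustrative note that is not part of the CL: FactoryTimeGet() with a 1 ms minimum, a 1 hour maximum, and 50 buckets produces the same exponentially spaced time buckets that UMA_HISTOGRAM_LONG_TIMES uses; the hand-expanded form is needed only because the UMA_HISTOGRAM_* macros cache the histogram pointer in a static and therefore expect a fixed name, whereas here the name (metric + suffix) varies per store. A rough macro equivalent for the unsuffixed histogram would be:

  // Sketch only; the code above uses FactoryTimeGet() so the histogram name
  // can be composed at runtime.
  UMA_HISTOGRAM_CUSTOM_TIMES("SafeBrowsing.V4MergeUpdateTime", time,
                             base::TimeDelta::FromMilliseconds(1),
                             base::TimeDelta::FromHours(1), 50);

Each call to RecordTimeWithAndWithoutStore() therefore emits two samples: one to the aggregate metric and one to the store-suffixed metric.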
+
+void RecordAddUnlumpedHashesTime(base::TimeDelta time) {
+  UMA_HISTOGRAM_LONG_TIMES("SafeBrowsing.V4AddUnlumpedHashesTime", time);
 }

 void RecordApplyUpdateResult(ApplyUpdateResult result) {
   UMA_HISTOGRAM_ENUMERATION("SafeBrowsing.V4ApplyUpdateResult", result,
                             APPLY_UPDATE_RESULT_MAX);
 }

+void RecordApplyUpdateResultWhenReadingFromDisk(ApplyUpdateResult result) {
+  UMA_HISTOGRAM_ENUMERATION(
+      "SafeBrowsing.V4ApplyUpdateResultWhenReadingFromDisk", result,
+      APPLY_UPDATE_RESULT_MAX);
+}
+
 void RecordDecodeAdditionsResult(V4DecodeResult result) {
   UMA_HISTOGRAM_ENUMERATION("SafeBrowsing.V4DecodeAdditionsResult", result,
                             DECODE_RESULT_MAX);
 }

+void RecordDecodeAdditionsTime(base::TimeDelta time,
+                               const base::FilePath& file_path) {
+  RecordTimeWithAndWithoutStore("SafeBrowsing.V4DecodeAdditionsTime", time,
+                                file_path);
+}
+
 void RecordDecodeRemovalsResult(V4DecodeResult result) {
   UMA_HISTOGRAM_ENUMERATION("SafeBrowsing.V4DecodeRemovalsResult", result,
                             DECODE_RESULT_MAX);
 }

-// TODO(vakh): Collect and record the metrics for time taken to process updates.
+void RecordDecodeRemovalsTime(base::TimeDelta time,
+                              const base::FilePath& file_path) {
+  RecordTimeWithAndWithoutStore("SafeBrowsing.V4DecodeRemovalsTime", time,
+                                file_path);
+}

-void RecordApplyUpdateResultWhenReadingFromDisk(ApplyUpdateResult result) {
-  UMA_HISTOGRAM_ENUMERATION(
-      "SafeBrowsing.V4ApplyUpdateResultWhenReadingFromDisk", result,
-      APPLY_UPDATE_RESULT_MAX);
+void RecordMergeUpdateTime(base::TimeDelta time,
+                           const base::FilePath& file_path) {
+  RecordTimeWithAndWithoutStore("SafeBrowsing.V4MergeUpdateTime", time,
+                                file_path);
+}
+
+void RecordProcessFullUpdateTime(base::TimeDelta time,
+                                 const base::FilePath& file_path) {
+  RecordTimeWithAndWithoutStore("SafeBrowsing.V4ProcessFullUpdateTime", time,
+                                file_path);
+}
+
+void RecordProcessPartialUpdateTime(base::TimeDelta time,
+                                    const base::FilePath& file_path) {
+  RecordTimeWithAndWithoutStore("SafeBrowsing.V4ProcessPartialUpdateTime", time,
+                                file_path);
+}
+
+void RecordReadFromDiskTime(base::TimeDelta time,
+                            const base::FilePath& file_path) {
+  RecordTimeWithAndWithoutStore("SafeBrowsing.V4ReadFromDiskTime", time,
+                                file_path);
+}
+
+void RecordStoreReadResult(StoreReadResult result) {
+  UMA_HISTOGRAM_ENUMERATION("SafeBrowsing.V4StoreReadResult", result,
+                            STORE_READ_RESULT_MAX);
+}
+
+void RecordStoreWriteResult(StoreWriteResult result) {
+  UMA_HISTOGRAM_ENUMERATION("SafeBrowsing.V4StoreWriteResult", result,
+                            STORE_WRITE_RESULT_MAX);
 }

 // Returns the name of the temporary file used to buffer data for
 // |filename|. Exported for unit tests.
 const base::FilePath TemporaryFileForFilename(const base::FilePath& filename) {
   return base::FilePath(filename.value() + FILE_PATH_LITERAL("_new"));
 }

 }  // namespace

(...skipping 41 matching lines...)
   state_ = "";
   return true;
 }

 ApplyUpdateResult V4Store::ProcessPartialUpdateAndWriteToDisk(
     const HashPrefixMap& hash_prefix_map_old,
     std::unique_ptr<ListUpdateResponse> response) {
   DCHECK(response->has_response_type());
   DCHECK_EQ(ListUpdateResponse::PARTIAL_UPDATE, response->response_type());

+  TimeTicks before = TimeTicks::Now();
   ApplyUpdateResult result = ProcessUpdate(hash_prefix_map_old, response);
   if (result == APPLY_UPDATE_SUCCESS) {
+    RecordProcessPartialUpdateTime(TimeTicks::Now() - before, store_path_);
     // TODO(vakh): Create a ListUpdateResponse containing RICE encoded
     // hash prefixes and response_type as FULL_UPDATE, and write that to disk.
   }
   return result;
 }

 ApplyUpdateResult V4Store::ProcessFullUpdateAndWriteToDisk(
     std::unique_ptr<ListUpdateResponse> response) {
+  TimeTicks before = TimeTicks::Now();
   ApplyUpdateResult result = ProcessFullUpdate(response);
   if (result == APPLY_UPDATE_SUCCESS) {
     RecordStoreWriteResult(WriteToDisk(std::move(response)));
+    RecordProcessFullUpdateTime(TimeTicks::Now() - before, store_path_);
   }
   return result;
 }

 ApplyUpdateResult V4Store::ProcessFullUpdate(
     const std::unique_ptr<ListUpdateResponse>& response) {
   DCHECK(response->has_response_type());
   DCHECK_EQ(ListUpdateResponse::FULL_UPDATE, response->response_type());
   // TODO(vakh): For a full update, we don't need to process the update in
   // lexicographical order to store it, but we do need to do that for calculating
(...skipping 12 matching lines...)
   DCHECK_LE(removals_size, 1u);
   if (removals_size == 1) {
     const ThreatEntrySet& removal = response->removals().Get(0);
     const CompressionType compression_type = removal.compression_type();
     if (compression_type == RAW) {
       raw_removals = &removal.raw_indices().indices();
     } else if (compression_type == RICE) {
       DCHECK(removal.has_rice_indices());

       const RiceDeltaEncoding& rice_indices = removal.rice_indices();
+      TimeTicks before = TimeTicks::Now();
       V4DecodeResult decode_result = V4RiceDecoder::DecodeIntegers(
           rice_indices.first_value(), rice_indices.rice_parameter(),
           rice_indices.num_entries(), rice_indices.encoded_data(),
           &rice_removals);
+
       RecordDecodeRemovalsResult(decode_result);
       if (decode_result != DECODE_SUCCESS) {
         return RICE_DECODING_FAILURE;
-      } else {
-        raw_removals = &rice_removals;
       }
+      RecordDecodeRemovalsTime(TimeTicks::Now() - before, store_path_);
+      raw_removals = &rice_removals;
     } else {
       NOTREACHED() << "Unexpected compression_type type: " << compression_type;
       return UNEXPECTED_COMPRESSION_TYPE_REMOVALS_FAILURE;
     }
   }

   HashPrefixMap hash_prefix_map;
   ApplyUpdateResult apply_update_result =
       UpdateHashPrefixMapFromAdditions(response->additions(), &hash_prefix_map);
   if (apply_update_result != APPLY_UPDATE_SUCCESS) {
     return apply_update_result;
   }

   std::string expected_checksum;
   if (response->has_checksum() && response->checksum().has_sha256()) {
     expected_checksum = response->checksum().sha256();
   }

+  TimeTicks before = TimeTicks::Now();
   apply_update_result = MergeUpdate(hash_prefix_map_old, hash_prefix_map,
                                     raw_removals, expected_checksum);
   if (apply_update_result != APPLY_UPDATE_SUCCESS) {
     return apply_update_result;
   }
+  RecordMergeUpdateTime(TimeTicks::Now() - before, store_path_);

   state_ = response->new_client_state();
   return APPLY_UPDATE_SUCCESS;
 }

 void V4Store::ApplyUpdate(
     std::unique_ptr<ListUpdateResponse> response,
     const scoped_refptr<base::SingleThreadTaskRunner>& callback_task_runner,
     UpdatedStoreReadyCallback callback) {
   std::unique_ptr<V4Store> new_store(
(...skipping 37 matching lines...)
       DCHECK(addition.raw_hashes().has_raw_hashes());

       apply_update_result =
           AddUnlumpedHashes(addition.raw_hashes().prefix_size(),
                             addition.raw_hashes().raw_hashes(), additions_map);
     } else if (compression_type == RICE) {
       DCHECK(addition.has_rice_hashes());

       const RiceDeltaEncoding& rice_hashes = addition.rice_hashes();
       std::vector<uint32_t> raw_hashes;
+      TimeTicks before = TimeTicks::Now();
       V4DecodeResult decode_result = V4RiceDecoder::DecodePrefixes(
           rice_hashes.first_value(), rice_hashes.rice_parameter(),
           rice_hashes.num_entries(), rice_hashes.encoded_data(), &raw_hashes);
       RecordDecodeAdditionsResult(decode_result);
       if (decode_result != DECODE_SUCCESS) {
         return RICE_DECODING_FAILURE;
       } else {
+        RecordDecodeAdditionsTime(TimeTicks::Now() - before, store_path_);
         char* raw_hashes_start = reinterpret_cast<char*>(raw_hashes.data());
         size_t raw_hashes_size = sizeof(uint32_t) * raw_hashes.size();

         // Rice-Golomb encoding is used to send compressed 4-byte
         // hash prefixes. Hash prefixes longer than 4 bytes will not be
         // compressed, and will be served in raw format instead.
         // Source: https://developers.google.com/safe-browsing/v4/compression
         const PrefixSize kPrefixSize = 4;
         apply_update_result = AddUnlumpedHashes(kPrefixSize, raw_hashes_start,
                                                 raw_hashes_size, additions_map);
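A small illustrative sketch, not from the CL and using hypothetical values, of what the reinterpret_cast above sets up: the decoded Rice values are 32-bit hash prefixes, and their raw bytes are handed to AddUnlumpedHashes() as one flat string of 4-byte prefixes.

  std::vector<uint32_t> decoded = {0x01020304u, 0xAABBCCDDu};  // hypothetical
  const char* bytes = reinterpret_cast<const char*>(decoded.data());
  std::string lumped(bytes, sizeof(uint32_t) * decoded.size());
  // lumped is 8 bytes long: two 4-byte prefixes in the machine's native byte
  // order, which is the form AddUnlumpedHashes() receives for kPrefixSize 4.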
(...skipping 30 matching lines...)
     return PREFIX_SIZE_TOO_SMALL_FAILURE;
   }
   if (prefix_size > kMaxHashPrefixLength) {
     NOTREACHED();
     return PREFIX_SIZE_TOO_LARGE_FAILURE;
   }
   if (raw_hashes_length % prefix_size != 0) {
     return ADDITIONS_SIZE_UNEXPECTED_FAILURE;
   }

+  TimeTicks before = TimeTicks::Now();
   // TODO(vakh): Figure out a way to avoid the following copy operation.
   (*additions_map)[prefix_size] =
       std::string(raw_hashes_begin, raw_hashes_begin + raw_hashes_length);
+  RecordAddUnlumpedHashesTime(TimeTicks::Now() - before);
   return APPLY_UPDATE_SUCCESS;
 }

 // static
 bool V4Store::GetNextSmallestUnmergedPrefix(
     const HashPrefixMap& hash_prefix_map,
     const IteratorMap& iterator_map,
     HashPrefix* smallest_hash_prefix) {
   HashPrefix current_hash_prefix;
   bool has_unmerged = false;
(...skipping 157 matching lines...)
       return CHECKSUM_MISMATCH_FAILURE;
     }
   }

   return APPLY_UPDATE_SUCCESS;
 }

 StoreReadResult V4Store::ReadFromDisk() {
   DCHECK(task_runner_->RunsTasksOnCurrentThread());

+  TimeTicks before = TimeTicks::Now();
   std::string contents;
   bool read_success = base::ReadFileToString(store_path_, &contents);
   if (!read_success) {
     return FILE_UNREADABLE_FAILURE;
   }

   if (contents.empty()) {
     return FILE_EMPTY_FAILURE;
   }

(...skipping 17 matching lines...)
   }

   std::unique_ptr<ListUpdateResponse> response(new ListUpdateResponse);
   response->Swap(file_format.mutable_list_update_response());
   ApplyUpdateResult apply_update_result = ProcessFullUpdate(response);
   RecordApplyUpdateResultWhenReadingFromDisk(apply_update_result);
   if (apply_update_result != APPLY_UPDATE_SUCCESS) {
     hash_prefix_map_.clear();
     return HASH_PREFIX_MAP_GENERATION_FAILURE;
   }
+  RecordReadFromDiskTime(TimeTicks::Now() - before, store_path_);

   return READ_SUCCESS;
 }

 StoreWriteResult V4Store::WriteToDisk(
     std::unique_ptr<ListUpdateResponse> response) const {
   // Do not write partial updates to the disk.
   // After merging the updates, the ListUpdateResponse passed to this method
   // should be a FULL_UPDATE.
   if (!response->has_response_type() ||
(...skipping 60 matching lines...)
   if (result == 0) {
     return true;
   } else if (result < 0) {
     return HashPrefixMatches(hash_prefix, begin, mid);
   } else {
     return HashPrefixMatches(hash_prefix, mid + prefix_size, end);
   }
 }

 }  // namespace safe_browsing