Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 // Copyright 2016 The Chromium Authors. All rights reserved. | 1 // Copyright 2016 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "base/base64.h" | 5 #include "base/base64.h" |
| 6 #include "base/bind.h" | 6 #include "base/bind.h" |
| 7 #include "base/files/file_util.h" | 7 #include "base/files/file_util.h" |
| 8 #include "base/memory/ptr_util.h" | 8 #include "base/memory/ptr_util.h" |
| 9 #include "base/metrics/histogram_macros.h" | 9 #include "base/metrics/histogram_macros.h" |
| 10 #include "base/metrics/sparse_histogram.h" | 10 #include "base/metrics/sparse_histogram.h" |
| 11 #include "base/stl_util.h" | 11 #include "base/stl_util.h" |
| 12 #include "base/strings/stringprintf.h" | 12 #include "base/strings/stringprintf.h" |
| 13 #include "components/safe_browsing_db/v4_rice.h" | 13 #include "components/safe_browsing_db/v4_rice.h" |
| 14 #include "components/safe_browsing_db/v4_store.h" | 14 #include "components/safe_browsing_db/v4_store.h" |
| 15 #include "components/safe_browsing_db/v4_store.pb.h" | 15 #include "components/safe_browsing_db/v4_store.pb.h" |
| 16 #include "crypto/secure_hash.h" | 16 #include "crypto/secure_hash.h" |
| 17 #include "crypto/sha2.h" | 17 #include "crypto/sha2.h" |
| 18 | 18 |
| 19 namespace safe_browsing { | 19 namespace safe_browsing { |
| 20 | 20 |
| 21 namespace { | 21 namespace { |
| 22 const uint32_t kFileMagic = 0x600D71FE; | 22 const uint32_t kFileMagic = 0x600D71FE; |
| 23 | 23 |
| 24 const uint32_t kFileVersion = 9; | 24 const uint32_t kFileVersion = 9; |
| 25 | 25 |
| 26 void RecordStoreReadResult(StoreReadResult result) { | 26 void RecordAddUnlumpedHashesTime(base::TimeDelta time) { |
|
Nathan Parker
2016/09/30 22:32:05
style nit: Personally I think it's easier to follo
vakh (use Gerrit instead)
2016/10/03 21:30:36
Done. Added the store specific histograms for Time
| |
| 27 UMA_HISTOGRAM_ENUMERATION("SafeBrowsing.V4StoreReadResult", result, | 27 UMA_HISTOGRAM_LONG_TIMES("SafeBrowsing.V4AddUnlumpedHashesTime", time); |
| 28 STORE_READ_RESULT_MAX); | |
| 29 } | |
| 30 | |
| 31 void RecordStoreWriteResult(StoreWriteResult result) { | |
| 32 UMA_HISTOGRAM_ENUMERATION("SafeBrowsing.V4StoreWriteResult", result, | |
| 33 STORE_WRITE_RESULT_MAX); | |
| 34 } | 28 } |
| 35 | 29 |
| 36 void RecordApplyUpdateResult(ApplyUpdateResult result) { | 30 void RecordApplyUpdateResult(ApplyUpdateResult result) { |
| 37 UMA_HISTOGRAM_ENUMERATION("SafeBrowsing.V4ApplyUpdateResult", result, | 31 UMA_HISTOGRAM_ENUMERATION("SafeBrowsing.V4ApplyUpdateResult", result, |
| 38 APPLY_UPDATE_RESULT_MAX); | 32 APPLY_UPDATE_RESULT_MAX); |
| 39 } | 33 } |
| 40 | 34 |
| 35 void RecordApplyUpdateResultWhenReadingFromDisk(ApplyUpdateResult result) { | |
| 36 UMA_HISTOGRAM_ENUMERATION( | |
| 37 "SafeBrowsing.V4ApplyUpdateResultWhenReadingFromDisk", result, | |
| 38 APPLY_UPDATE_RESULT_MAX); | |
| 39 } | |
| 40 | |
| 41 void RecordDecodeAdditionsResult(V4DecodeResult result) { | 41 void RecordDecodeAdditionsResult(V4DecodeResult result) { |
| 42 UMA_HISTOGRAM_ENUMERATION("SafeBrowsing.V4DecodeAdditionsResult", result, | 42 UMA_HISTOGRAM_ENUMERATION("SafeBrowsing.V4DecodeAdditionsResult", result, |
| 43 DECODE_RESULT_MAX); | 43 DECODE_RESULT_MAX); |
| 44 } | 44 } |
| 45 | 45 |
| 46 void RecordDecodeAdditionsTime(base::TimeDelta time) { | |
| 47 UMA_HISTOGRAM_LONG_TIMES("SafeBrowsing.V4DecodeAdditionsTime", time); | |
| 48 } | |
| 49 | |
| 46 void RecordDecodeRemovalsResult(V4DecodeResult result) { | 50 void RecordDecodeRemovalsResult(V4DecodeResult result) { |
| 47 UMA_HISTOGRAM_ENUMERATION("SafeBrowsing.V4DecodeRemovalsResult", result, | 51 UMA_HISTOGRAM_ENUMERATION("SafeBrowsing.V4DecodeRemovalsResult", result, |
| 48 DECODE_RESULT_MAX); | 52 DECODE_RESULT_MAX); |
| 49 } | 53 } |
| 50 | 54 |
| 51 // TODO(vakh): Collect and record the metrics for time taken to process updates. | 55 void RecordDecodeRemovalsTime(base::TimeDelta time) { |
| 56 UMA_HISTOGRAM_LONG_TIMES("SafeBrowsing.V4DecodeRemovalsTime", time); | |
| 57 } | |
| 52 | 58 |
| 53 void RecordApplyUpdateResultWhenReadingFromDisk(ApplyUpdateResult result) { | 59 void RecordMergeUpdateTime(base::TimeDelta time) { |
| 54 UMA_HISTOGRAM_ENUMERATION( | 60 UMA_HISTOGRAM_LONG_TIMES("SafeBrowsing.V4MergeUpdateTime", time); |
| 55 "SafeBrowsing.V4ApplyUpdateResultWhenReadingFromDisk", result, | 61 } |
| 56 APPLY_UPDATE_RESULT_MAX); | 62 |
| 63 void RecordProcessFullUpdateTime(base::TimeDelta time) { | |
| 64 UMA_HISTOGRAM_LONG_TIMES("SafeBrowsing.V4ProcessFullUpdateTime", time); | |
| 65 } | |
| 66 | |
| 67 void RecordProcessPartialUpdateTime(base::TimeDelta time) { | |
| 68 UMA_HISTOGRAM_LONG_TIMES("SafeBrowsing.V4ProcessPartialUpdateTime", time); | |
| 69 } | |
| 70 | |
| 71 void RecordReadFromDiskTime(base::TimeDelta time) { | |
| 72 UMA_HISTOGRAM_LONG_TIMES("SafeBrowsing.V4ReadFromDiskTime", time); | |
| 73 } | |
| 74 | |
| 75 void RecordStoreReadResult(StoreReadResult result) { | |
| 76 UMA_HISTOGRAM_ENUMERATION("SafeBrowsing.V4StoreReadResult", result, | |
| 77 STORE_READ_RESULT_MAX); | |
| 78 } | |
| 79 | |
| 80 void RecordStoreWriteResult(StoreWriteResult result) { | |
| 81 UMA_HISTOGRAM_ENUMERATION("SafeBrowsing.V4StoreWriteResult", result, | |
| 82 STORE_WRITE_RESULT_MAX); | |
| 57 } | 83 } |
| 58 | 84 |
| 59 // Returns the name of the temporary file used to buffer data for | 85 // Returns the name of the temporary file used to buffer data for |
| 60 // |filename|. Exported for unit tests. | 86 // |filename|. Exported for unit tests. |
| 61 const base::FilePath TemporaryFileForFilename(const base::FilePath& filename) { | 87 const base::FilePath TemporaryFileForFilename(const base::FilePath& filename) { |
| 62 return base::FilePath(filename.value() + FILE_PATH_LITERAL("_new")); | 88 return base::FilePath(filename.value() + FILE_PATH_LITERAL("_new")); |
| 63 } | 89 } |
| 64 | 90 |
| 65 } // namespace | 91 } // namespace |
| 66 | 92 |
| (...skipping 41 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 108 state_ = ""; | 134 state_ = ""; |
| 109 return true; | 135 return true; |
| 110 } | 136 } |
| 111 | 137 |
| 112 ApplyUpdateResult V4Store::ProcessPartialUpdateAndWriteToDisk( | 138 ApplyUpdateResult V4Store::ProcessPartialUpdateAndWriteToDisk( |
| 113 const HashPrefixMap& hash_prefix_map_old, | 139 const HashPrefixMap& hash_prefix_map_old, |
| 114 std::unique_ptr<ListUpdateResponse> response) { | 140 std::unique_ptr<ListUpdateResponse> response) { |
| 115 DCHECK(response->has_response_type()); | 141 DCHECK(response->has_response_type()); |
| 116 DCHECK_EQ(ListUpdateResponse::PARTIAL_UPDATE, response->response_type()); | 142 DCHECK_EQ(ListUpdateResponse::PARTIAL_UPDATE, response->response_type()); |
| 117 | 143 |
| 144 base::TimeTicks before = base::TimeTicks::Now(); | |
|
Nathan Parker
2016/09/30 22:32:05
I'd add a
using base::TimeTicks
to simplify all o
vakh (use Gerrit instead)
2016/10/03 21:30:36
Done.
| |
| 118 ApplyUpdateResult result = ProcessUpdate(hash_prefix_map_old, response); | 145 ApplyUpdateResult result = ProcessUpdate(hash_prefix_map_old, response); |
| 146 base::TimeTicks after = base::TimeTicks::Now(); | |
| 119 if (result == APPLY_UPDATE_SUCCESS) { | 147 if (result == APPLY_UPDATE_SUCCESS) { |
| 148 RecordProcessPartialUpdateTime(after - before); | |
|
Nathan Parker
2016/09/30 22:32:05
nit: You can skip the |afer| and just do TimeTicks
vakh (use Gerrit instead)
2016/10/03 21:30:36
Done.
| |
| 120 // TODO(vakh): Create a ListUpdateResponse containing RICE encoded | 149 // TODO(vakh): Create a ListUpdateResponse containing RICE encoded |
| 121 // hash prefixes and response_type as FULL_UPDATE, and write that to disk. | 150 // hash prefixes and response_type as FULL_UPDATE, and write that to disk. |
| 122 } | 151 } |
| 123 return result; | 152 return result; |
| 124 } | 153 } |
| 125 | 154 |
| 126 ApplyUpdateResult V4Store::ProcessFullUpdateAndWriteToDisk( | 155 ApplyUpdateResult V4Store::ProcessFullUpdateAndWriteToDisk( |
| 127 std::unique_ptr<ListUpdateResponse> response) { | 156 std::unique_ptr<ListUpdateResponse> response) { |
| 157 base::TimeTicks before = base::TimeTicks::Now(); | |
| 128 ApplyUpdateResult result = ProcessFullUpdate(response); | 158 ApplyUpdateResult result = ProcessFullUpdate(response); |
| 159 base::TimeTicks after = base::TimeTicks::Now(); | |
| 129 if (result == APPLY_UPDATE_SUCCESS) { | 160 if (result == APPLY_UPDATE_SUCCESS) { |
| 130 RecordStoreWriteResult(WriteToDisk(std::move(response))); | 161 RecordStoreWriteResult(WriteToDisk(std::move(response))); |
| 162 RecordProcessFullUpdateTime(after - before); | |
| 131 } | 163 } |
| 132 return result; | 164 return result; |
| 133 } | 165 } |
| 134 | 166 |
| 135 ApplyUpdateResult V4Store::ProcessFullUpdate( | 167 ApplyUpdateResult V4Store::ProcessFullUpdate( |
| 136 const std::unique_ptr<ListUpdateResponse>& response) { | 168 const std::unique_ptr<ListUpdateResponse>& response) { |
| 137 DCHECK(response->has_response_type()); | 169 DCHECK(response->has_response_type()); |
| 138 DCHECK_EQ(ListUpdateResponse::FULL_UPDATE, response->response_type()); | 170 DCHECK_EQ(ListUpdateResponse::FULL_UPDATE, response->response_type()); |
| 139 // TODO(vakh): For a full update, we don't need to process the update in | 171 // TODO(vakh): For a full update, we don't need to process the update in |
| 140 // lexicographical order to store it, but we do need to do that for calculating | 172 |
| (...skipping 12 matching lines...) Expand all Loading... | |
| 153 DCHECK_LE(removals_size, 1u); | 185 DCHECK_LE(removals_size, 1u); |
| 154 if (removals_size == 1) { | 186 if (removals_size == 1) { |
| 155 const ThreatEntrySet& removal = response->removals().Get(0); | 187 const ThreatEntrySet& removal = response->removals().Get(0); |
| 156 const CompressionType compression_type = removal.compression_type(); | 188 const CompressionType compression_type = removal.compression_type(); |
| 157 if (compression_type == RAW) { | 189 if (compression_type == RAW) { |
| 158 raw_removals = &removal.raw_indices().indices(); | 190 raw_removals = &removal.raw_indices().indices(); |
| 159 } else if (compression_type == RICE) { | 191 } else if (compression_type == RICE) { |
| 160 DCHECK(removal.has_rice_indices()); | 192 DCHECK(removal.has_rice_indices()); |
| 161 | 193 |
| 162 const RiceDeltaEncoding& rice_indices = removal.rice_indices(); | 194 const RiceDeltaEncoding& rice_indices = removal.rice_indices(); |
| 195 base::TimeTicks before = base::TimeTicks::Now(); | |
| 163 V4DecodeResult decode_result = V4RiceDecoder::DecodeIntegers( | 196 V4DecodeResult decode_result = V4RiceDecoder::DecodeIntegers( |
| 164 rice_indices.first_value(), rice_indices.rice_parameter(), | 197 rice_indices.first_value(), rice_indices.rice_parameter(), |
| 165 rice_indices.num_entries(), rice_indices.encoded_data(), | 198 rice_indices.num_entries(), rice_indices.encoded_data(), |
| 166 &rice_removals); | 199 &rice_removals); |
| 200 base::TimeTicks after = base::TimeTicks::Now(); | |
| 201 | |
| 167 RecordDecodeRemovalsResult(decode_result); | 202 RecordDecodeRemovalsResult(decode_result); |
| 168 if (decode_result != DECODE_SUCCESS) { | 203 if (decode_result != DECODE_SUCCESS) { |
| 169 return RICE_DECODING_FAILURE; | 204 return RICE_DECODING_FAILURE; |
| 170 } else { | |
| 171 raw_removals = &rice_removals; | |
| 172 } | 205 } |
| 206 RecordDecodeRemovalsTime(after - before); | |
| 207 raw_removals = &rice_removals; | |
| 173 } else { | 208 } else { |
| 174 NOTREACHED() << "Unexpected compression_type type: " << compression_type; | 209 NOTREACHED() << "Unexpected compression_type type: " << compression_type; |
| 175 return UNEXPECTED_COMPRESSION_TYPE_REMOVALS_FAILURE; | 210 return UNEXPECTED_COMPRESSION_TYPE_REMOVALS_FAILURE; |
| 176 } | 211 } |
| 177 } | 212 } |
| 178 | 213 |
| 179 HashPrefixMap hash_prefix_map; | 214 HashPrefixMap hash_prefix_map; |
| 180 ApplyUpdateResult apply_update_result = | 215 ApplyUpdateResult apply_update_result = |
| 181 UpdateHashPrefixMapFromAdditions(response->additions(), &hash_prefix_map); | 216 UpdateHashPrefixMapFromAdditions(response->additions(), &hash_prefix_map); |
| 182 if (apply_update_result != APPLY_UPDATE_SUCCESS) { | 217 if (apply_update_result != APPLY_UPDATE_SUCCESS) { |
| 183 return apply_update_result; | 218 return apply_update_result; |
| 184 } | 219 } |
| 185 | 220 |
| 186 std::string expected_checksum; | 221 std::string expected_checksum; |
| 187 if (response->has_checksum() && response->checksum().has_sha256()) { | 222 if (response->has_checksum() && response->checksum().has_sha256()) { |
| 188 expected_checksum = response->checksum().sha256(); | 223 expected_checksum = response->checksum().sha256(); |
| 189 } | 224 } |
| 190 | 225 |
| 226 base::TimeTicks before = base::TimeTicks::Now(); | |
| 191 apply_update_result = MergeUpdate(hash_prefix_map_old, hash_prefix_map, | 227 apply_update_result = MergeUpdate(hash_prefix_map_old, hash_prefix_map, |
| 192 raw_removals, expected_checksum); | 228 raw_removals, expected_checksum); |
| 229 base::TimeTicks after = base::TimeTicks::Now(); | |
| 193 if (apply_update_result != APPLY_UPDATE_SUCCESS) { | 230 if (apply_update_result != APPLY_UPDATE_SUCCESS) { |
| 194 return apply_update_result; | 231 return apply_update_result; |
| 195 } | 232 } |
| 233 RecordMergeUpdateTime(after - before); | |
| 196 | 234 |
| 197 state_ = response->new_client_state(); | 235 state_ = response->new_client_state(); |
| 198 return APPLY_UPDATE_SUCCESS; | 236 return APPLY_UPDATE_SUCCESS; |
| 199 } | 237 } |
| 200 | 238 |
| 201 void V4Store::ApplyUpdate( | 239 void V4Store::ApplyUpdate( |
| 202 std::unique_ptr<ListUpdateResponse> response, | 240 std::unique_ptr<ListUpdateResponse> response, |
| 203 const scoped_refptr<base::SingleThreadTaskRunner>& callback_task_runner, | 241 const scoped_refptr<base::SingleThreadTaskRunner>& callback_task_runner, |
| 204 UpdatedStoreReadyCallback callback) { | 242 UpdatedStoreReadyCallback callback) { |
| 205 std::unique_ptr<V4Store> new_store( | 243 std::unique_ptr<V4Store> new_store( |
| (...skipping 37 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 243 DCHECK(addition.raw_hashes().has_raw_hashes()); | 281 DCHECK(addition.raw_hashes().has_raw_hashes()); |
| 244 | 282 |
| 245 apply_update_result = | 283 apply_update_result = |
| 246 AddUnlumpedHashes(addition.raw_hashes().prefix_size(), | 284 AddUnlumpedHashes(addition.raw_hashes().prefix_size(), |
| 247 addition.raw_hashes().raw_hashes(), additions_map); | 285 addition.raw_hashes().raw_hashes(), additions_map); |
| 248 } else if (compression_type == RICE) { | 286 } else if (compression_type == RICE) { |
| 249 DCHECK(addition.has_rice_hashes()); | 287 DCHECK(addition.has_rice_hashes()); |
| 250 | 288 |
| 251 const RiceDeltaEncoding& rice_hashes = addition.rice_hashes(); | 289 const RiceDeltaEncoding& rice_hashes = addition.rice_hashes(); |
| 252 std::vector<uint32_t> raw_hashes; | 290 std::vector<uint32_t> raw_hashes; |
| 291 base::TimeTicks before = base::TimeTicks::Now(); | |
| 253 V4DecodeResult decode_result = V4RiceDecoder::DecodePrefixes( | 292 V4DecodeResult decode_result = V4RiceDecoder::DecodePrefixes( |
| 254 rice_hashes.first_value(), rice_hashes.rice_parameter(), | 293 rice_hashes.first_value(), rice_hashes.rice_parameter(), |
| 255 rice_hashes.num_entries(), rice_hashes.encoded_data(), &raw_hashes); | 294 rice_hashes.num_entries(), rice_hashes.encoded_data(), &raw_hashes); |
| 295 base::TimeTicks after = base::TimeTicks::Now(); | |
| 256 RecordDecodeAdditionsResult(decode_result); | 296 RecordDecodeAdditionsResult(decode_result); |
| 257 if (decode_result != DECODE_SUCCESS) { | 297 if (decode_result != DECODE_SUCCESS) { |
| 258 return RICE_DECODING_FAILURE; | 298 return RICE_DECODING_FAILURE; |
| 259 } else { | 299 } else { |
| 300 RecordDecodeAdditionsTime(after - before); | |
| 260 char* raw_hashes_start = reinterpret_cast<char*>(raw_hashes.data()); | 301 char* raw_hashes_start = reinterpret_cast<char*>(raw_hashes.data()); |
| 261 size_t raw_hashes_size = sizeof(uint32_t) * raw_hashes.size(); | 302 size_t raw_hashes_size = sizeof(uint32_t) * raw_hashes.size(); |
| 262 | 303 |
| 263 // Rice-Golomb encoding is used to send compressed 4-byte | 304 // hash prefixes. Hash prefixes longer than 4 bytes will not be |
| 264 // hash prefixes. Hash prefixes longer than 4 bytes will not be | 305 // hash prefixes. Hash prefixes longer than 4 bytes will not be |
| 265 // compressed, and will be served in raw format instead. | 306 // compressed, and will be served in raw format instead. |
| 266 // Source: https://developers.google.com/safe-browsing/v4/compression | 307 // Source: https://developers.google.com/safe-browsing/v4/compression |
| 267 const PrefixSize kPrefixSize = 4; | 308 const PrefixSize kPrefixSize = 4; |
| 268 apply_update_result = AddUnlumpedHashes(kPrefixSize, raw_hashes_start, | 309 apply_update_result = AddUnlumpedHashes(kPrefixSize, raw_hashes_start, |
| 269 raw_hashes_size, additions_map); | 310 raw_hashes_size, additions_map); |
| (...skipping 30 matching lines...) Expand all Loading... | |
| 300 return PREFIX_SIZE_TOO_SMALL_FAILURE; | 341 return PREFIX_SIZE_TOO_SMALL_FAILURE; |
| 301 } | 342 } |
| 302 if (prefix_size > kMaxHashPrefixLength) { | 343 if (prefix_size > kMaxHashPrefixLength) { |
| 303 NOTREACHED(); | 344 NOTREACHED(); |
| 304 return PREFIX_SIZE_TOO_LARGE_FAILURE; | 345 return PREFIX_SIZE_TOO_LARGE_FAILURE; |
| 305 } | 346 } |
| 306 if (raw_hashes_length % prefix_size != 0) { | 347 if (raw_hashes_length % prefix_size != 0) { |
| 307 return ADDITIONS_SIZE_UNEXPECTED_FAILURE; | 348 return ADDITIONS_SIZE_UNEXPECTED_FAILURE; |
| 308 } | 349 } |
| 309 | 350 |
| 351 base::TimeTicks before = base::TimeTicks::Now(); | |
|
Nathan Parker
2016/09/30 22:32:05
What is this recording? The copying of the string
vakh (use Gerrit instead)
2016/10/03 21:30:36
My goal is to remove the TODO.
If UMA shows that t
| |
| 310 // TODO(vakh): Figure out a way to avoid the following copy operation. | 352 // TODO(vakh): Figure out a way to avoid the following copy operation. |
| 311 (*additions_map)[prefix_size] = | 353 (*additions_map)[prefix_size] = |
| 312 std::string(raw_hashes_begin, raw_hashes_begin + raw_hashes_length); | 354 std::string(raw_hashes_begin, raw_hashes_begin + raw_hashes_length); |
| 355 base::TimeTicks after = base::TimeTicks::Now(); | |
| 356 RecordAddUnlumpedHashesTime(after - before); | |
| 313 return APPLY_UPDATE_SUCCESS; | 357 return APPLY_UPDATE_SUCCESS; |
| 314 } | 358 } |
| 315 | 359 |
| 316 // static | 360 // static |
| 317 bool V4Store::GetNextSmallestUnmergedPrefix( | 361 bool V4Store::GetNextSmallestUnmergedPrefix( |
| 318 const HashPrefixMap& hash_prefix_map, | 362 const HashPrefixMap& hash_prefix_map, |
| 319 const IteratorMap& iterator_map, | 363 const IteratorMap& iterator_map, |
| 320 HashPrefix* smallest_hash_prefix) { | 364 HashPrefix* smallest_hash_prefix) { |
| 321 HashPrefix current_hash_prefix; | 365 HashPrefix current_hash_prefix; |
| 322 bool has_unmerged = false; | 366 bool has_unmerged = false; |
| (...skipping 157 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 480 return CHECKSUM_MISMATCH_FAILURE; | 524 return CHECKSUM_MISMATCH_FAILURE; |
| 481 } | 525 } |
| 482 } | 526 } |
| 483 | 527 |
| 484 return APPLY_UPDATE_SUCCESS; | 528 return APPLY_UPDATE_SUCCESS; |
| 485 } | 529 } |
| 486 | 530 |
| 487 StoreReadResult V4Store::ReadFromDisk() { | 531 StoreReadResult V4Store::ReadFromDisk() { |
| 488 DCHECK(task_runner_->RunsTasksOnCurrentThread()); | 532 DCHECK(task_runner_->RunsTasksOnCurrentThread()); |
| 489 | 533 |
| 534 base::TimeTicks before = base::TimeTicks::Now(); | |
| 490 std::string contents; | 535 std::string contents; |
| 491 bool read_success = base::ReadFileToString(store_path_, &contents); | 536 bool read_success = base::ReadFileToString(store_path_, &contents); |
| 492 if (!read_success) { | 537 if (!read_success) { |
| 493 return FILE_UNREADABLE_FAILURE; | 538 return FILE_UNREADABLE_FAILURE; |
| 494 } | 539 } |
| 495 | 540 |
| 496 if (contents.empty()) { | 541 if (contents.empty()) { |
| 497 return FILE_EMPTY_FAILURE; | 542 return FILE_EMPTY_FAILURE; |
| 498 } | 543 } |
| 499 | 544 |
| (...skipping 12 matching lines...) Expand all Loading... | |
| 512 return FILE_VERSION_INCOMPATIBLE_FAILURE; | 557 return FILE_VERSION_INCOMPATIBLE_FAILURE; |
| 513 } | 558 } |
| 514 | 559 |
| 515 if (!file_format.has_list_update_response()) { | 560 if (!file_format.has_list_update_response()) { |
| 516 return HASH_PREFIX_INFO_MISSING_FAILURE; | 561 return HASH_PREFIX_INFO_MISSING_FAILURE; |
| 517 } | 562 } |
| 518 | 563 |
| 519 std::unique_ptr<ListUpdateResponse> response(new ListUpdateResponse); | 564 std::unique_ptr<ListUpdateResponse> response(new ListUpdateResponse); |
| 520 response->Swap(file_format.mutable_list_update_response()); | 565 response->Swap(file_format.mutable_list_update_response()); |
| 521 ApplyUpdateResult apply_update_result = ProcessFullUpdate(response); | 566 ApplyUpdateResult apply_update_result = ProcessFullUpdate(response); |
| 567 base::TimeTicks after = base::TimeTicks::Now(); | |
| 522 RecordApplyUpdateResultWhenReadingFromDisk(apply_update_result); | 568 RecordApplyUpdateResultWhenReadingFromDisk(apply_update_result); |
| 523 if (apply_update_result != APPLY_UPDATE_SUCCESS) { | 569 if (apply_update_result != APPLY_UPDATE_SUCCESS) { |
| 524 hash_prefix_map_.clear(); | 570 hash_prefix_map_.clear(); |
| 525 return HASH_PREFIX_MAP_GENERATION_FAILURE; | 571 return HASH_PREFIX_MAP_GENERATION_FAILURE; |
| 526 } | 572 } |
| 573 RecordReadFromDiskTime(after - before); | |
| 527 | 574 |
| 528 return READ_SUCCESS; | 575 return READ_SUCCESS; |
| 529 } | 576 } |
| 530 | 577 |
| 531 StoreWriteResult V4Store::WriteToDisk( | 578 StoreWriteResult V4Store::WriteToDisk( |
| 532 std::unique_ptr<ListUpdateResponse> response) const { | 579 std::unique_ptr<ListUpdateResponse> response) const { |
| 533 // Do not write partial updates to the disk. | 580 // Do not write partial updates to the disk. |
| 534 // After merging the updates, the ListUpdateResponse passed to this method | 581 // After merging the updates, the ListUpdateResponse passed to this method |
| 535 // should be a FULL_UPDATE. | 582 // should be a FULL_UPDATE. |
| 536 if (!response->has_response_type() || | 583 if (!response->has_response_type() || |
| (...skipping 60 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 597 if (result == 0) { | 644 if (result == 0) { |
| 598 return true; | 645 return true; |
| 599 } else if (result < 0) { | 646 } else if (result < 0) { |
| 600 return HashPrefixMatches(hash_prefix, begin, mid); | 647 return HashPrefixMatches(hash_prefix, begin, mid); |
| 601 } else { | 648 } else { |
| 602 return HashPrefixMatches(hash_prefix, mid + prefix_size, end); | 649 return HashPrefixMatches(hash_prefix, mid + prefix_size, end); |
| 603 } | 650 } |
| 604 } | 651 } |
| 605 | 652 |
| 606 } // namespace safe_browsing | 653 } // namespace safe_browsing |
| OLD | NEW |