OLD | NEW |
---|---|
1 // Copyright 2016 The Chromium Authors. All rights reserved. | 1 // Copyright 2016 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "components/safe_browsing_db/v4_get_hash_protocol_manager.h" | 5 #include "components/safe_browsing_db/v4_get_hash_protocol_manager.h" |
6 | 6 |
7 #include <utility> | 7 #include <utility> |
8 | 8 |
9 #include "base/base64url.h" | 9 #include "base/base64url.h" |
10 #include "base/macros.h" | 10 #include "base/macros.h" |
(...skipping 30 matching lines...) | |
41 // expected. | 41 // expected. |
42 NO_METADATA_ERROR = 4, | 42 NO_METADATA_ERROR = 4, |
43 | 43 |
44 // A match in the response contained a ThreatType that was inconsistent | 44 // A match in the response contained a ThreatType that was inconsistent |
45 // with the other matches. | 45 // with the other matches. |
46 INCONSISTENT_THREAT_TYPE_ERROR = 5, | 46 INCONSISTENT_THREAT_TYPE_ERROR = 5, |
47 | 47 |
48 // A match in the response contained a metadata, but the metadata is invalid. | 48 // A match in the response contained a metadata, but the metadata is invalid. |
49 UNEXPECTED_METADATA_VALUE_ERROR = 6, | 49 UNEXPECTED_METADATA_VALUE_ERROR = 6, |
50 | 50 |
51 // A match in the response had no information in the threat field. | |
52 NO_THREAT_ERROR = 7, | |
53 | |
51 // Memory space for histograms is determined by the max. ALWAYS | 54 // Memory space for histograms is determined by the max. ALWAYS |
52 // ADD NEW VALUES BEFORE THIS ONE. | 55 // ADD NEW VALUES BEFORE THIS ONE. |
53 PARSE_RESULT_TYPE_MAX = 7, | 56 PARSE_RESULT_TYPE_MAX = 8, |
57 }; | |
58 | |
59 // Enumerate full hash cache hits/misses for histogramming purposes. | |
60 // DO NOT CHANGE THE ORDERING OF THESE VALUES. | |
61 enum V4FullHashCacheResultType { | |
62 // Full hashes for which there is no cache hit. | |
63 FULL_HASH_CACHE_MISS = 0, | |
64 | |
65 // Full hashes with a cache hit. | |
66 FULL_HASH_CACHE_HIT = 1, | |
67 | |
68 // Full hashes with a negative cache hit. | |
69 FULL_HASH_NEGATIVE_CACHE_HIT = 2, | |
70 | |
71 // Memory space for histograms is determined by the max. ALWAYS | |
72 // ADD NEW VALUES BEFORE THIS ONE. | |
73 FULL_HASH_CACHE_RESULT_MAX | |
54 }; | 74 }; |
55 | 75 |
56 // Record parsing errors of a GetHash result. | 76 // Record parsing errors of a GetHash result. |
57 void RecordParseGetHashResult(ParseResultType result_type) { | 77 void RecordParseGetHashResult(ParseResultType result_type) { |
58 UMA_HISTOGRAM_ENUMERATION("SafeBrowsing.ParseV4HashResult", result_type, | 78 UMA_HISTOGRAM_ENUMERATION("SafeBrowsing.ParseV4HashResult", result_type, |
59 PARSE_RESULT_TYPE_MAX); | 79 PARSE_RESULT_TYPE_MAX); |
60 } | 80 } |
61 | 81 |
62 // Record a GetHash result. | 82 // Record a GetHash result. |
63 void RecordGetHashResult(safe_browsing::V4OperationResult result) { | 83 void RecordGetHashResult(safe_browsing::V4OperationResult result) { |
64 UMA_HISTOGRAM_ENUMERATION( | 84 UMA_HISTOGRAM_ENUMERATION( |
65 "SafeBrowsing.GetV4HashResult", result, | 85 "SafeBrowsing.GetV4HashResult", result, |
66 safe_browsing::V4OperationResult::OPERATION_RESULT_MAX); | 86 safe_browsing::V4OperationResult::OPERATION_RESULT_MAX); |
67 } | 87 } |
68 | 88 |
89 // Record a full hash cache hit result. | |
90 void RecordV4FullHashCacheResult(V4FullHashCacheResultType result_type) { | |
91 UMA_HISTOGRAM_ENUMERATION("SafeBrowsing.V4FullHashCacheResult", result_type, | |
92 FULL_HASH_CACHE_RESULT_MAX); | |
93 } | |
94 | |
69 } // namespace | 95 } // namespace |
70 | 96 |
71 namespace safe_browsing { | 97 namespace safe_browsing { |
72 | 98 |
73 const char kUmaV4HashResponseMetricName[] = | 99 const char kUmaV4HashResponseMetricName[] = |
74 "SafeBrowsing.GetV4HashHttpResponseOrErrorCode"; | 100 "SafeBrowsing.GetV4HashHttpResponseOrErrorCode"; |
75 | 101 |
76 // The default V4GetHashProtocolManagerFactory. | 102 // The default V4GetHashProtocolManagerFactory. |
77 class V4GetHashProtocolManagerFactoryImpl | 103 class V4GetHashProtocolManagerFactoryImpl |
78 : public V4GetHashProtocolManagerFactory { | 104 : public V4GetHashProtocolManagerFactory { |
79 public: | 105 public: |
80 V4GetHashProtocolManagerFactoryImpl() {} | 106 V4GetHashProtocolManagerFactoryImpl() {} |
81 ~V4GetHashProtocolManagerFactoryImpl() override {} | 107 ~V4GetHashProtocolManagerFactoryImpl() override {} |
82 V4GetHashProtocolManager* CreateProtocolManager( | 108 std::unique_ptr<V4GetHashProtocolManager> CreateProtocolManager( |
83 net::URLRequestContextGetter* request_context_getter, | 109 net::URLRequestContextGetter* request_context_getter, |
110 const base::hash_set<UpdateListIdentifier>& stores_to_request, | |
84 const V4ProtocolConfig& config) override { | 111 const V4ProtocolConfig& config) override { |
85 return new V4GetHashProtocolManager(request_context_getter, config); | 112 return base::WrapUnique(new V4GetHashProtocolManager( |
Nathan Parker
2016/09/09 21:26:21
The new recommended way, from ptr_util.h:
// Make
vakh (use Gerrit instead)
2016/09/09 23:25:17
I used that at first, but it requires the construc
| |
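(A minimal sketch of the trade-off discussed in this thread, assuming the base/memory/ptr_util.h helpers of this period; the local name mgr is illustrative. base::MakeUnique<T>() performs the new inside base::, so it needs a constructor it can reach, while base::WrapUnique() only adopts a pointer already constructed at the call site.)

  // MakeUnique does the new itself, so the constructor must be accessible
  // to base::MakeUnique (typically public):
  //   auto mgr = base::MakeUnique<V4GetHashProtocolManager>(
  //       request_context_getter, stores_to_request, config);
  // WrapUnique adopts an existing pointer; the new happens here, where the
  // factory can reach a non-public constructor (e.g. via friendship):
  auto mgr = base::WrapUnique(new V4GetHashProtocolManager(
      request_context_getter, stores_to_request, config));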
113 request_context_getter, stores_to_request, config)); | |
86 } | 114 } |
87 | 115 |
88 private: | 116 private: |
89 DISALLOW_COPY_AND_ASSIGN(V4GetHashProtocolManagerFactoryImpl); | 117 DISALLOW_COPY_AND_ASSIGN(V4GetHashProtocolManagerFactoryImpl); |
90 }; | 118 }; |
91 | 119 |
120 // ---------------------------------------------------------------- | |
121 | |
122 CachedHashPrefixInfo::CachedHashPrefixInfo() {} | |
123 | |
124 CachedHashPrefixInfo::CachedHashPrefixInfo(const CachedHashPrefixInfo& other) = | |
125 default; | |
126 | |
127 CachedHashPrefixInfo::~CachedHashPrefixInfo() {} | |
128 | |
129 // ---------------------------------------------------------------- | |
130 | |
131 FullHashCallbackInfo::FullHashCallbackInfo() {} | |
132 | |
133 FullHashCallbackInfo::FullHashCallbackInfo( | |
134 const std::vector<FullHashInfo>& cached_full_hash_infos, | |
135 const std::vector<HashPrefix>& prefixes_requested, | |
136 std::unique_ptr<net::URLFetcher> fetcher, | |
137 const FullHashToStoreAndHashPrefixesMap& | |
138 full_hash_to_store_and_hash_prefixes, | |
139 FullHashCallback callback) | |
140 : cached_full_hash_infos(cached_full_hash_infos), | |
141 callback(callback), | |
142 fetcher(std::move(fetcher)), | |
143 full_hash_to_store_and_hash_prefixes( | |
144 full_hash_to_store_and_hash_prefixes), | |
145 prefixes_requested(prefixes_requested) {} | |
146 | |
147 FullHashCallbackInfo::~FullHashCallbackInfo() {} | |
148 | |
149 // ---------------------------------------------------------------- | |
150 | |
151 FullHashInfo::FullHashInfo(const FullHash& full_hash, | |
152 const UpdateListIdentifier& list_id, | |
153 const base::Time& positive_ttl) | |
154 : full_hash(full_hash), list_id(list_id), positive_ttl(positive_ttl) {} | |
155 | |
156 FullHashInfo::FullHashInfo(const FullHashInfo& other) = default; | |
157 | |
158 FullHashInfo::~FullHashInfo() {} | |
159 | |
160 bool FullHashInfo::operator==(const FullHashInfo& other) const { | |
161 return full_hash == other.full_hash && list_id == other.list_id && | |
162 positive_ttl == other.positive_ttl && metadata == other.metadata; | |
163 } | |
164 | |
165 bool FullHashInfo::operator!=(const FullHashInfo& other) const { | |
166 return !operator==(other); | |
167 } | |
168 | |
92 // V4GetHashProtocolManager implementation -------------------------------- | 169 // V4GetHashProtocolManager implementation -------------------------------- |
93 | 170 |
94 // static | 171 // static |
95 V4GetHashProtocolManagerFactory* V4GetHashProtocolManager::factory_ = NULL; | 172 V4GetHashProtocolManagerFactory* V4GetHashProtocolManager::factory_ = NULL; |
96 | 173 |
97 // static | 174 // static |
98 V4GetHashProtocolManager* V4GetHashProtocolManager::Create( | 175 std::unique_ptr<V4GetHashProtocolManager> V4GetHashProtocolManager::Create( |
99 net::URLRequestContextGetter* request_context_getter, | 176 net::URLRequestContextGetter* request_context_getter, |
177 const base::hash_set<UpdateListIdentifier>& stores_to_request, | |
100 const V4ProtocolConfig& config) { | 178 const V4ProtocolConfig& config) { |
101 if (!factory_) | 179 if (!factory_) |
102 factory_ = new V4GetHashProtocolManagerFactoryImpl(); | 180 factory_ = new V4GetHashProtocolManagerFactoryImpl(); |
103 return factory_->CreateProtocolManager(request_context_getter, config); | 181 return factory_->CreateProtocolManager(request_context_getter, |
182 stores_to_request, config); | |
104 } | 183 } |
105 | 184 |
106 void V4GetHashProtocolManager::ResetGetHashErrors() { | 185 // static |
107 gethash_error_count_ = 0; | 186 void V4GetHashProtocolManager::RegisterFactory( |
108 gethash_back_off_mult_ = 1; | 187 std::unique_ptr<V4GetHashProtocolManagerFactory> factory) { |
188 if (factory_) | |
189 delete factory_; | |
190 factory_ = factory.release(); | |
109 } | 191 } |
110 | 192 |
111 V4GetHashProtocolManager::V4GetHashProtocolManager( | 193 V4GetHashProtocolManager::V4GetHashProtocolManager( |
112 net::URLRequestContextGetter* request_context_getter, | 194 net::URLRequestContextGetter* request_context_getter, |
195 const base::hash_set<UpdateListIdentifier>& stores_to_request, | |
113 const V4ProtocolConfig& config) | 196 const V4ProtocolConfig& config) |
114 : gethash_error_count_(0), | 197 : gethash_error_count_(0), |
115 gethash_back_off_mult_(1), | 198 gethash_back_off_mult_(1), |
116 next_gethash_time_(Time::FromDoubleT(0)), | 199 next_gethash_time_(Time::FromDoubleT(0)), |
117 config_(config), | 200 config_(config), |
118 request_context_getter_(request_context_getter), | 201 request_context_getter_(request_context_getter), |
119 url_fetcher_id_(0), | 202 url_fetcher_id_(0), |
120 clock_(new base::DefaultClock()) {} | 203 clock_(new base::DefaultClock()) { |
121 | 204 for (const UpdateListIdentifier& store : stores_to_request) { |
122 V4GetHashProtocolManager::~V4GetHashProtocolManager() { | 205 platform_types_.insert(store.platform_type); |
123 } | 206 threat_entry_types_.insert(store.threat_entry_type); |
124 | 207 threat_types_.insert(store.threat_type); |
125 // static | 208 } |
Nathan Parker
2016/09/09 21:26:21
nit: Could just DCHECK(!stores_to_request.empty())
vakh (use Gerrit instead)
2016/09/09 23:25:17
Done.
| |
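(Sketch of the simplification suggested here: a single DCHECK on the input set is enough, since each store contributes one entry to every derived set.)

  DCHECK(!stores_to_request.empty());
  for (const UpdateListIdentifier& store : stores_to_request) {
    platform_types_.insert(store.platform_type);
    threat_entry_types_.insert(store.threat_entry_type);
    threat_types_.insert(store.threat_type);
  }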
126 void V4GetHashProtocolManager::RegisterFactory( | 209 DCHECK(!platform_types_.empty()); |
127 std::unique_ptr<V4GetHashProtocolManagerFactory> factory) { | 210 DCHECK(!threat_entry_types_.empty()); |
128 if (factory_) | 211 DCHECK(!threat_types_.empty()); |
129 delete factory_; | 212 } |
130 factory_ = factory.release(); | 213 |
214 V4GetHashProtocolManager::~V4GetHashProtocolManager() {} | |
215 | |
216 void V4GetHashProtocolManager::ClearCache() { | |
217 DCHECK(CalledOnValidThread()); | |
218 full_hash_cache_.clear(); | |
219 } | |
220 | |
221 void V4GetHashProtocolManager::GetFullHashes( | |
222 const FullHashToStoreAndHashPrefixesMap& | |
223 full_hash_to_store_and_hash_prefixes, | |
224 FullHashCallback callback) { | |
225 DCHECK(CalledOnValidThread()); | |
226 DCHECK(!full_hash_to_store_and_hash_prefixes.empty()); | |
227 | |
228 std::vector<HashPrefix> prefixes_to_request; | |
229 std::vector<FullHashInfo> cached_full_hash_infos; | |
230 GetFullHashCachedResults(full_hash_to_store_and_hash_prefixes, Time::Now(), | |
231 &prefixes_to_request, &cached_full_hash_infos); | |
232 | |
233 if (prefixes_to_request.empty()) { | |
234 // 100% cache hits (positive or negative) so we can call the callback right | |
235 // away. | |
236 callback.Run(cached_full_hash_infos); | |
237 return; | |
238 } | |
239 | |
240 // We need to wait the minimum waiting duration, and if we are in backoff, | |
241 // we need to check if we're past the next allowed time. If we are, we can | |
242 // proceed with the request. If not, we are required to return empty results | |
243 // (i.e. treat the page as safe). | |
Nathan Parker
2016/09/09 21:26:21
nit: "i.e. just use the cache and potentially trea
vakh (use Gerrit instead)
2016/09/09 23:25:17
Done.
| |
244 if (clock_->Now() <= next_gethash_time_) { | |
245 if (gethash_error_count_) { | |
246 RecordGetHashResult(V4OperationResult::BACKOFF_ERROR); | |
247 } else { | |
248 RecordGetHashResult(V4OperationResult::MIN_WAIT_DURATION_ERROR); | |
249 } | |
250 callback.Run(cached_full_hash_infos); | |
251 return; | |
252 } | |
253 | |
254 std::string req_base64 = GetHashRequest(prefixes_to_request); | |
255 GURL gethash_url; | |
256 net::HttpRequestHeaders headers; | |
257 GetHashUrlAndHeaders(req_base64, &gethash_url, &headers); | |
258 | |
259 std::unique_ptr<net::URLFetcher> owned_fetcher = net::URLFetcher::Create( | |
260 url_fetcher_id_++, gethash_url, net::URLFetcher::GET, this); | |
261 net::URLFetcher* fetcher = owned_fetcher.get(); | |
262 pending_hash_requests_[fetcher].reset(new FullHashCallbackInfo( | |
263 cached_full_hash_infos, prefixes_to_request, std::move(owned_fetcher), | |
264 full_hash_to_store_and_hash_prefixes, callback)); | |
265 | |
266 fetcher->SetExtraRequestHeaders(headers.ToString()); | |
267 fetcher->SetLoadFlags(net::LOAD_DISABLE_CACHE); | |
268 fetcher->SetRequestContext(request_context_getter_.get()); | |
269 fetcher->Start(); | |
270 } | |
271 | |
272 void V4GetHashProtocolManager::GetFullHashesWithApis( | |
273 const GURL& url, | |
274 ThreatMetadataForApiCallback api_callback) { | |
275 DCHECK(url.SchemeIs(url::kHttpScheme) || url.SchemeIs(url::kHttpsScheme)); | |
276 | |
277 base::hash_set<FullHash> full_hashes; | |
278 V4ProtocolManagerUtil::UrlToFullHashes(url, &full_hashes); | |
279 | |
280 FullHashToStoreAndHashPrefixesMap full_hash_to_store_and_hash_prefixes; | |
281 for (const FullHash& full_hash : full_hashes) { | |
282 HashPrefix prefix; | |
283 bool result = | |
284 V4ProtocolManagerUtil::FullHashToSmallestHashPrefix(full_hash, &prefix); | |
285 DCHECK(result); | |
286 full_hash_to_store_and_hash_prefixes[full_hash].push_back( | |
Nathan Parker
2016/09/09 21:26:21
.emplace_back(GetChromeUrlApiId(), prefix), if tha
vakh (use Gerrit instead)
2016/09/09 23:25:18
Done.
| |
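(Sketch of the emplace_back form suggested here; it constructs the StoreAndHashPrefix in place from the same two arguments instead of copying a temporary.)

  full_hash_to_store_and_hash_prefixes[full_hash].emplace_back(
      GetChromeUrlApiId(), prefix);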
287 StoreAndHashPrefix(GetChromeUrlApiId(), prefix)); | |
288 } | |
289 | |
290 GetFullHashes(full_hash_to_store_and_hash_prefixes, | |
291 base::Bind(&V4GetHashProtocolManager::OnFullHashForApi, | |
292 base::Unretained(this), api_callback, full_hashes)); | |
293 } | |
294 | |
295 void V4GetHashProtocolManager::GetFullHashCachedResults( | |
296 const FullHashToStoreAndHashPrefixesMap& | |
297 full_hash_to_store_and_hash_prefixes, | |
298 const Time& now, | |
299 std::vector<HashPrefix>* prefixes_to_request, | |
300 std::vector<FullHashInfo>* cached_full_hash_infos) const { | |
301 DCHECK(!full_hash_to_store_and_hash_prefixes.empty()); | |
302 DCHECK(prefixes_to_request); | |
Nathan Parker
2016/09/09 21:26:21
nit: can skip checking the ptr if you're going to
vakh (use Gerrit instead)
2016/09/09 23:25:17
Done.
| |
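(Sketch of this nit: the dereference in the emptiness DCHECK already fails loudly on a null pointer, so the separate null check can be dropped.)

  DCHECK(prefixes_to_request->empty());     // also catches a null pointer
  DCHECK(cached_full_hash_infos->empty());  // likewise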
303 DCHECK(prefixes_to_request->empty()); | |
304 DCHECK(cached_full_hash_infos); | |
305 DCHECK(cached_full_hash_infos->empty()); | |
306 | |
307 // Caching behavior is documented here: | |
308 // https://developers.google.com/safe-browsing/v4/caching#about-caching | |
309 // | |
310 // The cache operates as follows: | |
311 // Lookup: | |
312 // Case 1: The prefix is in the cache. | |
313 // Case a: The full hash is in the cache. | |
314 // Case i : The positive full hash result has not expired. | |
315 // The result is unsafe and we do not need to send a new | |
316 // request. | |
317 // Case ii: The positive full hash result has expired. | |
318 // We need to send a request for full hashes. | |
319 // Case b: The full hash is not in the cache. | |
320 // Case i : The negative cache entry has not expired. | |
321 // The result is still safe and we do not need to send a | |
322 // new request. | |
323 // Case ii: The negative cache entry has expired. | |
324 // We need to send a request for full hashes. | |
325 // Case 2: The prefix is not in the cache. | |
326 // We need to send a request for full hashes. | |
327 // | |
328 // Note on eviction: | |
329 // CachedHashPrefixInfo entries can be removed from the cache only when | |
330 // the negative cache expire time and the cache expire time of all full | |
331 // hash results for that prefix have expired. | |
332 // Individual full hash results can be removed from the prefix's | |
333 // cache entry if they expire AND their expire time is after the negative | |
334 // cache expire time. | |
335 | |
336 // TODO(vakh): Perform cache cleanup. | |
337 base::hash_set<HashPrefix> unique_prefixes_to_request; | |
338 for (const auto& it : full_hash_to_store_and_hash_prefixes) { | |
Nathan Parker
2016/09/09 21:26:21
I'm assuming this code is not modified, ya? If th
vakh (use Gerrit instead)
2016/09/09 23:25:17
Most of it has been re-written. Please do take a l
Nathan Parker
2016/09/10 00:26:25
Seems ok to me. I see you aren't removing anythin
vakh (use Gerrit instead)
2016/09/10 00:29:29
Acknowledged.
| |
339 const FullHash& full_hash = it.first; | |
340 const StoreAndHashPrefixes& matched = it.second; | |
341 for (const StoreAndHashPrefix& matched_it : matched) { | |
342 const UpdateListIdentifier& list_id = matched_it.list_id; | |
343 const HashPrefix& prefix = matched_it.hash_prefix; | |
344 const auto& prefix_entry = full_hash_cache_.find(prefix); | |
Nathan Parker
2016/09/10 00:26:25
I think this shouldn't be a reference, since find
vakh (use Gerrit instead)
2016/09/10 00:29:29
It's a common pattern:
https://cs.chromium.org/sea
Nathan Parker
2016/09/12 17:02:07
But this is a more common pattern:
https://cs.chro
Scott Hess - ex-Googler
2016/09/12 23:31:34
In general, the temporary lives for the scope of t
vakh (use Gerrit instead)
2016/09/13 00:12:06
Done. Changed to "auto it = "
Scott Hess - ex-Googler
2016/09/13 00:31:34
Compiler is standing to the side saying "I'm going
vakh (use Gerrit instead)
2016/09/13 01:18:56
Acknowledged. :)
| |
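(Sketch of what this thread settles on: find() returns the iterator by value, so binding it to a const reference only extends the lifetime of that temporary; taking it by value is equivalent and less surprising to readers.)

  auto it = full_hash_cache_.find(prefix);            // iterator held by value
  // const auto& it = full_hash_cache_.find(prefix);  // also legal: the const ref
  //                                                  // extends the temporary's
  //                                                  // lifetime to its scope
  if (it != full_hash_cache_.end()) {
    const CachedHashPrefixInfo& cached_prefix_info = it->second;
  }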
345 if (prefix_entry != full_hash_cache_.end()) { | |
346 // Case 1. | |
347 const CachedHashPrefixInfo& cached_prefix_info = prefix_entry->second; | |
348 bool found_full_hash = false; | |
349 for (const FullHashInfo& full_hash_info : | |
350 cached_prefix_info.full_hash_infos) { | |
351 if (full_hash_info.full_hash == full_hash && | |
352 full_hash_info.list_id == list_id) { | |
353 // Case a. | |
354 found_full_hash = true; | |
355 bool positive_ttl_unexpired = full_hash_info.positive_ttl > now; | |
356 if (positive_ttl_unexpired) { | |
357 // Case i. | |
358 cached_full_hash_infos->push_back(full_hash_info); | |
359 RecordV4FullHashCacheResult(FULL_HASH_CACHE_HIT); | |
360 } else { | |
361 // Case ii. | |
362 unique_prefixes_to_request.insert(prefix); | |
363 RecordV4FullHashCacheResult(FULL_HASH_CACHE_MISS); | |
364 } | |
365 break; | |
366 } | |
367 } | |
368 | |
369 if (!found_full_hash) { | |
370 // Case b. | |
371 if (cached_prefix_info.negative_ttl > now) { | |
372 // Case i. | |
373 RecordV4FullHashCacheResult(FULL_HASH_NEGATIVE_CACHE_HIT); | |
374 } else { | |
375 // Case ii. | |
376 unique_prefixes_to_request.insert(prefix); | |
377 RecordV4FullHashCacheResult(FULL_HASH_CACHE_MISS); | |
378 } | |
379 } | |
380 } else { | |
381 // Case 2. | |
382 unique_prefixes_to_request.insert(prefix); | |
383 RecordV4FullHashCacheResult(FULL_HASH_CACHE_MISS); | |
384 } | |
385 } | |
386 } | |
387 | |
388 prefixes_to_request->insert(prefixes_to_request->begin(), | |
389 unique_prefixes_to_request.begin(), | |
390 unique_prefixes_to_request.end()); | |
131 } | 391 } |
132 | 392 |
133 std::string V4GetHashProtocolManager::GetHashRequest( | 393 std::string V4GetHashProtocolManager::GetHashRequest( |
134 const std::vector<SBPrefix>& prefixes, | 394 const std::vector<HashPrefix>& prefixes_to_request) { |
135 const std::vector<PlatformType>& platforms, | 395 DCHECK(!prefixes_to_request.empty()); |
136 ThreatType threat_type) { | 396 |
137 // Build the request. Client info and client states are not added to the | |
138 // request protocol buffer. Client info is passed as params in the url. | |
139 FindFullHashesRequest req; | 397 FindFullHashesRequest req; |
140 ThreatInfo* info = req.mutable_threat_info(); | 398 ThreatInfo* info = req.mutable_threat_info(); |
141 info->add_threat_types(threat_type); | 399 for (const PlatformType p : platform_types_) { |
142 info->add_threat_entry_types(URL); | |
143 for (const PlatformType p : platforms) { | |
144 info->add_platform_types(p); | 400 info->add_platform_types(p); |
145 } | 401 } |
146 for (const SBPrefix& prefix : prefixes) { | 402 for (const ThreatEntryType tet : threat_entry_types_) { |
147 std::string hash(reinterpret_cast<const char*>(&prefix), sizeof(SBPrefix)); | 403 info->add_threat_entry_types(tet); |
148 info->add_threat_entries()->set_hash(hash); | 404 } |
405 for (const ThreatType tt : threat_types_) { | |
406 info->add_threat_types(tt); | |
407 } | |
408 for (const HashPrefix& prefix : prefixes_to_request) { | |
409 info->add_threat_entries()->set_hash(prefix); | |
149 } | 410 } |
150 | 411 |
151 // Serialize and Base64 encode. | 412 // Serialize and Base64 encode. |
152 std::string req_data, req_base64; | 413 std::string req_data, req_base64; |
153 req.SerializeToString(&req_data); | 414 req.SerializeToString(&req_data); |
154 base::Base64UrlEncode(req_data, base::Base64UrlEncodePolicy::INCLUDE_PADDING, | 415 base::Base64UrlEncode(req_data, base::Base64UrlEncodePolicy::INCLUDE_PADDING, |
155 &req_base64); | 416 &req_base64); |
156 return req_base64; | 417 return req_base64; |
157 } | 418 } |
158 | 419 |
420 void V4GetHashProtocolManager::GetHashUrlAndHeaders( | |
421 const std::string& req_base64, | |
422 GURL* gurl, | |
423 net::HttpRequestHeaders* headers) const { | |
424 V4ProtocolManagerUtil::GetRequestUrlAndHeaders(req_base64, "fullHashes:find", | |
425 config_, gurl, headers); | |
426 } | |
427 | |
428 void V4GetHashProtocolManager::HandleGetHashError(const Time& now) { | |
429 DCHECK(CalledOnValidThread()); | |
430 TimeDelta next = V4ProtocolManagerUtil::GetNextBackOffInterval( | |
431 &gethash_error_count_, &gethash_back_off_mult_); | |
432 next_gethash_time_ = now + next; | |
433 } | |
434 | |
435 void V4GetHashProtocolManager::OnFullHashForApi( | |
436 ThreatMetadataForApiCallback api_callback, | |
437 const base::hash_set<FullHash>& full_hashes, | |
438 const std::vector<FullHashInfo>& full_hash_infos) { | |
439 ThreatMetadata md; | |
440 for (const FullHashInfo& full_hash_info : full_hash_infos) { | |
441 DCHECK_EQ(GetChromeUrlApiId(), full_hash_info.list_id); | |
442 DCHECK(full_hashes.find(full_hash_info.full_hash) != full_hashes.end()); | |
443 md.api_permissions.insert(full_hash_info.metadata.api_permissions.begin(), | |
444 full_hash_info.metadata.api_permissions.end()); | |
445 } | |
446 | |
447 // TODO(vakh): Figure out what UMA metrics to report. This code was previously | |
448 // calling RecordV4GetHashCheckResult with appropriate values but that's not | |
449 // applicable anymore. | |
450 api_callback.Run(md); | |
451 } | |
452 | |
159 bool V4GetHashProtocolManager::ParseHashResponse( | 453 bool V4GetHashProtocolManager::ParseHashResponse( |
160 const std::string& data, | 454 const std::string& data, |
161 std::vector<SBFullHashResult>* full_hashes, | 455 std::vector<FullHashInfo>* full_hash_infos, |
162 base::Time* negative_cache_expire) { | 456 Time* negative_cache_expire) { |
163 FindFullHashesResponse response; | 457 FindFullHashesResponse response; |
164 | 458 |
165 if (!response.ParseFromString(data)) { | 459 if (!response.ParseFromString(data)) { |
166 RecordParseGetHashResult(PARSE_FROM_STRING_ERROR); | 460 RecordParseGetHashResult(PARSE_FROM_STRING_ERROR); |
167 return false; | 461 return false; |
168 } | 462 } |
169 | 463 |
170 // negative_cache_duration should always be set. | 464 // negative_cache_duration should always be set. |
171 DCHECK(response.has_negative_cache_duration()); | 465 DCHECK(response.has_negative_cache_duration()); |
466 | |
172 // Seconds resolution is good enough so we ignore the nanos field. | 467 // Seconds resolution is good enough so we ignore the nanos field. |
173 *negative_cache_expire = | 468 *negative_cache_expire = |
174 clock_->Now() + base::TimeDelta::FromSeconds( | 469 clock_->Now() + |
175 response.negative_cache_duration().seconds()); | 470 TimeDelta::FromSeconds(response.negative_cache_duration().seconds()); |
176 | 471 |
177 if (response.has_minimum_wait_duration()) { | 472 if (response.has_minimum_wait_duration()) { |
178 // Seconds resolution is good enough so we ignore the nanos field. | 473 // Seconds resolution is good enough so we ignore the nanos field. |
179 next_gethash_time_ = | 474 next_gethash_time_ = |
180 clock_->Now() + base::TimeDelta::FromSeconds( | 475 clock_->Now() + |
181 response.minimum_wait_duration().seconds()); | 476 TimeDelta::FromSeconds(response.minimum_wait_duration().seconds()); |
182 } | 477 } |
183 | 478 |
184 // We only expect one threat type per request, so we make sure | |
185 // the threat types are consistent between matches. | |
186 ThreatType expected_threat_type = THREAT_TYPE_UNSPECIFIED; | |
187 | |
188 // Loop over the threat matches and fill in full_hashes. | |
189 for (const ThreatMatch& match : response.matches()) { | 479 for (const ThreatMatch& match : response.matches()) { |
190 // Make sure the platform and threat entry type match. | 480 if (!match.has_platform_type()) { |
191 if (!(match.has_threat_entry_type() && match.threat_entry_type() == URL && | 481 RecordParseGetHashResult(UNEXPECTED_PLATFORM_TYPE_ERROR); |
192 match.has_threat())) { | 482 return false; |
483 } | |
484 if (!match.has_threat_entry_type()) { | |
193 RecordParseGetHashResult(UNEXPECTED_THREAT_ENTRY_TYPE_ERROR); | 485 RecordParseGetHashResult(UNEXPECTED_THREAT_ENTRY_TYPE_ERROR); |
194 return false; | 486 return false; |
195 } | 487 } |
196 | |
197 if (!match.has_threat_type()) { | 488 if (!match.has_threat_type()) { |
198 RecordParseGetHashResult(UNEXPECTED_THREAT_TYPE_ERROR); | 489 RecordParseGetHashResult(UNEXPECTED_THREAT_TYPE_ERROR); |
199 return false; | 490 return false; |
200 } | 491 } |
201 | 492 if (!match.has_threat()) { |
202 if (expected_threat_type == THREAT_TYPE_UNSPECIFIED) { | 493 RecordParseGetHashResult(NO_THREAT_ERROR); |
203 expected_threat_type = match.threat_type(); | |
204 } else if (match.threat_type() != expected_threat_type) { | |
205 RecordParseGetHashResult(INCONSISTENT_THREAT_TYPE_ERROR); | |
206 return false; | 494 return false; |
207 } | 495 } |
208 | 496 |
209 // Fill in the full hash. | 497 UpdateListIdentifier list_id( |
210 SBFullHashResult result; | 498 match.platform_type(), match.threat_entry_type(), match.threat_type()); |
211 result.hash = StringToSBFullHash(match.threat().hash()); | 499 base::Time positive_ttl; |
212 | |
213 if (match.has_cache_duration()) { | 500 if (match.has_cache_duration()) { |
214 // Seconds resolution is good enough so we ignore the nanos field. | 501 // Seconds resolution is good enough so we ignore the nanos field. |
215 result.cache_expire_after = | 502 positive_ttl = clock_->Now() + |
216 clock_->Now() + | 503 TimeDelta::FromSeconds(match.cache_duration().seconds()); |
217 base::TimeDelta::FromSeconds(match.cache_duration().seconds()); | |
218 } else { | 504 } else { |
219 result.cache_expire_after = clock_->Now(); | 505 positive_ttl = clock_->Now(); |
506 } | |
507 FullHashInfo full_hash_info(match.threat().hash(), list_id, positive_ttl); | |
508 if (!ParseMetadata(match, &full_hash_info.metadata)) { | |
509 return false; | |
220 } | 510 } |
221 | 511 |
222 // Different threat types will handle the metadata differently. | 512 full_hash_infos->push_back(full_hash_info); |
223 if (match.threat_type() == API_ABUSE) { | 513 } |
224 if (match.has_platform_type() && | 514 return true; |
225 match.platform_type() == CHROME_PLATFORM) { | 515 } |
226 if (match.has_threat_entry_metadata()) { | 516 |
227 // For API Abuse, store a list of the returned permissions. | 517 bool V4GetHashProtocolManager::ParseMetadata(const ThreatMatch& match, |
228 for (const ThreatEntryMetadata::MetadataEntry& m : | 518 ThreatMetadata* metadata) { |
229 match.threat_entry_metadata().entries()) { | 519 DCHECK(metadata); |
230 if (m.key() == "permission") { | 520 // Different threat types will handle the metadata differently. |
231 result.metadata.api_permissions.insert(m.value()); | 521 if (match.threat_type() == API_ABUSE) { |
232 } else { | 522 if (match.has_platform_type() && match.platform_type() == CHROME_PLATFORM) { |
233 RecordParseGetHashResult(UNEXPECTED_METADATA_VALUE_ERROR); | 523 if (match.has_threat_entry_metadata()) { |
Nathan Parker
2016/09/09 21:26:20
Can you reduce the nested if's by negating them an
vakh (use Gerrit instead)
2016/09/09 23:25:17
Done.
| |
234 return false; | 524 // For API Abuse, store a list of the returned permissions. |
235 } | 525 for (const ThreatEntryMetadata::MetadataEntry& m : |
236 } | 526 match.threat_entry_metadata().entries()) { |
237 } else { | 527 if (m.key() == "permission") { |
238 RecordParseGetHashResult(NO_METADATA_ERROR); | 528 metadata->api_permissions.insert(m.value()); |
239 return false; | |
240 } | |
241 } else { | |
242 RecordParseGetHashResult(UNEXPECTED_PLATFORM_TYPE_ERROR); | |
243 return false; | |
244 } | |
245 } else if (match.threat_type() == MALWARE_THREAT || | |
246 match.threat_type() == POTENTIALLY_HARMFUL_APPLICATION) { | |
247 for (const ThreatEntryMetadata::MetadataEntry& m : | |
248 match.threat_entry_metadata().entries()) { | |
249 // TODO: Need to confirm the below key/value pairs with CSD backend. | |
250 if (m.key() == "pha_pattern_type" || | |
251 m.key() == "malware_pattern_type") { | |
252 if (m.value() == "LANDING") { | |
253 result.metadata.threat_pattern_type = | |
254 ThreatPatternType::MALWARE_LANDING; | |
255 break; | |
256 } else if (m.value() == "DISTRIBUTION") { | |
257 result.metadata.threat_pattern_type = | |
258 ThreatPatternType::MALWARE_DISTRIBUTION; | |
259 break; | |
260 } else { | 529 } else { |
261 RecordParseGetHashResult(UNEXPECTED_METADATA_VALUE_ERROR); | 530 RecordParseGetHashResult(UNEXPECTED_METADATA_VALUE_ERROR); |
262 return false; | 531 return false; |
263 } | 532 } |
264 } | 533 } |
534 } else { | |
535 RecordParseGetHashResult(NO_METADATA_ERROR); | |
536 return false; | |
265 } | 537 } |
266 } else if (match.threat_type() == SOCIAL_ENGINEERING_PUBLIC) { | 538 } else { |
267 for (const ThreatEntryMetadata::MetadataEntry& m : | 539 RecordParseGetHashResult(UNEXPECTED_PLATFORM_TYPE_ERROR); |
268 match.threat_entry_metadata().entries()) { | 540 return false; |
269 if (m.key() == "se_pattern_type") { | 541 } |
270 if (m.value() == "SOCIAL_ENGINEERING_ADS") { | 542 } else if (match.threat_type() == MALWARE_THREAT || |
271 result.metadata.threat_pattern_type = | 543 match.threat_type() == POTENTIALLY_HARMFUL_APPLICATION) { |
272 ThreatPatternType::SOCIAL_ENGINEERING_ADS; | 544 for (const ThreatEntryMetadata::MetadataEntry& m : |
273 break; | 545 match.threat_entry_metadata().entries()) { |
274 } else if (m.value() == "SOCIAL_ENGINEERING_LANDING") { | 546 // TODO: Need to confirm the below key/value pairs with CSD backend. |
275 result.metadata.threat_pattern_type = | 547 if (m.key() == "pha_pattern_type" || m.key() == "malware_pattern_type") { |
276 ThreatPatternType::SOCIAL_ENGINEERING_LANDING; | 548 if (m.value() == "LANDING") { |
277 break; | 549 metadata->threat_pattern_type = ThreatPatternType::MALWARE_LANDING; |
278 } else if (m.value() == "PHISHING") { | 550 break; |
279 result.metadata.threat_pattern_type = ThreatPatternType::PHISHING; | 551 } else if (m.value() == "DISTRIBUTION") { |
280 break; | 552 metadata->threat_pattern_type = |
281 } else { | 553 ThreatPatternType::MALWARE_DISTRIBUTION; |
282 RecordParseGetHashResult(UNEXPECTED_METADATA_VALUE_ERROR); | 554 break; |
283 return false; | 555 } else { |
284 } | 556 RecordParseGetHashResult(UNEXPECTED_METADATA_VALUE_ERROR); |
557 return false; | |
285 } | 558 } |
286 } | 559 } |
287 } else { | |
288 RecordParseGetHashResult(UNEXPECTED_THREAT_TYPE_ERROR); | |
289 return false; | |
290 } | 560 } |
561 } else if (match.threat_type() == SOCIAL_ENGINEERING_PUBLIC) { | |
562 for (const ThreatEntryMetadata::MetadataEntry& m : | |
563 match.threat_entry_metadata().entries()) { | |
564 if (m.key() == "se_pattern_type") { | |
565 if (m.value() == "SOCIAL_ENGINEERING_ADS") { | |
566 metadata->threat_pattern_type = | |
567 ThreatPatternType::SOCIAL_ENGINEERING_ADS; | |
568 break; | |
569 } else if (m.value() == "SOCIAL_ENGINEERING_LANDING") { | |
570 metadata->threat_pattern_type = | |
571 ThreatPatternType::SOCIAL_ENGINEERING_LANDING; | |
572 break; | |
573 } else if (m.value() == "PHISHING") { | |
574 metadata->threat_pattern_type = ThreatPatternType::PHISHING; | |
575 break; | |
576 } else { | |
577 RecordParseGetHashResult(UNEXPECTED_METADATA_VALUE_ERROR); | |
578 return false; | |
579 } | |
580 } | |
581 } | |
582 } else { | |
583 RecordParseGetHashResult(UNEXPECTED_THREAT_TYPE_ERROR); | |
584 return false; | |
585 } | |
291 | 586 |
292 full_hashes->push_back(result); | |
293 } | |
294 return true; | 587 return true; |
295 } | 588 } |
296 | 589 |
297 void V4GetHashProtocolManager::GetFullHashes( | 590 void V4GetHashProtocolManager::ResetGetHashErrors() { |
298 const std::vector<SBPrefix>& prefixes, | 591 gethash_error_count_ = 0; |
299 const std::vector<PlatformType>& platforms, | 592 gethash_back_off_mult_ = 1; |
300 ThreatType threat_type, | |
301 FullHashCallback callback) { | |
302 DCHECK(CalledOnValidThread()); | |
303 // We need to wait the minimum waiting duration, and if we are in backoff, | |
304 // we need to check if we're past the next allowed time. If we are, we can | |
305 // proceed with the request. If not, we are required to return empty results | |
306 // (i.e. treat the page as safe). | |
307 if (clock_->Now() <= next_gethash_time_) { | |
308 if (gethash_error_count_) { | |
309 RecordGetHashResult(V4OperationResult::BACKOFF_ERROR); | |
310 } else { | |
311 RecordGetHashResult(V4OperationResult::MIN_WAIT_DURATION_ERROR); | |
312 } | |
313 std::vector<SBFullHashResult> full_hashes; | |
314 callback.Run(full_hashes, base::Time()); | |
315 return; | |
316 } | |
317 | |
318 std::string req_base64 = GetHashRequest(prefixes, platforms, threat_type); | |
319 GURL gethash_url; | |
320 net::HttpRequestHeaders headers; | |
321 GetHashUrlAndHeaders(req_base64, &gethash_url, &headers); | |
322 | |
323 std::unique_ptr<net::URLFetcher> owned_fetcher = net::URLFetcher::Create( | |
324 url_fetcher_id_++, gethash_url, net::URLFetcher::GET, this); | |
325 net::URLFetcher* fetcher = owned_fetcher.get(); | |
326 fetcher->SetExtraRequestHeaders(headers.ToString()); | |
327 hash_requests_[fetcher] = std::make_pair(std::move(owned_fetcher), callback); | |
328 | |
329 fetcher->SetLoadFlags(net::LOAD_DISABLE_CACHE); | |
330 fetcher->SetRequestContext(request_context_getter_.get()); | |
331 fetcher->Start(); | |
332 } | |
333 | |
334 void V4GetHashProtocolManager::GetFullHashesWithApis( | |
335 const std::vector<SBPrefix>& prefixes, | |
336 FullHashCallback callback) { | |
337 std::vector<PlatformType> platform = {CHROME_PLATFORM}; | |
338 GetFullHashes(prefixes, platform, API_ABUSE, callback); | |
339 } | 593 } |
340 | 594 |
341 void V4GetHashProtocolManager::SetClockForTests( | 595 void V4GetHashProtocolManager::SetClockForTests( |
342 std::unique_ptr<base::Clock> clock) { | 596 std::unique_ptr<base::Clock> clock) { |
343 clock_ = std::move(clock); | 597 clock_ = std::move(clock); |
344 } | 598 } |
345 | 599 |
600 void V4GetHashProtocolManager::UpdateCache( | |
601 const std::vector<HashPrefix>& prefixes_requested, | |
602 const std::vector<FullHashInfo>& full_hash_infos, | |
603 const Time& negative_cache_expire) { | |
604 // If negative_cache_expire is null, don't cache the results it's not clear | |
Nathan Parker
2016/09/09 21:26:21
nit: s/it's/since it's/
vakh (use Gerrit instead)
2016/09/09 23:25:18
Done.
| |
605 // till what time they should be considered valid. | |
606 if (negative_cache_expire.is_null()) { | |
607 return; | |
608 } | |
609 | |
610 for (const HashPrefix& prefix : prefixes_requested) { | |
611 // Create or reset the cached result for this prefix. | |
612 full_hash_cache_[prefix].full_hash_infos.clear(); | |
Nathan Parker
2016/09/09 21:26:21
This looks up prefix multiple times. You could us
vakh (use Gerrit instead)
2016/09/09 23:25:17
Changed to get a reference to it first (or add it
| |
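(Sketch of the change agreed on here: take the reference from operator[] once, so the map is not searched three times per prefix; the local name cached_prefix_info is illustrative.)

  CachedHashPrefixInfo& cached_prefix_info = full_hash_cache_[prefix];  // inserts if absent
  cached_prefix_info.full_hash_infos.clear();
  cached_prefix_info.negative_ttl = negative_cache_expire;
  for (const FullHashInfo& full_hash_info : full_hash_infos) {
    if (V4ProtocolManagerUtil::FullHashMatchesHashPrefix(
            full_hash_info.full_hash, prefix)) {
      cached_prefix_info.full_hash_infos.push_back(full_hash_info);
    }
  }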
613 full_hash_cache_[prefix].negative_ttl = negative_cache_expire; | |
614 | |
615 for (const FullHashInfo& full_hash_info : full_hash_infos) { | |
616 if (V4ProtocolManagerUtil::FullHashMatchesHashPrefix( | |
617 full_hash_info.full_hash, prefix)) { | |
618 full_hash_cache_[prefix].full_hash_infos.push_back(full_hash_info); | |
619 } | |
620 } | |
621 } | |
622 } | |
623 | |
624 void V4GetHashProtocolManager::MergeResults( | |
625 const FullHashToStoreAndHashPrefixesMap& | |
626 full_hash_to_store_and_hash_prefixes, | |
627 const std::vector<FullHashInfo>& full_hash_infos, | |
628 std::vector<FullHashInfo>* merged_full_hash_infos) { | |
629 for (const FullHashInfo& fhi : full_hash_infos) { | |
630 bool matched_full_hash = | |
631 full_hash_to_store_and_hash_prefixes.end() != | |
632 full_hash_to_store_and_hash_prefixes.find(fhi.full_hash); | |
633 if (matched_full_hash) { | |
634 for (const StoreAndHashPrefix& sahp : | |
635 full_hash_to_store_and_hash_prefixes.at(fhi.full_hash)) { | |
Nathan Parker
2016/09/09 21:26:21
rather than looking up full_hash again, save the i
vakh (use Gerrit instead)
2016/09/09 23:25:17
Done.
| |
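(Sketch of the suggestion here: keep the iterator returned by find() so the map is not searched a second time via at().)

  auto it = full_hash_to_store_and_hash_prefixes.find(fhi.full_hash);
  if (it != full_hash_to_store_and_hash_prefixes.end()) {
    for (const StoreAndHashPrefix& sahp : it->second) {
      if (fhi.list_id == sahp.list_id) {
        merged_full_hash_infos->push_back(fhi);
        break;
      }
    }
  }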
636 if (fhi.list_id == sahp.list_id) { | |
637 merged_full_hash_infos->push_back(fhi); | |
638 break; | |
639 } | |
640 } | |
641 } | |
642 } | |
643 } | |
644 | |
346 // net::URLFetcherDelegate implementation ---------------------------------- | 645 // net::URLFetcherDelegate implementation ---------------------------------- |
347 | 646 |
348 // SafeBrowsing request responses are handled here. | 647 // SafeBrowsing request responses are handled here. |
349 void V4GetHashProtocolManager::OnURLFetchComplete( | 648 void V4GetHashProtocolManager::OnURLFetchComplete( |
350 const net::URLFetcher* source) { | 649 const net::URLFetcher* source) { |
351 DCHECK(CalledOnValidThread()); | 650 DCHECK(CalledOnValidThread()); |
352 | 651 |
353 HashRequests::iterator it = hash_requests_.find(source); | 652 PendingHashRequests::iterator it = pending_hash_requests_.find(source); |
354 DCHECK(it != hash_requests_.end()) << "Request not found"; | 653 DCHECK(it != pending_hash_requests_.end()) << "Request not found"; |
355 | 654 |
356 int response_code = source->GetResponseCode(); | 655 int response_code = source->GetResponseCode(); |
357 net::URLRequestStatus status = source->GetStatus(); | 656 net::URLRequestStatus status = source->GetStatus(); |
358 V4ProtocolManagerUtil::RecordHttpResponseOrErrorCode( | 657 V4ProtocolManagerUtil::RecordHttpResponseOrErrorCode( |
359 kUmaV4HashResponseMetricName, status, response_code); | 658 kUmaV4HashResponseMetricName, status, response_code); |
360 | 659 |
361 const FullHashCallback& callback = it->second.second; | 660 std::vector<FullHashInfo> full_hash_infos; |
362 std::vector<SBFullHashResult> full_hashes; | 661 Time negative_cache_expire; |
363 base::Time negative_cache_expire; | |
364 if (status.is_success() && response_code == net::HTTP_OK) { | 662 if (status.is_success() && response_code == net::HTTP_OK) { |
365 RecordGetHashResult(V4OperationResult::STATUS_200); | 663 RecordGetHashResult(V4OperationResult::STATUS_200); |
366 ResetGetHashErrors(); | 664 ResetGetHashErrors(); |
367 std::string data; | 665 std::string data; |
368 source->GetResponseAsString(&data); | 666 source->GetResponseAsString(&data); |
369 if (!ParseHashResponse(data, &full_hashes, &negative_cache_expire)) { | 667 if (!ParseHashResponse(data, &full_hash_infos, &negative_cache_expire)) { |
370 full_hashes.clear(); | 668 full_hash_infos.clear(); |
371 RecordGetHashResult(V4OperationResult::PARSE_ERROR); | 669 RecordGetHashResult(V4OperationResult::PARSE_ERROR); |
372 } | 670 } |
373 } else { | 671 } else { |
374 HandleGetHashError(clock_->Now()); | 672 HandleGetHashError(clock_->Now()); |
375 | 673 |
376 DVLOG(1) << "SafeBrowsing GetEncodedFullHashes request for: " | 674 DVLOG(1) << "SafeBrowsing GetEncodedFullHashes request for: " |
377 << source->GetURL() << " failed with error: " << status.error() | 675 << source->GetURL() << " failed with error: " << status.error() |
378 << " and response code: " << response_code; | 676 << " and response code: " << response_code; |
379 | 677 |
380 if (status.status() == net::URLRequestStatus::FAILED) { | 678 if (status.status() == net::URLRequestStatus::FAILED) { |
381 RecordGetHashResult(V4OperationResult::NETWORK_ERROR); | 679 RecordGetHashResult(V4OperationResult::NETWORK_ERROR); |
382 } else { | 680 } else { |
383 RecordGetHashResult(V4OperationResult::HTTP_ERROR); | 681 RecordGetHashResult(V4OperationResult::HTTP_ERROR); |
384 } | 682 } |
385 } | 683 } |
386 | 684 |
387 // Invoke the callback with full_hashes, even if there was a parse error or | 685 const std::unique_ptr<FullHashCallbackInfo>& fhci = it->second; |
388 // an error response code (in which case full_hashes will be empty). The | 686 UpdateCache(fhci->prefixes_requested, full_hash_infos, negative_cache_expire); |
389 // caller can't be blocked indefinitely. | 687 MergeResults(fhci->full_hash_to_store_and_hash_prefixes, full_hash_infos, |
390 callback.Run(full_hashes, negative_cache_expire); | 688 &fhci->cached_full_hash_infos); |
391 | 689 |
392 hash_requests_.erase(it); | 690 const FullHashCallback& callback = fhci->callback; |
Nathan Parker
2016/09/09 21:26:21
just call fhci->callback.Run(..)
vakh (use Gerrit instead)
2016/09/09 23:25:17
Done.
| |
691 callback.Run(fhci->cached_full_hash_infos); | |
692 | |
693 pending_hash_requests_.erase(it); | |
393 } | 694 } |
394 | 695 |
395 void V4GetHashProtocolManager::HandleGetHashError(const Time& now) { | 696 #ifndef DEBUG |
396 DCHECK(CalledOnValidThread()); | 697 std::ostream& operator<<(std::ostream& os, const FullHashInfo& fhi) { |
397 base::TimeDelta next = V4ProtocolManagerUtil::GetNextBackOffInterval( | 698 os << "{full_hash: " << fhi.full_hash << "; list_id: " << fhi.list_id |
398 &gethash_error_count_, &gethash_back_off_mult_); | 699 << "; positive_ttl: " << fhi.positive_ttl |
399 next_gethash_time_ = now + next; | 700 << "; metadata.api_permissions.size(): " |
701 << fhi.metadata.api_permissions.size() << "}"; | |
702 return os; | |
400 } | 703 } |
401 | 704 #endif |
402 void V4GetHashProtocolManager::GetHashUrlAndHeaders( | |
403 const std::string& req_base64, | |
404 GURL* gurl, | |
405 net::HttpRequestHeaders* headers) const { | |
406 V4ProtocolManagerUtil::GetRequestUrlAndHeaders(req_base64, "fullHashes:find", | |
407 config_, gurl, headers); | |
408 } | |
409 | 705 |
410 } // namespace safe_browsing | 706 } // namespace safe_browsing |