Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "chrome/browser/safe_browsing/protocol_manager.h" | 5 #include "chrome/browser/safe_browsing/protocol_manager.h" |
| 6 | 6 |
| 7 #include <utility> | 7 #include <utility> |
| 8 | 8 |
| 9 #include "base/base64.h" | 9 #include "base/base64.h" |
| 10 #include "base/environment.h" | 10 #include "base/environment.h" |
| (...skipping 74 matching lines...) | |
| 85 // Maximum time, in seconds, from start up before we must issue an update query. | 85 // Maximum time, in seconds, from start up before we must issue an update query. |
| 86 static const int kSbTimerStartIntervalSecMax = 300; | 86 static const int kSbTimerStartIntervalSecMax = 300; |
| 87 | 87 |
| 88 // The maximum time, in seconds, to wait for a response to an update request. | 88 // The maximum time, in seconds, to wait for a response to an update request. |
| 89 static const int kSbMaxUpdateWaitSec = 30; | 89 static const int kSbMaxUpdateWaitSec = 30; |
| 90 | 90 |
| 91 // Maximum back off multiplier. | 91 // Maximum back off multiplier. |
| 92 static const size_t kSbMaxBackOff = 8; | 92 static const size_t kSbMaxBackOff = 8; |
| 93 | 93 |
| 94 const char kUmaHashResponseMetricName[] = "SB2.GetHashResponseOrErrorCode"; | 94 const char kUmaHashResponseMetricName[] = "SB2.GetHashResponseOrErrorCode"; |
| 95 const char kUmaV4ResponseMetricName[] = | |
| 96 "SafeBrowsing.V4HttpResponseOrErrorCode"; | |
Nathan Parker (2016/01/07 05:28:29): Should this have "Hash" in it somewhere? (since th…

kcarattini (2016/01/07 06:06:13): Done.
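The thread above is about the name of the new V4 response metric. Later in this patch, RecordHttpResponseOrErrorCode feeds that name into a sparse histogram that records either the HTTP response code or the network error code. Below is a minimal sketch of that recording pattern; it assumes Chromium's //base and //net headers and is an illustration of the pattern, not a standalone excerpt of the patch.

```cpp
// Sketch of the response-or-error recording pattern used in this patch.
// On success the HTTP response code (e.g. 200, 204) is recorded; on
// failure the (negative) net error code goes into the same sparse
// histogram, so one metric covers both cases.
#include "base/metrics/sparse_histogram.h"
#include "net/url_request/url_request_status.h"

namespace {

const char kUmaV4ResponseMetricName[] =
    "SafeBrowsing.V4HttpResponseOrErrorCode";

void RecordHttpResponseOrErrorCode(const char* metric_name,
                                   const net::URLRequestStatus& status,
                                   int response_code) {
  UMA_HISTOGRAM_SPARSE_SLOWLY(
      metric_name, status.is_success() ? response_code : status.error());
}

}  // namespace
```

Because net error codes are negative and HTTP status codes are positive, the two value ranges never collide, which is why a sparse histogram is used instead of a fixed-range enumeration.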
| 95 | 97 |
| 96 // The V4 URL prefix where browser fetches hashes from the V4 server. | 98 // The V4 URL prefix where browser fetches hashes from the V4 server. |
| 97 const char kSbV4UrlPrefix[] = "https://safebrowsing.googleapis.com/v4"; | 99 const char kSbV4UrlPrefix[] = "https://safebrowsing.googleapis.com/v4"; |
| 98 | 100 |
| 99 // The default SBProtocolManagerFactory. | 101 // The default SBProtocolManagerFactory. |
| 100 class SBProtocolManagerFactoryImpl : public SBProtocolManagerFactory { | 102 class SBProtocolManagerFactoryImpl : public SBProtocolManagerFactory { |
| 101 public: | 103 public: |
| 102 SBProtocolManagerFactoryImpl() {} | 104 SBProtocolManagerFactoryImpl() {} |
| 103 ~SBProtocolManagerFactoryImpl() override {} | 105 ~SBProtocolManagerFactoryImpl() override {} |
| 104 SafeBrowsingProtocolManager* CreateProtocolManager( | 106 SafeBrowsingProtocolManager* CreateProtocolManager( |
| (...skipping 74 matching lines...) | |
| 179 ResultType result_type) { | 181 ResultType result_type) { |
| 180 if (is_download) { | 182 if (is_download) { |
| 181 UMA_HISTOGRAM_ENUMERATION("SB2.GetHashResultDownload", result_type, | 183 UMA_HISTOGRAM_ENUMERATION("SB2.GetHashResultDownload", result_type, |
| 182 GET_HASH_RESULT_MAX); | 184 GET_HASH_RESULT_MAX); |
| 183 } else { | 185 } else { |
| 184 UMA_HISTOGRAM_ENUMERATION("SB2.GetHashResult", result_type, | 186 UMA_HISTOGRAM_ENUMERATION("SB2.GetHashResult", result_type, |
| 185 GET_HASH_RESULT_MAX); | 187 GET_HASH_RESULT_MAX); |
| 186 } | 188 } |
| 187 } | 189 } |
| 188 | 190 |
| 191 // static | |
| 192 void SafeBrowsingProtocolManager::RecordGetV4HashResult( | |
| 193 ResultType result_type) { | |
| 194 UMA_HISTOGRAM_ENUMERATION("SafeBrowsing.GetV4HashResult", result_type, | |
| 195 GET_HASH_RESULT_MAX); | |
| 196 } | |
| 197 | |
| 189 void SafeBrowsingProtocolManager::RecordHttpResponseOrErrorCode( | 198 void SafeBrowsingProtocolManager::RecordHttpResponseOrErrorCode( |
| 190 const char* metric_name, | 199 const char* metric_name, |
| 191 const net::URLRequestStatus& status, | 200 const net::URLRequestStatus& status, |
| 192 int response_code) { | 201 int response_code) { |
| 193 UMA_HISTOGRAM_SPARSE_SLOWLY( | 202 UMA_HISTOGRAM_SPARSE_SLOWLY( |
| 194 metric_name, status.is_success() ? response_code : status.error()); | 203 metric_name, status.is_success() ? response_code : status.error()); |
| 195 } | 204 } |
| 196 | 205 |
| 197 bool SafeBrowsingProtocolManager::IsUpdateScheduled() const { | 206 bool SafeBrowsingProtocolManager::IsUpdateScheduled() const { |
| 198 return update_timer_.IsRunning(); | 207 return update_timer_.IsRunning(); |
| (...skipping 132 matching lines...) | |
| 331 const std::vector<SBPrefix>& prefixes, | 340 const std::vector<SBPrefix>& prefixes, |
| 332 const std::vector<PlatformType>& platforms, | 341 const std::vector<PlatformType>& platforms, |
| 333 ThreatType threat_type, | 342 ThreatType threat_type, |
| 334 FullHashCallback callback) { | 343 FullHashCallback callback) { |
| 335 DCHECK(CalledOnValidThread()); | 344 DCHECK(CalledOnValidThread()); |
| 336 // We need to wait the minimum waiting duration, and if we are in backoff, | 345 // We need to wait the minimum waiting duration, and if we are in backoff, |
| 337 // we need to check if we're past the next allowed time. If we are, we can | 346 // we need to check if we're past the next allowed time. If we are, we can |
| 338 // proceed with the request. If not, we are required to return empty results | 347 // proceed with the request. If not, we are required to return empty results |
| 339 // (i.e. treat the page as safe). | 348 // (i.e. treat the page as safe). |
| 340 if (Time::Now() <= next_gethash_v4_time_) { | 349 if (Time::Now() <= next_gethash_v4_time_) { |
| 341 // TODO(kcarattini): Add UMA recording. | 350 if (gethash_v4_error_count_) { |
| 351 RecordGetV4HashResult(GET_HASH_BACKOFF_ERROR); | |
| 352 } else { | |
| 353 RecordGetV4HashResult(GET_HASH_MIN_WAIT_DURATION_ERROR); | |
| 354 } | |
| 342 std::vector<SBFullHashResult> full_hashes; | 355 std::vector<SBFullHashResult> full_hashes; |
| 343 callback.Run(full_hashes, base::TimeDelta()); | 356 callback.Run(full_hashes, base::TimeDelta()); |
| 344 return; | 357 return; |
| 345 } | 358 } |
| 346 | 359 |
| 347 std::string req_base64 = GetV4HashRequest(prefixes, platforms, threat_type); | 360 std::string req_base64 = GetV4HashRequest(prefixes, platforms, threat_type); |
| 348 GURL gethash_url = GetV4HashUrl(req_base64); | 361 GURL gethash_url = GetV4HashUrl(req_base64); |
| 349 | 362 |
| 350 net::URLFetcher* fetcher = | 363 net::URLFetcher* fetcher = |
| 351 net::URLFetcher::Create(url_fetcher_id_++, gethash_url, | 364 net::URLFetcher::Create(url_fetcher_id_++, gethash_url, |
| (...skipping 35 matching lines...) | |
| 387 // required, the SafeBrowsing servers will tell us to get it again. | 400 // required, the SafeBrowsing servers will tell us to get it again. |
| 388 void SafeBrowsingProtocolManager::OnURLFetchComplete( | 401 void SafeBrowsingProtocolManager::OnURLFetchComplete( |
| 389 const net::URLFetcher* source) { | 402 const net::URLFetcher* source) { |
| 390 DCHECK(CalledOnValidThread()); | 403 DCHECK(CalledOnValidThread()); |
| 391 scoped_ptr<const net::URLFetcher> fetcher; | 404 scoped_ptr<const net::URLFetcher> fetcher; |
| 392 | 405 |
| 393 HashRequests::iterator it = hash_requests_.find(source); | 406 HashRequests::iterator it = hash_requests_.find(source); |
| 394 HashRequests::iterator v4_it = v4_hash_requests_.find(source); | 407 HashRequests::iterator v4_it = v4_hash_requests_.find(source); |
| 395 int response_code = source->GetResponseCode(); | 408 int response_code = source->GetResponseCode(); |
| 396 net::URLRequestStatus status = source->GetStatus(); | 409 net::URLRequestStatus status = source->GetStatus(); |
| 397 RecordHttpResponseOrErrorCode(kUmaHashResponseMetricName, status, | 410 |
| 398 response_code); | |
| 399 if (it != hash_requests_.end()) { | 411 if (it != hash_requests_.end()) { |
| 400 // GetHash response. | 412 // GetHash response. |
| 413 RecordHttpResponseOrErrorCode(kUmaHashResponseMetricName, status, | |
| 414 response_code); | |
| 401 fetcher.reset(it->first); | 415 fetcher.reset(it->first); |
| 402 const FullHashDetails& details = it->second; | 416 const FullHashDetails& details = it->second; |
| 403 std::vector<SBFullHashResult> full_hashes; | 417 std::vector<SBFullHashResult> full_hashes; |
| 404 base::TimeDelta cache_lifetime; | 418 base::TimeDelta cache_lifetime; |
| 405 if (status.is_success() && (response_code == net::HTTP_OK || | 419 if (status.is_success() && (response_code == net::HTTP_OK || |
| 406 response_code == net::HTTP_NO_CONTENT)) { | 420 response_code == net::HTTP_NO_CONTENT)) { |
| 407 // For tracking our GetHash false positive (net::HTTP_NO_CONTENT) rate, | 421 // For tracking our GetHash false positive (net::HTTP_NO_CONTENT) rate, |
| 408 // compared to real (net::HTTP_OK) responses. | 422 // compared to real (net::HTTP_OK) responses. |
| 409 if (response_code == net::HTTP_OK) | 423 if (response_code == net::HTTP_OK) |
| 410 RecordGetHashResult(details.is_download, GET_HASH_STATUS_200); | 424 RecordGetHashResult(details.is_download, GET_HASH_STATUS_200); |
| (...skipping 25 matching lines...) | |
| 436 } | 450 } |
| 437 | 451 |
| 438 // Invoke the callback with full_hashes, even if there was a parse error or | 452 // Invoke the callback with full_hashes, even if there was a parse error or |
| 439 // an error response code (in which case full_hashes will be empty). The | 453 // an error response code (in which case full_hashes will be empty). The |
| 440 // caller can't be blocked indefinitely. | 454 // caller can't be blocked indefinitely. |
| 441 details.callback.Run(full_hashes, cache_lifetime); | 455 details.callback.Run(full_hashes, cache_lifetime); |
| 442 | 456 |
| 443 hash_requests_.erase(it); | 457 hash_requests_.erase(it); |
| 444 } else if (v4_it != v4_hash_requests_.end()) { | 458 } else if (v4_it != v4_hash_requests_.end()) { |
| 445 // V4 FindFullHashes response. | 459 // V4 FindFullHashes response. |
| 460 RecordHttpResponseOrErrorCode(kUmaV4ResponseMetricName, status, | |
| 461 response_code); | |
| 446 fetcher.reset(v4_it->first); | 462 fetcher.reset(v4_it->first); |
| 447 const FullHashDetails& details = v4_it->second; | 463 const FullHashDetails& details = v4_it->second; |
| 448 std::vector<SBFullHashResult> full_hashes; | 464 std::vector<SBFullHashResult> full_hashes; |
| 449 base::TimeDelta negative_cache_duration; | 465 base::TimeDelta negative_cache_duration; |
| 450 if (status.is_success() && response_code == net::HTTP_OK) { | 466 if (status.is_success() && response_code == net::HTTP_OK) { |
| 451 // TODO(kcarattini): Add UMA reporting. | 467 RecordGetV4HashResult(GET_HASH_STATUS_200); |
| 452 gethash_v4_error_count_ = 0; | 468 gethash_v4_error_count_ = 0; |
| 453 gethash_v4_back_off_mult_ = 1; | 469 gethash_v4_back_off_mult_ = 1; |
| 454 std::string data; | 470 std::string data; |
| 455 source->GetResponseAsString(&data); | 471 source->GetResponseAsString(&data); |
| 456 if (!ParseV4HashResponse(data, &full_hashes, &negative_cache_duration)) { | 472 if (!ParseV4HashResponse(data, &full_hashes, &negative_cache_duration)) { |
| 457 full_hashes.clear(); | 473 full_hashes.clear(); |
| 458 // TODO(kcarattini): Add UMA reporting. | 474 RecordGetV4HashResult(GET_HASH_PARSE_ERROR); |
| 459 } | 475 } |
| 460 } else { | 476 } else { |
| 461 HandleGetHashV4Error(Time::Now()); | 477 HandleGetHashV4Error(Time::Now()); |
| 462 // TODO(kcarattini): Add UMA reporting. | |
| 463 if (status.status() == net::URLRequestStatus::FAILED) { | 478 if (status.status() == net::URLRequestStatus::FAILED) { |
| 479 RecordGetV4HashResult(GET_HASH_NETWORK_ERROR); | |
| 464 DVLOG(1) << "SafeBrowsing GetEncodedFullHashes request for: " << | 480 DVLOG(1) << "SafeBrowsing GetEncodedFullHashes request for: " << |
| 465 source->GetURL() << " failed with error: " << status.error(); | 481 source->GetURL() << " failed with error: " << status.error(); |
| 466 } else { | 482 } else { |
| 483 RecordGetV4HashResult(GET_HASH_HTTP_ERROR); | |
| 467 DVLOG(1) << "SafeBrowsing GetEncodedFullHashes request for: " << | 484 DVLOG(1) << "SafeBrowsing GetEncodedFullHashes request for: " << |
| 468 source->GetURL() << " failed with error: " << response_code; | 485 source->GetURL() << " failed with error: " << response_code; |
| 469 } | 486 } |
| 470 } | 487 } |
| 471 | 488 |
| 472 // Invoke the callback with full_hashes, even if there was a parse error or | 489 // Invoke the callback with full_hashes, even if there was a parse error or |
| 473 // an error response code (in which case full_hashes will be empty). The | 490 // an error response code (in which case full_hashes will be empty). The |
| 474 // caller can't be blocked indefinitely. | 491 // caller can't be blocked indefinitely. |
| 475 details.callback.Run(full_hashes, negative_cache_duration); | 492 details.callback.Run(full_hashes, negative_cache_duration); |
| 476 | 493 |
| 477 v4_hash_requests_.erase(it); | 494 v4_hash_requests_.erase(it); |
| 478 } else { | 495 } else { |
| 479 // Update or chunk response. | 496 // Update or chunk response. |
Nathan Parker (2016/01/07 05:28:29): I always thought it was odd that this OnURLFetchComplete…

kcarattini (2016/01/07 06:06:13): Added a TODO to consider pulling this out in the future.
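The exchange above is about OnURLFetchComplete handling several kinds of fetches in one place. In this patch the method decides which flow a finished fetcher belongs to by looking it up in the pending-request containers (hash_requests_, v4_hash_requests_, or the single update request_). A simplified sketch of that dispatch shape, with placeholder types and stub handlers rather than the patch's real logic:

```cpp
// Simplified sketch of the dispatch-by-ownership pattern used below.
// RequestInfo stands in for FullHashDetails; the handlers are stubs.
#include <map>

#include "net/url_request/url_fetcher.h"

struct RequestInfo {};  // placeholder for FullHashDetails

class FetchDispatcher {
 public:
  void OnURLFetchComplete(const net::URLFetcher* source) {
    if (hash_requests_.count(source)) {
      HandleGetHashResponse(source);        // V2/V3 GetHash
    } else if (v4_hash_requests_.count(source)) {
      HandleV4FullHashResponse(source);     // V4 FindFullHashes
    } else {
      HandleUpdateOrChunkResponse(source);  // update / chunk request
    }
  }

 private:
  void HandleGetHashResponse(const net::URLFetcher*) {}
  void HandleV4FullHashResponse(const net::URLFetcher*) {}
  void HandleUpdateOrChunkResponse(const net::URLFetcher*) {}

  std::map<const net::URLFetcher*, RequestInfo> hash_requests_;
  std::map<const net::URLFetcher*, RequestInfo> v4_hash_requests_;
};
```

The TODO kcarattini mentions would presumably split these branches into separate handlers along these lines.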
| 497 RecordHttpResponseOrErrorCode(kUmaHashResponseMetricName, status, | |
| 498 response_code); | |
| 480 fetcher.reset(request_.release()); | 499 fetcher.reset(request_.release()); |
| 481 | 500 |
| 482 if (request_type_ == UPDATE_REQUEST || | 501 if (request_type_ == UPDATE_REQUEST || |
| 483 request_type_ == BACKUP_UPDATE_REQUEST) { | 502 request_type_ == BACKUP_UPDATE_REQUEST) { |
| 484 if (!fetcher.get()) { | 503 if (!fetcher.get()) { |
| 485 // We've timed out waiting for an update response, so we've cancelled | 504 // We've timed out waiting for an update response, so we've cancelled |
| 486 // the update request and scheduled a new one. Ignore this response. | 505 // the update request and scheduled a new one. Ignore this response. |
| 487 return; | 506 return; |
| 488 } | 507 } |
| 489 | 508 |
| (...skipping 513 matching lines...) | |
| 1003 SafeBrowsingProtocolManager::FullHashDetails::FullHashDetails( | 1022 SafeBrowsingProtocolManager::FullHashDetails::FullHashDetails( |
| 1004 FullHashCallback callback, | 1023 FullHashCallback callback, |
| 1005 bool is_download) | 1024 bool is_download) |
| 1006 : callback(callback), is_download(is_download) {} | 1025 : callback(callback), is_download(is_download) {} |
| 1007 | 1026 |
| 1008 SafeBrowsingProtocolManager::FullHashDetails::~FullHashDetails() {} | 1027 SafeBrowsingProtocolManager::FullHashDetails::~FullHashDetails() {} |
| 1009 | 1028 |
| 1010 SafeBrowsingProtocolManagerDelegate::~SafeBrowsingProtocolManagerDelegate() {} | 1029 SafeBrowsingProtocolManagerDelegate::~SafeBrowsingProtocolManagerDelegate() {} |
| 1011 | 1030 |
| 1012 } // namespace safe_browsing | 1031 } // namespace safe_browsing |
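For context on the backoff state this patch touches (gethash_v4_error_count_, gethash_v4_back_off_mult_, next_gethash_v4_time_, kSbMaxBackOff): the body of HandleGetHashV4Error is outside the lines shown here, so the following is only a hypothetical sketch of a capped exponential backoff with that shape. The 30-minute base interval and the absence of jitter are assumptions for illustration, not values taken from this patch.

```cpp
// Hypothetical sketch of a capped exponential backoff modeled on the
// members used in this patch; the constants are illustrative assumptions.
#include <algorithm>
#include <cstddef>
#include <cstdint>

#include "base/time/time.h"

constexpr size_t kMaxBackOffMult = 8;  // mirrors kSbMaxBackOff

struct V4GetHashBackoff {
  size_t error_count = 0;
  size_t back_off_mult = 1;
  base::Time next_allowed_time;

  // Called when a FindFullHashes request fails.
  void OnError(base::Time now) {
    ++error_count;
    if (error_count > 1)
      back_off_mult = std::min(back_off_mult * 2, kMaxBackOffMult);
    // Assumed 30-minute base interval; a real implementation would also
    // randomize the delay.
    const int64_t delay_minutes = 30 * static_cast<int64_t>(back_off_mult);
    next_allowed_time = now + base::TimeDelta::FromMinutes(delay_minutes);
  }

  // Called on a successful (HTTP 200) response.
  void OnSuccess() {
    error_count = 0;
    back_off_mult = 1;
  }

  // Requests issued before |next_allowed_time| must return empty results,
  // as the patch does when Time::Now() <= next_gethash_v4_time_.
  bool InBackoff(base::Time now) const { return now <= next_allowed_time; }
};
```

Resetting both the error count and the multiplier on success mirrors what the patch does on an HTTP 200 (gethash_v4_error_count_ = 0; gethash_v4_back_off_mult_ = 1).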