Chromium Code Reviews

Unified Diff: chrome/browser/safe_browsing/protocol_manager.cc

Issue 11784038: Add backup URL support for SafeBrowsing data requests. (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Fix bad response case | Created 7 years, 11 months ago
Index: chrome/browser/safe_browsing/protocol_manager.cc
diff --git a/chrome/browser/safe_browsing/protocol_manager.cc b/chrome/browser/safe_browsing/protocol_manager.cc
index e825e16fb166b30014f1d9811d7e317fbee9e423..0dfd62e3921852d0227720fc0656b4b9270acefc 100644
--- a/chrome/browser/safe_browsing/protocol_manager.cc
+++ b/chrome/browser/safe_browsing/protocol_manager.cc
@@ -21,6 +21,7 @@
#include "google_apis/google_api_keys.h"
#include "net/base/escape.h"
#include "net/base/load_flags.h"
+#include "net/base/net_errors.h"
#include "net/url_request/url_fetcher.h"
#include "net/url_request/url_request_context_getter.h"
#include "net/url_request/url_request_status.h"
@@ -28,8 +29,33 @@
using base::Time;
using base::TimeDelta;
+namespace {
+
+enum UpdateResult {
+ UPDATE_RESULT_FAIL,
+ UPDATE_RESULT_SUCCESS,
+ UPDATE_RESULT_BACKUP_CONNECT_FAIL,
+ UPDATE_RESULT_BACKUP_CONNECT_SUCCESS,
+ UPDATE_RESULT_BACKUP_HTTP_FAIL,
+ UPDATE_RESULT_BACKUP_HTTP_SUCCESS,
+ UPDATE_RESULT_BACKUP_NETWORK_FAIL,
+ UPDATE_RESULT_BACKUP_NETWORK_SUCCESS,
+ UPDATE_RESULT_MAX,
+ UPDATE_RESULT_BACKUP_START = UPDATE_RESULT_BACKUP_CONNECT_FAIL,
+};
+
+void RecordUpdateResult(UpdateResult result) {
+ DCHECK(result >= 0 && result < UPDATE_RESULT_MAX);
+ UMA_HISTOGRAM_ENUMERATION("SB2.UpdateResult", result, UPDATE_RESULT_MAX);
+}
+
+} // namespace
+
+// Minimum time, in seconds, from start up before we issue an update query.
+static const int kSbTimerStartIntervalSecMin = 60;
+
// Maximum time, in seconds, from start up before we must issue an update query.
-static const int kSbTimerStartIntervalSec = 5 * 60;
+static const int kSbTimerStartIntervalSecMax = 300;
// The maximum time, in seconds, to wait for a response to an update request.
static const int kSbMaxUpdateWaitSec = 30;
@@ -80,7 +106,8 @@ SafeBrowsingProtocolManager::SafeBrowsingProtocolManager(
update_back_off_mult_(1),
gethash_back_off_mult_(1),
next_update_interval_(base::TimeDelta::FromSeconds(
- base::RandInt(60, kSbTimerStartIntervalSec))),
+ base::RandInt(kSbTimerStartIntervalSecMin,
+ kSbTimerStartIntervalSecMax))),
update_state_(FIRST_REQUEST),
chunk_pending_to_write_(false),
version_(config.version),
@@ -88,10 +115,18 @@ SafeBrowsingProtocolManager::SafeBrowsingProtocolManager(
client_name_(config.client_name),
request_context_getter_(request_context_getter),
url_prefix_(config.url_prefix),
+ backup_update_reason_(BACKUP_UPDATE_REASON_MAX),
disable_auto_update_(config.disable_auto_update),
url_fetcher_id_(0) {
DCHECK(!url_prefix_.empty());
+ backup_url_prefixes_[BACKUP_UPDATE_REASON_CONNECT] =
+ config.backup_connect_error_url_prefix;
+ backup_url_prefixes_[BACKUP_UPDATE_REASON_HTTP] =
+ config.backup_http_error_url_prefix;
+ backup_url_prefixes_[BACKUP_UPDATE_REASON_NETWORK] =
+ config.backup_network_error_url_prefix;
+
// Set the backoff multiplier fuzz to a random value between 0 and 1.
back_off_fuzz_ = static_cast<float>(base::RandDouble());
if (version_.empty())
@@ -225,7 +260,8 @@ void SafeBrowsingProtocolManager::OnURLFetchComplete(
// Update or chunk response.
fetcher.reset(request_.release());
- if (request_type_ == UPDATE_REQUEST) {
+ if (request_type_ == UPDATE_REQUEST ||
+ request_type_ == BACKUP_UPDATE_REQUEST) {
if (!fetcher.get()) {
// We've timed out waiting for an update response, so we've cancelled
// the update request and scheduled a new one. Ignore this response.
@@ -236,7 +272,8 @@ void SafeBrowsingProtocolManager::OnURLFetchComplete(
timeout_timer_.Stop();
}
- if (source->GetStatus().is_success() && source->GetResponseCode() == 200) {
+ net::URLRequestStatus status = source->GetStatus();
+ if (status.is_success() && source->GetResponseCode() == 200) {
// We have data from the SafeBrowsing service.
std::string data;
source->GetResponseAsString(&data);
@@ -258,28 +295,53 @@ void SafeBrowsingProtocolManager::OnURLFetchComplete(
}
break;
case UPDATE_REQUEST:
+ case BACKUP_UPDATE_REQUEST:
if (chunk_request_urls_.empty() && parsed_ok) {
// We are up to date since the servers gave us nothing new, so we
// are done with this update cycle.
UpdateFinished(true);
}
break;
+ case NO_REQUEST:
+ // This can happen if HandleServiceResponse fails above.
+ break;
default:
NOTREACHED();
break;
}
} else {
- // The SafeBrowsing service error, or very bad response code: back off.
- if (request_type_ == CHUNK_REQUEST)
- chunk_request_urls_.clear();
- UpdateFinished(false);
- if (source->GetStatus().status() == net::URLRequestStatus::FAILED) {
+ if (status.status() == net::URLRequestStatus::FAILED) {
VLOG(1) << "SafeBrowsing request for: " << source->GetURL()
<< " failed with error: " << source->GetStatus().error();
} else {
VLOG(1) << "SafeBrowsing request for: " << source->GetURL()
<< " failed with error: " << source->GetResponseCode();
}
+ if (request_type_ == CHUNK_REQUEST) {
+ // The SafeBrowsing service error, or very bad response code: back off.
+ chunk_request_urls_.clear();
+ } else if (request_type_ == UPDATE_REQUEST) {
+ BackupUpdateReason backup_update_reason = BACKUP_UPDATE_REASON_MAX;
+ if (status.is_success()) {
+ backup_update_reason = BACKUP_UPDATE_REASON_HTTP;
+ } else {
+ switch (status.error()) {
+ case net::ERR_INTERNET_DISCONNECTED:
+ case net::ERR_NETWORK_CHANGED:
+ backup_update_reason = BACKUP_UPDATE_REASON_NETWORK;
+ break;
+ default:
+ backup_update_reason = BACKUP_UPDATE_REASON_CONNECT;
+ break;
+ }
+ }
+ if (backup_update_reason != BACKUP_UPDATE_REASON_MAX &&
+ !backup_url_prefixes_[backup_update_reason].empty()) {
+ IssueBackupUpdateRequest(backup_update_reason);
+ return;
+ }
+ }
+ UpdateFinished(false);
}
}
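
For clarity, the failure branch above reduces to a three-way classification of a failed primary update request. A minimal standalone sketch of that decision (the enum and helper name are illustrative only, not part of the patch; it assumes net/base/net_errors.h and net/url_request/url_request_status.h, both already included in this file):

  // Sketch only: mirrors the classification in OnURLFetchComplete().
  enum BackupReason { BACKUP_HTTP, BACKUP_NETWORK, BACKUP_CONNECT };

  BackupReason ClassifyUpdateFailure(const net::URLRequestStatus& status) {
    if (status.is_success())
      return BACKUP_HTTP;       // Request completed, but response code != 200.
    switch (status.error()) {
      case net::ERR_INTERNET_DISCONNECTED:
      case net::ERR_NETWORK_CHANGED:
        return BACKUP_NETWORK;  // Local connectivity dropped or changed.
      default:
        return BACKUP_CONNECT;  // The primary server could not be reached.
    }
  }
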
@@ -294,7 +356,8 @@ bool SafeBrowsingProtocolManager::HandleServiceResponse(const GURL& url,
SafeBrowsingProtocolParser parser;
switch (request_type_) {
- case UPDATE_REQUEST: {
+ case UPDATE_REQUEST:
+ case BACKUP_UPDATE_REQUEST: {
int next_update_sec = -1;
bool reset = false;
scoped_ptr<std::vector<SBChunkDelete> > chunk_deletes(
@@ -467,6 +530,31 @@ void SafeBrowsingProtocolManager::IssueUpdateRequest() {
base::Unretained(this)));
}
+// The backup request can run immediately since the chunks have already been
+// retrieved from the DB.
+void SafeBrowsingProtocolManager::IssueBackupUpdateRequest(
+ BackupUpdateReason backup_update_reason) {
+ DCHECK(CalledOnValidThread());
+ DCHECK_EQ(request_type_, UPDATE_REQUEST);
+ DCHECK(backup_update_reason >= 0 &&
+ backup_update_reason < BACKUP_UPDATE_REASON_MAX);
+ request_type_ = BACKUP_UPDATE_REQUEST;
+ backup_update_reason_ = backup_update_reason;
+
+ GURL backup_update_url = BackupUpdateUrl(backup_update_reason);
+ request_.reset(net::URLFetcher::Create(
+ url_fetcher_id_++, backup_update_url, net::URLFetcher::POST, this));
+ request_->SetLoadFlags(net::LOAD_DISABLE_CACHE);
+ request_->SetRequestContext(request_context_getter_);
+ request_->SetUploadData("text/plain", update_list_data_);
+ request_->Start();
+
+ // Begin the update request timeout.
+ timeout_timer_.Start(FROM_HERE, TimeDelta::FromSeconds(kSbMaxUpdateWaitSec),
+ this,
+ &SafeBrowsingProtocolManager::UpdateResponseTimeout);
+}
+
void SafeBrowsingProtocolManager::IssueChunkRequest() {
DCHECK(CalledOnValidThread());
// We are only allowed to have one request outstanding at any time. Also,
@@ -498,11 +586,11 @@ void SafeBrowsingProtocolManager::OnGetChunksComplete(
}
// Format our stored chunks:
- std::string list_data;
+ update_list_data_.clear();
bool found_malware = false;
bool found_phishing = false;
for (size_t i = 0; i < lists.size(); ++i) {
- list_data.append(FormatList(lists[i]));
+ update_list_data_.append(FormatList(lists[i]));
if (lists[i].name == safe_browsing_util::kPhishingList)
found_phishing = true;
@@ -513,11 +601,11 @@ void SafeBrowsingProtocolManager::OnGetChunksComplete(
// If we have an empty database, let the server know we want data for these
// lists.
if (!found_phishing)
- list_data.append(FormatList(
+ update_list_data_.append(FormatList(
SBListChunkRanges(safe_browsing_util::kPhishingList)));
if (!found_malware)
- list_data.append(FormatList(
+ update_list_data_.append(FormatList(
SBListChunkRanges(safe_browsing_util::kMalwareList)));
GURL update_url = UpdateUrl();
@@ -525,7 +613,7 @@ void SafeBrowsingProtocolManager::OnGetChunksComplete(
url_fetcher_id_++, update_url, net::URLFetcher::POST, this));
request_->SetLoadFlags(net::LOAD_DISABLE_CACHE);
request_->SetRequestContext(request_context_getter_);
- request_->SetUploadData("text/plain", list_data);
+ request_->SetUploadData("text/plain", update_list_data_);
request_->Start();
// Begin the update request timeout.
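
The update_list_data_ member changes above are what make the backup path cheap: OnGetChunksComplete() now formats the chunk ranges into update_list_data_, so IssueBackupUpdateRequest() can re-post exactly the same body without another trip to the database. A minimal sketch of that "format once, post twice" caching pattern (the class and method names are made up for illustration):

  #include <string>

  // Sketch only: hypothetical helper illustrating the caching pattern.
  class ListDataCache {
   public:
    // Called once per update cycle with the freshly formatted chunk ranges.
    void Store(const std::string& formatted_lists) { data_ = formatted_lists; }
    // Both the primary and the backup request upload the same cached body.
    const std::string& body() const { return data_; }
    // Cleared at the start of the next cycle (cf. update_list_data_.clear()).
    void Reset() { data_.clear(); }
   private:
    std::string data_;
  };
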
@@ -538,7 +626,8 @@ void SafeBrowsingProtocolManager::OnGetChunksComplete(
// will run. Close the current update session and schedule another update.
void SafeBrowsingProtocolManager::UpdateResponseTimeout() {
DCHECK(CalledOnValidThread());
- DCHECK_EQ(request_type_, UPDATE_REQUEST);
+ DCHECK(request_type_ == UPDATE_REQUEST ||
+ request_type_ == BACKUP_UPDATE_REQUEST);
request_.reset();
UpdateFinished(false);
}
@@ -589,6 +678,19 @@ void SafeBrowsingProtocolManager::UpdateFinished(bool success, bool back_off) {
DCHECK(CalledOnValidThread());
UMA_HISTOGRAM_COUNTS("SB2.UpdateSize", update_size_);
update_size_ = 0;
+ bool update_success = success || request_type_ == CHUNK_REQUEST;
mattm 2013/01/12 02:24:49 This means a failed CHUNK_REQUEST would be histogrammed…
cbentzel 2013/01/14 11:33:35 Yes - the histogram is only for the first update d…
cbentzel 2013/01/14 11:39:42 Unfortunately, the SafeBrowsing Protocol spec does…
+ if (backup_update_reason_ == BACKUP_UPDATE_REASON_MAX) {
+ RecordUpdateResult(
+ update_success ? UPDATE_RESULT_SUCCESS : UPDATE_RESULT_FAIL);
+ } else {
+ UpdateResult update_result = static_cast<UpdateResult>(
+ UPDATE_RESULT_BACKUP_START +
+ (static_cast<int>(backup_update_reason_) * 2) +
+ update_success);
+ RecordUpdateResult(update_result);
+ }
+ backup_update_reason_ = BACKUP_UPDATE_REASON_MAX;
+ request_type_ = NO_REQUEST;
mattm 2013/01/12 02:24:49 should we clear update_list_data_ here too?
cbentzel 2013/01/14 11:33:35 I was clearing it at the start of OnGetChunksComplete…
delegate_->UpdateFinished(success);
ScheduleNextUpdate(back_off);
}
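
The histogram arithmetic above relies on the UpdateResult layout declared at the top of the file: each backup reason owns a consecutive (FAIL, SUCCESS) pair of buckets starting at UPDATE_RESULT_BACKUP_START. A sketch of the mapping, assuming BACKUP_UPDATE_REASON_CONNECT/HTTP/NETWORK are declared as 0/1/2 in the header (they already index backup_url_prefixes_ in the constructor hunk); BackupResultBucket is a hypothetical helper name:

  //   (CONNECT, fail)    -> 2 + 0*2 + 0 = UPDATE_RESULT_BACKUP_CONNECT_FAIL
  //   (HTTP,    success) -> 2 + 1*2 + 1 = UPDATE_RESULT_BACKUP_HTTP_SUCCESS
  //   (NETWORK, fail)    -> 2 + 2*2 + 0 = UPDATE_RESULT_BACKUP_NETWORK_FAIL
  UpdateResult BackupResultBucket(int reason, bool success) {
    return static_cast<UpdateResult>(
        UPDATE_RESULT_BACKUP_START + reason * 2 + (success ? 1 : 0));
  }
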
@@ -599,6 +701,17 @@ GURL SafeBrowsingProtocolManager::UpdateUrl() const {
return GURL(url);
}
+GURL SafeBrowsingProtocolManager::BackupUpdateUrl(
+ BackupUpdateReason backup_update_reason) const {
+ DCHECK(backup_update_reason >= 0 &&
+ backup_update_reason < BACKUP_UPDATE_REASON_MAX);
+ DCHECK(!backup_url_prefixes_[backup_update_reason].empty());
+ std::string url = SafeBrowsingProtocolManagerHelper::ComposeUrl(
+ backup_url_prefixes_[backup_update_reason], "downloads", client_name_,
+ version_, additional_query_);
+ return GURL(url);
+}
+
GURL SafeBrowsingProtocolManager::GetHashUrl() const {
std::string url = SafeBrowsingProtocolManagerHelper::ComposeUrl(
url_prefix_, "gethash", client_name_, version_, additional_query_);
