Index: components/rappor/log_uploader.cc |
diff --git a/components/rappor/log_uploader.cc b/components/rappor/log_uploader.cc |
new file mode 100644 |
index 0000000000000000000000000000000000000000..1ba0a55c3a1e1488183581ca10ce9a5c6e3487bd |
--- /dev/null |
+++ b/components/rappor/log_uploader.cc |
@@ -0,0 +1,150 @@ |
+// Copyright (c) 2013 The Chromium Authors. All rights reserved. |
+// Use of this source code is governed by a BSD-style license that can be |
+// found in the LICENSE file. |
+ |
+#include "components/rappor/log_uploader.h" |
+ |
+//#include "chrome/browser/metrics/compression_utils.h" |
+#include "net/base/load_flags.h" |
+#include "url/gurl.h" |
+ |
+using base::TimeDelta; |
+ |
namespace {

// The delay, in seconds, between uploading when there are queued logs from
// previous sessions to send.
const int kUnsentLogsIntervalSeconds = 3;

// When uploading metrics to the server fails, we progressively wait longer and
// longer before sending the next log. This backoff process helps reduce load
// on a server that is having issues.
// The following is the multiplier we use to expand that inter-log duration.
const double kBackoffMultiplier = 1.1;

// The maximum backoff multiplier, i.e. the cap on how far the upload interval
// may grow relative to kUnsentLogsIntervalSeconds.
const int kMaxBackoffMultiplier = 10;

// If an upload fails, and the transmission was over this byte count, then we
// will discard the log, and not try to retransmit it. We also don't persist
// the log to the prefs for transmission during the next chrome session if this
// limit is exceeded.
const size_t kUploadLogAvoidRetransmitSize = 50000;

// The maximum number of unsent logs we will keep; when exceeded, the oldest
// queued log is dropped (see LogUploader::OnURLFetchComplete).
const size_t kMaxQueuedLogs = 10;

}  // namespace
+ |
+namespace rappor { |
+ |
+LogUploader::LogUploader(const char* server_url, const char* mime_type) |
+ : server_url_(server_url), mime_type_(mime_type), callback_pending_(false) { |
+ upload_interval_ = TimeDelta::FromSeconds(kUnsentLogsIntervalSeconds); |
Alexei Svitkine (slow)
2013/12/24 16:59:30
Nit: Put this in the init list.
Steven Holte
2014/01/04 00:12:54
Done.
|
+} |
+ |
// Stores the request context later used by StartScheduledUpload() when
// creating URL fetchers.
// NOTE(review): assumes this is called before the first QueueLog() triggers
// an upload -- confirm with callers.
void LogUploader::SetRequestContext(
    net::URLRequestContextGetter* request_context) {
  request_context_ = request_context;
}
+ |
+void LogUploader::QueueLog(const std::string& log) { |
+ queued_logs_.push(log); |
+ if (!upload_timer_.IsRunning() && !callback_pending_) { |
+ StartScheduledUpload(); |
+ } |
+} |
+ |
+void LogUploader::ScheduleNextUpload() { |
+ if (upload_timer_.IsRunning() || callback_pending_) |
+ return; |
+ |
+ upload_timer_.Start( |
+ FROM_HERE, upload_interval_, this, &LogUploader::StartScheduledUpload); |
+} |
+ |
+void LogUploader::StartScheduledUpload() { |
+ callback_pending_ = true; |
+ current_fetch_.reset( |
+ net::URLFetcher::Create(GURL(server_url_), net::URLFetcher::POST, this)); |
+ current_fetch_->SetRequestContext(request_context_); |
+ /* |
+ std::string compressed_log_text; |
+ bool compression_successful = |
+ chrome::GzipCompress(log_text, &compressed_log_text); |
+ DCHECK(compression_successful); |
+ if (compression_successful) { |
+ current_fetch_->SetUploadData(kMimeType, compressed_log_text); |
+ // Tell the server that we're uploading gzipped protobufs. |
+ current_fetch_->SetExtraRequestHeaders("content-encoding: gzip"); |
+ }*/ |
+ current_fetch_->SetUploadData(mime_type_, queued_logs_.front()); |
+ |
+ // We already drop cookies server-side, but we might as well strip them out |
+ // client-side as well. |
+ current_fetch_->SetLoadFlags(net::LOAD_DO_NOT_SAVE_COOKIES | |
+ net::LOAD_DO_NOT_SEND_COOKIES); |
+ current_fetch_->Start(); |
+} |
+ |
// URLFetcher completion callback: decides whether the just-attempted log
// should be removed from the queue, then hands off to UploadFinished() for
// backoff bookkeeping and rescheduling.
void LogUploader::OnURLFetchComplete(const net::URLFetcher* source) {
  // We're not allowed to re-use the existing |URLFetcher|s, so free them here.
  // Note however that |source| is aliased to the fetcher, so we should be
  // careful not to delete it too early.
  DCHECK_EQ(current_fetch_.get(), source);
  scoped_ptr<net::URLFetcher> s(current_fetch_.Pass());

  int response_code = source->GetResponseCode();

  // Only HTTP 200 counts as a successful upload.
  bool upload_succeeded = response_code == 200;

  // Provide boolean for error recovery (allow us to ignore response_code).
  bool discard_log = false;
  // front() is the log that was just attempted; the queue is non-empty here
  // because an upload was in flight.
  const size_t log_size = queued_logs_.front().length();
  if (!upload_succeeded && log_size > kUploadLogAvoidRetransmitSize) {
    // Oversized logs are not worth retransmitting.
    discard_log = true;
  } else if (queued_logs_.size() > kMaxQueuedLogs) {
    // Bound the queue by dropping the oldest log.
    discard_log = true;
  } else if (response_code == 400) {
    // Bad syntax.  Retransmission won't work.
    discard_log = true;
  }

  // Pop exactly once whether the log succeeded or is being abandoned.
  if (upload_succeeded || discard_log)
    queued_logs_.pop();

  // Error 400 indicates a problem with the log, not with the server, so
  // don't consider that a sign that the server is in trouble.
  bool server_is_healthy = upload_succeeded || response_code == 400;
  UploadFinished(server_is_healthy, !queued_logs_.empty());
}
+ |
+void LogUploader::UploadFinished(bool server_is_healthy, |
+ bool more_logs_remaining) { |
+ DCHECK(callback_pending_); |
+ callback_pending_ = false; |
+ // If the server is having issues, back off. Otherwise, reset to default. |
+ if (!server_is_healthy) { |
+ BackOffUploadInterval(); |
+ } else { |
+ upload_interval_ = TimeDelta::FromSeconds(kUnsentLogsIntervalSeconds); |
+ } |
+ |
+ if (more_logs_remaining) { |
+ ScheduleNextUpload(); |
+ } |
+} |
+ |
+void LogUploader::BackOffUploadInterval() { |
+ DCHECK_GT(kBackoffMultiplier, 1.0); |
+ upload_interval_ = TimeDelta::FromMicroseconds(static_cast<int64>( |
+ kBackoffMultiplier * upload_interval_.InMicroseconds())); |
+ |
+ TimeDelta max_interval = kMaxBackoffMultiplier * |
+ TimeDelta::FromSeconds(kUnsentLogsIntervalSeconds); |
+ if (upload_interval_ > max_interval || upload_interval_.InSeconds() < 0) { |
+ upload_interval_ = max_interval; |
+ } |
+} |
+ |
+} // namespace rappor |