Chromium Code Reviews

Unified Diff: components/rappor/log_uploader.cc

Issue 49753002: RAPPOR implementation (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Created 6 years, 11 months ago
Index: components/rappor/log_uploader.cc
diff --git a/components/rappor/log_uploader.cc b/components/rappor/log_uploader.cc
new file mode 100644
index 0000000000000000000000000000000000000000..462bb3784147d30d277ef7347c97b9f046b0ae5b
--- /dev/null
+++ b/components/rappor/log_uploader.cc
@@ -0,0 +1,137 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/rappor/log_uploader.h"
+
+//#include "chrome/browser/metrics/compression_utils.h"
Ilya Sherman 2014/01/10 11:00:32 nit: ?
Steven Holte 2014/01/14 00:47:54 Done.
+#include "net/base/load_flags.h"
+#include "url/gurl.h"
+
+using base::TimeDelta;
+
+namespace {
+
+// The delay, in seconds, between uploading when there are queued logs to send.
+const int kUnsentLogsIntervalSeconds = 3;
+
+// When uploading metrics to the server fails, we progressively wait longer and
+// longer before sending the next log. This backoff process helps reduce load
+// on a server that is having issues.
+// The following is the multiplier we use to expand that inter-log duration.
+const double kBackoffMultiplier = 1.1;
+
+// The maximum backoff multiplier.
+const int kMaxBackoffMultiplier = 10;
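For reference, with these constants the retry interval starts at 3 seconds and grows by 10% after each failed upload (3 s, 3.3 s, 3.63 s, ...), reaching the 10 * 3 s = 30 s cap after roughly 25 consecutive failures.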
+
+// If an upload fails, and the transmission was over this byte count, then we
+// will discard the log, and not try to retransmit it. We also don't persist
+// the log to the prefs for transmission during the next chrome session if this
Ilya Sherman 2014/01/10 11:00:32 nit: "chrome" -> "Chrome"
Steven Holte 2014/01/14 00:47:54 Removed the sentence because it doesn't actually apply here.
+// limit is exceeded.
+const size_t kUploadLogAvoidRetransmitSize = 50000;
+
+// The maximum number of unsent logs we will keep.
+const size_t kMaxQueuedLogs = 10;
Ilya Sherman 2014/01/10 11:00:32 nit: It's generally better to limit based on size, rather than on the number of logs.
Steven Holte 2014/01/14 00:47:54 Added TODO
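A rough sketch of the size-based cap suggested above (not part of this patch; kMaxQueuedBytes and queued_bytes_ are illustrative names):

  // Hypothetical alternative to kMaxQueuedLogs: cap the total queued bytes.
  const size_t kMaxQueuedBytes = 512 * 1024;  // Illustrative value only.

  void LogUploader::QueueLog(const std::string& log) {
    queued_logs_.push(log);
    queued_bytes_ += log.size();
    // Drop the oldest logs until the queue fits under the byte cap, keeping
    // at least the log that was just added.
    while (queued_bytes_ > kMaxQueuedBytes && queued_logs_.size() > 1) {
      queued_bytes_ -= queued_logs_.front().size();
      queued_logs_.pop();
    }
  }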
+
+} // anonymous namespace
Ilya Sherman 2014/01/10 11:00:32 nit: "anonymous namespace" -> "namespace" (for con
Steven Holte 2014/01/14 00:47:54 Done.
+
+namespace rappor {
+
+LogUploader::LogUploader(const char* server_url, const char* mime_type,
+ net::URLRequestContextGetter* request_context)
Ilya Sherman 2014/01/10 11:00:32 Who owns the request context?
Steven Holte 2014/01/14 00:47:54 Used scoped_refptr
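In the revised patch set, holding the context in a scoped_refptr would look roughly like this in the header (a sketch; the actual change is not shown in this patch set):

  scoped_refptr<net::URLRequestContextGetter> request_context_;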
+ : server_url_(server_url), mime_type_(mime_type),
+ request_context_(request_context), callback_pending_(false),
Ilya Sherman 2014/01/10 11:00:32 nit: One field per line, please.
Steven Holte 2014/01/14 00:47:54 Done.
+ upload_interval_(TimeDelta::FromSeconds(kUnsentLogsIntervalSeconds)) {
+}
+
+LogUploader::~LogUploader() {}
+
+void LogUploader::QueueLog(const std::string& log) {
+ queued_logs_.push(log);
+ if (!upload_timer_.IsRunning() && !callback_pending_)
+ StartScheduledUpload();
+}
+
+void LogUploader::ScheduleNextUpload() {
+ if (upload_timer_.IsRunning() || callback_pending_)
+ return;
+
+ upload_timer_.Start(
+ FROM_HERE, upload_interval_, this, &LogUploader::StartScheduledUpload);
+}
+
+void LogUploader::StartScheduledUpload() {
+ callback_pending_ = true;
+ current_fetch_.reset(
+ net::URLFetcher::Create(GURL(server_url_), net::URLFetcher::POST, this));
+ current_fetch_->SetRequestContext(request_context_);
+ current_fetch_->SetUploadData(mime_type_, queued_logs_.front());
+
+ // We already drop cookies server-side, but we might as well strip them out
+ // client-side as well.
+ current_fetch_->SetLoadFlags(net::LOAD_DO_NOT_SAVE_COOKIES |
+ net::LOAD_DO_NOT_SEND_COOKIES);
+ current_fetch_->Start();
+}
+
+void LogUploader::OnURLFetchComplete(const net::URLFetcher* source) {
+ // We're not allowed to re-use the existing |URLFetcher|s, so free them here.
+ // Note however that |source| is aliased to the fetcher, so we should be
+ // careful not to delete it too early.
+ DCHECK_EQ(current_fetch_.get(), source);
+ scoped_ptr<net::URLFetcher> s(current_fetch_.Pass());
+
+ int response_code = source->GetResponseCode();
+
+ bool upload_succeeded = response_code == 200;
+
+ // Provide boolean for error recovery (allow us to ignore response_code).
+ bool discard_log = false;
+ const size_t log_size = queued_logs_.front().length();
+ if (!upload_succeeded && log_size > kUploadLogAvoidRetransmitSize) {
+ discard_log = true;
+ } else if (queued_logs_.size() > kMaxQueuedLogs) {
+ discard_log = true;
+ } else if (response_code == 400) {
+ // Bad syntax. Retransmission won't work.
+ discard_log = true;
+ }
+
+ if (upload_succeeded || discard_log)
+ queued_logs_.pop();
+
+ // Error 400 indicates a problem with the log, not with the server, so
+ // don't consider that a sign that the server is in trouble.
+ bool server_is_healthy = upload_succeeded || response_code == 400;
+ UploadFinished(server_is_healthy, !queued_logs_.empty());
+}
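To illustrate the discard rules above: a failed upload of a log larger than 50,000 bytes is dropped immediately rather than retried, and a smaller log is retried on later attempts unless the server answers 400 (malformed request) or more than 10 logs are already queued.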
+
+void LogUploader::UploadFinished(bool server_is_healthy,
+ bool more_logs_remaining) {
+ DCHECK(callback_pending_);
+ callback_pending_ = false;
+ // If the server is having issues, back off. Otherwise, reset to default.
+ if (!server_is_healthy) {
+ BackOffUploadInterval();
+ } else {
Ilya Sherman 2014/01/10 11:00:32 nit: No need for curlies.
Steven Holte 2014/01/14 00:47:54 Done.
+ upload_interval_ = TimeDelta::FromSeconds(kUnsentLogsIntervalSeconds);
+ }
+
+ if (more_logs_remaining) {
+ ScheduleNextUpload();
+ }
Ilya Sherman 2014/01/10 11:00:32 nit: No need for curlies.
Steven Holte 2014/01/14 00:47:54 Done.
+}
+
+void LogUploader::BackOffUploadInterval() {
+ DCHECK_GT(kBackoffMultiplier, 1.0);
+ upload_interval_ = TimeDelta::FromMicroseconds(static_cast<int64>(
+ kBackoffMultiplier * upload_interval_.InMicroseconds()));
+
+ TimeDelta max_interval = kMaxBackoffMultiplier *
+ TimeDelta::FromSeconds(kUnsentLogsIntervalSeconds);
+ if (upload_interval_ > max_interval || upload_interval_.InSeconds() < 0) {
+ upload_interval_ = max_interval;
+ }
Ilya Sherman 2014/01/10 11:00:32 nit: No need for curlies.
Steven Holte 2014/01/14 00:47:54 Done.
+}
+
+} // namespace rappor
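A minimal usage sketch, assuming a caller that already has a URLRequestContextGetter (the URL, MIME type, and variable names below are illustrative, not taken from this patch):

  // Hypothetical call site, e.g. in the RAPPOR service setup code.
  rappor::LogUploader uploader("https://example.com/rappor",
                               "application/octet-stream",
                               request_context_getter);
  uploader.QueueLog(serialized_report);  // The first queued log starts an upload.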
