Index: components/rappor/log_uploader.cc
diff --git a/components/rappor/log_uploader.cc b/components/rappor/log_uploader.cc
new file mode 100644
index 0000000000000000000000000000000000000000..827bd137a677f3580a213db76388873d2e519ca8
--- /dev/null
+++ b/components/rappor/log_uploader.cc
@@ -0,0 +1,134 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/rappor/log_uploader.h"
+
+#include "net/base/load_flags.h"
+
+using base::TimeDelta;
+
+namespace {
+
+// The delay, in seconds, between uploads when there are queued logs to send.
+const int kUnsentLogsIntervalSeconds = 3;
+
+// When uploading metrics to the server fails, we progressively wait longer
+// before sending the next log; this backoff helps reduce load on a server
+// that is having issues. The following is the multiplier by which the
+// interval between uploads grows after each failure.
+const double kBackoffMultiplier = 1.1;
+
+// The maximum backoff multiplier, relative to the initial upload interval.
+const int kMaxBackoffMultiplier = 10;
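+// For example, with the default 3 second interval, the delay after repeated
+// failures grows to 3.3 s, then 3.63 s, and so on, capped at 10 * 3 = 30
+// seconds.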
+
+// If an upload fails and the log was over this byte count, we discard the
+// log rather than try to retransmit it.
+const size_t kUploadLogAvoidRetransmitSize = 50000;
+
+// The maximum number of unsent logs we will keep.
+// TODO(holte): Limit based on log size instead.
+const size_t kMaxQueuedLogs = 10;
+
+}  // namespace
+
+namespace rappor {
+
+LogUploader::LogUploader(const GURL& server_url,
+                         const std::string& mime_type,
+                         net::URLRequestContextGetter* request_context)
+    : server_url_(server_url),
+      mime_type_(mime_type),
+      request_context_(request_context),
+      has_callback_pending_(false),
+      upload_interval_(TimeDelta::FromSeconds(kUnsentLogsIntervalSeconds)) {
+}
+
+LogUploader::~LogUploader() {}
+
+void LogUploader::QueueLog(const std::string& log) {
+  queued_logs_.push(log);
+  // Upload immediately unless an upload is already running or scheduled.
+  if (!upload_timer_.IsRunning() && !has_callback_pending_)
+    StartScheduledUpload();
+}
+
+void LogUploader::ScheduleNextUpload() {
+  if (upload_timer_.IsRunning() || has_callback_pending_)
+    return;
+
+  upload_timer_.Start(
+      FROM_HERE, upload_interval_, this, &LogUploader::StartScheduledUpload);
+}
+
+void LogUploader::StartScheduledUpload() {
+  has_callback_pending_ = true;
+  current_fetch_.reset(
+      net::URLFetcher::Create(server_url_, net::URLFetcher::POST, this));
+  current_fetch_->SetRequestContext(request_context_.get());
+  current_fetch_->SetUploadData(mime_type_, queued_logs_.front());
+
+  // We already drop cookies server-side, but we might as well strip them out
+  // client-side as well.
+  current_fetch_->SetLoadFlags(net::LOAD_DO_NOT_SAVE_COOKIES |
+                               net::LOAD_DO_NOT_SEND_COOKIES);
+  // The fetcher replies asynchronously via OnURLFetchComplete().
+  current_fetch_->Start();
+}
+
+void LogUploader::OnURLFetchComplete(const net::URLFetcher* source) {
+  // We're not allowed to re-use the existing |URLFetcher|s, so free them here.
+  // Note however that |source| is aliased to the fetcher, so we should be
+  // careful not to delete it too early.
+  DCHECK_EQ(current_fetch_.get(), source);
+  scoped_ptr<net::URLFetcher> s(current_fetch_.Pass());
+
+  int response_code = source->GetResponseCode();
+
+  bool upload_succeeded = response_code == 200;
+
+  // Decide whether the log should be dropped rather than retried, regardless
+  // of how the response code is interpreted below.
+  bool discard_log = false;
+  const size_t log_size = queued_logs_.front().length();
+  if (!upload_succeeded && log_size > kUploadLogAvoidRetransmitSize) {
+    // Large logs aren't worth the bandwidth of a retransmission.
+    discard_log = true;
+  } else if (queued_logs_.size() > kMaxQueuedLogs) {
+    // Keep the queue bounded by dropping its oldest log.
+    discard_log = true;
+  } else if (response_code == 400) {
+    // Bad syntax. Retransmission won't work.
+    discard_log = true;
+  }
+
+  if (upload_succeeded || discard_log)
+    queued_logs_.pop();
+
+  // Error 400 indicates a problem with the log, not with the server, so
+  // don't consider it a sign that the server is in trouble.
+  bool server_is_healthy = upload_succeeded || response_code == 400;
+  UploadFinished(server_is_healthy, !queued_logs_.empty());
+}
+
+void LogUploader::UploadFinished(bool server_is_healthy,
+                                 bool more_logs_remaining) {
+  DCHECK(has_callback_pending_);
+  has_callback_pending_ = false;
+  // If the server is having issues, back off. Otherwise, reset to default.
+  if (!server_is_healthy)
+    BackOffUploadInterval();
+  else
+    upload_interval_ = TimeDelta::FromSeconds(kUnsentLogsIntervalSeconds);
+
+  if (more_logs_remaining)
+    ScheduleNextUpload();
+}
+
+void LogUploader::BackOffUploadInterval() {
+  DCHECK_GT(kBackoffMultiplier, 1.0);
+  upload_interval_ = TimeDelta::FromMicroseconds(static_cast<int64>(
+      kBackoffMultiplier * upload_interval_.InMicroseconds()));
+
+  // Cap the interval; the negative check guards against int64 overflow in
+  // the multiplication above.
+  TimeDelta max_interval = kMaxBackoffMultiplier *
+      TimeDelta::FromSeconds(kUnsentLogsIntervalSeconds);
+  if (upload_interval_ > max_interval || upload_interval_.InSeconds() < 0)
+    upload_interval_ = max_interval;
+}
+
+}  // namespace rappor
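
For reference, below is a minimal sketch of the header this file presumably implements, reconstructed from the members and calls used above. It is an inference, not part of the patch: the actual components/rappor/log_uploader.h may differ in includes, comments, and ordering.

#ifndef COMPONENTS_RAPPOR_LOG_UPLOADER_H_
#define COMPONENTS_RAPPOR_LOG_UPLOADER_H_

#include <queue>
#include <string>

#include "base/memory/ref_counted.h"
#include "base/memory/scoped_ptr.h"
#include "base/time/time.h"
#include "base/timer/timer.h"
#include "net/url_request/url_fetcher.h"
#include "net/url_request/url_fetcher_delegate.h"
#include "net/url_request/url_request_context_getter.h"
#include "url/gurl.h"

namespace rappor {

// Uploads queued logs to a reporting server, backing off when uploads fail.
class LogUploader : public net::URLFetcherDelegate {
 public:
  LogUploader(const GURL& server_url,
              const std::string& mime_type,
              net::URLRequestContextGetter* request_context);
  virtual ~LogUploader();

  // Queues a log for upload and starts an upload if none is in progress.
  void QueueLog(const std::string& log);

 private:
  void ScheduleNextUpload();
  void StartScheduledUpload();
  void UploadFinished(bool server_is_healthy, bool more_logs_remaining);
  void BackOffUploadInterval();

  // net::URLFetcherDelegate implementation:
  virtual void OnURLFetchComplete(const net::URLFetcher* source) OVERRIDE;

  const GURL server_url_;
  const std::string mime_type_;
  scoped_refptr<net::URLRequestContextGetter> request_context_;
  scoped_ptr<net::URLFetcher> current_fetch_;
  bool has_callback_pending_;
  base::TimeDelta upload_interval_;
  base::OneShotTimer<LogUploader> upload_timer_;
  std::queue<std::string> queued_logs_;
};

}  // namespace rappor

#endif  // COMPONENTS_RAPPOR_LOG_UPLOADER_H_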
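
To show how the class is driven, here is a hypothetical call site. The UploadRapporReport helper, the endpoint URL, and the MIME type are all illustrative assumptions, not taken from the patch:

#include "components/rappor/log_uploader.h"

// Hypothetical helper; a real caller would own the uploader and supply the
// browser's URL request context.
void UploadRapporReport(net::URLRequestContextGetter* request_context,
                        const std::string& serialized_log) {
  static rappor::LogUploader* uploader = new rappor::LogUploader(
      GURL("https://example.com/rappor"),  // Illustrative endpoint.
      "application/octet-stream",          // Illustrative MIME type.
      request_context);
  // QueueLog() starts an upload immediately when the uploader is idle;
  // otherwise the log waits in the queue for the next scheduled attempt
  // (3 seconds later by default, longer after failures).
  uploader->QueueLog(serialized_log);
}

Because QueueLog() is fire-and-forget, the caller never learns whether an individual log was eventually uploaded or discarded under the rules in OnURLFetchComplete().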