Chromium Code Reviews
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "components/rappor/log_uploader.h"

#include "net/base/load_flags.h"

using base::TimeDelta;

namespace {

// The delay, in seconds, between uploading when there are queued logs to send.
const int kUnsentLogsIntervalSeconds = 3;

// When uploading metrics to the server fails, we progressively wait longer and
// longer before sending the next log. This backoff process helps reduce load
// on a server that is having issues.
// The following is the multiplier we use to expand that inter-log duration.
const double kBackoffMultiplier = 1.1;

// The maximum backoff multiplier.
const int kMaxBackoffMultiplier = 10;

// If an upload fails, and the transmission was over this byte count, then we
// will discard the log, and not try to retransmit it.
const size_t kUploadLogAvoidRetransmitSize = 50000;

// The maximum number of unsent logs we will keep.
// TODO(holte): Limit based on log size instead.
const size_t kMaxQueuedLogs = 10;

}  // namespace

namespace rappor {

LogUploader::LogUploader(const GURL& server_url,
                         const std::string& mime_type,
                         net::URLRequestContextGetter* request_context)
    : server_url_(server_url),
      mime_type_(mime_type),
      request_context_(request_context),
      has_callback_pending_(false),
      upload_interval_(TimeDelta::FromSeconds(kUnsentLogsIntervalSeconds)) {
}

LogUploader::~LogUploader() {}

void LogUploader::QueueLog(const std::string& log) {
  queued_logs_.push(log);
  if (!upload_timer_.IsRunning() && !has_callback_pending_)
    StartScheduledUpload();
}

void LogUploader::ScheduleNextUpload() {
  if (upload_timer_.IsRunning() || has_callback_pending_)
    return;

  upload_timer_.Start(
      FROM_HERE, upload_interval_, this, &LogUploader::StartScheduledUpload);
}

void LogUploader::StartScheduledUpload() {
  has_callback_pending_ = true;
  current_fetch_.reset(
      net::URLFetcher::Create(server_url_, net::URLFetcher::POST, this));
  current_fetch_->SetRequestContext(request_context_.get());
  current_fetch_->SetUploadData(mime_type_, queued_logs_.front());

  // We already drop cookies server-side, but we might as well strip them out
  // client-side as well.
  current_fetch_->SetLoadFlags(net::LOAD_DO_NOT_SAVE_COOKIES |
                               net::LOAD_DO_NOT_SEND_COOKIES);
  current_fetch_->Start();
}

void LogUploader::OnURLFetchComplete(const net::URLFetcher* source) {
  // We're not allowed to re-use the existing |URLFetcher|s, so free them here.
  // Note however that |source| is aliased to the fetcher, so we should be
  // careful not to delete it too early.
  DCHECK_EQ(current_fetch_.get(), source);
  scoped_ptr<net::URLFetcher> s(current_fetch_.Pass());

Alexei Svitkine (slow)
2014/01/24 22:09:03
Use more meaningful variable names than |s|.
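One way to address this (a sketch only, not part of the change, and the name below is just an illustration) is to name the pointer after why it is held:

  // Keeps the finished fetcher alive until the end of this method, since
  // |source| aliases it.
  scoped_ptr<net::URLFetcher> finished_fetch(current_fetch_.Pass());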

  int response_code = source->GetResponseCode();

  bool upload_succeeded = response_code == 200;

  // Provide boolean for error recovery (allow us to ignore response_code).
  bool discard_log = false;
  const size_t log_size = queued_logs_.front().length();
  if (!upload_succeeded && log_size > kUploadLogAvoidRetransmitSize) {
    discard_log = true;
|
Alexei Svitkine (slow)
2014/01/24 22:09:03
Add a histogram to log these sort of events, so we
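A minimal sketch of what that could look like, using the standard UMA macro from base/metrics/histogram.h (the metric name here is made up, not something this change defines):

  // Hypothetical metric recording the size of a log dropped after a failed
  // upload.
  UMA_HISTOGRAM_COUNTS("Rappor.DiscardedLogSize", log_size);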
  } else if (queued_logs_.size() > kMaxQueuedLogs) {
    discard_log = true;
  } else if (response_code == 400) {
    // Bad syntax. Retransmission won't work.
    discard_log = true;
  }

  if (upload_succeeded || discard_log)
    queued_logs_.pop();

  // Error 400 indicates a problem with the log, not with the server, so
  // don't consider that a sign that the server is in trouble.
  bool server_is_healthy = upload_succeeded || response_code == 400;
  UploadFinished(server_is_healthy, !queued_logs_.empty());
}

void LogUploader::UploadFinished(bool server_is_healthy,
                                 bool more_logs_remaining) {
  DCHECK(has_callback_pending_);
  has_callback_pending_ = false;
  // If the server is having issues, back off. Otherwise, reset to default.
  if (!server_is_healthy)
    BackOffUploadInterval();
  else
    upload_interval_ = TimeDelta::FromSeconds(kUnsentLogsIntervalSeconds);

  if (more_logs_remaining)
    ScheduleNextUpload();
}

void LogUploader::BackOffUploadInterval() {
  DCHECK_GT(kBackoffMultiplier, 1.0);
  upload_interval_ = TimeDelta::FromMicroseconds(static_cast<int64>(
      kBackoffMultiplier * upload_interval_.InMicroseconds()));

  TimeDelta max_interval = kMaxBackoffMultiplier *
      TimeDelta::FromSeconds(kUnsentLogsIntervalSeconds);
  if (upload_interval_ > max_interval || upload_interval_.InSeconds() < 0)
    upload_interval_ = max_interval;
}
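For reference, with the constants defined at the top of this file the retry interval grows from 3 s to roughly 3.3 s, 3.63 s, 3.99 s and so on (a 1.1x increase per consecutive failure), and is clamped at 10 * 3 s = 30 s, which is reached after about 25 failed uploads in a row.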
}  // namespace rappor
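For context, a caller drives this class roughly as follows. This is a sketch only; the server URL, MIME type, and the request-context and report variables below are placeholders rather than values taken from this change:

  // |request_context| is a net::URLRequestContextGetter* owned by the caller,
  // and |report| is an already-serialized log string.
  rappor::LogUploader uploader(GURL("https://example.com/rappor"),
                               "application/octet-stream",
                               request_context);
  uploader.QueueLog(report);  // Starts an upload if none is already pending.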