OLD | NEW |
(Empty) | |
| 1 // Copyright (c) 2013 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. |
| 4 |
| 5 #include "components/rappor/log_uploader.h" |
| 6 |
| 7 //#include "chrome/browser/metrics/compression_utils.h" |
| 8 #include "net/base/load_flags.h" |
| 9 #include "url/gurl.h" |
| 10 |
| 11 using base::TimeDelta; |
| 12 |
namespace {

// Initial delay, in seconds, between log uploads; this is also the value the
// interval is reset to after the server responds healthily.
const int kUnsentLogsIntervalSeconds = 3;

// When an upload fails, the wait before the next attempt grows by this
// factor each time, so a struggling server sees progressively less load.
const double kBackoffMultiplier = 1.1;

// Ceiling on the backoff, expressed as a multiple of the initial interval.
const int kMaxBackoffMultiplier = 10;

// A log bigger than this many bytes is dropped after a failed upload rather
// than retransmitted (and is not persisted for the next session either).
const size_t kUploadLogAvoidRetransmitSize = 50000;

// Upper bound on how many unsent logs are kept queued.
const size_t kMaxQueuedLogs = 10;

}  // namespace
| 38 |
| 39 namespace rappor { |
| 40 |
| 41 LogUploader::LogUploader(const char* server_url, const char* mime_type) |
| 42 : server_url_(server_url), mime_type_(mime_type), callback_pending_(false) { |
| 43 upload_interval_ = TimeDelta::FromSeconds(kUnsentLogsIntervalSeconds); |
| 44 } |
| 45 |
| 46 void LogUploader::SetRequestContext( |
| 47 net::URLRequestContextGetter* request_context) { |
| 48 request_context_ = request_context; |
| 49 } |
| 50 |
| 51 void LogUploader::QueueLog(const std::string& log) { |
| 52 queued_logs_.push(log); |
| 53 if (!upload_timer_.IsRunning() && !callback_pending_) { |
| 54 StartScheduledUpload(); |
| 55 } |
| 56 } |
| 57 |
| 58 void LogUploader::ScheduleNextUpload() { |
| 59 if (upload_timer_.IsRunning() || callback_pending_) |
| 60 return; |
| 61 |
| 62 upload_timer_.Start( |
| 63 FROM_HERE, upload_interval_, this, &LogUploader::StartScheduledUpload); |
| 64 } |
| 65 |
| 66 void LogUploader::StartScheduledUpload() { |
| 67 callback_pending_ = true; |
| 68 current_fetch_.reset( |
| 69 net::URLFetcher::Create(GURL(server_url_), net::URLFetcher::POST, this)); |
| 70 current_fetch_->SetRequestContext(request_context_); |
| 71 /* |
| 72 std::string compressed_log_text; |
| 73 bool compression_successful = |
| 74 chrome::GzipCompress(log_text, &compressed_log_text); |
| 75 DCHECK(compression_successful); |
| 76 if (compression_successful) { |
| 77 current_fetch_->SetUploadData(kMimeType, compressed_log_text); |
| 78 // Tell the server that we're uploading gzipped protobufs. |
| 79 current_fetch_->SetExtraRequestHeaders("content-encoding: gzip"); |
| 80 }*/ |
| 81 current_fetch_->SetUploadData(mime_type_, queued_logs_.front()); |
| 82 |
| 83 // We already drop cookies server-side, but we might as well strip them out |
| 84 // client-side as well. |
| 85 current_fetch_->SetLoadFlags(net::LOAD_DO_NOT_SAVE_COOKIES | |
| 86 net::LOAD_DO_NOT_SEND_COOKIES); |
| 87 current_fetch_->Start(); |
| 88 } |
| 89 |
| 90 void LogUploader::OnURLFetchComplete(const net::URLFetcher* source) { |
| 91 // We're not allowed to re-use the existing |URLFetcher|s, so free them here. |
| 92 // Note however that |source| is aliased to the fetcher, so we should be |
| 93 // careful not to delete it too early. |
| 94 DCHECK_EQ(current_fetch_.get(), source); |
| 95 scoped_ptr<net::URLFetcher> s(current_fetch_.Pass()); |
| 96 |
| 97 int response_code = source->GetResponseCode(); |
| 98 |
| 99 bool upload_succeeded = response_code == 200; |
| 100 |
| 101 // Provide boolean for error recovery (allow us to ignore response_code). |
| 102 bool discard_log = false; |
| 103 const size_t log_size = queued_logs_.front().length(); |
| 104 if (!upload_succeeded && log_size > kUploadLogAvoidRetransmitSize) { |
| 105 discard_log = true; |
| 106 } else if (queued_logs_.size() > kMaxQueuedLogs) { |
| 107 discard_log = true; |
| 108 } else if (response_code == 400) { |
| 109 // Bad syntax. Retransmission won't work. |
| 110 discard_log = true; |
| 111 } |
| 112 |
| 113 if (upload_succeeded || discard_log) |
| 114 queued_logs_.pop(); |
| 115 |
| 116 // Error 400 indicates a problem with the log, not with the server, so |
| 117 // don't consider that a sign that the server is in trouble. |
| 118 bool server_is_healthy = upload_succeeded || response_code == 400; |
| 119 UploadFinished(server_is_healthy, !queued_logs_.empty()); |
| 120 } |
| 121 |
| 122 void LogUploader::UploadFinished(bool server_is_healthy, |
| 123 bool more_logs_remaining) { |
| 124 DCHECK(callback_pending_); |
| 125 callback_pending_ = false; |
| 126 // If the server is having issues, back off. Otherwise, reset to default. |
| 127 if (!server_is_healthy) { |
| 128 BackOffUploadInterval(); |
| 129 } else { |
| 130 upload_interval_ = TimeDelta::FromSeconds(kUnsentLogsIntervalSeconds); |
| 131 } |
| 132 |
| 133 if (more_logs_remaining) { |
| 134 ScheduleNextUpload(); |
| 135 } |
| 136 } |
| 137 |
| 138 void LogUploader::BackOffUploadInterval() { |
| 139 DCHECK_GT(kBackoffMultiplier, 1.0); |
| 140 upload_interval_ = TimeDelta::FromMicroseconds(static_cast<int64>( |
| 141 kBackoffMultiplier * upload_interval_.InMicroseconds())); |
| 142 |
| 143 TimeDelta max_interval = kMaxBackoffMultiplier * |
| 144 TimeDelta::FromSeconds(kUnsentLogsIntervalSeconds); |
| 145 if (upload_interval_ > max_interval || upload_interval_.InSeconds() < 0) { |
| 146 upload_interval_ = max_interval; |
| 147 } |
| 148 } |
| 149 |
| 150 } // namespace rappor |
OLD | NEW |