Chromium Code Reviews

Side by Side Diff: components/rappor/log_uploader.cc

Issue 49753002: RAPPOR implementation (Closed)
Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Created 6 years, 11 months ago
// Copyright (c) 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "components/rappor/log_uploader.h"

//#include "chrome/browser/metrics/compression_utils.h"
#include "net/base/load_flags.h"
#include "url/gurl.h"

using base::TimeDelta;

namespace {

// The delay, in seconds, between uploads when there are queued logs to send.
const int kUnsentLogsIntervalSeconds = 3;

// When uploading metrics to the server fails, we progressively wait longer and
// longer before sending the next log. This backoff process helps reduce load
// on a server that is having issues.
// The following is the multiplier we use to expand that inter-log duration.
const double kBackoffMultiplier = 1.1;

// The maximum backoff multiplier.
const int kMaxBackoffMultiplier = 10;

// If an upload fails and the log is larger than this byte count, discard it
// rather than retrying, and do not persist it to prefs for transmission
// during the next Chrome session.
const size_t kUploadLogAvoidRetransmitSize = 50000;

// The maximum number of unsent logs we will keep.
const size_t kMaxQueuedLogs = 10;

}  // namespace
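To make the retry policy behind these constants concrete: with a 3-second base interval, a 1.1x multiplier per failed upload, and a cap of 10x the base, the retry delay climbs from 3 s toward 30 s and reaches the cap after roughly 25 consecutive failures. A standalone sketch of that progression, mirroring the rule that BackOffUploadInterval() applies further down (illustrative only, not part of this patch):

#include <algorithm>
#include <cstdio>

int main() {
  const double base_seconds = 3.0;               // kUnsentLogsIntervalSeconds
  const double multiplier = 1.1;                 // kBackoffMultiplier
  const double max_seconds = 10 * base_seconds;  // kMaxBackoffMultiplier cap
  double interval = base_seconds;
  for (int failures = 1; failures <= 30; ++failures) {
    // Grow the interval on each failure, then clamp it to the cap.
    interval = std::min(interval * multiplier, max_seconds);
    std::printf("after %2d consecutive failures: %5.2f s\n", failures, interval);
  }
  return 0;
}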

namespace rappor {

LogUploader::LogUploader(const char* server_url, const char* mime_type)
    : server_url_(server_url),
      mime_type_(mime_type),
      callback_pending_(false),
      upload_interval_(TimeDelta::FromSeconds(kUnsentLogsIntervalSeconds)) {
}

void LogUploader::SetRequestContext(
    net::URLRequestContextGetter* request_context) {
  request_context_ = request_context;
}

void LogUploader::QueueLog(const std::string& log) {
  queued_logs_.push(log);
  if (!upload_timer_.IsRunning() && !callback_pending_) {
Alexei Svitkine (slow) 2014/01/09 19:23:09 Nit: No need for {}s
Steven Holte 2014/01/09 22:03:01 Done.
    StartScheduledUpload();
  }
}

void LogUploader::ScheduleNextUpload() {
  if (upload_timer_.IsRunning() || callback_pending_)
    return;

  upload_timer_.Start(
      FROM_HERE, upload_interval_, this, &LogUploader::StartScheduledUpload);
}

void LogUploader::StartScheduledUpload() {
  callback_pending_ = true;
  current_fetch_.reset(
      net::URLFetcher::Create(GURL(server_url_), net::URLFetcher::POST, this));
  current_fetch_->SetRequestContext(request_context_);
  /*
  std::string compressed_log_text;
  bool compression_successful =
      chrome::GzipCompress(log_text, &compressed_log_text);
  DCHECK(compression_successful);
  if (compression_successful) {
    current_fetch_->SetUploadData(kMimeType, compressed_log_text);
    // Tell the server that we're uploading gzipped protobufs.
    current_fetch_->SetExtraRequestHeaders("content-encoding: gzip");
  }
  */
  current_fetch_->SetUploadData(mime_type_, queued_logs_.front());

  // We already drop cookies server-side, but we might as well strip them out
  // client-side as well.
  current_fetch_->SetLoadFlags(net::LOAD_DO_NOT_SAVE_COOKIES |
                               net::LOAD_DO_NOT_SEND_COOKIES);
  current_fetch_->Start();
}

void LogUploader::OnURLFetchComplete(const net::URLFetcher* source) {
  // We're not allowed to re-use the existing |URLFetcher|s, so free them here.
  // Note however that |source| is aliased to the fetcher, so we should be
  // careful not to delete it too early.
  DCHECK_EQ(current_fetch_.get(), source);
  scoped_ptr<net::URLFetcher> fetcher(current_fetch_.Pass());

  const int response_code = source->GetResponseCode();
  const bool upload_succeeded = response_code == 200;

  // Decide whether to drop the front log rather than retry it; this lets the
  // code below ignore the exact response code.
  bool discard_log = false;
  const size_t log_size = queued_logs_.front().length();
  if (!upload_succeeded && log_size > kUploadLogAvoidRetransmitSize) {
    discard_log = true;
  } else if (queued_logs_.size() > kMaxQueuedLogs) {
    discard_log = true;
  } else if (response_code == 400) {
    // Bad syntax. Retransmission won't work.
    discard_log = true;
  }

  if (upload_succeeded || discard_log)
    queued_logs_.pop();

  // Error 400 indicates a problem with the log, not with the server, so
  // don't consider that a sign that the server is in trouble.
  const bool server_is_healthy = upload_succeeded || response_code == 400;
  UploadFinished(server_is_healthy, !queued_logs_.empty());
}

void LogUploader::UploadFinished(bool server_is_healthy,
                                 bool more_logs_remaining) {
  DCHECK(callback_pending_);
  callback_pending_ = false;
  // If the server is having issues, back off. Otherwise, reset to default.
  if (!server_is_healthy) {
    BackOffUploadInterval();
  } else {
    upload_interval_ = TimeDelta::FromSeconds(kUnsentLogsIntervalSeconds);
  }

  if (more_logs_remaining) {
    ScheduleNextUpload();
  }
}

void LogUploader::BackOffUploadInterval() {
  DCHECK_GT(kBackoffMultiplier, 1.0);
  upload_interval_ = TimeDelta::FromMicroseconds(static_cast<int64>(
      kBackoffMultiplier * upload_interval_.InMicroseconds()));

  TimeDelta max_interval = kMaxBackoffMultiplier *
      TimeDelta::FromSeconds(kUnsentLogsIntervalSeconds);
  if (upload_interval_ > max_interval || upload_interval_.InSeconds() < 0) {
    upload_interval_ = max_interval;
  }
}

}  // namespace rappor
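For context, a minimal sketch of how a caller might drive this uploader, using only the interface visible in this file (the constructor, SetRequestContext() and QueueLog()). The URL, MIME type, function name and ownership arrangement below are illustrative assumptions, not part of the change:

#include <string>

#include "components/rappor/log_uploader.h"
#include "net/url_request/url_request_context_getter.h"

// Hypothetical glue code: wires a request context into the uploader and
// queues one serialized report.
void QueueRapporReport(rappor::LogUploader* uploader,
                       net::URLRequestContextGetter* request_context,
                       const std::string& serialized_report) {
  // The uploader needs a request context before it can create URLFetchers.
  uploader->SetRequestContext(request_context);
  // Starts an upload right away if the timer is idle and no fetch is pending;
  // otherwise the log waits in the queue for its turn.
  uploader->QueueLog(serialized_report);
}

// A long-lived owner would hold the uploader itself, e.g.:
//   rappor::LogUploader uploader("https://example.com/rappor-upload",
//                                "application/octet-stream");
//   QueueRapporReport(&uploader, request_context, report_bytes);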