Chromium Code Reviews

Side by Side Diff: net/url_request/url_request_job.cc

Issue 6382003: Reorder the methods in net/url_request/. (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Compiling net_unittests != compiling the rest of chrome
1 // Copyright (c) 2010 The Chromium Authors. All rights reserved. 1 // Copyright (c) 2010 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "net/url_request/url_request_job.h" 5 #include "net/url_request/url_request_job.h"
6 6
7 #include "base/message_loop.h" 7 #include "base/message_loop.h"
8 #include "base/metrics/histogram.h" 8 #include "base/metrics/histogram.h"
9 #include "base/string_number_conversions.h" 9 #include "base/string_number_conversions.h"
10 #include "base/string_util.h" 10 #include "base/string_util.h"
(...skipping 35 matching lines...)
46 observed_packet_count_(0) { 46 observed_packet_count_(0) {
47 load_flags_ = request_->load_flags(); 47 load_flags_ = request_->load_flags();
48 is_profiling_ = request->enable_profiling(); 48 is_profiling_ = request->enable_profiling();
49 if (is_profiling()) { 49 if (is_profiling()) {
50 metrics_.reset(new URLRequestJobMetrics()); 50 metrics_.reset(new URLRequestJobMetrics());
51 metrics_->start_time_ = TimeTicks::Now(); 51 metrics_->start_time_ = TimeTicks::Now();
52 } 52 }
53 g_url_request_job_tracker.AddNewJob(this); 53 g_url_request_job_tracker.AddNewJob(this);
54 } 54 }
55 55
56 URLRequestJob::~URLRequestJob() {
57 g_url_request_job_tracker.RemoveJob(this);
58 }
59
60 void URLRequestJob::SetUpload(net::UploadData* upload) { 56 void URLRequestJob::SetUpload(net::UploadData* upload) {
61 } 57 }
62 58
63 void URLRequestJob::SetExtraRequestHeaders( 59 void URLRequestJob::SetExtraRequestHeaders(
64 const net::HttpRequestHeaders& headers) { 60 const net::HttpRequestHeaders& headers) {
65 } 61 }
66 62
67 void URLRequestJob::Kill() { 63 void URLRequestJob::Kill() {
68 // Make sure the request is notified that we are done. We assume that the 64 // Make sure the request is notified that we are done. We assume that the
69 // request took care of setting its error status before calling Kill. 65 // request took care of setting its error status before calling Kill.
70 if (request_) 66 if (request_)
71 NotifyCanceled(); 67 NotifyCanceled();
72 } 68 }
73 69
74 void URLRequestJob::DetachRequest() { 70 void URLRequestJob::DetachRequest() {
75 request_ = NULL; 71 request_ = NULL;
76 } 72 }
77 73
74 // This function calls ReadData to get stream data. If a filter exists, passes
75 // the data to the attached filter. Then returns the output from filter back to
76 // the caller.
77 bool URLRequestJob::Read(net::IOBuffer* buf, int buf_size, int *bytes_read) {
78 bool rv = false;
79
80 DCHECK_LT(buf_size, 1000000); // sanity check
81 DCHECK(buf);
82 DCHECK(bytes_read);
83 DCHECK(filtered_read_buffer_ == NULL);
84 DCHECK_EQ(0, filtered_read_buffer_len_);
85
86 *bytes_read = 0;
87
88 // Skip Filter if not present
89 if (!filter_.get()) {
90 rv = ReadRawDataHelper(buf, buf_size, bytes_read);
91 } else {
92 // Save the caller's buffers while we do IO
93 // in the filter's buffers.
94 filtered_read_buffer_ = buf;
95 filtered_read_buffer_len_ = buf_size;
96
97 if (ReadFilteredData(bytes_read)) {
98 rv = true; // we have data to return
99 } else {
100 rv = false; // error, or a new IO is pending
101 }
102 }
103 if (rv && *bytes_read == 0)
104 NotifyDone(URLRequestStatus());
105 return rv;
106 }
107
108 void URLRequestJob::StopCaching() {
109 // Nothing to do here.
110 }
111
112 net::LoadState URLRequestJob::GetLoadState() const {
113 return net::LOAD_STATE_IDLE;
114 }
115
116 uint64 URLRequestJob::GetUploadProgress() const {
117 return 0;
118 }
119
120 bool URLRequestJob::GetCharset(std::string* charset) {
121 return false;
122 }
123
124 void URLRequestJob::GetResponseInfo(net::HttpResponseInfo* info) {
125 }
126
127 bool URLRequestJob::GetResponseCookies(std::vector<std::string>* cookies) {
128 return false;
129 }
130
131 bool URLRequestJob::GetContentEncodings(
132 std::vector<Filter::FilterType>* encoding_types) {
133 return false;
134 }
135
78 void URLRequestJob::SetupFilter() { 136 void URLRequestJob::SetupFilter() {
79 std::vector<Filter::FilterType> encoding_types; 137 std::vector<Filter::FilterType> encoding_types;
80 if (GetContentEncodings(&encoding_types)) { 138 if (GetContentEncodings(&encoding_types)) {
81 filter_.reset(Filter::Factory(encoding_types, *this)); 139 filter_.reset(Filter::Factory(encoding_types, *this));
82 } 140 }
83 } 141 }
84 142
85 bool URLRequestJob::IsRedirectResponse(GURL* location, 143 bool URLRequestJob::IsRedirectResponse(GURL* location,
86 int* http_status_code) { 144 int* http_status_code) {
87 // For non-HTTP jobs, headers will be null. 145 // For non-HTTP jobs, headers will be null.
(...skipping 63 matching lines...)
151 209
152 GURL redirect_url = deferred_redirect_url_; 210 GURL redirect_url = deferred_redirect_url_;
153 int redirect_status_code = deferred_redirect_status_code_; 211 int redirect_status_code = deferred_redirect_status_code_;
154 212
155 deferred_redirect_url_ = GURL(); 213 deferred_redirect_url_ = GURL();
156 deferred_redirect_status_code_ = -1; 214 deferred_redirect_status_code_ = -1;
157 215
158 FollowRedirect(redirect_url, redirect_status_code); 216 FollowRedirect(redirect_url, redirect_status_code);
159 } 217 }
160 218
161 int64 URLRequestJob::GetByteReadCount() const { 219 URLRequestJobMetrics* URLRequestJob::RetrieveMetrics() {
162 return filter_input_byte_count_; 220 if (is_profiling())
221 return metrics_.release();
222 else
223 return NULL;
163 } 224 }
164 225
165 bool URLRequestJob::GetMimeType(std::string* mime_type) const { 226 bool URLRequestJob::GetMimeType(std::string* mime_type) const {
166 return false; 227 return false;
167 } 228 }
168 229
169 bool URLRequestJob::GetURL(GURL* gurl) const { 230 bool URLRequestJob::GetURL(GURL* gurl) const {
170 if (!request_) 231 if (!request_)
171 return false; 232 return false;
172 *gurl = request_->url(); 233 *gurl = request_->url();
173 return true; 234 return true;
174 } 235 }
175 236
176 base::Time URLRequestJob::GetRequestTime() const { 237 base::Time URLRequestJob::GetRequestTime() const {
177 if (!request_) 238 if (!request_)
178 return base::Time(); 239 return base::Time();
179 return request_->request_time(); 240 return request_->request_time();
180 }; 241 }
242
243 bool URLRequestJob::IsDownload() const {
244 return (load_flags_ & net::LOAD_IS_DOWNLOAD) != 0;
245 }
246
247 bool URLRequestJob::IsSdchResponse() const {
248 return false;
249 }
181 250
182 bool URLRequestJob::IsCachedContent() const { 251 bool URLRequestJob::IsCachedContent() const {
183 return false; 252 return false;
184 } 253 }
185 254
255 int64 URLRequestJob::GetByteReadCount() const {
256 return filter_input_byte_count_;
257 }
258
186 int URLRequestJob::GetResponseCode() const { 259 int URLRequestJob::GetResponseCode() const {
187 return -1; 260 return -1;
188 } 261 }
189 262
190 int URLRequestJob::GetInputStreamBufferSize() const { 263 int URLRequestJob::GetInputStreamBufferSize() const {
191 return kFilterBufSize; 264 return kFilterBufSize;
192 } 265 }
193 266
194 // This function calls ReadData to get stream data. If a filter exists, passes 267 void URLRequestJob::RecordPacketStats(StatisticSelector statistic) const {
195 // the data to the attached filter. Then returns the output from filter back to 268 if (!packet_timing_enabled_ || (final_packet_time_ == base::Time()))
196 // the caller. 269 return;
197 bool URLRequestJob::Read(net::IOBuffer* buf, int buf_size, int *bytes_read) {
198 bool rv = false;
199 270
200 DCHECK_LT(buf_size, 1000000); // sanity check 271 // Caller should verify that we're not cached content, but we can't always
201 DCHECK(buf); 272 // really check for it here because we may (at destruction time) call our own
202 DCHECK(bytes_read); 273 // class method and get a bogus const answer of false. This DCHECK only helps
203 DCHECK(filtered_read_buffer_ == NULL); 274 // when this method has a valid overridden definition.
204 DCHECK_EQ(0, filtered_read_buffer_len_); 275 DCHECK(!IsCachedContent());
205 276
206 *bytes_read = 0; 277 base::TimeDelta duration = final_packet_time_ - request_time_snapshot_;
278 switch (statistic) {
279 case SDCH_DECODE: {
280 UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Decode_Latency_F_a", duration,
281 base::TimeDelta::FromMilliseconds(20),
282 base::TimeDelta::FromMinutes(10), 100);
283 UMA_HISTOGRAM_COUNTS_100("Sdch3.Network_Decode_Packets_b",
284 static_cast<int>(observed_packet_count_));
285 UMA_HISTOGRAM_CUSTOM_COUNTS("Sdch3.Network_Decode_Bytes_Processed_b",
286 static_cast<int>(bytes_observed_in_packets_), 500, 100000, 100);
287 if (packet_times_.empty())
288 return;
289 UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Decode_1st_To_Last_a",
290 final_packet_time_ - packet_times_[0],
291 base::TimeDelta::FromMilliseconds(20),
292 base::TimeDelta::FromMinutes(10), 100);
207 293
208 // Skip Filter if not present 294 DCHECK(max_packets_timed_ >= kSdchPacketHistogramCount);
209 if (!filter_.get()) { 295 DCHECK(kSdchPacketHistogramCount > 4);
210 rv = ReadRawDataHelper(buf, buf_size, bytes_read); 296 if (packet_times_.size() <= 4)
211 } else { 297 return;
212 // Save the caller's buffers while we do IO 298 UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Decode_1st_To_2nd_c",
213 // in the filter's buffers. 299 packet_times_[1] - packet_times_[0],
214 filtered_read_buffer_ = buf; 300 base::TimeDelta::FromMilliseconds(1),
215 filtered_read_buffer_len_ = buf_size; 301 base::TimeDelta::FromSeconds(10), 100);
302 UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Decode_2nd_To_3rd_c",
303 packet_times_[2] - packet_times_[1],
304 base::TimeDelta::FromMilliseconds(1),
305 base::TimeDelta::FromSeconds(10), 100);
306 UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Decode_3rd_To_4th_c",
307 packet_times_[3] - packet_times_[2],
308 base::TimeDelta::FromMilliseconds(1),
309 base::TimeDelta::FromSeconds(10), 100);
310 UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Decode_4th_To_5th_c",
311 packet_times_[4] - packet_times_[3],
312 base::TimeDelta::FromMilliseconds(1),
313 base::TimeDelta::FromSeconds(10), 100);
314 return;
315 }
316 case SDCH_PASSTHROUGH: {
317 // Despite advertising a dictionary, we handled non-sdch compressed
318 // content.
319 UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Pass-through_Latency_F_a",
320 duration,
321 base::TimeDelta::FromMilliseconds(20),
322 base::TimeDelta::FromMinutes(10), 100);
323 UMA_HISTOGRAM_COUNTS_100("Sdch3.Network_Pass-through_Packets_b",
324 observed_packet_count_);
325 if (packet_times_.empty())
326 return;
327 UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Pass-through_1st_To_Last_a",
328 final_packet_time_ - packet_times_[0],
329 base::TimeDelta::FromMilliseconds(20),
330 base::TimeDelta::FromMinutes(10), 100);
331 DCHECK(max_packets_timed_ >= kSdchPacketHistogramCount);
332 DCHECK(kSdchPacketHistogramCount > 4);
333 if (packet_times_.size() <= 4)
334 return;
335 UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Pass-through_1st_To_2nd_c",
336 packet_times_[1] - packet_times_[0],
337 base::TimeDelta::FromMilliseconds(1),
338 base::TimeDelta::FromSeconds(10), 100);
339 UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Pass-through_2nd_To_3rd_c",
340 packet_times_[2] - packet_times_[1],
341 base::TimeDelta::FromMilliseconds(1),
342 base::TimeDelta::FromSeconds(10), 100);
343 UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Pass-through_3rd_To_4th_c",
344 packet_times_[3] - packet_times_[2],
345 base::TimeDelta::FromMilliseconds(1),
346 base::TimeDelta::FromSeconds(10), 100);
347 UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Pass-through_4th_To_5th_c",
348 packet_times_[4] - packet_times_[3],
349 base::TimeDelta::FromMilliseconds(1),
350 base::TimeDelta::FromSeconds(10), 100);
351 return;
352 }
216 353
217 if (ReadFilteredData(bytes_read)) { 354 case SDCH_EXPERIMENT_DECODE: {
218 rv = true; // we have data to return 355 UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Experiment_Decode",
219 } else { 356 duration,
220 rv = false; // error, or a new IO is pending 357 base::TimeDelta::FromMilliseconds(20),
358 base::TimeDelta::FromMinutes(10), 100);
359 // We already provided interpacket histograms above in the SDCH_DECODE
360 // case, so we don't need them here.
361 return;
221 } 362 }
363 case SDCH_EXPERIMENT_HOLDBACK: {
364 UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Experiment_Holdback",
365 duration,
366 base::TimeDelta::FromMilliseconds(20),
367 base::TimeDelta::FromMinutes(10), 100);
368 UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Experiment_Holdback_1st_To_Last_a",
369 final_packet_time_ - packet_times_[0],
370 base::TimeDelta::FromMilliseconds(20),
371 base::TimeDelta::FromMinutes(10), 100);
372
373 DCHECK(max_packets_timed_ >= kSdchPacketHistogramCount);
374 DCHECK(kSdchPacketHistogramCount > 4);
375 if (packet_times_.size() <= 4)
376 return;
377 UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Experiment_Holdback_1st_To_2nd_c",
378 packet_times_[1] - packet_times_[0],
379 base::TimeDelta::FromMilliseconds(1),
380 base::TimeDelta::FromSeconds(10), 100);
381 UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Experiment_Holdback_2nd_To_3rd_c",
382 packet_times_[2] - packet_times_[1],
383 base::TimeDelta::FromMilliseconds(1),
384 base::TimeDelta::FromSeconds(10), 100);
385 UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Experiment_Holdback_3rd_To_4th_c",
386 packet_times_[3] - packet_times_[2],
387 base::TimeDelta::FromMilliseconds(1),
388 base::TimeDelta::FromSeconds(10), 100);
389 UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Experiment_Holdback_4th_To_5th_c",
390 packet_times_[4] - packet_times_[3],
391 base::TimeDelta::FromMilliseconds(1),
392 base::TimeDelta::FromSeconds(10), 100);
393 return;
394 }
395 default:
396 NOTREACHED();
397 return;
222 } 398 }
223 if (rv && *bytes_read == 0)
224 NotifyDone(URLRequestStatus());
225 return rv;
226 } 399 }
227 400
228 void URLRequestJob::StopCaching() { 401 URLRequestJob::~URLRequestJob() {
229 // Nothing to do here. 402 g_url_request_job_tracker.RemoveJob(this);
230 }
231
232 net::LoadState URLRequestJob::GetLoadState() const {
233 return net::LOAD_STATE_IDLE;
234 }
235
236 uint64 URLRequestJob::GetUploadProgress() const {
237 return 0;
238 }
239
240 bool URLRequestJob::GetCharset(std::string* charset) {
241 return false;
242 }
243
244 void URLRequestJob::GetResponseInfo(net::HttpResponseInfo* info) {
245 }
246
247 bool URLRequestJob::GetResponseCookies(std::vector<std::string>* cookies) {
248 return false;
249 }
250
251 bool URLRequestJob::GetContentEncodings(
252 std::vector<Filter::FilterType>* encoding_types) {
253 return false;
254 }
255
256 bool URLRequestJob::IsDownload() const {
257 return (load_flags_ & net::LOAD_IS_DOWNLOAD) != 0;
258 }
259
260 bool URLRequestJob::IsSdchResponse() const {
261 return false;
262 }
263
264 bool URLRequestJob::ReadRawDataForFilter(int* bytes_read) {
265 bool rv = false;
266
267 DCHECK(bytes_read);
268 DCHECK(filter_.get());
269
270 *bytes_read = 0;
271
272 // Get more pre-filtered data if needed.
273 // TODO(mbelshe): is it possible that the filter needs *MORE* data
274 // when there is some data already in the buffer?
275 if (!filter_->stream_data_len() && !is_done()) {
276 net::IOBuffer* stream_buffer = filter_->stream_buffer();
277 int stream_buffer_size = filter_->stream_buffer_size();
278 rv = ReadRawDataHelper(stream_buffer, stream_buffer_size, bytes_read);
279 }
280 return rv;
281 }
282
283 void URLRequestJob::FollowRedirect(const GURL& location, int http_status_code) {
284 g_url_request_job_tracker.OnJobRedirect(this, location, http_status_code);
285
286 int rv = request_->Redirect(location, http_status_code);
287 if (rv != net::OK)
288 NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, rv));
289 }
290
291 void URLRequestJob::FilteredDataRead(int bytes_read) {
292 DCHECK(filter_.get()); // don't add data if there is no filter
293 filter_->FlushStreamBuffer(bytes_read);
294 }
295
296 bool URLRequestJob::ReadFilteredData(int* bytes_read) {
297 DCHECK(filter_.get()); // don't add data if there is no filter
298 DCHECK(filtered_read_buffer_ != NULL); // we need to have a buffer to fill
299 DCHECK_GT(filtered_read_buffer_len_, 0); // sanity check
300 DCHECK_LT(filtered_read_buffer_len_, 1000000); // sanity check
301 DCHECK(raw_read_buffer_ == NULL); // there should be no raw read buffer yet
302
303 bool rv = false;
304 *bytes_read = 0;
305
306 if (is_done())
307 return true;
308
309 if (!filter_needs_more_output_space_ && !filter_->stream_data_len()) {
310 // We don't have any raw data to work with, so
311 // read from the socket.
312 int filtered_data_read;
313 if (ReadRawDataForFilter(&filtered_data_read)) {
314 if (filtered_data_read > 0) {
315 filter_->FlushStreamBuffer(filtered_data_read); // Give data to filter.
316 } else {
317 return true; // EOF
318 }
319 } else {
320 return false; // IO Pending (or error)
321 }
322 }
323
324 if ((filter_->stream_data_len() || filter_needs_more_output_space_)
325 && !is_done()) {
326 // Get filtered data.
327 int filtered_data_len = filtered_read_buffer_len_;
328 Filter::FilterStatus status;
329 int output_buffer_size = filtered_data_len;
330 status = filter_->ReadData(filtered_read_buffer_->data(),
331 &filtered_data_len);
332
333 if (filter_needs_more_output_space_ && 0 == filtered_data_len) {
334 // filter_needs_more_output_space_ was mistaken... there are no more bytes
335 // and we should have at least tried to fill up the filter's input buffer.
336 // Correct the state, and try again.
337 filter_needs_more_output_space_ = false;
338 return ReadFilteredData(bytes_read);
339 }
340
341 switch (status) {
342 case Filter::FILTER_DONE: {
343 filter_needs_more_output_space_ = false;
344 *bytes_read = filtered_data_len;
345 rv = true;
346 break;
347 }
348 case Filter::FILTER_NEED_MORE_DATA: {
349 filter_needs_more_output_space_ =
350 (filtered_data_len == output_buffer_size);
351 // We have finished filtering all data currently in the buffer.
352 // There might be some space left in the output buffer. One can
353 // consider reading more data from the stream to feed the filter
354 // and filling up the output buffer. This leads to more complicated
355 // buffer management and data notification mechanisms.
356 // We can revisit this issue if there is a real perf need.
357 if (filtered_data_len > 0) {
358 *bytes_read = filtered_data_len;
359 rv = true;
360 } else {
361 // Read again since we haven't received enough data yet (e.g., we may
362 // not have a complete gzip header yet)
363 rv = ReadFilteredData(bytes_read);
364 }
365 break;
366 }
367 case Filter::FILTER_OK: {
368 filter_needs_more_output_space_ =
369 (filtered_data_len == output_buffer_size);
370 *bytes_read = filtered_data_len;
371 rv = true;
372 break;
373 }
374 case Filter::FILTER_ERROR: {
375 filter_needs_more_output_space_ = false;
376 NotifyDone(URLRequestStatus(URLRequestStatus::FAILED,
377 net::ERR_CONTENT_DECODING_FAILED));
378 rv = false;
379 break;
380 }
381 default: {
382 NOTREACHED();
383 filter_needs_more_output_space_ = false;
384 rv = false;
385 break;
386 }
387 }
388 } else {
389 // we are done, or there is no data left.
390 rv = true;
391 }
392
393 if (rv) {
394 // When we successfully finished a read, we no longer need to
395 // save the caller's buffers. Release our reference.
396 filtered_read_buffer_ = NULL;
397 filtered_read_buffer_len_ = 0;
398 }
399 return rv;
400 }
401
402 bool URLRequestJob::ReadRawDataHelper(net::IOBuffer* buf, int buf_size,
403 int* bytes_read) {
404 DCHECK(!request_->status().is_io_pending());
405 DCHECK(raw_read_buffer_ == NULL);
406
407 // Keep a pointer to the read buffer, so we have access to it in the
408 // OnRawReadComplete() callback in the event that the read completes
409 // asynchronously.
410 raw_read_buffer_ = buf;
411 bool rv = ReadRawData(buf, buf_size, bytes_read);
412
413 if (!request_->status().is_io_pending()) {
414 // If the read completes synchronously, either success or failure,
415 // invoke the OnRawReadComplete callback so we can account for the
416 // completed read.
417 OnRawReadComplete(*bytes_read);
418 }
419 return rv;
420 }
421
422 bool URLRequestJob::ReadRawData(net::IOBuffer* buf, int buf_size,
423 int *bytes_read) {
424 DCHECK(bytes_read);
425 *bytes_read = 0;
426 NotifyDone(URLRequestStatus());
427 return false;
428 }
429
430 URLRequestJobMetrics* URLRequestJob::RetrieveMetrics() {
431 if (is_profiling())
432 return metrics_.release();
433 else
434 return NULL;
435 } 403 }
436 404
437 void URLRequestJob::NotifyHeadersComplete() { 405 void URLRequestJob::NotifyHeadersComplete() {
438 if (!request_ || !request_->delegate()) 406 if (!request_ || !request_->delegate())
439 return; // The request was destroyed, so there is no more work to do. 407 return; // The request was destroyed, so there is no more work to do.
440 408
441 if (has_handled_response_) 409 if (has_handled_response_)
442 return; 410 return;
443 411
444 DCHECK(!request_->status().is_io_pending()); 412 DCHECK(!request_->status().is_io_pending());
(...skipping 77 matching lines...)
522 } else { 490 } else {
523 // Chrome today only sends "Accept-Encoding" for compression schemes. 491 // Chrome today only sends "Accept-Encoding" for compression schemes.
524 // So, if there is a filter on the response, we know that the content 492 // So, if there is a filter on the response, we know that the content
525 // was compressed. 493 // was compressed.
526 is_compressed_ = true; 494 is_compressed_ = true;
527 } 495 }
528 496
529 request_->ResponseStarted(); 497 request_->ResponseStarted();
530 } 498 }
531 499
532 void URLRequestJob::NotifyStartError(const URLRequestStatus &status) {
533 DCHECK(!has_handled_response_);
534 has_handled_response_ = true;
535 if (request_) {
536 request_->set_status(status);
537 request_->ResponseStarted();
538 }
539 }
540
541 void URLRequestJob::NotifyReadComplete(int bytes_read) { 500 void URLRequestJob::NotifyReadComplete(int bytes_read) {
542 if (!request_ || !request_->delegate()) 501 if (!request_ || !request_->delegate())
543 return; // The request was destroyed, so there is no more work to do. 502 return; // The request was destroyed, so there is no more work to do.
544 503
545 // TODO(darin): Bug 1004233. Re-enable this test once all of the chrome 504 // TODO(darin): Bug 1004233. Re-enable this test once all of the chrome
546 // unit_tests have been fixed to not trip this. 505 // unit_tests have been fixed to not trip this.
547 //DCHECK(!request_->status().is_io_pending()); 506 //DCHECK(!request_->status().is_io_pending());
548 507
549 // The headers should be complete before reads complete 508 // The headers should be complete before reads complete
550 DCHECK(has_handled_response_); 509 DCHECK(has_handled_response_);
(...skipping 21 matching lines...)
572 if (ReadFilteredData(&filter_bytes_read)) { 531 if (ReadFilteredData(&filter_bytes_read)) {
573 postfilter_bytes_read_ += filter_bytes_read; 532 postfilter_bytes_read_ += filter_bytes_read;
574 request_->delegate()->OnReadCompleted(request_, filter_bytes_read); 533 request_->delegate()->OnReadCompleted(request_, filter_bytes_read);
575 } 534 }
576 } else { 535 } else {
577 postfilter_bytes_read_ += bytes_read; 536 postfilter_bytes_read_ += bytes_read;
578 request_->delegate()->OnReadCompleted(request_, bytes_read); 537 request_->delegate()->OnReadCompleted(request_, bytes_read);
579 } 538 }
580 } 539 }
581 540
541 void URLRequestJob::NotifyStartError(const URLRequestStatus &status) {
542 DCHECK(!has_handled_response_);
543 has_handled_response_ = true;
544 if (request_) {
545 request_->set_status(status);
546 request_->ResponseStarted();
547 }
548 }
549
582 void URLRequestJob::NotifyDone(const URLRequestStatus &status) { 550 void URLRequestJob::NotifyDone(const URLRequestStatus &status) {
583 DCHECK(!done_) << "Job sending done notification twice"; 551 DCHECK(!done_) << "Job sending done notification twice";
584 if (done_) 552 if (done_)
585 return; 553 return;
586 done_ = true; 554 done_ = true;
587 555
588 RecordCompressionHistograms(); 556 RecordCompressionHistograms();
589 557
590 if (is_profiling() && metrics_->total_bytes_read_ > 0) { 558 if (is_profiling() && metrics_->total_bytes_read_ > 0) {
591 // There are valid IO statistics. Fill in other fields of metrics for 559 // There are valid IO statistics. Fill in other fields of metrics for
(...skipping 59 matching lines...)
651 net::ERR_ABORTED)); 619 net::ERR_ABORTED));
652 } 620 }
653 } 621 }
654 622
655 void URLRequestJob::NotifyRestartRequired() { 623 void URLRequestJob::NotifyRestartRequired() {
656 DCHECK(!has_handled_response_); 624 DCHECK(!has_handled_response_);
657 if (GetStatus().status() != URLRequestStatus::CANCELED) 625 if (GetStatus().status() != URLRequestStatus::CANCELED)
658 request_->Restart(); 626 request_->Restart();
659 } 627 }
660 628
661 bool URLRequestJob::FilterHasData() { 629 bool URLRequestJob::ReadRawData(net::IOBuffer* buf, int buf_size,
662 return filter_.get() && filter_->stream_data_len(); 630 int *bytes_read) {
631 DCHECK(bytes_read);
632 *bytes_read = 0;
633 NotifyDone(URLRequestStatus());
634 return false;
663 } 635 }
664 636
665 void URLRequestJob::OnRawReadComplete(int bytes_read) { 637 void URLRequestJob::FilteredDataRead(int bytes_read) {
666 DCHECK(raw_read_buffer_); 638 DCHECK(filter_.get()); // don't add data if there is no filter
667 if (bytes_read > 0) { 639 filter_->FlushStreamBuffer(bytes_read);
668 RecordBytesRead(bytes_read);
669 }
670 raw_read_buffer_ = NULL;
671 } 640 }
672 641
673 void URLRequestJob::RecordBytesRead(int bytes_read) { 642 bool URLRequestJob::ReadFilteredData(int* bytes_read) {
674 if (is_profiling()) { 643 DCHECK(filter_.get()); // don't add data if there is no filter
675 ++(metrics_->number_of_read_IO_); 644 DCHECK(filtered_read_buffer_ != NULL); // we need to have a buffer to fill
676 metrics_->total_bytes_read_ += bytes_read; 645 DCHECK_GT(filtered_read_buffer_len_, 0); // sanity check
646 DCHECK_LT(filtered_read_buffer_len_, 1000000); // sanity check
647 DCHECK(raw_read_buffer_ == NULL); // there should be no raw read buffer yet
648
649 bool rv = false;
650 *bytes_read = 0;
651
652 if (is_done())
653 return true;
654
655 if (!filter_needs_more_output_space_ && !filter_->stream_data_len()) {
656 // We don't have any raw data to work with, so
657 // read from the socket.
658 int filtered_data_read;
659 if (ReadRawDataForFilter(&filtered_data_read)) {
660 if (filtered_data_read > 0) {
661 filter_->FlushStreamBuffer(filtered_data_read); // Give data to filter.
662 } else {
663 return true; // EOF
664 }
665 } else {
666 return false; // IO Pending (or error)
667 }
677 } 668 }
678 filter_input_byte_count_ += bytes_read; 669
679 UpdatePacketReadTimes(); // Facilitate stats recording if it is active. 670 if ((filter_->stream_data_len() || filter_needs_more_output_space_)
680 g_url_request_job_tracker.OnBytesRead(this, raw_read_buffer_->data(), 671 && !is_done()) {
681 bytes_read); 672 // Get filtered data.
673 int filtered_data_len = filtered_read_buffer_len_;
674 Filter::FilterStatus status;
675 int output_buffer_size = filtered_data_len;
676 status = filter_->ReadData(filtered_read_buffer_->data(),
677 &filtered_data_len);
678
679 if (filter_needs_more_output_space_ && 0 == filtered_data_len) {
680 // filter_needs_more_output_space_ was mistaken... there are no more bytes
681 // and we should have at least tried to fill up the filter's input buffer.
682 // Correct the state, and try again.
683 filter_needs_more_output_space_ = false;
684 return ReadFilteredData(bytes_read);
685 }
686
687 switch (status) {
688 case Filter::FILTER_DONE: {
689 filter_needs_more_output_space_ = false;
690 *bytes_read = filtered_data_len;
691 rv = true;
692 break;
693 }
694 case Filter::FILTER_NEED_MORE_DATA: {
695 filter_needs_more_output_space_ =
696 (filtered_data_len == output_buffer_size);
697 // We have finished filtering all data currently in the buffer.
698 // There might be some space left in the output buffer. One can
699 // consider reading more data from the stream to feed the filter
700 // and filling up the output buffer. This leads to more complicated
701 // buffer management and data notification mechanisms.
702 // We can revisit this issue if there is a real perf need.
703 if (filtered_data_len > 0) {
704 *bytes_read = filtered_data_len;
705 rv = true;
706 } else {
707 // Read again since we haven't received enough data yet (e.g., we may
708 // not have a complete gzip header yet)
709 rv = ReadFilteredData(bytes_read);
710 }
711 break;
712 }
713 case Filter::FILTER_OK: {
714 filter_needs_more_output_space_ =
715 (filtered_data_len == output_buffer_size);
716 *bytes_read = filtered_data_len;
717 rv = true;
718 break;
719 }
720 case Filter::FILTER_ERROR: {
721 filter_needs_more_output_space_ = false;
722 NotifyDone(URLRequestStatus(URLRequestStatus::FAILED,
723 net::ERR_CONTENT_DECODING_FAILED));
724 rv = false;
725 break;
726 }
727 default: {
728 NOTREACHED();
729 filter_needs_more_output_space_ = false;
730 rv = false;
731 break;
732 }
733 }
734 } else {
735 // we are done, or there is no data left.
736 rv = true;
737 }
738
739 if (rv) {
740 // When we successfully finished a read, we no longer need to
741 // save the caller's buffers. Release our reference.
742 filtered_read_buffer_ = NULL;
743 filtered_read_buffer_len_ = 0;
744 }
745 return rv;
746 }
747
748 void URLRequestJob::EnablePacketCounting(size_t max_packets_timed) {
749 if (max_packets_timed_ < max_packets_timed)
750 max_packets_timed_ = max_packets_timed;
751 packet_timing_enabled_ = true;
682 } 752 }
683 753
684 const URLRequestStatus URLRequestJob::GetStatus() { 754 const URLRequestStatus URLRequestJob::GetStatus() {
685 if (request_) 755 if (request_)
686 return request_->status(); 756 return request_->status();
687 // If the request is gone, we must be cancelled. 757 // If the request is gone, we must be cancelled.
688 return URLRequestStatus(URLRequestStatus::CANCELED, 758 return URLRequestStatus(URLRequestStatus::CANCELED,
689 net::ERR_ABORTED); 759 net::ERR_ABORTED);
690 } 760 }
691 761
692 void URLRequestJob::SetStatus(const URLRequestStatus &status) { 762 void URLRequestJob::SetStatus(const URLRequestStatus &status) {
693 if (request_) 763 if (request_)
694 request_->set_status(status); 764 request_->set_status(status);
695 } 765 }
696 766
767 bool URLRequestJob::ReadRawDataForFilter(int* bytes_read) {
768 bool rv = false;
769
770 DCHECK(bytes_read);
771 DCHECK(filter_.get());
772
773 *bytes_read = 0;
774
775 // Get more pre-filtered data if needed.
776 // TODO(mbelshe): is it possible that the filter needs *MORE* data
777 // when there is some data already in the buffer?
778 if (!filter_->stream_data_len() && !is_done()) {
779 net::IOBuffer* stream_buffer = filter_->stream_buffer();
780 int stream_buffer_size = filter_->stream_buffer_size();
781 rv = ReadRawDataHelper(stream_buffer, stream_buffer_size, bytes_read);
782 }
783 return rv;
784 }
785
786 bool URLRequestJob::ReadRawDataHelper(net::IOBuffer* buf, int buf_size,
787 int* bytes_read) {
788 DCHECK(!request_->status().is_io_pending());
789 DCHECK(raw_read_buffer_ == NULL);
790
791 // Keep a pointer to the read buffer, so we have access to it in the
792 // OnRawReadComplete() callback in the event that the read completes
793 // asynchronously.
794 raw_read_buffer_ = buf;
795 bool rv = ReadRawData(buf, buf_size, bytes_read);
796
797 if (!request_->status().is_io_pending()) {
798 // If the read completes synchronously, either success or failure,
799 // invoke the OnRawReadComplete callback so we can account for the
800 // completed read.
801 OnRawReadComplete(*bytes_read);
802 }
803 return rv;
804 }
805
806 void URLRequestJob::FollowRedirect(const GURL& location, int http_status_code) {
807 g_url_request_job_tracker.OnJobRedirect(this, location, http_status_code);
808
809 int rv = request_->Redirect(location, http_status_code);
810 if (rv != net::OK)
811 NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, rv));
812 }
813
814 void URLRequestJob::OnRawReadComplete(int bytes_read) {
815 DCHECK(raw_read_buffer_);
816 if (bytes_read > 0) {
817 RecordBytesRead(bytes_read);
818 }
819 raw_read_buffer_ = NULL;
820 }
821
822 void URLRequestJob::RecordBytesRead(int bytes_read) {
823 if (is_profiling()) {
824 ++(metrics_->number_of_read_IO_);
825 metrics_->total_bytes_read_ += bytes_read;
826 }
827 filter_input_byte_count_ += bytes_read;
828 UpdatePacketReadTimes(); // Facilitate stats recording if it is active.
829 g_url_request_job_tracker.OnBytesRead(this, raw_read_buffer_->data(),
830 bytes_read);
831 }
832
833 bool URLRequestJob::FilterHasData() {
834 return filter_.get() && filter_->stream_data_len();
835 }
836
697 void URLRequestJob::UpdatePacketReadTimes() { 837 void URLRequestJob::UpdatePacketReadTimes() {
698 if (!packet_timing_enabled_) 838 if (!packet_timing_enabled_)
699 return; 839 return;
700 840
701 if (filter_input_byte_count_ <= bytes_observed_in_packets_) { 841 if (filter_input_byte_count_ <= bytes_observed_in_packets_) {
702 DCHECK(filter_input_byte_count_ == bytes_observed_in_packets_); 842 DCHECK(filter_input_byte_count_ == bytes_observed_in_packets_);
703 return; // No new bytes have arrived. 843 return; // No new bytes have arrived.
704 } 844 }
705 845
706 if (!bytes_observed_in_packets_) 846 if (!bytes_observed_in_packets_)
707 request_time_snapshot_ = GetRequestTime(); 847 request_time_snapshot_ = GetRequestTime();
708 848
709 final_packet_time_ = base::Time::Now(); 849 final_packet_time_ = base::Time::Now();
710 const size_t kTypicalPacketSize = 1430; 850 const size_t kTypicalPacketSize = 1430;
711 while (filter_input_byte_count_ > bytes_observed_in_packets_) { 851 while (filter_input_byte_count_ > bytes_observed_in_packets_) {
712 ++observed_packet_count_; 852 ++observed_packet_count_;
713 if (max_packets_timed_ > packet_times_.size()) { 853 if (max_packets_timed_ > packet_times_.size()) {
714 packet_times_.push_back(final_packet_time_); 854 packet_times_.push_back(final_packet_time_);
715 DCHECK(static_cast<size_t>(observed_packet_count_) == 855 DCHECK(static_cast<size_t>(observed_packet_count_) ==
716 packet_times_.size()); 856 packet_times_.size());
717 } 857 }
718 bytes_observed_in_packets_ += kTypicalPacketSize; 858 bytes_observed_in_packets_ += kTypicalPacketSize;
719 } 859 }
720 // Since packets may not be full, we'll remember the number of bytes we've 860 // Since packets may not be full, we'll remember the number of bytes we've
721 // accounted for in packets thus far. 861 // accounted for in packets thus far.
722 bytes_observed_in_packets_ = filter_input_byte_count_; 862 bytes_observed_in_packets_ = filter_input_byte_count_;
723 } 863 }
724 864
725 void URLRequestJob::EnablePacketCounting(size_t max_packets_timed) {
726 if (max_packets_timed_ < max_packets_timed)
727 max_packets_timed_ = max_packets_timed;
728 packet_timing_enabled_ = true;
729 }
730
731 void URLRequestJob::RecordPacketStats(StatisticSelector statistic) const {
732 if (!packet_timing_enabled_ || (final_packet_time_ == base::Time()))
733 return;
734
735 // Caller should verify that we're not cached content, but we can't always
736 // really check for it here because we may (at destruction time) call our own
737 // class method and get a bogus const answer of false. This DCHECK only helps
738 // when this method has a valid overridden definition.
739 DCHECK(!IsCachedContent());
740
741 base::TimeDelta duration = final_packet_time_ - request_time_snapshot_;
742 switch (statistic) {
743 case SDCH_DECODE: {
744 UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Decode_Latency_F_a", duration,
745 base::TimeDelta::FromMilliseconds(20),
746 base::TimeDelta::FromMinutes(10), 100);
747 UMA_HISTOGRAM_COUNTS_100("Sdch3.Network_Decode_Packets_b",
748 static_cast<int>(observed_packet_count_));
749 UMA_HISTOGRAM_CUSTOM_COUNTS("Sdch3.Network_Decode_Bytes_Processed_b",
750 static_cast<int>(bytes_observed_in_packets_), 500, 100000, 100);
751 if (packet_times_.empty())
752 return;
753 UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Decode_1st_To_Last_a",
754 final_packet_time_ - packet_times_[0],
755 base::TimeDelta::FromMilliseconds(20),
756 base::TimeDelta::FromMinutes(10), 100);
757
758 DCHECK(max_packets_timed_ >= kSdchPacketHistogramCount);
759 DCHECK(kSdchPacketHistogramCount > 4);
760 if (packet_times_.size() <= 4)
761 return;
762 UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Decode_1st_To_2nd_c",
763 packet_times_[1] - packet_times_[0],
764 base::TimeDelta::FromMilliseconds(1),
765 base::TimeDelta::FromSeconds(10), 100);
766 UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Decode_2nd_To_3rd_c",
767 packet_times_[2] - packet_times_[1],
768 base::TimeDelta::FromMilliseconds(1),
769 base::TimeDelta::FromSeconds(10), 100);
770 UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Decode_3rd_To_4th_c",
771 packet_times_[3] - packet_times_[2],
772 base::TimeDelta::FromMilliseconds(1),
773 base::TimeDelta::FromSeconds(10), 100);
774 UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Decode_4th_To_5th_c",
775 packet_times_[4] - packet_times_[3],
776 base::TimeDelta::FromMilliseconds(1),
777 base::TimeDelta::FromSeconds(10), 100);
778 return;
779 }
780 case SDCH_PASSTHROUGH: {
781 // Despite advertising a dictionary, we handled non-sdch compressed
782 // content.
783 UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Pass-through_Latency_F_a",
784 duration,
785 base::TimeDelta::FromMilliseconds(20),
786 base::TimeDelta::FromMinutes(10), 100);
787 UMA_HISTOGRAM_COUNTS_100("Sdch3.Network_Pass-through_Packets_b",
788 observed_packet_count_);
789 if (packet_times_.empty())
790 return;
791 UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Pass-through_1st_To_Last_a",
792 final_packet_time_ - packet_times_[0],
793 base::TimeDelta::FromMilliseconds(20),
794 base::TimeDelta::FromMinutes(10), 100);
795 DCHECK(max_packets_timed_ >= kSdchPacketHistogramCount);
796 DCHECK(kSdchPacketHistogramCount > 4);
797 if (packet_times_.size() <= 4)
798 return;
799 UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Pass-through_1st_To_2nd_c",
800 packet_times_[1] - packet_times_[0],
801 base::TimeDelta::FromMilliseconds(1),
802 base::TimeDelta::FromSeconds(10), 100);
803 UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Pass-through_2nd_To_3rd_c",
804 packet_times_[2] - packet_times_[1],
805 base::TimeDelta::FromMilliseconds(1),
806 base::TimeDelta::FromSeconds(10), 100);
807 UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Pass-through_3rd_To_4th_c",
808 packet_times_[3] - packet_times_[2],
809 base::TimeDelta::FromMilliseconds(1),
810 base::TimeDelta::FromSeconds(10), 100);
811 UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Pass-through_4th_To_5th_c",
812 packet_times_[4] - packet_times_[3],
813 base::TimeDelta::FromMilliseconds(1),
814 base::TimeDelta::FromSeconds(10), 100);
815 return;
816 }
817
818 case SDCH_EXPERIMENT_DECODE: {
819 UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Experiment_Decode",
820 duration,
821 base::TimeDelta::FromMilliseconds(20),
822 base::TimeDelta::FromMinutes(10), 100);
823 // We already provided interpacket histograms above in the SDCH_DECODE
824 // case, so we don't need them here.
825 return;
826 }
827 case SDCH_EXPERIMENT_HOLDBACK: {
828 UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Experiment_Holdback",
829 duration,
830 base::TimeDelta::FromMilliseconds(20),
831 base::TimeDelta::FromMinutes(10), 100);
832 UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Experiment_Holdback_1st_To_Last_a",
833 final_packet_time_ - packet_times_[0],
834 base::TimeDelta::FromMilliseconds(20),
835 base::TimeDelta::FromMinutes(10), 100);
836
837 DCHECK(max_packets_timed_ >= kSdchPacketHistogramCount);
838 DCHECK(kSdchPacketHistogramCount > 4);
839 if (packet_times_.size() <= 4)
840 return;
841 UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Experiment_Holdback_1st_To_2nd_c",
842 packet_times_[1] - packet_times_[0],
843 base::TimeDelta::FromMilliseconds(1),
844 base::TimeDelta::FromSeconds(10), 100);
845 UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Experiment_Holdback_2nd_To_3rd_c",
846 packet_times_[2] - packet_times_[1],
847 base::TimeDelta::FromMilliseconds(1),
848 base::TimeDelta::FromSeconds(10), 100);
849 UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Experiment_Holdback_3rd_To_4th_c",
850 packet_times_[3] - packet_times_[2],
851 base::TimeDelta::FromMilliseconds(1),
852 base::TimeDelta::FromSeconds(10), 100);
853 UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Experiment_Holdback_4th_To_5th_c",
854 packet_times_[4] - packet_times_[3],
855 base::TimeDelta::FromMilliseconds(1),
856 base::TimeDelta::FromSeconds(10), 100);
857 return;
858 }
859 default:
860 NOTREACHED();
861 return;
862 }
863 }
864
865 // The common type of histogram we use for all compression-tracking histograms. 865 // The common type of histogram we use for all compression-tracking histograms.
866 #define COMPRESSION_HISTOGRAM(name, sample) \ 866 #define COMPRESSION_HISTOGRAM(name, sample) \
867 do { \ 867 do { \
868 UMA_HISTOGRAM_CUSTOM_COUNTS("Net.Compress." name, sample, \ 868 UMA_HISTOGRAM_CUSTOM_COUNTS("Net.Compress." name, sample, \
869 500, 1000000, 100); \ 869 500, 1000000, 100); \
870 } while(0) 870 } while(0)
871 871
872 void URLRequestJob::RecordCompressionHistograms() { 872 void URLRequestJob::RecordCompressionHistograms() {
873 if (IsCachedContent() || // Don't record cached content 873 if (IsCachedContent() || // Don't record cached content
874 !GetStatus().is_success() || // Don't record failed content 874 !GetStatus().is_success() || // Don't record failed content
(...skipping 52 matching lines...)
927 927
928 if (is_compressed_) { 928 if (is_compressed_) {
929 COMPRESSION_HISTOGRAM("NoProxy.BytesBeforeCompression", compressed_B); 929 COMPRESSION_HISTOGRAM("NoProxy.BytesBeforeCompression", compressed_B);
930 COMPRESSION_HISTOGRAM("NoProxy.BytesAfterCompression", decompressed_B); 930 COMPRESSION_HISTOGRAM("NoProxy.BytesAfterCompression", decompressed_B);
931 } else { 931 } else {
932 COMPRESSION_HISTOGRAM("NoProxy.ShouldHaveBeenCompressed", decompressed_B); 932 COMPRESSION_HISTOGRAM("NoProxy.ShouldHaveBeenCompressed", decompressed_B);
933 } 933 }
934 } 934 }
935 935
936 } // namespace net 936 } // namespace net