Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(65)

Side by Side Diff: net/url_request/url_request_job.cc

Issue 1662763002: [ON HOLD] Implement pull-based design for content decoding (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Refactor common logic Created 4 years, 9 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "net/url_request/url_request_job.h" 5 #include "net/url_request/url_request_job.h"
6 6
7 #include <utility> 7 #include <utility>
8 8
9 #include "base/bind.h" 9 #include "base/bind.h"
10 #include "base/callback_helpers.h"
10 #include "base/compiler_specific.h" 11 #include "base/compiler_specific.h"
11 #include "base/location.h" 12 #include "base/location.h"
12 #include "base/metrics/histogram_macros.h" 13 #include "base/metrics/histogram_macros.h"
13 #include "base/power_monitor/power_monitor.h" 14 #include "base/power_monitor/power_monitor.h"
14 #include "base/profiler/scoped_tracker.h" 15 #include "base/profiler/scoped_tracker.h"
15 #include "base/single_thread_task_runner.h" 16 #include "base/single_thread_task_runner.h"
16 #include "base/strings/string_number_conversions.h" 17 #include "base/strings/string_number_conversions.h"
17 #include "base/strings/string_util.h" 18 #include "base/strings/string_util.h"
18 #include "base/thread_task_runner_handle.h" 19 #include "base/thread_task_runner_handle.h"
19 #include "base/values.h" 20 #include "base/values.h"
20 #include "net/base/auth.h" 21 #include "net/base/auth.h"
21 #include "net/base/host_port_pair.h" 22 #include "net/base/host_port_pair.h"
22 #include "net/base/io_buffer.h" 23 #include "net/base/io_buffer.h"
23 #include "net/base/load_flags.h" 24 #include "net/base/load_flags.h"
24 #include "net/base/load_states.h" 25 #include "net/base/load_states.h"
25 #include "net/base/net_errors.h" 26 #include "net/base/net_errors.h"
26 #include "net/base/network_delegate.h" 27 #include "net/base/network_delegate.h"
27 #include "net/base/network_quality_estimator.h" 28 #include "net/base/network_quality_estimator.h"
28 #include "net/filter/filter.h" 29 #include "net/filter/filter.h"
29 #include "net/http/http_response_headers.h" 30 #include "net/http/http_response_headers.h"
30 #include "net/url_request/url_request_context.h" 31 #include "net/url_request/url_request_context.h"
31 32
32 namespace net { 33 namespace net {
33 34
34 namespace { 35 namespace {
35 36
36 // Callback for TYPE_URL_REQUEST_FILTERS_SET net-internals event. 37 // Callback for TYPE_URL_REQUEST_FILTERS_SET net-internals event.
37 scoped_ptr<base::Value> FiltersSetCallback( 38 scoped_ptr<base::Value> StreamSourceSetCallback(
38 Filter* filter, 39 StreamSource* stream_source,
39 NetLogCaptureMode /* capture_mode */) { 40 NetLogCaptureMode /* capture_mode */) {
40 scoped_ptr<base::DictionaryValue> event_params(new base::DictionaryValue()); 41 scoped_ptr<base::DictionaryValue> event_params(new base::DictionaryValue());
41 event_params->SetString("filters", filter->OrderedFilterList()); 42 event_params->SetString("filters", stream_source->OrderedStreamSourceList());
42 return std::move(event_params); 43 return std::move(event_params);
43 } 44 }
44 45
45 std::string ComputeMethodForRedirect(const std::string& method, 46 std::string ComputeMethodForRedirect(const std::string& method,
46 int http_status_code) { 47 int http_status_code) {
47 // For 303 redirects, all request methods except HEAD are converted to GET, 48 // For 303 redirects, all request methods except HEAD are converted to GET,
48 // as per the latest httpbis draft. The draft also allows POST requests to 49 // as per the latest httpbis draft. The draft also allows POST requests to
49 // be converted to GETs when following 301/302 redirects, for historical 50 // be converted to GETs when following 301/302 redirects, for historical
50 // reasons. Most major browsers do this and so shall we. Both RFC 2616 and 51 // reasons. Most major browsers do this and so shall we. Both RFC 2616 and
51 // the httpbis draft say to prompt the user to confirm the generation of new 52 // the httpbis draft say to prompt the user to confirm the generation of new
52 // requests, other than GET and HEAD requests, but IE omits these prompts and 53 // requests, other than GET and HEAD requests, but IE omits these prompts and
53 // so shall we. 54 // so shall we.
54 // See: 55 // See:
55 // https://tools.ietf.org/html/draft-ietf-httpbis-p2-semantics-17#section-7.3 56 // https://tools.ietf.org/html/draft-ietf-httpbis-p2-semantics-17#section-7.3
56 if ((http_status_code == 303 && method != "HEAD") || 57 if ((http_status_code == 303 && method != "HEAD") ||
57 ((http_status_code == 301 || http_status_code == 302) && 58 ((http_status_code == 301 || http_status_code == 302) &&
58 method == "POST")) { 59 method == "POST")) {
59 return "GET"; 60 return "GET";
60 } 61 }
61 return method; 62 return method;
62 } 63 }
63 64
64 } // namespace 65 } // namespace
65 66
67 // StreamSources own the previous StreamSource in the chain, but the ultimate
68 // source is URLRequestJob, which has other ownership semantics, so this class
69 // is a proxy for URLRequestJob that is owned by the first filter (in dataflow
70 // order).
71 class URLRequestJob::URLRequestJobStreamSource : public StreamSource {
72 public:
73 URLRequestJobStreamSource(const base::WeakPtr<URLRequestJob>& job)
74 : StreamSource(StreamSource::SOURCE_NONE, nullptr), job_(job) {}
75
76 ~URLRequestJobStreamSource() override {}
77
78 void OnReadComplete(IOBuffer* dest_buffer,
79 size_t dest_buffer_size,
80 Error error,
81 size_t bytes_read) {
82 DCHECK_NE(ERR_IO_PENDING, error);
83 DCHECK_EQ(dest_buffer, pending_read_buffer_.get());
84 DCHECK(!callback_.is_null());
85
86 pending_read_buffer_ = nullptr;
87 base::ResetAndReturn(&callback_).Run(error, bytes_read);
88 }
89
90 // StreamSource implementation:
91 Error ReadInternal(IOBuffer* dest_buffer,
92 size_t buffer_size,
93 size_t* bytes_read) override {
94 URLRequestJob* job = job_.get();
95 DCHECK(job);
mmenke 2016/03/04 21:15:57 If we can DCHECK on it, I don't think it needs to
xunjieli 2016/04/20 19:16:11 Done.
96
97 // If ReadRawData() returns true, the underlying data source has
98 // synchronously succeeded, which might be an EOF.
99 int bytes_read_raw = 0;
100 Error error = job->ReadRawDataHelper(
101 dest_buffer, buffer_size, &bytes_read_raw,
102 base::Bind(&URLRequestJobStreamSource::OnReadComplete,
103 base::Unretained(this), base::Unretained(dest_buffer),
104 buffer_size));
105 if (error == OK)
106 *bytes_read = base::checked_cast<size_t>(bytes_read_raw);
107
108 return error;
109 }
110
111 private:
112 const base::WeakPtr<URLRequestJob> job_;
113 };
114
66 URLRequestJob::URLRequestJob(URLRequest* request, 115 URLRequestJob::URLRequestJob(URLRequest* request,
67 NetworkDelegate* network_delegate) 116 NetworkDelegate* network_delegate)
68 : request_(request), 117 : request_(request),
69 done_(false), 118 done_(false),
70 prefilter_bytes_read_(0),
71 postfilter_bytes_read_(0),
72 filter_needs_more_output_space_(false), 119 filter_needs_more_output_space_(false),
73 filtered_read_buffer_len_(0), 120 filtered_read_buffer_len_(0),
74 has_handled_response_(false), 121 has_handled_response_(false),
75 expected_content_size_(-1), 122 expected_content_size_(-1),
76 network_delegate_(network_delegate), 123 network_delegate_(network_delegate),
77 last_notified_total_received_bytes_(0), 124 last_notified_total_received_bytes_(0),
78 last_notified_total_sent_bytes_(0), 125 last_notified_total_sent_bytes_(0),
126 raw_bytes_read_(0),
127 postfilter_bytes_read_(0),
79 weak_factory_(this) { 128 weak_factory_(this) {
80 base::PowerMonitor* power_monitor = base::PowerMonitor::Get(); 129 base::PowerMonitor* power_monitor = base::PowerMonitor::Get();
81 if (power_monitor) 130 if (power_monitor)
82 power_monitor->AddObserver(this); 131 power_monitor->AddObserver(this);
83 } 132 }
84 133
85 URLRequestJob::~URLRequestJob() { 134 URLRequestJob::~URLRequestJob() {
86 base::PowerMonitor* power_monitor = base::PowerMonitor::Get(); 135 base::PowerMonitor* power_monitor = base::PowerMonitor::Get();
87 if (power_monitor) 136 if (power_monitor)
88 power_monitor->RemoveObserver(this); 137 power_monitor->RemoveObserver(this);
(...skipping 15 matching lines...) Expand all
104 // Kill(). 153 // Kill().
105 // TODO(mmenke): The URLRequest is currently deleted before this method 154 // TODO(mmenke): The URLRequest is currently deleted before this method
106 // invokes its async callback whenever this is called by the URLRequest. 155 // invokes its async callback whenever this is called by the URLRequest.
107 // Try to simplify how cancellation works. 156 // Try to simplify how cancellation works.
108 NotifyCanceled(); 157 NotifyCanceled();
109 } 158 }
110 159
111 // This function calls ReadRawData to get stream data. If a filter exists, it 160 // This function calls ReadRawData to get stream data. If a filter exists, it
112 // passes the data to the attached filter. It then returns the output from 161 // passes the data to the attached filter. It then returns the output from
113 // filter back to the caller. 162 // filter back to the caller.
163 // This method passes reads down the filter chain, where they eventually end up
164 // at URLRequestJobStreamSource::Read, which calls back into
165 // URLRequestJob::ReadRawData.
114 bool URLRequestJob::Read(IOBuffer* buf, int buf_size, int *bytes_read) { 166 bool URLRequestJob::Read(IOBuffer* buf, int buf_size, int *bytes_read) {
115 DCHECK_LT(buf_size, 1000000); // Sanity check. 167 DCHECK_LT(buf_size, 1000000); // Sanity check.
116 DCHECK(buf); 168 DCHECK(buf);
117 DCHECK(bytes_read); 169 DCHECK(bytes_read);
118 DCHECK(filtered_read_buffer_.get() == NULL); 170 DCHECK(filtered_read_buffer_.get() == NULL);
119 DCHECK_EQ(0, filtered_read_buffer_len_); 171 DCHECK_EQ(0, filtered_read_buffer_len_);
120 172
121 Error error = OK; 173 Error error = OK;
122 *bytes_read = 0; 174 *bytes_read = 0;
123 175
124 // Skip Filter if not present. 176 size_t bytes_read_n = 0;
125 if (!filter_) { 177 error = source_->Read(buf, buf_size, &bytes_read_n,
126 error = ReadRawDataHelper(buf, buf_size, bytes_read); 178 base::Bind(&URLRequestJob::SourceReadComplete,
127 } else { 179 weak_factory_.GetWeakPtr()));
128 // Save the caller's buffers while we do IO 180 *bytes_read = bytes_read_n;
129 // in the filter's buffers.
130 filtered_read_buffer_ = buf;
131 filtered_read_buffer_len_ = buf_size;
132
133 error = ReadFilteredData(bytes_read);
134
135 // Synchronous EOF from the filter.
136 if (error == OK && *bytes_read == 0)
137 DoneReading();
138 }
139 181
140 if (error == OK) { 182 if (error == OK) {
183 postfilter_bytes_read_ += bytes_read_n;
141 // If URLRequestJob read zero bytes, the job is at EOF. 184 // If URLRequestJob read zero bytes, the job is at EOF.
142 if (*bytes_read == 0) 185 if (*bytes_read == 0) {
186 DoneReading();
143 NotifyDone(URLRequestStatus()); 187 NotifyDone(URLRequestStatus());
188 }
144 } else if (error == ERR_IO_PENDING) { 189 } else if (error == ERR_IO_PENDING) {
145 SetStatus(URLRequestStatus::FromError(ERR_IO_PENDING)); 190 SetStatus(URLRequestStatus::FromError(ERR_IO_PENDING));
146 } else { 191 } else {
147 NotifyDone(URLRequestStatus::FromError(error)); 192 NotifyDone(URLRequestStatus::FromError(error));
148 *bytes_read = -1; 193 *bytes_read = -1;
149 } 194 }
150 return error == OK; 195 return error == OK;
151 } 196 }
152 197
198 void URLRequestJob::SourceReadComplete(Error error, size_t bytes_read) {
199 DCHECK_NE(ERR_IO_PENDING, error);
200 DCHECK(error == OK || bytes_read == 0);
201
202 // Synchronize the URLRequest state machine with the URLRequestJob state
203 // machine. If this read succeeded, either the request is at EOF and the
204 // URLRequest state machine goes to 'finished', or it is not and the
205 // URLRequest state machine goes to 'success'. If the read failed, the
206 // URLRequest state machine goes directly to 'finished'. If filtered data is
207 // pending, then there's nothing to do, since the status of the request is
208 // already pending.
209 //
210 // Update the URLRequest's status first, so that NotifyReadCompleted has an
211 // accurate view of the request.
212 if (error == OK && bytes_read > 0) {
213 postfilter_bytes_read_ += bytes_read;
214 SetStatus(URLRequestStatus());
215 } else {
216 NotifyDone(URLRequestStatus::FromError(error));
217 }
218 if (error == OK) {
219 if (bytes_read == 0)
220 DoneReading();
221 request_->NotifyReadCompleted(bytes_read);
222 }
223 }
224
153 void URLRequestJob::StopCaching() { 225 void URLRequestJob::StopCaching() {
154 // Nothing to do here. 226 // Nothing to do here.
155 } 227 }
156 228
157 bool URLRequestJob::GetFullRequestHeaders(HttpRequestHeaders* headers) const { 229 bool URLRequestJob::GetFullRequestHeaders(HttpRequestHeaders* headers) const {
158 // Most job types don't send request headers. 230 // Most job types don't send request headers.
159 return false; 231 return false;
160 } 232 }
161 233
162 int64_t URLRequestJob::GetTotalReceivedBytes() const { 234 int64_t URLRequestJob::GetTotalReceivedBytes() const {
(...skipping 28 matching lines...) Expand all
191 } 263 }
192 264
193 bool URLRequestJob::GetResponseCookies(std::vector<std::string>* cookies) { 265 bool URLRequestJob::GetResponseCookies(std::vector<std::string>* cookies) {
194 return false; 266 return false;
195 } 267 }
196 268
197 void URLRequestJob::PopulateNetErrorDetails(NetErrorDetails* details) const { 269 void URLRequestJob::PopulateNetErrorDetails(NetErrorDetails* details) const {
198 return; 270 return;
199 } 271 }
200 272
201 Filter* URLRequestJob::SetupFilter() const { 273 scoped_ptr<StreamSource> URLRequestJob::SetupSource() {
202 return NULL; 274 scoped_ptr<URLRequestJobStreamSource> source(
275 new URLRequestJobStreamSource(weak_factory_.GetWeakPtr()));
276 return std::move(source);
203 } 277 }
204 278
205 bool URLRequestJob::IsRedirectResponse(GURL* location, 279 bool URLRequestJob::IsRedirectResponse(GURL* location,
206 int* http_status_code) { 280 int* http_status_code) {
207 // For non-HTTP jobs, headers will be null. 281 // For non-HTTP jobs, headers will be null.
208 HttpResponseHeaders* headers = request_->response_headers(); 282 HttpResponseHeaders* headers = request_->response_headers();
209 if (!headers) 283 if (!headers)
210 return false; 284 return false;
211 285
212 std::string value; 286 std::string value;
(...skipping 224 matching lines...) Expand 10 before | Expand all | Expand 10 after
437 // Need to check for a NULL auth_info because the server may have failed 511 // Need to check for a NULL auth_info because the server may have failed
438 // to send a challenge with the 401 response. 512 // to send a challenge with the 401 response.
439 if (auth_info.get()) { 513 if (auth_info.get()) {
440 request_->NotifyAuthRequired(auth_info.get()); 514 request_->NotifyAuthRequired(auth_info.get());
441 // Wait for SetAuth or CancelAuth to be called. 515 // Wait for SetAuth or CancelAuth to be called.
442 return; 516 return;
443 } 517 }
444 } 518 }
445 519
446 has_handled_response_ = true; 520 has_handled_response_ = true;
447 if (request_->status().is_success()) 521 if (request_->status().is_success()) {
448 filter_.reset(SetupFilter()); 522 // filter_.reset(SetupFilter());
523 source_ = SetupSource();
524 }
449 525
450 if (!filter_.get()) { 526 if (source_->type() == StreamSource::SOURCE_NONE) {
451 std::string content_length; 527 std::string content_length;
452 request_->GetResponseHeaderByName("content-length", &content_length); 528 request_->GetResponseHeaderByName("content-length", &content_length);
453 if (!content_length.empty()) 529 if (!content_length.empty())
454 base::StringToInt64(content_length, &expected_content_size_); 530 base::StringToInt64(content_length, &expected_content_size_);
455 } else { 531 } else {
456 request_->net_log().AddEvent( 532 request_->net_log().AddEvent(
457 NetLog::TYPE_URL_REQUEST_FILTERS_SET, 533 NetLog::TYPE_URL_REQUEST_FILTERS_SET,
458 base::Bind(&FiltersSetCallback, base::Unretained(filter_.get()))); 534 base::Bind(&StreamSourceSetCallback, base::Unretained(source_.get())));
459 } 535 }
460 536
461 request_->NotifyResponseStarted(); 537 request_->NotifyResponseStarted();
462 538
463 // |this| may be destroyed at this point. 539 // |this| may be destroyed at this point.
464 } 540 }
465 541
466 void URLRequestJob::ConvertResultToError(int result, Error* error, int* count) { 542 void URLRequestJob::ConvertResultToError(int result, Error* error, int* count) {
467 if (result >= 0) { 543 if (result >= 0) {
468 *error = OK; 544 *error = OK;
(...skipping 21 matching lines...) Expand all
490 DCHECK(has_handled_response_); 566 DCHECK(has_handled_response_);
491 567
492 Error error; 568 Error error;
493 int bytes_read; 569 int bytes_read;
494 ConvertResultToError(result, &error, &bytes_read); 570 ConvertResultToError(result, &error, &bytes_read);
495 571
496 DCHECK_NE(ERR_IO_PENDING, error); 572 DCHECK_NE(ERR_IO_PENDING, error);
497 573
498 GatherRawReadStats(error, bytes_read); 574 GatherRawReadStats(error, bytes_read);
499 575
500 if (filter_.get() && error == OK) { 576 // Notify StreamSource.
501 // |bytes_read| being 0 indicates an EOF was received. ReadFilteredData 577 if (error == OK) {
502 // can incorrectly return ERR_IO_PENDING when 0 bytes are passed to it, so 578 DCHECK(!read_raw_callback_.is_null());
503 // just don't call into the filter in that case. 579 StreamSource::OnReadCompleteCallback cb = read_raw_callback_;
504 int filter_bytes_read = 0; 580 read_raw_callback_.Reset();
505 if (bytes_read > 0) { 581 cb.Run(OK, bytes_read);
mmenke 2016/03/04 21:15:57 Can just use: base::ResetAndReturn(read_raw_callb
xunjieli 2016/04/20 19:16:10 Done.
506 // Tell the filter that it has more data.
507 PushInputToFilter(bytes_read);
508
509 // Filter the data.
510 error = ReadFilteredData(&filter_bytes_read);
511 }
512
513 if (error == OK && !filter_bytes_read)
514 DoneReading();
515
516 DVLOG(1) << __FUNCTION__ << "() "
517 << "\"" << request_->url().spec() << "\""
518 << " pre bytes read = " << bytes_read
519 << " pre total = " << prefilter_bytes_read_
520 << " post total = " << postfilter_bytes_read_;
521 bytes_read = filter_bytes_read;
522 } else {
523 DVLOG(1) << __FUNCTION__ << "() "
524 << "\"" << request_->url().spec() << "\""
525 << " pre bytes read = " << bytes_read
526 << " pre total = " << prefilter_bytes_read_
527 << " post total = " << postfilter_bytes_read_;
528 } 582 }
529 583
530 // Synchronize the URLRequest state machine with the URLRequestJob state
531 // machine. If this read succeeded, either the request is at EOF and the
532 // URLRequest state machine goes to 'finished', or it is not and the
533 // URLRequest state machine goes to 'success'. If the read failed, the
534 // URLRequest state machine goes directly to 'finished'. If filtered data is
535 // pending, then there's nothing to do, since the status of the request is
536 // already pending.
537 //
538 // Update the URLRequest's status first, so that NotifyReadCompleted has an
539 // accurate view of the request.
540 if (error == OK && bytes_read > 0) {
541 SetStatus(URLRequestStatus());
542 } else if (error != ERR_IO_PENDING) {
543 NotifyDone(URLRequestStatus::FromError(error));
544 }
545
546 // NotifyReadCompleted should be called after SetStatus or NotifyDone updates
547 // the status.
548 if (error == OK)
549 request_->NotifyReadCompleted(bytes_read);
550
551 // |this| may be destroyed at this point. 584 // |this| may be destroyed at this point.
552 } 585 }
553 586
587 #if 0
588 void URLRequestJob::OnRawReadComplete(int bytes_read) {
589 if (bytes_read > 0) {
590 }
591 DCHECK(!read_raw_callback_.is_null());
592 StreamSource::OnReadCompleteCallback cb = read_raw_callback_;
593 read_raw_callback_.Reset();
594 cb.Run(OK, bytes_read);
595 }
596 #endif
mmenke 2016/03/04 21:15:57 Should remove all the commented out code.
xunjieli 2016/04/20 19:16:11 Done.
597
554 void URLRequestJob::NotifyStartError(const URLRequestStatus &status) { 598 void URLRequestJob::NotifyStartError(const URLRequestStatus &status) {
555 DCHECK(!has_handled_response_); 599 DCHECK(!has_handled_response_);
556 DCHECK(request_->status().is_io_pending()); 600 DCHECK(request_->status().is_io_pending());
557 601
558 has_handled_response_ = true; 602 has_handled_response_ = true;
559 // There may be relevant information in the response info even in the 603 // There may be relevant information in the response info even in the
560 // error case. 604 // error case.
561 GetResponseInfo(&request_->response_info_); 605 GetResponseInfo(&request_->response_info_);
562 606
563 SetStatus(status); 607 SetStatus(status);
(...skipping 26 matching lines...) Expand all
590 request_->set_status(status); 634 request_->set_status(status);
591 } 635 }
592 636
593 // If the request succeeded (And wasn't cancelled) and the response code was 637 // If the request succeeded (And wasn't cancelled) and the response code was
594 // 4xx or 5xx, record whether or not the main frame was blank. This is 638 // 4xx or 5xx, record whether or not the main frame was blank. This is
595 // intended to be a short-lived histogram, used to figure out how important 639 // intended to be a short-lived histogram, used to figure out how important
596 // fixing http://crbug.com/331745 is. 640 // fixing http://crbug.com/331745 is.
597 if (request_->status().is_success()) { 641 if (request_->status().is_success()) {
598 int response_code = GetResponseCode(); 642 int response_code = GetResponseCode();
599 if (400 <= response_code && response_code <= 599) { 643 if (400 <= response_code && response_code <= 599) {
600 bool page_has_content = (postfilter_bytes_read_ != 0); 644 bool page_has_content = (postfilter_bytes_read() != 0);
601 if (request_->load_flags() & net::LOAD_MAIN_FRAME) { 645 if (request_->load_flags() & net::LOAD_MAIN_FRAME) {
602 UMA_HISTOGRAM_BOOLEAN("Net.ErrorResponseHasContentMainFrame", 646 UMA_HISTOGRAM_BOOLEAN("Net.ErrorResponseHasContentMainFrame",
603 page_has_content); 647 page_has_content);
604 } else { 648 } else {
605 UMA_HISTOGRAM_BOOLEAN("Net.ErrorResponseHasContentNonMainFrame", 649 UMA_HISTOGRAM_BOOLEAN("Net.ErrorResponseHasContentNonMainFrame",
606 page_has_content); 650 page_has_content);
607 } 651 }
608 } 652 }
609 } 653 }
610 654
(...skipping 45 matching lines...) Expand 10 before | Expand all | Expand 10 after
656 return 0; 700 return 0;
657 } 701 }
658 702
659 void URLRequestJob::DoneReading() { 703 void URLRequestJob::DoneReading() {
660 // Do nothing. 704 // Do nothing.
661 } 705 }
662 706
663 void URLRequestJob::DoneReadingRedirectResponse() { 707 void URLRequestJob::DoneReadingRedirectResponse() {
664 } 708 }
665 709
666 void URLRequestJob::PushInputToFilter(int bytes_read) {
667 DCHECK(filter_);
668 filter_->FlushStreamBuffer(bytes_read);
669 }
670
671 Error URLRequestJob::ReadFilteredData(int* bytes_read) {
672 DCHECK(filter_);
673 DCHECK(filtered_read_buffer_.get());
674 DCHECK_GT(filtered_read_buffer_len_, 0);
675 DCHECK_LT(filtered_read_buffer_len_, 1000000); // Sanity check.
676 DCHECK(!raw_read_buffer_);
677
678 *bytes_read = 0;
679 Error error = ERR_FAILED;
680
681 for (;;) {
682 if (is_done())
683 return OK;
684
685 if (!filter_needs_more_output_space_ && !filter_->stream_data_len()) {
686 // We don't have any raw data to work with, so read from the transaction.
687 int filtered_data_read;
688 error = ReadRawDataForFilter(&filtered_data_read);
689 // If ReadRawDataForFilter returned some data, fall through to the case
690 // below; otherwise, return early.
691 if (error != OK || filtered_data_read == 0)
692 return error;
693 filter_->FlushStreamBuffer(filtered_data_read);
694 }
695
696 if ((filter_->stream_data_len() || filter_needs_more_output_space_) &&
697 !is_done()) {
698 // Get filtered data.
699 int filtered_data_len = filtered_read_buffer_len_;
700 int output_buffer_size = filtered_data_len;
701 Filter::FilterStatus status =
702 filter_->ReadData(filtered_read_buffer_->data(), &filtered_data_len);
703
704 if (filter_needs_more_output_space_ && !filtered_data_len) {
705 // filter_needs_more_output_space_ was mistaken... there are no more
706 // bytes and we should have at least tried to fill up the filter's input
707 // buffer. Correct the state, and try again.
708 filter_needs_more_output_space_ = false;
709 continue;
710 }
711 filter_needs_more_output_space_ =
712 (filtered_data_len == output_buffer_size);
713
714 switch (status) {
715 case Filter::FILTER_DONE: {
716 filter_needs_more_output_space_ = false;
717 *bytes_read = filtered_data_len;
718 postfilter_bytes_read_ += filtered_data_len;
719 error = OK;
720 break;
721 }
722 case Filter::FILTER_NEED_MORE_DATA: {
723 // We have finished filtering all data currently in the buffer.
724 // There might be some space left in the output buffer. One can
725 // consider reading more data from the stream to feed the filter
726 // and filling up the output buffer. This leads to more complicated
727 // buffer management and data notification mechanisms.
728 // We can revisit this issue if there is a real perf need.
729 if (filtered_data_len > 0) {
730 *bytes_read = filtered_data_len;
731 postfilter_bytes_read_ += filtered_data_len;
732 error = OK;
733 } else {
734 // Read again since we haven't received enough data yet (e.g., we
735 // may not have a complete gzip header yet).
736 continue;
737 }
738 break;
739 }
740 case Filter::FILTER_OK: {
741 *bytes_read = filtered_data_len;
742 postfilter_bytes_read_ += filtered_data_len;
743 error = OK;
744 break;
745 }
746 case Filter::FILTER_ERROR: {
747 DVLOG(1) << __FUNCTION__ << "() "
748 << "\"" << request_->url().spec() << "\""
749 << " Filter Error";
750 filter_needs_more_output_space_ = false;
751 error = ERR_CONTENT_DECODING_FAILED;
752 UMA_HISTOGRAM_ENUMERATION("Net.ContentDecodingFailed.FilterType",
753 filter_->type(), Filter::FILTER_TYPE_MAX);
mmenke 2016/03/04 21:15:57 Are we just getting rid of this histogram?
Randy Smith (Not in Mondays) 2016/03/09 23:03:56 I'd like to keep this histogram; it's a pain when
xunjieli 2016/04/20 19:16:11 Done. moved to stream_source.cc so we can log the
754 break;
755 }
756 default: {
757 NOTREACHED();
758 filter_needs_more_output_space_ = false;
759 error = ERR_FAILED;
760 break;
761 }
762 }
763
764 // If logging all bytes is enabled, log the filtered bytes read.
765 if (error == OK && filtered_data_len > 0 &&
766 request()->net_log().IsCapturing()) {
767 request()->net_log().AddByteTransferEvent(
768 NetLog::TYPE_URL_REQUEST_JOB_FILTERED_BYTES_READ, filtered_data_len,
769 filtered_read_buffer_->data());
mmenke 2016/03/04 21:15:57 Think we want to keep this log event.
xunjieli 2016/04/20 19:16:11 Done.
770 }
771 } else {
772 // we are done, or there is no data left.
773 error = OK;
774 }
775 break;
776 }
777
778 if (error == OK) {
779 // When we successfully finished a read, we no longer need to save the
780 // caller's buffers. Release our reference.
781 filtered_read_buffer_ = NULL;
782 filtered_read_buffer_len_ = 0;
783 }
784 return error;
785 }
786
787 void URLRequestJob::DestroyFilters() {
788 filter_.reset();
789 }
790
791 const URLRequestStatus URLRequestJob::GetStatus() { 710 const URLRequestStatus URLRequestJob::GetStatus() {
792 return request_->status(); 711 return request_->status();
793 } 712 }
794 713
795 void URLRequestJob::SetStatus(const URLRequestStatus &status) { 714 void URLRequestJob::SetStatus(const URLRequestStatus &status) {
796 // An error status should never be replaced by a non-error status by a 715 // An error status should never be replaced by a non-error status by a
797 // URLRequestJob. URLRequest has some retry paths, but it resets the status 716 // URLRequestJob. URLRequest has some retry paths, but it resets the status
798 // itself, if needed. 717 // itself, if needed.
799 DCHECK(request_->status().is_io_pending() || 718 DCHECK(request_->status().is_io_pending() ||
800 request_->status().is_success() || 719 request_->status().is_success() ||
801 (!status.is_success() && !status.is_io_pending())); 720 (!status.is_success() && !status.is_io_pending()));
802 request_->set_status(status); 721 request_->set_status(status);
803 } 722 }
804 723
805 void URLRequestJob::SetProxyServer(const HostPortPair& proxy_server) { 724 void URLRequestJob::SetProxyServer(const HostPortPair& proxy_server) {
806 request_->proxy_server_ = proxy_server; 725 request_->proxy_server_ = proxy_server;
807 } 726 }
808 727
809 Error URLRequestJob::ReadRawDataForFilter(int* bytes_read) { 728 int64_t URLRequestJob::prefilter_bytes_read() const {
810 Error error = ERR_FAILED; 729 return base::checked_cast<int64_t>(raw_bytes_read_);
811 DCHECK(bytes_read);
812 DCHECK(filter_.get());
813
814 *bytes_read = 0;
815
816 // Get more pre-filtered data if needed.
817 // TODO(mbelshe): is it possible that the filter needs *MORE* data
818 // when there is some data already in the buffer?
819 if (!filter_->stream_data_len() && !is_done()) {
820 IOBuffer* stream_buffer = filter_->stream_buffer();
821 int stream_buffer_size = filter_->stream_buffer_size();
822 error = ReadRawDataHelper(stream_buffer, stream_buffer_size, bytes_read);
823 }
824 return error;
825 } 730 }
826 731
827 Error URLRequestJob::ReadRawDataHelper(IOBuffer* buf, 732 int64_t URLRequestJob::postfilter_bytes_read() const {
828 int buf_size, 733 return postfilter_bytes_read_;
mmenke 2016/03/04 21:15:57 Making prefilter_bytes_read use a checked_cast, an
xunjieli 2016/04/20 19:16:10 Done.
829 int* bytes_read) { 734 }
735
736 Error URLRequestJob::ReadRawDataHelper(
737 IOBuffer* buf,
738 int buf_size,
739 int* bytes_read,
740 const StreamSource::OnReadCompleteCallback& callback) {
830 DCHECK(!raw_read_buffer_); 741 DCHECK(!raw_read_buffer_);
831 742
832 // Keep a pointer to the read buffer, so we have access to it in 743 // Keep a pointer to the read buffer, so URLRequestJob::GatherRawReadStats()
833 // GatherRawReadStats() in the event that the read completes asynchronously. 744 // has access to it to log stats.
834 raw_read_buffer_ = buf; 745 raw_read_buffer_ = buf;
835 Error error; 746 Error error;
836 ConvertResultToError(ReadRawData(buf, buf_size), &error, bytes_read); 747 ConvertResultToError(ReadRawData(buf, buf_size), &error, bytes_read);
837 748
838 if (error != ERR_IO_PENDING) { 749 if (error != ERR_IO_PENDING) {
839 // If the read completes synchronously, either success or failure, invoke 750 // If the read completes synchronously, either success or failure, invoke
840 // GatherRawReadStats so we can account for the completed read. 751 // GatherRawReadStats so we can account for the completed read.
841 GatherRawReadStats(error, *bytes_read); 752 GatherRawReadStats(error, *bytes_read);
753 } else {
754 read_raw_callback_ = callback;
842 } 755 }
843 return error; 756 return error;
844 } 757 }
845 758
846 void URLRequestJob::FollowRedirect(const RedirectInfo& redirect_info) { 759 void URLRequestJob::FollowRedirect(const RedirectInfo& redirect_info) {
847 int rv = request_->Redirect(redirect_info); 760 int rv = request_->Redirect(redirect_info);
848 if (rv != OK) 761 if (rv != OK)
849 NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, rv)); 762 NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, rv));
850 } 763 }
851 764
852 void URLRequestJob::GatherRawReadStats(Error error, int bytes_read) { 765 void URLRequestJob::GatherRawReadStats(Error error, int bytes_read) {
853 DCHECK(raw_read_buffer_ || bytes_read == 0); 766 DCHECK(raw_read_buffer_ || bytes_read == 0);
854 DCHECK_NE(ERR_IO_PENDING, error); 767 DCHECK_NE(ERR_IO_PENDING, error);
855 768
856 if (error != OK) { 769 if (error != OK) {
857 raw_read_buffer_ = nullptr; 770 raw_read_buffer_ = nullptr;
858 return; 771 return;
859 } 772 }
860 // If |filter_| is non-NULL, bytes will be logged after it is applied 773 if (bytes_read > 0 && request()->net_log().IsCapturing()) {
861 // instead.
862 if (!filter_.get() && bytes_read > 0 && request()->net_log().IsCapturing()) {
863 request()->net_log().AddByteTransferEvent( 774 request()->net_log().AddByteTransferEvent(
864 NetLog::TYPE_URL_REQUEST_JOB_BYTES_READ, bytes_read, 775 NetLog::TYPE_URL_REQUEST_JOB_BYTES_READ, bytes_read,
865 raw_read_buffer_->data()); 776 raw_read_buffer_->data());
866 } 777 }
867 778
868 if (bytes_read > 0) { 779 if (bytes_read > 0) {
869 RecordBytesRead(bytes_read); 780 RecordBytesRead(bytes_read);
870 } 781 }
871 raw_read_buffer_ = nullptr; 782 raw_read_buffer_ = nullptr;
872 } 783 }
873 784
// Bookkeeping for a successful raw read of |bytes_read| (> 0) bytes: bumps
// the raw byte counter, notifies the NetworkQualityEstimator on the first
// body read, and forwards byte counts to interested observers.
void URLRequestJob::RecordBytesRead(int bytes_read) {
  DCHECK_GT(bytes_read, 0);
  // |bytes_read| is known positive here, so the size_t conversion is safe;
  // checked_cast would CHECK on any unexpected value instead of wrapping.
  raw_bytes_read_ += base::checked_cast<size_t>(bytes_read);

  // On first read, notify NetworkQualityEstimator that response headers have
  // been received.
  // TODO(tbansal): Move this to url_request_http_job.cc. This may catch
  // Service Worker jobs twice.
  // If prefilter_bytes_read() is equal to bytes_read, this is the first raw
  // read of the response body, which is used as the signal that response
  // headers have been received.
  if (request_->context()->network_quality_estimator() &&
      prefilter_bytes_read() == bytes_read) {
    request_->context()->network_quality_estimator()->NotifyHeadersReceived(
        *request_);
  }

  DVLOG(2) << __FUNCTION__ << "() "
           << "\"" << request_->url().spec() << "\""
           << " pre bytes read = " << bytes_read
           << " pre total = " << prefilter_bytes_read()
           << " post total = " << postfilter_bytes_read();
  UpdatePacketReadTimes();  // Facilitate stats recording if it is active.

  // Notify observers if any additional network usage has occurred. Note that
  // the number of received bytes over the network sent by this notification
  // could be vastly different from |bytes_read|, such as when a large chunk of
  // network bytes is received before multiple smaller raw reads are performed
  // on it.
  MaybeNotifyNetworkBytes();
}
907 816
908 bool URLRequestJob::FilterHasData() {
909 return filter_.get() && filter_->stream_data_len();
910 }
911
// Intentionally a no-op in the base class; called from RecordBytesRead()
// after every successful raw read. Presumably a hook that derived jobs
// override to record per-packet timing stats — TODO(review): confirm against
// the header declaration.
void URLRequestJob::UpdatePacketReadTimes() {
}
914 819
915 RedirectInfo URLRequestJob::ComputeRedirectInfo(const GURL& location, 820 RedirectInfo URLRequestJob::ComputeRedirectInfo(const GURL& location,
916 int http_status_code) { 821 int http_status_code) {
917 const GURL& url = request_->url(); 822 const GURL& url = request_->url();
918 823
919 RedirectInfo redirect_info; 824 RedirectInfo redirect_info;
920 825
921 redirect_info.status_code = http_status_code; 826 redirect_info.status_code = http_status_code;
(...skipping 51 matching lines...) Expand 10 before | Expand all | Expand 10 after
973 int64_t total_sent_bytes = GetTotalSentBytes(); 878 int64_t total_sent_bytes = GetTotalSentBytes();
974 DCHECK_GE(total_sent_bytes, last_notified_total_sent_bytes_); 879 DCHECK_GE(total_sent_bytes, last_notified_total_sent_bytes_);
975 if (total_sent_bytes > last_notified_total_sent_bytes_) { 880 if (total_sent_bytes > last_notified_total_sent_bytes_) {
976 network_delegate_->NotifyNetworkBytesSent( 881 network_delegate_->NotifyNetworkBytesSent(
977 request_, total_sent_bytes - last_notified_total_sent_bytes_); 882 request_, total_sent_bytes - last_notified_total_sent_bytes_);
978 } 883 }
979 last_notified_total_sent_bytes_ = total_sent_bytes; 884 last_notified_total_sent_bytes_ = total_sent_bytes;
980 } 885 }
981 886
982 } // namespace net 887 } // namespace net
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698