Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(98)

Side by Side Diff: net/url_request/url_request_job.cc

Issue 1662763002: [ON HOLD] Implement pull-based design for content decoding (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Address comments Created 4 years, 8 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "net/url_request/url_request_job.h" 5 #include "net/url_request/url_request_job.h"
6 6
7 #include <utility> 7 #include <utility>
8 8
9 #include "base/bind.h" 9 #include "base/bind.h"
10 #include "base/callback_helpers.h"
10 #include "base/compiler_specific.h" 11 #include "base/compiler_specific.h"
11 #include "base/location.h" 12 #include "base/location.h"
12 #include "base/metrics/histogram_macros.h" 13 #include "base/metrics/histogram_macros.h"
13 #include "base/power_monitor/power_monitor.h" 14 #include "base/power_monitor/power_monitor.h"
14 #include "base/profiler/scoped_tracker.h" 15 #include "base/profiler/scoped_tracker.h"
15 #include "base/single_thread_task_runner.h" 16 #include "base/single_thread_task_runner.h"
16 #include "base/strings/string_number_conversions.h" 17 #include "base/strings/string_number_conversions.h"
17 #include "base/strings/string_util.h" 18 #include "base/strings/string_util.h"
18 #include "base/thread_task_runner_handle.h" 19 #include "base/thread_task_runner_handle.h"
19 #include "base/values.h" 20 #include "base/values.h"
20 #include "net/base/auth.h" 21 #include "net/base/auth.h"
21 #include "net/base/host_port_pair.h" 22 #include "net/base/host_port_pair.h"
22 #include "net/base/io_buffer.h" 23 #include "net/base/io_buffer.h"
23 #include "net/base/load_flags.h" 24 #include "net/base/load_flags.h"
24 #include "net/base/load_states.h" 25 #include "net/base/load_states.h"
25 #include "net/base/net_errors.h" 26 #include "net/base/net_errors.h"
26 #include "net/base/network_delegate.h" 27 #include "net/base/network_delegate.h"
27 #include "net/base/network_quality_estimator.h" 28 #include "net/base/network_quality_estimator.h"
28 #include "net/filter/filter.h" 29 #include "net/filter/stream_source_util.h"
29 #include "net/http/http_response_headers.h" 30 #include "net/http/http_response_headers.h"
30 #include "net/url_request/url_request_context.h" 31 #include "net/url_request/url_request_context.h"
31 32
32 namespace net { 33 namespace net {
33 34
34 namespace { 35 namespace {
35 36
36 // Callback for TYPE_URL_REQUEST_FILTERS_SET net-internals event. 37 // Callback for TYPE_URL_REQUEST_FILTERS_SET net-internals event.
37 std::unique_ptr<base::Value> FiltersSetCallback( 38 std::unique_ptr<base::Value> StreamSourceSetCallback(
38 Filter* filter, 39 StreamSource* stream_source,
39 NetLogCaptureMode /* capture_mode */) { 40 NetLogCaptureMode /* capture_mode */) {
40 std::unique_ptr<base::DictionaryValue> event_params( 41 std::unique_ptr<base::DictionaryValue> event_params(
41 new base::DictionaryValue()); 42 new base::DictionaryValue());
42 event_params->SetString("filters", filter->OrderedFilterList()); 43 event_params->SetString(
44 "filters", StreamSourceUtil::OrderedStreamSourceList(stream_source));
43 return std::move(event_params); 45 return std::move(event_params);
44 } 46 }
45 47
46 std::string ComputeMethodForRedirect(const std::string& method, 48 std::string ComputeMethodForRedirect(const std::string& method,
47 int http_status_code) { 49 int http_status_code) {
48 // For 303 redirects, all request methods except HEAD are converted to GET, 50 // For 303 redirects, all request methods except HEAD are converted to GET,
49 // as per the latest httpbis draft. The draft also allows POST requests to 51 // as per the latest httpbis draft. The draft also allows POST requests to
50 // be converted to GETs when following 301/302 redirects, for historical 52 // be converted to GETs when following 301/302 redirects, for historical
51 // reasons. Most major browsers do this and so shall we. Both RFC 2616 and 53 // reasons. Most major browsers do this and so shall we. Both RFC 2616 and
52 // the httpbis draft say to prompt the user to confirm the generation of new 54 // the httpbis draft say to prompt the user to confirm the generation of new
53 // requests, other than GET and HEAD requests, but IE omits these prompts and 55 // requests, other than GET and HEAD requests, but IE omits these prompts and
54 // so shall we. 56 // so shall we.
55 // See: 57 // See:
56 // https://tools.ietf.org/html/draft-ietf-httpbis-p2-semantics-17#section-7.3 58 // https://tools.ietf.org/html/draft-ietf-httpbis-p2-semantics-17#section-7.3
57 if ((http_status_code == 303 && method != "HEAD") || 59 if ((http_status_code == 303 && method != "HEAD") ||
58 ((http_status_code == 301 || http_status_code == 302) && 60 ((http_status_code == 301 || http_status_code == 302) &&
59 method == "POST")) { 61 method == "POST")) {
60 return "GET"; 62 return "GET";
61 } 63 }
62 return method; 64 return method;
63 } 65 }
64 66
65 } // namespace 67 } // namespace
66 68
69 // StreamSources own the previous StreamSource in the chain, but the ultimate
70 // source is URLRequestJob, which has other ownership semantics, so this class
71 // is a proxy for URLRequestJob that is owned by the first filter (in dataflow
72 // order).
73 class URLRequestJob::URLRequestJobStreamSource : public StreamSource {
74 public:
75 URLRequestJobStreamSource(URLRequestJob* job)
76 : StreamSource(StreamSource::TYPE_NONE, nullptr), job_(job) {}
77
78 ~URLRequestJobStreamSource() override {}
79
80 // StreamSource implementation:
81 Error Read(IOBuffer* dest_buffer,
82 size_t buffer_size,
83 size_t* bytes_read,
84 const OnReadCompleteCallback& callback) override {
85 DCHECK(job_);
86
87 // If ReadRawData() returns true, the underlying data source has
88 // synchronously succeeded, which might be an EOF.
89 int bytes_read_raw = 0;
90 Error error = job_->ReadRawDataHelper(dest_buffer, buffer_size,
91 &bytes_read_raw, callback);
92 if (error == OK)
93 *bytes_read = base::checked_cast<size_t>(bytes_read_raw);
94
95 return error;
96 }
97
98 private:
99 URLRequestJob* job_;
100 };
101
67 URLRequestJob::URLRequestJob(URLRequest* request, 102 URLRequestJob::URLRequestJob(URLRequest* request,
68 NetworkDelegate* network_delegate) 103 NetworkDelegate* network_delegate)
69 : request_(request), 104 : request_(request),
70 done_(false), 105 done_(false),
71 prefilter_bytes_read_(0),
72 postfilter_bytes_read_(0),
73 filter_needs_more_output_space_(false), 106 filter_needs_more_output_space_(false),
74 filtered_read_buffer_len_(0),
75 has_handled_response_(false), 107 has_handled_response_(false),
76 expected_content_size_(-1), 108 expected_content_size_(-1),
77 network_delegate_(network_delegate), 109 network_delegate_(network_delegate),
78 last_notified_total_received_bytes_(0), 110 last_notified_total_received_bytes_(0),
79 last_notified_total_sent_bytes_(0), 111 last_notified_total_sent_bytes_(0),
112 raw_bytes_read_(0),
113 postfilter_bytes_read_(0),
80 weak_factory_(this) { 114 weak_factory_(this) {
81 base::PowerMonitor* power_monitor = base::PowerMonitor::Get(); 115 base::PowerMonitor* power_monitor = base::PowerMonitor::Get();
82 if (power_monitor) 116 if (power_monitor)
83 power_monitor->AddObserver(this); 117 power_monitor->AddObserver(this);
84 } 118 }
85 119
86 URLRequestJob::~URLRequestJob() { 120 URLRequestJob::~URLRequestJob() {
87 base::PowerMonitor* power_monitor = base::PowerMonitor::Get(); 121 base::PowerMonitor* power_monitor = base::PowerMonitor::Get();
88 if (power_monitor) 122 if (power_monitor)
89 power_monitor->RemoveObserver(this); 123 power_monitor->RemoveObserver(this);
(...skipping 15 matching lines...) Expand all
105 // Kill(). 139 // Kill().
106 // TODO(mmenke): The URLRequest is currently deleted before this method 140 // TODO(mmenke): The URLRequest is currently deleted before this method
107 // invokes its async callback whenever this is called by the URLRequest. 141 // invokes its async callback whenever this is called by the URLRequest.
108 // Try to simplify how cancellation works. 142 // Try to simplify how cancellation works.
109 NotifyCanceled(); 143 NotifyCanceled();
110 } 144 }
111 145
112 // This function calls ReadRawData to get stream data. If a filter exists, it 146 // This function calls ReadRawData to get stream data. If a filter exists, it
113 // passes the data to the attached filter. It then returns the output from 147 // passes the data to the attached filter. It then returns the output from
114 // filter back to the caller. 148 // filter back to the caller.
149 // This method passes reads down the filter chain, where they eventually end up
150 // at URLRequestJobStreamSource::Read, which calls back into
151 // URLRequestJob::ReadRawData.
115 bool URLRequestJob::Read(IOBuffer* buf, int buf_size, int *bytes_read) { 152 bool URLRequestJob::Read(IOBuffer* buf, int buf_size, int *bytes_read) {
116 DCHECK_LT(buf_size, 1000000); // Sanity check. 153 DCHECK_LT(buf_size, 1000000); // Sanity check.
117 DCHECK(buf); 154 DCHECK(buf);
118 DCHECK(bytes_read); 155 DCHECK(bytes_read);
119 DCHECK(filtered_read_buffer_.get() == NULL);
120 DCHECK_EQ(0, filtered_read_buffer_len_);
121 156
122 Error error = OK; 157 Error error = OK;
123 *bytes_read = 0; 158 *bytes_read = 0;
124 159
125 // Skip Filter if not present. 160 size_t bytes_read_n = 0;
126 if (!filter_) { 161 error = source_->Read(buf, buf_size, &bytes_read_n,
127 error = ReadRawDataHelper(buf, buf_size, bytes_read); 162 base::Bind(&URLRequestJob::SourceReadComplete,
128 } else { 163 weak_factory_.GetWeakPtr()));
129 // Save the caller's buffers while we do IO 164 *bytes_read = bytes_read_n;
Randy Smith (Not in Mondays) 2016/04/26 21:54:02 Why not just pass bytes_read to source_->Read()?
xunjieli 2016/07/20 21:00:48 Done.
130 // in the filter's buffers.
131 filtered_read_buffer_ = buf;
132 filtered_read_buffer_len_ = buf_size;
133
134 error = ReadFilteredData(bytes_read);
135
136 // Synchronous EOF from the filter.
137 if (error == OK && *bytes_read == 0)
138 DoneReading();
139 }
140 165
141 if (error == OK) { 166 if (error == OK) {
167 postfilter_bytes_read_ += bytes_read_n;
168 if (request()->net_log().IsCapturing()) {
169 request()->net_log().AddByteTransferEvent(
170 NetLog::TYPE_URL_REQUEST_JOB_FILTERED_BYTES_READ, bytes_read_n,
171 buf->data());
172 }
142 // If URLRequestJob read zero bytes, the job is at EOF. 173 // If URLRequestJob read zero bytes, the job is at EOF.
143 if (*bytes_read == 0) 174 if (*bytes_read == 0) {
175 DoneReading();
144 NotifyDone(URLRequestStatus()); 176 NotifyDone(URLRequestStatus());
177 }
145 } else if (error == ERR_IO_PENDING) { 178 } else if (error == ERR_IO_PENDING) {
146 SetStatus(URLRequestStatus::FromError(ERR_IO_PENDING)); 179 SetStatus(URLRequestStatus::FromError(ERR_IO_PENDING));
147 } else { 180 } else {
148 NotifyDone(URLRequestStatus::FromError(error)); 181 NotifyDone(URLRequestStatus::FromError(error));
149 *bytes_read = -1; 182 *bytes_read = -1;
150 } 183 }
151 return error == OK; 184 return error == OK;
152 } 185 }
153 186
187 void URLRequestJob::SourceReadComplete(Error error, size_t bytes_read) {
Randy Smith (Not in Mondays) 2016/04/26 21:54:02 nit, idea (i.e. not even as much force as suggesti
xunjieli 2016/07/20 21:00:48 Done.
188 DCHECK_NE(ERR_IO_PENDING, error);
189 DCHECK(error == OK || bytes_read == 0);
190
191 // Synchronize the URLRequest state machine with the URLRequestJob state
192 // machine. If this read succeeded, either the request is at EOF and the
193 // URLRequest state machine goes to 'finished', or it is not and the
194 // URLRequest state machine goes to 'success'. If the read failed, the
195 // URLRequest state machine goes directly to 'finished'. If filtered data is
196 // pending, then there's nothing to do, since the status of the request is
197 // already pending.
198 //
199 // Update the URLRequest's status first, so that NotifyReadCompleted has an
200 // accurate view of the request.
201 if (error == OK && bytes_read > 0) {
202 postfilter_bytes_read_ += bytes_read;
203 SetStatus(URLRequestStatus());
204 } else {
205 NotifyDone(URLRequestStatus::FromError(error));
206 }
207 if (error == OK) {
208 if (bytes_read == 0)
209 DoneReading();
210 request_->NotifyReadCompleted(bytes_read);
211 }
212 }
213
154 void URLRequestJob::StopCaching() { 214 void URLRequestJob::StopCaching() {
155 // Nothing to do here. 215 // Nothing to do here.
156 } 216 }
157 217
158 bool URLRequestJob::GetFullRequestHeaders(HttpRequestHeaders* headers) const { 218 bool URLRequestJob::GetFullRequestHeaders(HttpRequestHeaders* headers) const {
159 // Most job types don't send request headers. 219 // Most job types don't send request headers.
160 return false; 220 return false;
161 } 221 }
162 222
163 int64_t URLRequestJob::GetTotalReceivedBytes() const { 223 int64_t URLRequestJob::GetTotalReceivedBytes() const {
(...skipping 28 matching lines...) Expand all
192 } 252 }
193 253
194 bool URLRequestJob::GetResponseCookies(std::vector<std::string>* cookies) { 254 bool URLRequestJob::GetResponseCookies(std::vector<std::string>* cookies) {
195 return false; 255 return false;
196 } 256 }
197 257
198 void URLRequestJob::PopulateNetErrorDetails(NetErrorDetails* details) const { 258 void URLRequestJob::PopulateNetErrorDetails(NetErrorDetails* details) const {
199 return; 259 return;
200 } 260 }
201 261
202 Filter* URLRequestJob::SetupFilter() const { 262 std::unique_ptr<StreamSource> URLRequestJob::SetupSource() {
203 return NULL; 263 std::unique_ptr<URLRequestJobStreamSource> source(
264 new URLRequestJobStreamSource(this));
265 return std::move(source);
204 } 266 }
205 267
206 bool URLRequestJob::IsRedirectResponse(GURL* location, 268 bool URLRequestJob::IsRedirectResponse(GURL* location,
207 int* http_status_code) { 269 int* http_status_code) {
208 // For non-HTTP jobs, headers will be null. 270 // For non-HTTP jobs, headers will be null.
209 HttpResponseHeaders* headers = request_->response_headers(); 271 HttpResponseHeaders* headers = request_->response_headers();
210 if (!headers) 272 if (!headers)
211 return false; 273 return false;
212 274
213 std::string value; 275 std::string value;
(...skipping 224 matching lines...) Expand 10 before | Expand all | Expand 10 after
438 // Need to check for a NULL auth_info because the server may have failed 500 // Need to check for a NULL auth_info because the server may have failed
439 // to send a challenge with the 401 response. 501 // to send a challenge with the 401 response.
440 if (auth_info.get()) { 502 if (auth_info.get()) {
441 request_->NotifyAuthRequired(auth_info.get()); 503 request_->NotifyAuthRequired(auth_info.get());
442 // Wait for SetAuth or CancelAuth to be called. 504 // Wait for SetAuth or CancelAuth to be called.
443 return; 505 return;
444 } 506 }
445 } 507 }
446 508
447 has_handled_response_ = true; 509 has_handled_response_ = true;
448 if (request_->status().is_success()) 510 if (request_->status().is_success()) {
449 filter_.reset(SetupFilter()); 511 source_ = SetupSource();
512 }
Randy Smith (Not in Mondays) 2016/04/26 21:54:02 nit: Why the curly braces?
xunjieli 2016/07/20 21:00:48 Done.
450 513
451 if (!filter_.get()) { 514 if (source_->type() == StreamSource::TYPE_NONE) {
452 std::string content_length; 515 std::string content_length;
453 request_->GetResponseHeaderByName("content-length", &content_length); 516 request_->GetResponseHeaderByName("content-length", &content_length);
454 if (!content_length.empty()) 517 if (!content_length.empty())
455 base::StringToInt64(content_length, &expected_content_size_); 518 base::StringToInt64(content_length, &expected_content_size_);
456 } else { 519 } else {
457 request_->net_log().AddEvent( 520 request_->net_log().AddEvent(
458 NetLog::TYPE_URL_REQUEST_FILTERS_SET, 521 NetLog::TYPE_URL_REQUEST_FILTERS_SET,
459 base::Bind(&FiltersSetCallback, base::Unretained(filter_.get()))); 522 base::Bind(&StreamSourceSetCallback, base::Unretained(source_.get())));
460 } 523 }
461 524
462 request_->NotifyResponseStarted(); 525 request_->NotifyResponseStarted();
463 526
464 // |this| may be destroyed at this point. 527 // |this| may be destroyed at this point.
465 } 528 }
466 529
467 void URLRequestJob::ConvertResultToError(int result, Error* error, int* count) { 530 void URLRequestJob::ConvertResultToError(int result, Error* error, int* count) {
468 if (result >= 0) { 531 if (result >= 0) {
469 *error = OK; 532 *error = OK;
(...skipping 21 matching lines...) Expand all
491 DCHECK(has_handled_response_); 554 DCHECK(has_handled_response_);
492 555
493 Error error; 556 Error error;
494 int bytes_read; 557 int bytes_read;
495 ConvertResultToError(result, &error, &bytes_read); 558 ConvertResultToError(result, &error, &bytes_read);
496 559
497 DCHECK_NE(ERR_IO_PENDING, error); 560 DCHECK_NE(ERR_IO_PENDING, error);
498 561
499 GatherRawReadStats(error, bytes_read); 562 GatherRawReadStats(error, bytes_read);
500 563
501 if (filter_.get() && error == OK) { 564 // Notify StreamSource.
502 // |bytes_read| being 0 indicates an EOF was received. ReadFilteredData 565 if (error == OK) {
503 // can incorrectly return ERR_IO_PENDING when 0 bytes are passed to it, so 566 DCHECK(!read_raw_callback_.is_null());
Randy Smith (Not in Mondays) 2016/04/26 21:54:02 Why only if error == OK? Don't we want to pass th
xunjieli 2016/07/20 21:00:48 Done.
504 // just don't call into the filter in that case. 567 base::ResetAndReturn(&read_raw_callback_).Run(OK, bytes_read);
505 int filter_bytes_read = 0;
506 if (bytes_read > 0) {
507 // Tell the filter that it has more data.
508 PushInputToFilter(bytes_read);
509
510 // Filter the data.
511 error = ReadFilteredData(&filter_bytes_read);
512 }
513
514 if (error == OK && !filter_bytes_read)
515 DoneReading();
516
517 DVLOG(1) << __FUNCTION__ << "() "
518 << "\"" << request_->url().spec() << "\""
519 << " pre bytes read = " << bytes_read
520 << " pre total = " << prefilter_bytes_read_
521 << " post total = " << postfilter_bytes_read_;
522 bytes_read = filter_bytes_read;
523 } else {
524 DVLOG(1) << __FUNCTION__ << "() "
525 << "\"" << request_->url().spec() << "\""
526 << " pre bytes read = " << bytes_read
527 << " pre total = " << prefilter_bytes_read_
528 << " post total = " << postfilter_bytes_read_;
529 } 568 }
530 569
531 // Synchronize the URLRequest state machine with the URLRequestJob state
532 // machine. If this read succeeded, either the request is at EOF and the
533 // URLRequest state machine goes to 'finished', or it is not and the
534 // URLRequest state machine goes to 'success'. If the read failed, the
535 // URLRequest state machine goes directly to 'finished'. If filtered data is
536 // pending, then there's nothing to do, since the status of the request is
537 // already pending.
538 //
539 // Update the URLRequest's status first, so that NotifyReadCompleted has an
540 // accurate view of the request.
541 if (error == OK && bytes_read > 0) {
542 SetStatus(URLRequestStatus());
543 } else if (error != ERR_IO_PENDING) {
544 NotifyDone(URLRequestStatus::FromError(error));
545 }
546
547 // NotifyReadCompleted should be called after SetStatus or NotifyDone updates
548 // the status.
549 if (error == OK)
550 request_->NotifyReadCompleted(bytes_read);
551
552 // |this| may be destroyed at this point. 570 // |this| may be destroyed at this point.
553 } 571 }
554 572
555 void URLRequestJob::NotifyStartError(const URLRequestStatus &status) { 573 void URLRequestJob::NotifyStartError(const URLRequestStatus &status) {
556 DCHECK(!has_handled_response_); 574 DCHECK(!has_handled_response_);
557 DCHECK(request_->status().is_io_pending()); 575 DCHECK(request_->status().is_io_pending());
558 576
559 has_handled_response_ = true; 577 has_handled_response_ = true;
560 // There may be relevant information in the response info even in the 578 // There may be relevant information in the response info even in the
561 // error case. 579 // error case.
(...skipping 29 matching lines...) Expand all
591 request_->set_status(status); 609 request_->set_status(status);
592 } 610 }
593 611
594 // If the request succeeded (And wasn't cancelled) and the response code was 612 // If the request succeeded (And wasn't cancelled) and the response code was
595 // 4xx or 5xx, record whether or not the main frame was blank. This is 613 // 4xx or 5xx, record whether or not the main frame was blank. This is
596 // intended to be a short-lived histogram, used to figure out how important 614 // intended to be a short-lived histogram, used to figure out how important
597 // fixing http://crbug.com/331745 is. 615 // fixing http://crbug.com/331745 is.
598 if (request_->status().is_success()) { 616 if (request_->status().is_success()) {
599 int response_code = GetResponseCode(); 617 int response_code = GetResponseCode();
600 if (400 <= response_code && response_code <= 599) { 618 if (400 <= response_code && response_code <= 599) {
601 bool page_has_content = (postfilter_bytes_read_ != 0); 619 bool page_has_content = (postfilter_bytes_read() != 0);
602 if (request_->load_flags() & net::LOAD_MAIN_FRAME) { 620 if (request_->load_flags() & net::LOAD_MAIN_FRAME) {
603 UMA_HISTOGRAM_BOOLEAN("Net.ErrorResponseHasContentMainFrame", 621 UMA_HISTOGRAM_BOOLEAN("Net.ErrorResponseHasContentMainFrame",
604 page_has_content); 622 page_has_content);
605 } else { 623 } else {
606 UMA_HISTOGRAM_BOOLEAN("Net.ErrorResponseHasContentNonMainFrame", 624 UMA_HISTOGRAM_BOOLEAN("Net.ErrorResponseHasContentNonMainFrame",
607 page_has_content); 625 page_has_content);
608 } 626 }
609 } 627 }
610 } 628 }
611 629
(...skipping 45 matching lines...) Expand 10 before | Expand all | Expand 10 after
657 return 0; 675 return 0;
658 } 676 }
659 677
660 void URLRequestJob::DoneReading() { 678 void URLRequestJob::DoneReading() {
661 // Do nothing. 679 // Do nothing.
662 } 680 }
663 681
664 void URLRequestJob::DoneReadingRedirectResponse() { 682 void URLRequestJob::DoneReadingRedirectResponse() {
665 } 683 }
666 684
667 void URLRequestJob::PushInputToFilter(int bytes_read) {
668 DCHECK(filter_);
669 filter_->FlushStreamBuffer(bytes_read);
670 }
671
672 Error URLRequestJob::ReadFilteredData(int* bytes_read) {
673 DCHECK(filter_);
674 DCHECK(filtered_read_buffer_.get());
675 DCHECK_GT(filtered_read_buffer_len_, 0);
676 DCHECK_LT(filtered_read_buffer_len_, 1000000); // Sanity check.
677 DCHECK(!raw_read_buffer_);
678
679 *bytes_read = 0;
680 Error error = ERR_FAILED;
681
682 for (;;) {
683 if (is_done())
684 return OK;
685
686 if (!filter_needs_more_output_space_ && !filter_->stream_data_len()) {
687 // We don't have any raw data to work with, so read from the transaction.
688 int filtered_data_read;
689 error = ReadRawDataForFilter(&filtered_data_read);
690 // If ReadRawDataForFilter returned some data, fall through to the case
691 // below; otherwise, return early.
692 if (error != OK || filtered_data_read == 0)
693 return error;
694 filter_->FlushStreamBuffer(filtered_data_read);
695 }
696
697 if ((filter_->stream_data_len() || filter_needs_more_output_space_) &&
698 !is_done()) {
699 // Get filtered data.
700 int filtered_data_len = filtered_read_buffer_len_;
701 int output_buffer_size = filtered_data_len;
702 Filter::FilterStatus status =
703 filter_->ReadData(filtered_read_buffer_->data(), &filtered_data_len);
704
705 if (filter_needs_more_output_space_ && !filtered_data_len) {
706 // filter_needs_more_output_space_ was mistaken... there are no more
707 // bytes and we should have at least tried to fill up the filter's input
708 // buffer. Correct the state, and try again.
709 filter_needs_more_output_space_ = false;
710 continue;
711 }
712 filter_needs_more_output_space_ =
713 (filtered_data_len == output_buffer_size);
714
715 switch (status) {
716 case Filter::FILTER_DONE: {
717 filter_needs_more_output_space_ = false;
718 *bytes_read = filtered_data_len;
719 postfilter_bytes_read_ += filtered_data_len;
720 error = OK;
721 break;
722 }
723 case Filter::FILTER_NEED_MORE_DATA: {
724 // We have finished filtering all data currently in the buffer.
725 // There might be some space left in the output buffer. One can
726 // consider reading more data from the stream to feed the filter
727 // and filling up the output buffer. This leads to more complicated
728 // buffer management and data notification mechanisms.
729 // We can revisit this issue if there is a real perf need.
730 if (filtered_data_len > 0) {
731 *bytes_read = filtered_data_len;
732 postfilter_bytes_read_ += filtered_data_len;
733 error = OK;
734 } else {
735 // Read again since we haven't received enough data yet (e.g., we
736 // may not have a complete gzip header yet).
737 continue;
738 }
739 break;
740 }
741 case Filter::FILTER_OK: {
742 *bytes_read = filtered_data_len;
743 postfilter_bytes_read_ += filtered_data_len;
744 error = OK;
745 break;
746 }
747 case Filter::FILTER_ERROR: {
748 DVLOG(1) << __FUNCTION__ << "() "
749 << "\"" << request_->url().spec() << "\""
750 << " Filter Error";
751 filter_needs_more_output_space_ = false;
752 error = ERR_CONTENT_DECODING_FAILED;
753 UMA_HISTOGRAM_ENUMERATION("Net.ContentDecodingFailed.FilterType",
754 filter_->type(), Filter::FILTER_TYPE_MAX);
755 break;
756 }
757 default: {
758 NOTREACHED();
759 filter_needs_more_output_space_ = false;
760 error = ERR_FAILED;
761 break;
762 }
763 }
764
765 // If logging all bytes is enabled, log the filtered bytes read.
766 if (error == OK && filtered_data_len > 0 &&
767 request()->net_log().IsCapturing()) {
768 request()->net_log().AddByteTransferEvent(
769 NetLog::TYPE_URL_REQUEST_JOB_FILTERED_BYTES_READ, filtered_data_len,
770 filtered_read_buffer_->data());
771 }
772 } else {
773 // we are done, or there is no data left.
774 error = OK;
775 }
776 break;
777 }
778
779 if (error == OK) {
780 // When we successfully finished a read, we no longer need to save the
781 // caller's buffers. Release our reference.
782 filtered_read_buffer_ = NULL;
783 filtered_read_buffer_len_ = 0;
784 }
785 return error;
786 }
787
788 void URLRequestJob::DestroyFilters() {
789 filter_.reset();
790 }
791
792 const URLRequestStatus URLRequestJob::GetStatus() { 685 const URLRequestStatus URLRequestJob::GetStatus() {
793 return request_->status(); 686 return request_->status();
794 } 687 }
795 688
796 void URLRequestJob::SetStatus(const URLRequestStatus &status) { 689 void URLRequestJob::SetStatus(const URLRequestStatus &status) {
797 // An error status should never be replaced by a non-error status by a 690 // An error status should never be replaced by a non-error status by a
798 // URLRequestJob. URLRequest has some retry paths, but it resets the status 691 // URLRequestJob. URLRequest has some retry paths, but it resets the status
799 // itself, if needed. 692 // itself, if needed.
800 DCHECK(request_->status().is_io_pending() || 693 DCHECK(request_->status().is_io_pending() ||
801 request_->status().is_success() || 694 request_->status().is_success() ||
802 (!status.is_success() && !status.is_io_pending())); 695 (!status.is_success() && !status.is_io_pending()));
803 request_->set_status(status); 696 request_->set_status(status);
804 } 697 }
805 698
806 void URLRequestJob::SetProxyServer(const HostPortPair& proxy_server) { 699 void URLRequestJob::SetProxyServer(const HostPortPair& proxy_server) {
807 request_->proxy_server_ = proxy_server; 700 request_->proxy_server_ = proxy_server;
808 } 701 }
809 702
810 Error URLRequestJob::ReadRawDataForFilter(int* bytes_read) { 703 int64_t URLRequestJob::prefilter_bytes_read() const {
811 Error error = ERR_FAILED; 704 return base::checked_cast<int64_t>(raw_bytes_read_);
812 DCHECK(bytes_read);
813 DCHECK(filter_.get());
814
815 *bytes_read = 0;
816
817 // Get more pre-filtered data if needed.
818 // TODO(mbelshe): is it possible that the filter needs *MORE* data
819 // when there is some data already in the buffer?
820 if (!filter_->stream_data_len() && !is_done()) {
821 IOBuffer* stream_buffer = filter_->stream_buffer();
822 int stream_buffer_size = filter_->stream_buffer_size();
823 error = ReadRawDataHelper(stream_buffer, stream_buffer_size, bytes_read);
824 }
825 return error;
826 } 705 }
827 706
828 Error URLRequestJob::ReadRawDataHelper(IOBuffer* buf, 707 int64_t URLRequestJob::postfilter_bytes_read() const {
829 int buf_size, 708 return base::checked_cast<int64_t>(postfilter_bytes_read_);
830 int* bytes_read) { 709 }
710
711 Error URLRequestJob::ReadRawDataHelper(
712 IOBuffer* buf,
713 int buf_size,
714 int* bytes_read,
715 const StreamSource::OnReadCompleteCallback& callback) {
831 DCHECK(!raw_read_buffer_); 716 DCHECK(!raw_read_buffer_);
832 717
833 // Keep a pointer to the read buffer, so we have access to it in 718 // Keep a pointer to the read buffer, so URLRequestJob::GatherRawReadStats()
834 // GatherRawReadStats() in the event that the read completes asynchronously. 719 // has access to it to log stats.
835 raw_read_buffer_ = buf; 720 raw_read_buffer_ = buf;
836 Error error; 721 Error error;
837 ConvertResultToError(ReadRawData(buf, buf_size), &error, bytes_read); 722 ConvertResultToError(ReadRawData(buf, buf_size), &error, bytes_read);
838 723
839 if (error != ERR_IO_PENDING) { 724 if (error != ERR_IO_PENDING) {
840 // If the read completes synchronously, either success or failure, invoke 725 // If the read completes synchronously, either success or failure, invoke
841 // GatherRawReadStats so we can account for the completed read. 726 // GatherRawReadStats so we can account for the completed read.
842 GatherRawReadStats(error, *bytes_read); 727 GatherRawReadStats(error, *bytes_read);
728 } else {
729 read_raw_callback_ = callback;
843 } 730 }
844 return error; 731 return error;
845 } 732 }
846 733
847 void URLRequestJob::FollowRedirect(const RedirectInfo& redirect_info) { 734 void URLRequestJob::FollowRedirect(const RedirectInfo& redirect_info) {
848 int rv = request_->Redirect(redirect_info); 735 int rv = request_->Redirect(redirect_info);
849 if (rv != OK) 736 if (rv != OK)
850 NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, rv)); 737 NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, rv));
851 } 738 }
852 739
853 void URLRequestJob::GatherRawReadStats(Error error, int bytes_read) { 740 void URLRequestJob::GatherRawReadStats(Error error, int bytes_read) {
854 DCHECK(raw_read_buffer_ || bytes_read == 0); 741 DCHECK(raw_read_buffer_ || bytes_read == 0);
855 DCHECK_NE(ERR_IO_PENDING, error); 742 DCHECK_NE(ERR_IO_PENDING, error);
856 743
857 if (error != OK) { 744 if (error != OK) {
858 raw_read_buffer_ = nullptr; 745 raw_read_buffer_ = nullptr;
859 return; 746 return;
860 } 747 }
861 // If |filter_| is non-NULL, bytes will be logged after it is applied 748 if (bytes_read > 0 && request()->net_log().IsCapturing()) {
862 // instead.
863 if (!filter_.get() && bytes_read > 0 && request()->net_log().IsCapturing()) {
864 request()->net_log().AddByteTransferEvent( 749 request()->net_log().AddByteTransferEvent(
865 NetLog::TYPE_URL_REQUEST_JOB_BYTES_READ, bytes_read, 750 NetLog::TYPE_URL_REQUEST_JOB_BYTES_READ, bytes_read,
866 raw_read_buffer_->data()); 751 raw_read_buffer_->data());
867 } 752 }
868 753
869 if (bytes_read > 0) { 754 if (bytes_read > 0) {
870 RecordBytesRead(bytes_read); 755 RecordBytesRead(bytes_read);
871 } 756 }
872 raw_read_buffer_ = nullptr; 757 raw_read_buffer_ = nullptr;
873 } 758 }
874 759
875 void URLRequestJob::RecordBytesRead(int bytes_read) { 760 void URLRequestJob::RecordBytesRead(int bytes_read) {
876 DCHECK_GT(bytes_read, 0); 761 DCHECK_GT(bytes_read, 0);
877 prefilter_bytes_read_ += bytes_read; 762 raw_bytes_read_ += base::checked_cast<size_t>(bytes_read);
878 763
879 // On first read, notify NetworkQualityEstimator that response headers have 764 // On first read, notify NetworkQualityEstimator that response headers have
880 // been received. 765 // been received.
881 // TODO(tbansal): Move this to url_request_http_job.cc. This may catch 766 // TODO(tbansal): Move this to url_request_http_job.cc. This may catch
882 // Service Worker jobs twice. 767 // Service Worker jobs twice.
883 // If prefilter_bytes_read_ is equal to bytes_read, it indicates this is the 768 // If prefilter_bytes_read_ is equal to bytes_read, it indicates this is the
884 // first raw read of the response body. This is used as the signal that 769 // first raw read of the response body. This is used as the signal that
885 // response headers have been received. 770 // response headers have been received.
886 if (request_->context()->network_quality_estimator() && 771 if (request_->context()->network_quality_estimator() &&
887 prefilter_bytes_read_ == bytes_read) { 772 prefilter_bytes_read() == bytes_read) {
888 request_->context()->network_quality_estimator()->NotifyHeadersReceived( 773 request_->context()->network_quality_estimator()->NotifyHeadersReceived(
889 *request_); 774 *request_);
890 } 775 }
891 776
892 if (!filter_.get())
893 postfilter_bytes_read_ += bytes_read;
894 DVLOG(2) << __FUNCTION__ << "() " 777 DVLOG(2) << __FUNCTION__ << "() "
895 << "\"" << request_->url().spec() << "\"" 778 << "\"" << request_->url().spec() << "\""
896 << " pre bytes read = " << bytes_read 779 << " pre bytes read = " << bytes_read
897 << " pre total = " << prefilter_bytes_read_ 780 << " pre total = " << prefilter_bytes_read()
898 << " post total = " << postfilter_bytes_read_; 781 << " post total = " << postfilter_bytes_read();
899 UpdatePacketReadTimes(); // Facilitate stats recording if it is active. 782 UpdatePacketReadTimes(); // Facilitate stats recording if it is active.
900 783
901 // Notify observers if any additional network usage has occurred. Note that 784 // Notify observers if any additional network usage has occurred. Note that
902 // the number of received bytes over the network sent by this notification 785 // the number of received bytes over the network sent by this notification
903 // could be vastly different from |bytes_read|, such as when a large chunk of 786 // could be vastly different from |bytes_read|, such as when a large chunk of
904 // network bytes is received before multiple smaller raw reads are performed 787 // network bytes is received before multiple smaller raw reads are performed
905 // on it. 788 // on it.
906 MaybeNotifyNetworkBytes(); 789 MaybeNotifyNetworkBytes();
907 } 790 }
908 791
// Returns true when a content-decoding filter is attached AND that filter
// still holds buffered, undrained data (non-zero stream_data_len()).
// NOTE(review): this method appears only on the pre-patch side of this diff;
// the patch under review deletes it together with |filter_| — confirm no
// remaining callers before keeping it.
bool URLRequestJob::FilterHasData() {
  return filter_.get() && filter_->stream_data_len();
}
912
// Intentionally a no-op in the base class. Called from RecordBytesRead()
// after each raw read; presumably a hook for subclasses that record
// per-packet arrival times — TODO(review): confirm against overriding jobs.
void URLRequestJob::UpdatePacketReadTimes() {
}
915 794
916 RedirectInfo URLRequestJob::ComputeRedirectInfo(const GURL& location, 795 RedirectInfo URLRequestJob::ComputeRedirectInfo(const GURL& location,
917 int http_status_code) { 796 int http_status_code) {
918 const GURL& url = request_->url(); 797 const GURL& url = request_->url();
919 798
920 RedirectInfo redirect_info; 799 RedirectInfo redirect_info;
921 800
922 redirect_info.status_code = http_status_code; 801 redirect_info.status_code = http_status_code;
(...skipping 59 matching lines...) Expand 10 before | Expand all | Expand 10 after
982 int64_t total_sent_bytes = GetTotalSentBytes(); 861 int64_t total_sent_bytes = GetTotalSentBytes();
983 DCHECK_GE(total_sent_bytes, last_notified_total_sent_bytes_); 862 DCHECK_GE(total_sent_bytes, last_notified_total_sent_bytes_);
984 if (total_sent_bytes > last_notified_total_sent_bytes_) { 863 if (total_sent_bytes > last_notified_total_sent_bytes_) {
985 network_delegate_->NotifyNetworkBytesSent( 864 network_delegate_->NotifyNetworkBytesSent(
986 request_, total_sent_bytes - last_notified_total_sent_bytes_); 865 request_, total_sent_bytes - last_notified_total_sent_bytes_);
987 } 866 }
988 last_notified_total_sent_bytes_ = total_sent_bytes; 867 last_notified_total_sent_bytes_ = total_sent_bytes;
989 } 868 }
990 869
991 } // namespace net 870 } // namespace net
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698