Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(220)

Side by Side Diff: net/url_request/url_request_job.cc

Issue 1662763002: [ON HOLD] Implement pull-based design for content decoding (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Fix components_unittests Created 4 years, 5 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "net/url_request/url_request_job.h" 5 #include "net/url_request/url_request_job.h"
6 6
7 #include <utility> 7 #include <utility>
8 8
9 #include "base/bind.h" 9 #include "base/bind.h"
10 #include "base/callback_helpers.h"
10 #include "base/compiler_specific.h" 11 #include "base/compiler_specific.h"
11 #include "base/location.h" 12 #include "base/location.h"
12 #include "base/metrics/histogram_macros.h" 13 #include "base/metrics/histogram_macros.h"
13 #include "base/power_monitor/power_monitor.h" 14 #include "base/power_monitor/power_monitor.h"
14 #include "base/profiler/scoped_tracker.h" 15 #include "base/profiler/scoped_tracker.h"
15 #include "base/single_thread_task_runner.h" 16 #include "base/single_thread_task_runner.h"
16 #include "base/strings/string_number_conversions.h" 17 #include "base/strings/string_number_conversions.h"
17 #include "base/strings/string_split.h" 18 #include "base/strings/string_split.h"
18 #include "base/strings/string_util.h" 19 #include "base/strings/string_util.h"
19 #include "base/threading/thread_task_runner_handle.h" 20 #include "base/threading/thread_task_runner_handle.h"
20 #include "base/values.h" 21 #include "base/values.h"
21 #include "net/base/auth.h" 22 #include "net/base/auth.h"
22 #include "net/base/host_port_pair.h" 23 #include "net/base/host_port_pair.h"
23 #include "net/base/io_buffer.h" 24 #include "net/base/io_buffer.h"
24 #include "net/base/load_flags.h" 25 #include "net/base/load_flags.h"
25 #include "net/base/load_states.h" 26 #include "net/base/load_states.h"
26 #include "net/base/net_errors.h" 27 #include "net/base/net_errors.h"
27 #include "net/base/network_delegate.h" 28 #include "net/base/network_delegate.h"
28 #include "net/filter/filter.h"
29 #include "net/http/http_response_headers.h" 29 #include "net/http/http_response_headers.h"
30 #include "net/nqe/network_quality_estimator.h" 30 #include "net/nqe/network_quality_estimator.h"
31 #include "net/url_request/url_request_context.h" 31 #include "net/url_request/url_request_context.h"
32 32
33 namespace net { 33 namespace net {
34 34
35 namespace { 35 namespace {
36 36
37 // Callback for TYPE_URL_REQUEST_FILTERS_SET net-internals event. 37 // Callback for TYPE_URL_REQUEST_FILTERS_SET net-internals event.
38 std::unique_ptr<base::Value> FiltersSetCallback( 38 std::unique_ptr<base::Value> StreamSourceSetCallback(
39 Filter* filter, 39 StreamSource* stream_source,
40 NetLogCaptureMode /* capture_mode */) { 40 NetLogCaptureMode /* capture_mode */) {
41 std::unique_ptr<base::DictionaryValue> event_params( 41 std::unique_ptr<base::DictionaryValue> event_params(
42 new base::DictionaryValue()); 42 new base::DictionaryValue());
43 event_params->SetString("filters", filter->OrderedFilterList()); 43 event_params->SetString("filters", stream_source->OrderedTypeStringList());
44 return std::move(event_params); 44 return std::move(event_params);
45 } 45 }
46 46
47 std::string ComputeMethodForRedirect(const std::string& method, 47 std::string ComputeMethodForRedirect(const std::string& method,
48 int http_status_code) { 48 int http_status_code) {
49 // For 303 redirects, all request methods except HEAD are converted to GET, 49 // For 303 redirects, all request methods except HEAD are converted to GET,
50 // as per the latest httpbis draft. The draft also allows POST requests to 50 // as per the latest httpbis draft. The draft also allows POST requests to
51 // be converted to GETs when following 301/302 redirects, for historical 51 // be converted to GETs when following 301/302 redirects, for historical
52 // reasons. Most major browsers do this and so shall we. Both RFC 2616 and 52 // reasons. Most major browsers do this and so shall we. Both RFC 2616 and
53 // the httpbis draft say to prompt the user to confirm the generation of new 53 // the httpbis draft say to prompt the user to confirm the generation of new
(...skipping 54 matching lines...) Expand 10 before | Expand all | Expand 10 after
108 base::CompareCaseInsensitiveASCII(token, "unsafe-url") == 0) { 108 base::CompareCaseInsensitiveASCII(token, "unsafe-url") == 0) {
109 new_policy = URLRequest::NEVER_CLEAR_REFERRER; 109 new_policy = URLRequest::NEVER_CLEAR_REFERRER;
110 continue; 110 continue;
111 } 111 }
112 } 112 }
113 return new_policy; 113 return new_policy;
114 } 114 }
115 115
116 } // namespace 116 } // namespace
117 117
118 // StreamSources own the previous StreamSource in the chain, but the ultimate
119 // source is URLRequestJob, which has other ownership semantics, so this class
120 // is a proxy for URLRequestJob that is owned by the first filter (in dataflow
121 // order).
122 class URLRequestJob::URLRequestJobStreamSource : public StreamSource {
123 public:
124 URLRequestJobStreamSource(URLRequestJob* job)
mmenke 2016/07/21 18:14:09 explicit
xunjieli 2016/07/27 20:32:04 Done.
125 : StreamSource(StreamSource::TYPE_NONE), job_(job), weak_factory_(this) {}
126
127 ~URLRequestJobStreamSource() override {}
128
129 // StreamSource implementation:
130 int Read(IOBuffer* dest_buffer,
131 size_t buffer_size,
132 const CompletionCallback& callback) override {
133 DCHECK(job_);
134 // Use a wrapped callback to prevent a ReadRawDataComplete call into a
135 // destroyed stream source, because |this| is owned by the next filter in
136 // the filter chain.
mmenke 2016/07/21 18:14:09 Is this strictly needed? If callback_ is bound to
xunjieli 2016/07/27 20:32:04 Done. Sorry, you are right. It is not needed. I th
137 int rv = job_->ReadRawDataHelper(
138 dest_buffer, buffer_size,
139 base::Bind(&URLRequestJobStreamSource::OnReadCompleted,
140 weak_factory_.GetWeakPtr()));
141 if (rv != ERR_IO_PENDING)
142 return rv;
143 callback_ = callback;
144 return ERR_IO_PENDING;
145 }
146
147 private:
148 // Helper method to invoke |callback_|.
149 void OnReadCompleted(int rv) { base::ResetAndReturn(&callback_).Run(rv); }
150
151 URLRequestJob* job_;
152 CompletionCallback callback_;
153
154 base::WeakPtrFactory<URLRequestJobStreamSource> weak_factory_;
mmenke 2016/07/21 18:14:09 DISALLOW_COPY_AND_ASSIGN
xunjieli 2016/07/27 20:32:04 Done.
155 };
156
118 URLRequestJob::URLRequestJob(URLRequest* request, 157 URLRequestJob::URLRequestJob(URLRequest* request,
119 NetworkDelegate* network_delegate) 158 NetworkDelegate* network_delegate)
120 : request_(request), 159 : request_(request),
121 done_(false), 160 done_(false),
122 prefilter_bytes_read_(0),
123 postfilter_bytes_read_(0),
124 filter_needs_more_output_space_(false),
125 filtered_read_buffer_len_(0),
126 has_handled_response_(false), 161 has_handled_response_(false),
127 expected_content_size_(-1), 162 expected_content_size_(-1),
128 network_delegate_(network_delegate), 163 network_delegate_(network_delegate),
129 last_notified_total_received_bytes_(0), 164 last_notified_total_received_bytes_(0),
130 last_notified_total_sent_bytes_(0), 165 last_notified_total_sent_bytes_(0),
166 prefilter_bytes_read_(0),
167 postfilter_bytes_read_(0),
131 weak_factory_(this) { 168 weak_factory_(this) {
132 base::PowerMonitor* power_monitor = base::PowerMonitor::Get(); 169 base::PowerMonitor* power_monitor = base::PowerMonitor::Get();
133 if (power_monitor) 170 if (power_monitor)
134 power_monitor->AddObserver(this); 171 power_monitor->AddObserver(this);
135 } 172 }
136 173
137 URLRequestJob::~URLRequestJob() { 174 URLRequestJob::~URLRequestJob() {
138 base::PowerMonitor* power_monitor = base::PowerMonitor::Get(); 175 base::PowerMonitor* power_monitor = base::PowerMonitor::Get();
139 if (power_monitor) 176 if (power_monitor)
140 power_monitor->RemoveObserver(this); 177 power_monitor->RemoveObserver(this);
(...skipping 15 matching lines...) Expand all
156 // Kill(). 193 // Kill().
157 // TODO(mmenke): The URLRequest is currently deleted before this method 194 // TODO(mmenke): The URLRequest is currently deleted before this method
158 // invokes its async callback whenever this is called by the URLRequest. 195 // invokes its async callback whenever this is called by the URLRequest.
159 // Try to simplify how cancellation works. 196 // Try to simplify how cancellation works.
160 NotifyCanceled(); 197 NotifyCanceled();
161 } 198 }
162 199
163 // This function calls ReadRawData to get stream data. If a filter exists, it 200 // This function calls ReadRawData to get stream data. If a filter exists, it
164 // passes the data to the attached filter. It then returns the output from 201 // passes the data to the attached filter. It then returns the output from
165 // filter back to the caller. 202 // filter back to the caller.
203 // This method passes reads down the filter chain, where they eventually end up
204 // at URLRequestJobStreamSource::Read, which calls back into
205 // URLRequestJob::ReadRawData.
166 bool URLRequestJob::Read(IOBuffer* buf, int buf_size, int *bytes_read) { 206 bool URLRequestJob::Read(IOBuffer* buf, int buf_size, int *bytes_read) {
167 DCHECK_LT(buf_size, 1000000); // Sanity check. 207 DCHECK_LT(buf_size, 1000000); // Sanity check.
168 DCHECK(buf); 208 DCHECK(buf);
169 DCHECK(bytes_read); 209 DCHECK(bytes_read);
170 DCHECK(!filtered_read_buffer_);
171 DCHECK_EQ(0, filtered_read_buffer_len_);
172
173 Error error = OK;
174 *bytes_read = 0; 210 *bytes_read = 0;
175 211
176 // Skip Filter if not present. 212 pending_read_buffer_ = buf;
177 if (!filter_) { 213 int result = source_->Read(buf, buf_size,
178 error = ReadRawDataHelper(buf, buf_size, bytes_read); 214 base::Bind(&URLRequestJob::SourceReadComplete,
179 } else { 215 weak_factory_.GetWeakPtr(), false));
180 // Save the caller's buffers while we do IO 216 if (result > 0)
181 // in the filter's buffers. 217 *bytes_read = result;
182 filtered_read_buffer_ = buf;
183 filtered_read_buffer_len_ = buf_size;
184 218
185 error = ReadFilteredData(bytes_read); 219 if (result == ERR_IO_PENDING) {
186 220 SetStatus(URLRequestStatus::FromError(ERR_IO_PENDING));
187 // Synchronous EOF from the filter. 221 return false;
188 if (error == OK && *bytes_read == 0)
189 DoneReading();
190 } 222 }
191 223
192 if (error == OK) { 224 SourceReadComplete(true, result);
193 // If URLRequestJob read zero bytes, the job is at EOF. 225 return result >= OK;
194 if (*bytes_read == 0) 226 }
195 NotifyDone(URLRequestStatus()); 227
196 } else if (error == ERR_IO_PENDING) { 228 void URLRequestJob::SourceReadComplete(bool synchronous, int result) {
197 SetStatus(URLRequestStatus::FromError(ERR_IO_PENDING)); 229 DCHECK_NE(ERR_IO_PENDING, result);
230
231 if (result > 0) {
232 postfilter_bytes_read_ += result;
233 if (request()->net_log().IsCapturing()) {
234 request()->net_log().AddByteTransferEvent(
235 NetLog::TYPE_URL_REQUEST_JOB_FILTERED_BYTES_READ, result,
236 pending_read_buffer_->data());
237 }
238 pending_read_buffer_ = nullptr;
239 SetStatus(URLRequestStatus());
240 if (!synchronous)
241 request_->NotifyReadCompleted(result);
242 } else if (result == 0) {
243 pending_read_buffer_ = nullptr;
244 DoneReading();
245 NotifyDone(URLRequestStatus());
246 if (!synchronous)
247 request_->NotifyReadCompleted(result);
198 } else { 248 } else {
199 NotifyDone(URLRequestStatus::FromError(error)); 249 pending_read_buffer_ = nullptr;
200 *bytes_read = -1; 250 NotifyDone(URLRequestStatus::FromError(result));
201 } 251 }
202 return error == OK;
203 } 252 }
204 253
205 void URLRequestJob::StopCaching() { 254 void URLRequestJob::StopCaching() {
206 // Nothing to do here. 255 // Nothing to do here.
207 } 256 }
208 257
209 bool URLRequestJob::GetFullRequestHeaders(HttpRequestHeaders* headers) const { 258 bool URLRequestJob::GetFullRequestHeaders(HttpRequestHeaders* headers) const {
210 // Most job types don't send request headers. 259 // Most job types don't send request headers.
211 return false; 260 return false;
212 } 261 }
(...skipping 26 matching lines...) Expand all
239 } 288 }
240 289
241 bool URLRequestJob::GetRemoteEndpoint(IPEndPoint* endpoint) const { 290 bool URLRequestJob::GetRemoteEndpoint(IPEndPoint* endpoint) const {
242 return false; 291 return false;
243 } 292 }
244 293
245 void URLRequestJob::PopulateNetErrorDetails(NetErrorDetails* details) const { 294 void URLRequestJob::PopulateNetErrorDetails(NetErrorDetails* details) const {
246 return; 295 return;
247 } 296 }
248 297
249 std::unique_ptr<Filter> URLRequestJob::SetupFilter() const { 298 std::unique_ptr<StreamSource> URLRequestJob::SetupSource() {
250 return nullptr; 299 std::unique_ptr<URLRequestJobStreamSource> source(
300 new URLRequestJobStreamSource(this));
301 return std::move(source);
mmenke 2016/07/21 18:14:09 std::move not needed. Should probably just be: r
xunjieli 2016/07/27 20:32:04 Done.
251 } 302 }
252 303
253 bool URLRequestJob::IsRedirectResponse(GURL* location, 304 bool URLRequestJob::IsRedirectResponse(GURL* location,
254 int* http_status_code) { 305 int* http_status_code) {
255 // For non-HTTP jobs, headers will be null. 306 // For non-HTTP jobs, headers will be null.
256 HttpResponseHeaders* headers = request_->response_headers(); 307 HttpResponseHeaders* headers = request_->response_headers();
257 if (!headers) 308 if (!headers)
258 return false; 309 return false;
259 310
260 std::string value; 311 std::string value;
(...skipping 228 matching lines...) Expand 10 before | Expand all | Expand 10 after
489 // Need to check for a NULL auth_info because the server may have failed 540 // Need to check for a NULL auth_info because the server may have failed
490 // to send a challenge with the 401 response. 541 // to send a challenge with the 401 response.
491 if (auth_info.get()) { 542 if (auth_info.get()) {
492 request_->NotifyAuthRequired(auth_info.get()); 543 request_->NotifyAuthRequired(auth_info.get());
493 // Wait for SetAuth or CancelAuth to be called. 544 // Wait for SetAuth or CancelAuth to be called.
494 return; 545 return;
495 } 546 }
496 } 547 }
497 548
498 has_handled_response_ = true; 549 has_handled_response_ = true;
499 if (request_->status().is_success()) 550 if (request_->status().is_success()) {
500 filter_ = SetupFilter(); 551 DCHECK(!source_);
501 552 source_ = SetupSource();
502 if (!filter_.get()) { 553 if (source_->type() == StreamSource::TYPE_NONE) {
mmenke 2016/07/21 18:14:09 SetupSource() can return nullptr on failure. Need
xunjieli 2016/07/27 20:32:04 Done. I added a test in url_request_http_job_unitt
503 std::string content_length; 554 std::string content_length;
504 request_->GetResponseHeaderByName("content-length", &content_length); 555 request_->GetResponseHeaderByName("content-length", &content_length);
505 if (!content_length.empty()) 556 if (!content_length.empty())
506 base::StringToInt64(content_length, &expected_content_size_); 557 base::StringToInt64(content_length, &expected_content_size_);
507 } else { 558 } else {
508 request_->net_log().AddEvent( 559 request_->net_log().AddEvent(NetLog::TYPE_URL_REQUEST_FILTERS_SET,
509 NetLog::TYPE_URL_REQUEST_FILTERS_SET, 560 base::Bind(&StreamSourceSetCallback,
510 base::Bind(&FiltersSetCallback, base::Unretained(filter_.get()))); 561 base::Unretained(source_.get())));
562 }
511 } 563 }
512 564
513 request_->NotifyResponseStarted(); 565 request_->NotifyResponseStarted();
514 566
515 // |this| may be destroyed at this point. 567 // |this| may be destroyed at this point.
516 } 568 }
517 569
518 void URLRequestJob::ConvertResultToError(int result, Error* error, int* count) { 570 void URLRequestJob::ConvertResultToError(int result, Error* error, int* count) {
519 if (result >= 0) { 571 if (result >= 0) {
520 *error = OK; 572 *error = OK;
521 *count = result; 573 *count = result;
522 } else { 574 } else {
523 *error = static_cast<Error>(result); 575 *error = static_cast<Error>(result);
524 *count = 0; 576 *count = 0;
525 } 577 }
526 } 578 }
527 579
528 void URLRequestJob::ReadRawDataComplete(int result) { 580 void URLRequestJob::ReadRawDataComplete(int result) {
529 DCHECK(request_->status().is_io_pending()); 581 DCHECK(request_->status().is_io_pending());
582 DCHECK_NE(ERR_IO_PENDING, result);
530 583
531 // TODO(cbentzel): Remove ScopedTracker below once crbug.com/475755 is fixed. 584 // TODO(cbentzel): Remove ScopedTracker below once crbug.com/475755 is fixed.
532 tracked_objects::ScopedTracker tracking_profile( 585 tracked_objects::ScopedTracker tracking_profile(
533 FROM_HERE_WITH_EXPLICIT_FUNCTION( 586 FROM_HERE_WITH_EXPLICIT_FUNCTION(
534 "475755 URLRequestJob::RawReadCompleted")); 587 "475755 URLRequestJob::RawReadCompleted"));
535 588
536 // TODO(darin): Bug 1004233. Re-enable this test once all of the chrome 589 // TODO(darin): Bug 1004233. Re-enable this test once all of the chrome
537 // unit_tests have been fixed to not trip this. 590 // unit_tests have been fixed to not trip this.
538 #if 0 591 #if 0
539 DCHECK(!request_->status().is_io_pending()); 592 DCHECK(!request_->status().is_io_pending());
540 #endif 593 #endif
541 // The headers should be complete before reads complete 594 // The headers should be complete before reads complete
542 DCHECK(has_handled_response_); 595 DCHECK(has_handled_response_);
543 596
544 Error error; 597 GatherRawReadStats(result);
545 int bytes_read;
546 ConvertResultToError(result, &error, &bytes_read);
547 598
548 DCHECK_NE(ERR_IO_PENDING, error); 599 // Notify StreamSource.
600 DCHECK(!read_raw_callback_.is_null());
549 601
550 GatherRawReadStats(error, bytes_read); 602 base::ResetAndReturn(&read_raw_callback_).Run(result);
551
552 if (filter_.get() && error == OK) {
553 // |bytes_read| being 0 indicates an EOF was received. ReadFilteredData
554 // can incorrectly return ERR_IO_PENDING when 0 bytes are passed to it, so
555 // just don't call into the filter in that case.
556 int filter_bytes_read = 0;
557 if (bytes_read > 0) {
558 // Tell the filter that it has more data.
559 PushInputToFilter(bytes_read);
560
561 // Filter the data.
562 error = ReadFilteredData(&filter_bytes_read);
563 }
564
565 if (error == OK && !filter_bytes_read)
566 DoneReading();
567
568 DVLOG(1) << __FUNCTION__ << "() "
569 << "\"" << request_->url().spec() << "\""
570 << " pre bytes read = " << bytes_read
571 << " pre total = " << prefilter_bytes_read_
572 << " post total = " << postfilter_bytes_read_;
573 bytes_read = filter_bytes_read;
574 } else {
575 DVLOG(1) << __FUNCTION__ << "() "
576 << "\"" << request_->url().spec() << "\""
577 << " pre bytes read = " << bytes_read
578 << " pre total = " << prefilter_bytes_read_
579 << " post total = " << postfilter_bytes_read_;
580 }
581
582 // Synchronize the URLRequest state machine with the URLRequestJob state
583 // machine. If this read succeeded, either the request is at EOF and the
584 // URLRequest state machine goes to 'finished', or it is not and the
585 // URLRequest state machine goes to 'success'. If the read failed, the
586 // URLRequest state machine goes directly to 'finished'. If filtered data is
587 // pending, then there's nothing to do, since the status of the request is
588 // already pending.
589 //
590 // Update the URLRequest's status first, so that NotifyReadCompleted has an
591 // accurate view of the request.
592 if (error == OK && bytes_read > 0) {
593 SetStatus(URLRequestStatus());
594 } else if (error != ERR_IO_PENDING) {
595 NotifyDone(URLRequestStatus::FromError(error));
596 }
597
598 // NotifyReadCompleted should be called after SetStatus or NotifyDone updates
599 // the status.
600 if (error == OK)
601 request_->NotifyReadCompleted(bytes_read);
602 603
603 // |this| may be destroyed at this point. 604 // |this| may be destroyed at this point.
604 } 605 }
605 606
606 void URLRequestJob::NotifyStartError(const URLRequestStatus &status) { 607 void URLRequestJob::NotifyStartError(const URLRequestStatus &status) {
607 DCHECK(!has_handled_response_); 608 DCHECK(!has_handled_response_);
608 DCHECK(request_->status().is_io_pending()); 609 DCHECK(request_->status().is_io_pending());
609 610
610 has_handled_response_ = true; 611 has_handled_response_ = true;
611 // There may be relevant information in the response info even in the 612 // There may be relevant information in the response info even in the
(...skipping 30 matching lines...) Expand all
642 request_->set_status(status); 643 request_->set_status(status);
643 } 644 }
644 645
645 // If the request succeeded (And wasn't cancelled) and the response code was 646 // If the request succeeded (And wasn't cancelled) and the response code was
646 // 4xx or 5xx, record whether or not the main frame was blank. This is 647 // 4xx or 5xx, record whether or not the main frame was blank. This is
647 // intended to be a short-lived histogram, used to figure out how important 648 // intended to be a short-lived histogram, used to figure out how important
648 // fixing http://crbug.com/331745 is. 649 // fixing http://crbug.com/331745 is.
649 if (request_->status().is_success()) { 650 if (request_->status().is_success()) {
650 int response_code = GetResponseCode(); 651 int response_code = GetResponseCode();
651 if (400 <= response_code && response_code <= 599) { 652 if (400 <= response_code && response_code <= 599) {
652 bool page_has_content = (postfilter_bytes_read_ != 0); 653 bool page_has_content = (postfilter_bytes_read() != 0);
653 if (request_->load_flags() & net::LOAD_MAIN_FRAME) { 654 if (request_->load_flags() & net::LOAD_MAIN_FRAME) {
654 UMA_HISTOGRAM_BOOLEAN("Net.ErrorResponseHasContentMainFrame", 655 UMA_HISTOGRAM_BOOLEAN("Net.ErrorResponseHasContentMainFrame",
655 page_has_content); 656 page_has_content);
656 } else { 657 } else {
657 UMA_HISTOGRAM_BOOLEAN("Net.ErrorResponseHasContentNonMainFrame", 658 UMA_HISTOGRAM_BOOLEAN("Net.ErrorResponseHasContentNonMainFrame",
658 page_has_content); 659 page_has_content);
659 } 660 }
660 } 661 }
661 } 662 }
662 663
(...skipping 45 matching lines...) Expand 10 before | Expand all | Expand 10 after
708 return 0; 709 return 0;
709 } 710 }
710 711
711 void URLRequestJob::DoneReading() { 712 void URLRequestJob::DoneReading() {
712 // Do nothing. 713 // Do nothing.
713 } 714 }
714 715
715 void URLRequestJob::DoneReadingRedirectResponse() { 716 void URLRequestJob::DoneReadingRedirectResponse() {
716 } 717 }
717 718
718 void URLRequestJob::PushInputToFilter(int bytes_read) {
719 DCHECK(filter_);
720 filter_->FlushStreamBuffer(bytes_read);
721 }
722
723 Error URLRequestJob::ReadFilteredData(int* bytes_read) {
724 DCHECK(filter_);
725 DCHECK(filtered_read_buffer_.get());
726 DCHECK_GT(filtered_read_buffer_len_, 0);
727 DCHECK_LT(filtered_read_buffer_len_, 1000000); // Sanity check.
728 DCHECK(!raw_read_buffer_);
729
730 *bytes_read = 0;
731 Error error = ERR_FAILED;
732
733 for (;;) {
734 if (is_done())
735 return OK;
736
737 if (!filter_needs_more_output_space_ && !filter_->stream_data_len()) {
738 // We don't have any raw data to work with, so read from the transaction.
739 int filtered_data_read;
740 error = ReadRawDataForFilter(&filtered_data_read);
741 // If ReadRawDataForFilter returned some data, fall through to the case
742 // below; otherwise, return early.
743 if (error != OK || filtered_data_read == 0)
744 return error;
745 filter_->FlushStreamBuffer(filtered_data_read);
746 }
747
748 if ((filter_->stream_data_len() || filter_needs_more_output_space_) &&
749 !is_done()) {
750 // Get filtered data.
751 int filtered_data_len = filtered_read_buffer_len_;
752 int output_buffer_size = filtered_data_len;
753 Filter::FilterStatus status =
754 filter_->ReadData(filtered_read_buffer_->data(), &filtered_data_len);
755
756 if (filter_needs_more_output_space_ && !filtered_data_len) {
757 // filter_needs_more_output_space_ was mistaken... there are no more
758 // bytes and we should have at least tried to fill up the filter's input
759 // buffer. Correct the state, and try again.
760 filter_needs_more_output_space_ = false;
761 continue;
762 }
763 filter_needs_more_output_space_ =
764 (filtered_data_len == output_buffer_size);
765
766 switch (status) {
767 case Filter::FILTER_DONE: {
768 filter_needs_more_output_space_ = false;
769 *bytes_read = filtered_data_len;
770 postfilter_bytes_read_ += filtered_data_len;
771 error = OK;
772 break;
773 }
774 case Filter::FILTER_NEED_MORE_DATA: {
775 // We have finished filtering all data currently in the buffer.
776 // There might be some space left in the output buffer. One can
777 // consider reading more data from the stream to feed the filter
778 // and filling up the output buffer. This leads to more complicated
779 // buffer management and data notification mechanisms.
780 // We can revisit this issue if there is a real perf need.
781 if (filtered_data_len > 0) {
782 *bytes_read = filtered_data_len;
783 postfilter_bytes_read_ += filtered_data_len;
784 error = OK;
785 } else {
786 // Read again since we haven't received enough data yet (e.g., we
787 // may not have a complete gzip header yet).
788 continue;
789 }
790 break;
791 }
792 case Filter::FILTER_OK: {
793 *bytes_read = filtered_data_len;
794 postfilter_bytes_read_ += filtered_data_len;
795 error = OK;
796 break;
797 }
798 case Filter::FILTER_ERROR: {
799 DVLOG(1) << __FUNCTION__ << "() "
800 << "\"" << request_->url().spec() << "\""
801 << " Filter Error";
802 filter_needs_more_output_space_ = false;
803 error = ERR_CONTENT_DECODING_FAILED;
804 UMA_HISTOGRAM_ENUMERATION("Net.ContentDecodingFailed.FilterType",
805 filter_->type(), Filter::FILTER_TYPE_MAX);
806 break;
807 }
808 default: {
809 NOTREACHED();
810 filter_needs_more_output_space_ = false;
811 error = ERR_FAILED;
812 break;
813 }
814 }
815
816 // If logging all bytes is enabled, log the filtered bytes read.
817 if (error == OK && filtered_data_len > 0 &&
818 request()->net_log().IsCapturing()) {
819 request()->net_log().AddByteTransferEvent(
820 NetLog::TYPE_URL_REQUEST_JOB_FILTERED_BYTES_READ, filtered_data_len,
821 filtered_read_buffer_->data());
822 }
823 } else {
824 // we are done, or there is no data left.
825 error = OK;
826 }
827 break;
828 }
829
830 if (error == OK) {
831 // When we successfully finished a read, we no longer need to save the
832 // caller's buffers. Release our reference.
833 filtered_read_buffer_ = NULL;
834 filtered_read_buffer_len_ = 0;
835 }
836 return error;
837 }
838
839 void URLRequestJob::DestroyFilters() {
840 filter_.reset();
841 }
842
843 const URLRequestStatus URLRequestJob::GetStatus() { 719 const URLRequestStatus URLRequestJob::GetStatus() {
844 return request_->status(); 720 return request_->status();
845 } 721 }
846 722
847 void URLRequestJob::SetStatus(const URLRequestStatus &status) { 723 void URLRequestJob::SetStatus(const URLRequestStatus &status) {
848 // An error status should never be replaced by a non-error status by a 724 // An error status should never be replaced by a non-error status by a
849 // URLRequestJob. URLRequest has some retry paths, but it resets the status 725 // URLRequestJob. URLRequest has some retry paths, but it resets the status
850 // itself, if needed. 726 // itself, if needed.
851 DCHECK(request_->status().is_io_pending() || 727 DCHECK(request_->status().is_io_pending() ||
852 request_->status().is_success() || 728 request_->status().is_success() ||
853 (!status.is_success() && !status.is_io_pending())); 729 (!status.is_success() && !status.is_io_pending()));
854 request_->set_status(status); 730 request_->set_status(status);
855 } 731 }
856 732
857 void URLRequestJob::SetProxyServer(const HostPortPair& proxy_server) { 733 void URLRequestJob::SetProxyServer(const HostPortPair& proxy_server) {
858 request_->proxy_server_ = proxy_server; 734 request_->proxy_server_ = proxy_server;
859 } 735 }
860 736
861 Error URLRequestJob::ReadRawDataForFilter(int* bytes_read) { 737 int64_t URLRequestJob::prefilter_bytes_read() const {
862 Error error = ERR_FAILED; 738 return base::checked_cast<int64_t>(prefilter_bytes_read_);
863 DCHECK(bytes_read);
864 DCHECK(filter_.get());
865
866 *bytes_read = 0;
867
868 // Get more pre-filtered data if needed.
869 // TODO(mbelshe): is it possible that the filter needs *MORE* data
870 // when there is some data already in the buffer?
871 if (!filter_->stream_data_len() && !is_done()) {
872 IOBuffer* stream_buffer = filter_->stream_buffer();
873 int stream_buffer_size = filter_->stream_buffer_size();
874 error = ReadRawDataHelper(stream_buffer, stream_buffer_size, bytes_read);
875 }
876 return error;
877 } 739 }
878 740
879 Error URLRequestJob::ReadRawDataHelper(IOBuffer* buf, 741 int64_t URLRequestJob::postfilter_bytes_read() const {
880 int buf_size, 742 return base::checked_cast<int64_t>(postfilter_bytes_read_);
881 int* bytes_read) { 743 }
744
745 int URLRequestJob::ReadRawDataHelper(IOBuffer* buf,
746 int buf_size,
747 const CompletionCallback& callback) {
882 DCHECK(!raw_read_buffer_); 748 DCHECK(!raw_read_buffer_);
883 749
884 // Keep a pointer to the read buffer, so we have access to it in 750 // Keep a pointer to the read buffer, so URLRequestJob::GatherRawReadStats()
885 // GatherRawReadStats() in the event that the read completes asynchronously. 751 // has access to it to log stats.
886 raw_read_buffer_ = buf; 752 raw_read_buffer_ = buf;
887 Error error; 753 int result = ReadRawData(buf, buf_size);
888 ConvertResultToError(ReadRawData(buf, buf_size), &error, bytes_read);
889 754
890 if (error != ERR_IO_PENDING) { 755 if (result != ERR_IO_PENDING) {
891 // If the read completes synchronously, either success or failure, invoke 756 // If the read completes synchronously, either success or failure, invoke
892 // GatherRawReadStats so we can account for the completed read. 757 // GatherRawReadStats so we can account for the completed read.
893 GatherRawReadStats(error, *bytes_read); 758 GatherRawReadStats(result);
759 } else {
760 read_raw_callback_ = callback;
894 } 761 }
895 return error; 762 return result;
896 } 763 }
897 764
898 void URLRequestJob::FollowRedirect(const RedirectInfo& redirect_info) { 765 void URLRequestJob::FollowRedirect(const RedirectInfo& redirect_info) {
899 int rv = request_->Redirect(redirect_info); 766 int rv = request_->Redirect(redirect_info);
900 if (rv != OK) 767 if (rv != OK)
901 NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, rv)); 768 NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, rv));
902 } 769 }
903 770
904 void URLRequestJob::GatherRawReadStats(Error error, int bytes_read) { 771 void URLRequestJob::GatherRawReadStats(int bytes_read) {
905 DCHECK(raw_read_buffer_ || bytes_read == 0); 772 DCHECK(raw_read_buffer_ || bytes_read == 0);
906 DCHECK_NE(ERR_IO_PENDING, error); 773 DCHECK_NE(ERR_IO_PENDING, bytes_read);
907
908 if (error != OK) {
909 raw_read_buffer_ = nullptr;
910 return;
911 }
912 // If |filter_| is non-NULL, bytes will be logged after it is applied
913 // instead.
914 if (!filter_.get() && bytes_read > 0 && request()->net_log().IsCapturing()) {
915 request()->net_log().AddByteTransferEvent(
916 NetLog::TYPE_URL_REQUEST_JOB_BYTES_READ, bytes_read,
917 raw_read_buffer_->data());
918 }
919 774
920 if (bytes_read > 0) { 775 if (bytes_read > 0) {
776 if (request()->net_log().IsCapturing()) {
777 request()->net_log().AddByteTransferEvent(
778 NetLog::TYPE_URL_REQUEST_JOB_BYTES_READ, bytes_read,
779 raw_read_buffer_->data());
780 }
921 RecordBytesRead(bytes_read); 781 RecordBytesRead(bytes_read);
922 } 782 }
923 raw_read_buffer_ = nullptr; 783 raw_read_buffer_ = nullptr;
924 } 784 }
925 785
926 void URLRequestJob::RecordBytesRead(int bytes_read) { 786 void URLRequestJob::RecordBytesRead(int bytes_read) {
927 DCHECK_GT(bytes_read, 0); 787 DCHECK_GT(bytes_read, 0);
928 prefilter_bytes_read_ += bytes_read; 788 prefilter_bytes_read_ += base::checked_cast<size_t>(bytes_read);
929 789
930 // On first read, notify NetworkQualityEstimator that response headers have 790 // On first read, notify NetworkQualityEstimator that response headers have
931 // been received. 791 // been received.
932 // TODO(tbansal): Move this to url_request_http_job.cc. This may catch 792 // TODO(tbansal): Move this to url_request_http_job.cc. This may catch
933 // Service Worker jobs twice. 793 // Service Worker jobs twice.
934 // If prefilter_bytes_read_ is equal to bytes_read, it indicates this is the 794 // If prefilter_bytes_read_ is equal to bytes_read, it indicates this is the
935 // first raw read of the response body. This is used as the signal that 795 // first raw read of the response body. This is used as the signal that
936 // response headers have been received. 796 // response headers have been received.
937 if (request_->context()->network_quality_estimator() && 797 if (request_->context()->network_quality_estimator() &&
938 prefilter_bytes_read_ == bytes_read) { 798 prefilter_bytes_read() == bytes_read) {
939 request_->context()->network_quality_estimator()->NotifyHeadersReceived( 799 request_->context()->network_quality_estimator()->NotifyHeadersReceived(
940 *request_); 800 *request_);
941 } 801 }
942 802
943 if (!filter_.get())
944 postfilter_bytes_read_ += bytes_read;
945 DVLOG(2) << __FUNCTION__ << "() " 803 DVLOG(2) << __FUNCTION__ << "() "
946 << "\"" << request_->url().spec() << "\"" 804 << "\"" << request_->url().spec() << "\""
947 << " pre bytes read = " << bytes_read 805 << " pre bytes read = " << bytes_read
948 << " pre total = " << prefilter_bytes_read_ 806 << " pre total = " << prefilter_bytes_read()
949 << " post total = " << postfilter_bytes_read_; 807 << " post total = " << postfilter_bytes_read();
950 UpdatePacketReadTimes(); // Facilitate stats recording if it is active. 808 UpdatePacketReadTimes(); // Facilitate stats recording if it is active.
951 809
952 // Notify observers if any additional network usage has occurred. Note that 810 // Notify observers if any additional network usage has occurred. Note that
953 // the number of received bytes over the network sent by this notification 811 // the number of received bytes over the network sent by this notification
954 // could be vastly different from |bytes_read|, such as when a large chunk of 812 // could be vastly different from |bytes_read|, such as when a large chunk of
955 // network bytes is received before multiple smaller raw reads are performed 813 // network bytes is received before multiple smaller raw reads are performed
956 // on it. 814 // on it.
957 MaybeNotifyNetworkBytes(); 815 MaybeNotifyNetworkBytes();
958 } 816 }
959 817
960 bool URLRequestJob::FilterHasData() {
961 return filter_.get() && filter_->stream_data_len();
962 }
963
// Intentionally a no-op in the base class: packet read-time bookkeeping is
// only meaningful for subclasses that track per-packet arrival stats
// (presumably overridden there — confirm against url_request_http_job.cc).
void URLRequestJob::UpdatePacketReadTimes() {
}
966 820
967 RedirectInfo URLRequestJob::ComputeRedirectInfo(const GURL& location, 821 RedirectInfo URLRequestJob::ComputeRedirectInfo(const GURL& location,
968 int http_status_code) { 822 int http_status_code) {
969 const GURL& url = request_->url(); 823 const GURL& url = request_->url();
970 824
971 RedirectInfo redirect_info; 825 RedirectInfo redirect_info;
972 826
973 redirect_info.status_code = http_status_code; 827 redirect_info.status_code = http_status_code;
(...skipping 66 matching lines...) Expand 10 before | Expand all | Expand 10 after
1040 int64_t total_sent_bytes = GetTotalSentBytes(); 894 int64_t total_sent_bytes = GetTotalSentBytes();
1041 DCHECK_GE(total_sent_bytes, last_notified_total_sent_bytes_); 895 DCHECK_GE(total_sent_bytes, last_notified_total_sent_bytes_);
1042 if (total_sent_bytes > last_notified_total_sent_bytes_) { 896 if (total_sent_bytes > last_notified_total_sent_bytes_) {
1043 network_delegate_->NotifyNetworkBytesSent( 897 network_delegate_->NotifyNetworkBytesSent(
1044 request_, total_sent_bytes - last_notified_total_sent_bytes_); 898 request_, total_sent_bytes - last_notified_total_sent_bytes_);
1045 } 899 }
1046 last_notified_total_sent_bytes_ = total_sent_bytes; 900 last_notified_total_sent_bytes_ = total_sent_bytes;
1047 } 901 }
1048 902
1049 } // namespace net 903 } // namespace net
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698