OLD | NEW |
---|---|
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "net/url_request/url_request_job.h" | 5 #include "net/url_request/url_request_job.h" |
6 | 6 |
7 #include <utility> | 7 #include <utility> |
8 | 8 |
9 #include "base/bind.h" | 9 #include "base/bind.h" |
10 #include "base/callback_helpers.h" | |
10 #include "base/compiler_specific.h" | 11 #include "base/compiler_specific.h" |
11 #include "base/location.h" | 12 #include "base/location.h" |
13 #include "base/memory/ptr_util.h" | |
12 #include "base/metrics/histogram_macros.h" | 14 #include "base/metrics/histogram_macros.h" |
13 #include "base/power_monitor/power_monitor.h" | 15 #include "base/power_monitor/power_monitor.h" |
14 #include "base/profiler/scoped_tracker.h" | 16 #include "base/profiler/scoped_tracker.h" |
15 #include "base/single_thread_task_runner.h" | 17 #include "base/single_thread_task_runner.h" |
16 #include "base/strings/string_number_conversions.h" | 18 #include "base/strings/string_number_conversions.h" |
17 #include "base/strings/string_split.h" | 19 #include "base/strings/string_split.h" |
18 #include "base/strings/string_util.h" | 20 #include "base/strings/string_util.h" |
19 #include "base/threading/thread_task_runner_handle.h" | 21 #include "base/threading/thread_task_runner_handle.h" |
20 #include "base/values.h" | 22 #include "base/values.h" |
21 #include "net/base/auth.h" | 23 #include "net/base/auth.h" |
22 #include "net/base/host_port_pair.h" | 24 #include "net/base/host_port_pair.h" |
23 #include "net/base/io_buffer.h" | 25 #include "net/base/io_buffer.h" |
24 #include "net/base/load_flags.h" | 26 #include "net/base/load_flags.h" |
25 #include "net/base/load_states.h" | 27 #include "net/base/load_states.h" |
26 #include "net/base/net_errors.h" | 28 #include "net/base/net_errors.h" |
27 #include "net/base/network_delegate.h" | 29 #include "net/base/network_delegate.h" |
28 #include "net/filter/filter.h" | |
29 #include "net/http/http_response_headers.h" | 30 #include "net/http/http_response_headers.h" |
31 #include "net/log/net_log.h" | |
30 #include "net/log/net_log_event_type.h" | 32 #include "net/log/net_log_event_type.h" |
31 #include "net/nqe/network_quality_estimator.h" | 33 #include "net/nqe/network_quality_estimator.h" |
32 #include "net/url_request/url_request_context.h" | 34 #include "net/url_request/url_request_context.h" |
33 | 35 |
34 namespace net { | 36 namespace net { |
35 | 37 |
36 namespace { | 38 namespace { |
37 | 39 |
38 // Callback for TYPE_URL_REQUEST_FILTERS_SET net-internals event. | 40 // Callback for TYPE_URL_REQUEST_FILTERS_SET net-internals event. |
39 std::unique_ptr<base::Value> FiltersSetCallback( | 41 std::unique_ptr<base::Value> SourceStreamSetCallback( |
40 Filter* filter, | 42 SourceStream* source_stream, |
41 NetLogCaptureMode /* capture_mode */) { | 43 NetLogCaptureMode /* capture_mode */) { |
42 std::unique_ptr<base::DictionaryValue> event_params( | 44 std::unique_ptr<base::DictionaryValue> event_params( |
43 new base::DictionaryValue()); | 45 new base::DictionaryValue()); |
44 event_params->SetString("filters", filter->OrderedFilterList()); | 46 event_params->SetString("filters", source_stream->Description()); |
45 return std::move(event_params); | 47 return std::move(event_params); |
46 } | 48 } |
47 | 49 |
48 std::string ComputeMethodForRedirect(const std::string& method, | 50 std::string ComputeMethodForRedirect(const std::string& method, |
49 int http_status_code) { | 51 int http_status_code) { |
50 // For 303 redirects, all request methods except HEAD are converted to GET, | 52 // For 303 redirects, all request methods except HEAD are converted to GET, |
51 // as per the latest httpbis draft. The draft also allows POST requests to | 53 // as per the latest httpbis draft. The draft also allows POST requests to |
52 // be converted to GETs when following 301/302 redirects, for historical | 54 // be converted to GETs when following 301/302 redirects, for historical |
53 // reasons. Most major browsers do this and so shall we. Both RFC 2616 and | 55 // reasons. Most major browsers do this and so shall we. Both RFC 2616 and |
54 // the httpbis draft say to prompt the user to confirm the generation of new | 56 // the httpbis draft say to prompt the user to confirm the generation of new |
(...skipping 51 matching lines...) | |
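The body of ComputeMethodForRedirect is folded away above. As a reading aid, here is a minimal sketch of the rewrite rules the comment describes, assuming the conventional 301/302/303 behavior rather than the verbatim elided body; the _Sketch name is illustrative, not net/ API:

```cpp
#include <string>

// Hedged sketch of the method-rewriting rules described in the comment above;
// the folded body may differ in detail.
std::string ComputeMethodForRedirect_Sketch(const std::string& method,
                                            int http_status_code) {
  // 303: every request method except HEAD is converted to GET.
  if (http_status_code == 303 && method != "HEAD")
    return "GET";
  // 301/302: only POST is rewritten to GET, for historical reasons.
  if ((http_status_code == 301 || http_status_code == 302) && method == "POST")
    return "GET";
  return method;
}
```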
106 if (base::CompareCaseInsensitiveASCII(token, "unsafe-url") == 0) { | 108 if (base::CompareCaseInsensitiveASCII(token, "unsafe-url") == 0) { |
107 new_policy = URLRequest::NEVER_CLEAR_REFERRER; | 109 new_policy = URLRequest::NEVER_CLEAR_REFERRER; |
108 continue; | 110 continue; |
109 } | 111 } |
110 } | 112 } |
111 return new_policy; | 113 return new_policy; |
112 } | 114 } |
113 | 115 |
114 } // namespace | 116 } // namespace |
115 | 117 |
118 // Each SourceStream owns the previous SourceStream in the chain, but the | 
119 // ultimate source is URLRequestJob, which has other ownership semantics, so | |
120 // this class is a proxy for URLRequestJob that is owned by the first stream | |
121 // (in dataflow order). | |
122 class URLRequestJob::URLRequestJobSourceStream : public SourceStream { | |
123 public: | |
124 explicit URLRequestJobSourceStream(URLRequestJob* job) | |
125 : SourceStream(SourceStream::TYPE_NONE), job_(job) { | |
126 DCHECK(job_); | |
127 } | |
128 | |
129 ~URLRequestJobSourceStream() override {} | |
130 | |
131 // SourceStream implementation: | |
132 int Read(IOBuffer* dest_buffer, | |
133 int buffer_size, | |
134 const CompletionCallback& callback) override { | |
135 DCHECK(job_); | |
136 return job_->ReadRawDataHelper(dest_buffer, buffer_size, callback); | |
137 } | |
138 | |
139 std::string Description() const override { return ""; } | |
mmenke
2016/09/27 19:13:22
std::string() is preferred. A naive compiler woul
xunjieli
2016/09/27 19:50:30
Done.
| |
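For readers outside the review thread: the suggestion above is about avoiding a const char* conversion in the empty-string return. A minimal illustration, assuming nothing beyond standard C++:

```cpp
#include <string>

// `return "";` converts a const char* to std::string, which a naive compiler
// lowers to a strlen-plus-copy of an empty literal on every call.
std::string DescriptionWithLiteral() { return ""; }

// `return std::string();` default-constructs the empty string directly.
std::string DescriptionWithDefaultCtor() { return std::string(); }
```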
140 | |
141 private: | |
142 // It is safe to keep a raw pointer because |job_| owns the last stream which | |
143 // indirectly owns |this|. Therefore, |job_| will not be destroyed while |this| | 
144 // is alive. | |
145 URLRequestJob* const job_; | |
146 | |
147 DISALLOW_COPY_AND_ASSIGN(URLRequestJobSourceStream); | |
148 }; | |
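To make the ownership comment above concrete, here is a hypothetical pass-through stream sketching the chain shape: each stream owns its upstream, so destroying the outermost stream tears the chain down to the job proxy. The class name is illustrative and not part of this CL:

```cpp
#include <memory>
#include <string>
#include <utility>

#include "net/base/completion_callback.h"
#include "net/base/io_buffer.h"
#include "net/filter/source_stream.h"

namespace net {

// Hypothetical stream illustrating only the ownership direction; a real
// filter would transform bytes instead of forwarding them unchanged.
class OwningPassThroughStream : public SourceStream {
 public:
  explicit OwningPassThroughStream(std::unique_ptr<SourceStream> upstream)
      : SourceStream(SourceStream::TYPE_NONE),
        upstream_(std::move(upstream)) {}

  int Read(IOBuffer* dest_buffer,
           int buffer_size,
           const CompletionCallback& callback) override {
    return upstream_->Read(dest_buffer, buffer_size, callback);
  }

  std::string Description() const override { return std::string(); }

 private:
  std::unique_ptr<SourceStream> upstream_;  // The stream closer to the job.
};

}  // namespace net
```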
149 | |
116 URLRequestJob::URLRequestJob(URLRequest* request, | 150 URLRequestJob::URLRequestJob(URLRequest* request, |
117 NetworkDelegate* network_delegate) | 151 NetworkDelegate* network_delegate) |
118 : request_(request), | 152 : request_(request), |
119 done_(false), | 153 done_(false), |
120 prefilter_bytes_read_(0), | 154 prefilter_bytes_read_(0), |
121 postfilter_bytes_read_(0), | 155 postfilter_bytes_read_(0), |
122 filter_needs_more_output_space_(false), | |
123 filtered_read_buffer_len_(0), | |
124 has_handled_response_(false), | 156 has_handled_response_(false), |
125 expected_content_size_(-1), | 157 expected_content_size_(-1), |
126 network_delegate_(network_delegate), | 158 network_delegate_(network_delegate), |
127 last_notified_total_received_bytes_(0), | 159 last_notified_total_received_bytes_(0), |
128 last_notified_total_sent_bytes_(0), | 160 last_notified_total_sent_bytes_(0), |
129 weak_factory_(this) { | 161 weak_factory_(this) { |
130 base::PowerMonitor* power_monitor = base::PowerMonitor::Get(); | 162 base::PowerMonitor* power_monitor = base::PowerMonitor::Get(); |
131 if (power_monitor) | 163 if (power_monitor) |
132 power_monitor->AddObserver(this); | 164 power_monitor->AddObserver(this); |
133 } | 165 } |
(...skipping 17 matching lines...) | |
151 weak_factory_.InvalidateWeakPtrs(); | 183 weak_factory_.InvalidateWeakPtrs(); |
152 // Make sure the URLRequest is notified that the job is done. This assumes | 184 // Make sure the URLRequest is notified that the job is done. This assumes |
153 // that the URLRequest took care of setting its error status before calling | 185 // that the URLRequest took care of setting its error status before calling |
154 // Kill(). | 186 // Kill(). |
155 // TODO(mmenke): The URLRequest is currently deleted before this method | 187 // TODO(mmenke): The URLRequest is currently deleted before this method |
156 // invokes its async callback whenever this is called by the URLRequest. | 188 // invokes its async callback whenever this is called by the URLRequest. |
157 // Try to simplify how cancellation works. | 189 // Try to simplify how cancellation works. |
158 NotifyCanceled(); | 190 NotifyCanceled(); |
159 } | 191 } |
160 | 192 |
161 // This function calls ReadRawData to get stream data. If a filter exists, it | 193 // This method passes reads down the filter chain, where they eventually end up |
162 // passes the data to the attached filter. It then returns the output from | 194 // at URLRequestJobSourceStream::Read, which calls back into |
163 // filter back to the caller. | 195 // URLRequestJob::ReadRawData. |
164 int URLRequestJob::Read(IOBuffer* buf, int buf_size) { | 196 int URLRequestJob::Read(IOBuffer* buf, int buf_size) { |
165 DCHECK_LT(buf_size, 1000000); // Sanity check. | 197 DCHECK_LT(buf_size, 1000000); // Sanity check. |
166 DCHECK(buf); | 198 DCHECK(buf); |
167 DCHECK(!filtered_read_buffer_); | |
168 DCHECK_EQ(0, filtered_read_buffer_len_); | |
169 | 199 |
170 Error error = OK; | 200 pending_read_buffer_ = buf; |
171 int bytes_read = 0; | 201 int result = source_stream_->Read( |
172 | 202 buf, buf_size, base::Bind(&URLRequestJob::SourceStreamReadComplete, |
173 // Skip Filter if not present. | 203 weak_factory_.GetWeakPtr(), false)); |
174 if (!filter_) { | 204 if (result == ERR_IO_PENDING) |
175 error = ReadRawDataHelper(buf, buf_size, &bytes_read); | |
176 } else { | |
177 // Save the caller's buffers while we do IO | |
178 // in the filter's buffers. | |
179 filtered_read_buffer_ = buf; | |
180 filtered_read_buffer_len_ = buf_size; | |
181 | |
182 error = ReadFilteredData(&bytes_read); | |
183 | |
184 // Synchronous EOF from the filter. | |
185 if (error == OK && bytes_read == 0) | |
186 DoneReading(); | |
187 } | |
188 | |
189 if (error == ERR_IO_PENDING) | |
190 return ERR_IO_PENDING; | 205 return ERR_IO_PENDING; |
191 | 206 |
192 if (error < 0) { | 207 SourceStreamReadComplete(true, result); |
193 NotifyDone(URLRequestStatus::FromError(error)); | 208 return result; |
194 return error; | |
195 } | |
196 | |
197 if (bytes_read == 0) | |
198 NotifyDone(URLRequestStatus()); | |
199 return bytes_read; | |
200 } | 209 } |
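A sketch of the caller-side contract of the rewritten Read(): ERR_IO_PENDING means completion arrives later through SourceStreamReadComplete and URLRequest::NotifyReadCompleted; any other value is final immediately. OnReadCompleted here is an assumed helper, not net/ API:

```cpp
#include "base/memory/ref_counted.h"
#include "net/base/io_buffer.h"
#include "net/base/net_errors.h"
#include "net/url_request/url_request_job.h"

void OnReadCompleted(int result);  // Assumed helper, defined elsewhere.

// Hedged sketch; the real caller is URLRequest.
void StartRead(net::URLRequestJob* job,
               const scoped_refptr<net::IOBuffer>& buffer) {
  int result = job->Read(buffer.get(), 4096);
  if (result == net::ERR_IO_PENDING)
    return;  // The job reports completion via NotifyReadCompleted() later.
  // Synchronous completion: >= 0 is a byte count (0 means EOF), < 0 an error.
  OnReadCompleted(result);
}
```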
201 | 210 |
202 void URLRequestJob::StopCaching() { | 211 void URLRequestJob::StopCaching() { |
203 // Nothing to do here. | 212 // Nothing to do here. |
204 } | 213 } |
205 | 214 |
206 bool URLRequestJob::GetFullRequestHeaders(HttpRequestHeaders* headers) const { | 215 bool URLRequestJob::GetFullRequestHeaders(HttpRequestHeaders* headers) const { |
207 // Most job types don't send request headers. | 216 // Most job types don't send request headers. |
208 return false; | 217 return false; |
209 } | 218 } |
(...skipping 22 matching lines...) | |
232 } | 241 } |
233 | 242 |
234 bool URLRequestJob::GetRemoteEndpoint(IPEndPoint* endpoint) const { | 243 bool URLRequestJob::GetRemoteEndpoint(IPEndPoint* endpoint) const { |
235 return false; | 244 return false; |
236 } | 245 } |
237 | 246 |
238 void URLRequestJob::PopulateNetErrorDetails(NetErrorDetails* details) const { | 247 void URLRequestJob::PopulateNetErrorDetails(NetErrorDetails* details) const { |
239 return; | 248 return; |
240 } | 249 } |
241 | 250 |
242 std::unique_ptr<Filter> URLRequestJob::SetupFilter() const { | |
243 return nullptr; | |
244 } | |
245 | |
246 bool URLRequestJob::IsRedirectResponse(GURL* location, | 251 bool URLRequestJob::IsRedirectResponse(GURL* location, |
247 int* http_status_code) { | 252 int* http_status_code) { |
248 // For non-HTTP jobs, headers will be null. | 253 // For non-HTTP jobs, headers will be null. |
249 HttpResponseHeaders* headers = request_->response_headers(); | 254 HttpResponseHeaders* headers = request_->response_headers(); |
250 if (!headers) | 255 if (!headers) |
251 return false; | 256 return false; |
252 | 257 |
253 std::string value; | 258 std::string value; |
254 if (!headers->IsRedirect(&value)) | 259 if (!headers->IsRedirect(&value)) |
255 return false; | 260 return false; |
(...skipping 219 matching lines...) | |
475 // Need to check for a NULL auth_info because the server may have failed | 480 // Need to check for a NULL auth_info because the server may have failed |
476 // to send a challenge with the 401 response. | 481 // to send a challenge with the 401 response. |
477 if (auth_info.get()) { | 482 if (auth_info.get()) { |
478 request_->NotifyAuthRequired(auth_info.get()); | 483 request_->NotifyAuthRequired(auth_info.get()); |
479 // Wait for SetAuth or CancelAuth to be called. | 484 // Wait for SetAuth or CancelAuth to be called. |
480 return; | 485 return; |
481 } | 486 } |
482 } | 487 } |
483 | 488 |
484 has_handled_response_ = true; | 489 has_handled_response_ = true; |
485 if (request_->status().is_success()) | 490 if (request_->status().is_success()) { |
486 filter_ = SetupFilter(); | 491 DCHECK(!source_stream_); |
492 source_stream_ = SetUpSourceStream(); | |
487 | 493 |
488 if (!filter_.get()) { | 494 if (source_stream_ == nullptr) { |
mmenke
2016/09/27 19:13:22
!source_stream_? You're DCHECKing using that just
xunjieli
2016/09/27 19:50:30
Done.
| |
489 std::string content_length; | 495 NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, |
490 request_->GetResponseHeaderByName("content-length", &content_length); | 496 ERR_CONTENT_DECODING_INIT_FAILED)); |
491 if (!content_length.empty()) | 497 return; |
492 base::StringToInt64(content_length, &expected_content_size_); | 498 } |
493 } else { | 499 |
494 request_->net_log().AddEvent( | 500 if (source_stream_->type() == SourceStream::TYPE_NONE) { |
495 NetLogEventType::URL_REQUEST_FILTERS_SET, | 501 std::string content_length; |
496 base::Bind(&FiltersSetCallback, base::Unretained(filter_.get()))); | 502 request_->GetResponseHeaderByName("content-length", &content_length); |
503 if (!content_length.empty()) | |
504 base::StringToInt64(content_length, &expected_content_size_); | |
505 } else { | |
506 request_->net_log().AddEvent( | |
507 NetLogEventType::URL_REQUEST_FILTERS_SET, | |
508 base::Bind(&SourceStreamSetCallback, | |
509 base::Unretained(source_stream_.get()))); | |
510 } | |
497 } | 511 } |
498 | 512 |
499 request_->NotifyResponseStarted(URLRequestStatus()); | 513 request_->NotifyResponseStarted(URLRequestStatus()); |
500 | 514 |
501 // |this| may be destroyed at this point. | 515 // |this| may be destroyed at this point. |
502 } | 516 } |
503 | 517 |
504 void URLRequestJob::ConvertResultToError(int result, Error* error, int* count) { | 518 void URLRequestJob::ConvertResultToError(int result, Error* error, int* count) { |
505 if (result >= 0) { | 519 if (result >= 0) { |
506 *error = OK; | 520 *error = OK; |
507 *count = result; | 521 *count = result; |
508 } else { | 522 } else { |
509 *error = static_cast<Error>(result); | 523 *error = static_cast<Error>(result); |
510 *count = 0; | 524 *count = 0; |
511 } | 525 } |
512 } | 526 } |
513 | 527 |
514 void URLRequestJob::ReadRawDataComplete(int result) { | 528 void URLRequestJob::ReadRawDataComplete(int result) { |
515 DCHECK(request_->status().is_io_pending()); | 529 DCHECK(request_->status().is_io_pending()); |
530 DCHECK_NE(ERR_IO_PENDING, result); | |
516 | 531 |
517 // TODO(cbentzel): Remove ScopedTracker below once crbug.com/475755 is fixed. | 532 // TODO(cbentzel): Remove ScopedTracker below once crbug.com/475755 is fixed. |
518 tracked_objects::ScopedTracker tracking_profile( | 533 tracked_objects::ScopedTracker tracking_profile( |
519 FROM_HERE_WITH_EXPLICIT_FUNCTION( | 534 FROM_HERE_WITH_EXPLICIT_FUNCTION( |
520 "475755 URLRequestJob::RawReadCompleted")); | 535 "475755 URLRequestJob::RawReadCompleted")); |
521 | 536 |
522 // The headers should be complete before reads complete | 537 // The headers should be complete before reads complete |
523 DCHECK(has_handled_response_); | 538 DCHECK(has_handled_response_); |
524 | 539 |
525 Error error; | 540 GatherRawReadStats(result); |
526 int bytes_read; | |
527 ConvertResultToError(result, &error, &bytes_read); | |
528 | 541 |
529 DCHECK_NE(ERR_IO_PENDING, error); | 542 // Notify SourceStream. |
543 DCHECK(!read_raw_callback_.is_null()); | |
530 | 544 |
531 GatherRawReadStats(error, bytes_read); | 545 base::ResetAndReturn(&read_raw_callback_).Run(result); |
532 | |
533 if (filter_.get() && error == OK) { | |
534 // |bytes_read| being 0 indicates an EOF was received. ReadFilteredData | |
535 // can incorrectly return ERR_IO_PENDING when 0 bytes are passed to it, so | |
536 // just don't call into the filter in that case. | |
537 int filter_bytes_read = 0; | |
538 if (bytes_read > 0) { | |
539 // Tell the filter that it has more data. | |
540 PushInputToFilter(bytes_read); | |
541 | |
542 // Filter the data. | |
543 error = ReadFilteredData(&filter_bytes_read); | |
544 } | |
545 | |
546 if (error == OK && !filter_bytes_read) | |
547 DoneReading(); | |
548 | |
549 DVLOG(1) << __func__ << "() \"" << request_->url().spec() << "\"" | |
550 << " pre bytes read = " << bytes_read | |
551 << " pre total = " << prefilter_bytes_read_ | |
552 << " post total = " << postfilter_bytes_read_; | |
553 bytes_read = filter_bytes_read; | |
554 } else { | |
555 DVLOG(1) << __func__ << "() \"" << request_->url().spec() << "\"" | |
556 << " pre bytes read = " << bytes_read | |
557 << " pre total = " << prefilter_bytes_read_ | |
558 << " post total = " << postfilter_bytes_read_; | |
559 } | |
560 | |
561 if (error == ERR_IO_PENDING) | |
562 return; | |
563 | |
564 if (bytes_read <= 0) | |
565 NotifyDone(URLRequestStatus::FromError(error)); | |
566 | |
567 if (error == OK) | |
568 request_->NotifyReadCompleted(bytes_read); | |
569 | |
570 // |this| may be destroyed at this point. | 546 // |this| may be destroyed at this point. |
571 } | 547 } |
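The subclass side of this contract, as a hypothetical job: ReadRawData() either completes synchronously or returns ERR_IO_PENDING and later reports through ReadRawDataComplete(), which now unblocks the stored read_raw_callback_. SketchJob and its transaction_ member are assumptions for illustration:

```cpp
#include <memory>

#include "base/bind.h"
#include "net/base/io_buffer.h"
#include "net/http/http_transaction.h"
#include "net/url_request/url_request_job.h"

namespace net {

// Hypothetical subclass; construction and transaction setup are elided.
class SketchJob : public URLRequestJob {
 public:
  using URLRequestJob::URLRequestJob;

 protected:
  int ReadRawData(IOBuffer* buf, int buf_size) override {
    // Either completes now (byte count or net error) or ERR_IO_PENDING.
    return transaction_->Read(
        buf, buf_size,
        base::Bind(&SketchJob::OnTransactionRead, base::Unretained(this)));
  }

 private:
  void OnTransactionRead(int result) {
    // Hands the asynchronous result back to the base class, which runs the
    // SourceStream callback stored in read_raw_callback_.
    ReadRawDataComplete(result);
  }

  std::unique_ptr<HttpTransaction> transaction_;  // Assumed I/O source.
};

}  // namespace net
```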
572 | 548 |
573 void URLRequestJob::NotifyStartError(const URLRequestStatus &status) { | 549 void URLRequestJob::NotifyStartError(const URLRequestStatus &status) { |
574 DCHECK(!has_handled_response_); | 550 DCHECK(!has_handled_response_); |
575 DCHECK(request_->status().is_io_pending()); | 551 DCHECK(request_->status().is_io_pending()); |
576 | 552 |
577 has_handled_response_ = true; | 553 has_handled_response_ = true; |
578 // There may be relevant information in the response info even in the | 554 // There may be relevant information in the response info even in the |
579 // error case. | 555 // error case. |
(...skipping 27 matching lines...) | |
607 request_->set_status(status); | 583 request_->set_status(status); |
608 } | 584 } |
609 | 585 |
610 // If the request succeeded (and wasn't cancelled) and the response code was | 586 // If the request succeeded (and wasn't cancelled) and the response code was |
611 // 4xx or 5xx, record whether or not the main frame was blank. This is | 587 // 4xx or 5xx, record whether or not the main frame was blank. This is |
612 // intended to be a short-lived histogram, used to figure out how important | 588 // intended to be a short-lived histogram, used to figure out how important |
613 // fixing http://crbug.com/331745 is. | 589 // fixing http://crbug.com/331745 is. |
614 if (request_->status().is_success()) { | 590 if (request_->status().is_success()) { |
615 int response_code = GetResponseCode(); | 591 int response_code = GetResponseCode(); |
616 if (400 <= response_code && response_code <= 599) { | 592 if (400 <= response_code && response_code <= 599) { |
617 bool page_has_content = (postfilter_bytes_read_ != 0); | 593 bool page_has_content = (postfilter_bytes_read() != 0); |
618 if (request_->load_flags() & net::LOAD_MAIN_FRAME_DEPRECATED) { | 594 if (request_->load_flags() & net::LOAD_MAIN_FRAME_DEPRECATED) { |
619 UMA_HISTOGRAM_BOOLEAN("Net.ErrorResponseHasContentMainFrame", | 595 UMA_HISTOGRAM_BOOLEAN("Net.ErrorResponseHasContentMainFrame", |
620 page_has_content); | 596 page_has_content); |
621 } else { | 597 } else { |
622 UMA_HISTOGRAM_BOOLEAN("Net.ErrorResponseHasContentNonMainFrame", | 598 UMA_HISTOGRAM_BOOLEAN("Net.ErrorResponseHasContentNonMainFrame", |
623 page_has_content); | 599 page_has_content); |
624 } | 600 } |
625 } | 601 } |
626 } | 602 } |
627 | 603 |
(...skipping 45 matching lines...) | |
673 return 0; | 649 return 0; |
674 } | 650 } |
675 | 651 |
676 void URLRequestJob::DoneReading() { | 652 void URLRequestJob::DoneReading() { |
677 // Do nothing. | 653 // Do nothing. |
678 } | 654 } |
679 | 655 |
680 void URLRequestJob::DoneReadingRedirectResponse() { | 656 void URLRequestJob::DoneReadingRedirectResponse() { |
681 } | 657 } |
682 | 658 |
683 void URLRequestJob::PushInputToFilter(int bytes_read) { | 659 std::unique_ptr<SourceStream> URLRequestJob::SetUpSourceStream() { |
684 DCHECK(filter_); | 660 return base::MakeUnique<URLRequestJobSourceStream>(this); |
685 filter_->FlushStreamBuffer(bytes_read); | |
686 } | |
687 | |
688 Error URLRequestJob::ReadFilteredData(int* bytes_read) { | |
689 DCHECK(filter_); | |
690 DCHECK(filtered_read_buffer_.get()); | |
691 DCHECK_GT(filtered_read_buffer_len_, 0); | |
692 DCHECK_LT(filtered_read_buffer_len_, 1000000); // Sanity check. | |
693 DCHECK(!raw_read_buffer_); | |
694 | |
695 *bytes_read = 0; | |
696 Error error = ERR_FAILED; | |
697 | |
698 for (;;) { | |
699 if (is_done()) | |
700 return OK; | |
701 | |
702 if (!filter_needs_more_output_space_ && !filter_->stream_data_len()) { | |
703 // We don't have any raw data to work with, so read from the transaction. | |
704 int filtered_data_read; | |
705 error = ReadRawDataForFilter(&filtered_data_read); | |
706 // If ReadRawDataForFilter returned some data, fall through to the case | |
707 // below; otherwise, return early. | |
708 if (error != OK || filtered_data_read == 0) | |
709 return error; | |
710 filter_->FlushStreamBuffer(filtered_data_read); | |
711 } | |
712 | |
713 if ((filter_->stream_data_len() || filter_needs_more_output_space_) && | |
714 !is_done()) { | |
715 // Get filtered data. | |
716 int filtered_data_len = filtered_read_buffer_len_; | |
717 int output_buffer_size = filtered_data_len; | |
718 Filter::FilterStatus status = | |
719 filter_->ReadData(filtered_read_buffer_->data(), &filtered_data_len); | |
720 | |
721 if (filter_needs_more_output_space_ && !filtered_data_len) { | |
722 // filter_needs_more_output_space_ was mistaken... there are no more | |
723 // bytes and we should have at least tried to fill up the filter's input | |
724 // buffer. Correct the state, and try again. | |
725 filter_needs_more_output_space_ = false; | |
726 continue; | |
727 } | |
728 filter_needs_more_output_space_ = | |
729 (filtered_data_len == output_buffer_size); | |
730 | |
731 switch (status) { | |
732 case Filter::FILTER_DONE: { | |
733 filter_needs_more_output_space_ = false; | |
734 *bytes_read = filtered_data_len; | |
735 postfilter_bytes_read_ += filtered_data_len; | |
736 error = OK; | |
737 break; | |
738 } | |
739 case Filter::FILTER_NEED_MORE_DATA: { | |
740 // We have finished filtering all data currently in the buffer. | |
741 // There might be some space left in the output buffer. One can | |
742 // consider reading more data from the stream to feed the filter | |
743 // and filling up the output buffer. This leads to more complicated | |
744 // buffer management and data notification mechanisms. | |
745 // We can revisit this issue if there is a real perf need. | |
746 if (filtered_data_len > 0) { | |
747 *bytes_read = filtered_data_len; | |
748 postfilter_bytes_read_ += filtered_data_len; | |
749 error = OK; | |
750 } else { | |
751 // Read again since we haven't received enough data yet (e.g., we | |
752 // may not have a complete gzip header yet). | |
753 continue; | |
754 } | |
755 break; | |
756 } | |
757 case Filter::FILTER_OK: { | |
758 *bytes_read = filtered_data_len; | |
759 postfilter_bytes_read_ += filtered_data_len; | |
760 error = OK; | |
761 break; | |
762 } | |
763 case Filter::FILTER_ERROR: { | |
764 DVLOG(1) << __func__ << "() \"" << request_->url().spec() << "\"" | |
765 << " Filter Error"; | |
766 filter_needs_more_output_space_ = false; | |
767 error = ERR_CONTENT_DECODING_FAILED; | |
768 UMA_HISTOGRAM_ENUMERATION("Net.ContentDecodingFailed.FilterType", | |
769 filter_->type(), Filter::FILTER_TYPE_MAX); | |
770 break; | |
771 } | |
772 default: { | |
773 NOTREACHED(); | |
774 filter_needs_more_output_space_ = false; | |
775 error = ERR_FAILED; | |
776 break; | |
777 } | |
778 } | |
779 | |
780 // If logging all bytes is enabled, log the filtered bytes read. | |
781 if (error == OK && filtered_data_len > 0 && | |
782 request()->net_log().IsCapturing()) { | |
783 request()->net_log().AddByteTransferEvent( | |
784 NetLogEventType::URL_REQUEST_JOB_FILTERED_BYTES_READ, | |
785 filtered_data_len, filtered_read_buffer_->data()); | |
786 } | |
787 } else { | |
788 // we are done, or there is no data left. | |
789 error = OK; | |
790 } | |
791 break; | |
792 } | |
793 | |
794 if (error == OK) { | |
795 // When we successfully finished a read, we no longer need to save the | |
796 // caller's buffers. Release our reference. | |
797 filtered_read_buffer_ = NULL; | |
798 filtered_read_buffer_len_ = 0; | |
799 } | |
800 return error; | |
801 } | |
802 | |
803 void URLRequestJob::DestroyFilters() { | |
804 filter_.reset(); | |
805 } | 661 } |
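With the default SetUpSourceStream() above returning the bare pass-through stream, a subclass can layer decoding on top. A hedged sketch, treating the GzipSourceStream::Create() signature as an assumption from the same CL series:

```cpp
#include <memory>
#include <utility>

#include "net/filter/gzip_source_stream.h"
#include "net/filter/source_stream.h"

namespace net {

// Sketch of what an HTTP job's SetUpSourceStream() override might return; a
// real override would choose the decoder from the Content-Encoding header.
std::unique_ptr<SourceStream> WrapWithGzip(
    std::unique_ptr<SourceStream> upstream) {
  return GzipSourceStream::Create(std::move(upstream),
                                  SourceStream::TYPE_GZIP);
}

}  // namespace net
```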
806 | 662 |
807 const URLRequestStatus URLRequestJob::GetStatus() { | 663 const URLRequestStatus URLRequestJob::GetStatus() { |
808 return request_->status(); | 664 return request_->status(); |
809 } | 665 } |
810 | 666 |
811 void URLRequestJob::SetProxyServer(const HostPortPair& proxy_server) { | 667 void URLRequestJob::SetProxyServer(const HostPortPair& proxy_server) { |
812 request_->proxy_server_ = proxy_server; | 668 request_->proxy_server_ = proxy_server; |
813 } | 669 } |
814 | 670 |
815 Error URLRequestJob::ReadRawDataForFilter(int* bytes_read) { | 671 void URLRequestJob::SourceStreamReadComplete(bool synchronous, int result) { |
816 Error error = ERR_FAILED; | 672 DCHECK_NE(ERR_IO_PENDING, result); |
817 DCHECK(bytes_read); | |
818 DCHECK(filter_.get()); | |
819 | 673 |
820 *bytes_read = 0; | 674 if (result > 0 && request()->net_log().IsCapturing()) { |
675 request()->net_log().AddByteTransferEvent( | |
676 NetLogEventType::URL_REQUEST_JOB_FILTERED_BYTES_READ, result, | |
677 pending_read_buffer_->data()); | |
678 } | |
679 pending_read_buffer_ = nullptr; | |
821 | 680 |
822 // Get more pre-filtered data if needed. | 681 if (result < 0) { |
823 // TODO(mbelshe): is it possible that the filter needs *MORE* data | 682 NotifyDone(URLRequestStatus::FromError(result)); |
824 // when there is some data already in the buffer? | 683 return; |
825 if (!filter_->stream_data_len() && !is_done()) { | |
826 IOBuffer* stream_buffer = filter_->stream_buffer(); | |
827 int stream_buffer_size = filter_->stream_buffer_size(); | |
828 error = ReadRawDataHelper(stream_buffer, stream_buffer_size, bytes_read); | |
829 } | 684 } |
830 return error; | 685 |
686 if (result > 0) { | |
687 postfilter_bytes_read_ += result; | |
688 if (!synchronous) | |
689 request_->NotifyReadCompleted(result); | |
690 return; | |
691 } | |
692 | |
693 // result == 0 | |
mmenke
2016/09/27 19:13:22
May want to make this a DCHECK instead of a commen
xunjieli
2016/09/27 19:50:30
Done.
| |
694 DoneReading(); | |
695 NotifyDone(URLRequestStatus()); | |
696 if (!synchronous) | |
697 request_->NotifyReadCompleted(result); | |
831 } | 698 } |
832 | 699 |
833 Error URLRequestJob::ReadRawDataHelper(IOBuffer* buf, | 700 int URLRequestJob::ReadRawDataHelper(IOBuffer* buf, |
834 int buf_size, | 701 int buf_size, |
835 int* bytes_read) { | 702 const CompletionCallback& callback) { |
836 DCHECK(!raw_read_buffer_); | 703 DCHECK(!raw_read_buffer_); |
837 | 704 |
838 // Keep a pointer to the read buffer, so we have access to it in | 705 // Keep a pointer to the read buffer, so URLRequestJob::GatherRawReadStats() |
839 // GatherRawReadStats() in the event that the read completes asynchronously. | 706 // has access to it to log stats. |
840 raw_read_buffer_ = buf; | 707 raw_read_buffer_ = buf; |
841 Error error; | |
842 ConvertResultToError(ReadRawData(buf, buf_size), &error, bytes_read); | |
843 | 708 |
844 if (error != ERR_IO_PENDING) { | 709 // TODO(xunjieli): Make ReadRawData take in a callback rather than requiring |
710 // subclass to call ReadRawDataComplete upon asynchronous completion. | |
711 int result = ReadRawData(buf, buf_size); | |
712 | |
713 if (result != ERR_IO_PENDING) { | |
845 // If the read completes synchronously, either success or failure, invoke | 714 // If the read completes synchronously, either success or failure, invoke |
846 // GatherRawReadStats so we can account for the completed read. | 715 // GatherRawReadStats so we can account for the completed read. |
847 GatherRawReadStats(error, *bytes_read); | 716 GatherRawReadStats(result); |
717 } else { | |
718 read_raw_callback_ = callback; | |
848 } | 719 } |
849 return error; | 720 return result; |
850 } | 721 } |
851 | 722 |
852 void URLRequestJob::FollowRedirect(const RedirectInfo& redirect_info) { | 723 void URLRequestJob::FollowRedirect(const RedirectInfo& redirect_info) { |
853 int rv = request_->Redirect(redirect_info); | 724 int rv = request_->Redirect(redirect_info); |
854 if (rv != OK) | 725 if (rv != OK) |
855 NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, rv)); | 726 NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, rv)); |
856 } | 727 } |
857 | 728 |
858 void URLRequestJob::GatherRawReadStats(Error error, int bytes_read) { | 729 void URLRequestJob::GatherRawReadStats(int bytes_read) { |
859 DCHECK(raw_read_buffer_ || bytes_read == 0); | 730 DCHECK(raw_read_buffer_ || bytes_read == 0); |
860 DCHECK_NE(ERR_IO_PENDING, error); | 731 DCHECK_NE(ERR_IO_PENDING, bytes_read); |
861 | |
862 if (error != OK) { | |
863 raw_read_buffer_ = nullptr; | |
864 return; | |
865 } | |
866 // If |filter_| is non-NULL, bytes will be logged after it is applied | |
867 // instead. | |
868 if (!filter_.get() && bytes_read > 0 && request()->net_log().IsCapturing()) { | |
869 request()->net_log().AddByteTransferEvent( | |
870 NetLogEventType::URL_REQUEST_JOB_BYTES_READ, bytes_read, | |
871 raw_read_buffer_->data()); | |
872 } | |
873 | 732 |
874 if (bytes_read > 0) { | 733 if (bytes_read > 0) { |
734 if (request()->net_log().IsCapturing()) { | |
mmenke
2016/09/27 19:13:22
Hrm...This no longer has the filter check, so when
xunjieli
2016/09/27 19:50:30
Done. Hmm.. Should we get rid of this block then?
mmenke
2016/09/27 20:33:01
I think the intention was to log both the filtered
xunjieli
2016/09/27 20:42:56
Ah, sorry. You are right. I am not what I was thin
| |
735 request()->net_log().AddByteTransferEvent( | |
736 NetLogEventType::URL_REQUEST_JOB_BYTES_READ, bytes_read, | |
737 raw_read_buffer_->data()); | |
738 } | |
875 RecordBytesRead(bytes_read); | 739 RecordBytesRead(bytes_read); |
876 } | 740 } |
877 raw_read_buffer_ = nullptr; | 741 raw_read_buffer_ = nullptr; |
878 } | 742 } |
879 | 743 |
880 void URLRequestJob::RecordBytesRead(int bytes_read) { | 744 void URLRequestJob::RecordBytesRead(int bytes_read) { |
881 DCHECK_GT(bytes_read, 0); | 745 DCHECK_GT(bytes_read, 0); |
882 prefilter_bytes_read_ += bytes_read; | 746 prefilter_bytes_read_ += base::checked_cast<size_t>(bytes_read); |
883 | 747 |
884 // On first read, notify NetworkQualityEstimator that response headers have | 748 // On first read, notify NetworkQualityEstimator that response headers have |
885 // been received. | 749 // been received. |
886 // TODO(tbansal): Move this to url_request_http_job.cc. This may catch | 750 // TODO(tbansal): Move this to url_request_http_job.cc. This may catch |
887 // Service Worker jobs twice. | 751 // Service Worker jobs twice. |
888 // If prefilter_bytes_read_ is equal to bytes_read, it indicates this is the | 752 // If prefilter_bytes_read_ is equal to bytes_read, it indicates this is the |
889 // first raw read of the response body. This is used as the signal that | 753 // first raw read of the response body. This is used as the signal that |
890 // response headers have been received. | 754 // response headers have been received. |
891 if (request_->context()->network_quality_estimator() && | 755 if (request_->context()->network_quality_estimator() && |
892 prefilter_bytes_read_ == bytes_read) { | 756 prefilter_bytes_read() == bytes_read) { |
893 request_->context()->network_quality_estimator()->NotifyHeadersReceived( | 757 request_->context()->network_quality_estimator()->NotifyHeadersReceived( |
894 *request_); | 758 *request_); |
895 } | 759 } |
896 | 760 |
897 if (!filter_.get()) | 761 DVLOG(2) << __FUNCTION__ << "() " |
898 postfilter_bytes_read_ += bytes_read; | 762 << "\"" << request_->url().spec() << "\"" |
899 DVLOG(2) << __func__ << "() \"" << request_->url().spec() << "\"" | |
900 << " pre bytes read = " << bytes_read | 763 << " pre bytes read = " << bytes_read |
901 << " pre total = " << prefilter_bytes_read_ | 764 << " pre total = " << prefilter_bytes_read() |
902 << " post total = " << postfilter_bytes_read_; | 765 << " post total = " << postfilter_bytes_read(); |
903 UpdatePacketReadTimes(); // Facilitate stats recording if it is active. | 766 UpdatePacketReadTimes(); // Facilitate stats recording if it is active. |
904 | 767 |
905 // Notify observers if any additional network usage has occurred. Note that | 768 // Notify observers if any additional network usage has occurred. Note that |
906 // the number of received bytes over the network sent by this notification | 769 // the number of received bytes over the network sent by this notification |
907 // could be vastly different from |bytes_read|, such as when a large chunk of | 770 // could be vastly different from |bytes_read|, such as when a large chunk of |
908 // network bytes is received before multiple smaller raw reads are performed | 771 // network bytes is received before multiple smaller raw reads are performed |
909 // on it. | 772 // on it. |
910 MaybeNotifyNetworkBytes(); | 773 MaybeNotifyNetworkBytes(); |
911 } | 774 } |
912 | 775 |
913 bool URLRequestJob::FilterHasData() { | |
914 return filter_.get() && filter_->stream_data_len(); | |
915 } | |
916 | |
917 void URLRequestJob::UpdatePacketReadTimes() { | 776 void URLRequestJob::UpdatePacketReadTimes() { |
918 } | 777 } |
919 | 778 |
920 RedirectInfo URLRequestJob::ComputeRedirectInfo(const GURL& location, | 779 RedirectInfo URLRequestJob::ComputeRedirectInfo(const GURL& location, |
921 int http_status_code) { | 780 int http_status_code) { |
922 const GURL& url = request_->url(); | 781 const GURL& url = request_->url(); |
923 | 782 |
924 RedirectInfo redirect_info; | 783 RedirectInfo redirect_info; |
925 | 784 |
926 redirect_info.status_code = http_status_code; | 785 redirect_info.status_code = http_status_code; |
(...skipping 67 matching lines...) | |
994 int64_t total_sent_bytes = GetTotalSentBytes(); | 853 int64_t total_sent_bytes = GetTotalSentBytes(); |
995 DCHECK_GE(total_sent_bytes, last_notified_total_sent_bytes_); | 854 DCHECK_GE(total_sent_bytes, last_notified_total_sent_bytes_); |
996 if (total_sent_bytes > last_notified_total_sent_bytes_) { | 855 if (total_sent_bytes > last_notified_total_sent_bytes_) { |
997 network_delegate_->NotifyNetworkBytesSent( | 856 network_delegate_->NotifyNetworkBytesSent( |
998 request_, total_sent_bytes - last_notified_total_sent_bytes_); | 857 request_, total_sent_bytes - last_notified_total_sent_bytes_); |
999 } | 858 } |
1000 last_notified_total_sent_bytes_ = total_sent_bytes; | 859 last_notified_total_sent_bytes_ = total_sent_bytes; |
1001 } | 860 } |
1002 | 861 |
1003 } // namespace net | 862 } // namespace net |