OLD | NEW |
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "net/url_request/url_request_job.h" | 5 #include "net/url_request/url_request_job.h" |
6 | 6 |
7 #include <utility> | 7 #include <utility> |
8 | 8 |
9 #include "base/bind.h" | 9 #include "base/bind.h" |
| 10 #include "base/callback_helpers.h" |
10 #include "base/compiler_specific.h" | 11 #include "base/compiler_specific.h" |
11 #include "base/location.h" | 12 #include "base/location.h" |
| 13 #include "base/memory/ptr_util.h" |
12 #include "base/metrics/histogram_macros.h" | 14 #include "base/metrics/histogram_macros.h" |
13 #include "base/power_monitor/power_monitor.h" | 15 #include "base/power_monitor/power_monitor.h" |
14 #include "base/profiler/scoped_tracker.h" | 16 #include "base/profiler/scoped_tracker.h" |
15 #include "base/single_thread_task_runner.h" | 17 #include "base/single_thread_task_runner.h" |
16 #include "base/strings/string_number_conversions.h" | 18 #include "base/strings/string_number_conversions.h" |
17 #include "base/strings/string_split.h" | 19 #include "base/strings/string_split.h" |
18 #include "base/strings/string_util.h" | 20 #include "base/strings/string_util.h" |
19 #include "base/threading/thread_task_runner_handle.h" | 21 #include "base/threading/thread_task_runner_handle.h" |
20 #include "base/values.h" | 22 #include "base/values.h" |
21 #include "net/base/auth.h" | 23 #include "net/base/auth.h" |
22 #include "net/base/host_port_pair.h" | 24 #include "net/base/host_port_pair.h" |
23 #include "net/base/io_buffer.h" | 25 #include "net/base/io_buffer.h" |
24 #include "net/base/load_flags.h" | 26 #include "net/base/load_flags.h" |
25 #include "net/base/load_states.h" | 27 #include "net/base/load_states.h" |
26 #include "net/base/net_errors.h" | 28 #include "net/base/net_errors.h" |
27 #include "net/base/network_delegate.h" | 29 #include "net/base/network_delegate.h" |
28 #include "net/filter/filter.h" | |
29 #include "net/http/http_response_headers.h" | 30 #include "net/http/http_response_headers.h" |
30 #include "net/log/net_log.h" | 31 #include "net/log/net_log.h" |
31 #include "net/log/net_log_capture_mode.h" | 32 #include "net/log/net_log_capture_mode.h" |
32 #include "net/log/net_log_event_type.h" | 33 #include "net/log/net_log_event_type.h" |
33 #include "net/log/net_log_with_source.h" | 34 #include "net/log/net_log_with_source.h" |
34 #include "net/nqe/network_quality_estimator.h" | 35 #include "net/nqe/network_quality_estimator.h" |
35 #include "net/proxy/proxy_server.h" | 36 #include "net/proxy/proxy_server.h" |
36 #include "net/url_request/url_request_context.h" | 37 #include "net/url_request/url_request_context.h" |
37 | 38 |
38 namespace net { | 39 namespace net { |
39 | 40 |
40 namespace { | 41 namespace { |
41 | 42 |
42 // Callback for TYPE_URL_REQUEST_FILTERS_SET net-internals event. | 43 // Callback for TYPE_URL_REQUEST_FILTERS_SET net-internals event. |
43 std::unique_ptr<base::Value> FiltersSetCallback( | 44 std::unique_ptr<base::Value> SourceStreamSetCallback( |
44 Filter* filter, | 45 SourceStream* source_stream, |
45 NetLogCaptureMode /* capture_mode */) { | 46 NetLogCaptureMode /* capture_mode */) { |
46 std::unique_ptr<base::DictionaryValue> event_params( | 47 std::unique_ptr<base::DictionaryValue> event_params( |
47 new base::DictionaryValue()); | 48 new base::DictionaryValue()); |
48 event_params->SetString("filters", filter->OrderedFilterList()); | 49 event_params->SetString("filters", source_stream->Description()); |
49 return std::move(event_params); | 50 return std::move(event_params); |
50 } | 51 } |
51 | 52 |
52 std::string ComputeMethodForRedirect(const std::string& method, | 53 std::string ComputeMethodForRedirect(const std::string& method, |
53 int http_status_code) { | 54 int http_status_code) { |
54 // For 303 redirects, all request methods except HEAD are converted to GET, | 55 // For 303 redirects, all request methods except HEAD are converted to GET, |
55 // as per the latest httpbis draft. The draft also allows POST requests to | 56 // as per the latest httpbis draft. The draft also allows POST requests to |
56 // be converted to GETs when following 301/302 redirects, for historical | 57 // be converted to GETs when following 301/302 redirects, for historical |
57 // reasons. Most major browsers do this and so shall we. Both RFC 2616 and | 58 // reasons. Most major browsers do this and so shall we. Both RFC 2616 and |
58 // the httpbis draft say to prompt the user to confirm the generation of new | 59 // the httpbis draft say to prompt the user to confirm the generation of new |
(...skipping 51 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
110 if (base::CompareCaseInsensitiveASCII(token, "unsafe-url") == 0) { | 111 if (base::CompareCaseInsensitiveASCII(token, "unsafe-url") == 0) { |
111 new_policy = URLRequest::NEVER_CLEAR_REFERRER; | 112 new_policy = URLRequest::NEVER_CLEAR_REFERRER; |
112 continue; | 113 continue; |
113 } | 114 } |
114 } | 115 } |
115 return new_policy; | 116 return new_policy; |
116 } | 117 } |
117 | 118 |
118 } // namespace | 119 } // namespace |
119 | 120 |
 | 121 // Each SourceStream owns the previous SourceStream in the chain, but the |
| 122 // ultimate source is URLRequestJob, which has other ownership semantics, so |
| 123 // this class is a proxy for URLRequestJob that is owned by the first stream |
| 124 // (in dataflow order). |
| 125 class URLRequestJob::URLRequestJobSourceStream : public SourceStream { |
| 126 public: |
| 127 explicit URLRequestJobSourceStream(URLRequestJob* job) |
| 128 : SourceStream(SourceStream::TYPE_NONE), job_(job) { |
| 129 DCHECK(job_); |
| 130 } |
| 131 |
| 132 ~URLRequestJobSourceStream() override {} |
| 133 |
| 134 // SourceStream implementation: |
| 135 int Read(IOBuffer* dest_buffer, |
| 136 int buffer_size, |
| 137 const CompletionCallback& callback) override { |
| 138 DCHECK(job_); |
| 139 return job_->ReadRawDataHelper(dest_buffer, buffer_size, callback); |
| 140 } |
| 141 |
| 142 std::string Description() const override { return std::string(); } |
| 143 |
| 144 private: |
| 145 // It is safe to keep a raw pointer because |job_| owns the last stream which |
 | 146 // indirectly owns |this|. Therefore, |job_| will not be destroyed while |this| |
 | 147 // is alive. |
| 148 URLRequestJob* const job_; |
| 149 |
| 150 DISALLOW_COPY_AND_ASSIGN(URLRequestJobSourceStream); |
| 151 }; |
| 152 |
120 URLRequestJob::URLRequestJob(URLRequest* request, | 153 URLRequestJob::URLRequestJob(URLRequest* request, |
121 NetworkDelegate* network_delegate) | 154 NetworkDelegate* network_delegate) |
122 : request_(request), | 155 : request_(request), |
123 done_(false), | 156 done_(false), |
124 prefilter_bytes_read_(0), | 157 prefilter_bytes_read_(0), |
125 postfilter_bytes_read_(0), | 158 postfilter_bytes_read_(0), |
126 filter_needs_more_output_space_(false), | |
127 filtered_read_buffer_len_(0), | |
128 has_handled_response_(false), | 159 has_handled_response_(false), |
129 expected_content_size_(-1), | 160 expected_content_size_(-1), |
130 network_delegate_(network_delegate), | 161 network_delegate_(network_delegate), |
131 last_notified_total_received_bytes_(0), | 162 last_notified_total_received_bytes_(0), |
132 last_notified_total_sent_bytes_(0), | 163 last_notified_total_sent_bytes_(0), |
133 weak_factory_(this) { | 164 weak_factory_(this) { |
134 base::PowerMonitor* power_monitor = base::PowerMonitor::Get(); | 165 base::PowerMonitor* power_monitor = base::PowerMonitor::Get(); |
135 if (power_monitor) | 166 if (power_monitor) |
136 power_monitor->AddObserver(this); | 167 power_monitor->AddObserver(this); |
137 } | 168 } |
(...skipping 17 matching lines...) Expand all Loading... |
155 weak_factory_.InvalidateWeakPtrs(); | 186 weak_factory_.InvalidateWeakPtrs(); |
156 // Make sure the URLRequest is notified that the job is done. This assumes | 187 // Make sure the URLRequest is notified that the job is done. This assumes |
157 // that the URLRequest took care of setting its error status before calling | 188 // that the URLRequest took care of setting its error status before calling |
158 // Kill(). | 189 // Kill(). |
159 // TODO(mmenke): The URLRequest is currently deleted before this method | 190 // TODO(mmenke): The URLRequest is currently deleted before this method |
160 // invokes its async callback whenever this is called by the URLRequest. | 191 // invokes its async callback whenever this is called by the URLRequest. |
161 // Try to simplify how cancellation works. | 192 // Try to simplify how cancellation works. |
162 NotifyCanceled(); | 193 NotifyCanceled(); |
163 } | 194 } |
164 | 195 |
165 // This function calls ReadRawData to get stream data. If a filter exists, it | 196 // This method passes reads down the filter chain, where they eventually end up |
166 // passes the data to the attached filter. It then returns the output from | 197 // at URLRequestJobSourceStream::Read, which calls back into |
167 // filter back to the caller. | 198 // URLRequestJob::ReadRawData. |
168 int URLRequestJob::Read(IOBuffer* buf, int buf_size) { | 199 int URLRequestJob::Read(IOBuffer* buf, int buf_size) { |
169 DCHECK_LT(buf_size, 1000000); // Sanity check. | 200 DCHECK_LT(buf_size, 1000000); // Sanity check. |
170 DCHECK(buf); | 201 DCHECK(buf); |
171 DCHECK(!filtered_read_buffer_); | |
172 DCHECK_EQ(0, filtered_read_buffer_len_); | |
173 | 202 |
174 Error error = OK; | 203 pending_read_buffer_ = buf; |
175 int bytes_read = 0; | 204 int result = source_stream_->Read( |
176 | 205 buf, buf_size, base::Bind(&URLRequestJob::SourceStreamReadComplete, |
177 // Skip Filter if not present. | 206 weak_factory_.GetWeakPtr(), false)); |
178 if (!filter_) { | 207 if (result == ERR_IO_PENDING) |
179 error = ReadRawDataHelper(buf, buf_size, &bytes_read); | |
180 } else { | |
181 // Save the caller's buffers while we do IO | |
182 // in the filter's buffers. | |
183 filtered_read_buffer_ = buf; | |
184 filtered_read_buffer_len_ = buf_size; | |
185 | |
186 error = ReadFilteredData(&bytes_read); | |
187 | |
188 // Synchronous EOF from the filter. | |
189 if (error == OK && bytes_read == 0) | |
190 DoneReading(); | |
191 } | |
192 | |
193 if (error == ERR_IO_PENDING) | |
194 return ERR_IO_PENDING; | 208 return ERR_IO_PENDING; |
195 | 209 |
196 if (error < 0) { | 210 SourceStreamReadComplete(true, result); |
197 NotifyDone(URLRequestStatus::FromError(error)); | 211 return result; |
198 return error; | |
199 } | |
200 | |
201 if (bytes_read == 0) | |
202 NotifyDone(URLRequestStatus()); | |
203 return bytes_read; | |
204 } | 212 } |
205 | 213 |
206 void URLRequestJob::StopCaching() { | 214 void URLRequestJob::StopCaching() { |
207 // Nothing to do here. | 215 // Nothing to do here. |
208 } | 216 } |
209 | 217 |
210 bool URLRequestJob::GetFullRequestHeaders(HttpRequestHeaders* headers) const { | 218 bool URLRequestJob::GetFullRequestHeaders(HttpRequestHeaders* headers) const { |
211 // Most job types don't send request headers. | 219 // Most job types don't send request headers. |
212 return false; | 220 return false; |
213 } | 221 } |
(...skipping 22 matching lines...) Expand all Loading... |
236 } | 244 } |
237 | 245 |
238 bool URLRequestJob::GetRemoteEndpoint(IPEndPoint* endpoint) const { | 246 bool URLRequestJob::GetRemoteEndpoint(IPEndPoint* endpoint) const { |
239 return false; | 247 return false; |
240 } | 248 } |
241 | 249 |
242 void URLRequestJob::PopulateNetErrorDetails(NetErrorDetails* details) const { | 250 void URLRequestJob::PopulateNetErrorDetails(NetErrorDetails* details) const { |
243 return; | 251 return; |
244 } | 252 } |
245 | 253 |
246 std::unique_ptr<Filter> URLRequestJob::SetupFilter() const { | |
247 return nullptr; | |
248 } | |
249 | |
250 bool URLRequestJob::IsRedirectResponse(GURL* location, | 254 bool URLRequestJob::IsRedirectResponse(GURL* location, |
251 int* http_status_code) { | 255 int* http_status_code) { |
252 // For non-HTTP jobs, headers will be null. | 256 // For non-HTTP jobs, headers will be null. |
253 HttpResponseHeaders* headers = request_->response_headers(); | 257 HttpResponseHeaders* headers = request_->response_headers(); |
254 if (!headers) | 258 if (!headers) |
255 return false; | 259 return false; |
256 | 260 |
257 std::string value; | 261 std::string value; |
258 if (!headers->IsRedirect(&value)) | 262 if (!headers->IsRedirect(&value)) |
259 return false; | 263 return false; |
(...skipping 219 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
479 // Need to check for a NULL auth_info because the server may have failed | 483 // Need to check for a NULL auth_info because the server may have failed |
480 // to send a challenge with the 401 response. | 484 // to send a challenge with the 401 response. |
481 if (auth_info.get()) { | 485 if (auth_info.get()) { |
482 request_->NotifyAuthRequired(auth_info.get()); | 486 request_->NotifyAuthRequired(auth_info.get()); |
483 // Wait for SetAuth or CancelAuth to be called. | 487 // Wait for SetAuth or CancelAuth to be called. |
484 return; | 488 return; |
485 } | 489 } |
486 } | 490 } |
487 | 491 |
488 has_handled_response_ = true; | 492 has_handled_response_ = true; |
489 if (request_->status().is_success()) | 493 if (request_->status().is_success()) { |
490 filter_ = SetupFilter(); | 494 DCHECK(!source_stream_); |
| 495 source_stream_ = SetUpSourceStream(); |
491 | 496 |
492 if (!filter_.get()) { | 497 if (!source_stream_) { |
493 std::string content_length; | 498 NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, |
494 request_->GetResponseHeaderByName("content-length", &content_length); | 499 ERR_CONTENT_DECODING_INIT_FAILED)); |
495 if (!content_length.empty()) | 500 return; |
496 base::StringToInt64(content_length, &expected_content_size_); | 501 } |
497 } else { | 502 if (source_stream_->type() == SourceStream::TYPE_NONE) { |
498 request_->net_log().AddEvent( | 503 std::string content_length; |
499 NetLogEventType::URL_REQUEST_FILTERS_SET, | 504 request_->GetResponseHeaderByName("content-length", &content_length); |
500 base::Bind(&FiltersSetCallback, base::Unretained(filter_.get()))); | 505 if (!content_length.empty()) |
| 506 base::StringToInt64(content_length, &expected_content_size_); |
| 507 } else { |
| 508 request_->net_log().AddEvent( |
| 509 NetLogEventType::URL_REQUEST_FILTERS_SET, |
| 510 base::Bind(&SourceStreamSetCallback, |
| 511 base::Unretained(source_stream_.get()))); |
| 512 } |
501 } | 513 } |
502 | 514 |
503 request_->NotifyResponseStarted(URLRequestStatus()); | 515 request_->NotifyResponseStarted(URLRequestStatus()); |
504 | 516 |
505 // |this| may be destroyed at this point. | 517 // |this| may be destroyed at this point. |
506 } | 518 } |
507 | 519 |
508 void URLRequestJob::ConvertResultToError(int result, Error* error, int* count) { | 520 void URLRequestJob::ConvertResultToError(int result, Error* error, int* count) { |
509 if (result >= 0) { | 521 if (result >= 0) { |
510 *error = OK; | 522 *error = OK; |
511 *count = result; | 523 *count = result; |
512 } else { | 524 } else { |
513 *error = static_cast<Error>(result); | 525 *error = static_cast<Error>(result); |
514 *count = 0; | 526 *count = 0; |
515 } | 527 } |
516 } | 528 } |
517 | 529 |
518 void URLRequestJob::ReadRawDataComplete(int result) { | 530 void URLRequestJob::ReadRawDataComplete(int result) { |
519 DCHECK(request_->status().is_io_pending()); | 531 DCHECK(request_->status().is_io_pending()); |
| 532 DCHECK_NE(ERR_IO_PENDING, result); |
520 | 533 |
521 // TODO(cbentzel): Remove ScopedTracker below once crbug.com/475755 is fixed. | 534 // TODO(cbentzel): Remove ScopedTracker below once crbug.com/475755 is fixed. |
522 tracked_objects::ScopedTracker tracking_profile( | 535 tracked_objects::ScopedTracker tracking_profile( |
523 FROM_HERE_WITH_EXPLICIT_FUNCTION( | 536 FROM_HERE_WITH_EXPLICIT_FUNCTION( |
524 "475755 URLRequestJob::RawReadCompleted")); | 537 "475755 URLRequestJob::RawReadCompleted")); |
525 | 538 |
526 // The headers should be complete before reads complete | 539 // The headers should be complete before reads complete |
527 DCHECK(has_handled_response_); | 540 DCHECK(has_handled_response_); |
528 | 541 |
529 Error error; | 542 GatherRawReadStats(result); |
530 int bytes_read; | |
531 ConvertResultToError(result, &error, &bytes_read); | |
532 | 543 |
533 DCHECK_NE(ERR_IO_PENDING, error); | 544 // Notify SourceStream. |
| 545 DCHECK(!read_raw_callback_.is_null()); |
534 | 546 |
535 GatherRawReadStats(error, bytes_read); | 547 base::ResetAndReturn(&read_raw_callback_).Run(result); |
536 | |
537 if (filter_.get() && error == OK) { | |
538 // |bytes_read| being 0 indicates an EOF was received. ReadFilteredData | |
539 // can incorrectly return ERR_IO_PENDING when 0 bytes are passed to it, so | |
540 // just don't call into the filter in that case. | |
541 int filter_bytes_read = 0; | |
542 if (bytes_read > 0) { | |
543 // Tell the filter that it has more data. | |
544 PushInputToFilter(bytes_read); | |
545 | |
546 // Filter the data. | |
547 error = ReadFilteredData(&filter_bytes_read); | |
548 } | |
549 | |
550 if (error == OK && !filter_bytes_read) | |
551 DoneReading(); | |
552 | |
553 DVLOG(1) << __func__ << "() \"" << request_->url().spec() << "\"" | |
554 << " pre bytes read = " << bytes_read | |
555 << " pre total = " << prefilter_bytes_read_ | |
556 << " post total = " << postfilter_bytes_read_; | |
557 bytes_read = filter_bytes_read; | |
558 } else { | |
559 DVLOG(1) << __func__ << "() \"" << request_->url().spec() << "\"" | |
560 << " pre bytes read = " << bytes_read | |
561 << " pre total = " << prefilter_bytes_read_ | |
562 << " post total = " << postfilter_bytes_read_; | |
563 } | |
564 | |
565 if (error == ERR_IO_PENDING) | |
566 return; | |
567 | |
568 if (bytes_read <= 0) | |
569 NotifyDone(URLRequestStatus::FromError(error)); | |
570 | |
571 if (error == OK) | |
572 request_->NotifyReadCompleted(bytes_read); | |
573 | |
574 // |this| may be destroyed at this point. | 548 // |this| may be destroyed at this point. |
575 } | 549 } |
576 | 550 |
577 void URLRequestJob::NotifyStartError(const URLRequestStatus &status) { | 551 void URLRequestJob::NotifyStartError(const URLRequestStatus &status) { |
578 DCHECK(!has_handled_response_); | 552 DCHECK(!has_handled_response_); |
579 DCHECK(request_->status().is_io_pending()); | 553 DCHECK(request_->status().is_io_pending()); |
580 | 554 |
581 has_handled_response_ = true; | 555 has_handled_response_ = true; |
582 // There may be relevant information in the response info even in the | 556 // There may be relevant information in the response info even in the |
583 // error case. | 557 // error case. |
(...skipping 75 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
659 return 0; | 633 return 0; |
660 } | 634 } |
661 | 635 |
662 void URLRequestJob::DoneReading() { | 636 void URLRequestJob::DoneReading() { |
663 // Do nothing. | 637 // Do nothing. |
664 } | 638 } |
665 | 639 |
666 void URLRequestJob::DoneReadingRedirectResponse() { | 640 void URLRequestJob::DoneReadingRedirectResponse() { |
667 } | 641 } |
668 | 642 |
669 void URLRequestJob::PushInputToFilter(int bytes_read) { | 643 std::unique_ptr<SourceStream> URLRequestJob::SetUpSourceStream() { |
670 DCHECK(filter_); | 644 return base::MakeUnique<URLRequestJobSourceStream>(this); |
671 filter_->FlushStreamBuffer(bytes_read); | |
672 } | 645 } |
673 | 646 |
674 Error URLRequestJob::ReadFilteredData(int* bytes_read) { | 647 void URLRequestJob::DestroySourceStream() { |
675 DCHECK(filter_); | 648 source_stream_.reset(); |
676 DCHECK(filtered_read_buffer_.get()); | |
677 DCHECK_GT(filtered_read_buffer_len_, 0); | |
678 DCHECK_LT(filtered_read_buffer_len_, 1000000); // Sanity check. | |
679 DCHECK(!raw_read_buffer_); | |
680 | |
681 *bytes_read = 0; | |
682 Error error = ERR_FAILED; | |
683 | |
684 for (;;) { | |
685 if (is_done()) | |
686 return OK; | |
687 | |
688 if (!filter_needs_more_output_space_ && !filter_->stream_data_len()) { | |
689 // We don't have any raw data to work with, so read from the transaction. | |
690 int filtered_data_read; | |
691 error = ReadRawDataForFilter(&filtered_data_read); | |
692 // If ReadRawDataForFilter returned some data, fall through to the case | |
693 // below; otherwise, return early. | |
694 if (error != OK || filtered_data_read == 0) | |
695 return error; | |
696 filter_->FlushStreamBuffer(filtered_data_read); | |
697 } | |
698 | |
699 if ((filter_->stream_data_len() || filter_needs_more_output_space_) && | |
700 !is_done()) { | |
701 // Get filtered data. | |
702 int filtered_data_len = filtered_read_buffer_len_; | |
703 int output_buffer_size = filtered_data_len; | |
704 Filter::FilterStatus status = | |
705 filter_->ReadData(filtered_read_buffer_->data(), &filtered_data_len); | |
706 | |
707 if (filter_needs_more_output_space_ && !filtered_data_len) { | |
708 // filter_needs_more_output_space_ was mistaken... there are no more | |
709 // bytes and we should have at least tried to fill up the filter's input | |
710 // buffer. Correct the state, and try again. | |
711 filter_needs_more_output_space_ = false; | |
712 continue; | |
713 } | |
714 filter_needs_more_output_space_ = | |
715 (filtered_data_len == output_buffer_size); | |
716 | |
717 switch (status) { | |
718 case Filter::FILTER_DONE: { | |
719 filter_needs_more_output_space_ = false; | |
720 *bytes_read = filtered_data_len; | |
721 postfilter_bytes_read_ += filtered_data_len; | |
722 error = OK; | |
723 break; | |
724 } | |
725 case Filter::FILTER_NEED_MORE_DATA: { | |
726 // We have finished filtering all data currently in the buffer. | |
727 // There might be some space left in the output buffer. One can | |
728 // consider reading more data from the stream to feed the filter | |
729 // and filling up the output buffer. This leads to more complicated | |
730 // buffer management and data notification mechanisms. | |
731 // We can revisit this issue if there is a real perf need. | |
732 if (filtered_data_len > 0) { | |
733 *bytes_read = filtered_data_len; | |
734 postfilter_bytes_read_ += filtered_data_len; | |
735 error = OK; | |
736 } else { | |
737 // Read again since we haven't received enough data yet (e.g., we | |
738 // may not have a complete gzip header yet). | |
739 continue; | |
740 } | |
741 break; | |
742 } | |
743 case Filter::FILTER_OK: { | |
744 *bytes_read = filtered_data_len; | |
745 postfilter_bytes_read_ += filtered_data_len; | |
746 error = OK; | |
747 break; | |
748 } | |
749 case Filter::FILTER_ERROR: { | |
750 DVLOG(1) << __func__ << "() \"" << request_->url().spec() << "\"" | |
751 << " Filter Error"; | |
752 filter_needs_more_output_space_ = false; | |
753 error = ERR_CONTENT_DECODING_FAILED; | |
754 UMA_HISTOGRAM_ENUMERATION("Net.ContentDecodingFailed.FilterType", | |
755 filter_->type(), Filter::FILTER_TYPE_MAX); | |
756 break; | |
757 } | |
758 default: { | |
759 NOTREACHED(); | |
760 filter_needs_more_output_space_ = false; | |
761 error = ERR_FAILED; | |
762 break; | |
763 } | |
764 } | |
765 | |
766 // If logging all bytes is enabled, log the filtered bytes read. | |
767 if (error == OK && filtered_data_len > 0 && | |
768 request()->net_log().IsCapturing()) { | |
769 request()->net_log().AddByteTransferEvent( | |
770 NetLogEventType::URL_REQUEST_JOB_FILTERED_BYTES_READ, | |
771 filtered_data_len, filtered_read_buffer_->data()); | |
772 } | |
773 } else { | |
774 // we are done, or there is no data left. | |
775 error = OK; | |
776 } | |
777 break; | |
778 } | |
779 | |
780 if (error == OK) { | |
781 // When we successfully finished a read, we no longer need to save the | |
782 // caller's buffers. Release our reference. | |
783 filtered_read_buffer_ = NULL; | |
784 filtered_read_buffer_len_ = 0; | |
785 } | |
786 return error; | |
787 } | |
788 | |
789 void URLRequestJob::DestroyFilters() { | |
790 filter_.reset(); | |
791 } | 649 } |
792 | 650 |
793 const URLRequestStatus URLRequestJob::GetStatus() { | 651 const URLRequestStatus URLRequestJob::GetStatus() { |
794 return request_->status(); | 652 return request_->status(); |
795 } | 653 } |
796 | 654 |
797 void URLRequestJob::SetProxyServer(const ProxyServer& proxy_server) { | 655 void URLRequestJob::SetProxyServer(const ProxyServer& proxy_server) { |
798 request_->proxy_server_ = proxy_server; | 656 request_->proxy_server_ = proxy_server; |
799 } | 657 } |
800 | 658 |
801 Error URLRequestJob::ReadRawDataForFilter(int* bytes_read) { | 659 void URLRequestJob::SourceStreamReadComplete(bool synchronous, int result) { |
802 Error error = ERR_FAILED; | 660 DCHECK_NE(ERR_IO_PENDING, result); |
803 DCHECK(bytes_read); | |
804 DCHECK(filter_.get()); | |
805 | 661 |
806 *bytes_read = 0; | 662 if (result > 0 && request()->net_log().IsCapturing()) { |
| 663 request()->net_log().AddByteTransferEvent( |
| 664 NetLogEventType::URL_REQUEST_JOB_FILTERED_BYTES_READ, result, |
| 665 pending_read_buffer_->data()); |
| 666 } |
| 667 pending_read_buffer_ = nullptr; |
807 | 668 |
808 // Get more pre-filtered data if needed. | 669 if (result < 0) { |
809 // TODO(mbelshe): is it possible that the filter needs *MORE* data | 670 NotifyDone(URLRequestStatus::FromError(result)); |
810 // when there is some data already in the buffer? | 671 return; |
811 if (!filter_->stream_data_len() && !is_done()) { | |
812 IOBuffer* stream_buffer = filter_->stream_buffer(); | |
813 int stream_buffer_size = filter_->stream_buffer_size(); | |
814 error = ReadRawDataHelper(stream_buffer, stream_buffer_size, bytes_read); | |
815 } | 672 } |
816 return error; | 673 |
| 674 if (result > 0) { |
| 675 postfilter_bytes_read_ += result; |
| 676 if (!synchronous) |
| 677 request_->NotifyReadCompleted(result); |
| 678 return; |
| 679 } |
| 680 |
| 681 DCHECK_EQ(0, result); |
| 682 DoneReading(); |
| 683 NotifyDone(URLRequestStatus()); |
| 684 if (!synchronous) |
| 685 request_->NotifyReadCompleted(result); |
817 } | 686 } |
818 | 687 |
819 Error URLRequestJob::ReadRawDataHelper(IOBuffer* buf, | 688 int URLRequestJob::ReadRawDataHelper(IOBuffer* buf, |
820 int buf_size, | 689 int buf_size, |
821 int* bytes_read) { | 690 const CompletionCallback& callback) { |
822 DCHECK(!raw_read_buffer_); | 691 DCHECK(!raw_read_buffer_); |
823 | 692 |
824 // Keep a pointer to the read buffer, so we have access to it in | 693 // Keep a pointer to the read buffer, so URLRequestJob::GatherRawReadStats() |
825 // GatherRawReadStats() in the event that the read completes asynchronously. | 694 // has access to it to log stats. |
826 raw_read_buffer_ = buf; | 695 raw_read_buffer_ = buf; |
827 Error error; | |
828 ConvertResultToError(ReadRawData(buf, buf_size), &error, bytes_read); | |
829 | 696 |
830 if (error != ERR_IO_PENDING) { | 697 // TODO(xunjieli): Make ReadRawData take in a callback rather than requiring |
| 698 // subclass to call ReadRawDataComplete upon asynchronous completion. |
| 699 int result = ReadRawData(buf, buf_size); |
| 700 |
| 701 if (result != ERR_IO_PENDING) { |
831 // If the read completes synchronously, either success or failure, invoke | 702 // If the read completes synchronously, either success or failure, invoke |
832 // GatherRawReadStats so we can account for the completed read. | 703 // GatherRawReadStats so we can account for the completed read. |
833 GatherRawReadStats(error, *bytes_read); | 704 GatherRawReadStats(result); |
| 705 } else { |
| 706 read_raw_callback_ = callback; |
834 } | 707 } |
835 return error; | 708 return result; |
836 } | 709 } |
837 | 710 |
838 void URLRequestJob::FollowRedirect(const RedirectInfo& redirect_info) { | 711 void URLRequestJob::FollowRedirect(const RedirectInfo& redirect_info) { |
839 int rv = request_->Redirect(redirect_info); | 712 int rv = request_->Redirect(redirect_info); |
840 if (rv != OK) | 713 if (rv != OK) |
841 NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, rv)); | 714 NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, rv)); |
842 } | 715 } |
843 | 716 |
844 void URLRequestJob::GatherRawReadStats(Error error, int bytes_read) { | 717 void URLRequestJob::GatherRawReadStats(int bytes_read) { |
845 DCHECK(raw_read_buffer_ || bytes_read == 0); | 718 DCHECK(raw_read_buffer_ || bytes_read == 0); |
846 DCHECK_NE(ERR_IO_PENDING, error); | 719 DCHECK_NE(ERR_IO_PENDING, bytes_read); |
847 | |
848 if (error != OK) { | |
849 raw_read_buffer_ = nullptr; | |
850 return; | |
851 } | |
852 // If |filter_| is non-NULL, bytes will be logged after it is applied | |
853 // instead. | |
854 if (!filter_.get() && bytes_read > 0 && request()->net_log().IsCapturing()) { | |
855 request()->net_log().AddByteTransferEvent( | |
856 NetLogEventType::URL_REQUEST_JOB_BYTES_READ, bytes_read, | |
857 raw_read_buffer_->data()); | |
858 } | |
859 | 720 |
860 if (bytes_read > 0) { | 721 if (bytes_read > 0) { |
 | 722 // Raw bytes are only logged here when a filter is present; otherwise they |
 | // have already been logged as filtered bytes in SourceStreamReadComplete(). |
| 723 if (source_stream_->type() != SourceStream::TYPE_NONE && |
| 724 request()->net_log().IsCapturing()) { |
| 725 request()->net_log().AddByteTransferEvent( |
| 726 NetLogEventType::URL_REQUEST_JOB_BYTES_READ, bytes_read, |
| 727 raw_read_buffer_->data()); |
| 728 } |
861 RecordBytesRead(bytes_read); | 729 RecordBytesRead(bytes_read); |
862 } | 730 } |
863 raw_read_buffer_ = nullptr; | 731 raw_read_buffer_ = nullptr; |
864 } | 732 } |
865 | 733 |
866 void URLRequestJob::RecordBytesRead(int bytes_read) { | 734 void URLRequestJob::RecordBytesRead(int bytes_read) { |
867 DCHECK_GT(bytes_read, 0); | 735 DCHECK_GT(bytes_read, 0); |
868 prefilter_bytes_read_ += bytes_read; | 736 prefilter_bytes_read_ += base::checked_cast<size_t>(bytes_read); |
869 | 737 |
870 // On first read, notify NetworkQualityEstimator that response headers have | 738 // On first read, notify NetworkQualityEstimator that response headers have |
871 // been received. | 739 // been received. |
872 // TODO(tbansal): Move this to url_request_http_job.cc. This may catch | 740 // TODO(tbansal): Move this to url_request_http_job.cc. This may catch |
873 // Service Worker jobs twice. | 741 // Service Worker jobs twice. |
874 // If prefilter_bytes_read_ is equal to bytes_read, it indicates this is the | 742 // If prefilter_bytes_read_ is equal to bytes_read, it indicates this is the |
875 // first raw read of the response body. This is used as the signal that | 743 // first raw read of the response body. This is used as the signal that |
876 // response headers have been received. | 744 // response headers have been received. |
877 if (request_->context()->network_quality_estimator() && | 745 if (request_->context()->network_quality_estimator() && |
878 prefilter_bytes_read_ == bytes_read) { | 746 prefilter_bytes_read() == bytes_read) { |
879 request_->context()->network_quality_estimator()->NotifyHeadersReceived( | 747 request_->context()->network_quality_estimator()->NotifyHeadersReceived( |
880 *request_); | 748 *request_); |
881 } | 749 } |
882 | 750 |
883 if (!filter_.get()) | 751 DVLOG(2) << __FUNCTION__ << "() " |
884 postfilter_bytes_read_ += bytes_read; | 752 << "\"" << request_->url().spec() << "\"" |
885 DVLOG(2) << __func__ << "() \"" << request_->url().spec() << "\"" | |
886 << " pre bytes read = " << bytes_read | 753 << " pre bytes read = " << bytes_read |
887 << " pre total = " << prefilter_bytes_read_ | 754 << " pre total = " << prefilter_bytes_read() |
888 << " post total = " << postfilter_bytes_read_; | 755 << " post total = " << postfilter_bytes_read(); |
889 UpdatePacketReadTimes(); // Facilitate stats recording if it is active. | 756 UpdatePacketReadTimes(); // Facilitate stats recording if it is active. |
890 | 757 |
891 // Notify observers if any additional network usage has occurred. Note that | 758 // Notify observers if any additional network usage has occurred. Note that |
892 // the number of received bytes over the network sent by this notification | 759 // the number of received bytes over the network sent by this notification |
893 // could be vastly different from |bytes_read|, such as when a large chunk of | 760 // could be vastly different from |bytes_read|, such as when a large chunk of |
894 // network bytes is received before multiple smaller raw reads are performed | 761 // network bytes is received before multiple smaller raw reads are performed |
895 // on it. | 762 // on it. |
896 MaybeNotifyNetworkBytes(); | 763 MaybeNotifyNetworkBytes(); |
897 } | 764 } |
898 | 765 |
899 bool URLRequestJob::FilterHasData() { | |
900 return filter_.get() && filter_->stream_data_len(); | |
901 } | |
902 | |
903 void URLRequestJob::UpdatePacketReadTimes() { | 766 void URLRequestJob::UpdatePacketReadTimes() { |
904 } | 767 } |
905 | 768 |
906 RedirectInfo URLRequestJob::ComputeRedirectInfo(const GURL& location, | 769 RedirectInfo URLRequestJob::ComputeRedirectInfo(const GURL& location, |
907 int http_status_code) { | 770 int http_status_code) { |
908 const GURL& url = request_->url(); | 771 const GURL& url = request_->url(); |
909 | 772 |
910 RedirectInfo redirect_info; | 773 RedirectInfo redirect_info; |
911 | 774 |
912 redirect_info.status_code = http_status_code; | 775 redirect_info.status_code = http_status_code; |
(...skipping 63 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
976 int64_t total_sent_bytes = GetTotalSentBytes(); | 839 int64_t total_sent_bytes = GetTotalSentBytes(); |
977 DCHECK_GE(total_sent_bytes, last_notified_total_sent_bytes_); | 840 DCHECK_GE(total_sent_bytes, last_notified_total_sent_bytes_); |
978 if (total_sent_bytes > last_notified_total_sent_bytes_) { | 841 if (total_sent_bytes > last_notified_total_sent_bytes_) { |
979 network_delegate_->NotifyNetworkBytesSent( | 842 network_delegate_->NotifyNetworkBytesSent( |
980 request_, total_sent_bytes - last_notified_total_sent_bytes_); | 843 request_, total_sent_bytes - last_notified_total_sent_bytes_); |
981 } | 844 } |
982 last_notified_total_sent_bytes_ = total_sent_bytes; | 845 last_notified_total_sent_bytes_ = total_sent_bytes; |
983 } | 846 } |
984 | 847 |
985 } // namespace net | 848 } // namespace net |
OLD | NEW |