Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "net/url_request/url_request_job.h" | 5 #include "net/url_request/url_request_job.h" |
| 6 | 6 |
| 7 #include <utility> | 7 #include <utility> |
| 8 | 8 |
| 9 #include "base/bind.h" | 9 #include "base/bind.h" |
| 10 #include "base/callback_helpers.h" | |
| 10 #include "base/compiler_specific.h" | 11 #include "base/compiler_specific.h" |
| 11 #include "base/location.h" | 12 #include "base/location.h" |
| 13 #include "base/memory/ptr_util.h" | |
| 12 #include "base/metrics/histogram_macros.h" | 14 #include "base/metrics/histogram_macros.h" |
| 13 #include "base/power_monitor/power_monitor.h" | 15 #include "base/power_monitor/power_monitor.h" |
| 14 #include "base/profiler/scoped_tracker.h" | 16 #include "base/profiler/scoped_tracker.h" |
| 15 #include "base/single_thread_task_runner.h" | 17 #include "base/single_thread_task_runner.h" |
| 16 #include "base/strings/string_number_conversions.h" | 18 #include "base/strings/string_number_conversions.h" |
| 17 #include "base/strings/string_split.h" | 19 #include "base/strings/string_split.h" |
| 18 #include "base/strings/string_util.h" | 20 #include "base/strings/string_util.h" |
| 19 #include "base/threading/thread_task_runner_handle.h" | 21 #include "base/threading/thread_task_runner_handle.h" |
| 20 #include "base/values.h" | 22 #include "base/values.h" |
| 21 #include "net/base/auth.h" | 23 #include "net/base/auth.h" |
| 22 #include "net/base/host_port_pair.h" | 24 #include "net/base/host_port_pair.h" |
| 23 #include "net/base/io_buffer.h" | 25 #include "net/base/io_buffer.h" |
| 24 #include "net/base/load_flags.h" | 26 #include "net/base/load_flags.h" |
| 25 #include "net/base/load_states.h" | 27 #include "net/base/load_states.h" |
| 26 #include "net/base/net_errors.h" | 28 #include "net/base/net_errors.h" |
| 27 #include "net/base/network_delegate.h" | 29 #include "net/base/network_delegate.h" |
| 28 #include "net/filter/filter.h" | |
| 29 #include "net/http/http_response_headers.h" | 30 #include "net/http/http_response_headers.h" |
| 30 #include "net/nqe/network_quality_estimator.h" | 31 #include "net/nqe/network_quality_estimator.h" |
| 31 #include "net/url_request/url_request_context.h" | 32 #include "net/url_request/url_request_context.h" |
| 32 | 33 |
| 33 namespace net { | 34 namespace net { |
| 34 | 35 |
| 35 namespace { | 36 namespace { |
| 36 | 37 |
| 37 // Callback for TYPE_URL_REQUEST_FILTERS_SET net-internals event. | 38 // Callback for TYPE_URL_REQUEST_FILTERS_SET net-internals event. |
| 38 std::unique_ptr<base::Value> FiltersSetCallback( | 39 std::unique_ptr<base::Value> StreamSourceSetCallback( |
| 39 Filter* filter, | 40 StreamSource* stream_source, |
| 40 NetLogCaptureMode /* capture_mode */) { | 41 NetLogCaptureMode /* capture_mode */) { |
| 41 std::unique_ptr<base::DictionaryValue> event_params( | 42 std::unique_ptr<base::DictionaryValue> event_params( |
| 42 new base::DictionaryValue()); | 43 new base::DictionaryValue()); |
| 43 event_params->SetString("filters", filter->OrderedFilterList()); | 44 event_params->SetString("filters", stream_source->OrderedTypeStringList()); |
| 44 return std::move(event_params); | 45 return std::move(event_params); |
| 45 } | 46 } |
| 46 | 47 |
| 47 std::string ComputeMethodForRedirect(const std::string& method, | 48 std::string ComputeMethodForRedirect(const std::string& method, |
| 48 int http_status_code) { | 49 int http_status_code) { |
| 49 // For 303 redirects, all request methods except HEAD are converted to GET, | 50 // For 303 redirects, all request methods except HEAD are converted to GET, |
| 50 // as per the latest httpbis draft. The draft also allows POST requests to | 51 // as per the latest httpbis draft. The draft also allows POST requests to |
| 51 // be converted to GETs when following 301/302 redirects, for historical | 52 // be converted to GETs when following 301/302 redirects, for historical |
| 52 // reasons. Most major browsers do this and so shall we. Both RFC 2616 and | 53 // reasons. Most major browsers do this and so shall we. Both RFC 2616 and |
| 53 // the httpbis draft say to prompt the user to confirm the generation of new | 54 // the httpbis draft say to prompt the user to confirm the generation of new |
| (...skipping 54 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 108 base::CompareCaseInsensitiveASCII(token, "unsafe-url") == 0) { | 109 base::CompareCaseInsensitiveASCII(token, "unsafe-url") == 0) { |
| 109 new_policy = URLRequest::NEVER_CLEAR_REFERRER; | 110 new_policy = URLRequest::NEVER_CLEAR_REFERRER; |
| 110 continue; | 111 continue; |
| 111 } | 112 } |
| 112 } | 113 } |
| 113 return new_policy; | 114 return new_policy; |
| 114 } | 115 } |
| 115 | 116 |
| 116 } // namespace | 117 } // namespace |
| 117 | 118 |
| 119 // StreamSources own the previous StreamSource in the chain, but the ultimate | |
| 120 // source is URLRequestJob, which has other ownership semantics, so this class | |
| 121 // is a proxy for URLRequestJob that is owned by the first filter (in dataflow | |
| 122 // order). | |
| 123 class URLRequestJob::URLRequestJobStreamSource : public StreamSource { | |
| 124 public: | |
| 125 explicit URLRequestJobStreamSource(URLRequestJob* job) | |
| 126 : StreamSource(StreamSource::TYPE_NONE), job_(job) {} | |
|
mmenke
2016/07/28 18:40:13
DCHECK(job_);? Since we never modify it, DCHECK s
xunjieli
2016/08/01 16:46:23
Done.
| |
| 127 | |
| 128 ~URLRequestJobStreamSource() override {} | |
| 129 | |
| 130 // StreamSource implementation: | |
| 131 int Read(IOBuffer* dest_buffer, | |
| 132 size_t buffer_size, | |
| 133 const CompletionCallback& callback) override { | |
| 134 DCHECK(job_); | |
| 135 return job_->ReadRawDataHelper(dest_buffer, buffer_size, callback); | |
| 136 } | |
| 137 | |
| 138 private: | |
| 139 URLRequestJob* job_; | |
|
mmenke
2016/07/28 18:40:13
Maybe URLRequestJob* const job_? (Meaning the poi
xunjieli
2016/08/01 16:46:23
Done.
| |
| 140 | |
| 141 DISALLOW_COPY_AND_ASSIGN(URLRequestJobStreamSource); | |
| 142 }; | |
| 143 | |
| 118 URLRequestJob::URLRequestJob(URLRequest* request, | 144 URLRequestJob::URLRequestJob(URLRequest* request, |
| 119 NetworkDelegate* network_delegate) | 145 NetworkDelegate* network_delegate) |
| 120 : request_(request), | 146 : request_(request), |
| 121 done_(false), | 147 done_(false), |
| 122 prefilter_bytes_read_(0), | |
| 123 postfilter_bytes_read_(0), | |
| 124 filter_needs_more_output_space_(false), | |
| 125 filtered_read_buffer_len_(0), | |
| 126 has_handled_response_(false), | 148 has_handled_response_(false), |
| 127 expected_content_size_(-1), | 149 expected_content_size_(-1), |
| 128 network_delegate_(network_delegate), | 150 network_delegate_(network_delegate), |
| 129 last_notified_total_received_bytes_(0), | 151 last_notified_total_received_bytes_(0), |
| 130 last_notified_total_sent_bytes_(0), | 152 last_notified_total_sent_bytes_(0), |
| 153 prefilter_bytes_read_(0), | |
| 154 postfilter_bytes_read_(0), | |
| 131 weak_factory_(this) { | 155 weak_factory_(this) { |
| 132 base::PowerMonitor* power_monitor = base::PowerMonitor::Get(); | 156 base::PowerMonitor* power_monitor = base::PowerMonitor::Get(); |
| 133 if (power_monitor) | 157 if (power_monitor) |
| 134 power_monitor->AddObserver(this); | 158 power_monitor->AddObserver(this); |
| 135 } | 159 } |
| 136 | 160 |
| 137 URLRequestJob::~URLRequestJob() { | 161 URLRequestJob::~URLRequestJob() { |
| 138 base::PowerMonitor* power_monitor = base::PowerMonitor::Get(); | 162 base::PowerMonitor* power_monitor = base::PowerMonitor::Get(); |
| 139 if (power_monitor) | 163 if (power_monitor) |
| 140 power_monitor->RemoveObserver(this); | 164 power_monitor->RemoveObserver(this); |
| (...skipping 15 matching lines...) Expand all Loading... | |
| 156 // Kill(). | 180 // Kill(). |
| 157 // TODO(mmenke): The URLRequest is currently deleted before this method | 181 // TODO(mmenke): The URLRequest is currently deleted before this method |
| 158 // invokes its async callback whenever this is called by the URLRequest. | 182 // invokes its async callback whenever this is called by the URLRequest. |
| 159 // Try to simplify how cancellation works. | 183 // Try to simplify how cancellation works. |
| 160 NotifyCanceled(); | 184 NotifyCanceled(); |
| 161 } | 185 } |
| 162 | 186 |
| 163 // This function calls ReadRawData to get stream data. If a filter exists, it | 187 // This function calls ReadRawData to get stream data. If a filter exists, it |
| 164 // passes the data to the attached filter. It then returns the output from | 188 // passes the data to the attached filter. It then returns the output from |
| 165 // filter back to the caller. | 189 // filter back to the caller. |
| 190 // This method passes reads down the filter chain, where they eventually end up | |
| 191 // at URLRequestJobStreamSource::Read, which calls back into | |
| 192 // URLRequestJob::ReadRawData. | |
| 166 bool URLRequestJob::Read(IOBuffer* buf, int buf_size, int *bytes_read) { | 193 bool URLRequestJob::Read(IOBuffer* buf, int buf_size, int *bytes_read) { |
| 167 DCHECK_LT(buf_size, 1000000); // Sanity check. | 194 DCHECK_LT(buf_size, 1000000); // Sanity check. |
| 168 DCHECK(buf); | 195 DCHECK(buf); |
| 169 DCHECK(bytes_read); | 196 DCHECK(bytes_read); |
| 170 DCHECK(!filtered_read_buffer_); | |
| 171 DCHECK_EQ(0, filtered_read_buffer_len_); | |
| 172 | |
| 173 Error error = OK; | |
| 174 *bytes_read = 0; | 197 *bytes_read = 0; |
| 175 | 198 |
| 176 // Skip Filter if not present. | 199 pending_read_buffer_ = buf; |
| 177 if (!filter_) { | 200 int result = source_->Read(buf, buf_size, |
| 178 error = ReadRawDataHelper(buf, buf_size, bytes_read); | 201 base::Bind(&URLRequestJob::SourceReadComplete, |
| 179 } else { | 202 weak_factory_.GetWeakPtr(), false)); |
| 180 // Save the caller's buffers while we do IO | 203 if (result > 0) |
| 181 // in the filter's buffers. | 204 *bytes_read = result; |
| 182 filtered_read_buffer_ = buf; | |
| 183 filtered_read_buffer_len_ = buf_size; | |
| 184 | 205 |
| 185 error = ReadFilteredData(bytes_read); | 206 if (result == ERR_IO_PENDING) { |
| 186 | 207 SetStatus(URLRequestStatus::FromError(ERR_IO_PENDING)); |
| 187 // Synchronous EOF from the filter. | 208 return false; |
| 188 if (error == OK && *bytes_read == 0) | |
| 189 DoneReading(); | |
| 190 } | 209 } |
| 191 | 210 |
| 192 if (error == OK) { | 211 SourceReadComplete(true, result); |
| 193 // If URLRequestJob read zero bytes, the job is at EOF. | 212 return result >= OK; |
| 194 if (*bytes_read == 0) | 213 } |
| 195 NotifyDone(URLRequestStatus()); | 214 |
| 196 } else if (error == ERR_IO_PENDING) { | 215 void URLRequestJob::SourceReadComplete(bool synchronous, int result) { |
| 197 SetStatus(URLRequestStatus::FromError(ERR_IO_PENDING)); | 216 DCHECK_NE(ERR_IO_PENDING, result); |
| 217 | |
| 218 if (result > 0) { | |
| 219 postfilter_bytes_read_ += result; | |
| 220 if (request()->net_log().IsCapturing()) { | |
| 221 request()->net_log().AddByteTransferEvent( | |
| 222 NetLog::TYPE_URL_REQUEST_JOB_FILTERED_BYTES_READ, result, | |
| 223 pending_read_buffer_->data()); | |
| 224 } | |
| 225 pending_read_buffer_ = nullptr; | |
|
mmenke
2016/07/28 18:40:13
Optional: May be a little cleaner as:
if (result
xunjieli
2016/08/01 16:46:23
Done. great suggestion!
| |
| 226 SetStatus(URLRequestStatus()); | |
| 227 if (!synchronous) | |
| 228 request_->NotifyReadCompleted(result); | |
|
mmenke
2016/07/28 18:40:13
I think early return is a little better in each of
xunjieli
2016/08/01 16:46:23
Done.
| |
| 229 } else if (result == 0) { | |
| 230 pending_read_buffer_ = nullptr; | |
| 231 DoneReading(); | |
| 232 NotifyDone(URLRequestStatus()); | |
| 233 if (!synchronous) | |
| 234 request_->NotifyReadCompleted(result); | |
| 198 } else { | 235 } else { |
| 199 NotifyDone(URLRequestStatus::FromError(error)); | 236 pending_read_buffer_ = nullptr; |
| 200 *bytes_read = -1; | 237 NotifyDone(URLRequestStatus::FromError(result)); |
| 201 } | 238 } |
| 202 return error == OK; | |
| 203 } | 239 } |
| 204 | 240 |
| 205 void URLRequestJob::StopCaching() { | 241 void URLRequestJob::StopCaching() { |
| 206 // Nothing to do here. | 242 // Nothing to do here. |
| 207 } | 243 } |
| 208 | 244 |
| 209 bool URLRequestJob::GetFullRequestHeaders(HttpRequestHeaders* headers) const { | 245 bool URLRequestJob::GetFullRequestHeaders(HttpRequestHeaders* headers) const { |
| 210 // Most job types don't send request headers. | 246 // Most job types don't send request headers. |
| 211 return false; | 247 return false; |
| 212 } | 248 } |
| (...skipping 26 matching lines...) Expand all Loading... | |
| 239 } | 275 } |
| 240 | 276 |
| 241 bool URLRequestJob::GetRemoteEndpoint(IPEndPoint* endpoint) const { | 277 bool URLRequestJob::GetRemoteEndpoint(IPEndPoint* endpoint) const { |
| 242 return false; | 278 return false; |
| 243 } | 279 } |
| 244 | 280 |
| 245 void URLRequestJob::PopulateNetErrorDetails(NetErrorDetails* details) const { | 281 void URLRequestJob::PopulateNetErrorDetails(NetErrorDetails* details) const { |
| 246 return; | 282 return; |
| 247 } | 283 } |
| 248 | 284 |
| 249 std::unique_ptr<Filter> URLRequestJob::SetupFilter() const { | |
| 250 return nullptr; | |
| 251 } | |
| 252 | |
| 253 bool URLRequestJob::IsRedirectResponse(GURL* location, | 285 bool URLRequestJob::IsRedirectResponse(GURL* location, |
| 254 int* http_status_code) { | 286 int* http_status_code) { |
| 255 // For non-HTTP jobs, headers will be null. | 287 // For non-HTTP jobs, headers will be null. |
| 256 HttpResponseHeaders* headers = request_->response_headers(); | 288 HttpResponseHeaders* headers = request_->response_headers(); |
| 257 if (!headers) | 289 if (!headers) |
| 258 return false; | 290 return false; |
| 259 | 291 |
| 260 std::string value; | 292 std::string value; |
| 261 if (!headers->IsRedirect(&value)) | 293 if (!headers->IsRedirect(&value)) |
| 262 return false; | 294 return false; |
| (...skipping 138 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 401 return GURL(); | 433 return GURL(); |
| 402 case URLRequest::MAX_REFERRER_POLICY: | 434 case URLRequest::MAX_REFERRER_POLICY: |
| 403 NOTREACHED(); | 435 NOTREACHED(); |
| 404 return GURL(); | 436 return GURL(); |
| 405 } | 437 } |
| 406 | 438 |
| 407 NOTREACHED(); | 439 NOTREACHED(); |
| 408 return GURL(); | 440 return GURL(); |
| 409 } | 441 } |
| 410 | 442 |
| 443 int64_t URLRequestJob::prefilter_bytes_read() const { | |
| 444 return base::checked_cast<int64_t>(prefilter_bytes_read_); | |
| 445 } | |
| 446 | |
| 411 void URLRequestJob::NotifyCertificateRequested( | 447 void URLRequestJob::NotifyCertificateRequested( |
| 412 SSLCertRequestInfo* cert_request_info) { | 448 SSLCertRequestInfo* cert_request_info) { |
| 413 request_->NotifyCertificateRequested(cert_request_info); | 449 request_->NotifyCertificateRequested(cert_request_info); |
| 414 } | 450 } |
| 415 | 451 |
| 416 void URLRequestJob::NotifySSLCertificateError(const SSLInfo& ssl_info, | 452 void URLRequestJob::NotifySSLCertificateError(const SSLInfo& ssl_info, |
| 417 bool fatal) { | 453 bool fatal) { |
| 418 request_->NotifySSLCertificateError(ssl_info, fatal); | 454 request_->NotifySSLCertificateError(ssl_info, fatal); |
| 419 } | 455 } |
| 420 | 456 |
| (...skipping 68 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 489 // Need to check for a NULL auth_info because the server may have failed | 525 // Need to check for a NULL auth_info because the server may have failed |
| 490 // to send a challenge with the 401 response. | 526 // to send a challenge with the 401 response. |
| 491 if (auth_info.get()) { | 527 if (auth_info.get()) { |
| 492 request_->NotifyAuthRequired(auth_info.get()); | 528 request_->NotifyAuthRequired(auth_info.get()); |
| 493 // Wait for SetAuth or CancelAuth to be called. | 529 // Wait for SetAuth or CancelAuth to be called. |
| 494 return; | 530 return; |
| 495 } | 531 } |
| 496 } | 532 } |
| 497 | 533 |
| 498 has_handled_response_ = true; | 534 has_handled_response_ = true; |
| 499 if (request_->status().is_success()) | 535 if (request_->status().is_success()) { |
| 500 filter_ = SetupFilter(); | 536 DCHECK(!source_); |
| 537 source_ = SetupSource(); | |
| 501 | 538 |
| 502 if (!filter_.get()) { | 539 if (source_ == nullptr) { |
| 503 std::string content_length; | 540 NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, ERR_FAILED)); |
|
mmenke
2016/07/28 18:40:13
Could we add an error code for this case? Don't n
xunjieli
2016/08/01 16:46:23
Done.
| |
| 504 request_->GetResponseHeaderByName("content-length", &content_length); | 541 return; |
| 505 if (!content_length.empty()) | 542 } |
|
mmenke
2016/07/28 18:40:13
nit: Suggest a blank line here.
xunjieli
2016/08/01 16:46:23
Done.
| |
| 506 base::StringToInt64(content_length, &expected_content_size_); | 543 if (source_->type() == StreamSource::TYPE_NONE) { |
| 507 } else { | 544 std::string content_length; |
| 508 request_->net_log().AddEvent( | 545 request_->GetResponseHeaderByName("content-length", &content_length); |
| 509 NetLog::TYPE_URL_REQUEST_FILTERS_SET, | 546 if (!content_length.empty()) |
| 510 base::Bind(&FiltersSetCallback, base::Unretained(filter_.get()))); | 547 base::StringToInt64(content_length, &expected_content_size_); |
| 548 } else { | |
| 549 request_->net_log().AddEvent(NetLog::TYPE_URL_REQUEST_FILTERS_SET, | |
| 550 base::Bind(&StreamSourceSetCallback, | |
| 551 base::Unretained(source_.get()))); | |
| 552 } | |
| 511 } | 553 } |
| 512 | 554 |
| 513 request_->NotifyResponseStarted(); | 555 request_->NotifyResponseStarted(); |
| 514 | 556 |
| 515 // |this| may be destroyed at this point. | 557 // |this| may be destroyed at this point. |
| 516 } | 558 } |
| 517 | 559 |
| 518 void URLRequestJob::ConvertResultToError(int result, Error* error, int* count) { | 560 void URLRequestJob::ConvertResultToError(int result, Error* error, int* count) { |
| 519 if (result >= 0) { | 561 if (result >= 0) { |
| 520 *error = OK; | 562 *error = OK; |
| 521 *count = result; | 563 *count = result; |
| 522 } else { | 564 } else { |
| 523 *error = static_cast<Error>(result); | 565 *error = static_cast<Error>(result); |
| 524 *count = 0; | 566 *count = 0; |
| 525 } | 567 } |
| 526 } | 568 } |
| 527 | 569 |
| 528 void URLRequestJob::ReadRawDataComplete(int result) { | 570 void URLRequestJob::ReadRawDataComplete(int result) { |
| 529 DCHECK(request_->status().is_io_pending()); | 571 DCHECK(request_->status().is_io_pending()); |
| 572 DCHECK_NE(ERR_IO_PENDING, result); | |
| 530 | 573 |
| 531 // TODO(cbentzel): Remove ScopedTracker below once crbug.com/475755 is fixed. | 574 // TODO(cbentzel): Remove ScopedTracker below once crbug.com/475755 is fixed. |
| 532 tracked_objects::ScopedTracker tracking_profile( | 575 tracked_objects::ScopedTracker tracking_profile( |
| 533 FROM_HERE_WITH_EXPLICIT_FUNCTION( | 576 FROM_HERE_WITH_EXPLICIT_FUNCTION( |
| 534 "475755 URLRequestJob::RawReadCompleted")); | 577 "475755 URLRequestJob::RawReadCompleted")); |
| 535 | 578 |
| 536 // TODO(darin): Bug 1004233. Re-enable this test once all of the chrome | 579 // TODO(darin): Bug 1004233. Re-enable this test once all of the chrome |
| 537 // unit_tests have been fixed to not trip this. | 580 // unit_tests have been fixed to not trip this. |
| 538 #if 0 | 581 #if 0 |
| 539 DCHECK(!request_->status().is_io_pending()); | 582 DCHECK(!request_->status().is_io_pending()); |
| 540 #endif | 583 #endif |
| 541 // The headers should be complete before reads complete | 584 // The headers should be complete before reads complete |
| 542 DCHECK(has_handled_response_); | 585 DCHECK(has_handled_response_); |
| 543 | 586 |
| 544 Error error; | 587 GatherRawReadStats(result); |
| 545 int bytes_read; | |
| 546 ConvertResultToError(result, &error, &bytes_read); | |
| 547 | 588 |
| 548 DCHECK_NE(ERR_IO_PENDING, error); | 589 // Notify StreamSource. |
| 590 DCHECK(!read_raw_callback_.is_null()); | |
| 549 | 591 |
| 550 GatherRawReadStats(error, bytes_read); | 592 base::ResetAndReturn(&read_raw_callback_).Run(result); |
| 551 | |
| 552 if (filter_.get() && error == OK) { | |
| 553 // |bytes_read| being 0 indicates an EOF was received. ReadFilteredData | |
| 554 // can incorrectly return ERR_IO_PENDING when 0 bytes are passed to it, so | |
| 555 // just don't call into the filter in that case. | |
| 556 int filter_bytes_read = 0; | |
| 557 if (bytes_read > 0) { | |
| 558 // Tell the filter that it has more data. | |
| 559 PushInputToFilter(bytes_read); | |
| 560 | |
| 561 // Filter the data. | |
| 562 error = ReadFilteredData(&filter_bytes_read); | |
| 563 } | |
| 564 | |
| 565 if (error == OK && !filter_bytes_read) | |
| 566 DoneReading(); | |
| 567 | |
| 568 DVLOG(1) << __func__ << "() \"" << request_->url().spec() << "\"" | |
| 569 << " pre bytes read = " << bytes_read | |
| 570 << " pre total = " << prefilter_bytes_read_ | |
| 571 << " post total = " << postfilter_bytes_read_; | |
| 572 bytes_read = filter_bytes_read; | |
| 573 } else { | |
| 574 DVLOG(1) << __func__ << "() \"" << request_->url().spec() << "\"" | |
| 575 << " pre bytes read = " << bytes_read | |
| 576 << " pre total = " << prefilter_bytes_read_ | |
| 577 << " post total = " << postfilter_bytes_read_; | |
| 578 } | |
| 579 | |
| 580 // Synchronize the URLRequest state machine with the URLRequestJob state | |
| 581 // machine. If this read succeeded, either the request is at EOF and the | |
| 582 // URLRequest state machine goes to 'finished', or it is not and the | |
| 583 // URLRequest state machine goes to 'success'. If the read failed, the | |
| 584 // URLRequest state machine goes directly to 'finished'. If filtered data is | |
| 585 // pending, then there's nothing to do, since the status of the request is | |
| 586 // already pending. | |
| 587 // | |
| 588 // Update the URLRequest's status first, so that NotifyReadCompleted has an | |
| 589 // accurate view of the request. | |
| 590 if (error == OK && bytes_read > 0) { | |
| 591 SetStatus(URLRequestStatus()); | |
| 592 } else if (error != ERR_IO_PENDING) { | |
| 593 NotifyDone(URLRequestStatus::FromError(error)); | |
| 594 } | |
| 595 | |
| 596 // NotifyReadCompleted should be called after SetStatus or NotifyDone updates | |
| 597 // the status. | |
| 598 if (error == OK) | |
| 599 request_->NotifyReadCompleted(bytes_read); | |
| 600 | |
| 601 // |this| may be destroyed at this point. | 593 // |this| may be destroyed at this point. |
| 602 } | 594 } |
| 603 | 595 |
| 604 void URLRequestJob::NotifyStartError(const URLRequestStatus &status) { | 596 void URLRequestJob::NotifyStartError(const URLRequestStatus &status) { |
| 605 DCHECK(!has_handled_response_); | 597 DCHECK(!has_handled_response_); |
| 606 DCHECK(request_->status().is_io_pending()); | 598 DCHECK(request_->status().is_io_pending()); |
| 607 | 599 |
| 608 has_handled_response_ = true; | 600 has_handled_response_ = true; |
| 609 // There may be relevant information in the response info even in the | 601 // There may be relevant information in the response info even in the |
| 610 // error case. | 602 // error case. |
| (...skipping 29 matching lines...) Expand all Loading... | |
| 640 request_->set_status(status); | 632 request_->set_status(status); |
| 641 } | 633 } |
| 642 | 634 |
| 643 // If the request succeeded (And wasn't cancelled) and the response code was | 635 // If the request succeeded (And wasn't cancelled) and the response code was |
| 644 // 4xx or 5xx, record whether or not the main frame was blank. This is | 636 // 4xx or 5xx, record whether or not the main frame was blank. This is |
| 645 // intended to be a short-lived histogram, used to figure out how important | 637 // intended to be a short-lived histogram, used to figure out how important |
| 646 // fixing http://crbug.com/331745 is. | 638 // fixing http://crbug.com/331745 is. |
| 647 if (request_->status().is_success()) { | 639 if (request_->status().is_success()) { |
| 648 int response_code = GetResponseCode(); | 640 int response_code = GetResponseCode(); |
| 649 if (400 <= response_code && response_code <= 599) { | 641 if (400 <= response_code && response_code <= 599) { |
| 650 bool page_has_content = (postfilter_bytes_read_ != 0); | 642 bool page_has_content = (postfilter_bytes_read() != 0); |
| 651 if (request_->load_flags() & net::LOAD_MAIN_FRAME) { | 643 if (request_->load_flags() & net::LOAD_MAIN_FRAME) { |
| 652 UMA_HISTOGRAM_BOOLEAN("Net.ErrorResponseHasContentMainFrame", | 644 UMA_HISTOGRAM_BOOLEAN("Net.ErrorResponseHasContentMainFrame", |
| 653 page_has_content); | 645 page_has_content); |
| 654 } else { | 646 } else { |
| 655 UMA_HISTOGRAM_BOOLEAN("Net.ErrorResponseHasContentNonMainFrame", | 647 UMA_HISTOGRAM_BOOLEAN("Net.ErrorResponseHasContentNonMainFrame", |
| 656 page_has_content); | 648 page_has_content); |
| 657 } | 649 } |
| 658 } | 650 } |
| 659 } | 651 } |
| 660 | 652 |
| (...skipping 45 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 706 return 0; | 698 return 0; |
| 707 } | 699 } |
| 708 | 700 |
| 709 void URLRequestJob::DoneReading() { | 701 void URLRequestJob::DoneReading() { |
| 710 // Do nothing. | 702 // Do nothing. |
| 711 } | 703 } |
| 712 | 704 |
| 713 void URLRequestJob::DoneReadingRedirectResponse() { | 705 void URLRequestJob::DoneReadingRedirectResponse() { |
| 714 } | 706 } |
| 715 | 707 |
| 716 void URLRequestJob::PushInputToFilter(int bytes_read) { | 708 std::unique_ptr<StreamSource> URLRequestJob::SetupSource() { |
| 717 DCHECK(filter_); | 709 return base::MakeUnique<URLRequestJobStreamSource>(this); |
| 718 filter_->FlushStreamBuffer(bytes_read); | |
| 719 } | |
| 720 | |
| 721 Error URLRequestJob::ReadFilteredData(int* bytes_read) { | |
| 722 DCHECK(filter_); | |
| 723 DCHECK(filtered_read_buffer_.get()); | |
| 724 DCHECK_GT(filtered_read_buffer_len_, 0); | |
| 725 DCHECK_LT(filtered_read_buffer_len_, 1000000); // Sanity check. | |
| 726 DCHECK(!raw_read_buffer_); | |
| 727 | |
| 728 *bytes_read = 0; | |
| 729 Error error = ERR_FAILED; | |
| 730 | |
| 731 for (;;) { | |
| 732 if (is_done()) | |
| 733 return OK; | |
| 734 | |
| 735 if (!filter_needs_more_output_space_ && !filter_->stream_data_len()) { | |
| 736 // We don't have any raw data to work with, so read from the transaction. | |
| 737 int filtered_data_read; | |
| 738 error = ReadRawDataForFilter(&filtered_data_read); | |
| 739 // If ReadRawDataForFilter returned some data, fall through to the case | |
| 740 // below; otherwise, return early. | |
| 741 if (error != OK || filtered_data_read == 0) | |
| 742 return error; | |
| 743 filter_->FlushStreamBuffer(filtered_data_read); | |
| 744 } | |
| 745 | |
| 746 if ((filter_->stream_data_len() || filter_needs_more_output_space_) && | |
| 747 !is_done()) { | |
| 748 // Get filtered data. | |
| 749 int filtered_data_len = filtered_read_buffer_len_; | |
| 750 int output_buffer_size = filtered_data_len; | |
| 751 Filter::FilterStatus status = | |
| 752 filter_->ReadData(filtered_read_buffer_->data(), &filtered_data_len); | |
| 753 | |
| 754 if (filter_needs_more_output_space_ && !filtered_data_len) { | |
| 755 // filter_needs_more_output_space_ was mistaken... there are no more | |
| 756 // bytes and we should have at least tried to fill up the filter's input | |
| 757 // buffer. Correct the state, and try again. | |
| 758 filter_needs_more_output_space_ = false; | |
| 759 continue; | |
| 760 } | |
| 761 filter_needs_more_output_space_ = | |
| 762 (filtered_data_len == output_buffer_size); | |
| 763 | |
| 764 switch (status) { | |
| 765 case Filter::FILTER_DONE: { | |
| 766 filter_needs_more_output_space_ = false; | |
| 767 *bytes_read = filtered_data_len; | |
| 768 postfilter_bytes_read_ += filtered_data_len; | |
| 769 error = OK; | |
| 770 break; | |
| 771 } | |
| 772 case Filter::FILTER_NEED_MORE_DATA: { | |
| 773 // We have finished filtering all data currently in the buffer. | |
| 774 // There might be some space left in the output buffer. One can | |
| 775 // consider reading more data from the stream to feed the filter | |
| 776 // and filling up the output buffer. This leads to more complicated | |
| 777 // buffer management and data notification mechanisms. | |
| 778 // We can revisit this issue if there is a real perf need. | |
| 779 if (filtered_data_len > 0) { | |
| 780 *bytes_read = filtered_data_len; | |
| 781 postfilter_bytes_read_ += filtered_data_len; | |
| 782 error = OK; | |
| 783 } else { | |
| 784 // Read again since we haven't received enough data yet (e.g., we | |
| 785 // may not have a complete gzip header yet). | |
| 786 continue; | |
| 787 } | |
| 788 break; | |
| 789 } | |
| 790 case Filter::FILTER_OK: { | |
| 791 *bytes_read = filtered_data_len; | |
| 792 postfilter_bytes_read_ += filtered_data_len; | |
| 793 error = OK; | |
| 794 break; | |
| 795 } | |
| 796 case Filter::FILTER_ERROR: { | |
| 797 DVLOG(1) << __func__ << "() \"" << request_->url().spec() << "\"" | |
| 798 << " Filter Error"; | |
| 799 filter_needs_more_output_space_ = false; | |
| 800 error = ERR_CONTENT_DECODING_FAILED; | |
| 801 UMA_HISTOGRAM_ENUMERATION("Net.ContentDecodingFailed.FilterType", | |
| 802 filter_->type(), Filter::FILTER_TYPE_MAX); | |
| 803 break; | |
| 804 } | |
| 805 default: { | |
| 806 NOTREACHED(); | |
| 807 filter_needs_more_output_space_ = false; | |
| 808 error = ERR_FAILED; | |
| 809 break; | |
| 810 } | |
| 811 } | |
| 812 | |
| 813 // If logging all bytes is enabled, log the filtered bytes read. | |
| 814 if (error == OK && filtered_data_len > 0 && | |
| 815 request()->net_log().IsCapturing()) { | |
| 816 request()->net_log().AddByteTransferEvent( | |
| 817 NetLog::TYPE_URL_REQUEST_JOB_FILTERED_BYTES_READ, filtered_data_len, | |
| 818 filtered_read_buffer_->data()); | |
| 819 } | |
| 820 } else { | |
| 821 // we are done, or there is no data left. | |
| 822 error = OK; | |
| 823 } | |
| 824 break; | |
| 825 } | |
| 826 | |
| 827 if (error == OK) { | |
| 828 // When we successfully finished a read, we no longer need to save the | |
| 829 // caller's buffers. Release our reference. | |
| 830 filtered_read_buffer_ = NULL; | |
| 831 filtered_read_buffer_len_ = 0; | |
| 832 } | |
| 833 return error; | |
| 834 } | |
| 835 | |
| 836 void URLRequestJob::DestroyFilters() { | |
| 837 filter_.reset(); | |
| 838 } | 710 } |
| 839 | 711 |
| 840 const URLRequestStatus URLRequestJob::GetStatus() { | 712 const URLRequestStatus URLRequestJob::GetStatus() { |
| 841 return request_->status(); | 713 return request_->status(); |
| 842 } | 714 } |
| 843 | 715 |
| 844 void URLRequestJob::SetStatus(const URLRequestStatus &status) { | 716 void URLRequestJob::SetStatus(const URLRequestStatus &status) { |
| 845 // An error status should never be replaced by a non-error status by a | 717 // An error status should never be replaced by a non-error status by a |
| 846 // URLRequestJob. URLRequest has some retry paths, but it resets the status | 718 // URLRequestJob. URLRequest has some retry paths, but it resets the status |
| 847 // itself, if needed. | 719 // itself, if needed. |
| 848 DCHECK(request_->status().is_io_pending() || | 720 DCHECK(request_->status().is_io_pending() || |
| 849 request_->status().is_success() || | 721 request_->status().is_success() || |
| 850 (!status.is_success() && !status.is_io_pending())); | 722 (!status.is_success() && !status.is_io_pending())); |
| 851 request_->set_status(status); | 723 request_->set_status(status); |
| 852 } | 724 } |
| 853 | 725 |
| 854 void URLRequestJob::SetProxyServer(const HostPortPair& proxy_server) { | 726 void URLRequestJob::SetProxyServer(const HostPortPair& proxy_server) { |
| 855 request_->proxy_server_ = proxy_server; | 727 request_->proxy_server_ = proxy_server; |
| 856 } | 728 } |
| 857 | 729 |
| 858 Error URLRequestJob::ReadRawDataForFilter(int* bytes_read) { | 730 int64_t URLRequestJob::postfilter_bytes_read() const { |
| 859 Error error = ERR_FAILED; | 731 return base::checked_cast<int64_t>(postfilter_bytes_read_); |
| 860 DCHECK(bytes_read); | |
| 861 DCHECK(filter_.get()); | |
| 862 | |
| 863 *bytes_read = 0; | |
| 864 | |
| 865 // Get more pre-filtered data if needed. | |
| 866 // TODO(mbelshe): is it possible that the filter needs *MORE* data | |
| 867 // when there is some data already in the buffer? | |
| 868 if (!filter_->stream_data_len() && !is_done()) { | |
| 869 IOBuffer* stream_buffer = filter_->stream_buffer(); | |
| 870 int stream_buffer_size = filter_->stream_buffer_size(); | |
| 871 error = ReadRawDataHelper(stream_buffer, stream_buffer_size, bytes_read); | |
| 872 } | |
| 873 return error; | |
| 874 } | 732 } |
| 875 | 733 |
| 876 Error URLRequestJob::ReadRawDataHelper(IOBuffer* buf, | 734 int URLRequestJob::ReadRawDataHelper(IOBuffer* buf, |
| 877 int buf_size, | 735 int buf_size, |
| 878 int* bytes_read) { | 736 const CompletionCallback& callback) { |
| 879 DCHECK(!raw_read_buffer_); | 737 DCHECK(!raw_read_buffer_); |
| 880 | 738 |
| 881 // Keep a pointer to the read buffer, so we have access to it in | 739 // Keep a pointer to the read buffer, so URLRequestJob::GatherRawReadStats() |
| 882 // GatherRawReadStats() in the event that the read completes asynchronously. | 740 // has access to it to log stats. |
| 883 raw_read_buffer_ = buf; | 741 raw_read_buffer_ = buf; |
| 884 Error error; | 742 int result = ReadRawData(buf, buf_size); |
| 885 ConvertResultToError(ReadRawData(buf, buf_size), &error, bytes_read); | |
| 886 | 743 |
| 887 if (error != ERR_IO_PENDING) { | 744 if (result != ERR_IO_PENDING) { |
| 888 // If the read completes synchronously, either success or failure, invoke | 745 // If the read completes synchronously, either success or failure, invoke |
| 889 // GatherRawReadStats so we can account for the completed read. | 746 // GatherRawReadStats so we can account for the completed read. |
| 890 GatherRawReadStats(error, *bytes_read); | 747 GatherRawReadStats(result); |
| 748 } else { | |
| 749 read_raw_callback_ = callback; | |
|
mmenke
2016/07/28 18:40:13
Should we use a callback, or just keep a pointer t… [comment truncated in page capture]
xunjieli
2016/08/01 16:46:23
Acknowledged. Talked offline and we decided to leave it as is. [tail reconstructed; original truncated in page capture]
| |
| 891 } | 750 } |
| 892 return error; | 751 return result; |
| 893 } | 752 } |
| 894 | 753 |
| 895 void URLRequestJob::FollowRedirect(const RedirectInfo& redirect_info) { | 754 void URLRequestJob::FollowRedirect(const RedirectInfo& redirect_info) { |
| 896 int rv = request_->Redirect(redirect_info); | 755 int rv = request_->Redirect(redirect_info); |
| 897 if (rv != OK) | 756 if (rv != OK) |
| 898 NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, rv)); | 757 NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, rv)); |
| 899 } | 758 } |
| 900 | 759 |
| 901 void URLRequestJob::GatherRawReadStats(Error error, int bytes_read) { | 760 void URLRequestJob::GatherRawReadStats(int bytes_read) { |
| 902 DCHECK(raw_read_buffer_ || bytes_read == 0); | 761 DCHECK(raw_read_buffer_ || bytes_read == 0); |
| 903 DCHECK_NE(ERR_IO_PENDING, error); | 762 DCHECK_NE(ERR_IO_PENDING, bytes_read); |
| 904 | |
| 905 if (error != OK) { | |
| 906 raw_read_buffer_ = nullptr; | |
| 907 return; | |
| 908 } | |
| 909 // If |filter_| is non-NULL, bytes will be logged after it is applied | |
| 910 // instead. | |
| 911 if (!filter_.get() && bytes_read > 0 && request()->net_log().IsCapturing()) { | |
| 912 request()->net_log().AddByteTransferEvent( | |
| 913 NetLog::TYPE_URL_REQUEST_JOB_BYTES_READ, bytes_read, | |
| 914 raw_read_buffer_->data()); | |
| 915 } | |
| 916 | 763 |
| 917 if (bytes_read > 0) { | 764 if (bytes_read > 0) { |
| 765 if (request()->net_log().IsCapturing()) { | |
| 766 request()->net_log().AddByteTransferEvent( | |
| 767 NetLog::TYPE_URL_REQUEST_JOB_BYTES_READ, bytes_read, | |
| 768 raw_read_buffer_->data()); | |
| 769 } | |
| 918 RecordBytesRead(bytes_read); | 770 RecordBytesRead(bytes_read); |
| 919 } | 771 } |
| 920 raw_read_buffer_ = nullptr; | 772 raw_read_buffer_ = nullptr; |
| 921 } | 773 } |
| 922 | 774 |
| 923 void URLRequestJob::RecordBytesRead(int bytes_read) { | 775 void URLRequestJob::RecordBytesRead(int bytes_read) { |
| 924 DCHECK_GT(bytes_read, 0); | 776 DCHECK_GT(bytes_read, 0); |
| 925 prefilter_bytes_read_ += bytes_read; | 777 prefilter_bytes_read_ += base::checked_cast<size_t>(bytes_read); |
| 926 | 778 |
| 927 // On first read, notify NetworkQualityEstimator that response headers have | 779 // On first read, notify NetworkQualityEstimator that response headers have |
| 928 // been received. | 780 // been received. |
| 929 // TODO(tbansal): Move this to url_request_http_job.cc. This may catch | 781 // TODO(tbansal): Move this to url_request_http_job.cc. This may catch |
| 930 // Service Worker jobs twice. | 782 // Service Worker jobs twice. |
| 931 // If prefilter_bytes_read_ is equal to bytes_read, it indicates this is the | 783 // If prefilter_bytes_read_ is equal to bytes_read, it indicates this is the |
| 932 // first raw read of the response body. This is used as the signal that | 784 // first raw read of the response body. This is used as the signal that |
| 933 // response headers have been received. | 785 // response headers have been received. |
| 934 if (request_->context()->network_quality_estimator() && | 786 if (request_->context()->network_quality_estimator() && |
| 935 prefilter_bytes_read_ == bytes_read) { | 787 prefilter_bytes_read() == bytes_read) { |
| 936 request_->context()->network_quality_estimator()->NotifyHeadersReceived( | 788 request_->context()->network_quality_estimator()->NotifyHeadersReceived( |
| 937 *request_); | 789 *request_); |
| 938 } | 790 } |
| 939 | 791 |
| 940 if (!filter_.get()) | 792 DVLOG(2) << __FUNCTION__ << "() " |
| 941 postfilter_bytes_read_ += bytes_read; | 793 << "\"" << request_->url().spec() << "\"" |
| 942 DVLOG(2) << __func__ << "() \"" << request_->url().spec() << "\"" | |
| 943 << " pre bytes read = " << bytes_read | 794 << " pre bytes read = " << bytes_read |
| 944 << " pre total = " << prefilter_bytes_read_ | 795 << " pre total = " << prefilter_bytes_read() |
| 945 << " post total = " << postfilter_bytes_read_; | 796 << " post total = " << postfilter_bytes_read(); |
| 946 UpdatePacketReadTimes(); // Facilitate stats recording if it is active. | 797 UpdatePacketReadTimes(); // Facilitate stats recording if it is active. |
| 947 | 798 |
| 948 // Notify observers if any additional network usage has occurred. Note that | 799 // Notify observers if any additional network usage has occurred. Note that |
| 949 // the number of received bytes over the network sent by this notification | 800 // the number of received bytes over the network sent by this notification |
| 950 // could be vastly different from |bytes_read|, such as when a large chunk of | 801 // could be vastly different from |bytes_read|, such as when a large chunk of |
| 951 // network bytes is received before multiple smaller raw reads are performed | 802 // network bytes is received before multiple smaller raw reads are performed |
| 952 // on it. | 803 // on it. |
| 953 MaybeNotifyNetworkBytes(); | 804 MaybeNotifyNetworkBytes(); |
| 954 } | 805 } |
| 955 | 806 |
| 956 bool URLRequestJob::FilterHasData() { | |
| 957 return filter_.get() && filter_->stream_data_len(); | |
| 958 } | |
| 959 | |
| 960 void URLRequestJob::UpdatePacketReadTimes() { | 807 void URLRequestJob::UpdatePacketReadTimes() { |
| 961 } | 808 } |
| 962 | 809 |
| 963 RedirectInfo URLRequestJob::ComputeRedirectInfo(const GURL& location, | 810 RedirectInfo URLRequestJob::ComputeRedirectInfo(const GURL& location, |
| 964 int http_status_code) { | 811 int http_status_code) { |
| 965 const GURL& url = request_->url(); | 812 const GURL& url = request_->url(); |
| 966 | 813 |
| 967 RedirectInfo redirect_info; | 814 RedirectInfo redirect_info; |
| 968 | 815 |
| 969 redirect_info.status_code = http_status_code; | 816 redirect_info.status_code = http_status_code; |
| (...skipping 66 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 1036 int64_t total_sent_bytes = GetTotalSentBytes(); | 883 int64_t total_sent_bytes = GetTotalSentBytes(); |
| 1037 DCHECK_GE(total_sent_bytes, last_notified_total_sent_bytes_); | 884 DCHECK_GE(total_sent_bytes, last_notified_total_sent_bytes_); |
| 1038 if (total_sent_bytes > last_notified_total_sent_bytes_) { | 885 if (total_sent_bytes > last_notified_total_sent_bytes_) { |
| 1039 network_delegate_->NotifyNetworkBytesSent( | 886 network_delegate_->NotifyNetworkBytesSent( |
| 1040 request_, total_sent_bytes - last_notified_total_sent_bytes_); | 887 request_, total_sent_bytes - last_notified_total_sent_bytes_); |
| 1041 } | 888 } |
| 1042 last_notified_total_sent_bytes_ = total_sent_bytes; | 889 last_notified_total_sent_bytes_ = total_sent_bytes; |
| 1043 } | 890 } |
| 1044 | 891 |
| 1045 } // namespace net | 892 } // namespace net |
| OLD | NEW |