Index: net/url_request/url_request_job.cc
diff --git a/net/url_request/url_request_job.cc b/net/url_request/url_request_job.cc
index 641c07bc671663ee709e1bc15de1fea122c55c62..ae731310082dd52ae20c780ea3e9f9ef09ff78cf 100644
--- a/net/url_request/url_request_job.cc
+++ b/net/url_request/url_request_job.cc
@@ -101,45 +101,46 @@ void URLRequestJob::DetachRequest() {
   request_ = NULL;
 }
-// This function calls ReadData to get stream data. If a filter exists, passes
-// the data to the attached filter. Then returns the output from filter back to
-// the caller.
+// This function calls ReadRawData to get stream data. If a filter exists, it
+// passes the data to the attached filter. It then returns the output from
+// filter back to the caller.
 bool URLRequestJob::Read(IOBuffer* buf, int buf_size, int *bytes_read) {
-  bool rv = false;
-
   DCHECK_LT(buf_size, 1000000);  // Sanity check.
   DCHECK(buf);
   DCHECK(bytes_read);
   DCHECK(filtered_read_buffer_.get() == NULL);
   DCHECK_EQ(0, filtered_read_buffer_len_);
+  Error error = OK;
   *bytes_read = 0;
   // Skip Filter if not present.
-  if (!filter_.get()) {
-    rv = ReadRawDataHelper(buf, buf_size, bytes_read);
+  if (!filter_) {
+    error = ReadRawDataHelper(buf, buf_size, bytes_read);
   } else {
     // Save the caller's buffers while we do IO
     // in the filter's buffers.
     filtered_read_buffer_ = buf;
     filtered_read_buffer_len_ = buf_size;
-    if (ReadFilteredData(bytes_read)) {
-      rv = true;  // We have data to return.
+    error = ReadFilteredData(bytes_read);
-      // It is fine to call DoneReading even if ReadFilteredData receives 0
-      // bytes from the net, but we avoid making that call if we know for
-      // sure that's the case (ReadRawDataHelper path).
-      if (*bytes_read == 0)
-        DoneReading();
-    } else {
-      rv = false;  // Error, or a new IO is pending.
-    }
+    // Synchronous EOF from the filter.
+    if (error == OK && *bytes_read == 0)
+      DoneReading();
mmenke, 2015/10/22 18:58:10:
  Randy: Did we ever figure out why this isn't call
Randy Smith (Not in Mondays), 2015/10/22 20:38:45:
  So the comment in url_request_job.h says:
  // Ca
mmenke, 2015/10/22 20:42:46:
  I thought transaction_->DoneReading() was used to
Randy Smith (Not in Mondays), 2015/10/26 21:38:03:
  So the comment in HttpTransaction suggests why we
   }
-  if (rv && *bytes_read == 0)
-    NotifyDone(URLRequestStatus());
-  return rv;
+  if (error == OK) {
+    // If URLRequestJob read zero bytes, the job is at EOF.
+    if (*bytes_read == 0)
+      NotifyDone(URLRequestStatus());
+  } else if (error == ERR_IO_PENDING) {
+    SetStatus(URLRequestStatus::FromError(ERR_IO_PENDING));
+  } else {
+    NotifyDone(URLRequestStatus::FromError(error));
+    *bytes_read = -1;
+  }
+  return error == OK;
 }
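For orientation, here is a minimal caller-side sketch of the three outcomes the rewritten Read() can produce. The names job and request, and the 4096 buffer size, are illustrative, not part of this CL:

    // Hypothetical caller, sketching the rewritten Read() contract.
    scoped_refptr<IOBuffer> buf(new IOBuffer(4096));
    int bytes_read = 0;
    if (job->Read(buf.get(), 4096, &bytes_read)) {
      // error == OK: bytes_read > 0 is data; bytes_read == 0 is EOF, and the
      // job has already called NotifyDone() with a success status.
    } else if (request->status().is_io_pending()) {
      // error == ERR_IO_PENDING: SetStatus() marked the request pending;
      // completion arrives later via ReadRawDataComplete().
    } else {
      // A real error: bytes_read is now -1 and NotifyDone() carried the error.
    }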
 void URLRequestJob::StopCaching() {
@@ -480,11 +481,25 @@ void URLRequestJob::NotifyHeadersComplete() {
   request_->NotifyResponseStarted();
 }
-void URLRequestJob::NotifyReadComplete(int bytes_read) {
+void URLRequestJob::ConvertResultToError(int result, Error* error, int* count) {
+  if (result >= 0) {
+    *error = OK;
+    *count = result;
+  } else {
+    *error = static_cast<Error>(result);
+    *count = 0;
+  }
+}
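ConvertResultToError() splits the usual net-style int result (a byte count when non-negative, a net::Error when negative) into its two components. A quick worked example of the mapping, with illustrative values:

    Error error;
    int count;
    ConvertResultToError(512, &error, &count);             // error == OK, count == 512
    ConvertResultToError(0, &error, &count);               // error == OK, count == 0 (EOF)
    ConvertResultToError(ERR_IO_PENDING, &error, &count);  // error == ERR_IO_PENDING, count == 0
    ConvertResultToError(ERR_FAILED, &error, &count);      // error == ERR_FAILED, count == 0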
+
+void URLRequestJob::ReadRawDataComplete(int result) {
   // TODO(cbentzel): Remove ScopedTracker below once crbug.com/475755 is fixed.
   tracked_objects::ScopedTracker tracking_profile(
       FROM_HERE_WITH_EXPLICIT_FUNCTION(
-          "475755 URLRequestJob::NotifyReadComplete"));
+          "475755 URLRequestJob::RawReadCompleted"));
+
+  Error error;
+  int bytes_read;
+  ConvertResultToError(result, &error, &bytes_read);
   if (!request_ || !request_->has_delegate())
     return;  // The request was destroyed, so there is no more work to do.
@@ -497,11 +512,35 @@ void URLRequestJob::NotifyReadComplete(int bytes_read) {
   // The headers should be complete before reads complete
   DCHECK(has_handled_response_);
-  OnRawReadComplete(bytes_read);
+  GatherRawReadStats(error, bytes_read);
-  // Don't notify if we had an error.
-  if (!request_->status().is_success())
-    return;
+  bool notify = false;
+  if (filter_.get() && error == OK) {
+    int filter_bytes_read = 0;
+    // Tell the filter that it has more data
mmenke, 2015/10/22 18:58:10:
  nit: +.
xunjieli, 2015/10/23 13:43:08:
  Done.
+    PushInputToFilter(bytes_read);
+
+    // Filter the data.
+    error = ReadFilteredData(&filter_bytes_read);
+    if (error == OK) {
+      if (!filter_bytes_read)
+        DoneReading();
+      notify = true;
mmenke, 2015/10/22 18:58:11:
  Keep comment from before? ("Don't notify if we ha
xunjieli, 2015/10/23 13:43:08:
  Done.
+    }
+    DVLOG(1) << __FUNCTION__ << "() "
+             << "\"" << (request_ ? request_->url().spec() : "???") << "\""
+             << " pre bytes read = " << bytes_read
+             << " pre total = " << prefilter_bytes_read_
+             << " post total = " << postfilter_bytes_read_;
+    bytes_read = filter_bytes_read;
+  } else {
+    notify = true;
+    DVLOG(1) << __FUNCTION__ << "() "
+             << "\"" << (request_ ? request_->url().spec() : "???") << "\""
+             << " pre bytes read = " << bytes_read
+             << " pre total = " << prefilter_bytes_read_
+             << " post total = " << postfilter_bytes_read_;
+  }
   // When notifying the delegate, the delegate can release the request
   // (and thus release 'this'). After calling to the delegate, we must
@@ -510,25 +549,27 @@ void URLRequestJob::NotifyReadComplete(int bytes_read) {
   // survival until we can get out of this method.
   scoped_refptr<URLRequestJob> self_preservation(this);
-  if (filter_.get()) {
-    // Tell the filter that it has more data
-    FilteredDataRead(bytes_read);
-
-    // Filter the data.
-    int filter_bytes_read = 0;
-    if (ReadFilteredData(&filter_bytes_read)) {
-      if (!filter_bytes_read)
-        DoneReading();
-      request_->NotifyReadCompleted(filter_bytes_read);
-    }
+  // Synchronize the URLRequest state machine with the URLRequestJob state
+  // machine. If this read succeeded, either the request is at EOF and the
+  // URLRequest state machine goes to 'finished', or it is not and the
+  // URLRequest state machine goes to 'success'. If the read failed, the
+  // URLRequest state machine goes directly to 'finished'.
+  //
+  // Update the URLRequest's status first, so that NotifyReadCompleted has an
+  // accurate view of the request.
+  if (error == OK) {
+    if (bytes_read == 0)
+      NotifyDone(URLRequestStatus());
+    else
+      SetStatus(URLRequestStatus());
   } else {
-    request_->NotifyReadCompleted(bytes_read);
+    NotifyDone(URLRequestStatus::FromError(error));
   }
mmenke, 2015/10/22 18:58:10:
  Maybe:
    if (error == OK && bytes_read > 0) {
    Set
xunjieli, 2015/10/23 13:43:08:
  Done.
-  DVLOG(1) << __FUNCTION__ << "() "
-           << "\"" << (request_ ? request_->url().spec() : "???") << "\""
-           << " pre bytes read = " << bytes_read
-           << " pre total = " << prefilter_bytes_read_
-           << " post total = " << postfilter_bytes_read_;
+
+  // TODO(ellyjones): why does this method only call NotifyReadComplete when
+  // there isn't a filter error? How do filter errors get notified?
+  if (notify)
+    request_->NotifyReadCompleted(bytes_read);
 }
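Pulling the branches above together, the completion path resolves as follows. This is a recap derived from this patch set as written; later patch sets may adjust it, per the review comments:

    // Outcomes of ReadRawDataComplete(result), as the code above stands:
    //   raw error (result < 0):   NotifyDone(error); NotifyReadCompleted runs
    //                             with bytes_read == 0.
    //   success, no filter:       SetStatus(success), or NotifyDone at EOF,
    //                             then NotifyReadCompleted(bytes_read).
    //   success, filter OK:       DoneReading() at filter EOF, then
    //                             NotifyReadCompleted(filter_bytes_read).
    //   success, filter error:    NotifyDone(filter error), but no
    //                             NotifyReadCompleted -- the gap the TODO flags.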
 void URLRequestJob::NotifyStartError(const URLRequestStatus &status) {
@@ -555,7 +596,7 @@ void URLRequestJob::NotifyDone(const URLRequestStatus &status) {
   // the response before getting here.
   DCHECK(has_handled_response_ || !status.is_success());
-  // As with NotifyReadComplete, we need to take care to notice if we were
+  // As with RawReadCompleted, we need to take care to notice if we were
   // destroyed during a delegate callback.
   if (request_) {
     request_->set_is_pending(false);
@@ -638,11 +679,8 @@ void URLRequestJob::OnCallToDelegateComplete() {
   request_->OnCallToDelegateComplete();
 }
-bool URLRequestJob::ReadRawData(IOBuffer* buf, int buf_size,
-                                int *bytes_read) {
-  DCHECK(bytes_read);
-  *bytes_read = 0;
-  return true;
+int URLRequestJob::ReadRawData(IOBuffer* buf, int buf_size) {
+  return 0;
 }
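With ReadRawData() now returning a plain int, a subclass reports synchronous results through the return value and asynchronous ones via ReadRawDataComplete(). A hypothetical override, where MyJob, source_, and OnReadDone are illustrative names, not from this CL:

    // Hypothetical subclass, sketching the new ReadRawData() contract.
    int MyJob::ReadRawData(IOBuffer* buf, int buf_size) {
      int rv = source_->Read(
          buf, buf_size,
          base::Bind(&MyJob::OnReadDone, weak_factory_.GetWeakPtr()));
      // >= 0: bytes read synchronously (0 == EOF); ERR_IO_PENDING: OnReadDone
      // fires later; any other negative value is a net::Error.
      return rv;
    }

    void MyJob::OnReadDone(int result) {
      // Single completion point for async reads; sync results were returned
      // directly from ReadRawData() above.
      ReadRawDataComplete(result);
    }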
 void URLRequestJob::DoneReading() {
@@ -652,38 +690,34 @@ void URLRequestJob::DoneReading() {
 void URLRequestJob::DoneReadingRedirectResponse() {
 }
-void URLRequestJob::FilteredDataRead(int bytes_read) {
+void URLRequestJob::PushInputToFilter(int bytes_read) {
   DCHECK(filter_);
   filter_->FlushStreamBuffer(bytes_read);
 }
-bool URLRequestJob::ReadFilteredData(int* bytes_read) {
+Error URLRequestJob::ReadFilteredData(int* bytes_read) {
   DCHECK(filter_);
   DCHECK(filtered_read_buffer_.get());
   DCHECK_GT(filtered_read_buffer_len_, 0);
   DCHECK_LT(filtered_read_buffer_len_, 1000000);  // Sanity check.
-  DCHECK(!raw_read_buffer_.get());
+  DCHECK(!raw_read_buffer_);
   *bytes_read = 0;
-  bool rv = false;
+  Error error = ERR_FAILED;
   for (;;) {
     if (is_done())
-      return true;
+      return OK;
     if (!filter_needs_more_output_space_ && !filter_->stream_data_len()) {
       // We don't have any raw data to work with, so read from the transaction.
       int filtered_data_read;
-      if (ReadRawDataForFilter(&filtered_data_read)) {
-        if (filtered_data_read > 0) {
-          // Give data to filter.
-          filter_->FlushStreamBuffer(filtered_data_read);
-        } else {
-          return true;  // EOF.
-        }
-      } else {
-        return false;  // IO Pending (or error).
-      }
+      error = ReadRawDataForFilter(&filtered_data_read);
+      // If ReadRawDataForFilter returned some data, fall through to the case
+      // below; otherwise, return early.
+      if (error != OK || filtered_data_read == 0)
+        return error;
+      filter_->FlushStreamBuffer(filtered_data_read);
     }
     if ((filter_->stream_data_len() || filter_needs_more_output_space_) &&
@@ -709,7 +743,7 @@ bool URLRequestJob::ReadFilteredData(int* bytes_read) {
           filter_needs_more_output_space_ = false;
           *bytes_read = filtered_data_len;
           postfilter_bytes_read_ += filtered_data_len;
-          rv = true;
+          error = OK;
           break;
         }
         case Filter::FILTER_NEED_MORE_DATA: {
@@ -722,7 +756,7 @@ bool URLRequestJob::ReadFilteredData(int* bytes_read) {
           if (filtered_data_len > 0) {
            *bytes_read = filtered_data_len;
            postfilter_bytes_read_ += filtered_data_len;
-            rv = true;
+            error = OK;
          } else {
            // Read again since we haven't received enough data yet (e.g., we
            // may not have a complete gzip header yet).
@@ -733,7 +767,7 @@ bool URLRequestJob::ReadFilteredData(int* bytes_read) {
         case Filter::FILTER_OK: {
           *bytes_read = filtered_data_len;
           postfilter_bytes_read_ += filtered_data_len;
-          rv = true;
+          error = OK;
           break;
         }
         case Filter::FILTER_ERROR: {
@@ -741,21 +775,19 @@ bool URLRequestJob::ReadFilteredData(int* bytes_read) {
               << "\"" << (request_ ? request_->url().spec() : "???")
               << "\"" << " Filter Error";
           filter_needs_more_output_space_ = false;
-          NotifyDone(URLRequestStatus(URLRequestStatus::FAILED,
-                                      ERR_CONTENT_DECODING_FAILED));
-          rv = false;
+          error = ERR_CONTENT_DECODING_FAILED;
           break;
         }
         default: {
           NOTREACHED();
           filter_needs_more_output_space_ = false;
-          rv = false;
+          error = ERR_FAILED;
           break;
         }
       }
       // If logging all bytes is enabled, log the filtered bytes read.
-      if (rv && request() && filtered_data_len > 0 &&
+      if (error == OK && request() && filtered_data_len > 0 &&
          request()->net_log().IsCapturing()) {
        request()->net_log().AddByteTransferEvent(
            NetLog::TYPE_URL_REQUEST_JOB_FILTERED_BYTES_READ, filtered_data_len,
@@ -763,18 +795,18 @@ bool URLRequestJob::ReadFilteredData(int* bytes_read) {
       }
     } else {
       // we are done, or there is no data left.
-      rv = true;
+      error = OK;
     }
     break;
   }
-  if (rv) {
+  if (error == OK) {
     // When we successfully finished a read, we no longer need to save the
     // caller's buffers. Release our reference.
     filtered_read_buffer_ = NULL;
     filtered_read_buffer_len_ = 0;
   }
-  return rv;
+  return error;
 }
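The loop above folds Filter::FilterStatus into the single Error return. Restated as a standalone helper for clarity (illustrative only; this CL keeps the mapping inline):

    // Illustrative only: the FilterStatus -> Error mapping that
    // ReadFilteredData() now implements inline.
    Error FilterStatusToError(Filter::FilterStatus status) {
      switch (status) {
        case Filter::FILTER_DONE:
        case Filter::FILTER_OK:
          return OK;
        case Filter::FILTER_NEED_MORE_DATA:
          // OK when output was produced; otherwise the loop reads more raw data.
          return OK;
        case Filter::FILTER_ERROR:
          return ERR_CONTENT_DECODING_FAILED;
        default:
          return ERR_FAILED;  // NOTREACHED() in the real code.
      }
    }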
 void URLRequestJob::DestroyFilters() {
@@ -807,9 +839,8 @@ void URLRequestJob::SetProxyServer(const HostPortPair& proxy_server) {
   request_->proxy_server_ = proxy_server;
 }
-bool URLRequestJob::ReadRawDataForFilter(int* bytes_read) {
-  bool rv = false;
-
+Error URLRequestJob::ReadRawDataForFilter(int* bytes_read) {
+  Error error = ERR_FAILED;
   DCHECK(bytes_read);
   DCHECK(filter_.get());
@@ -821,29 +852,29 @@ bool URLRequestJob::ReadRawDataForFilter(int* bytes_read) {
   if (!filter_->stream_data_len() && !is_done()) {
     IOBuffer* stream_buffer = filter_->stream_buffer();
     int stream_buffer_size = filter_->stream_buffer_size();
-    rv = ReadRawDataHelper(stream_buffer, stream_buffer_size, bytes_read);
+    error = ReadRawDataHelper(stream_buffer, stream_buffer_size, bytes_read);
   }
-  return rv;
+  return error;
 }
-bool URLRequestJob::ReadRawDataHelper(IOBuffer* buf, int buf_size,
-                                      int* bytes_read) {
+Error URLRequestJob::ReadRawDataHelper(IOBuffer* buf,
+                                       int buf_size,
+                                       int* bytes_read) {
   DCHECK(!request_->status().is_io_pending());
-  DCHECK(raw_read_buffer_.get() == NULL);
+  DCHECK(!raw_read_buffer_);
-  // Keep a pointer to the read buffer, so we have access to it in the
-  // OnRawReadComplete() callback in the event that the read completes
-  // asynchronously.
+  // Keep a pointer to the read buffer, so we have access to it in
+  // GatherRawReadStats() in the event that the read completes asynchronously.
   raw_read_buffer_ = buf;
-  bool rv = ReadRawData(buf, buf_size, bytes_read);
+  Error error;
+  ConvertResultToError(ReadRawData(buf, buf_size), &error, bytes_read);
-  if (!request_->status().is_io_pending()) {
-    // If the read completes synchronously, either success or failure,
-    // invoke the OnRawReadComplete callback so we can account for the
-    // completed read.
-    OnRawReadComplete(*bytes_read);
+  if (error != ERR_IO_PENDING) {
+    // If the read completes synchronously, either success or failure, invoke
+    // GatherRawReadStats so we can account for the completed read.
+    GatherRawReadStats(error, *bytes_read);
   }
-  return rv;
+  return error;
 }
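The helper's key property is that GatherRawReadStats() runs exactly once per read: synchronously here, or later from ReadRawDataComplete(). The possible results at the call site, with illustrative values:

    // Possible results of ReadRawDataHelper(buf, buf_size, &bytes_read):
    //   OK, bytes_read > 0   sync data; stats already gathered.
    //   OK, bytes_read == 0  sync EOF; stats already gathered.
    //   ERR_IO_PENDING       async; GatherRawReadStats() runs later, from
    //                        ReadRawDataComplete().
    //   other negative       sync failure (e.g. ERR_CONNECTION_RESET); stats
    //                        gathered, then the caller maps it to a status.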
 void URLRequestJob::FollowRedirect(const RedirectInfo& redirect_info) {
@@ -852,8 +883,11 @@ void URLRequestJob::FollowRedirect(const RedirectInfo& redirect_info) {
     NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, rv));
 }
-void URLRequestJob::OnRawReadComplete(int bytes_read) {
-  DCHECK(raw_read_buffer_.get());
+void URLRequestJob::GatherRawReadStats(Error error, int bytes_read) {
+  DCHECK(raw_read_buffer_ || bytes_read == 0);
+  DCHECK_NE(ERR_IO_PENDING, error);
+  if (error != OK)
+    return;
Randy Smith (Not in Mondays), 2015/10/22 20:38:45:
  It looks to me (you should verify) like this will
xunjieli, 2015/10/23 13:43:08:
  Done. I believe you are right.
Randy Smith (Not in Mondays), 2015/10/26 21:38:03:
  nit, suggestion: I think the code would be a bit c
xunjieli, 2015/10/27 14:17:21:
  Done.
   // If |filter_| is non-NULL, bytes will be logged after it is applied instead.
   if (!filter_.get() && request() && bytes_read > 0 &&
       request()->net_log().IsCapturing()) {