Index: net/url_request/url_request_job.cc |
diff --git a/net/url_request/url_request_job.cc b/net/url_request/url_request_job.cc |
index 3073414d421426aa7e3b086300840e6069007db1..4914345f3b39a76f095270f95e2435987bfc398f 100644 |
--- a/net/url_request/url_request_job.cc |
+++ b/net/url_request/url_request_job.cc |
@@ -306,13 +306,12 @@ void URLRequestJob::NotifyReadComplete(int bytes_read) { |
return; |
// When notifying the delegate, the delegate can release the request |
- // (and thus release 'this'). After calling to the delgate, we must |
+ // (and thus release 'this'). After calling to the delegate, we must |
// check the request pointer to see if it still exists, and return |
// immediately if it has been destroyed. self_preservation ensures our |
// survival until we can get out of this method. |
scoped_refptr<URLRequestJob> self_preservation(this); |
- prefilter_bytes_read_ += bytes_read; |
if (filter_.get()) { |
// Tell the filter that it has more data |
FilteredDataRead(bytes_read); |
@@ -320,13 +319,16 @@ void URLRequestJob::NotifyReadComplete(int bytes_read) { |
// Filter the data. |
int filter_bytes_read = 0; |
if (ReadFilteredData(&filter_bytes_read)) { |
- postfilter_bytes_read_ += filter_bytes_read; |
request_->delegate()->OnReadCompleted(request_, filter_bytes_read); |
} |
} else { |
- postfilter_bytes_read_ += bytes_read; |
request_->delegate()->OnReadCompleted(request_, bytes_read); |
} |
+  VLOG(21) << __FUNCTION__ << "() " |
[Review comment — rvargas (doing something else), 2011/05/21 01:24:50: DVLOG. I'm still not too happy about the 21... it'] |
[Review comment — ahendrickson, 2011/05/22 06:43:34: Reduced the number to 2 (in case anyone adds VLOGs)] |
 |
+ << "\"" << (request_ ? request_->url().spec() : "???") << "\"" |
+ << " pre bytes read = " << bytes_read |
+ << " pre total = " << prefilter_bytes_read_ |
+ << " post total = " << postfilter_bytes_read_; |
} |
void URLRequestJob::NotifyStartError(const URLRequestStatus &status) { |
@@ -468,6 +470,7 @@ bool URLRequestJob::ReadFilteredData(int* bytes_read) { |
case Filter::FILTER_DONE: { |
filter_needs_more_output_space_ = false; |
*bytes_read = filtered_data_len; |
+ postfilter_bytes_read_ += filtered_data_len; |
rv = true; |
break; |
} |
@@ -482,6 +485,7 @@ bool URLRequestJob::ReadFilteredData(int* bytes_read) { |
// We can revisit this issue if there is a real perf need. |
if (filtered_data_len > 0) { |
*bytes_read = filtered_data_len; |
+ postfilter_bytes_read_ += filtered_data_len; |
rv = true; |
} else { |
// Read again since we haven't received enough data yet (e.g., we may |
@@ -494,6 +498,7 @@ bool URLRequestJob::ReadFilteredData(int* bytes_read) { |
filter_needs_more_output_space_ = |
(filtered_data_len == output_buffer_size); |
*bytes_read = filtered_data_len; |
+ postfilter_bytes_read_ += filtered_data_len; |
rv = true; |
break; |
} |
@@ -511,6 +516,13 @@ bool URLRequestJob::ReadFilteredData(int* bytes_read) { |
break; |
} |
} |
+ VLOG(21) << __FUNCTION__ << "() " |
+ << "\"" << (request_ ? request_->url().spec() : "???") << "\"" |
+ << " rv = " << rv |
+ << " post bytes read = " << filtered_data_len |
+ << " pre total = " << prefilter_bytes_read_ |
+ << " post total = " |
+ << postfilter_bytes_read_; |
} else { |
// we are done, or there is no data left. |
rv = true; |
@@ -595,6 +607,14 @@ void URLRequestJob::OnRawReadComplete(int bytes_read) { |
void URLRequestJob::RecordBytesRead(int bytes_read) { |
filter_input_byte_count_ += bytes_read; |
+ prefilter_bytes_read_ += bytes_read; |
+ if (!filter_.get()) |
+ postfilter_bytes_read_ += bytes_read; |
+ VLOG(21) << __FUNCTION__ << "() " |
+ << "\"" << (request_ ? request_->url().spec() : "???") << "\"" |
+ << " pre bytes read = " << bytes_read |
+ << " pre total = " << prefilter_bytes_read_ |
+ << " post total = " << postfilter_bytes_read_; |
UpdatePacketReadTimes(); // Facilitate stats recording if it is active. |
g_url_request_job_tracker.OnBytesRead(this, raw_read_buffer_->data(), |
bytes_read); |