Chromium Code Reviews
Unified Diff: net/url_request/url_request_job.cc

Issue 6382003: Reorder the methods in net/url_request/. (Closed)
Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Compiling net_unittests != compiling the rest of chrome (created 9 years, 11 months ago)
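
For orientation while reading the reordered methods below: this is a minimal sketch, not part of this CL, of how a subclass plugs into the Read()/ReadRawData() contract that the moved methods implement. The class name FakeDataJob, its fixed body, and the includes are illustrative assumptions; only the URLRequestJob overrides and notification calls that appear in this diff (and its header) are taken from the source.

```cpp
// Illustrative only -- not part of this CL. A hypothetical in-memory job
// ("FakeDataJob" is an invented name) showing the contract that
// URLRequestJob::Read() relies on: ReadRawData() fills the caller's buffer,
// and returning true with *bytes_read == 0 signals EOF, at which point
// URLRequestJob::Read() calls NotifyDone().
#include <algorithm>
#include <cstring>
#include <string>

#include "net/base/io_buffer.h"
#include "net/url_request/url_request_job.h"

class FakeDataJob : public URLRequestJob {
 public:
  explicit FakeDataJob(URLRequest* request)
      : URLRequestJob(request), offset_(0) {}

  // Headers are available immediately for an in-memory body.
  virtual void Start() {
    NotifyHeadersComplete();
  }

  virtual bool GetMimeType(std::string* mime_type) const {
    *mime_type = "text/plain";
    return true;
  }

  // Synchronous read: copy as much of the remaining body as fits.
  // URLRequestJob::Read() (see the diff below) wraps this, runs any
  // attached content filter, and reports completion to the URLRequest.
  virtual bool ReadRawData(net::IOBuffer* buf, int buf_size,
                           int* bytes_read) {
    static const char kBody[] = "hello, world";
    int remaining = static_cast<int>(sizeof(kBody) - 1) - offset_;
    int count = std::min(buf_size, remaining);
    if (count > 0)
      memcpy(buf->data(), kBody + offset_, count);
    offset_ += count;
    *bytes_read = count;  // 0 bytes with a true return means EOF.
    return true;
  }

 private:
  virtual ~FakeDataJob() {}  // Jobs are reference counted.

  int offset_;
};
```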
Index: net/url_request/url_request_job.cc
diff --git a/net/url_request/url_request_job.cc b/net/url_request/url_request_job.cc
index 0c54c105fd9c00d5adb81a6109af869bbe60a59d..ac051270ccf588a4164cf8e6b31267a6479c6ab5 100644
--- a/net/url_request/url_request_job.cc
+++ b/net/url_request/url_request_job.cc
@@ -53,10 +53,6 @@ URLRequestJob::URLRequestJob(URLRequest* request)
g_url_request_job_tracker.AddNewJob(this);
}
-URLRequestJob::~URLRequestJob() {
- g_url_request_job_tracker.RemoveJob(this);
-}
-
void URLRequestJob::SetUpload(net::UploadData* upload) {
}
@@ -75,6 +71,68 @@ void URLRequestJob::DetachRequest() {
request_ = NULL;
}
+// This function calls ReadData to get stream data. If a filter exists, passes
+// the data to the attached filter. Then returns the output from filter back to
+// the caller.
+bool URLRequestJob::Read(net::IOBuffer* buf, int buf_size, int *bytes_read) {
+ bool rv = false;
+
+ DCHECK_LT(buf_size, 1000000); // sanity check
+ DCHECK(buf);
+ DCHECK(bytes_read);
+ DCHECK(filtered_read_buffer_ == NULL);
+ DCHECK_EQ(0, filtered_read_buffer_len_);
+
+ *bytes_read = 0;
+
+ // Skip Filter if not present
+ if (!filter_.get()) {
+ rv = ReadRawDataHelper(buf, buf_size, bytes_read);
+ } else {
+ // Save the caller's buffers while we do IO
+ // in the filter's buffers.
+ filtered_read_buffer_ = buf;
+ filtered_read_buffer_len_ = buf_size;
+
+ if (ReadFilteredData(bytes_read)) {
+ rv = true; // we have data to return
+ } else {
+ rv = false; // error, or a new IO is pending
+ }
+ }
+ if (rv && *bytes_read == 0)
+ NotifyDone(URLRequestStatus());
+ return rv;
+}
+
+void URLRequestJob::StopCaching() {
+ // Nothing to do here.
+}
+
+net::LoadState URLRequestJob::GetLoadState() const {
+ return net::LOAD_STATE_IDLE;
+}
+
+uint64 URLRequestJob::GetUploadProgress() const {
+ return 0;
+}
+
+bool URLRequestJob::GetCharset(std::string* charset) {
+ return false;
+}
+
+void URLRequestJob::GetResponseInfo(net::HttpResponseInfo* info) {
+}
+
+bool URLRequestJob::GetResponseCookies(std::vector<std::string>* cookies) {
+ return false;
+}
+
+bool URLRequestJob::GetContentEncodings(
+ std::vector<Filter::FilterType>* encoding_types) {
+ return false;
+}
+
void URLRequestJob::SetupFilter() {
std::vector<Filter::FilterType> encoding_types;
if (GetContentEncodings(&encoding_types)) {
@@ -158,8 +216,11 @@ void URLRequestJob::FollowDeferredRedirect() {
FollowRedirect(redirect_url, redirect_status_code);
}
-int64 URLRequestJob::GetByteReadCount() const {
- return filter_input_byte_count_;
+URLRequestJobMetrics* URLRequestJob::RetrieveMetrics() {
+ if (is_profiling())
+ return metrics_.release();
+ else
+ return NULL;
}
bool URLRequestJob::GetMimeType(std::string* mime_type) const {
@@ -177,12 +238,24 @@ base::Time URLRequestJob::GetRequestTime() const {
if (!request_)
return base::Time();
return request_->request_time();
-};
+}
+
+bool URLRequestJob::IsDownload() const {
+ return (load_flags_ & net::LOAD_IS_DOWNLOAD) != 0;
+}
+
+bool URLRequestJob::IsSdchResponse() const {
+ return false;
+}
bool URLRequestJob::IsCachedContent() const {
return false;
}
+int64 URLRequestJob::GetByteReadCount() const {
+ return filter_input_byte_count_;
+}
+
int URLRequestJob::GetResponseCode() const {
return -1;
}
@@ -191,255 +264,150 @@ int URLRequestJob::GetInputStreamBufferSize() const {
return kFilterBufSize;
}
-// This function calls ReadData to get stream data. If a filter exists, passes
-// the data to the attached filter. Then returns the output from filter back to
-// the caller.
-bool URLRequestJob::Read(net::IOBuffer* buf, int buf_size, int *bytes_read) {
- bool rv = false;
+void URLRequestJob::RecordPacketStats(StatisticSelector statistic) const {
+ if (!packet_timing_enabled_ || (final_packet_time_ == base::Time()))
+ return;
- DCHECK_LT(buf_size, 1000000); // sanity check
- DCHECK(buf);
- DCHECK(bytes_read);
- DCHECK(filtered_read_buffer_ == NULL);
- DCHECK_EQ(0, filtered_read_buffer_len_);
+ // Caller should verify that we're not cached content, but we can't always
+ // really check for it here because we may (at destruction time) call our own
+ // class method and get a bogus const answer of false. This DCHECK only helps
+ // when this method has a valid overridden definition.
+ DCHECK(!IsCachedContent());
- *bytes_read = 0;
+ base::TimeDelta duration = final_packet_time_ - request_time_snapshot_;
+ switch (statistic) {
+ case SDCH_DECODE: {
+ UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Decode_Latency_F_a", duration,
+ base::TimeDelta::FromMilliseconds(20),
+ base::TimeDelta::FromMinutes(10), 100);
+ UMA_HISTOGRAM_COUNTS_100("Sdch3.Network_Decode_Packets_b",
+ static_cast<int>(observed_packet_count_));
+ UMA_HISTOGRAM_CUSTOM_COUNTS("Sdch3.Network_Decode_Bytes_Processed_b",
+ static_cast<int>(bytes_observed_in_packets_), 500, 100000, 100);
+ if (packet_times_.empty())
+ return;
+ UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Decode_1st_To_Last_a",
+ final_packet_time_ - packet_times_[0],
+ base::TimeDelta::FromMilliseconds(20),
+ base::TimeDelta::FromMinutes(10), 100);
- // Skip Filter if not present
- if (!filter_.get()) {
- rv = ReadRawDataHelper(buf, buf_size, bytes_read);
- } else {
- // Save the caller's buffers while we do IO
- // in the filter's buffers.
- filtered_read_buffer_ = buf;
- filtered_read_buffer_len_ = buf_size;
+ DCHECK(max_packets_timed_ >= kSdchPacketHistogramCount);
+ DCHECK(kSdchPacketHistogramCount > 4);
+ if (packet_times_.size() <= 4)
+ return;
+ UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Decode_1st_To_2nd_c",
+ packet_times_[1] - packet_times_[0],
+ base::TimeDelta::FromMilliseconds(1),
+ base::TimeDelta::FromSeconds(10), 100);
+ UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Decode_2nd_To_3rd_c",
+ packet_times_[2] - packet_times_[1],
+ base::TimeDelta::FromMilliseconds(1),
+ base::TimeDelta::FromSeconds(10), 100);
+ UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Decode_3rd_To_4th_c",
+ packet_times_[3] - packet_times_[2],
+ base::TimeDelta::FromMilliseconds(1),
+ base::TimeDelta::FromSeconds(10), 100);
+ UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Decode_4th_To_5th_c",
+ packet_times_[4] - packet_times_[3],
+ base::TimeDelta::FromMilliseconds(1),
+ base::TimeDelta::FromSeconds(10), 100);
+ return;
+ }
+ case SDCH_PASSTHROUGH: {
+ // Despite advertising a dictionary, we handled non-sdch compressed
+ // content.
+ UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Pass-through_Latency_F_a",
+ duration,
+ base::TimeDelta::FromMilliseconds(20),
+ base::TimeDelta::FromMinutes(10), 100);
+ UMA_HISTOGRAM_COUNTS_100("Sdch3.Network_Pass-through_Packets_b",
+ observed_packet_count_);
+ if (packet_times_.empty())
+ return;
+ UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Pass-through_1st_To_Last_a",
+ final_packet_time_ - packet_times_[0],
+ base::TimeDelta::FromMilliseconds(20),
+ base::TimeDelta::FromMinutes(10), 100);
+ DCHECK(max_packets_timed_ >= kSdchPacketHistogramCount);
+ DCHECK(kSdchPacketHistogramCount > 4);
+ if (packet_times_.size() <= 4)
+ return;
+ UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Pass-through_1st_To_2nd_c",
+ packet_times_[1] - packet_times_[0],
+ base::TimeDelta::FromMilliseconds(1),
+ base::TimeDelta::FromSeconds(10), 100);
+ UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Pass-through_2nd_To_3rd_c",
+ packet_times_[2] - packet_times_[1],
+ base::TimeDelta::FromMilliseconds(1),
+ base::TimeDelta::FromSeconds(10), 100);
+ UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Pass-through_3rd_To_4th_c",
+ packet_times_[3] - packet_times_[2],
+ base::TimeDelta::FromMilliseconds(1),
+ base::TimeDelta::FromSeconds(10), 100);
+ UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Pass-through_4th_To_5th_c",
+ packet_times_[4] - packet_times_[3],
+ base::TimeDelta::FromMilliseconds(1),
+ base::TimeDelta::FromSeconds(10), 100);
+ return;
+ }
- if (ReadFilteredData(bytes_read)) {
- rv = true; // we have data to return
- } else {
- rv = false; // error, or a new IO is pending
+ case SDCH_EXPERIMENT_DECODE: {
+ UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Experiment_Decode",
+ duration,
+ base::TimeDelta::FromMilliseconds(20),
+ base::TimeDelta::FromMinutes(10), 100);
+ // We already provided interpacket histograms above in the SDCH_DECODE
+ // case, so we don't need them here.
+ return;
+ }
+ case SDCH_EXPERIMENT_HOLDBACK: {
+ UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Experiment_Holdback",
+ duration,
+ base::TimeDelta::FromMilliseconds(20),
+ base::TimeDelta::FromMinutes(10), 100);
+ UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Experiment_Holdback_1st_To_Last_a",
+ final_packet_time_ - packet_times_[0],
+ base::TimeDelta::FromMilliseconds(20),
+ base::TimeDelta::FromMinutes(10), 100);
+
+ DCHECK(max_packets_timed_ >= kSdchPacketHistogramCount);
+ DCHECK(kSdchPacketHistogramCount > 4);
+ if (packet_times_.size() <= 4)
+ return;
+ UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Experiment_Holdback_1st_To_2nd_c",
+ packet_times_[1] - packet_times_[0],
+ base::TimeDelta::FromMilliseconds(1),
+ base::TimeDelta::FromSeconds(10), 100);
+ UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Experiment_Holdback_2nd_To_3rd_c",
+ packet_times_[2] - packet_times_[1],
+ base::TimeDelta::FromMilliseconds(1),
+ base::TimeDelta::FromSeconds(10), 100);
+ UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Experiment_Holdback_3rd_To_4th_c",
+ packet_times_[3] - packet_times_[2],
+ base::TimeDelta::FromMilliseconds(1),
+ base::TimeDelta::FromSeconds(10), 100);
+ UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Experiment_Holdback_4th_To_5th_c",
+ packet_times_[4] - packet_times_[3],
+ base::TimeDelta::FromMilliseconds(1),
+ base::TimeDelta::FromSeconds(10), 100);
+ return;
}
+ default:
+ NOTREACHED();
+ return;
}
- if (rv && *bytes_read == 0)
- NotifyDone(URLRequestStatus());
- return rv;
}
-void URLRequestJob::StopCaching() {
- // Nothing to do here.
+URLRequestJob::~URLRequestJob() {
+ g_url_request_job_tracker.RemoveJob(this);
}
-net::LoadState URLRequestJob::GetLoadState() const {
- return net::LOAD_STATE_IDLE;
-}
+void URLRequestJob::NotifyHeadersComplete() {
+ if (!request_ || !request_->delegate())
+ return; // The request was destroyed, so there is no more work to do.
-uint64 URLRequestJob::GetUploadProgress() const {
- return 0;
-}
-
-bool URLRequestJob::GetCharset(std::string* charset) {
- return false;
-}
-
-void URLRequestJob::GetResponseInfo(net::HttpResponseInfo* info) {
-}
-
-bool URLRequestJob::GetResponseCookies(std::vector<std::string>* cookies) {
- return false;
-}
-
-bool URLRequestJob::GetContentEncodings(
- std::vector<Filter::FilterType>* encoding_types) {
- return false;
-}
-
-bool URLRequestJob::IsDownload() const {
- return (load_flags_ & net::LOAD_IS_DOWNLOAD) != 0;
-}
-
-bool URLRequestJob::IsSdchResponse() const {
- return false;
-}
-
-bool URLRequestJob::ReadRawDataForFilter(int* bytes_read) {
- bool rv = false;
-
- DCHECK(bytes_read);
- DCHECK(filter_.get());
-
- *bytes_read = 0;
-
- // Get more pre-filtered data if needed.
- // TODO(mbelshe): is it possible that the filter needs *MORE* data
- // when there is some data already in the buffer?
- if (!filter_->stream_data_len() && !is_done()) {
- net::IOBuffer* stream_buffer = filter_->stream_buffer();
- int stream_buffer_size = filter_->stream_buffer_size();
- rv = ReadRawDataHelper(stream_buffer, stream_buffer_size, bytes_read);
- }
- return rv;
-}
-
-void URLRequestJob::FollowRedirect(const GURL& location, int http_status_code) {
- g_url_request_job_tracker.OnJobRedirect(this, location, http_status_code);
-
- int rv = request_->Redirect(location, http_status_code);
- if (rv != net::OK)
- NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, rv));
-}
-
-void URLRequestJob::FilteredDataRead(int bytes_read) {
- DCHECK(filter_.get()); // don't add data if there is no filter
- filter_->FlushStreamBuffer(bytes_read);
-}
-
-bool URLRequestJob::ReadFilteredData(int* bytes_read) {
- DCHECK(filter_.get()); // don't add data if there is no filter
- DCHECK(filtered_read_buffer_ != NULL); // we need to have a buffer to fill
- DCHECK_GT(filtered_read_buffer_len_, 0); // sanity check
- DCHECK_LT(filtered_read_buffer_len_, 1000000); // sanity check
- DCHECK(raw_read_buffer_ == NULL); // there should be no raw read buffer yet
-
- bool rv = false;
- *bytes_read = 0;
-
- if (is_done())
- return true;
-
- if (!filter_needs_more_output_space_ && !filter_->stream_data_len()) {
- // We don't have any raw data to work with, so
- // read from the socket.
- int filtered_data_read;
- if (ReadRawDataForFilter(&filtered_data_read)) {
- if (filtered_data_read > 0) {
- filter_->FlushStreamBuffer(filtered_data_read); // Give data to filter.
- } else {
- return true; // EOF
- }
- } else {
- return false; // IO Pending (or error)
- }
- }
-
- if ((filter_->stream_data_len() || filter_needs_more_output_space_)
- && !is_done()) {
- // Get filtered data.
- int filtered_data_len = filtered_read_buffer_len_;
- Filter::FilterStatus status;
- int output_buffer_size = filtered_data_len;
- status = filter_->ReadData(filtered_read_buffer_->data(),
- &filtered_data_len);
-
- if (filter_needs_more_output_space_ && 0 == filtered_data_len) {
- // filter_needs_more_output_space_ was mistaken... there are no more bytes
- // and we should have at least tried to fill up the filter's input buffer.
- // Correct the state, and try again.
- filter_needs_more_output_space_ = false;
- return ReadFilteredData(bytes_read);
- }
-
- switch (status) {
- case Filter::FILTER_DONE: {
- filter_needs_more_output_space_ = false;
- *bytes_read = filtered_data_len;
- rv = true;
- break;
- }
- case Filter::FILTER_NEED_MORE_DATA: {
- filter_needs_more_output_space_ =
- (filtered_data_len == output_buffer_size);
- // We have finished filtering all data currently in the buffer.
- // There might be some space left in the output buffer. One can
- // consider reading more data from the stream to feed the filter
- // and filling up the output buffer. This leads to more complicated
- // buffer management and data notification mechanisms.
- // We can revisit this issue if there is a real perf need.
- if (filtered_data_len > 0) {
- *bytes_read = filtered_data_len;
- rv = true;
- } else {
- // Read again since we haven't received enough data yet (e.g., we may
- // not have a complete gzip header yet)
- rv = ReadFilteredData(bytes_read);
- }
- break;
- }
- case Filter::FILTER_OK: {
- filter_needs_more_output_space_ =
- (filtered_data_len == output_buffer_size);
- *bytes_read = filtered_data_len;
- rv = true;
- break;
- }
- case Filter::FILTER_ERROR: {
- filter_needs_more_output_space_ = false;
- NotifyDone(URLRequestStatus(URLRequestStatus::FAILED,
- net::ERR_CONTENT_DECODING_FAILED));
- rv = false;
- break;
- }
- default: {
- NOTREACHED();
- filter_needs_more_output_space_ = false;
- rv = false;
- break;
- }
- }
- } else {
- // we are done, or there is no data left.
- rv = true;
- }
-
- if (rv) {
- // When we successfully finished a read, we no longer need to
- // save the caller's buffers. Release our reference.
- filtered_read_buffer_ = NULL;
- filtered_read_buffer_len_ = 0;
- }
- return rv;
-}
-
-bool URLRequestJob::ReadRawDataHelper(net::IOBuffer* buf, int buf_size,
- int* bytes_read) {
- DCHECK(!request_->status().is_io_pending());
- DCHECK(raw_read_buffer_ == NULL);
-
- // Keep a pointer to the read buffer, so we have access to it in the
- // OnRawReadComplete() callback in the event that the read completes
- // asynchronously.
- raw_read_buffer_ = buf;
- bool rv = ReadRawData(buf, buf_size, bytes_read);
-
- if (!request_->status().is_io_pending()) {
- // If the read completes synchronously, either success or failure,
- // invoke the OnRawReadComplete callback so we can account for the
- // completed read.
- OnRawReadComplete(*bytes_read);
- }
- return rv;
-}
-
-bool URLRequestJob::ReadRawData(net::IOBuffer* buf, int buf_size,
- int *bytes_read) {
- DCHECK(bytes_read);
- *bytes_read = 0;
- NotifyDone(URLRequestStatus());
- return false;
-}
-
-URLRequestJobMetrics* URLRequestJob::RetrieveMetrics() {
- if (is_profiling())
- return metrics_.release();
- else
- return NULL;
-}
-
-void URLRequestJob::NotifyHeadersComplete() {
- if (!request_ || !request_->delegate())
- return; // The request was destroyed, so there is no more work to do.
-
- if (has_handled_response_)
- return;
+ if (has_handled_response_)
+ return;
DCHECK(!request_->status().is_io_pending());
@@ -529,15 +497,6 @@ void URLRequestJob::NotifyHeadersComplete() {
request_->ResponseStarted();
}
-void URLRequestJob::NotifyStartError(const URLRequestStatus &status) {
- DCHECK(!has_handled_response_);
- has_handled_response_ = true;
- if (request_) {
- request_->set_status(status);
- request_->ResponseStarted();
- }
-}
-
void URLRequestJob::NotifyReadComplete(int bytes_read) {
if (!request_ || !request_->delegate())
return; // The request was destroyed, so there is no more work to do.
@@ -579,6 +538,15 @@ void URLRequestJob::NotifyReadComplete(int bytes_read) {
}
}
+void URLRequestJob::NotifyStartError(const URLRequestStatus &status) {
+ DCHECK(!has_handled_response_);
+ has_handled_response_ = true;
+ if (request_) {
+ request_->set_status(status);
+ request_->ResponseStarted();
+ }
+}
+
void URLRequestJob::NotifyDone(const URLRequestStatus &status) {
DCHECK(!done_) << "Job sending done notification twice";
if (done_)
@@ -658,33 +626,135 @@ void URLRequestJob::NotifyRestartRequired() {
request_->Restart();
}
-bool URLRequestJob::FilterHasData() {
- return filter_.get() && filter_->stream_data_len();
+bool URLRequestJob::ReadRawData(net::IOBuffer* buf, int buf_size,
+ int *bytes_read) {
+ DCHECK(bytes_read);
+ *bytes_read = 0;
+ NotifyDone(URLRequestStatus());
+ return false;
}
-void URLRequestJob::OnRawReadComplete(int bytes_read) {
- DCHECK(raw_read_buffer_);
- if (bytes_read > 0) {
- RecordBytesRead(bytes_read);
- }
- raw_read_buffer_ = NULL;
+void URLRequestJob::FilteredDataRead(int bytes_read) {
+ DCHECK(filter_.get()); // don't add data if there is no filter
+ filter_->FlushStreamBuffer(bytes_read);
}
-void URLRequestJob::RecordBytesRead(int bytes_read) {
- if (is_profiling()) {
- ++(metrics_->number_of_read_IO_);
- metrics_->total_bytes_read_ += bytes_read;
- }
- filter_input_byte_count_ += bytes_read;
- UpdatePacketReadTimes(); // Facilitate stats recording if it is active.
- g_url_request_job_tracker.OnBytesRead(this, raw_read_buffer_->data(),
- bytes_read);
-}
+bool URLRequestJob::ReadFilteredData(int* bytes_read) {
+ DCHECK(filter_.get()); // don't add data if there is no filter
+ DCHECK(filtered_read_buffer_ != NULL); // we need to have a buffer to fill
+ DCHECK_GT(filtered_read_buffer_len_, 0); // sanity check
+ DCHECK_LT(filtered_read_buffer_len_, 1000000); // sanity check
+ DCHECK(raw_read_buffer_ == NULL); // there should be no raw read buffer yet
-const URLRequestStatus URLRequestJob::GetStatus() {
- if (request_)
- return request_->status();
- // If the request is gone, we must be cancelled.
+ bool rv = false;
+ *bytes_read = 0;
+
+ if (is_done())
+ return true;
+
+ if (!filter_needs_more_output_space_ && !filter_->stream_data_len()) {
+ // We don't have any raw data to work with, so
+ // read from the socket.
+ int filtered_data_read;
+ if (ReadRawDataForFilter(&filtered_data_read)) {
+ if (filtered_data_read > 0) {
+ filter_->FlushStreamBuffer(filtered_data_read); // Give data to filter.
+ } else {
+ return true; // EOF
+ }
+ } else {
+ return false; // IO Pending (or error)
+ }
+ }
+
+ if ((filter_->stream_data_len() || filter_needs_more_output_space_)
+ && !is_done()) {
+ // Get filtered data.
+ int filtered_data_len = filtered_read_buffer_len_;
+ Filter::FilterStatus status;
+ int output_buffer_size = filtered_data_len;
+ status = filter_->ReadData(filtered_read_buffer_->data(),
+ &filtered_data_len);
+
+ if (filter_needs_more_output_space_ && 0 == filtered_data_len) {
+ // filter_needs_more_output_space_ was mistaken... there are no more bytes
+ // and we should have at least tried to fill up the filter's input buffer.
+ // Correct the state, and try again.
+ filter_needs_more_output_space_ = false;
+ return ReadFilteredData(bytes_read);
+ }
+
+ switch (status) {
+ case Filter::FILTER_DONE: {
+ filter_needs_more_output_space_ = false;
+ *bytes_read = filtered_data_len;
+ rv = true;
+ break;
+ }
+ case Filter::FILTER_NEED_MORE_DATA: {
+ filter_needs_more_output_space_ =
+ (filtered_data_len == output_buffer_size);
+ // We have finished filtering all data currently in the buffer.
+ // There might be some space left in the output buffer. One can
+ // consider reading more data from the stream to feed the filter
+ // and filling up the output buffer. This leads to more complicated
+ // buffer management and data notification mechanisms.
+ // We can revisit this issue if there is a real perf need.
+ if (filtered_data_len > 0) {
+ *bytes_read = filtered_data_len;
+ rv = true;
+ } else {
+ // Read again since we haven't received enough data yet (e.g., we may
+ // not have a complete gzip header yet)
+ rv = ReadFilteredData(bytes_read);
+ }
+ break;
+ }
+ case Filter::FILTER_OK: {
+ filter_needs_more_output_space_ =
+ (filtered_data_len == output_buffer_size);
+ *bytes_read = filtered_data_len;
+ rv = true;
+ break;
+ }
+ case Filter::FILTER_ERROR: {
+ filter_needs_more_output_space_ = false;
+ NotifyDone(URLRequestStatus(URLRequestStatus::FAILED,
+ net::ERR_CONTENT_DECODING_FAILED));
+ rv = false;
+ break;
+ }
+ default: {
+ NOTREACHED();
+ filter_needs_more_output_space_ = false;
+ rv = false;
+ break;
+ }
+ }
+ } else {
+ // we are done, or there is no data left.
+ rv = true;
+ }
+
+ if (rv) {
+ // When we successfully finished a read, we no longer need to
+ // save the caller's buffers. Release our reference.
+ filtered_read_buffer_ = NULL;
+ filtered_read_buffer_len_ = 0;
+ }
+ return rv;
+}
+
+void URLRequestJob::EnablePacketCounting(size_t max_packets_timed) {
+ if (max_packets_timed_ < max_packets_timed)
+ max_packets_timed_ = max_packets_timed;
+ packet_timing_enabled_ = true;
+}
+
+const URLRequestStatus URLRequestJob::GetStatus() {
+ if (request_)
+ return request_->status();
+ // If the request is gone, we must be cancelled.
return URLRequestStatus(URLRequestStatus::CANCELED,
net::ERR_ABORTED);
}
@@ -694,6 +764,76 @@ void URLRequestJob::SetStatus(const URLRequestStatus &status) {
request_->set_status(status);
}
+bool URLRequestJob::ReadRawDataForFilter(int* bytes_read) {
+ bool rv = false;
+
+ DCHECK(bytes_read);
+ DCHECK(filter_.get());
+
+ *bytes_read = 0;
+
+ // Get more pre-filtered data if needed.
+ // TODO(mbelshe): is it possible that the filter needs *MORE* data
+ // when there is some data already in the buffer?
+ if (!filter_->stream_data_len() && !is_done()) {
+ net::IOBuffer* stream_buffer = filter_->stream_buffer();
+ int stream_buffer_size = filter_->stream_buffer_size();
+ rv = ReadRawDataHelper(stream_buffer, stream_buffer_size, bytes_read);
+ }
+ return rv;
+}
+
+bool URLRequestJob::ReadRawDataHelper(net::IOBuffer* buf, int buf_size,
+ int* bytes_read) {
+ DCHECK(!request_->status().is_io_pending());
+ DCHECK(raw_read_buffer_ == NULL);
+
+ // Keep a pointer to the read buffer, so we have access to it in the
+ // OnRawReadComplete() callback in the event that the read completes
+ // asynchronously.
+ raw_read_buffer_ = buf;
+ bool rv = ReadRawData(buf, buf_size, bytes_read);
+
+ if (!request_->status().is_io_pending()) {
+ // If the read completes synchronously, either success or failure,
+ // invoke the OnRawReadComplete callback so we can account for the
+ // completed read.
+ OnRawReadComplete(*bytes_read);
+ }
+ return rv;
+}
+
+void URLRequestJob::FollowRedirect(const GURL& location, int http_status_code) {
+ g_url_request_job_tracker.OnJobRedirect(this, location, http_status_code);
+
+ int rv = request_->Redirect(location, http_status_code);
+ if (rv != net::OK)
+ NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, rv));
+}
+
+void URLRequestJob::OnRawReadComplete(int bytes_read) {
+ DCHECK(raw_read_buffer_);
+ if (bytes_read > 0) {
+ RecordBytesRead(bytes_read);
+ }
+ raw_read_buffer_ = NULL;
+}
+
+void URLRequestJob::RecordBytesRead(int bytes_read) {
+ if (is_profiling()) {
+ ++(metrics_->number_of_read_IO_);
+ metrics_->total_bytes_read_ += bytes_read;
+ }
+ filter_input_byte_count_ += bytes_read;
+ UpdatePacketReadTimes(); // Facilitate stats recording if it is active.
+ g_url_request_job_tracker.OnBytesRead(this, raw_read_buffer_->data(),
+ bytes_read);
+}
+
+bool URLRequestJob::FilterHasData() {
+ return filter_.get() && filter_->stream_data_len();
+}
+
void URLRequestJob::UpdatePacketReadTimes() {
if (!packet_timing_enabled_)
return;
@@ -722,146 +862,6 @@ void URLRequestJob::UpdatePacketReadTimes() {
bytes_observed_in_packets_ = filter_input_byte_count_;
}
-void URLRequestJob::EnablePacketCounting(size_t max_packets_timed) {
- if (max_packets_timed_ < max_packets_timed)
- max_packets_timed_ = max_packets_timed;
- packet_timing_enabled_ = true;
-}
-
-void URLRequestJob::RecordPacketStats(StatisticSelector statistic) const {
- if (!packet_timing_enabled_ || (final_packet_time_ == base::Time()))
- return;
-
- // Caller should verify that we're not cached content, but we can't always
- // really check for it here because we may (at destruction time) call our own
- // class method and get a bogus const answer of false. This DCHECK only helps
- // when this method has a valid overridden definition.
- DCHECK(!IsCachedContent());
-
- base::TimeDelta duration = final_packet_time_ - request_time_snapshot_;
- switch (statistic) {
- case SDCH_DECODE: {
- UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Decode_Latency_F_a", duration,
- base::TimeDelta::FromMilliseconds(20),
- base::TimeDelta::FromMinutes(10), 100);
- UMA_HISTOGRAM_COUNTS_100("Sdch3.Network_Decode_Packets_b",
- static_cast<int>(observed_packet_count_));
- UMA_HISTOGRAM_CUSTOM_COUNTS("Sdch3.Network_Decode_Bytes_Processed_b",
- static_cast<int>(bytes_observed_in_packets_), 500, 100000, 100);
- if (packet_times_.empty())
- return;
- UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Decode_1st_To_Last_a",
- final_packet_time_ - packet_times_[0],
- base::TimeDelta::FromMilliseconds(20),
- base::TimeDelta::FromMinutes(10), 100);
-
- DCHECK(max_packets_timed_ >= kSdchPacketHistogramCount);
- DCHECK(kSdchPacketHistogramCount > 4);
- if (packet_times_.size() <= 4)
- return;
- UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Decode_1st_To_2nd_c",
- packet_times_[1] - packet_times_[0],
- base::TimeDelta::FromMilliseconds(1),
- base::TimeDelta::FromSeconds(10), 100);
- UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Decode_2nd_To_3rd_c",
- packet_times_[2] - packet_times_[1],
- base::TimeDelta::FromMilliseconds(1),
- base::TimeDelta::FromSeconds(10), 100);
- UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Decode_3rd_To_4th_c",
- packet_times_[3] - packet_times_[2],
- base::TimeDelta::FromMilliseconds(1),
- base::TimeDelta::FromSeconds(10), 100);
- UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Decode_4th_To_5th_c",
- packet_times_[4] - packet_times_[3],
- base::TimeDelta::FromMilliseconds(1),
- base::TimeDelta::FromSeconds(10), 100);
- return;
- }
- case SDCH_PASSTHROUGH: {
- // Despite advertising a dictionary, we handled non-sdch compressed
- // content.
- UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Pass-through_Latency_F_a",
- duration,
- base::TimeDelta::FromMilliseconds(20),
- base::TimeDelta::FromMinutes(10), 100);
- UMA_HISTOGRAM_COUNTS_100("Sdch3.Network_Pass-through_Packets_b",
- observed_packet_count_);
- if (packet_times_.empty())
- return;
- UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Pass-through_1st_To_Last_a",
- final_packet_time_ - packet_times_[0],
- base::TimeDelta::FromMilliseconds(20),
- base::TimeDelta::FromMinutes(10), 100);
- DCHECK(max_packets_timed_ >= kSdchPacketHistogramCount);
- DCHECK(kSdchPacketHistogramCount > 4);
- if (packet_times_.size() <= 4)
- return;
- UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Pass-through_1st_To_2nd_c",
- packet_times_[1] - packet_times_[0],
- base::TimeDelta::FromMilliseconds(1),
- base::TimeDelta::FromSeconds(10), 100);
- UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Pass-through_2nd_To_3rd_c",
- packet_times_[2] - packet_times_[1],
- base::TimeDelta::FromMilliseconds(1),
- base::TimeDelta::FromSeconds(10), 100);
- UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Pass-through_3rd_To_4th_c",
- packet_times_[3] - packet_times_[2],
- base::TimeDelta::FromMilliseconds(1),
- base::TimeDelta::FromSeconds(10), 100);
- UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Pass-through_4th_To_5th_c",
- packet_times_[4] - packet_times_[3],
- base::TimeDelta::FromMilliseconds(1),
- base::TimeDelta::FromSeconds(10), 100);
- return;
- }
-
- case SDCH_EXPERIMENT_DECODE: {
- UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Experiment_Decode",
- duration,
- base::TimeDelta::FromMilliseconds(20),
- base::TimeDelta::FromMinutes(10), 100);
- // We already provided interpacket histograms above in the SDCH_DECODE
- // case, so we don't need them here.
- return;
- }
- case SDCH_EXPERIMENT_HOLDBACK: {
- UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Experiment_Holdback",
- duration,
- base::TimeDelta::FromMilliseconds(20),
- base::TimeDelta::FromMinutes(10), 100);
- UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Experiment_Holdback_1st_To_Last_a",
- final_packet_time_ - packet_times_[0],
- base::TimeDelta::FromMilliseconds(20),
- base::TimeDelta::FromMinutes(10), 100);
-
- DCHECK(max_packets_timed_ >= kSdchPacketHistogramCount);
- DCHECK(kSdchPacketHistogramCount > 4);
- if (packet_times_.size() <= 4)
- return;
- UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Experiment_Holdback_1st_To_2nd_c",
- packet_times_[1] - packet_times_[0],
- base::TimeDelta::FromMilliseconds(1),
- base::TimeDelta::FromSeconds(10), 100);
- UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Experiment_Holdback_2nd_To_3rd_c",
- packet_times_[2] - packet_times_[1],
- base::TimeDelta::FromMilliseconds(1),
- base::TimeDelta::FromSeconds(10), 100);
- UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Experiment_Holdback_3rd_To_4th_c",
- packet_times_[3] - packet_times_[2],
- base::TimeDelta::FromMilliseconds(1),
- base::TimeDelta::FromSeconds(10), 100);
- UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Experiment_Holdback_4th_To_5th_c",
- packet_times_[4] - packet_times_[3],
- base::TimeDelta::FromMilliseconds(1),
- base::TimeDelta::FromSeconds(10), 100);
- return;
- }
- default:
- NOTREACHED();
- return;
- }
-}
-
// The common type of histogram we use for all compression-tracking histograms.
#define COMPRESSION_HISTOGRAM(name, sample) \
do { \