Index: net/url_request/url_request_http_job.cc
diff --git a/net/url_request/url_request_http_job.cc b/net/url_request/url_request_http_job.cc
index fcf296b1e772498a438a905c3dd86d3897a7f4ad..76daf80c0721a6498c45f42af204e1927f59e619 100644
--- a/net/url_request/url_request_http_job.cc
+++ b/net/url_request/url_request_http_job.cc
@@ -228,6 +228,7 @@ URLRequestHttpJob::URLRequestHttpJob(
NetworkDelegate* network_delegate,
const HttpUserAgentSettings* http_user_agent_settings)
: URLRequestJob(request, network_delegate),
+ priority_(DEFAULT_PRIORITY),
response_info_(NULL),
response_cookies_save_index_(0),
proxy_auth_state_(AUTH_STATE_DONT_NEED_AUTH),
@@ -268,6 +269,37 @@ URLRequestHttpJob::URLRequestHttpJob(
ResetTimer();
}
+URLRequestHttpJob::~URLRequestHttpJob() {
+ CHECK(!awaiting_callback_);
+
+ DCHECK(!sdch_test_control_ || !sdch_test_activated_);
+ if (!is_cached_content_) {
+ if (sdch_test_control_)
+ RecordPacketStats(FilterContext::SDCH_EXPERIMENT_HOLDBACK);
+ if (sdch_test_activated_)
+ RecordPacketStats(FilterContext::SDCH_EXPERIMENT_DECODE);
+ }
+ // Make sure SDCH filters are told to emit histogram data while
+ // filter_context_ is still alive.
+ DestroyFilters();
+
+ if (sdch_dictionary_url_.is_valid()) {
+ // Prior to reaching the destructor, request_ has been set to a NULL
+ // pointer, so request_->url() is no longer valid in the destructor, and we
+ // use an alternate copy |request_info_.url|.
+ SdchManager* manager = SdchManager::Global();
+ // To be extra safe, since this is a "different time" from when we decided
+ // to get the dictionary, we'll validate that an SdchManager is available.
+ // At shutdown time, care is taken to be sure that we don't delete this
+ // globally useful instance "too soon," so this check is just defensive
+ // coding to assure that IF the system is shutting down, we don't have any
+ // problem if the manager was deleted ahead of time.
+ if (manager) // Defensive programming.
+ manager->FetchDictionary(request_info_.url, sdch_dictionary_url_);
+ }
+ DoneWithRequest(ABORTED);
+}
+
void URLRequestHttpJob::NotifyHeadersComplete() {
DCHECK(!response_info_);
@@ -394,7 +426,7 @@ void URLRequestHttpJob::StartTransactionInternal() {
DCHECK(request_->context()->http_transaction_factory());
rv = request_->context()->http_transaction_factory()->CreateTransaction(
- request_->priority(), &transaction_, http_transaction_delegate_.get());
+ priority_, &transaction_, http_transaction_delegate_.get());
[Reviewer discussion — code-review site artifact interleaved into the diff; not part of the patch itself.]
mmenke 2013/03/20 17:12:59: Hmm...We could probably get away with not caching [comment truncated in extraction]
akalin 2013/03/21 01:30:33: I actually think it's clearer this way, because: [comment truncated in extraction]
mmenke 2013/03/21 04:13:01: My general feeling is the more places we have valu [comment truncated in extraction]

if (rv == OK) {
if (!throttling_entry_ ||
!throttling_entry_->ShouldRejectRequest(*request_)) {
@@ -859,6 +891,12 @@ void URLRequestHttpJob::SetExtraRequestHeaders(
request_info_.extra_headers.CopyFrom(headers);
}
+void URLRequestHttpJob::SetPriority(RequestPriority priority) {
+ priority_ = priority;
+ if (transaction_)
+ transaction_->SetPriority(priority_);
+}
+
void URLRequestHttpJob::Start() {
DCHECK(!transaction_.get());
@@ -1230,37 +1268,6 @@ HostPortPair URLRequestHttpJob::GetSocketAddress() const {
return response_info_ ? response_info_->socket_address : HostPortPair();
}
-URLRequestHttpJob::~URLRequestHttpJob() {
- CHECK(!awaiting_callback_);
-
- DCHECK(!sdch_test_control_ || !sdch_test_activated_);
- if (!is_cached_content_) {
- if (sdch_test_control_)
- RecordPacketStats(FilterContext::SDCH_EXPERIMENT_HOLDBACK);
- if (sdch_test_activated_)
- RecordPacketStats(FilterContext::SDCH_EXPERIMENT_DECODE);
- }
- // Make sure SDCH filters are told to emit histogram data while
- // filter_context_ is still alive.
- DestroyFilters();
-
- if (sdch_dictionary_url_.is_valid()) {
- // Prior to reaching the destructor, request_ has been set to a NULL
- // pointer, so request_->url() is no longer valid in the destructor, and we
- // use an alternate copy |request_info_.url|.
- SdchManager* manager = SdchManager::Global();
- // To be extra safe, since this is a "different time" from when we decided
- // to get the dictionary, we'll validate that an SdchManager is available.
- // At shutdown time, care is taken to be sure that we don't delete this
- // globally useful instance "too soon," so this check is just defensive
- // coding to assure that IF the system is shutting down, we don't have any
- // problem if the manager was deleted ahead of time.
- if (manager) // Defensive programming.
- manager->FetchDictionary(request_info_.url, sdch_dictionary_url_);
- }
- DoneWithRequest(ABORTED);
-}
-
void URLRequestHttpJob::RecordTimer() {
if (request_creation_time_.is_null()) {
NOTREACHED()