Chromium Code Reviews

Unified Diff: net/url_request/url_request_http_job.cc

Issue 6382003: Reorder the methods in net/url_request/. (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Compiling net_unittests != compiling the rest of chrome (created 9 years, 11 months ago)
Index: net/url_request/url_request_http_job.cc
diff --git a/net/url_request/url_request_http_job.cc b/net/url_request/url_request_http_job.cc
index ec0134a5ac11f58a2425764527dba92e981c6fe0..21b84f23ee84c7fc85f1444dd9dadb31ef28d099 100644
--- a/net/url_request/url_request_http_job.cc
+++ b/net/url_request/url_request_http_job.cc
@@ -117,6 +117,7 @@ URLRequestJob* URLRequestHttpJob::Factory(URLRequest* request,
return new URLRequestHttpJob(request);
}
+
URLRequestHttpJob::URLRequestHttpJob(URLRequest* request)
: URLRequestJob(request),
response_info_(NULL),
@@ -142,354 +143,350 @@ URLRequestHttpJob::URLRequestHttpJob(URLRequest* request)
ALLOW_THIS_IN_INITIALIZER_LIST(method_factory_(this)) {
}
-URLRequestHttpJob::~URLRequestHttpJob() {
- DCHECK(!sdch_test_control_ || !sdch_test_activated_);
- if (!IsCachedContent()) {
- if (sdch_test_control_)
- RecordPacketStats(SDCH_EXPERIMENT_HOLDBACK);
- if (sdch_test_activated_)
- RecordPacketStats(SDCH_EXPERIMENT_DECODE);
+void URLRequestHttpJob::NotifyHeadersComplete() {
+ DCHECK(!response_info_);
+
+ response_info_ = transaction_->GetResponseInfo();
+
+ // Save boolean, as we'll need this info at destruction time, and filters may
+ // also need this info.
+ is_cached_content_ = response_info_->was_cached;
+
+ if (!is_cached_content_) {
+ URLRequestThrottlerHeaderAdapter response_adapter(
+ response_info_->headers);
+ throttling_entry_->UpdateWithResponse(&response_adapter);
}
- // Make sure SDCH filters are told to emit histogram data while this class
- // can still service the IsCachedContent() call.
- DestroyFilters();
- if (sdch_dictionary_url_.is_valid()) {
- // Prior to reaching the destructor, request_ has been set to a NULL
- // pointer, so request_->url() is no longer valid in the destructor, and we
- // use an alternate copy |request_info_.url|.
- SdchManager* manager = SdchManager::Global();
- // To be extra safe, since this is a "different time" from when we decided
- // to get the dictionary, we'll validate that an SdchManager is available.
- // At shutdown time, care is taken to be sure that we don't delete this
- // globally useful instance "too soon," so this check is just defensive
- // coding to assure that IF the system is shutting down, we don't have any
- // problem if the manager was deleted ahead of time.
- if (manager) // Defensive programming.
- manager->FetchDictionary(request_info_.url, sdch_dictionary_url_);
+ ProcessStrictTransportSecurityHeader();
+
+ if (SdchManager::Global() &&
+ SdchManager::Global()->IsInSupportedDomain(request_->url())) {
+ static const std::string name = "Get-Dictionary";
+ std::string url_text;
+ void* iter = NULL;
+ // TODO(jar): We need to not fetch dictionaries the first time they are
+ // seen, but rather wait until we can justify their usefulness.
+ // For now, we will only fetch the first dictionary, which will at least
+ // require multiple suggestions before we get additional ones for this site.
+ // Eventually we should wait until a dictionary is requested several times
+ // before we even download it (so that we don't waste memory or bandwidth).
+ if (response_info_->headers->EnumerateHeader(&iter, name, &url_text)) {
+ // request_->url() won't be valid in the destructor, so we use an
+ // alternate copy.
+ DCHECK(request_->url() == request_info_.url);
+ // Resolve suggested URL relative to request url.
+ sdch_dictionary_url_ = request_info_.url.Resolve(url_text);
+ }
}
-}
-void URLRequestHttpJob::SetUpload(UploadData* upload) {
- DCHECK(!transaction_.get()) << "cannot change once started";
- request_info_.upload_data = upload;
+ // The HTTP transaction may be restarted several times for the purposes
+ // of sending authorization information. Each time it restarts, we get
+ // notified of the headers completion so that we can update the cookie store.
+ if (transaction_->IsReadyToRestartForAuth()) {
+ DCHECK(!response_info_->auth_challenge.get());
+ RestartTransactionWithAuth(string16(), string16());
+ return;
+ }
+
+ URLRequestJob::NotifyHeadersComplete();
}
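
The Get-Dictionary check above uses net's cursor-style header enumeration: a void* iterator is advanced across repeated instances of one header name. A minimal standalone sketch of that pattern follows; HeaderList and this EnumerateHeader are simplified stand-ins, not the real HttpResponseHeaders API.

#include <cstddef>
#include <string>
#include <utility>
#include <vector>

using HeaderList = std::vector<std::pair<std::string, std::string>>;

// Returns true and advances *iter each time another header named |name| is
// found, mirroring how repeated headers (Get-Dictionary, Set-Cookie) are
// walked one value at a time.
bool EnumerateHeader(const HeaderList& headers, std::size_t* iter,
                     const std::string& name, std::string* value) {
  for (std::size_t i = *iter; i < headers.size(); ++i) {
    if (headers[i].first == name) {
      *value = headers[i].second;
      *iter = i + 1;
      return true;
    }
  }
  *iter = headers.size();
  return false;
}

int main() {
  HeaderList headers = {{"Set-Cookie", "a=1"}, {"Set-Cookie", "b=2"}};
  std::size_t iter = 0;
  std::string value;
  while (EnumerateHeader(headers, &iter, "Set-Cookie", &value)) {
    // First iteration yields "a=1", the second "b=2".
  }
  return 0;
}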
-void URLRequestHttpJob::SetExtraRequestHeaders(
- const HttpRequestHeaders& headers) {
- DCHECK(!transaction_.get()) << "cannot change once started";
- request_info_.extra_headers.CopyFrom(headers);
+void URLRequestHttpJob::DestroyTransaction() {
+ DCHECK(transaction_.get());
+
+ transaction_.reset();
+ response_info_ = NULL;
+ context_ = NULL;
}
-void URLRequestHttpJob::Start() {
- DCHECK(!transaction_.get());
+void URLRequestHttpJob::StartTransaction() {
+ // NOTE: This method assumes that request_info_ is already set up properly.
- // Ensure that we do not send username and password fields in the referrer.
- GURL referrer(request_->GetSanitizedReferrer());
+ // If we already have a transaction, then we should restart the transaction
+ // with auth provided by username_ and password_.
- request_info_.url = request_->url();
- request_info_.referrer = referrer;
- request_info_.method = request_->method();
- request_info_.load_flags = request_->load_flags();
- request_info_.priority = request_->priority();
+ int rv;
- if (request_->context()) {
- request_info_.extra_headers.SetHeader(
- HttpRequestHeaders::kUserAgent,
- request_->context()->GetUserAgent(request_->url()));
- }
+ if (transaction_.get()) {
+ rv = transaction_->RestartWithAuth(username_, password_, &start_callback_);
+ username_.clear();
+ password_.clear();
+ } else {
+ DCHECK(request_->context());
+ DCHECK(request_->context()->http_transaction_factory());
- AddExtraHeaders();
- AddCookieHeaderAndStart();
-}
+ rv = request_->context()->http_transaction_factory()->CreateTransaction(
+ &transaction_);
+ if (rv == OK) {
+ if (!throttling_entry_->IsDuringExponentialBackoff() ||
+ !net::URLRequestThrottlerManager::GetInstance()->
+ enforce_throttling()) {
+ rv = transaction_->Start(
+ &request_info_, &start_callback_, request_->net_log());
+ } else {
+ // Special error code for the exponential back-off module.
+ rv = ERR_TEMPORARILY_THROTTLED;
+ }
+ // Make sure the context is alive for the duration of the
+ // transaction.
+ context_ = request_->context();
+ }
+ }
-void URLRequestHttpJob::Kill() {
- if (!transaction_.get())
+ if (rv == ERR_IO_PENDING)
return;
- DestroyTransaction();
- URLRequestJob::Kill();
-}
-
-LoadState URLRequestHttpJob::GetLoadState() const {
- return transaction_.get() ?
- transaction_->GetLoadState() : LOAD_STATE_IDLE;
+ // The transaction started synchronously, but we need to notify the
+ // URLRequest delegate via the message loop.
+ MessageLoop::current()->PostTask(
+ FROM_HERE,
+ method_factory_.NewRunnableMethod(
+ &URLRequestHttpJob::OnStartCompleted, rv));
}
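
Note that StartTransaction never runs OnStartCompleted inline: even on a synchronous start it posts the notification through the message loop, so the URLRequest delegate is never re-entered from Start. A minimal sketch of that convention, with a plain task queue standing in for MessageLoop::PostTask:

#include <functional>
#include <queue>

// Stand-in for the browser's message loop.
static std::queue<std::function<void()>> g_tasks;

const int kErrIOPendingSketch = -1;

// |on_complete| is never invoked inline, even when the work finished
// synchronously, so the caller cannot be re-entered.
void Start(int sync_result, std::function<void(int)> on_complete) {
  if (sync_result == kErrIOPendingSketch)
    return;  // The real asynchronous path will deliver the result later.
  g_tasks.push([on_complete, sync_result] { on_complete(sync_result); });
}

int main() {
  Start(0, [](int rv) { /* the OnStartCompleted(rv) equivalent */ });
  while (!g_tasks.empty()) {  // Pump the stand-in loop.
    g_tasks.front()();
    g_tasks.pop();
  }
  return 0;
}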
-uint64 URLRequestHttpJob::GetUploadProgress() const {
- return transaction_.get() ? transaction_->GetUploadProgress() : 0;
-}
+void URLRequestHttpJob::AddExtraHeaders() {
+ // TODO(jar): Consider optimizing away SDCH advertising bytes when the URL is
+ // probably an img or such (and SDCH encoding is not likely).
+ bool advertise_sdch = SdchManager::Global() &&
+ SdchManager::Global()->IsInSupportedDomain(request_->url());
+ std::string avail_dictionaries;
+ if (advertise_sdch) {
+ SdchManager::Global()->GetAvailDictionaryList(request_->url(),
+ &avail_dictionaries);
-bool URLRequestHttpJob::GetMimeType(std::string* mime_type) const {
- DCHECK(transaction_.get());
+ // The AllowLatencyExperiment() is only true if we've successfully done a
+ // full SDCH compression recently in this browser session for this host.
+ // Note that for this path, there might be no applicable dictionaries, and
+ // hence we can't participate in the experiment.
+ if (!avail_dictionaries.empty() &&
+ SdchManager::Global()->AllowLatencyExperiment(request_->url())) {
+ // We are participating in the test (or control), and hence we'll
+ // eventually record statistics via either SDCH_EXPERIMENT_DECODE or
+ // SDCH_EXPERIMENT_HOLDBACK, and we'll need some packet timing data.
+ EnablePacketCounting(kSdchPacketHistogramCount);
+ if (base::RandDouble() < .01) {
+ sdch_test_control_ = true; // 1% probability.
+ advertise_sdch = false;
+ } else {
+ sdch_test_activated_ = true;
+ }
+ }
+ }
- if (!response_info_)
- return false;
+ // Supply Accept-Encoding headers first so that it is more likely that they
+ // will be in the first transmitted packet. This can sometimes make it easier
+ // to filter and analyze the streams to assure that a proxy has not damaged
+ // these headers. Some proxies deliberately corrupt Accept-Encoding headers.
+ if (!advertise_sdch) {
+ // Tell the server what compression formats we support (other than SDCH).
+ request_info_.extra_headers.SetHeader(
+ HttpRequestHeaders::kAcceptEncoding, "gzip,deflate");
+ } else {
+ // Include SDCH in acceptable list.
+ request_info_.extra_headers.SetHeader(
+ HttpRequestHeaders::kAcceptEncoding, "gzip,deflate,sdch");
+ if (!avail_dictionaries.empty()) {
+ request_info_.extra_headers.SetHeader(
+ kAvailDictionaryHeader,
+ avail_dictionaries);
+ sdch_dictionary_advertised_ = true;
+ // Since we're tagging this transaction as advertising a dictionary, we'll
+ // definitely employ an SDCH filter (or tentative sdch filter) when we get
+ // a response. When done, we'll record histograms via SDCH_DECODE or
+ // SDCH_PASSTHROUGH. Hence we need to record packet arrival times.
+ EnablePacketCounting(kSdchPacketHistogramCount);
+ }
+ }
- return response_info_->headers->GetMimeType(mime_type);
+ URLRequestContext* context = request_->context();
+ if (context) {
+ // Only add default Accept-Language and Accept-Charset if the request
+ // didn't have them specified.
+ if (!request_info_.extra_headers.HasHeader(
+ HttpRequestHeaders::kAcceptLanguage)) {
+ request_info_.extra_headers.SetHeader(
+ HttpRequestHeaders::kAcceptLanguage,
+ context->accept_language());
+ }
+ if (!request_info_.extra_headers.HasHeader(
+ HttpRequestHeaders::kAcceptCharset)) {
+ request_info_.extra_headers.SetHeader(
+ HttpRequestHeaders::kAcceptCharset,
+ context->accept_charset());
+ }
+ }
}
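
The Accept-Encoding logic above reduces to a single decision plus an optional dictionary header. A compact sketch, assuming only the header values visible in the code; the function name is illustrative:

#include <string>

// SDCH is appended to, never substituted for, the gzip/deflate list, so a
// server that ignores SDCH still sees the usual encodings.
std::string ChooseAcceptEncoding(bool advertise_sdch) {
  return advertise_sdch ? "gzip,deflate,sdch" : "gzip,deflate";
}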
-bool URLRequestHttpJob::GetCharset(std::string* charset) {
- DCHECK(transaction_.get());
+void URLRequestHttpJob::AddCookieHeaderAndStart() {
+ // No matter what, we want to report our status as IO pending since we will
+ // be notifying our consumer asynchronously via OnStartCompleted.
+ SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));
- if (!response_info_)
- return false;
+ AddRef(); // Balanced in OnCanGetCookiesCompleted
- return response_info_->headers->GetCharset(charset);
-}
+ int policy = OK;
-void URLRequestHttpJob::GetResponseInfo(HttpResponseInfo* info) {
- DCHECK(request_);
- DCHECK(transaction_.get());
+ if (request_info_.load_flags & LOAD_DO_NOT_SEND_COOKIES) {
+ policy = ERR_FAILED;
+ } else if (request_->context()->cookie_policy()) {
+ policy = request_->context()->cookie_policy()->CanGetCookies(
+ request_->url(),
+ request_->first_party_for_cookies(),
+ &can_get_cookies_callback_);
+ if (policy == ERR_IO_PENDING)
+ return; // Wait for completion callback
+ }
- if (response_info_)
- *info = *response_info_;
+ OnCanGetCookiesCompleted(policy);
}
-bool URLRequestHttpJob::GetResponseCookies(
- std::vector<std::string>* cookies) {
+void URLRequestHttpJob::SaveCookiesAndNotifyHeadersComplete() {
DCHECK(transaction_.get());
- if (!response_info_)
- return false;
+ const HttpResponseInfo* response_info = transaction_->GetResponseInfo();
+ DCHECK(response_info);
- // TODO(darin): Why are we extracting response cookies again? Perhaps we
- // should just leverage response_cookies_.
-
- cookies->clear();
- FetchResponseCookies(response_info_, cookies);
- return true;
-}
-
-int URLRequestHttpJob::GetResponseCode() const {
- DCHECK(transaction_.get());
+ response_cookies_.clear();
+ response_cookies_save_index_ = 0;
- if (!response_info_)
- return -1;
+ FetchResponseCookies(response_info, &response_cookies_);
- return response_info_->headers->response_code();
+ // Now, loop over the response cookies, and attempt to persist each.
+ SaveNextCookie();
}
-bool URLRequestHttpJob::GetContentEncodings(
- std::vector<Filter::FilterType>* encoding_types) {
- DCHECK(transaction_.get());
- if (!response_info_)
- return false;
- DCHECK(encoding_types->empty());
-
- std::string encoding_type;
- void* iter = NULL;
- while (response_info_->headers->EnumerateHeader(&iter, "Content-Encoding",
- &encoding_type)) {
- encoding_types->push_back(Filter::ConvertEncodingToType(encoding_type));
+void URLRequestHttpJob::SaveNextCookie() {
+ if (response_cookies_save_index_ == response_cookies_.size()) {
+ response_cookies_.clear();
+ response_cookies_save_index_ = 0;
+ SetStatus(URLRequestStatus()); // Clear the IO_PENDING status
+ NotifyHeadersComplete();
+ return;
}
- // Even if encoding types are empty, there is a chance that we need to add
- // some decoding, as some proxies strip encoding completely. In such cases,
- // we may need to add (for example) SDCH filtering (when the context suggests
- // it is appropriate).
- Filter::FixupEncodingTypes(*this, encoding_types);
-
- return !encoding_types->empty();
-}
-
-bool URLRequestHttpJob::IsCachedContent() const {
- return is_cached_content_;
-}
-
-bool URLRequestHttpJob::IsSdchResponse() const {
- return sdch_dictionary_advertised_;
-}
-
-bool URLRequestHttpJob::IsSafeRedirect(const GURL& location) {
- // We only allow redirects to certain "safe" protocols. This does not
- // restrict redirects to externally handled protocols. Our consumer would
- // need to take care of those.
+ // No matter what, we want to report our status as IO pending since we will
+ // be notifying our consumer asynchronously via OnStartCompleted.
+ SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));
- if (!URLRequest::IsHandledURL(location))
- return true;
+ AddRef(); // Balanced in OnCanSetCookieCompleted
- static const char* kSafeSchemes[] = {
- "http",
- "https",
- "ftp"
- };
+ int policy = OK;
- for (size_t i = 0; i < arraysize(kSafeSchemes); ++i) {
- if (location.SchemeIs(kSafeSchemes[i]))
- return true;
+ if (request_info_.load_flags & LOAD_DO_NOT_SAVE_COOKIES) {
+ policy = ERR_FAILED;
+ } else if (request_->context()->cookie_policy()) {
+ policy = request_->context()->cookie_policy()->CanSetCookie(
+ request_->url(),
+ request_->first_party_for_cookies(),
+ response_cookies_[response_cookies_save_index_],
+ &can_set_cookie_callback_);
+ if (policy == ERR_IO_PENDING)
+ return; // Wait for completion callback
}
- return false;
+ OnCanSetCookieCompleted(policy);
}
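
SaveNextCookie and OnCanSetCookieCompleted together form an asynchronous loop: response_cookies_save_index_ plays the role of the loop counter, and each (possibly deferred) policy decision re-enters the loop body. A standalone sketch of that state machine's shape, with an inline always-OK policy standing in for the real CookiePolicy:

#include <cstddef>
#include <string>
#include <utility>
#include <vector>

class CookieSaver {
 public:
  explicit CookieSaver(std::vector<std::string> cookies)
      : cookies_(std::move(cookies)) {}

  void SaveNext() {
    if (index_ == cookies_.size())
      return;  // All cookies handled; NotifyHeadersComplete() would run here.
    // The real policy check may complete later (ERR_IO_PENDING); this
    // stand-in always answers OK inline.
    OnCanSetCookie(/*policy=*/0);
  }

 private:
  void OnCanSetCookie(int policy) {
    if (policy == 0) {
      // Persist cookies_[index_] here.
    }
    ++index_;
    SaveNext();  // Advance the asynchronous "loop".
  }

  std::vector<std::string> cookies_;
  std::size_t index_ = 0;
};

int main() {
  CookieSaver saver({"a=1", "b=2"});
  saver.SaveNext();
  return 0;
}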
-bool URLRequestHttpJob::NeedsAuth() {
- int code = GetResponseCode();
- if (code == -1)
- return false;
+void URLRequestHttpJob::FetchResponseCookies(
+ const HttpResponseInfo* response_info,
+ std::vector<std::string>* cookies) {
+ std::string name = "Set-Cookie";
+ std::string value;
- // Check if we need either Proxy or WWW Authentication. This could happen
- // because we either provided no auth info, or provided incorrect info.
- switch (code) {
- case 407:
- if (proxy_auth_state_ == AUTH_STATE_CANCELED)
- return false;
- proxy_auth_state_ = AUTH_STATE_NEED_AUTH;
- return true;
- case 401:
- if (server_auth_state_ == AUTH_STATE_CANCELED)
- return false;
- server_auth_state_ = AUTH_STATE_NEED_AUTH;
- return true;
+ void* iter = NULL;
+ while (response_info->headers->EnumerateHeader(&iter, name, &value)) {
+ if (!value.empty())
+ cookies->push_back(value);
}
- return false;
}
-void URLRequestHttpJob::GetAuthChallengeInfo(
- scoped_refptr<AuthChallengeInfo>* result) {
- DCHECK(transaction_.get());
+void URLRequestHttpJob::ProcessStrictTransportSecurityHeader() {
DCHECK(response_info_);
- // sanity checks:
- DCHECK(proxy_auth_state_ == AUTH_STATE_NEED_AUTH ||
- server_auth_state_ == AUTH_STATE_NEED_AUTH);
- DCHECK(response_info_->headers->response_code() == 401 ||
- response_info_->headers->response_code() == 407);
-
- *result = response_info_->auth_challenge;
-}
-
-void URLRequestHttpJob::SetAuth(const string16& username,
- const string16& password) {
- DCHECK(transaction_.get());
-
- // Proxy gets set first, then WWW.
- if (proxy_auth_state_ == AUTH_STATE_NEED_AUTH) {
- proxy_auth_state_ = AUTH_STATE_HAVE_AUTH;
- } else {
- DCHECK(server_auth_state_ == AUTH_STATE_NEED_AUTH);
- server_auth_state_ = AUTH_STATE_HAVE_AUTH;
- }
+ URLRequestContext* ctx = request_->context();
+ if (!ctx || !ctx->transport_security_state())
+ return;
- RestartTransactionWithAuth(username, password);
-}
+ const bool https = response_info_->ssl_info.is_valid();
+ const bool valid_https =
+ https && !IsCertStatusError(response_info_->ssl_info.cert_status);
-void URLRequestHttpJob::RestartTransactionWithAuth(
- const string16& username,
- const string16& password) {
- username_ = username;
- password_ = password;
+ std::string name = "Strict-Transport-Security";
+ std::string value;
- // These will be reset in OnStartCompleted.
- response_info_ = NULL;
- response_cookies_.clear();
+ int max_age;
+ bool include_subdomains;
- // Update the cookies, since the cookie store may have been updated from the
- // headers in the 401/407. Since cookies were already appended to
- // extra_headers, we need to strip them out before adding them again.
- request_info_.extra_headers.RemoveHeader(
- HttpRequestHeaders::kCookie);
+ void* iter = NULL;
+ while (response_info_->headers->EnumerateHeader(&iter, name, &value)) {
+ const bool ok = TransportSecurityState::ParseHeader(
+ value, &max_age, &include_subdomains);
+ if (!ok)
+ continue;
+ // We will only accept strict mode if we saw the header from an HTTPS
+ // connection with no certificate problems.
+ if (!valid_https)
+ continue;
+ base::Time current_time(base::Time::Now());
+ base::TimeDelta max_age_delta = base::TimeDelta::FromSeconds(max_age);
- AddCookieHeaderAndStart();
-}
+ TransportSecurityState::DomainState domain_state;
+ domain_state.expiry = current_time + max_age_delta;
+ domain_state.mode = TransportSecurityState::DomainState::MODE_STRICT;
+ domain_state.include_subdomains = include_subdomains;
-void URLRequestHttpJob::CancelAuth() {
- // Proxy gets set first, then WWW.
- if (proxy_auth_state_ == AUTH_STATE_NEED_AUTH) {
- proxy_auth_state_ = AUTH_STATE_CANCELED;
- } else {
- DCHECK(server_auth_state_ == AUTH_STATE_NEED_AUTH);
- server_auth_state_ = AUTH_STATE_CANCELED;
+ ctx->transport_security_state()->EnableHost(request_info_.url.host(),
+ domain_state);
}
- // These will be reset in OnStartCompleted.
- response_info_ = NULL;
- response_cookies_.clear();
-
- // OK, let the consumer read the error page...
- //
- // Because we set the AUTH_STATE_CANCELED flag, NeedsAuth will return false,
- // which will cause the consumer to receive OnResponseStarted instead of
- // OnAuthRequired.
- //
- // We have to do this via InvokeLater to avoid "recursing" the consumer.
- //
- MessageLoop::current()->PostTask(
- FROM_HERE,
- method_factory_.NewRunnableMethod(
- &URLRequestHttpJob::OnStartCompleted, OK));
-}
+ // TODO(agl): change this over when we have fixed things at the server end.
+ // The string should be "Opportunistic-Transport-Security";
+ name = "X-Bodge-Transport-Security";
-void URLRequestHttpJob::ContinueWithCertificate(
- X509Certificate* client_cert) {
- DCHECK(transaction_.get());
+ while (response_info_->headers->EnumerateHeader(&iter, name, &value)) {
+ const bool ok = TransportSecurityState::ParseHeader(
+ value, &max_age, &include_subdomains);
+ if (!ok)
+ continue;
+ // If we saw an opportunistic request over HTTPS, then clearly we can make
+ // HTTPS connections to the host so we should remember this.
+ if (https) {
+ base::Time current_time(base::Time::Now());
+ base::TimeDelta max_age_delta = base::TimeDelta::FromSeconds(max_age);
- DCHECK(!response_info_) << "should not have a response yet";
+ TransportSecurityState::DomainState domain_state;
+ domain_state.expiry = current_time + max_age_delta;
+ domain_state.mode =
+ TransportSecurityState::DomainState::MODE_SPDY_ONLY;
+ domain_state.include_subdomains = include_subdomains;
- // No matter what, we want to report our status as IO pending since we will
- // be notifying our consumer asynchronously via OnStartCompleted.
- SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));
+ ctx->transport_security_state()->EnableHost(request_info_.url.host(),
+ domain_state);
+ continue;
+ }
- int rv = transaction_->RestartWithCertificate(client_cert, &start_callback_);
- if (rv == ERR_IO_PENDING)
- return;
+ if (!request())
+ break;
- // The transaction started synchronously, but we need to notify the
- // URLRequest delegate via the message loop.
- MessageLoop::current()->PostTask(
- FROM_HERE,
- method_factory_.NewRunnableMethod(
- &URLRequestHttpJob::OnStartCompleted, rv));
-}
+ // At this point, we have a request for opportunistic encryption over HTTP.
+ // In this case we need to probe to check that we can make HTTPS
+ // connections to that host.
+ HTTPSProber* const prober = HTTPSProber::GetInstance();
+ if (prober->HaveProbed(request_info_.url.host()) ||
+ prober->InFlight(request_info_.url.host())) {
+ continue;
+ }
-void URLRequestHttpJob::ContinueDespiteLastError() {
- // If the transaction was destroyed, then the job was cancelled.
- if (!transaction_.get())
- return;
-
- DCHECK(!response_info_) << "should not have a response yet";
-
- // No matter what, we want to report our status as IO pending since we will
- // be notifying our consumer asynchronously via OnStartCompleted.
- SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));
-
- int rv = transaction_->RestartIgnoringLastError(&start_callback_);
- if (rv == ERR_IO_PENDING)
- return;
-
- // The transaction started synchronously, but we need to notify the
- // URLRequest delegate via the message loop.
- MessageLoop::current()->PostTask(
- FROM_HERE,
- method_factory_.NewRunnableMethod(
- &URLRequestHttpJob::OnStartCompleted, rv));
-}
-
-bool URLRequestHttpJob::ReadRawData(IOBuffer* buf, int buf_size,
- int *bytes_read) {
- DCHECK_NE(buf_size, 0);
- DCHECK(bytes_read);
- DCHECK(!read_in_progress_);
-
- int rv = transaction_->Read(buf, buf_size, &read_callback_);
- if (rv >= 0) {
- *bytes_read = rv;
- return true;
- }
-
- if (rv == ERR_IO_PENDING) {
- read_in_progress_ = true;
- SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));
- } else {
- NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, rv));
+ HTTPSProberDelegateImpl* delegate =
+ new HTTPSProberDelegateImpl(request_info_.url.host(), max_age,
+ include_subdomains,
+ ctx->transport_security_state());
+ if (!prober->ProbeHost(request_info_.url.host(), request()->context(),
+ delegate)) {
+ delete delegate;
+ }
}
-
- return false;
-}
-
-void URLRequestHttpJob::StopCaching() {
- if (transaction_.get())
- transaction_->StopCaching();
}
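
TransportSecurityState::ParseHeader consumes values of the form "max-age=31536000; includeSubDomains". A rough standalone sketch of that parse, far looser than the real grammar and with simplified error handling:

#include <cstdlib>
#include <string>

bool ParseStsHeaderSketch(const std::string& value, int* max_age,
                          bool* include_subdomains) {
  const std::string kMaxAge = "max-age=";
  std::string::size_type pos = value.find(kMaxAge);
  if (pos == std::string::npos)
    return false;
  *max_age = std::atoi(value.c_str() + pos + kMaxAge.size());
  *include_subdomains =
      value.find("includeSubDomains") != std::string::npos;
  return *max_age >= 0;
}

int main() {
  int max_age = 0;
  bool subdomains = false;
  ParseStsHeaderSketch("max-age=31536000; includeSubDomains",
                       &max_age, &subdomains);
  // max_age == 31536000, subdomains == true.
  return 0;
}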
void URLRequestHttpJob::OnCanGetCookiesCompleted(int policy) {
@@ -620,349 +617,353 @@ bool URLRequestHttpJob::ShouldTreatAsCertificateError(int result) {
TransportSecurityState::DomainState::MODE_OPPORTUNISTIC;
}
-void URLRequestHttpJob::NotifyHeadersComplete() {
- DCHECK(!response_info_);
+void URLRequestHttpJob::RestartTransactionWithAuth(
+ const string16& username,
+ const string16& password) {
+ username_ = username;
+ password_ = password;
- response_info_ = transaction_->GetResponseInfo();
+ // These will be reset in OnStartCompleted.
+ response_info_ = NULL;
+ response_cookies_.clear();
- // Save boolean, as we'll need this info at destruction time, and filters may
- // also need this info.
- is_cached_content_ = response_info_->was_cached;
+ // Update the cookies, since the cookie store may have been updated from the
+ // headers in the 401/407. Since cookies were already appended to
+ // extra_headers, we need to strip them out before adding them again.
+ request_info_.extra_headers.RemoveHeader(
+ HttpRequestHeaders::kCookie);
- if (!is_cached_content_) {
- URLRequestThrottlerHeaderAdapter response_adapter(
- response_info_->headers);
- throttling_entry_->UpdateWithResponse(&response_adapter);
- }
+ AddCookieHeaderAndStart();
+}
- ProcessStrictTransportSecurityHeader();
+void URLRequestHttpJob::SetUpload(UploadData* upload) {
+ DCHECK(!transaction_.get()) << "cannot change once started";
+ request_info_.upload_data = upload;
+}
- if (SdchManager::Global() &&
- SdchManager::Global()->IsInSupportedDomain(request_->url())) {
- static const std::string name = "Get-Dictionary";
- std::string url_text;
- void* iter = NULL;
- // TODO(jar): We need to not fetch dictionaries the first time they are
- // seen, but rather wait until we can justify their usefulness.
- // For now, we will only fetch the first dictionary, which will at least
- // require multiple suggestions before we get additional ones for this site.
- // Eventually we should wait until a dictionary is requested several times
- // before we even download it (so that we don't waste memory or bandwidth).
- if (response_info_->headers->EnumerateHeader(&iter, name, &url_text)) {
- // request_->url() won't be valid in the destructor, so we use an
- // alternate copy.
- DCHECK(request_->url() == request_info_.url);
- // Resolve suggested URL relative to request url.
- sdch_dictionary_url_ = request_info_.url.Resolve(url_text);
- }
+void URLRequestHttpJob::SetExtraRequestHeaders(
+ const HttpRequestHeaders& headers) {
+ DCHECK(!transaction_.get()) << "cannot change once started";
+ request_info_.extra_headers.CopyFrom(headers);
+}
+
+void URLRequestHttpJob::Start() {
+ DCHECK(!transaction_.get());
+
+ // Ensure that we do not send username and password fields in the referrer.
+ GURL referrer(request_->GetSanitizedReferrer());
+
+ request_info_.url = request_->url();
+ request_info_.referrer = referrer;
+ request_info_.method = request_->method();
+ request_info_.load_flags = request_->load_flags();
+ request_info_.priority = request_->priority();
+
+ if (request_->context()) {
+ request_info_.extra_headers.SetHeader(
+ HttpRequestHeaders::kUserAgent,
+ request_->context()->GetUserAgent(request_->url()));
}
- // The HTTP transaction may be restarted several times for the purposes
- // of sending authorization information. Each time it restarts, we get
- // notified of the headers completion so that we can update the cookie store.
- if (transaction_->IsReadyToRestartForAuth()) {
- DCHECK(!response_info_->auth_challenge.get());
- RestartTransactionWithAuth(string16(), string16());
+ AddExtraHeaders();
+ AddCookieHeaderAndStart();
+}
+
+void URLRequestHttpJob::Kill() {
+ if (!transaction_.get())
return;
- }
- URLRequestJob::NotifyHeadersComplete();
+ DestroyTransaction();
+ URLRequestJob::Kill();
}
-void URLRequestHttpJob::DestroyTransaction() {
+LoadState URLRequestHttpJob::GetLoadState() const {
+ return transaction_.get() ?
+ transaction_->GetLoadState() : LOAD_STATE_IDLE;
+}
+
+uint64 URLRequestHttpJob::GetUploadProgress() const {
+ return transaction_.get() ? transaction_->GetUploadProgress() : 0;
+}
+
+bool URLRequestHttpJob::GetMimeType(std::string* mime_type) const {
DCHECK(transaction_.get());
- transaction_.reset();
- response_info_ = NULL;
- context_ = NULL;
+ if (!response_info_)
+ return false;
+
+ return response_info_->headers->GetMimeType(mime_type);
}
-void URLRequestHttpJob::StartTransaction() {
- // NOTE: This method assumes that request_info_ is already set up properly.
+bool URLRequestHttpJob::GetCharset(std::string* charset) {
+ DCHECK(transaction_.get());
- // If we already have a transaction, then we should restart the transaction
- // with auth provided by username_ and password_.
+ if (!response_info_)
+ return false;
- int rv;
+ return response_info_->headers->GetCharset(charset);
+}
- if (transaction_.get()) {
- rv = transaction_->RestartWithAuth(username_, password_, &start_callback_);
- username_.clear();
- password_.clear();
- } else {
- DCHECK(request_->context());
- DCHECK(request_->context()->http_transaction_factory());
+void URLRequestHttpJob::GetResponseInfo(HttpResponseInfo* info) {
+ DCHECK(request_);
+ DCHECK(transaction_.get());
- rv = request_->context()->http_transaction_factory()->CreateTransaction(
- &transaction_);
- if (rv == OK) {
- if (!throttling_entry_->IsDuringExponentialBackoff() ||
- !net::URLRequestThrottlerManager::GetInstance()->
- enforce_throttling()) {
- rv = transaction_->Start(
- &request_info_, &start_callback_, request_->net_log());
- } else {
- // Special error code for the exponential back-off module.
- rv = ERR_TEMPORARILY_THROTTLED;
- }
- // Make sure the context is alive for the duration of the
- // transaction.
- context_ = request_->context();
- }
- }
+ if (response_info_)
+ *info = *response_info_;
+}
- if (rv == ERR_IO_PENDING)
- return;
+bool URLRequestHttpJob::GetResponseCookies(
+ std::vector<std::string>* cookies) {
+ DCHECK(transaction_.get());
- // The transaction started synchronously, but we need to notify the
- // URLRequest delegate via the message loop.
- MessageLoop::current()->PostTask(
- FROM_HERE,
- method_factory_.NewRunnableMethod(
- &URLRequestHttpJob::OnStartCompleted, rv));
+ if (!response_info_)
+ return false;
+
+ // TODO(darin): Why are we extracting response cookies again? Perhaps we
+ // should just leverage response_cookies_.
+
+ cookies->clear();
+ FetchResponseCookies(response_info_, cookies);
+ return true;
}
-void URLRequestHttpJob::AddExtraHeaders() {
- // TODO(jar): Consider optimizing away SDCH advertising bytes when the URL is
- // probably an img or such (and SDCH encoding is not likely).
- bool advertise_sdch = SdchManager::Global() &&
- SdchManager::Global()->IsInSupportedDomain(request_->url());
- std::string avail_dictionaries;
- if (advertise_sdch) {
- SdchManager::Global()->GetAvailDictionaryList(request_->url(),
- &avail_dictionaries);
+int URLRequestHttpJob::GetResponseCode() const {
+ DCHECK(transaction_.get());
- // The AllowLatencyExperiment() is only true if we've successfully done a
- // full SDCH compression recently in this browser session for this host.
- // Note that for this path, there might be no applicable dictionaries, and
- // hence we can't participate in the experiment.
- if (!avail_dictionaries.empty() &&
- SdchManager::Global()->AllowLatencyExperiment(request_->url())) {
- // We are participating in the test (or control), and hence we'll
- // eventually record statistics via either SDCH_EXPERIMENT_DECODE or
- // SDCH_EXPERIMENT_HOLDBACK, and we'll need some packet timing data.
- EnablePacketCounting(kSdchPacketHistogramCount);
- if (base::RandDouble() < .01) {
- sdch_test_control_ = true; // 1% probability.
- advertise_sdch = false;
- } else {
- sdch_test_activated_ = true;
- }
- }
+ if (!response_info_)
+ return -1;
+
+ return response_info_->headers->response_code();
+}
+
+bool URLRequestHttpJob::GetContentEncodings(
+ std::vector<Filter::FilterType>* encoding_types) {
+ DCHECK(transaction_.get());
+ if (!response_info_)
+ return false;
+ DCHECK(encoding_types->empty());
+
+ std::string encoding_type;
+ void* iter = NULL;
+ while (response_info_->headers->EnumerateHeader(&iter, "Content-Encoding",
+ &encoding_type)) {
+ encoding_types->push_back(Filter::ConvertEncodingToType(encoding_type));
}
- // Supply Accept-Encoding headers first so that it is more likely that they
- // will be in the first transmitted packet. This can sometimes make it easier
- // to filter and analyze the streams to assure that a proxy has not damaged
- // these headers. Some proxies deliberately corrupt Accept-Encoding headers.
- if (!advertise_sdch) {
- // Tell the server what compression formats we support (other than SDCH).
- request_info_.extra_headers.SetHeader(
- HttpRequestHeaders::kAcceptEncoding, "gzip,deflate");
- } else {
- // Include SDCH in acceptable list.
- request_info_.extra_headers.SetHeader(
- HttpRequestHeaders::kAcceptEncoding, "gzip,deflate,sdch");
- if (!avail_dictionaries.empty()) {
- request_info_.extra_headers.SetHeader(
- kAvailDictionaryHeader,
- avail_dictionaries);
- sdch_dictionary_advertised_ = true;
- // Since we're tagging this transaction as advertising a dictionary, we'll
- // definitely employ an SDCH filter (or tentative sdch filter) when we get
- // a response. When done, we'll record histograms via SDCH_DECODE or
- // SDCH_PASSTHROUGH. Hence we need to record packet arrival times.
- EnablePacketCounting(kSdchPacketHistogramCount);
- }
+ // Even if encoding types are empty, there is a chance that we need to add
+ // some decoding, as some proxies strip encoding completely. In such cases,
+ // we may need to add (for example) SDCH filtering (when the context suggests
+ // it is appropriate).
+ Filter::FixupEncodingTypes(*this, encoding_types);
+
+ return !encoding_types->empty();
+}
+
+bool URLRequestHttpJob::IsCachedContent() const {
+ return is_cached_content_;
+}
+
+bool URLRequestHttpJob::IsSdchResponse() const {
+ return sdch_dictionary_advertised_;
+}
+
+bool URLRequestHttpJob::IsSafeRedirect(const GURL& location) {
+ // We only allow redirects to certain "safe" protocols. This does not
+ // restrict redirects to externally handled protocols. Our consumer would
+ // need to take care of those.
+
+ if (!URLRequest::IsHandledURL(location))
+ return true;
+
+ static const char* kSafeSchemes[] = {
+ "http",
+ "https",
+ "ftp"
+ };
+
+ for (size_t i = 0; i < arraysize(kSafeSchemes); ++i) {
+ if (location.SchemeIs(kSafeSchemes[i]))
+ return true;
}
- URLRequestContext* context = request_->context();
- if (context) {
- // Only add default Accept-Language and Accept-Charset if the request
- // didn't have them specified.
- if (!request_info_.extra_headers.HasHeader(
- HttpRequestHeaders::kAcceptLanguage)) {
- request_info_.extra_headers.SetHeader(
- HttpRequestHeaders::kAcceptLanguage,
- context->accept_language());
- }
- if (!request_info_.extra_headers.HasHeader(
- HttpRequestHeaders::kAcceptCharset)) {
- request_info_.extra_headers.SetHeader(
- HttpRequestHeaders::kAcceptCharset,
- context->accept_charset());
- }
+ return false;
+}
+
+bool URLRequestHttpJob::NeedsAuth() {
+ int code = GetResponseCode();
+ if (code == -1)
+ return false;
+
+ // Check if we need either Proxy or WWW Authentication. This could happen
+ // because we either provided no auth info, or provided incorrect info.
+ switch (code) {
+ case 407:
+ if (proxy_auth_state_ == AUTH_STATE_CANCELED)
+ return false;
+ proxy_auth_state_ = AUTH_STATE_NEED_AUTH;
+ return true;
+ case 401:
+ if (server_auth_state_ == AUTH_STATE_CANCELED)
+ return false;
+ server_auth_state_ = AUTH_STATE_NEED_AUTH;
+ return true;
}
+ return false;
}
-void URLRequestHttpJob::AddCookieHeaderAndStart() {
- // No matter what, we want to report our status as IO pending since we will
- // be notifying our consumer asynchronously via OnStartCompleted.
- SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));
+void URLRequestHttpJob::GetAuthChallengeInfo(
+ scoped_refptr<AuthChallengeInfo>* result) {
+ DCHECK(transaction_.get());
+ DCHECK(response_info_);
- AddRef(); // Balanced in OnCanGetCookiesCompleted
+ // sanity checks:
+ DCHECK(proxy_auth_state_ == AUTH_STATE_NEED_AUTH ||
+ server_auth_state_ == AUTH_STATE_NEED_AUTH);
+ DCHECK(response_info_->headers->response_code() == 401 ||
+ response_info_->headers->response_code() == 407);
- int policy = OK;
+ *result = response_info_->auth_challenge;
+}
- if (request_info_.load_flags & LOAD_DO_NOT_SEND_COOKIES) {
- policy = ERR_FAILED;
- } else if (request_->context()->cookie_policy()) {
- policy = request_->context()->cookie_policy()->CanGetCookies(
- request_->url(),
- request_->first_party_for_cookies(),
- &can_get_cookies_callback_);
- if (policy == ERR_IO_PENDING)
- return; // Wait for completion callback
+void URLRequestHttpJob::SetAuth(const string16& username,
+ const string16& password) {
+ DCHECK(transaction_.get());
+
+ // Proxy gets set first, then WWW.
+ if (proxy_auth_state_ == AUTH_STATE_NEED_AUTH) {
+ proxy_auth_state_ = AUTH_STATE_HAVE_AUTH;
+ } else {
+ DCHECK(server_auth_state_ == AUTH_STATE_NEED_AUTH);
+ server_auth_state_ = AUTH_STATE_HAVE_AUTH;
}
- OnCanGetCookiesCompleted(policy);
+ RestartTransactionWithAuth(username, password);
}
-void URLRequestHttpJob::SaveCookiesAndNotifyHeadersComplete() {
- DCHECK(transaction_.get());
-
- const HttpResponseInfo* response_info = transaction_->GetResponseInfo();
- DCHECK(response_info);
+void URLRequestHttpJob::CancelAuth() {
+ // Proxy gets set first, then WWW.
+ if (proxy_auth_state_ == AUTH_STATE_NEED_AUTH) {
+ proxy_auth_state_ = AUTH_STATE_CANCELED;
+ } else {
+ DCHECK(server_auth_state_ == AUTH_STATE_NEED_AUTH);
+ server_auth_state_ = AUTH_STATE_CANCELED;
+ }
+ // These will be reset in OnStartCompleted.
+ response_info_ = NULL;
response_cookies_.clear();
- response_cookies_save_index_ = 0;
-
- FetchResponseCookies(response_info, &response_cookies_);
- // Now, loop over the response cookies, and attempt to persist each.
- SaveNextCookie();
+ // OK, let the consumer read the error page...
+ //
+ // Because we set the AUTH_STATE_CANCELED flag, NeedsAuth will return false,
+ // which will cause the consumer to receive OnResponseStarted instead of
+ // OnAuthRequired.
+ //
+ // We have to do this via InvokeLater to avoid "recursing" the consumer.
+ //
+ MessageLoop::current()->PostTask(
+ FROM_HERE,
+ method_factory_.NewRunnableMethod(
+ &URLRequestHttpJob::OnStartCompleted, OK));
}
-void URLRequestHttpJob::SaveNextCookie() {
- if (response_cookies_save_index_ == response_cookies_.size()) {
- response_cookies_.clear();
- response_cookies_save_index_ = 0;
- SetStatus(URLRequestStatus()); // Clear the IO_PENDING status
- NotifyHeadersComplete();
- return;
- }
+void URLRequestHttpJob::ContinueWithCertificate(
+ X509Certificate* client_cert) {
+ DCHECK(transaction_.get());
+
+ DCHECK(!response_info_) << "should not have a response yet";
// No matter what, we want to report our status as IO pending since we will
// be notifying our consumer asynchronously via OnStartCompleted.
SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));
- AddRef(); // Balanced in OnCanSetCookieCompleted
-
- int policy = OK;
-
- if (request_info_.load_flags & LOAD_DO_NOT_SAVE_COOKIES) {
- policy = ERR_FAILED;
- } else if (request_->context()->cookie_policy()) {
- policy = request_->context()->cookie_policy()->CanSetCookie(
- request_->url(),
- request_->first_party_for_cookies(),
- response_cookies_[response_cookies_save_index_],
- &can_set_cookie_callback_);
- if (policy == ERR_IO_PENDING)
- return; // Wait for completion callback
- }
-
- OnCanSetCookieCompleted(policy);
-}
-
-void URLRequestHttpJob::FetchResponseCookies(
- const HttpResponseInfo* response_info,
- std::vector<std::string>* cookies) {
- std::string name = "Set-Cookie";
- std::string value;
+ int rv = transaction_->RestartWithCertificate(client_cert, &start_callback_);
+ if (rv == ERR_IO_PENDING)
+ return;
- void* iter = NULL;
- while (response_info->headers->EnumerateHeader(&iter, name, &value)) {
- if (!value.empty())
- cookies->push_back(value);
- }
+ // The transaction started synchronously, but we need to notify the
+ // URLRequest delegate via the message loop.
+ MessageLoop::current()->PostTask(
+ FROM_HERE,
+ method_factory_.NewRunnableMethod(
+ &URLRequestHttpJob::OnStartCompleted, rv));
}
-void URLRequestHttpJob::ProcessStrictTransportSecurityHeader() {
- DCHECK(response_info_);
-
- URLRequestContext* ctx = request_->context();
- if (!ctx || !ctx->transport_security_state())
+void URLRequestHttpJob::ContinueDespiteLastError() {
+ // If the transaction was destroyed, then the job was cancelled.
+ if (!transaction_.get())
return;
- const bool https = response_info_->ssl_info.is_valid();
- const bool valid_https =
- https && !IsCertStatusError(response_info_->ssl_info.cert_status);
+ DCHECK(!response_info_) << "should not have a response yet";
- std::string name = "Strict-Transport-Security";
- std::string value;
+ // No matter what, we want to report our status as IO pending since we will
+ // be notifying our consumer asynchronously via OnStartCompleted.
+ SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));
- int max_age;
- bool include_subdomains;
+ int rv = transaction_->RestartIgnoringLastError(&start_callback_);
+ if (rv == ERR_IO_PENDING)
+ return;
- void* iter = NULL;
- while (response_info_->headers->EnumerateHeader(&iter, name, &value)) {
- const bool ok = TransportSecurityState::ParseHeader(
- value, &max_age, &include_subdomains);
- if (!ok)
- continue;
- // We will only accept strict mode if we saw the header from an HTTPS
- // connection with no certificate problems.
- if (!valid_https)
- continue;
- base::Time current_time(base::Time::Now());
- base::TimeDelta max_age_delta = base::TimeDelta::FromSeconds(max_age);
+ // The transaction started synchronously, but we need to notify the
+ // URLRequest delegate via the message loop.
+ MessageLoop::current()->PostTask(
+ FROM_HERE,
+ method_factory_.NewRunnableMethod(
+ &URLRequestHttpJob::OnStartCompleted, rv));
+}
- TransportSecurityState::DomainState domain_state;
- domain_state.expiry = current_time + max_age_delta;
- domain_state.mode = TransportSecurityState::DomainState::MODE_STRICT;
- domain_state.include_subdomains = include_subdomains;
+bool URLRequestHttpJob::ReadRawData(IOBuffer* buf, int buf_size,
+ int *bytes_read) {
+ DCHECK_NE(buf_size, 0);
+ DCHECK(bytes_read);
+ DCHECK(!read_in_progress_);
- ctx->transport_security_state()->EnableHost(request_info_.url.host(),
- domain_state);
+ int rv = transaction_->Read(buf, buf_size, &read_callback_);
+ if (rv >= 0) {
+ *bytes_read = rv;
+ return true;
}
- // TODO(agl): change this over when we have fixed things at the server end.
- // The string should be "Opportunistic-Transport-Security";
- name = "X-Bodge-Transport-Security";
-
- while (response_info_->headers->EnumerateHeader(&iter, name, &value)) {
- const bool ok = TransportSecurityState::ParseHeader(
- value, &max_age, &include_subdomains);
- if (!ok)
- continue;
- // If we saw an opportunistic request over HTTPS, then clearly we can make
- // HTTPS connections to the host so we should remember this.
- if (https) {
- base::Time current_time(base::Time::Now());
- base::TimeDelta max_age_delta = base::TimeDelta::FromSeconds(max_age);
-
- TransportSecurityState::DomainState domain_state;
- domain_state.expiry = current_time + max_age_delta;
- domain_state.mode =
- TransportSecurityState::DomainState::MODE_SPDY_ONLY;
- domain_state.include_subdomains = include_subdomains;
+ if (rv == ERR_IO_PENDING) {
+ read_in_progress_ = true;
+ SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));
+ } else {
+ NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, rv));
+ }
- ctx->transport_security_state()->EnableHost(request_info_.url.host(),
- domain_state);
- continue;
- }
+ return false;
+}
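
ReadRawData distinguishes three completion modes from HttpTransaction::Read: a non-negative result is a synchronous read of that many bytes, ERR_IO_PENDING defers completion to read_callback_, and anything else is a terminal error. A standalone sketch of the same contract, with a stub Transaction in place of the real interface:

#include <functional>

enum { kOkSketch = 0, kErrIoPendingSketch = -1 };

struct Transaction {
  // Stub: pretends 0 bytes were read synchronously (end of stream).
  int Read(char* /*buf*/, int /*buf_size*/,
           std::function<void(int)>* /*callback*/) {
    return 0;
  }
};

// Returns true only on synchronous success; ERR_IO_PENDING and real errors
// both return false, and the caller tells them apart via status/callback.
bool ReadRaw(Transaction* t, char* buf, int buf_size, int* bytes_read,
             std::function<void(int)>* callback) {
  int rv = t->Read(buf, buf_size, callback);
  if (rv >= 0) {
    *bytes_read = rv;
    return true;
  }
  return false;
}

int main() {
  Transaction t;
  char buf[16];
  int bytes = 0;
  std::function<void(int)> cb = [](int) {};
  ReadRaw(&t, buf, 16, &bytes, &cb);
  return 0;
}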
- if (!request())
- break;
+void URLRequestHttpJob::StopCaching() {
+ if (transaction_.get())
+ transaction_->StopCaching();
+}
- // At this point, we have a request for opportunistic encryption over HTTP.
- // In this case we need to probe to check that we can make HTTPS
- // connections to that host.
- HTTPSProber* const prober = HTTPSProber::GetInstance();
- if (prober->HaveProbed(request_info_.url.host()) ||
- prober->InFlight(request_info_.url.host())) {
- continue;
- }
+URLRequestHttpJob::~URLRequestHttpJob() {
+ DCHECK(!sdch_test_control_ || !sdch_test_activated_);
+ if (!IsCachedContent()) {
+ if (sdch_test_control_)
+ RecordPacketStats(SDCH_EXPERIMENT_HOLDBACK);
+ if (sdch_test_activated_)
+ RecordPacketStats(SDCH_EXPERIMENT_DECODE);
+ }
+ // Make sure SDCH filters are told to emit histogram data while this class
+ // can still service the IsCachedContent() call.
+ DestroyFilters();
- HTTPSProberDelegateImpl* delegate =
- new HTTPSProberDelegateImpl(request_info_.url.host(), max_age,
- include_subdomains,
- ctx->transport_security_state());
- if (!prober->ProbeHost(request_info_.url.host(), request()->context(),
- delegate)) {
- delete delegate;
- }
+ if (sdch_dictionary_url_.is_valid()) {
+ // Prior to reaching the destructor, request_ has been set to a NULL
+ // pointer, so request_->url() is no longer valid in the destructor, and we
+ // use an alternate copy |request_info_.url|.
+ SdchManager* manager = SdchManager::Global();
+ // To be extra safe, since this is a "different time" from when we decided
+ // to get the dictionary, we'll validate that an SdchManager is available.
+ // At shutdown time, care is taken to be sure that we don't delete this
+ // globally useful instance "too soon," so this check is just defensive
+ // coding to assure that IF the system is shutting down, we don't have any
+ // problem if the manager was deleted ahead of time.
+ if (manager) // Defensive programming.
+ manager->FetchDictionary(request_info_.url, sdch_dictionary_url_);
}
}