OLD | NEW |
1 // Copyright (c) 2010 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2010 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "net/url_request/url_request_http_job.h" | 5 #include "net/url_request/url_request_http_job.h" |
6 | 6 |
7 #include "base/base_switches.h" | 7 #include "base/base_switches.h" |
8 #include "base/command_line.h" | 8 #include "base/command_line.h" |
9 #include "base/compiler_specific.h" | 9 #include "base/compiler_specific.h" |
10 #include "base/file_util.h" | 10 #include "base/file_util.h" |
(...skipping 99 matching lines...) |
110 GURL new_location = request->url().ReplaceComponents(replacements); | 110 GURL new_location = request->url().ReplaceComponents(replacements); |
111 return new URLRequestRedirectJob(request, new_location); | 111 return new URLRequestRedirectJob(request, new_location); |
112 } else { | 112 } else { |
113 // TODO(agl): implement opportunistic HTTPS upgrade. | 113 // TODO(agl): implement opportunistic HTTPS upgrade. |
114 } | 114 } |
115 } | 115 } |
116 | 116 |
117 return new URLRequestHttpJob(request); | 117 return new URLRequestHttpJob(request); |
118 } | 118 } |
119 | 119 |
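
Editor's sketch: the factory above upgrades a URL in place via GURL::ReplaceComponents. A minimal, self-contained version of that scheme rewrite, assuming the googleurl API of this era (UpgradeToHttps is a hypothetical helper name):

#include "googleurl/src/gurl.h"

// Swap the scheme to "https" while leaving host, port, path, and query alone.
GURL UpgradeToHttps(const GURL& url) {
  url_canon::Replacements<char> replacements;
  replacements.SetScheme("https", url_parse::Component(0, 5));  // strlen("https")
  return url.ReplaceComponents(replacements);
}
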
| 120 |
120 URLRequestHttpJob::URLRequestHttpJob(URLRequest* request) | 121 URLRequestHttpJob::URLRequestHttpJob(URLRequest* request) |
121 : URLRequestJob(request), | 122 : URLRequestJob(request), |
122 response_info_(NULL), | 123 response_info_(NULL), |
123 response_cookies_save_index_(0), | 124 response_cookies_save_index_(0), |
124 proxy_auth_state_(AUTH_STATE_DONT_NEED_AUTH), | 125 proxy_auth_state_(AUTH_STATE_DONT_NEED_AUTH), |
125 server_auth_state_(AUTH_STATE_DONT_NEED_AUTH), | 126 server_auth_state_(AUTH_STATE_DONT_NEED_AUTH), |
126 ALLOW_THIS_IN_INITIALIZER_LIST(can_get_cookies_callback_( | 127 ALLOW_THIS_IN_INITIALIZER_LIST(can_get_cookies_callback_( |
127 this, &URLRequestHttpJob::OnCanGetCookiesCompleted)), | 128 this, &URLRequestHttpJob::OnCanGetCookiesCompleted)), |
128 ALLOW_THIS_IN_INITIALIZER_LIST(can_set_cookie_callback_( | 129 ALLOW_THIS_IN_INITIALIZER_LIST(can_set_cookie_callback_( |
129 this, &URLRequestHttpJob::OnCanSetCookieCompleted)), | 130 this, &URLRequestHttpJob::OnCanSetCookieCompleted)), |
130 ALLOW_THIS_IN_INITIALIZER_LIST(start_callback_( | 131 ALLOW_THIS_IN_INITIALIZER_LIST(start_callback_( |
131 this, &URLRequestHttpJob::OnStartCompleted)), | 132 this, &URLRequestHttpJob::OnStartCompleted)), |
132 ALLOW_THIS_IN_INITIALIZER_LIST(read_callback_( | 133 ALLOW_THIS_IN_INITIALIZER_LIST(read_callback_( |
133 this, &URLRequestHttpJob::OnReadCompleted)), | 134 this, &URLRequestHttpJob::OnReadCompleted)), |
134 read_in_progress_(false), | 135 read_in_progress_(false), |
135 transaction_(NULL), | 136 transaction_(NULL), |
136 throttling_entry_(URLRequestThrottlerManager::GetInstance()-> | 137 throttling_entry_(URLRequestThrottlerManager::GetInstance()-> |
137 RegisterRequestUrl(request->url())), | 138 RegisterRequestUrl(request->url())), |
138 sdch_dictionary_advertised_(false), | 139 sdch_dictionary_advertised_(false), |
139 sdch_test_activated_(false), | 140 sdch_test_activated_(false), |
140 sdch_test_control_(false), | 141 sdch_test_control_(false), |
141 is_cached_content_(false), | 142 is_cached_content_(false), |
142 ALLOW_THIS_IN_INITIALIZER_LIST(method_factory_(this)) { | 143 ALLOW_THIS_IN_INITIALIZER_LIST(method_factory_(this)) { |
143 } | 144 } |
144 | 145 |
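
Editor's sketch: the ALLOW_THIS_IN_INITIALIZER_LIST wrappers above exist because MSVC warns (C4355) when |this| is used in a member initializer list. A self-contained sketch of the pattern, assuming the old-style net::CompletionCallbackImpl<T> template of this period (MyJob is a hypothetical stand-in):

#include "base/compiler_specific.h"
#include "net/base/completion_callback.h"

class MyJob {
 public:
  // Safe despite the warning: the callback only stores |this| and is not
  // invoked until after construction completes.
  MyJob()
      : ALLOW_THIS_IN_INITIALIZER_LIST(
            start_callback_(this, &MyJob::OnStartCompleted)) {}

 private:
  void OnStartCompleted(int result) { /* handle completion */ }

  net::CompletionCallbackImpl<MyJob> start_callback_;
};
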
145 URLRequestHttpJob::~URLRequestHttpJob() { | 146 void URLRequestHttpJob::NotifyHeadersComplete() { |
146 DCHECK(!sdch_test_control_ || !sdch_test_activated_); | 147 DCHECK(!response_info_); |
147 if (!IsCachedContent()) { | 148 |
148 if (sdch_test_control_) | 149 response_info_ = transaction_->GetResponseInfo(); |
149 RecordPacketStats(SDCH_EXPERIMENT_HOLDBACK); | 150 |
150 if (sdch_test_activated_) | 151 // Save boolean, as we'll need this info at destruction time, and filters may |
151 RecordPacketStats(SDCH_EXPERIMENT_DECODE); | 152 // also need this info. |
152 } | 153 is_cached_content_ = response_info_->was_cached; |
153 // Make sure SDCH filters are told to emit histogram data while this class | 154 |
154 // can still service the IsCachedContent() call. | 155 if (!is_cached_content_) { |
155 DestroyFilters(); | 156 URLRequestThrottlerHeaderAdapter response_adapter( |
156 | 157 response_info_->headers); |
157 if (sdch_dictionary_url_.is_valid()) { | 158 throttling_entry_->UpdateWithResponse(&response_adapter); |
158 // Prior to reaching the destructor, request_ has been set to a NULL | 159 } |
159 // pointer, so request_->url() is no longer valid in the destructor, and we | 160 |
160 // use an alternate copy |request_info_.url|. | 161 ProcessStrictTransportSecurityHeader(); |
161 SdchManager* manager = SdchManager::Global(); | 162 |
162 // To be extra safe, since this is a "different time" from when we decided | 163 if (SdchManager::Global() && |
163 // to get the dictionary, we'll validate that an SdchManager is available. | 164 SdchManager::Global()->IsInSupportedDomain(request_->url())) { |
164 // At shutdown time, care is taken to be sure that we don't delete this | 165 static const std::string name = "Get-Dictionary"; |
165 // globally useful instance "too soon," so this check is just defensive | 166 std::string url_text; |
166 // coding to assure that IF the system is shutting down, we don't have any | 167 void* iter = NULL; |
167 // problem if the manager was deleted ahead of time. | 168 // TODO(jar): We need to not fetch dictionaries the first time they are |
168 if (manager) // Defensive programming. | 169 // seen, but rather wait until we can justify their usefulness. |
169 manager->FetchDictionary(request_info_.url, sdch_dictionary_url_); | 170 // For now, we will only fetch the first dictionary, which will at least |
170 } | 171 // require multiple suggestions before we get additional ones for this site. |
171 } | 172 // Eventually we should wait until a dictionary is requested several times |
172 | 173 // before we even download it (so that we don't waste memory or bandwidth). |
173 void URLRequestHttpJob::SetUpload(UploadData* upload) { | 174 if (response_info_->headers->EnumerateHeader(&iter, name, &url_text)) { |
174 DCHECK(!transaction_.get()) << "cannot change once started"; | 175 // request_->url() won't be valid in the destructor, so we use an |
175 request_info_.upload_data = upload; | 176 // alternate copy. |
176 } | 177 DCHECK(request_->url() == request_info_.url); |
177 | 178 // Resolve suggested URL relative to request url. |
178 void URLRequestHttpJob::SetExtraRequestHeaders( | 179 sdch_dictionary_url_ = request_info_.url.Resolve(url_text); |
179 const HttpRequestHeaders& headers) { | 180 } |
180 DCHECK(!transaction_.get()) << "cannot change once started"; | 181 } |
181 request_info_.extra_headers.CopyFrom(headers); | 182 |
182 } | 183 // The HTTP transaction may be restarted several times for the purposes |
183 | 184 // of sending authorization information. Each time it restarts, we get |
184 void URLRequestHttpJob::Start() { | 185 // notified of the headers completion so that we can update the cookie store. |
185 DCHECK(!transaction_.get()); | 186 if (transaction_->IsReadyToRestartForAuth()) { |
186 | 187 DCHECK(!response_info_->auth_challenge.get()); |
187 // Ensure that we do not send username and password fields in the referrer. | 188 RestartTransactionWithAuth(string16(), string16()); |
188 GURL referrer(request_->GetSanitizedReferrer()); | |
189 | |
190 request_info_.url = request_->url(); | |
191 request_info_.referrer = referrer; | |
192 request_info_.method = request_->method(); | |
193 request_info_.load_flags = request_->load_flags(); | |
194 request_info_.priority = request_->priority(); | |
195 | |
196 if (request_->context()) { | |
197 request_info_.extra_headers.SetHeader( | |
198 HttpRequestHeaders::kUserAgent, | |
199 request_->context()->GetUserAgent(request_->url())); | |
200 } | |
201 | |
202 AddExtraHeaders(); | |
203 AddCookieHeaderAndStart(); | |
204 } | |
205 | |
206 void URLRequestHttpJob::Kill() { | |
207 if (!transaction_.get()) | |
208 return; | 189 return; |
209 | 190 } |
210 DestroyTransaction(); | 191 |
211 URLRequestJob::Kill(); | 192 URLRequestJob::NotifyHeadersComplete(); |
212 } | 193 } |
213 | 194 |
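
Editor's sketch: a condensed version of the Get-Dictionary lookup in NotifyHeadersComplete() above (FindDictionaryUrl is a hypothetical helper). HttpResponseHeaders::EnumerateHeader() walks occurrences of a header through an opaque |iter| cursor, and GURL::Resolve() interprets a possibly-relative value against the request URL:

#include <string>

#include "googleurl/src/gurl.h"
#include "net/http/http_response_headers.h"

GURL FindDictionaryUrl(const net::HttpResponseHeaders* headers,
                       const GURL& request_url) {
  std::string url_text;
  void* iter = NULL;
  // Only the first advertised dictionary is fetched (see the TODO above).
  if (headers->EnumerateHeader(&iter, "Get-Dictionary", &url_text))
    return request_url.Resolve(url_text);  // e.g. "/dict/v1" -> absolute URL.
  return GURL();
}
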
214 LoadState URLRequestHttpJob::GetLoadState() const { | 195 void URLRequestHttpJob::DestroyTransaction() { |
215 return transaction_.get() ? | |
216 transaction_->GetLoadState() : LOAD_STATE_IDLE; | |
217 } | |
218 | |
219 uint64 URLRequestHttpJob::GetUploadProgress() const { | |
220 return transaction_.get() ? transaction_->GetUploadProgress() : 0; | |
221 } | |
222 | |
223 bool URLRequestHttpJob::GetMimeType(std::string* mime_type) const { | |
224 DCHECK(transaction_.get()); | 196 DCHECK(transaction_.get()); |
225 | 197 |
226 if (!response_info_) | 198 transaction_.reset(); |
227 return false; | 199 response_info_ = NULL; |
228 | 200 context_ = NULL; |
229 return response_info_->headers->GetMimeType(mime_type); | 201 } |
230 } | 202 |
231 | 203 void URLRequestHttpJob::StartTransaction() { |
232 bool URLRequestHttpJob::GetCharset(std::string* charset) { | 204 // NOTE: This method assumes that request_info_ is already set up properly. |
233 DCHECK(transaction_.get()); | 205 |
234 | 206 // If we already have a transaction, then we should restart the transaction |
235 if (!response_info_) | 207 // with auth provided by username_ and password_. |
236 return false; | 208 |
237 | 209 int rv; |
238 return response_info_->headers->GetCharset(charset); | 210 |
239 } | 211 if (transaction_.get()) { |
240 | 212 rv = transaction_->RestartWithAuth(username_, password_, &start_callback_); |
241 void URLRequestHttpJob::GetResponseInfo(HttpResponseInfo* info) { | 213 username_.clear(); |
242 DCHECK(request_); | 214 password_.clear(); |
243 DCHECK(transaction_.get()); | |
244 | |
245 if (response_info_) | |
246 *info = *response_info_; | |
247 } | |
248 | |
249 bool URLRequestHttpJob::GetResponseCookies( | |
250 std::vector<std::string>* cookies) { | |
251 DCHECK(transaction_.get()); | |
252 | |
253 if (!response_info_) | |
254 return false; | |
255 | |
256 // TODO(darin): Why are we extracting response cookies again? Perhaps we | |
257 // should just leverage response_cookies_. | |
258 | |
259 cookies->clear(); | |
260 FetchResponseCookies(response_info_, cookies); | |
261 return true; | |
262 } | |
263 | |
264 int URLRequestHttpJob::GetResponseCode() const { | |
265 DCHECK(transaction_.get()); | |
266 | |
267 if (!response_info_) | |
268 return -1; | |
269 | |
270 return response_info_->headers->response_code(); | |
271 } | |
272 | |
273 bool URLRequestHttpJob::GetContentEncodings( | |
274 std::vector<Filter::FilterType>* encoding_types) { | |
275 DCHECK(transaction_.get()); | |
276 if (!response_info_) | |
277 return false; | |
278 DCHECK(encoding_types->empty()); | |
279 | |
280 std::string encoding_type; | |
281 void* iter = NULL; | |
282 while (response_info_->headers->EnumerateHeader(&iter, "Content-Encoding", | |
283 &encoding_type)) { | |
284 encoding_types->push_back(Filter::ConvertEncodingToType(encoding_type)); | |
285 } | |
286 | |
287 // Even if encoding types are empty, there is a chance that we need to add | |
288 // some decoding, as some proxies strip encoding completely. In such cases, | |
289 // we may need to add (for example) SDCH filtering (when the context suggests | |
290 // it is appropriate). | |
291 Filter::FixupEncodingTypes(*this, encoding_types); | |
292 | |
293 return !encoding_types->empty(); | |
294 } | |
295 | |
296 bool URLRequestHttpJob::IsCachedContent() const { | |
297 return is_cached_content_; | |
298 } | |
299 | |
300 bool URLRequestHttpJob::IsSdchResponse() const { | |
301 return sdch_dictionary_advertised_; | |
302 } | |
303 | |
304 bool URLRequestHttpJob::IsSafeRedirect(const GURL& location) { | |
305 // We only allow redirects to certain "safe" protocols. This does not | |
306 // restrict redirects to externally handled protocols. Our consumer would | |
307 // need to take care of those. | |
308 | |
309 if (!URLRequest::IsHandledURL(location)) | |
310 return true; | |
311 | |
312 static const char* kSafeSchemes[] = { | |
313 "http", | |
314 "https", | |
315 "ftp" | |
316 }; | |
317 | |
318 for (size_t i = 0; i < arraysize(kSafeSchemes); ++i) { | |
319 if (location.SchemeIs(kSafeSchemes[i])) | |
320 return true; | |
321 } | |
322 | |
323 return false; | |
324 } | |
325 | |
326 bool URLRequestHttpJob::NeedsAuth() { | |
327 int code = GetResponseCode(); | |
328 if (code == -1) | |
329 return false; | |
330 | |
331 // Check if we need either Proxy or WWW Authentication. This could happen | |
332 // because we either provided no auth info, or provided incorrect info. | |
333 switch (code) { | |
334 case 407: | |
335 if (proxy_auth_state_ == AUTH_STATE_CANCELED) | |
336 return false; | |
337 proxy_auth_state_ = AUTH_STATE_NEED_AUTH; | |
338 return true; | |
339 case 401: | |
340 if (server_auth_state_ == AUTH_STATE_CANCELED) | |
341 return false; | |
342 server_auth_state_ = AUTH_STATE_NEED_AUTH; | |
343 return true; | |
344 } | |
345 return false; | |
346 } | |
347 | |
348 void URLRequestHttpJob::GetAuthChallengeInfo( | |
349 scoped_refptr<AuthChallengeInfo>* result) { | |
350 DCHECK(transaction_.get()); | |
351 DCHECK(response_info_); | |
352 | |
353 // sanity checks: | |
354 DCHECK(proxy_auth_state_ == AUTH_STATE_NEED_AUTH || | |
355 server_auth_state_ == AUTH_STATE_NEED_AUTH); | |
356 DCHECK(response_info_->headers->response_code() == 401 || | |
357 response_info_->headers->response_code() == 407); | |
358 | |
359 *result = response_info_->auth_challenge; | |
360 } | |
361 | |
362 void URLRequestHttpJob::SetAuth(const string16& username, | |
363 const string16& password) { | |
364 DCHECK(transaction_.get()); | |
365 | |
366 // Proxy gets set first, then WWW. | |
367 if (proxy_auth_state_ == AUTH_STATE_NEED_AUTH) { | |
368 proxy_auth_state_ = AUTH_STATE_HAVE_AUTH; | |
369 } else { | 215 } else { |
370 DCHECK(server_auth_state_ == AUTH_STATE_NEED_AUTH); | 216 DCHECK(request_->context()); |
371 server_auth_state_ = AUTH_STATE_HAVE_AUTH; | 217 DCHECK(request_->context()->http_transaction_factory()); |
372 } | 218 |
373 | 219 rv = request_->context()->http_transaction_factory()->CreateTransaction( |
374 RestartTransactionWithAuth(username, password); | 220 &transaction_); |
375 } | 221 if (rv == OK) { |
376 | 222 if (!throttling_entry_->IsDuringExponentialBackoff() || |
377 void URLRequestHttpJob::RestartTransactionWithAuth( | 223 !net::URLRequestThrottlerManager::GetInstance()-> |
378 const string16& username, | 224 enforce_throttling()) { |
379 const string16& password) { | 225 rv = transaction_->Start( |
380 username_ = username; | 226 &request_info_, &start_callback_, request_->net_log()); |
381 password_ = password; | 227 } else { |
382 | 228 // Special error code for the exponential back-off module. |
383 // These will be reset in OnStartCompleted. | 229 rv = ERR_TEMPORARILY_THROTTLED; |
384 response_info_ = NULL; | 230 } |
385 response_cookies_.clear(); | 231 // Make sure the context is alive for the duration of the |
386 | 232 // transaction. |
387 // Update the cookies, since the cookie store may have been updated from the | 233 context_ = request_->context(); |
388 // headers in the 401/407. Since cookies were already appended to | 234 } |
389 // extra_headers, we need to strip them out before adding them again. | 235 } |
390 request_info_.extra_headers.RemoveHeader( | 236 |
391 HttpRequestHeaders::kCookie); | |
392 | |
393 AddCookieHeaderAndStart(); | |
394 } | |
395 | |
396 void URLRequestHttpJob::CancelAuth() { | |
397 // Proxy gets set first, then WWW. | |
398 if (proxy_auth_state_ == AUTH_STATE_NEED_AUTH) { | |
399 proxy_auth_state_ = AUTH_STATE_CANCELED; | |
400 } else { | |
401 DCHECK(server_auth_state_ == AUTH_STATE_NEED_AUTH); | |
402 server_auth_state_ = AUTH_STATE_CANCELED; | |
403 } | |
404 | |
405 // These will be reset in OnStartCompleted. | |
406 response_info_ = NULL; | |
407 response_cookies_.clear(); | |
408 | |
409 // OK, let the consumer read the error page... | |
410 // | |
411 // Because we set the AUTH_STATE_CANCELED flag, NeedsAuth will return false, | |
412 // which will cause the consumer to receive OnResponseStarted instead of | |
413 // OnAuthRequired. | |
414 // | |
415 // We have to do this via InvokeLater to avoid "recursing" the consumer. | |
416 // | |
417 MessageLoop::current()->PostTask( | |
418 FROM_HERE, | |
419 method_factory_.NewRunnableMethod( | |
420 &URLRequestHttpJob::OnStartCompleted, OK)); | |
421 } | |
422 | |
423 void URLRequestHttpJob::ContinueWithCertificate( | |
424 X509Certificate* client_cert) { | |
425 DCHECK(transaction_.get()); | |
426 | |
427 DCHECK(!response_info_) << "should not have a response yet"; | |
428 | |
429 // No matter what, we want to report our status as IO pending since we will | |
430 // be notifying our consumer asynchronously via OnStartCompleted. | |
431 SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0)); | |
432 | |
433 int rv = transaction_->RestartWithCertificate(client_cert, &start_callback_); | |
434 if (rv == ERR_IO_PENDING) | 237 if (rv == ERR_IO_PENDING) |
435 return; | 238 return; |
436 | 239 |
437 // The transaction started synchronously, but we need to notify the | 240 // The transaction started synchronously, but we need to notify the |
438 // URLRequest delegate via the message loop. | 241 // URLRequest delegate via the message loop. |
439 MessageLoop::current()->PostTask( | 242 MessageLoop::current()->PostTask( |
440 FROM_HERE, | 243 FROM_HERE, |
441 method_factory_.NewRunnableMethod( | 244 method_factory_.NewRunnableMethod( |
442 &URLRequestHttpJob::OnStartCompleted, rv)); | 245 &URLRequestHttpJob::OnStartCompleted, rv)); |
443 } | 246 } |
444 | 247 |
445 void URLRequestHttpJob::ContinueDespiteLastError() { | 248 void URLRequestHttpJob::AddExtraHeaders() { |
446 // If the transaction was destroyed, then the job was cancelled. | 249 // TODO(jar): Consider optimizing away SDCH advertising bytes when the URL is |
447 if (!transaction_.get()) | 250 // probably an img or such (and SDCH encoding is not likely). |
448 return; | 251 bool advertise_sdch = SdchManager::Global() && |
449 | 252 SdchManager::Global()->IsInSupportedDomain(request_->url()); |
450 DCHECK(!response_info_) << "should not have a response yet"; | 253 std::string avail_dictionaries; |
451 | 254 if (advertise_sdch) { |
| 255 SdchManager::Global()->GetAvailDictionaryList(request_->url(), |
| 256 &avail_dictionaries); |
| 257 |
| 258 // The AllowLatencyExperiment() is only true if we've successfully done a |
| 259 // full SDCH compression recently in this browser session for this host. |
| 260 // Note that for this path, there might be no applicable dictionaries, and |
| 261 // hence we can't participate in the experiment. |
| 262 if (!avail_dictionaries.empty() && |
| 263 SdchManager::Global()->AllowLatencyExperiment(request_->url())) { |
| 264 // We are participating in the test (or control), and hence we'll |
| 265 // eventually record statistics via either SDCH_EXPERIMENT_DECODE or |
| 266 // SDCH_EXPERIMENT_HOLDBACK, and we'll need some packet timing data. |
| 267 EnablePacketCounting(kSdchPacketHistogramCount); |
| 268 if (base::RandDouble() < .01) { |
| 269 sdch_test_control_ = true; // 1% probability. |
| 270 advertise_sdch = false; |
| 271 } else { |
| 272 sdch_test_activated_ = true; |
| 273 } |
| 274 } |
| 275 } |
| 276 |
| 277 // Supply Accept-Encoding headers first so that it is more likely that they |
| 278 // will be in the first transmitted packet. This can sometimes make it easier |
| 279 // to filter and analyze the streams to assure that a proxy has not damaged |
| 280 // these headers. Some proxies deliberately corrupt Accept-Encoding headers. |
| 281 if (!advertise_sdch) { |
| 282 // Tell the server what compression formats we support (other than SDCH). |
| 283 request_info_.extra_headers.SetHeader( |
| 284 HttpRequestHeaders::kAcceptEncoding, "gzip,deflate"); |
| 285 } else { |
| 286 // Include SDCH in acceptable list. |
| 287 request_info_.extra_headers.SetHeader( |
| 288 HttpRequestHeaders::kAcceptEncoding, "gzip,deflate,sdch"); |
| 289 if (!avail_dictionaries.empty()) { |
| 290 request_info_.extra_headers.SetHeader( |
| 291 kAvailDictionaryHeader, |
| 292 avail_dictionaries); |
| 293 sdch_dictionary_advertised_ = true; |
| 294 // Since we're tagging this transaction as advertising a dictionary, we'll |
| 295 // definitely employ an SDCH filter (or tentative sdch filter) when we get |
| 296 // a response. When done, we'll record histograms via SDCH_DECODE or |
| 297 // SDCH_PASSTHROUGH. Hence we need to record packet arrival times. |
| 298 EnablePacketCounting(kSdchPacketHistogramCount); |
| 299 } |
| 300 } |
| 301 |
| 302 URLRequestContext* context = request_->context(); |
| 303 if (context) { |
| 304 // Only add default Accept-Language and Accept-Charset if the request |
| 305 // didn't have them specified. |
| 306 if (!request_info_.extra_headers.HasHeader( |
| 307 HttpRequestHeaders::kAcceptLanguage)) { |
| 308 request_info_.extra_headers.SetHeader( |
| 309 HttpRequestHeaders::kAcceptLanguage, |
| 310 context->accept_language()); |
| 311 } |
| 312 if (!request_info_.extra_headers.HasHeader( |
| 313 HttpRequestHeaders::kAcceptCharset)) { |
| 314 request_info_.extra_headers.SetHeader( |
| 315 HttpRequestHeaders::kAcceptCharset, |
| 316 context->accept_charset()); |
| 317 } |
| 318 } |
| 319 } |
| 320 |
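
Editor's sketch: the 1% split in AddExtraHeaders() above is plain uniform sampling. A self-contained version, assuming only base::RandDouble() (uniform in [0, 1)) and hypothetical out-parameter names:

#include "base/rand_util.h"

void AssignSdchExperimentGroup(bool* test_control, bool* test_activated) {
  if (base::RandDouble() < 0.01)
    *test_control = true;    // ~1%: holdback; SDCH advertising suppressed.
  else
    *test_activated = true;  // ~99%: SDCH advertised as usual.
}
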
| 321 void URLRequestHttpJob::AddCookieHeaderAndStart() { |
452 // No matter what, we want to report our status as IO pending since we will | 322 // No matter what, we want to report our status as IO pending since we will |
453 // be notifying our consumer asynchronously via OnStartCompleted. | 323 // be notifying our consumer asynchronously via OnStartCompleted. |
454 SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0)); | 324 SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0)); |
455 | 325 |
456 int rv = transaction_->RestartIgnoringLastError(&start_callback_); | 326 AddRef(); // Balanced in OnCanGetCookiesCompleted |
457 if (rv == ERR_IO_PENDING) | 327 |
| 328 int policy = OK; |
| 329 |
| 330 if (request_info_.load_flags & LOAD_DO_NOT_SEND_COOKIES) { |
| 331 policy = ERR_FAILED; |
| 332 } else if (request_->context()->cookie_policy()) { |
| 333 policy = request_->context()->cookie_policy()->CanGetCookies( |
| 334 request_->url(), |
| 335 request_->first_party_for_cookies(), |
| 336 &can_get_cookies_callback_); |
| 337 if (policy == ERR_IO_PENDING) |
| 338 return; // Wait for completion callback |
| 339 } |
| 340 |
| 341 OnCanGetCookiesCompleted(policy); |
| 342 } |
| 343 |
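
Editor's sketch: AddCookieHeaderAndStart() above illustrates the recurring sync-or-async contract in this file: a policy call either answers inline or returns ERR_IO_PENDING and answers later through the callback, and both paths must converge on the same completion method. A distilled, self-contained sketch (all names hypothetical; the constant is a stand-in for the net:: code):

static const int OK = 0;
static const int ERR_IO_PENDING = -1;  // Stand-in for net::ERR_IO_PENDING.

class PolicyClient {
 public:
  void Check(bool async) {
    int rv = StartCheck(async);
    if (rv == ERR_IO_PENDING)
      return;  // OnCheckCompleted(rv) will be invoked by the callback later.
    OnCheckCompleted(rv);  // Synchronous answer: complete inline.
  }

  // Single convergence point for both the sync and async paths.
  void OnCheckCompleted(int rv) { /* proceed or fail based on |rv| */ }

 private:
  int StartCheck(bool async) { return async ? ERR_IO_PENDING : OK; }
};
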
| 344 void URLRequestHttpJob::SaveCookiesAndNotifyHeadersComplete() { |
| 345 DCHECK(transaction_.get()); |
| 346 |
| 347 const HttpResponseInfo* response_info = transaction_->GetResponseInfo(); |
| 348 DCHECK(response_info); |
| 349 |
| 350 response_cookies_.clear(); |
| 351 response_cookies_save_index_ = 0; |
| 352 |
| 353 FetchResponseCookies(response_info, &response_cookies_); |
| 354 |
| 355 // Now, loop over the response cookies, and attempt to persist each. |
| 356 SaveNextCookie(); |
| 357 } |
| 358 |
| 359 void URLRequestHttpJob::SaveNextCookie() { |
| 360 if (response_cookies_save_index_ == response_cookies_.size()) { |
| 361 response_cookies_.clear(); |
| 362 response_cookies_save_index_ = 0; |
| 363 SetStatus(URLRequestStatus()); // Clear the IO_PENDING status |
| 364 NotifyHeadersComplete(); |
458 return; | 365 return; |
459 | 366 } |
460 // The transaction started synchronously, but we need to notify the | 367 |
461 // URLRequest delegate via the message loop. | 368 // No matter what, we want to report our status as IO pending since we will |
462 MessageLoop::current()->PostTask( | 369 // be notifying our consumer asynchronously via OnStartCompleted. |
463 FROM_HERE, | 370 SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0)); |
464 method_factory_.NewRunnableMethod( | 371 |
465 &URLRequestHttpJob::OnStartCompleted, rv)); | 372 AddRef(); // Balanced in OnCanSetCookieCompleted |
466 } | 373 |
467 | 374 int policy = OK; |
468 bool URLRequestHttpJob::ReadRawData(IOBuffer* buf, int buf_size, | 375 |
469 int *bytes_read) { | 376 if (request_info_.load_flags & LOAD_DO_NOT_SAVE_COOKIES) { |
470 DCHECK_NE(buf_size, 0); | 377 policy = ERR_FAILED; |
471 DCHECK(bytes_read); | 378 } else if (request_->context()->cookie_policy()) { |
472 DCHECK(!read_in_progress_); | 379 policy = request_->context()->cookie_policy()->CanSetCookie( |
473 | 380 request_->url(), |
474 int rv = transaction_->Read(buf, buf_size, &read_callback_); | 381 request_->first_party_for_cookies(), |
475 if (rv >= 0) { | 382 response_cookies_[response_cookies_save_index_], |
476 *bytes_read = rv; | 383 &can_set_cookie_callback_); |
477 return true; | 384 if (policy == ERR_IO_PENDING) |
478 } | 385 return; // Wait for completion callback |
479 | 386 } |
480 if (rv == ERR_IO_PENDING) { | 387 |
481 read_in_progress_ = true; | 388 OnCanSetCookieCompleted(policy); |
482 SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0)); | 389 } |
483 } else { | 390 |
484 NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, rv)); | 391 void URLRequestHttpJob::FetchResponseCookies( |
485 } | 392 const HttpResponseInfo* response_info, |
486 | 393 std::vector<std::string>* cookies) { |
487 return false; | 394 std::string name = "Set-Cookie"; |
488 } | 395 std::string value; |
489 | 396 |
490 void URLRequestHttpJob::StopCaching() { | 397 void* iter = NULL; |
491 if (transaction_.get()) | 398 while (response_info->headers->EnumerateHeader(&iter, name, &value)) { |
492 transaction_->StopCaching(); | 399 if (!value.empty()) |
| 400 cookies->push_back(value); |
| 401 } |
| 402 } |
| 403 |
| 404 void URLRequestHttpJob::ProcessStrictTransportSecurityHeader() { |
| 405 DCHECK(response_info_); |
| 406 |
| 407 URLRequestContext* ctx = request_->context(); |
| 408 if (!ctx || !ctx->transport_security_state()) |
| 409 return; |
| 410 |
| 411 const bool https = response_info_->ssl_info.is_valid(); |
| 412 const bool valid_https = |
| 413 https && !IsCertStatusError(response_info_->ssl_info.cert_status); |
| 414 |
| 415 std::string name = "Strict-Transport-Security"; |
| 416 std::string value; |
| 417 |
| 418 int max_age; |
| 419 bool include_subdomains; |
| 420 |
| 421 void* iter = NULL; |
| 422 while (response_info_->headers->EnumerateHeader(&iter, name, &value)) { |
| 423 const bool ok = TransportSecurityState::ParseHeader( |
| 424 value, &max_age, &include_subdomains); |
| 425 if (!ok) |
| 426 continue; |
| 427 // We will only accept strict mode if we saw the header from an HTTPS |
| 428 // connection with no certificate problems. |
| 429 if (!valid_https) |
| 430 continue; |
| 431 base::Time current_time(base::Time::Now()); |
| 432 base::TimeDelta max_age_delta = base::TimeDelta::FromSeconds(max_age); |
| 433 |
| 434 TransportSecurityState::DomainState domain_state; |
| 435 domain_state.expiry = current_time + max_age_delta; |
| 436 domain_state.mode = TransportSecurityState::DomainState::MODE_STRICT; |
| 437 domain_state.include_subdomains = include_subdomains; |
| 438 |
| 439 ctx->transport_security_state()->EnableHost(request_info_.url.host(), |
| 440 domain_state); |
| 441 } |
| 442 |
| 443 // TODO(agl): change this over when we have fixed things at the server end. |
| 444 // The string should be "Opportunistic-Transport-Security"; |
| 445 name = "X-Bodge-Transport-Security"; |
| 446 |
| 447 while (response_info_->headers->EnumerateHeader(&iter, name, &value)) { |
| 448 const bool ok = TransportSecurityState::ParseHeader( |
| 449 value, &max_age, &include_subdomains); |
| 450 if (!ok) |
| 451 continue; |
| 452 // If we saw an opportunistic request over HTTPS, then clearly we can make |
| 453 // HTTPS connections to the host so we should remember this. |
| 454 if (https) { |
| 455 base::Time current_time(base::Time::Now()); |
| 456 base::TimeDelta max_age_delta = base::TimeDelta::FromSeconds(max_age); |
| 457 |
| 458 TransportSecurityState::DomainState domain_state; |
| 459 domain_state.expiry = current_time + max_age_delta; |
| 460 domain_state.mode = |
| 461 TransportSecurityState::DomainState::MODE_SPDY_ONLY; |
| 462 domain_state.include_subdomains = include_subdomains; |
| 463 |
| 464 ctx->transport_security_state()->EnableHost(request_info_.url.host(), |
| 465 domain_state); |
| 466 continue; |
| 467 } |
| 468 |
| 469 if (!request()) |
| 470 break; |
| 471 |
| 472 // At this point, we have a request for opportunistic encryption over HTTP. |
| 473 // In this case we need to probe to check that we can make HTTPS |
| 474 // connections to that host. |
| 475 HTTPSProber* const prober = HTTPSProber::GetInstance(); |
| 476 if (prober->HaveProbed(request_info_.url.host()) || |
| 477 prober->InFlight(request_info_.url.host())) { |
| 478 continue; |
| 479 } |
| 480 |
| 481 HTTPSProberDelegateImpl* delegate = |
| 482 new HTTPSProberDelegateImpl(request_info_.url.host(), max_age, |
| 483 include_subdomains, |
| 484 ctx->transport_security_state()); |
| 485 if (!prober->ProbeHost(request_info_.url.host(), request()->context(), |
| 486 delegate)) { |
| 487 delete delegate; |
| 488 } |
| 489 } |
493 } | 490 } |
494 | 491 |
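
Editor's sketch: the expiry arithmetic in ProcessStrictTransportSecurityHeader() above is simply now + max-age. A minimal version using the base::Time API of this era (ComputeStsExpiry is a hypothetical helper):

#include "base/time.h"

// An STS entry expires max-age seconds after the header is observed.
base::Time ComputeStsExpiry(int max_age_seconds) {
  return base::Time::Now() + base::TimeDelta::FromSeconds(max_age_seconds);
}
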
495 void URLRequestHttpJob::OnCanGetCookiesCompleted(int policy) { | 492 void URLRequestHttpJob::OnCanGetCookiesCompleted(int policy) { |
496 // If the request was destroyed, then there is no more work to do. | 493 // If the request was destroyed, then there is no more work to do. |
497 if (request_ && request_->delegate()) { | 494 if (request_ && request_->delegate()) { |
498 if (request_->context()->cookie_store()) { | 495 if (request_->context()->cookie_store()) { |
499 if (policy == ERR_ACCESS_DENIED) { | 496 if (policy == ERR_ACCESS_DENIED) { |
500 request_->delegate()->OnGetCookies(request_, true); | 497 request_->delegate()->OnGetCookies(request_, true); |
501 } else if (policy == OK) { | 498 } else if (policy == OK) { |
502 request_->delegate()->OnGetCookies(request_, false); | 499 request_->delegate()->OnGetCookies(request_, false); |
(...skipping 110 matching lines...) |
613 | 610 |
614 TransportSecurityState::DomainState domain_state; | 611 TransportSecurityState::DomainState domain_state; |
615 // TODO(agl): don't ignore opportunistic mode. | 612 // TODO(agl): don't ignore opportunistic mode. |
616 const bool r = context_->transport_security_state()->IsEnabledForHost( | 613 const bool r = context_->transport_security_state()->IsEnabledForHost( |
617 &domain_state, request_info_.url.host()); | 614 &domain_state, request_info_.url.host()); |
618 | 615 |
619 return !r || domain_state.mode == | 616 return !r || domain_state.mode == |
620 TransportSecurityState::DomainState::MODE_OPPORTUNISTIC; | 617 TransportSecurityState::DomainState::MODE_OPPORTUNISTIC; |
621 } | 618 } |
622 | 619 |
623 void URLRequestHttpJob::NotifyHeadersComplete() { | 620 void URLRequestHttpJob::RestartTransactionWithAuth( |
624 DCHECK(!response_info_); | 621 const string16& username, |
625 | 622 const string16& password) { |
626 response_info_ = transaction_->GetResponseInfo(); | 623 username_ = username; |
627 | 624 password_ = password; |
628 // Save boolean, as we'll need this info at destruction time, and filters may | 625 |
629 // also need this info. | 626 // These will be reset in OnStartCompleted. |
630 is_cached_content_ = response_info_->was_cached; | 627 response_info_ = NULL; |
631 | 628 response_cookies_.clear(); |
632 if (!is_cached_content_) { | 629 |
633 URLRequestThrottlerHeaderAdapter response_adapter( | 630 // Update the cookies, since the cookie store may have been updated from the |
634 response_info_->headers); | 631 // headers in the 401/407. Since cookies were already appended to |
635 throttling_entry_->UpdateWithResponse(&response_adapter); | 632 // extra_headers, we need to strip them out before adding them again. |
636 } | 633 request_info_.extra_headers.RemoveHeader( |
637 | 634 HttpRequestHeaders::kCookie); |
638 ProcessStrictTransportSecurityHeader(); | 635 |
639 | 636 AddCookieHeaderAndStart(); |
640 if (SdchManager::Global() && | 637 } |
641 SdchManager::Global()->IsInSupportedDomain(request_->url())) { | 638 |
642 static const std::string name = "Get-Dictionary"; | 639 void URLRequestHttpJob::SetUpload(UploadData* upload) { |
643 std::string url_text; | 640 DCHECK(!transaction_.get()) << "cannot change once started"; |
644 void* iter = NULL; | 641 request_info_.upload_data = upload; |
645 // TODO(jar): We need to not fetch dictionaries the first time they are | 642 } |
646 // seen, but rather wait until we can justify their usefulness. | 643 |
647 // For now, we will only fetch the first dictionary, which will at least | 644 void URLRequestHttpJob::SetExtraRequestHeaders( |
648 // require multiple suggestions before we get additional ones for this site. | 645 const HttpRequestHeaders& headers) { |
649 // Eventually we should wait until a dictionary is requested several times | 646 DCHECK(!transaction_.get()) << "cannot change once started"; |
650 // before we even download it (so that we don't waste memory or bandwidth). | 647 request_info_.extra_headers.CopyFrom(headers); |
651 if (response_info_->headers->EnumerateHeader(&iter, name, &url_text)) { | 648 } |
652 // request_->url() won't be valid in the destructor, so we use an | 649 |
653 // alternate copy. | 650 void URLRequestHttpJob::Start() { |
654 DCHECK(request_->url() == request_info_.url); | 651 DCHECK(!transaction_.get()); |
655 // Resolve suggested URL relative to request url. | 652 |
656 sdch_dictionary_url_ = request_info_.url.Resolve(url_text); | 653 // Ensure that we do not send username and password fields in the referrer. |
657 } | 654 GURL referrer(request_->GetSanitizedReferrer()); |
658 } | 655 |
659 | 656 request_info_.url = request_->url(); |
660 // The HTTP transaction may be restarted several times for the purposes | 657 request_info_.referrer = referrer; |
661 // of sending authorization information. Each time it restarts, we get | 658 request_info_.method = request_->method(); |
662 // notified of the headers completion so that we can update the cookie store. | 659 request_info_.load_flags = request_->load_flags(); |
663 if (transaction_->IsReadyToRestartForAuth()) { | 660 request_info_.priority = request_->priority(); |
664 DCHECK(!response_info_->auth_challenge.get()); | 661 |
665 RestartTransactionWithAuth(string16(), string16()); | 662 if (request_->context()) { |
| 663 request_info_.extra_headers.SetHeader( |
| 664 HttpRequestHeaders::kUserAgent, |
| 665 request_->context()->GetUserAgent(request_->url())); |
| 666 } |
| 667 |
| 668 AddExtraHeaders(); |
| 669 AddCookieHeaderAndStart(); |
| 670 } |
| 671 |
| 672 void URLRequestHttpJob::Kill() { |
| 673 if (!transaction_.get()) |
666 return; | 674 return; |
667 } | 675 |
668 | 676 DestroyTransaction(); |
669 URLRequestJob::NotifyHeadersComplete(); | 677 URLRequestJob::Kill(); |
670 } | 678 } |
671 | 679 |
672 void URLRequestHttpJob::DestroyTransaction() { | 680 LoadState URLRequestHttpJob::GetLoadState() const { |
673 DCHECK(transaction_.get()); | 681 return transaction_.get() ? |
674 | 682 transaction_->GetLoadState() : LOAD_STATE_IDLE; |
675 transaction_.reset(); | 683 } |
| 684 |
| 685 uint64 URLRequestHttpJob::GetUploadProgress() const { |
| 686 return transaction_.get() ? transaction_->GetUploadProgress() : 0; |
| 687 } |
| 688 |
| 689 bool URLRequestHttpJob::GetMimeType(std::string* mime_type) const { |
| 690 DCHECK(transaction_.get()); |
| 691 |
| 692 if (!response_info_) |
| 693 return false; |
| 694 |
| 695 return response_info_->headers->GetMimeType(mime_type); |
| 696 } |
| 697 |
| 698 bool URLRequestHttpJob::GetCharset(std::string* charset) { |
| 699 DCHECK(transaction_.get()); |
| 700 |
| 701 if (!response_info_) |
| 702 return false; |
| 703 |
| 704 return response_info_->headers->GetCharset(charset); |
| 705 } |
| 706 |
| 707 void URLRequestHttpJob::GetResponseInfo(HttpResponseInfo* info) { |
| 708 DCHECK(request_); |
| 709 DCHECK(transaction_.get()); |
| 710 |
| 711 if (response_info_) |
| 712 *info = *response_info_; |
| 713 } |
| 714 |
| 715 bool URLRequestHttpJob::GetResponseCookies( |
| 716 std::vector<std::string>* cookies) { |
| 717 DCHECK(transaction_.get()); |
| 718 |
| 719 if (!response_info_) |
| 720 return false; |
| 721 |
| 722 // TODO(darin): Why are we extracting response cookies again? Perhaps we |
| 723 // should just leverage response_cookies_. |
| 724 |
| 725 cookies->clear(); |
| 726 FetchResponseCookies(response_info_, cookies); |
| 727 return true; |
| 728 } |
| 729 |
| 730 int URLRequestHttpJob::GetResponseCode() const { |
| 731 DCHECK(transaction_.get()); |
| 732 |
| 733 if (!response_info_) |
| 734 return -1; |
| 735 |
| 736 return response_info_->headers->response_code(); |
| 737 } |
| 738 |
| 739 bool URLRequestHttpJob::GetContentEncodings( |
| 740 std::vector<Filter::FilterType>* encoding_types) { |
| 741 DCHECK(transaction_.get()); |
| 742 if (!response_info_) |
| 743 return false; |
| 744 DCHECK(encoding_types->empty()); |
| 745 |
| 746 std::string encoding_type; |
| 747 void* iter = NULL; |
| 748 while (response_info_->headers->EnumerateHeader(&iter, "Content-Encoding", |
| 749 &encoding_type)) { |
| 750 encoding_types->push_back(Filter::ConvertEncodingToType(encoding_type)); |
| 751 } |
| 752 |
| 753 // Even if encoding types are empty, there is a chance that we need to add |
| 754 // some decoding, as some proxies strip encoding completely. In such cases, |
| 755 // we may need to add (for example) SDCH filtering (when the context suggests |
| 756 // it is appropriate). |
| 757 Filter::FixupEncodingTypes(*this, encoding_types); |
| 758 |
| 759 return !encoding_types->empty(); |
| 760 } |
| 761 |
| 762 bool URLRequestHttpJob::IsCachedContent() const { |
| 763 return is_cached_content_; |
| 764 } |
| 765 |
| 766 bool URLRequestHttpJob::IsSdchResponse() const { |
| 767 return sdch_dictionary_advertised_; |
| 768 } |
| 769 |
| 770 bool URLRequestHttpJob::IsSafeRedirect(const GURL& location) { |
| 771 // We only allow redirects to certain "safe" protocols. This does not |
| 772 // restrict redirects to externally handled protocols. Our consumer would |
| 773 // need to take care of those. |
| 774 |
| 775 if (!URLRequest::IsHandledURL(location)) |
| 776 return true; |
| 777 |
| 778 static const char* kSafeSchemes[] = { |
| 779 "http", |
| 780 "https", |
| 781 "ftp" |
| 782 }; |
| 783 |
| 784 for (size_t i = 0; i < arraysize(kSafeSchemes); ++i) { |
| 785 if (location.SchemeIs(kSafeSchemes[i])) |
| 786 return true; |
| 787 } |
| 788 |
| 789 return false; |
| 790 } |
| 791 |
| 792 bool URLRequestHttpJob::NeedsAuth() { |
| 793 int code = GetResponseCode(); |
| 794 if (code == -1) |
| 795 return false; |
| 796 |
| 797 // Check if we need either Proxy or WWW Authentication. This could happen |
| 798 // because we either provided no auth info, or provided incorrect info. |
| 799 switch (code) { |
| 800 case 407: |
| 801 if (proxy_auth_state_ == AUTH_STATE_CANCELED) |
| 802 return false; |
| 803 proxy_auth_state_ = AUTH_STATE_NEED_AUTH; |
| 804 return true; |
| 805 case 401: |
| 806 if (server_auth_state_ == AUTH_STATE_CANCELED) |
| 807 return false; |
| 808 server_auth_state_ = AUTH_STATE_NEED_AUTH; |
| 809 return true; |
| 810 } |
| 811 return false; |
| 812 } |
| 813 |
| 814 void URLRequestHttpJob::GetAuthChallengeInfo( |
| 815 scoped_refptr<AuthChallengeInfo>* result) { |
| 816 DCHECK(transaction_.get()); |
| 817 DCHECK(response_info_); |
| 818 |
| 819 // sanity checks: |
| 820 DCHECK(proxy_auth_state_ == AUTH_STATE_NEED_AUTH || |
| 821 server_auth_state_ == AUTH_STATE_NEED_AUTH); |
| 822 DCHECK(response_info_->headers->response_code() == 401 || |
| 823 response_info_->headers->response_code() == 407); |
| 824 |
| 825 *result = response_info_->auth_challenge; |
| 826 } |
| 827 |
| 828 void URLRequestHttpJob::SetAuth(const string16& username, |
| 829 const string16& password) { |
| 830 DCHECK(transaction_.get()); |
| 831 |
| 832 // Proxy gets set first, then WWW. |
| 833 if (proxy_auth_state_ == AUTH_STATE_NEED_AUTH) { |
| 834 proxy_auth_state_ = AUTH_STATE_HAVE_AUTH; |
| 835 } else { |
| 836 DCHECK(server_auth_state_ == AUTH_STATE_NEED_AUTH); |
| 837 server_auth_state_ = AUTH_STATE_HAVE_AUTH; |
| 838 } |
| 839 |
| 840 RestartTransactionWithAuth(username, password); |
| 841 } |
| 842 |
| 843 void URLRequestHttpJob::CancelAuth() { |
| 844 // Proxy gets set first, then WWW. |
| 845 if (proxy_auth_state_ == AUTH_STATE_NEED_AUTH) { |
| 846 proxy_auth_state_ = AUTH_STATE_CANCELED; |
| 847 } else { |
| 848 DCHECK(server_auth_state_ == AUTH_STATE_NEED_AUTH); |
| 849 server_auth_state_ = AUTH_STATE_CANCELED; |
| 850 } |
| 851 |
| 852 // These will be reset in OnStartCompleted. |
676 response_info_ = NULL; | 853 response_info_ = NULL; |
677 context_ = NULL; | 854 response_cookies_.clear(); |
678 } | 855 |
679 | 856 // OK, let the consumer read the error page... |
680 void URLRequestHttpJob::StartTransaction() { | 857 // |
681 // NOTE: This method assumes that request_info_ is already set up properly. | 858 // Because we set the AUTH_STATE_CANCELED flag, NeedsAuth will return false, |
682 | 859 // which will cause the consumer to receive OnResponseStarted instead of |
683 // If we already have a transaction, then we should restart the transaction | 860 // OnAuthRequired. |
684 // with auth provided by username_ and password_. | 861 // |
685 | 862 // We have to do this via InvokeLater to avoid "recursing" the consumer. |
686 int rv; | 863 // |
687 | 864 MessageLoop::current()->PostTask( |
688 if (transaction_.get()) { | 865 FROM_HERE, |
689 rv = transaction_->RestartWithAuth(username_, password_, &start_callback_); | 866 method_factory_.NewRunnableMethod( |
690 username_.clear(); | 867 &URLRequestHttpJob::OnStartCompleted, OK)); |
691 password_.clear(); | 868 } |
692 } else { | 869 |
693 DCHECK(request_->context()); | 870 void URLRequestHttpJob::ContinueWithCertificate( |
694 DCHECK(request_->context()->http_transaction_factory()); | 871 X509Certificate* client_cert) { |
695 | 872 DCHECK(transaction_.get()); |
696 rv = request_->context()->http_transaction_factory()->CreateTransaction( | 873 |
697 &transaction_); | 874 DCHECK(!response_info_) << "should not have a response yet"; |
698 if (rv == OK) { | 875 |
699 if (!throttling_entry_->IsDuringExponentialBackoff() || | 876 // No matter what, we want to report our status as IO pending since we will |
700 !net::URLRequestThrottlerManager::GetInstance()-> | 877 // be notifying our consumer asynchronously via OnStartCompleted. |
701 enforce_throttling()) { | 878 SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0)); |
702 rv = transaction_->Start( | 879 |
703 &request_info_, &start_callback_, request_->net_log()); | 880 int rv = transaction_->RestartWithCertificate(client_cert, &start_callback_); |
704 } else { | |
705 // Special error code for the exponential back-off module. | |
706 rv = ERR_TEMPORARILY_THROTTLED; | |
707 } | |
708 // Make sure the context is alive for the duration of the | |
709 // transaction. | |
710 context_ = request_->context(); | |
711 } | |
712 } | |
713 | |
714 if (rv == ERR_IO_PENDING) | 881 if (rv == ERR_IO_PENDING) |
715 return; | 882 return; |
716 | 883 |
717 // The transaction started synchronously, but we need to notify the | 884 // The transaction started synchronously, but we need to notify the |
718 // URLRequest delegate via the message loop. | 885 // URLRequest delegate via the message loop. |
719 MessageLoop::current()->PostTask( | 886 MessageLoop::current()->PostTask( |
720 FROM_HERE, | 887 FROM_HERE, |
721 method_factory_.NewRunnableMethod( | 888 method_factory_.NewRunnableMethod( |
722 &URLRequestHttpJob::OnStartCompleted, rv)); | 889 &URLRequestHttpJob::OnStartCompleted, rv)); |
723 } | 890 } |
724 | 891 |
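
Editor's sketch: StartTransaction() and ContinueWithCertificate() both end with the same tail: even when the transaction completes synchronously, the result is delivered through a posted task so the URLRequest delegate is never re-entered from inside its own call stack. A sketch of that tail, assuming the MessageLoop / ScopedRunnableMethodFactory API of this period (Job is a hypothetical stand-in):

#include "base/compiler_specific.h"
#include "base/message_loop.h"
#include "base/task.h"

class Job {
 public:
  Job() : ALLOW_THIS_IN_INITIALIZER_LIST(method_factory_(this)) {}

  void Finish(int rv) {
    // Deliver |rv| asynchronously even though it is already known.
    MessageLoop::current()->PostTask(
        FROM_HERE,
        method_factory_.NewRunnableMethod(&Job::OnStartCompleted, rv));
  }

 private:
  void OnStartCompleted(int rv) { /* notify the delegate */ }

  ScopedRunnableMethodFactory<Job> method_factory_;
};
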
725 void URLRequestHttpJob::AddExtraHeaders() { | 892 void URLRequestHttpJob::ContinueDespiteLastError() { |
726 // TODO(jar): Consider optimizing away SDCH advertising bytes when the URL is | 893 // If the transaction was destroyed, then the job was cancelled. |
727 // probably an img or such (and SDCH encoding is not likely). | 894 if (!transaction_.get()) |
728 bool advertise_sdch = SdchManager::Global() && | 895 return; |
729 SdchManager::Global()->IsInSupportedDomain(request_->url()); | 896 |
730 std::string avail_dictionaries; | 897 DCHECK(!response_info_) << "should not have a response yet"; |
731 if (advertise_sdch) { | 898 |
732 SdchManager::Global()->GetAvailDictionaryList(request_->url(), | |
733 &avail_dictionaries); | |
734 | |
735 // The AllowLatencyExperiment() is only true if we've successfully done a | |
736 // full SDCH compression recently in this browser session for this host. | |
737 // Note that for this path, there might be no applicable dictionaries, and | |
738 // hence we can't participate in the experiment. | |
739 if (!avail_dictionaries.empty() && | |
740 SdchManager::Global()->AllowLatencyExperiment(request_->url())) { | |
741 // We are participating in the test (or control), and hence we'll | |
742 // eventually record statistics via either SDCH_EXPERIMENT_DECODE or | |
743 // SDCH_EXPERIMENT_HOLDBACK, and we'll need some packet timing data. | |
744 EnablePacketCounting(kSdchPacketHistogramCount); | |
745 if (base::RandDouble() < .01) { | |
746 sdch_test_control_ = true; // 1% probability. | |
747 advertise_sdch = false; | |
748 } else { | |
749 sdch_test_activated_ = true; | |
750 } | |
751 } | |
752 } | |
753 | |
754 // Supply Accept-Encoding headers first so that it is more likely that they | |
755 // will be in the first transmitted packet. This can sometimes make it easier | |
756 // to filter and analyze the streams to assure that a proxy has not damaged | |
757 // these headers. Some proxies deliberately corrupt Accept-Encoding headers. | |
758 if (!advertise_sdch) { | |
759 // Tell the server what compression formats we support (other than SDCH). | |
760 request_info_.extra_headers.SetHeader( | |
761 HttpRequestHeaders::kAcceptEncoding, "gzip,deflate"); | |
762 } else { | |
763 // Include SDCH in acceptable list. | |
764 request_info_.extra_headers.SetHeader( | |
765 HttpRequestHeaders::kAcceptEncoding, "gzip,deflate,sdch"); | |
766 if (!avail_dictionaries.empty()) { | |
767 request_info_.extra_headers.SetHeader( | |
768 kAvailDictionaryHeader, | |
769 avail_dictionaries); | |
770 sdch_dictionary_advertised_ = true; | |
771 // Since we're tagging this transaction as advertising a dictionary, we'll | |
772 // definately employ an SDCH filter (or tentative sdch filter) when we get | |
773 // a response. When done, we'll record histograms via SDCH_DECODE or | |
774 // SDCH_PASSTHROUGH. Hence we need to record packet arrival times. | |
775 EnablePacketCounting(kSdchPacketHistogramCount); | |
776 } | |
777 } | |
778 | |
779 URLRequestContext* context = request_->context(); | |
780 if (context) { | |
781 // Only add default Accept-Language and Accept-Charset if the request | |
782 // didn't have them specified. | |
783 if (!request_info_.extra_headers.HasHeader( | |
784 HttpRequestHeaders::kAcceptLanguage)) { | |
785 request_info_.extra_headers.SetHeader( | |
786 HttpRequestHeaders::kAcceptLanguage, | |
787 context->accept_language()); | |
788 } | |
789 if (!request_info_.extra_headers.HasHeader( | |
790 HttpRequestHeaders::kAcceptCharset)) { | |
791 request_info_.extra_headers.SetHeader( | |
792 HttpRequestHeaders::kAcceptCharset, | |
793 context->accept_charset()); | |
794 } | |
795 } | |
796 } | |
797 | |
798 void URLRequestHttpJob::AddCookieHeaderAndStart() { | |
799 // No matter what, we want to report our status as IO pending since we will | 899 // No matter what, we want to report our status as IO pending since we will |
800 // be notifying our consumer asynchronously via OnStartCompleted. | 900 // be notifying our consumer asynchronously via OnStartCompleted. |
801 SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0)); | 901 SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0)); |
802 | 902 |
803 AddRef(); // Balanced in OnCanGetCookiesCompleted | 903 int rv = transaction_->RestartIgnoringLastError(&start_callback_); |
804 | 904 if (rv == ERR_IO_PENDING) |
805 int policy = OK; | |
806 | |
807 if (request_info_.load_flags & LOAD_DO_NOT_SEND_COOKIES) { | |
808 policy = ERR_FAILED; | |
809 } else if (request_->context()->cookie_policy()) { | |
810 policy = request_->context()->cookie_policy()->CanGetCookies( | |
811 request_->url(), | |
812 request_->first_party_for_cookies(), | |
813 &can_get_cookies_callback_); | |
814 if (policy == ERR_IO_PENDING) | |
815 return; // Wait for completion callback | |
816 } | |
817 | |
818 OnCanGetCookiesCompleted(policy); | |
819 } | |
820 | |
821 void URLRequestHttpJob::SaveCookiesAndNotifyHeadersComplete() { | |
822 DCHECK(transaction_.get()); | |
823 | |
824 const HttpResponseInfo* response_info = transaction_->GetResponseInfo(); | |
825 DCHECK(response_info); | |
826 | |
827 response_cookies_.clear(); | |
828 response_cookies_save_index_ = 0; | |
829 | |
830 FetchResponseCookies(response_info, &response_cookies_); | |
831 | |
832 // Now, loop over the response cookies, and attempt to persist each. | |
833 SaveNextCookie(); | |
834 } | |
835 | |
836 void URLRequestHttpJob::SaveNextCookie() { | |
837 if (response_cookies_save_index_ == response_cookies_.size()) { | |
838 response_cookies_.clear(); | |
839 response_cookies_save_index_ = 0; | |
840 SetStatus(URLRequestStatus()); // Clear the IO_PENDING status | |
841 NotifyHeadersComplete(); | |
842 return; | 905 return; |
843 } | 906 |
844 | 907 // The transaction started synchronously, but we need to notify the |
845 // No matter what, we want to report our status as IO pending since we will | 908 // URLRequest delegate via the message loop. |
846 // be notifying our consumer asynchronously via OnStartCompleted. | 909 MessageLoop::current()->PostTask( |
847 SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0)); | 910 FROM_HERE, |
848 | 911 method_factory_.NewRunnableMethod( |
849 AddRef(); // Balanced in OnCanSetCookieCompleted | 912 &URLRequestHttpJob::OnStartCompleted, rv)); |
850 | 913 } |
851 int policy = OK; | 914 |
852 | 915 bool URLRequestHttpJob::ReadRawData(IOBuffer* buf, int buf_size, |
853 if (request_info_.load_flags & LOAD_DO_NOT_SAVE_COOKIES) { | 916 int *bytes_read) { |
854 policy = ERR_FAILED; | 917 DCHECK_NE(buf_size, 0); |
855 } else if (request_->context()->cookie_policy()) { | 918 DCHECK(bytes_read); |
856 policy = request_->context()->cookie_policy()->CanSetCookie( | 919 DCHECK(!read_in_progress_); |
857 request_->url(), | 920 |
858 request_->first_party_for_cookies(), | 921 int rv = transaction_->Read(buf, buf_size, &read_callback_); |
859 response_cookies_[response_cookies_save_index_], | 922 if (rv >= 0) { |
860 &can_set_cookie_callback_); | 923 *bytes_read = rv; |
861 if (policy == ERR_IO_PENDING) | 924 return true; |
862 return; // Wait for completion callback | 925 } |
863 } | 926 |
864 | 927 if (rv == ERR_IO_PENDING) { |
865 OnCanSetCookieCompleted(policy); | 928 read_in_progress_ = true; |
866 } | 929 SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0)); |
867 | 930 } else { |
868 void URLRequestHttpJob::FetchResponseCookies( | 931 NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, rv)); |
869 const HttpResponseInfo* response_info, | 932 } |
870 std::vector<std::string>* cookies) { | 933 |
871 std::string name = "Set-Cookie"; | 934 return false; |
872 std::string value; | 935 } |
873 | 936 |
874 void* iter = NULL; | 937 void URLRequestHttpJob::StopCaching() { |
875 while (response_info->headers->EnumerateHeader(&iter, name, &value)) { | 938 if (transaction_.get()) |
876 if (!value.empty()) | 939 transaction_->StopCaching(); |
877 cookies->push_back(value); | 940 } |
878 } | 941 |
879 } | 942 URLRequestHttpJob::~URLRequestHttpJob() { |
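
Editor's sketch: ReadRawData() above encodes HttpTransaction::Read()'s three-way contract: a non-negative return is a synchronous read of that many bytes (0 at EOF), ERR_IO_PENDING means the read callback will fire later, and anything else is a hard failure. A distilled, self-contained version (stand-in constant; hypothetical helper name):

static const int ERR_IO_PENDING = -1;  // Stand-in for net::ERR_IO_PENDING.

// Returns true only on synchronous success; *pending distinguishes an
// in-flight read (callback will fire) from a hard error.
bool InterpretReadResult(int rv, int* bytes_read, bool* pending) {
  *pending = false;
  if (rv >= 0) {
    *bytes_read = rv;  // Synchronous read; 0 means EOF.
    return true;
  }
  if (rv == ERR_IO_PENDING)
    *pending = true;  // OnReadCompleted() will deliver the bytes later.
  return false;       // Either pending or a hard error for NotifyDone().
}
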
880 | 943 DCHECK(!sdch_test_control_ || !sdch_test_activated_); |
881 void URLRequestHttpJob::ProcessStrictTransportSecurityHeader() { | 944 if (!IsCachedContent()) { |
882 DCHECK(response_info_); | 945 if (sdch_test_control_) |
883 | 946 RecordPacketStats(SDCH_EXPERIMENT_HOLDBACK); |
884 URLRequestContext* ctx = request_->context(); | 947 if (sdch_test_activated_) |
885 if (!ctx || !ctx->transport_security_state()) | 948 RecordPacketStats(SDCH_EXPERIMENT_DECODE); |
886 return; | 949 } |
887 | 950 // Make sure SDCH filters are told to emit histogram data while this class |
888 const bool https = response_info_->ssl_info.is_valid(); | 951 // can still service the IsCachedContent() call. |
889 const bool valid_https = | 952 DestroyFilters(); |
890 https && !IsCertStatusError(response_info_->ssl_info.cert_status); | 953 |
891 | 954 if (sdch_dictionary_url_.is_valid()) { |
892 std::string name = "Strict-Transport-Security"; | 955 // Prior to reaching the destructor, request_ has been set to a NULL |
893 std::string value; | 956 // pointer, so request_->url() is no longer valid in the destructor, and we |
894 | 957 // use an alternate copy |request_info_.url|. |
895 int max_age; | 958 SdchManager* manager = SdchManager::Global(); |
896 bool include_subdomains; | 959 // To be extra safe, since this is a "different time" from when we decided |
897 | 960 // to get the dictionary, we'll validate that an SdchManager is available. |
898 void* iter = NULL; | 961 // At shutdown time, care is taken to be sure that we don't delete this |
899 while (response_info_->headers->EnumerateHeader(&iter, name, &value)) { | 962 // globally useful instance "too soon," so this check is just defensive |
900 const bool ok = TransportSecurityState::ParseHeader( | 963 // coding to assure that IF the system is shutting down, we don't have any |
901 value, &max_age, &include_subdomains); | 964 // problem if the manager was deleted ahead of time. |
902 if (!ok) | 965 if (manager) // Defensive programming. |
903 continue; | 966 manager->FetchDictionary(request_info_.url, sdch_dictionary_url_); |
904 // We will only accept strict mode if we saw the header from an HTTPS | 967 } |
905 // connection with no certificate problems. | 968 } |
906 if (!valid_https) | 969 |
907 continue; | |
908 base::Time current_time(base::Time::Now()); | |
909 base::TimeDelta max_age_delta = base::TimeDelta::FromSeconds(max_age); | |
910 | |
911 TransportSecurityState::DomainState domain_state; | |
912 domain_state.expiry = current_time + max_age_delta; | |
913 domain_state.mode = TransportSecurityState::DomainState::MODE_STRICT; | |
914 domain_state.include_subdomains = include_subdomains; | |
915 | |
916 ctx->transport_security_state()->EnableHost(request_info_.url.host(), | |
917 domain_state); | |
918 } | |
919 | |
920 // TODO(agl): change this over when we have fixed things at the server end. | |
921 // The string should be "Opportunistic-Transport-Security"; | |
922 name = "X-Bodge-Transport-Security"; | |
923 | |
924 while (response_info_->headers->EnumerateHeader(&iter, name, &value)) { | |
925 const bool ok = TransportSecurityState::ParseHeader( | |
926 value, &max_age, &include_subdomains); | |
927 if (!ok) | |
928 continue; | |
929 // If we saw an opportunistic request over HTTPS, then clearly we can make | |
930 // HTTPS connections to the host so we should remember this. | |
931 if (https) { | |
932 base::Time current_time(base::Time::Now()); | |
933 base::TimeDelta max_age_delta = base::TimeDelta::FromSeconds(max_age); | |
934 | |
935 TransportSecurityState::DomainState domain_state; | |
936 domain_state.expiry = current_time + max_age_delta; | |
937 domain_state.mode = | |
938 TransportSecurityState::DomainState::MODE_SPDY_ONLY; | |
939 domain_state.include_subdomains = include_subdomains; | |
940 | |
941 ctx->transport_security_state()->EnableHost(request_info_.url.host(), | |
942 domain_state); | |
943 continue; | |
944 } | |
945 | |
946 if (!request()) | |
947 break; | |
948 | |
949 // At this point, we have a request for opportunistic encryption over HTTP. | |
950 // In this case we need to probe to check that we can make HTTPS | |
951 // connections to that host. | |
952 HTTPSProber* const prober = HTTPSProber::GetInstance(); | |
953 if (prober->HaveProbed(request_info_.url.host()) || | |
954 prober->InFlight(request_info_.url.host())) { | |
955 continue; | |
956 } | |
957 | |
958 HTTPSProberDelegateImpl* delegate = | |
959 new HTTPSProberDelegateImpl(request_info_.url.host(), max_age, | |
960 include_subdomains, | |
961 ctx->transport_security_state()); | |
962 if (!prober->ProbeHost(request_info_.url.host(), request()->context(), | |
963 delegate)) { | |
964 delete delegate; | |
965 } | |
966 } | |
967 } | |
968 | |
969 } // namespace net | 970 } // namespace net |