OLD | NEW |
| (Empty) |
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | |
2 // Use of this source code is governed by a BSD-style license that can be | |
3 // found in the LICENSE file. | |
4 | |
5 #include "net/url_request/url_request_http_job.h" | |
6 | |
7 #include "base/base_switches.h" | |
8 #include "base/bind.h" | |
9 #include "base/bind_helpers.h" | |
10 #include "base/command_line.h" | |
11 #include "base/compiler_specific.h" | |
12 #include "base/file_version_info.h" | |
13 #include "base/message_loop/message_loop.h" | |
14 #include "base/metrics/field_trial.h" | |
15 #include "base/metrics/histogram.h" | |
16 #include "base/profiler/scoped_tracker.h" | |
17 #include "base/rand_util.h" | |
18 #include "base/strings/string_util.h" | |
19 #include "base/time/time.h" | |
20 #include "net/base/host_port_pair.h" | |
21 #include "net/base/load_flags.h" | |
22 #include "net/base/mime_util.h" | |
23 #include "net/base/net_errors.h" | |
24 #include "net/base/net_util.h" | |
25 #include "net/base/network_delegate.h" | |
26 #include "net/base/sdch_manager.h" | |
27 #include "net/base/sdch_net_log_params.h" | |
28 #include "net/cert/cert_status_flags.h" | |
29 #include "net/cookies/cookie_store.h" | |
30 #include "net/http/http_content_disposition.h" | |
31 #include "net/http/http_network_session.h" | |
32 #include "net/http/http_request_headers.h" | |
33 #include "net/http/http_response_headers.h" | |
34 #include "net/http/http_response_info.h" | |
35 #include "net/http/http_status_code.h" | |
36 #include "net/http/http_transaction.h" | |
37 #include "net/http/http_transaction_factory.h" | |
38 #include "net/http/http_util.h" | |
39 #include "net/proxy/proxy_info.h" | |
40 #include "net/ssl/ssl_cert_request_info.h" | |
41 #include "net/ssl/ssl_config_service.h" | |
42 #include "net/url_request/fraudulent_certificate_reporter.h" | |
43 #include "net/url_request/http_user_agent_settings.h" | |
44 #include "net/url_request/url_request.h" | |
45 #include "net/url_request/url_request_context.h" | |
46 #include "net/url_request/url_request_error_job.h" | |
47 #include "net/url_request/url_request_job_factory.h" | |
48 #include "net/url_request/url_request_redirect_job.h" | |
49 #include "net/url_request/url_request_throttler_header_adapter.h" | |
50 #include "net/url_request/url_request_throttler_manager.h" | |
51 #include "net/websockets/websocket_handshake_stream_base.h" | |
52 | |
// Request header used to advertise the SDCH dictionaries the client already
// holds (sent alongside "Accept-Encoding: ... sdch" in AddExtraHeaders).
static const char kAvailDictionaryHeader[] = "Avail-Dictionary";
54 | |
55 namespace net { | |
56 | |
// Bridges a URLRequestHttpJob to the FilterContext interface consumed by the
// content-decoding filter machinery, exposing request/response details (MIME
// type, URL, response code, SDCH state, packet stats, net log) that filters
// need while decoding the body.
class URLRequestHttpJob::HttpFilterContext : public FilterContext {
 public:
  explicit HttpFilterContext(URLRequestHttpJob* job);
  ~HttpFilterContext() override;

  // FilterContext implementation.
  bool GetMimeType(std::string* mime_type) const override;
  bool GetURL(GURL* gurl) const override;
  bool GetContentDisposition(std::string* disposition) const override;
  base::Time GetRequestTime() const override;
  bool IsCachedContent() const override;
  bool IsDownload() const override;
  SdchManager::DictionarySet* SdchDictionariesAdvertised() const override;
  int64 GetByteReadCount() const override;
  int GetResponseCode() const override;
  const URLRequestContext* GetURLRequestContext() const override;
  void RecordPacketStats(StatisticSelector statistic) const override;
  const BoundNetLog& GetNetLog() const override;

 private:
  // Owning job; never NULL (DCHECKed in the constructor).
  URLRequestHttpJob* job_;

  // URLRequestHttpJob may be detached from URLRequest, but we still need to
  // return something.
  BoundNetLog dummy_log_;

  DISALLOW_COPY_AND_ASSIGN(HttpFilterContext);
};
85 | |
// A filter context is never created without an owning job.
URLRequestHttpJob::HttpFilterContext::HttpFilterContext(URLRequestHttpJob* job)
    : job_(job) {
  DCHECK(job_);
}
90 | |
91 URLRequestHttpJob::HttpFilterContext::~HttpFilterContext() { | |
92 } | |
93 | |
// Forwards the MIME-type query to the owning job.  Returns false when no
// MIME type is available.
bool URLRequestHttpJob::HttpFilterContext::GetMimeType(
    std::string* mime_type) const {
  return job_->GetMimeType(mime_type);
}
98 | |
99 bool URLRequestHttpJob::HttpFilterContext::GetURL(GURL* gurl) const { | |
100 if (!job_->request()) | |
101 return false; | |
102 *gurl = job_->request()->url(); | |
103 return true; | |
104 } | |
105 | |
106 bool URLRequestHttpJob::HttpFilterContext::GetContentDisposition( | |
107 std::string* disposition) const { | |
108 HttpResponseHeaders* headers = job_->GetResponseHeaders(); | |
109 void *iter = NULL; | |
110 return headers->EnumerateHeader(&iter, "Content-Disposition", disposition); | |
111 } | |
112 | |
113 base::Time URLRequestHttpJob::HttpFilterContext::GetRequestTime() const { | |
114 return job_->request() ? job_->request()->request_time() : base::Time(); | |
115 } | |
116 | |
// True when the response was served from the cache (captured by the job in
// NotifyHeadersComplete from response_info_->was_cached).
bool URLRequestHttpJob::HttpFilterContext::IsCachedContent() const {
  return job_->is_cached_content_;
}
120 | |
121 bool URLRequestHttpJob::HttpFilterContext::IsDownload() const { | |
122 return (job_->request_info_.load_flags & LOAD_IS_DOWNLOAD) != 0; | |
123 } | |
124 | |
// The SDCH dictionaries this request advertised (set in AddExtraHeaders),
// or NULL if none were advertised.
SdchManager::DictionarySet*
URLRequestHttpJob::HttpFilterContext::SdchDictionariesAdvertised() const {
  return job_->dictionaries_advertised_.get();
}
129 | |
// Number of raw (pre-filter) bytes read so far, per the job's counter.
int64 URLRequestHttpJob::HttpFilterContext::GetByteReadCount() const {
  return job_->filter_input_byte_count();
}
133 | |
// HTTP status code of the response, forwarded from the owning job.
int URLRequestHttpJob::HttpFilterContext::GetResponseCode() const {
  return job_->GetResponseCode();
}
137 | |
138 const URLRequestContext* | |
139 URLRequestHttpJob::HttpFilterContext::GetURLRequestContext() const { | |
140 return job_->request() ? job_->request()->context() : NULL; | |
141 } | |
142 | |
// Forwards packet-timing histogram recording to the owning job.
void URLRequestHttpJob::HttpFilterContext::RecordPacketStats(
    StatisticSelector statistic) const {
  job_->RecordPacketStats(statistic);
}
147 | |
148 const BoundNetLog& URLRequestHttpJob::HttpFilterContext::GetNetLog() const { | |
149 return job_->request() ? job_->request()->net_log() : dummy_log_; | |
150 } | |
151 | |
152 // TODO(darin): make sure the port blocking code is not lost | |
153 // static | |
154 URLRequestJob* URLRequestHttpJob::Factory(URLRequest* request, | |
155 NetworkDelegate* network_delegate, | |
156 const std::string& scheme) { | |
157 DCHECK(scheme == "http" || scheme == "https" || scheme == "ws" || | |
158 scheme == "wss"); | |
159 | |
160 if (!request->context()->http_transaction_factory()) { | |
161 NOTREACHED() << "requires a valid context"; | |
162 return new URLRequestErrorJob( | |
163 request, network_delegate, ERR_INVALID_ARGUMENT); | |
164 } | |
165 | |
166 GURL redirect_url; | |
167 if (request->GetHSTSRedirect(&redirect_url)) { | |
168 return new URLRequestRedirectJob( | |
169 request, network_delegate, redirect_url, | |
170 // Use status code 307 to preserve the method, so POST requests work. | |
171 URLRequestRedirectJob::REDIRECT_307_TEMPORARY_REDIRECT, "HSTS"); | |
172 } | |
173 return new URLRequestHttpJob(request, | |
174 network_delegate, | |
175 request->context()->http_user_agent_settings()); | |
176 } | |
177 | |
// Initializes every member to its "nothing has happened yet" state.  The
// bound callbacks use base::Unretained(this); that is safe because they are
// handed to |transaction_|, which this job owns and destroys before itself.
URLRequestHttpJob::URLRequestHttpJob(
    URLRequest* request,
    NetworkDelegate* network_delegate,
    const HttpUserAgentSettings* http_user_agent_settings)
    : URLRequestJob(request, network_delegate),
      priority_(DEFAULT_PRIORITY),
      response_info_(NULL),
      response_cookies_save_index_(0),
      proxy_auth_state_(AUTH_STATE_DONT_NEED_AUTH),
      server_auth_state_(AUTH_STATE_DONT_NEED_AUTH),
      start_callback_(base::Bind(&URLRequestHttpJob::OnStartCompleted,
                                 base::Unretained(this))),
      notify_before_headers_sent_callback_(
          base::Bind(&URLRequestHttpJob::NotifyBeforeSendHeadersCallback,
                     base::Unretained(this))),
      read_in_progress_(false),
      throttling_entry_(NULL),
      sdch_test_activated_(false),
      sdch_test_control_(false),
      is_cached_content_(false),
      request_creation_time_(),
      packet_timing_enabled_(false),
      done_(false),
      bytes_observed_in_packets_(0),
      request_time_snapshot_(),
      final_packet_time_(),
      filter_context_(new HttpFilterContext(this)),
      on_headers_received_callback_(
          base::Bind(&URLRequestHttpJob::OnHeadersReceivedCallback,
                     base::Unretained(this))),
      awaiting_callback_(false),
      http_user_agent_settings_(http_user_agent_settings),
      weak_factory_(this) {
  // Register with the context's throttler (if any) so this URL participates
  // in exponential back-off.
  URLRequestThrottlerManager* manager = request->context()->throttler_manager();
  if (manager)
    throttling_entry_ = manager->RegisterRequestUrl(request->url());

  ResetTimer();
}
217 | |
// Records SDCH experiment stats (network responses only) and tears down the
// filters while |filter_context_| is still valid.
URLRequestHttpJob::~URLRequestHttpJob() {
  CHECK(!awaiting_callback_);

  // A request can be in at most one arm of the SDCH experiment.
  DCHECK(!sdch_test_control_ || !sdch_test_activated_);
  if (!is_cached_content_) {
    if (sdch_test_control_)
      RecordPacketStats(FilterContext::SDCH_EXPERIMENT_HOLDBACK);
    if (sdch_test_activated_)
      RecordPacketStats(FilterContext::SDCH_EXPERIMENT_DECODE);
  }
  // Make sure SDCH filters are told to emit histogram data while
  // filter_context_ is still alive.
  DestroyFilters();

  DoneWithRequest(ABORTED);
}
234 | |
235 void URLRequestHttpJob::SetPriority(RequestPriority priority) { | |
236 priority_ = priority; | |
237 if (transaction_) | |
238 transaction_->SetPriority(priority_); | |
239 } | |
240 | |
// Snapshots the URLRequest's state into |request_info_|, decides privacy
// mode, sanitizes/sets the Referer and User-Agent headers, then continues
// via AddExtraHeaders() and AddCookieHeaderAndStart().
void URLRequestHttpJob::Start() {
  DCHECK(!transaction_.get());

  // URLRequest::SetReferrer ensures that we do not send username and password
  // fields in the referrer.
  GURL referrer(request_->referrer());

  request_info_.url = request_->url();
  request_info_.method = request_->method();
  request_info_.load_flags = request_->load_flags();
  // Enable privacy mode if cookie settings or flags tell us not send or
  // save cookies.
  bool enable_privacy_mode =
      (request_info_.load_flags & LOAD_DO_NOT_SEND_COOKIES) ||
      (request_info_.load_flags & LOAD_DO_NOT_SAVE_COOKIES) ||
      CanEnablePrivacyMode();
  // Privacy mode could still be disabled in OnCookiesLoaded if we are going
  // to send previously saved cookies.
  request_info_.privacy_mode = enable_privacy_mode ?
      PRIVACY_MODE_ENABLED : PRIVACY_MODE_DISABLED;

  // Strip Referer from request_info_.extra_headers to prevent, e.g., plugins
  // from overriding headers that are controlled using other means. Otherwise a
  // plugin could set a referrer although sending the referrer is inhibited.
  request_info_.extra_headers.RemoveHeader(HttpRequestHeaders::kReferer);

  // Our consumer should have made sure that this is a safe referrer. See for
  // instance WebCore::FrameLoader::HideReferrer.
  if (referrer.is_valid()) {
    request_info_.extra_headers.SetHeader(HttpRequestHeaders::kReferer,
                                          referrer.spec());
  }

  request_info_.extra_headers.SetHeaderIfMissing(
      HttpRequestHeaders::kUserAgent,
      http_user_agent_settings_ ?
          http_user_agent_settings_->GetUserAgent() : std::string());

  AddExtraHeaders();
  AddCookieHeaderAndStart();
}
282 | |
// Cancels the job.  Invalidates weak pointers before destroying the
// transaction so no pending callback can fire into a dead transaction.
void URLRequestHttpJob::Kill() {
  // Nothing to cancel if the transaction was never started (or already
  // destroyed).
  if (!transaction_.get())
    return;

  weak_factory_.InvalidateWeakPtrs();
  DestroyTransaction();
  URLRequestJob::Kill();
}
291 | |
292 void URLRequestHttpJob::NotifyBeforeSendProxyHeadersCallback( | |
293 const ProxyInfo& proxy_info, | |
294 HttpRequestHeaders* request_headers) { | |
295 DCHECK(request_headers); | |
296 DCHECK_NE(URLRequestStatus::CANCELED, GetStatus().status()); | |
297 if (network_delegate()) { | |
298 network_delegate()->NotifyBeforeSendProxyHeaders( | |
299 request_, | |
300 proxy_info, | |
301 request_headers); | |
302 } | |
303 } | |
304 | |
// Runs once headers are available: captures response info, updates the
// throttler, processes HSTS/HPKP headers, handles SDCH dictionary
// advertisements ("Get-Dictionary") and the "X-Sdch-Encode: 0" opt-out,
// and either restarts the transaction for auth or notifies the base class.
void URLRequestHttpJob::NotifyHeadersComplete() {
  DCHECK(!response_info_);

  response_info_ = transaction_->GetResponseInfo();

  // Save boolean, as we'll need this info at destruction time, and filters may
  // also need this info.
  is_cached_content_ = response_info_->was_cached;

  // Only network (non-cached) responses should feed back-off state.
  if (!is_cached_content_ && throttling_entry_.get()) {
    URLRequestThrottlerHeaderAdapter response_adapter(GetResponseHeaders());
    throttling_entry_->UpdateWithResponse(request_info_.url.host(),
                                          &response_adapter);
  }

  // The ordering of these calls is not important.
  ProcessStrictTransportSecurityHeader();
  ProcessPublicKeyPinsHeader();

  // Handle the server notification of a new SDCH dictionary.
  SdchManager* sdch_manager(request()->context()->sdch_manager());
  if (sdch_manager) {
    SdchProblemCode rv = sdch_manager->IsInSupportedDomain(request()->url());
    if (rv != SDCH_OK) {
      // If SDCH is just disabled, it is not a real error.
      if (rv != SDCH_DISABLED && rv != SDCH_SECURE_SCHEME_NOT_SUPPORTED) {
        SdchManager::SdchErrorRecovery(rv);
        request()->net_log().AddEvent(
            NetLog::TYPE_SDCH_DECODING_ERROR,
            base::Bind(&NetLogSdchResourceProblemCallback, rv));
      }
    } else {
      const std::string name = "Get-Dictionary";
      std::string url_text;
      void* iter = NULL;
      // TODO(jar): We need to not fetch dictionaries the first time they are
      // seen, but rather wait until we can justify their usefulness.
      // For now, we will only fetch the first dictionary, which will at least
      // require multiple suggestions before we get additional ones for this
      // site. Eventually we should wait until a dictionary is requested
      // several times
      // before we even download it (so that we don't waste memory or
      // bandwidth).
      if (GetResponseHeaders()->EnumerateHeader(&iter, name, &url_text)) {
        // Resolve suggested URL relative to request url.
        GURL sdch_dictionary_url = request_->url().Resolve(url_text);
        if (sdch_dictionary_url.is_valid()) {
          rv = sdch_manager->OnGetDictionary(request_->url(),
                                             sdch_dictionary_url);
          if (rv != SDCH_OK) {
            SdchManager::SdchErrorRecovery(rv);
            request_->net_log().AddEvent(
                NetLog::TYPE_SDCH_DICTIONARY_ERROR,
                base::Bind(&NetLogSdchDictionaryFetchProblemCallback, rv,
                           sdch_dictionary_url, false));
          }
        }
      }
    }
  }

  // Handle the server signalling no SDCH encoding.
  if (dictionaries_advertised_) {
    // We are wary of proxies that discard or damage SDCH encoding. If a server
    // explicitly states that this is not SDCH content, then we can correct our
    // assumption that this is an SDCH response, and avoid the need to recover
    // as though the content is corrupted (when we discover it is not SDCH
    // encoded).
    std::string sdch_response_status;
    void* iter = NULL;
    while (GetResponseHeaders()->EnumerateHeader(&iter, "X-Sdch-Encode",
                                                 &sdch_response_status)) {
      if (sdch_response_status == "0") {
        dictionaries_advertised_.reset();
        break;
      }
    }
  }

  // The HTTP transaction may be restarted several times for the purposes
  // of sending authorization information. Each time it restarts, we get
  // notified of the headers completion so that we can update the cookie store.
  if (transaction_->IsReadyToRestartForAuth()) {
    DCHECK(!response_info_->auth_challenge.get());
    // TODO(battre): This breaks the webrequest API for
    // URLRequestTestHTTP.BasicAuthWithCookies
    // where OnBeforeSendHeaders -> OnSendHeaders -> OnBeforeSendHeaders
    // occurs.
    RestartTransactionWithAuth(AuthCredentials());
    return;
  }

  URLRequestJob::NotifyHeadersComplete();
}
399 | |
// Finalizes per-request stats before the base class delivers the done
// notification.
void URLRequestHttpJob::NotifyDone(const URLRequestStatus& status) {
  DoneWithRequest(FINISHED);
  URLRequestJob::NotifyDone(status);
}
404 | |
// Tears down the transaction, recording stats first while its response info
// is still reachable.
void URLRequestHttpJob::DestroyTransaction() {
  DCHECK(transaction_.get());

  DoneWithRequest(ABORTED);
  transaction_.reset();
  // |response_info_| was obtained from the transaction we just destroyed.
  response_info_ = NULL;
  receive_headers_end_ = base::TimeTicks();
}
413 | |
// Gives the network delegate a chance to observe, modify, or block the
// request headers before the HttpTransaction is actually started.
void URLRequestHttpJob::StartTransaction() {
  if (network_delegate()) {
    OnCallToDelegate();
    int rv = network_delegate()->NotifyBeforeSendHeaders(
        request_, notify_before_headers_sent_callback_,
        &request_info_.extra_headers);
    // If an extension blocks the request, we rely on the callback to
    // MaybeStartTransactionInternal().
    if (rv == ERR_IO_PENDING)
      return;
    MaybeStartTransactionInternal(rv);
    return;
  }
  StartTransactionInternal();
}
429 | |
// Async continuation of StartTransaction() once the delegate's
// NotifyBeforeSendHeaders completes.
void URLRequestHttpJob::NotifyBeforeSendHeadersCallback(int result) {
  // Check that there are no callbacks to already canceled requests.
  DCHECK_NE(URLRequestStatus::CANCELED, GetStatus().status());

  MaybeStartTransactionInternal(result);
}
436 | |
// Starts the transaction when the delegate allowed the request
// (result == OK); otherwise logs the cancellation to the net log and
// surfaces the delegate's error as the job's start error.
void URLRequestHttpJob::MaybeStartTransactionInternal(int result) {
  OnCallToDelegateComplete();
  if (result == OK) {
    StartTransactionInternal();
  } else {
    std::string source("delegate");
    request_->net_log().AddEvent(NetLog::TYPE_CANCELLED,
                                 NetLog::StringCallback("source", &source));
    NotifyCanceled();
    NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, result));
  }
}
449 | |
// Creates (or restarts, for auth) the HttpTransaction and starts it, unless
// the throttler rejects the request.  Any synchronous completion is
// re-dispatched through the message loop so OnStartCompleted always runs
// asynchronously from the caller's point of view.
void URLRequestHttpJob::StartTransactionInternal() {
  // NOTE: This method assumes that request_info_ is already setup properly.

  // If we already have a transaction, then we should restart the transaction
  // with auth provided by auth_credentials_.

  int rv;

  if (network_delegate()) {
    network_delegate()->NotifySendHeaders(
        request_, request_info_.extra_headers);
  }

  if (transaction_.get()) {
    rv = transaction_->RestartWithAuth(auth_credentials_, start_callback_);
    auth_credentials_ = AuthCredentials();  // Consumed; don't reuse.
  } else {
    DCHECK(request_->context()->http_transaction_factory());

    rv = request_->context()->http_transaction_factory()->CreateTransaction(
        priority_, &transaction_);

    // WebSocket requests must carry a handshake-stream helper in the
    // request's user data; without it the scheme is disallowed.
    if (rv == OK && request_info_.url.SchemeIsWSOrWSS()) {
      base::SupportsUserData::Data* data = request_->GetUserData(
          WebSocketHandshakeStreamBase::CreateHelper::DataKey());
      if (data) {
        transaction_->SetWebSocketHandshakeStreamCreateHelper(
            static_cast<WebSocketHandshakeStreamBase::CreateHelper*>(data));
      } else {
        rv = ERR_DISALLOWED_URL_SCHEME;
      }
    }

    if (rv == OK) {
      transaction_->SetBeforeNetworkStartCallback(
          base::Bind(&URLRequestHttpJob::NotifyBeforeNetworkStart,
                     base::Unretained(this)));
      transaction_->SetBeforeProxyHeadersSentCallback(
          base::Bind(&URLRequestHttpJob::NotifyBeforeSendProxyHeadersCallback,
                     base::Unretained(this)));

      if (!throttling_entry_.get() ||
          !throttling_entry_->ShouldRejectRequest(*request_,
                                                  network_delegate())) {
        rv = transaction_->Start(
            &request_info_, start_callback_, request_->net_log());
        start_time_ = base::TimeTicks::Now();
      } else {
        // Special error code for the exponential back-off module.
        rv = ERR_TEMPORARILY_THROTTLED;
      }
    }
  }

  if (rv == ERR_IO_PENDING)
    return;

  // The transaction started synchronously, but we need to notify the
  // URLRequest delegate via the message loop.
  base::MessageLoop::current()->PostTask(
      FROM_HERE,
      base::Bind(&URLRequestHttpJob::OnStartCompleted,
                 weak_factory_.GetWeakPtr(), rv));
}
514 | |
// Fills in Accept-Encoding (including SDCH advertisement and the 1% SDCH
// holdback experiment) and a default Accept-Language, without clobbering
// values the caller already set.
void URLRequestHttpJob::AddExtraHeaders() {
  SdchManager* sdch_manager = request()->context()->sdch_manager();

  // Supply Accept-Encoding field only if it is not already provided.
  // It should be provided IF the content is known to have restrictions on
  // potential encoding, such as streaming multi-media.
  // For details see bug 47381.
  // TODO(jar, enal): jpeg files etc. should set up a request header if
  // possible. Right now it is done only by buffered_resource_loader and
  // simple_data_source.
  if (!request_info_.extra_headers.HasHeader(
      HttpRequestHeaders::kAcceptEncoding)) {
    // We don't support SDCH responses to POST as there is a possibility
    // of having SDCH encoded responses returned (e.g. by the cache)
    // which we cannot decode, and in those situations, we will need
    // to retransmit the request without SDCH, which is illegal for a POST.
    bool advertise_sdch = sdch_manager != NULL && request()->method() != "POST";
    if (advertise_sdch) {
      SdchProblemCode rv = sdch_manager->IsInSupportedDomain(request()->url());
      if (rv != SDCH_OK) {
        advertise_sdch = false;
        // If SDCH is just disabled, it is not a real error.
        if (rv != SDCH_DISABLED && rv != SDCH_SECURE_SCHEME_NOT_SUPPORTED) {
          SdchManager::SdchErrorRecovery(rv);
          request()->net_log().AddEvent(
              NetLog::TYPE_SDCH_DECODING_ERROR,
              base::Bind(&NetLogSdchResourceProblemCallback, rv));
        }
      }
    }
    if (advertise_sdch) {
      dictionaries_advertised_ =
          sdch_manager->GetDictionarySet(request_->url());
    }

    // The AllowLatencyExperiment() is only true if we've successfully done a
    // full SDCH compression recently in this browser session for this host.
    // Note that for this path, there might be no applicable dictionaries,
    // and hence we can't participate in the experiment.
    if (dictionaries_advertised_ &&
        sdch_manager->AllowLatencyExperiment(request_->url())) {
      // We are participating in the test (or control), and hence we'll
      // eventually record statistics via either SDCH_EXPERIMENT_DECODE or
      // SDCH_EXPERIMENT_HOLDBACK, and we'll need some packet timing data.
      packet_timing_enabled_ = true;
      if (base::RandDouble() < .01) {
        sdch_test_control_ = true;  // 1% probability.
        dictionaries_advertised_.reset();
        advertise_sdch = false;
      } else {
        sdch_test_activated_ = true;
      }
    }

    // Supply Accept-Encoding headers first so that it is more likely that they
    // will be in the first transmitted packet. This can sometimes make it
    // easier to filter and analyze the streams to assure that a proxy has not
    // damaged these headers. Some proxies deliberately corrupt Accept-Encoding
    // headers.
    if (!advertise_sdch) {
      // Tell the server what compression formats we support (other than SDCH).
      request_info_.extra_headers.SetHeader(
          HttpRequestHeaders::kAcceptEncoding, "gzip, deflate");
    } else {
      // Include SDCH in acceptable list.
      request_info_.extra_headers.SetHeader(
          HttpRequestHeaders::kAcceptEncoding, "gzip, deflate, sdch");
      if (dictionaries_advertised_) {
        request_info_.extra_headers.SetHeader(
            kAvailDictionaryHeader,
            dictionaries_advertised_->GetDictionaryClientHashList());
        // Since we're tagging this transaction as advertising a dictionary,
        // we'll definitely employ an SDCH filter (or tentative sdch filter)
        // when we get a response. When done, we'll record histograms via
        // SDCH_DECODE or SDCH_PASSTHROUGH. Hence we need to record packet
        // arrival times.
        packet_timing_enabled_ = true;
      }
    }
  }

  if (http_user_agent_settings_) {
    // Only add default Accept-Language if the request didn't have it
    // specified.
    std::string accept_language =
        http_user_agent_settings_->GetAcceptLanguage();
    if (!accept_language.empty()) {
      request_info_.extra_headers.SetHeaderIfMissing(
          HttpRequestHeaders::kAcceptLanguage,
          accept_language);
    }
  }
}
608 | |
// Kicks off the async cookie-load step (when a cookie store exists and
// sending cookies is allowed) before starting the transaction.
void URLRequestHttpJob::AddCookieHeaderAndStart() {
  // No matter what, we want to report our status as IO pending since we will
  // be notifying our consumer asynchronously via OnStartCompleted.
  SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));

  // If the request was destroyed, then there is no more work to do.
  if (!request_)
    return;

  CookieStore* cookie_store = GetCookieStore();
  if (cookie_store && !(request_info_.load_flags & LOAD_DO_NOT_SEND_COOKIES)) {
    // Fetch the full cookie list first so the policy check in
    // CheckCookiePolicyAndLoad can decide whether any may be sent.
    cookie_store->GetAllCookiesForURLAsync(
        request_->url(),
        base::Bind(&URLRequestHttpJob::CheckCookiePolicyAndLoad,
                   weak_factory_.GetWeakPtr()));
  } else {
    DoStartTransaction();
  }
}
628 | |
// Asynchronously reads the cookie line for the request URL (including
// HttpOnly cookies) and resumes in OnCookiesLoaded.
void URLRequestHttpJob::DoLoadCookies() {
  CookieOptions options;
  options.set_include_httponly();
  GetCookieStore()->GetCookiesWithOptionsAsync(
      request_->url(), options,
      base::Bind(&URLRequestHttpJob::OnCookiesLoaded,
                 weak_factory_.GetWeakPtr()));
}
637 | |
638 void URLRequestHttpJob::CheckCookiePolicyAndLoad( | |
639 const CookieList& cookie_list) { | |
640 if (CanGetCookies(cookie_list)) | |
641 DoLoadCookies(); | |
642 else | |
643 DoStartTransaction(); | |
644 } | |
645 | |
646 void URLRequestHttpJob::OnCookiesLoaded(const std::string& cookie_line) { | |
647 if (!cookie_line.empty()) { | |
648 request_info_.extra_headers.SetHeader( | |
649 HttpRequestHeaders::kCookie, cookie_line); | |
650 // Disable privacy mode as we are sending cookies anyway. | |
651 request_info_.privacy_mode = PRIVACY_MODE_DISABLED; | |
652 } | |
653 DoStartTransaction(); | |
654 } | |
655 | |
656 void URLRequestHttpJob::DoStartTransaction() { | |
657 // We may have been canceled while retrieving cookies. | |
658 if (GetStatus().is_success()) { | |
659 StartTransaction(); | |
660 } else { | |
661 NotifyCanceled(); | |
662 } | |
663 } | |
664 | |
// Continuation after the delegate's OnHeadersReceived: on success, collects
// every Set-Cookie header (and the response Date for server-time skew) and
// begins persisting them via SaveNextCookie(); on failure, reports the
// delegate's error as the start error.
void URLRequestHttpJob::SaveCookiesAndNotifyHeadersComplete(int result) {
  // End of the call started in OnStartCompleted.
  OnCallToDelegateComplete();

  if (result != net::OK) {
    std::string source("delegate");
    request_->net_log().AddEvent(NetLog::TYPE_CANCELLED,
                                 NetLog::StringCallback("source", &source));
    NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, result));
    return;
  }

  DCHECK(transaction_.get());

  const HttpResponseInfo* response_info = transaction_->GetResponseInfo();
  DCHECK(response_info);

  response_cookies_.clear();
  response_cookies_save_index_ = 0;

  FetchResponseCookies(&response_cookies_);

  if (!GetResponseHeaders()->GetDateValue(&response_date_))
    response_date_ = base::Time();

  // Now, loop over the response cookies, and attempt to persist each.
  SaveNextCookie();
}
693 | |
// If the save occurs synchronously, SaveNextCookie will loop and save the next
// cookie. If the save is deferred, the callback is responsible for continuing
// to iterate through the cookies.
// TODO(erikwright): Modify the CookieStore API to indicate via return value
// whether it completed synchronously or asynchronously.
// See http://crbug.com/131066.
void URLRequestHttpJob::SaveNextCookie() {
  // No matter what, we want to report our status as IO pending since we will
  // be notifying our consumer asynchronously via OnStartCompleted.
  SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));

  // Used to communicate with the callback. See the implementation of
  // OnCookieSaved.
  scoped_refptr<SharedBoolean> callback_pending = new SharedBoolean(false);
  scoped_refptr<SharedBoolean> save_next_cookie_running =
      new SharedBoolean(true);

  if (!(request_info_.load_flags & LOAD_DO_NOT_SAVE_COOKIES) &&
      GetCookieStore() && response_cookies_.size() > 0) {
    CookieOptions options;
    options.set_include_httponly();
    // Let the store compensate for client/server clock skew.
    options.set_server_time(response_date_);

    net::CookieStore::SetCookiesCallback callback(
        base::Bind(&URLRequestHttpJob::OnCookieSaved,
                   weak_factory_.GetWeakPtr(),
                   save_next_cookie_running,
                   callback_pending));

    // Loop through the cookies as long as SetCookieWithOptionsAsync completes
    // synchronously.
    while (!callback_pending->data &&
           response_cookies_save_index_ < response_cookies_.size()) {
      if (CanSetCookie(
          response_cookies_[response_cookies_save_index_], &options)) {
        callback_pending->data = true;
        GetCookieStore()->SetCookieWithOptionsAsync(
            request_->url(), response_cookies_[response_cookies_save_index_],
            options, callback);
      }
      ++response_cookies_save_index_;
    }
  }

  save_next_cookie_running->data = false;

  // No save is in flight: all cookies are handled, so clear the IO_PENDING
  // status and complete the headers phase.
  if (!callback_pending->data) {
    response_cookies_.clear();
    response_cookies_save_index_ = 0;
    SetStatus(URLRequestStatus());  // Clear the IO_PENDING status
    NotifyHeadersComplete();
    return;
  }
}
748 | |
// |save_next_cookie_running| is true when the callback is bound and set to
// false when SaveNextCookie exits, allowing the callback to determine if the
// save occurred synchronously or asynchronously.
// |callback_pending| is false when the callback is invoked and will be set to
// true by the callback, allowing SaveNextCookie to detect whether the save
// occurred synchronously.
// See SaveNextCookie() for more information.
void URLRequestHttpJob::OnCookieSaved(
    scoped_refptr<SharedBoolean> save_next_cookie_running,
    scoped_refptr<SharedBoolean> callback_pending,
    bool cookie_status) {
  callback_pending->data = false;

  // If we were called synchronously, return.
  if (save_next_cookie_running->data) {
    return;
  }

  // We were called asynchronously, so trigger the next save.
  // We may have been canceled within OnSetCookie.
  if (GetStatus().is_success()) {
    SaveNextCookie();
  } else {
    NotifyCanceled();
  }
}
775 | |
776 void URLRequestHttpJob::FetchResponseCookies( | |
777 std::vector<std::string>* cookies) { | |
778 const std::string name = "Set-Cookie"; | |
779 std::string value; | |
780 | |
781 void* iter = NULL; | |
782 HttpResponseHeaders* headers = GetResponseHeaders(); | |
783 while (headers->EnumerateHeader(&iter, name, &value)) { | |
784 if (!value.empty()) | |
785 cookies->push_back(value); | |
786 } | |
787 } | |
788 | |
// NOTE: |ProcessStrictTransportSecurityHeader| and
// |ProcessPublicKeyPinsHeader| have very similar structures, by design.
// Feeds the first Strict-Transport-Security response header to the
// TransportSecurityState, but only over error-free HTTPS.
void URLRequestHttpJob::ProcessStrictTransportSecurityHeader() {
  DCHECK(response_info_);
  TransportSecurityState* security_state =
      request_->context()->transport_security_state();
  const SSLInfo& ssl_info = response_info_->ssl_info;

  // Only accept HSTS headers on HTTPS connections that have no
  // certificate errors.
  if (!ssl_info.is_valid() || IsCertStatusError(ssl_info.cert_status) ||
      !security_state)
    return;

  // http://tools.ietf.org/html/draft-ietf-websec-strict-transport-sec:
  //
  //   If a UA receives more than one STS header field in a HTTP response
  //   message over secure transport, then the UA MUST process only the
  //   first such header field.
  HttpResponseHeaders* headers = GetResponseHeaders();
  std::string value;
  if (headers->EnumerateHeader(NULL, "Strict-Transport-Security", &value))
    security_state->AddHSTSHeader(request_info_.url.host(), value);
}
813 | |
814 void URLRequestHttpJob::ProcessPublicKeyPinsHeader() { | |
815 DCHECK(response_info_); | |
816 TransportSecurityState* security_state = | |
817 request_->context()->transport_security_state(); | |
818 const SSLInfo& ssl_info = response_info_->ssl_info; | |
819 | |
820 // Only accept HPKP headers on HTTPS connections that have no | |
821 // certificate errors. | |
822 if (!ssl_info.is_valid() || IsCertStatusError(ssl_info.cert_status) || | |
823 !security_state) | |
824 return; | |
825 | |
826 // http://tools.ietf.org/html/draft-ietf-websec-key-pinning: | |
827 // | |
828 // If a UA receives more than one PKP header field in an HTTP | |
829 // response message over secure transport, then the UA MUST process | |
830 // only the first such header field. | |
831 HttpResponseHeaders* headers = GetResponseHeaders(); | |
832 std::string value; | |
833 if (headers->EnumerateHeader(NULL, "Public-Key-Pins", &value)) | |
834 security_state->AddHPKPHeader(request_info_.url.host(), value, ssl_info); | |
835 } | |
836 | |
837 void URLRequestHttpJob::OnStartCompleted(int result) { | |
838 // TODO(vadimt): Remove ScopedTracker below once crbug.com/424359 is fixed. | |
839 tracked_objects::ScopedTracker tracking_profile( | |
840 FROM_HERE_WITH_EXPLICIT_FUNCTION( | |
841 "424359 URLRequestHttpJob::OnStartCompleted")); | |
842 | |
843 RecordTimer(); | |
844 | |
845 // If the request was destroyed, then there is no more work to do. | |
846 if (!request_) | |
847 return; | |
848 | |
849 // If the job is done (due to cancellation), can just ignore this | |
850 // notification. | |
851 if (done_) | |
852 return; | |
853 | |
854 receive_headers_end_ = base::TimeTicks::Now(); | |
855 | |
856 // Clear the IO_PENDING status | |
857 SetStatus(URLRequestStatus()); | |
858 | |
859 const URLRequestContext* context = request_->context(); | |
860 | |
861 if (result == ERR_SSL_PINNED_KEY_NOT_IN_CERT_CHAIN && | |
862 transaction_->GetResponseInfo() != NULL) { | |
863 FraudulentCertificateReporter* reporter = | |
864 context->fraudulent_certificate_reporter(); | |
865 if (reporter != NULL) { | |
866 const SSLInfo& ssl_info = transaction_->GetResponseInfo()->ssl_info; | |
867 const std::string& host = request_->url().host(); | |
868 | |
869 reporter->SendReport(host, ssl_info); | |
870 } | |
871 } | |
872 | |
873 if (result == OK) { | |
874 if (transaction_ && transaction_->GetResponseInfo()) { | |
875 SetProxyServer(transaction_->GetResponseInfo()->proxy_server); | |
876 } | |
877 scoped_refptr<HttpResponseHeaders> headers = GetResponseHeaders(); | |
878 if (network_delegate()) { | |
879 // Note that |this| may not be deleted until | |
880 // |on_headers_received_callback_| or | |
881 // |NetworkDelegate::URLRequestDestroyed()| has been called. | |
882 OnCallToDelegate(); | |
883 allowed_unsafe_redirect_url_ = GURL(); | |
884 int error = network_delegate()->NotifyHeadersReceived( | |
885 request_, | |
886 on_headers_received_callback_, | |
887 headers.get(), | |
888 &override_response_headers_, | |
889 &allowed_unsafe_redirect_url_); | |
890 if (error != net::OK) { | |
891 if (error == net::ERR_IO_PENDING) { | |
892 awaiting_callback_ = true; | |
893 } else { | |
894 std::string source("delegate"); | |
895 request_->net_log().AddEvent(NetLog::TYPE_CANCELLED, | |
896 NetLog::StringCallback("source", | |
897 &source)); | |
898 OnCallToDelegateComplete(); | |
899 NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, error)); | |
900 } | |
901 return; | |
902 } | |
903 } | |
904 | |
905 SaveCookiesAndNotifyHeadersComplete(net::OK); | |
906 } else if (IsCertificateError(result)) { | |
907 // We encountered an SSL certificate error. | |
908 if (result == ERR_SSL_WEAK_SERVER_EPHEMERAL_DH_KEY || | |
909 result == ERR_SSL_PINNED_KEY_NOT_IN_CERT_CHAIN) { | |
910 // These are hard failures. They're handled separately and don't have | |
911 // the correct cert status, so set it here. | |
912 SSLInfo info(transaction_->GetResponseInfo()->ssl_info); | |
913 info.cert_status = MapNetErrorToCertStatus(result); | |
914 NotifySSLCertificateError(info, true); | |
915 } else { | |
916 // Maybe overridable, maybe not. Ask the delegate to decide. | |
917 const URLRequestContext* context = request_->context(); | |
918 TransportSecurityState* state = context->transport_security_state(); | |
919 const bool fatal = | |
920 state && state->ShouldSSLErrorsBeFatal(request_info_.url.host()); | |
921 NotifySSLCertificateError( | |
922 transaction_->GetResponseInfo()->ssl_info, fatal); | |
923 } | |
924 } else if (result == ERR_SSL_CLIENT_AUTH_CERT_NEEDED) { | |
925 NotifyCertificateRequested( | |
926 transaction_->GetResponseInfo()->cert_request_info.get()); | |
927 } else { | |
928 // Even on an error, there may be useful information in the response | |
929 // info (e.g. whether there's a cached copy). | |
930 if (transaction_.get()) | |
931 response_info_ = transaction_->GetResponseInfo(); | |
932 NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, result)); | |
933 } | |
934 } | |
935 | |
936 void URLRequestHttpJob::OnHeadersReceivedCallback(int result) { | |
937 awaiting_callback_ = false; | |
938 | |
939 // Check that there are no callbacks to already canceled requests. | |
940 DCHECK_NE(URLRequestStatus::CANCELED, GetStatus().status()); | |
941 | |
942 SaveCookiesAndNotifyHeadersComplete(result); | |
943 } | |
944 | |
// Completion callback for an asynchronous transaction_->Read(). |result| is
// the byte count (> 0), 0 for EOF, or a net error (< 0).
void URLRequestHttpJob::OnReadCompleted(int result) {
  // TODO(vadimt): Remove ScopedTracker below once crbug.com/424359 is fixed.
  tracked_objects::ScopedTracker tracking_profile(
      FROM_HERE_WITH_EXPLICIT_FUNCTION(
          "424359 URLRequestHttpJob::OnReadCompleted"));

  read_in_progress_ = false;

  // Treat an exact content-length mismatch as a clean EOF (see
  // ShouldFixMismatchedContentLength() for the rationale).
  if (ShouldFixMismatchedContentLength(result))
    result = OK;

  if (result == OK) {
    // EOF: the request is done.
    NotifyDone(URLRequestStatus());
  } else if (result < 0) {
    NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, result));
  } else {
    // Bytes were read. Clear the IO_PENDING status.
    SetStatus(URLRequestStatus());
  }

  NotifyReadComplete(result);
}
967 | |
// Re-issues the transaction with |credentials| after a 401/407 challenge.
void URLRequestHttpJob::RestartTransactionWithAuth(
    const AuthCredentials& credentials) {
  auth_credentials_ = credentials;

  // These will be reset in OnStartCompleted.
  response_info_ = NULL;
  receive_headers_end_ = base::TimeTicks();
  response_cookies_.clear();

  ResetTimer();

  // Update the cookies, since the cookie store may have been updated from the
  // headers in the 401/407. Since cookies were already appended to
  // extra_headers, we need to strip them out before adding them again.
  request_info_.extra_headers.RemoveHeader(HttpRequestHeaders::kCookie);

  AddCookieHeaderAndStart();
}
986 | |
// Attaches |upload| as the request body stream. Only legal before the
// transaction has been created. A raw pointer is stored — presumably the
// caller retains ownership; confirm against URLRequest's upload handling.
void URLRequestHttpJob::SetUpload(UploadDataStream* upload) {
  DCHECK(!transaction_.get()) << "cannot change once started";
  request_info_.upload_data_stream = upload;
}
991 | |
// Copies |headers| into the request's extra headers. Only legal before the
// transaction has been created.
void URLRequestHttpJob::SetExtraRequestHeaders(
    const HttpRequestHeaders& headers) {
  DCHECK(!transaction_.get()) << "cannot change once started";
  request_info_.extra_headers.CopyFrom(headers);
}
997 | |
998 LoadState URLRequestHttpJob::GetLoadState() const { | |
999 // TODO(pkasting): Remove ScopedTracker below once crbug.com/455952 is | |
1000 // fixed. | |
1001 tracked_objects::ScopedTracker tracking_profile( | |
1002 FROM_HERE_WITH_EXPLICIT_FUNCTION( | |
1003 "455952 URLRequestHttpJob::GetLoadState")); | |
1004 return transaction_.get() ? | |
1005 transaction_->GetLoadState() : LOAD_STATE_IDLE; | |
1006 } | |
1007 | |
1008 UploadProgress URLRequestHttpJob::GetUploadProgress() const { | |
1009 return transaction_.get() ? | |
1010 transaction_->GetUploadProgress() : UploadProgress(); | |
1011 } | |
1012 | |
1013 bool URLRequestHttpJob::GetMimeType(std::string* mime_type) const { | |
1014 DCHECK(transaction_.get()); | |
1015 | |
1016 if (!response_info_) | |
1017 return false; | |
1018 | |
1019 HttpResponseHeaders* headers = GetResponseHeaders(); | |
1020 if (!headers) | |
1021 return false; | |
1022 return headers->GetMimeType(mime_type); | |
1023 } | |
1024 | |
1025 bool URLRequestHttpJob::GetCharset(std::string* charset) { | |
1026 DCHECK(transaction_.get()); | |
1027 | |
1028 if (!response_info_) | |
1029 return false; | |
1030 | |
1031 return GetResponseHeaders()->GetCharset(charset); | |
1032 } | |
1033 | |
1034 void URLRequestHttpJob::GetResponseInfo(HttpResponseInfo* info) { | |
1035 DCHECK(request_); | |
1036 | |
1037 if (response_info_) { | |
1038 DCHECK(transaction_.get()); | |
1039 | |
1040 *info = *response_info_; | |
1041 if (override_response_headers_.get()) | |
1042 info->headers = override_response_headers_; | |
1043 } | |
1044 } | |
1045 | |
1046 void URLRequestHttpJob::GetLoadTimingInfo( | |
1047 LoadTimingInfo* load_timing_info) const { | |
1048 // If haven't made it far enough to receive any headers, don't return | |
1049 // anything. This makes for more consistent behavior in the case of errors. | |
1050 if (!transaction_ || receive_headers_end_.is_null()) | |
1051 return; | |
1052 if (transaction_->GetLoadTimingInfo(load_timing_info)) | |
1053 load_timing_info->receive_headers_end = receive_headers_end_; | |
1054 } | |
1055 | |
1056 bool URLRequestHttpJob::GetResponseCookies(std::vector<std::string>* cookies) { | |
1057 DCHECK(transaction_.get()); | |
1058 | |
1059 if (!response_info_) | |
1060 return false; | |
1061 | |
1062 // TODO(darin): Why are we extracting response cookies again? Perhaps we | |
1063 // should just leverage response_cookies_. | |
1064 | |
1065 cookies->clear(); | |
1066 FetchResponseCookies(cookies); | |
1067 return true; | |
1068 } | |
1069 | |
1070 int URLRequestHttpJob::GetResponseCode() const { | |
1071 DCHECK(transaction_.get()); | |
1072 | |
1073 if (!response_info_) | |
1074 return -1; | |
1075 | |
1076 return GetResponseHeaders()->response_code(); | |
1077 } | |
1078 | |
1079 Filter* URLRequestHttpJob::SetupFilter() const { | |
1080 DCHECK(transaction_.get()); | |
1081 if (!response_info_) | |
1082 return NULL; | |
1083 | |
1084 std::vector<Filter::FilterType> encoding_types; | |
1085 std::string encoding_type; | |
1086 HttpResponseHeaders* headers = GetResponseHeaders(); | |
1087 void* iter = NULL; | |
1088 while (headers->EnumerateHeader(&iter, "Content-Encoding", &encoding_type)) { | |
1089 encoding_types.push_back(Filter::ConvertEncodingToType(encoding_type)); | |
1090 } | |
1091 | |
1092 // Even if encoding types are empty, there is a chance that we need to add | |
1093 // some decoding, as some proxies strip encoding completely. In such cases, | |
1094 // we may need to add (for example) SDCH filtering (when the context suggests | |
1095 // it is appropriate). | |
1096 Filter::FixupEncodingTypes(*filter_context_, &encoding_types); | |
1097 | |
1098 return !encoding_types.empty() | |
1099 ? Filter::Factory(encoding_types, *filter_context_) : NULL; | |
1100 } | |
1101 | |
1102 bool URLRequestHttpJob::CopyFragmentOnRedirect(const GURL& location) const { | |
1103 // Allow modification of reference fragments by default, unless | |
1104 // |allowed_unsafe_redirect_url_| is set and equal to the redirect URL. | |
1105 // When this is the case, we assume that the network delegate has set the | |
1106 // desired redirect URL (with or without fragment), so it must not be changed | |
1107 // any more. | |
1108 return !allowed_unsafe_redirect_url_.is_valid() || | |
1109 allowed_unsafe_redirect_url_ != location; | |
1110 } | |
1111 | |
1112 bool URLRequestHttpJob::IsSafeRedirect(const GURL& location) { | |
1113 // HTTP is always safe. | |
1114 // TODO(pauljensen): Remove once crbug.com/146591 is fixed. | |
1115 if (location.is_valid() && | |
1116 (location.scheme() == "http" || location.scheme() == "https")) { | |
1117 return true; | |
1118 } | |
1119 // Delegates may mark a URL as safe for redirection. | |
1120 if (allowed_unsafe_redirect_url_.is_valid() && | |
1121 allowed_unsafe_redirect_url_ == location) { | |
1122 return true; | |
1123 } | |
1124 // Query URLRequestJobFactory as to whether |location| would be safe to | |
1125 // redirect to. | |
1126 return request_->context()->job_factory() && | |
1127 request_->context()->job_factory()->IsSafeRedirectTarget(location); | |
1128 } | |
1129 | |
1130 bool URLRequestHttpJob::NeedsAuth() { | |
1131 int code = GetResponseCode(); | |
1132 if (code == -1) | |
1133 return false; | |
1134 | |
1135 // Check if we need either Proxy or WWW Authentication. This could happen | |
1136 // because we either provided no auth info, or provided incorrect info. | |
1137 switch (code) { | |
1138 case 407: | |
1139 if (proxy_auth_state_ == AUTH_STATE_CANCELED) | |
1140 return false; | |
1141 proxy_auth_state_ = AUTH_STATE_NEED_AUTH; | |
1142 return true; | |
1143 case 401: | |
1144 if (server_auth_state_ == AUTH_STATE_CANCELED) | |
1145 return false; | |
1146 server_auth_state_ = AUTH_STATE_NEED_AUTH; | |
1147 return true; | |
1148 } | |
1149 return false; | |
1150 } | |
1151 | |
1152 void URLRequestHttpJob::GetAuthChallengeInfo( | |
1153 scoped_refptr<AuthChallengeInfo>* result) { | |
1154 DCHECK(transaction_.get()); | |
1155 DCHECK(response_info_); | |
1156 | |
1157 // sanity checks: | |
1158 DCHECK(proxy_auth_state_ == AUTH_STATE_NEED_AUTH || | |
1159 server_auth_state_ == AUTH_STATE_NEED_AUTH); | |
1160 DCHECK((GetResponseHeaders()->response_code() == HTTP_UNAUTHORIZED) || | |
1161 (GetResponseHeaders()->response_code() == | |
1162 HTTP_PROXY_AUTHENTICATION_REQUIRED)); | |
1163 | |
1164 *result = response_info_->auth_challenge; | |
1165 } | |
1166 | |
// Supplies |credentials| for the pending auth challenge and restarts the
// transaction with them.
void URLRequestHttpJob::SetAuth(const AuthCredentials& credentials) {
  DCHECK(transaction_.get());

  // Proxy gets set first, then WWW.
  if (proxy_auth_state_ == AUTH_STATE_NEED_AUTH) {
    proxy_auth_state_ = AUTH_STATE_HAVE_AUTH;
  } else {
    DCHECK_EQ(server_auth_state_, AUTH_STATE_NEED_AUTH);
    server_auth_state_ = AUTH_STATE_HAVE_AUTH;
  }

  RestartTransactionWithAuth(credentials);
}
1180 | |
1181 void URLRequestHttpJob::CancelAuth() { | |
1182 // Proxy gets set first, then WWW. | |
1183 if (proxy_auth_state_ == AUTH_STATE_NEED_AUTH) { | |
1184 proxy_auth_state_ = AUTH_STATE_CANCELED; | |
1185 } else { | |
1186 DCHECK_EQ(server_auth_state_, AUTH_STATE_NEED_AUTH); | |
1187 server_auth_state_ = AUTH_STATE_CANCELED; | |
1188 } | |
1189 | |
1190 // These will be reset in OnStartCompleted. | |
1191 response_info_ = NULL; | |
1192 receive_headers_end_ = base::TimeTicks::Now(); | |
1193 response_cookies_.clear(); | |
1194 | |
1195 ResetTimer(); | |
1196 | |
1197 // OK, let the consumer read the error page... | |
1198 // | |
1199 // Because we set the AUTH_STATE_CANCELED flag, NeedsAuth will return false, | |
1200 // which will cause the consumer to receive OnResponseStarted instead of | |
1201 // OnAuthRequired. | |
1202 // | |
1203 // We have to do this via InvokeLater to avoid "recursing" the consumer. | |
1204 // | |
1205 base::MessageLoop::current()->PostTask( | |
1206 FROM_HERE, | |
1207 base::Bind(&URLRequestHttpJob::OnStartCompleted, | |
1208 weak_factory_.GetWeakPtr(), OK)); | |
1209 } | |
1210 | |
1211 void URLRequestHttpJob::ContinueWithCertificate( | |
1212 X509Certificate* client_cert) { | |
1213 DCHECK(transaction_.get()); | |
1214 | |
1215 DCHECK(!response_info_) << "should not have a response yet"; | |
1216 receive_headers_end_ = base::TimeTicks(); | |
1217 | |
1218 ResetTimer(); | |
1219 | |
1220 // No matter what, we want to report our status as IO pending since we will | |
1221 // be notifying our consumer asynchronously via OnStartCompleted. | |
1222 SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0)); | |
1223 | |
1224 int rv = transaction_->RestartWithCertificate(client_cert, start_callback_); | |
1225 if (rv == ERR_IO_PENDING) | |
1226 return; | |
1227 | |
1228 // The transaction started synchronously, but we need to notify the | |
1229 // URLRequest delegate via the message loop. | |
1230 base::MessageLoop::current()->PostTask( | |
1231 FROM_HERE, | |
1232 base::Bind(&URLRequestHttpJob::OnStartCompleted, | |
1233 weak_factory_.GetWeakPtr(), rv)); | |
1234 } | |
1235 | |
1236 void URLRequestHttpJob::ContinueDespiteLastError() { | |
1237 // If the transaction was destroyed, then the job was cancelled. | |
1238 if (!transaction_.get()) | |
1239 return; | |
1240 | |
1241 DCHECK(!response_info_) << "should not have a response yet"; | |
1242 receive_headers_end_ = base::TimeTicks(); | |
1243 | |
1244 ResetTimer(); | |
1245 | |
1246 // No matter what, we want to report our status as IO pending since we will | |
1247 // be notifying our consumer asynchronously via OnStartCompleted. | |
1248 SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0)); | |
1249 | |
1250 int rv = transaction_->RestartIgnoringLastError(start_callback_); | |
1251 if (rv == ERR_IO_PENDING) | |
1252 return; | |
1253 | |
1254 // The transaction started synchronously, but we need to notify the | |
1255 // URLRequest delegate via the message loop. | |
1256 base::MessageLoop::current()->PostTask( | |
1257 FROM_HERE, | |
1258 base::Bind(&URLRequestHttpJob::OnStartCompleted, | |
1259 weak_factory_.GetWeakPtr(), rv)); | |
1260 } | |
1261 | |
// Resumes a transaction whose start was deferred at network-start time.
void URLRequestHttpJob::ResumeNetworkStart() {
  DCHECK(transaction_.get());
  transaction_->ResumeNetworkStart();
}
1266 | |
1267 bool URLRequestHttpJob::ShouldFixMismatchedContentLength(int rv) const { | |
1268 // Some servers send the body compressed, but specify the content length as | |
1269 // the uncompressed size. Although this violates the HTTP spec we want to | |
1270 // support it (as IE and FireFox do), but *only* for an exact match. | |
1271 // See http://crbug.com/79694. | |
1272 if (rv == net::ERR_CONTENT_LENGTH_MISMATCH || | |
1273 rv == net::ERR_INCOMPLETE_CHUNKED_ENCODING) { | |
1274 if (request_ && request_->response_headers()) { | |
1275 int64 expected_length = request_->response_headers()->GetContentLength(); | |
1276 VLOG(1) << __FUNCTION__ << "() " | |
1277 << "\"" << request_->url().spec() << "\"" | |
1278 << " content-length = " << expected_length | |
1279 << " pre total = " << prefilter_bytes_read() | |
1280 << " post total = " << postfilter_bytes_read(); | |
1281 if (postfilter_bytes_read() == expected_length) { | |
1282 // Clear the error. | |
1283 return true; | |
1284 } | |
1285 } | |
1286 } | |
1287 return false; | |
1288 } | |
1289 | |
// Reads up to |buf_size| response bytes into |buf|. Returns true with
// |*bytes_read| set on synchronous success (0 means EOF); returns false with
// IO_PENDING status when the read will complete via OnReadCompleted(), or
// false after NotifyDone() on error.
bool URLRequestHttpJob::ReadRawData(IOBuffer* buf, int buf_size,
                                    int* bytes_read) {
  // TODO(vadimt): Remove ScopedTracker below once crbug.com/423948 is fixed.
  tracked_objects::ScopedTracker tracking_profile1(
      FROM_HERE_WITH_EXPLICIT_FUNCTION(
          "423948 URLRequestHttpJob::ReadRawData1"));

  DCHECK_NE(buf_size, 0);
  DCHECK(bytes_read);
  DCHECK(!read_in_progress_);

  int rv = transaction_->Read(
      buf, buf_size,
      base::Bind(&URLRequestHttpJob::OnReadCompleted, base::Unretained(this)));

  // Treat an exact content-length mismatch as a clean EOF (see
  // ShouldFixMismatchedContentLength()).
  if (ShouldFixMismatchedContentLength(rv))
    rv = 0;

  if (rv >= 0) {
    // Synchronous completion: rv bytes read, 0 meaning end-of-stream.
    *bytes_read = rv;
    if (!rv) {
      // TODO(vadimt): Remove ScopedTracker below once crbug.com/423948 is
      // fixed.
      tracked_objects::ScopedTracker tracking_profile2(
          FROM_HERE_WITH_EXPLICIT_FUNCTION(
              "423948 URLRequestHttpJob::ReadRawData2"));

      DoneWithRequest(FINISHED);
    }
    return true;
  }

  if (rv == ERR_IO_PENDING) {
    // The read will complete asynchronously via OnReadCompleted().
    read_in_progress_ = true;
    SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));
  } else {
    NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, rv));
  }

  return false;
}
1331 | |
1332 void URLRequestHttpJob::StopCaching() { | |
1333 if (transaction_.get()) | |
1334 transaction_->StopCaching(); | |
1335 } | |
1336 | |
1337 bool URLRequestHttpJob::GetFullRequestHeaders( | |
1338 HttpRequestHeaders* headers) const { | |
1339 if (!transaction_) | |
1340 return false; | |
1341 | |
1342 return transaction_->GetFullRequestHeaders(headers); | |
1343 } | |
1344 | |
1345 int64 URLRequestHttpJob::GetTotalReceivedBytes() const { | |
1346 if (!transaction_) | |
1347 return 0; | |
1348 | |
1349 return transaction_->GetTotalReceivedBytes(); | |
1350 } | |
1351 | |
1352 void URLRequestHttpJob::DoneReading() { | |
1353 if (transaction_) { | |
1354 transaction_->DoneReading(); | |
1355 } | |
1356 DoneWithRequest(FINISHED); | |
1357 } | |
1358 | |
// Called when the consumer is done with a redirect response. Decides whether
// the response may be committed to the cache or caching must be aborted.
void URLRequestHttpJob::DoneReadingRedirectResponse() {
  if (transaction_) {
    if (transaction_->GetResponseInfo()->headers->IsRedirect(NULL)) {
      // If the original headers indicate a redirect, go ahead and cache the
      // response, even if the |override_response_headers_| are a redirect to
      // another location.
      transaction_->DoneReading();
    } else {
      // Otherwise, |override_response_headers_| must be non-NULL and contain
      // bogus headers indicating a redirect.
      DCHECK(override_response_headers_.get());
      DCHECK(override_response_headers_->IsRedirect(NULL));
      transaction_->StopCaching();
    }
  }
  DoneWithRequest(FINISHED);
}
1376 | |
1377 HostPortPair URLRequestHttpJob::GetSocketAddress() const { | |
1378 return response_info_ ? response_info_->socket_address : HostPortPair(); | |
1379 } | |
1380 | |
1381 void URLRequestHttpJob::RecordTimer() { | |
1382 if (request_creation_time_.is_null()) { | |
1383 NOTREACHED() | |
1384 << "The same transaction shouldn't start twice without new timing."; | |
1385 return; | |
1386 } | |
1387 | |
1388 base::TimeDelta to_start = base::Time::Now() - request_creation_time_; | |
1389 request_creation_time_ = base::Time(); | |
1390 | |
1391 UMA_HISTOGRAM_MEDIUM_TIMES("Net.HttpTimeToFirstByte", to_start); | |
1392 } | |
1393 | |
// Arms the time-to-first-byte timer; RecordTimer() later consumes the
// timestamp. Calling this while a measurement is pending is a bug.
void URLRequestHttpJob::ResetTimer() {
  if (!request_creation_time_.is_null()) {
    NOTREACHED()
        << "The timer was reset before it was recorded.";
    return;
  }
  request_creation_time_ = base::Time::Now();
}
1402 | |
1403 void URLRequestHttpJob::UpdatePacketReadTimes() { | |
1404 if (!packet_timing_enabled_) | |
1405 return; | |
1406 | |
1407 if (filter_input_byte_count() <= bytes_observed_in_packets_) { | |
1408 DCHECK_EQ(filter_input_byte_count(), bytes_observed_in_packets_); | |
1409 return; // No new bytes have arrived. | |
1410 } | |
1411 | |
1412 base::Time now(base::Time::Now()); | |
1413 if (!bytes_observed_in_packets_) | |
1414 request_time_snapshot_ = now; | |
1415 final_packet_time_ = now; | |
1416 | |
1417 bytes_observed_in_packets_ = filter_input_byte_count(); | |
1418 } | |
1419 | |
// Emits SDCH-related UMA histograms for the byte counts and timing gathered
// by UpdatePacketReadTimes(). No-op unless packet timing is enabled and at
// least one packet was observed.
void URLRequestHttpJob::RecordPacketStats(
    FilterContext::StatisticSelector statistic) const {
  if (!packet_timing_enabled_ || (final_packet_time_ == base::Time()))
    return;

  // Time from the first observed packet to the last.
  base::TimeDelta duration = final_packet_time_ - request_time_snapshot_;
  switch (statistic) {
    case FilterContext::SDCH_DECODE: {
      UMA_HISTOGRAM_CUSTOM_COUNTS("Sdch3.Network_Decode_Bytes_Processed_b",
          static_cast<int>(bytes_observed_in_packets_), 500, 100000, 100);
      return;
    }
    case FilterContext::SDCH_PASSTHROUGH: {
      // Despite advertising a dictionary, we handled non-sdch compressed
      // content.
      return;
    }

    case FilterContext::SDCH_EXPERIMENT_DECODE: {
      UMA_HISTOGRAM_CUSTOM_TIMES("Sdch3.Experiment3_Decode",
                                  duration,
                                  base::TimeDelta::FromMilliseconds(20),
                                  base::TimeDelta::FromMinutes(10), 100);
      return;
    }
    case FilterContext::SDCH_EXPERIMENT_HOLDBACK: {
      UMA_HISTOGRAM_CUSTOM_TIMES("Sdch3.Experiment3_Holdback",
                                  duration,
                                  base::TimeDelta::FromMilliseconds(20),
                                  base::TimeDelta::FromMinutes(10), 100);
      return;
    }
    default:
      NOTREACHED();
      return;
  }
}
1457 | |
// Records total-time UMA histograms for the job, split by completion cause
// and cache state. Safe to call repeatedly: the first call consumes
// |start_time_| so subsequent calls are no-ops.
void URLRequestHttpJob::RecordPerfHistograms(CompletionCause reason) {
  if (start_time_.is_null())
    return;

  base::TimeDelta total_time = base::TimeTicks::Now() - start_time_;
  UMA_HISTOGRAM_TIMES("Net.HttpJob.TotalTime", total_time);

  if (reason == FINISHED) {
    UMA_HISTOGRAM_TIMES("Net.HttpJob.TotalTimeSuccess", total_time);
  } else {
    UMA_HISTOGRAM_TIMES("Net.HttpJob.TotalTimeCancel", total_time);
  }

  if (response_info_) {
    if (response_info_->was_cached) {
      UMA_HISTOGRAM_TIMES("Net.HttpJob.TotalTimeCached", total_time);
    } else  {
      UMA_HISTOGRAM_TIMES("Net.HttpJob.TotalTimeNotCached", total_time);
    }
  }

  // Prefetch bytes are only interesting when they actually hit the network.
  if (request_info_.load_flags & LOAD_PREFETCH && !request_->was_cached())
    UMA_HISTOGRAM_COUNTS("Net.Prefetch.PrefilterBytesReadFromNetwork",
                         prefilter_bytes_read());

  // Clear so a second completion does not double-count.
  start_time_ = base::TimeTicks();
}
1485 | |
1486 void URLRequestHttpJob::DoneWithRequest(CompletionCause reason) { | |
1487 if (done_) | |
1488 return; | |
1489 done_ = true; | |
1490 RecordPerfHistograms(reason); | |
1491 if (reason == FINISHED) { | |
1492 request_->set_received_response_content_length(prefilter_bytes_read()); | |
1493 } | |
1494 } | |
1495 | |
1496 HttpResponseHeaders* URLRequestHttpJob::GetResponseHeaders() const { | |
1497 DCHECK(transaction_.get()); | |
1498 DCHECK(transaction_->GetResponseInfo()); | |
1499 return override_response_headers_.get() ? | |
1500 override_response_headers_.get() : | |
1501 transaction_->GetResponseInfo()->headers.get(); | |
1502 } | |
1503 | |
// Called when the owning URLRequest is going away; no delegate callback is
// expected after this point.
void URLRequestHttpJob::NotifyURLRequestDestroyed() {
  awaiting_callback_ = false;
}
1507 | |
1508 } // namespace net | |
OLD | NEW |