Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(745)

Side by Side Diff: net/url_request/url_request_job.cc

Issue 992733002: Remove //net (except for Android test stuff) and sdch (Closed) Base URL: git@github.com:domokit/mojo.git@master
Patch Set: Created 5 years, 9 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « net/url_request/url_request_job.h ('k') | net/url_request/url_request_job_factory.h » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
(Empty)
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "net/url_request/url_request_job.h"
6
7 #include "base/bind.h"
8 #include "base/compiler_specific.h"
9 #include "base/message_loop/message_loop.h"
10 #include "base/power_monitor/power_monitor.h"
11 #include "base/profiler/scoped_tracker.h"
12 #include "base/strings/string_number_conversions.h"
13 #include "base/strings/string_util.h"
14 #include "base/values.h"
15 #include "net/base/auth.h"
16 #include "net/base/host_port_pair.h"
17 #include "net/base/io_buffer.h"
18 #include "net/base/load_states.h"
19 #include "net/base/net_errors.h"
20 #include "net/base/network_delegate.h"
21 #include "net/filter/filter.h"
22 #include "net/http/http_response_headers.h"
23
24 namespace net {
25
26 namespace {
27
28 // Callback for TYPE_URL_REQUEST_FILTERS_SET net-internals event.
29 base::Value* FiltersSetCallback(Filter* filter,
30 NetLog::LogLevel /* log_level */) {
31 base::DictionaryValue* event_params = new base::DictionaryValue();
32 event_params->SetString("filters", filter->OrderedFilterList());
33 return event_params;
34 }
35
// Returns the HTTP method to use after following a redirect with
// |http_status_code|. For 303 redirects, all request methods except HEAD are
// converted to GET, as per the latest httpbis draft. The draft also allows
// POST requests to be converted to GETs when following 301/302 redirects, for
// historical reasons. Most major browsers do this and so shall we. Both
// RFC 2616 and the httpbis draft say to prompt the user to confirm the
// generation of new requests, other than GET and HEAD requests, but IE omits
// these prompts and so shall we.
// See:
// https://tools.ietf.org/html/draft-ietf-httpbis-p2-semantics-17#section-7.3
std::string ComputeMethodForRedirect(const std::string& method,
                                     int http_status_code) {
  const bool see_other_rewrites = http_status_code == 303 && method != "HEAD";
  const bool historical_post_rewrite =
      (http_status_code == 301 || http_status_code == 302) &&
      method == "POST";
  if (see_other_rewrites || historical_post_rewrite)
    return "GET";
  return method;
}
54
55 } // namespace
56
// Constructs a job bound to |request| (not owned). Registers as a power
// observer (when a PowerMonitor exists) so the job can cancel itself on
// system suspend; see OnSuspend(). The observer is removed in the destructor.
URLRequestJob::URLRequestJob(URLRequest* request,
                             NetworkDelegate* network_delegate)
    : request_(request),
      done_(false),
      prefilter_bytes_read_(0),
      postfilter_bytes_read_(0),
      filter_input_byte_count_(0),
      filter_needs_more_output_space_(false),
      filtered_read_buffer_len_(0),
      has_handled_response_(false),
      expected_content_size_(-1),  // -1 means "unknown" until headers arrive.
      network_delegate_(network_delegate),
      weak_factory_(this) {
  base::PowerMonitor* power_monitor = base::PowerMonitor::Get();
  if (power_monitor)
    power_monitor->AddObserver(this);
}
74
// Base implementation ignores the upload body; subclasses that support
// request bodies override this.
void URLRequestJob::SetUpload(UploadDataStream* upload) {
}
77
// Base implementation ignores extra headers; subclasses that send request
// headers override this.
void URLRequestJob::SetExtraRequestHeaders(const HttpRequestHeaders& headers) {
}
80
// Base implementation ignores priority changes; subclasses with prioritized
// transports override this.
void URLRequestJob::SetPriority(RequestPriority priority) {
}
83
// Cancels the job. Invalidating weak pointers drops any callbacks already
// posted via |weak_factory_| (e.g. CompleteNotifyDone).
void URLRequestJob::Kill() {
  weak_factory_.InvalidateWeakPtrs();
  // Make sure the request is notified that we are done. We assume that the
  // request took care of setting its error status before calling Kill.
  if (request_)
    NotifyCanceled();
}
91
// Severs the link to the owning URLRequest. Afterwards, notification helpers
// that check |request_| become no-ops and GetStatus() reports cancellation.
void URLRequestJob::DetachRequest() {
  request_ = NULL;
}
95
// This function calls ReadData to get stream data. If a filter exists, passes
// the data to the attached filter. Then returns the output from filter back to
// the caller.
// Returns true when data is available synchronously (|*bytes_read| == 0 means
// EOF); returns false when IO is pending or an error occurred.
bool URLRequestJob::Read(IOBuffer* buf, int buf_size, int *bytes_read) {
  bool rv = false;

  DCHECK_LT(buf_size, 1000000);  // Sanity check.
  DCHECK(buf);
  DCHECK(bytes_read);
  // A previous filtered read must have finished and released the buffer.
  DCHECK(filtered_read_buffer_.get() == NULL);
  DCHECK_EQ(0, filtered_read_buffer_len_);

  *bytes_read = 0;

  // Skip Filter if not present.
  if (!filter_.get()) {
    rv = ReadRawDataHelper(buf, buf_size, bytes_read);
  } else {
    // Save the caller's buffers while we do IO
    // in the filter's buffers.
    filtered_read_buffer_ = buf;
    filtered_read_buffer_len_ = buf_size;

    // TODO(vadimt): Remove ScopedTracker below once crbug.com/423948 is fixed.
    tracked_objects::ScopedTracker tracking_profile2(
        FROM_HERE_WITH_EXPLICIT_FUNCTION("423948 URLRequestJob::Read2"));

    if (ReadFilteredData(bytes_read)) {
      rv = true;  // We have data to return.

      // It is fine to call DoneReading even if ReadFilteredData receives 0
      // bytes from the net, but we avoid making that call if we know for
      // sure that's the case (ReadRawDataHelper path).
      // TODO(vadimt): Remove ScopedTracker below once crbug.com/423948 is
      // fixed.
      tracked_objects::ScopedTracker tracking_profile3(
          FROM_HERE_WITH_EXPLICIT_FUNCTION("423948 URLRequestJob::Read3"));

      if (*bytes_read == 0)
        DoneReading();
    } else {
      rv = false;  // Error, or a new IO is pending.
    }
  }

  // TODO(vadimt): Remove ScopedTracker below once crbug.com/423948 is fixed.
  tracked_objects::ScopedTracker tracking_profile4(
      FROM_HERE_WITH_EXPLICIT_FUNCTION("423948 URLRequestJob::Read4"));

  // A successful zero-byte read is EOF: report completion with the default
  // (success) status.
  if (rv && *bytes_read == 0)
    NotifyDone(URLRequestStatus());
  return rv;
}
149
// Base implementation has no cache to stop writing to; cache-backed jobs
// override this.
void URLRequestJob::StopCaching() {
  // Nothing to do here.
}
153
// Returns false: no headers were captured for |headers|.
bool URLRequestJob::GetFullRequestHeaders(HttpRequestHeaders* headers) const {
  // Most job types don't send request headers.
  return false;
}
158
// Base implementation reports no network bytes received; network-backed jobs
// override this.
int64 URLRequestJob::GetTotalReceivedBytes() const {
  return 0;
}
162
// Base implementation always reports an idle job; subclasses report their
// actual progress state.
LoadState URLRequestJob::GetLoadState() const {
  return LOAD_STATE_IDLE;
}
166
// Base implementation reports default (zero) upload progress; jobs with
// request bodies override this.
UploadProgress URLRequestJob::GetUploadProgress() const {
  return UploadProgress();
}
170
// Returns false: no charset is known for this response.
bool URLRequestJob::GetCharset(std::string* charset) {
  return false;
}
174
// Base implementation leaves |info| untouched; HTTP-like jobs fill it in.
void URLRequestJob::GetResponseInfo(HttpResponseInfo* info) {
}
177
// Base implementation leaves |load_timing_info| untouched.
void URLRequestJob::GetLoadTimingInfo(LoadTimingInfo* load_timing_info) const {
  // Only certain request types return more than just request start times.
}
181
// Returns false: this job type exposes no response cookies.
bool URLRequestJob::GetResponseCookies(std::vector<std::string>* cookies) {
  return false;
}
185
// Base implementation installs no content-decoding filter. Subclasses return
// an owned Filter* (taken over by NotifyHeadersComplete) when the response
// body needs decoding.
Filter* URLRequestJob::SetupFilter() const {
  return NULL;
}
189
190 bool URLRequestJob::IsRedirectResponse(GURL* location,
191 int* http_status_code) {
192 // For non-HTTP jobs, headers will be null.
193 HttpResponseHeaders* headers = request_->response_headers();
194 if (!headers)
195 return false;
196
197 std::string value;
198 if (!headers->IsRedirect(&value))
199 return false;
200
201 *location = request_->url().Resolve(value);
202 *http_status_code = headers->response_code();
203 return true;
204 }
205
// Base implementation always allows the URL fragment to be carried over to
// the redirect target (see ComputeRedirectInfo).
bool URLRequestJob::CopyFragmentOnRedirect(const GURL& location) const {
  return true;
}
209
// Base implementation treats every redirect target as safe; subclasses can
// veto specific schemes/locations.
bool URLRequestJob::IsSafeRedirect(const GURL& location) {
  return true;
}
213
// Base implementation never requires authentication. Subclasses that return
// true must also implement the auth methods below.
bool URLRequestJob::NeedsAuth() {
  return false;
}
217
// Supplies the pending auth challenge. Must be overridden by any subclass
// whose NeedsAuth() can return true.
void URLRequestJob::GetAuthChallengeInfo(
    scoped_refptr<AuthChallengeInfo>* auth_info) {
  // This will only be called if NeedsAuth() returns true, in which
  // case the derived class should implement this!
  NOTREACHED();
}
224
// Applies credentials for a pending auth challenge. Must be overridden by any
// subclass whose NeedsAuth() can return true.
void URLRequestJob::SetAuth(const AuthCredentials& credentials) {
  // This will only be called if NeedsAuth() returns true, in which
  // case the derived class should implement this!
  NOTREACHED();
}
230
// Declines a pending auth challenge. Must be overridden by any subclass whose
// NeedsAuth() can return true.
void URLRequestJob::CancelAuth() {
  // This will only be called if NeedsAuth() returns true, in which
  // case the derived class should implement this!
  NOTREACHED();
}
236
// Resumes a request paused on a client-certificate prompt. Only meaningful
// for SSL-capable subclasses, which must override this.
void URLRequestJob::ContinueWithCertificate(
    X509Certificate* client_cert) {
  // The derived class should implement this!
  NOTREACHED();
}
242
// Resumes a request after the caller chose to ignore the last error (e.g. a
// certificate error). Subclasses that can raise recoverable errors override
// this.
void URLRequestJob::ContinueDespiteLastError() {
  // Implementations should know how to recover from errors they generate.
  // If this code was reached, we are trying to recover from an error that
  // we don't know how to recover from.
  NOTREACHED();
}
249
// Resumes a redirect that the delegate chose to defer when it was reported
// in NotifyHeadersComplete().
void URLRequestJob::FollowDeferredRedirect() {
  // status_code == -1 (the RedirectInfo default) means no redirect is pending.
  DCHECK_NE(-1, deferred_redirect_info_.status_code);

  // NOTE: deferred_redirect_info_ may be invalid, and attempting to follow it
  // will fail inside FollowRedirect. The DCHECK above asserts that we called
  // OnReceivedRedirect.

  // It is also possible that FollowRedirect will drop the last reference to
  // this job, so we need to reset our members before calling it.

  RedirectInfo redirect_info = deferred_redirect_info_;
  deferred_redirect_info_ = RedirectInfo();
  FollowRedirect(redirect_info);
}
264
// Resumes a network start that was deferred via NotifyBeforeNetworkStart().
void URLRequestJob::ResumeNetworkStart() {
  // This should only be called for HTTP Jobs, and implemented in the derived
  // class.
  NOTREACHED();
}
270
// Returns false: no MIME type is known for this response.
bool URLRequestJob::GetMimeType(std::string* mime_type) const {
  return false;
}
274
// Returns -1: this job type has no HTTP response code.
int URLRequestJob::GetResponseCode() const {
  return -1;
}
278
// Returns an empty HostPortPair: this job type has no remote endpoint.
HostPortPair URLRequestJob::GetSocketAddress() const {
  return HostPortPair();
}
282
// base::PowerObserver: cancel the job when the system suspends, since the
// underlying transport will not survive the suspend.
void URLRequestJob::OnSuspend() {
  Kill();
}
286
// Hook invoked when the owning URLRequest is being destroyed; base
// implementation has nothing to clean up.
void URLRequestJob::NotifyURLRequestDestroyed() {
}
289
290 // static
291 GURL URLRequestJob::ComputeReferrerForRedirect(
292 URLRequest::ReferrerPolicy policy,
293 const std::string& referrer,
294 const GURL& redirect_destination) {
295 GURL original_referrer(referrer);
296 bool secure_referrer_but_insecure_destination =
297 original_referrer.SchemeIsSecure() &&
298 !redirect_destination.SchemeIsSecure();
299 bool same_origin =
300 original_referrer.GetOrigin() == redirect_destination.GetOrigin();
301 switch (policy) {
302 case URLRequest::CLEAR_REFERRER_ON_TRANSITION_FROM_SECURE_TO_INSECURE:
303 return secure_referrer_but_insecure_destination ? GURL()
304 : original_referrer;
305
306 case URLRequest::REDUCE_REFERRER_GRANULARITY_ON_TRANSITION_CROSS_ORIGIN:
307 if (same_origin) {
308 return original_referrer;
309 } else if (secure_referrer_but_insecure_destination) {
310 return GURL();
311 } else {
312 return original_referrer.GetOrigin();
313 }
314
315 case URLRequest::ORIGIN_ONLY_ON_TRANSITION_CROSS_ORIGIN:
316 return same_origin ? original_referrer : original_referrer.GetOrigin();
317
318 case URLRequest::NEVER_CLEAR_REFERRER:
319 return original_referrer;
320 }
321
322 NOTREACHED();
323 return GURL();
324 }
325
URLRequestJob::~URLRequestJob() {
  // Balance the AddObserver() performed in the constructor.
  base::PowerMonitor* power_monitor = base::PowerMonitor::Get();
  if (power_monitor)
    power_monitor->RemoveObserver(this);
}
331
332 void URLRequestJob::NotifyCertificateRequested(
333 SSLCertRequestInfo* cert_request_info) {
334 if (!request_)
335 return; // The request was destroyed, so there is no more work to do.
336
337 request_->NotifyCertificateRequested(cert_request_info);
338 }
339
340 void URLRequestJob::NotifySSLCertificateError(const SSLInfo& ssl_info,
341 bool fatal) {
342 if (!request_)
343 return; // The request was destroyed, so there is no more work to do.
344
345 request_->NotifySSLCertificateError(ssl_info, fatal);
346 }
347
348 bool URLRequestJob::CanGetCookies(const CookieList& cookie_list) const {
349 if (!request_)
350 return false; // The request was destroyed, so there is no more work to do.
351
352 return request_->CanGetCookies(cookie_list);
353 }
354
355 bool URLRequestJob::CanSetCookie(const std::string& cookie_line,
356 CookieOptions* options) const {
357 if (!request_)
358 return false; // The request was destroyed, so there is no more work to do.
359
360 return request_->CanSetCookie(cookie_line, options);
361 }
362
363 bool URLRequestJob::CanEnablePrivacyMode() const {
364 if (!request_)
365 return false; // The request was destroyed, so there is no more work to do.
366
367 return request_->CanEnablePrivacyMode();
368 }
369
// Returns the cookie store of the owning request. Unlike the Can* helpers,
// this must not be called after the request has been detached/destroyed.
CookieStore* URLRequestJob::GetCookieStore() const {
  DCHECK(request_);

  return request_->cookie_store();
}
375
376 void URLRequestJob::NotifyBeforeNetworkStart(bool* defer) {
377 if (!request_)
378 return;
379
380 request_->NotifyBeforeNetworkStart(defer);
381 }
382
// Called by subclasses once response headers are available. Handles, in
// order: redirect responses, auth challenges, and finally notifying the
// URLRequest that the response has started. Delegate callbacks made here can
// release 'this'; |self_preservation| below keeps the job alive for the
// duration of the method.
void URLRequestJob::NotifyHeadersComplete() {
  // TODO(vadimt): Remove ScopedTracker below once crbug.com/423948 is fixed.
  tracked_objects::ScopedTracker tracking_profile(
      FROM_HERE_WITH_EXPLICIT_FUNCTION(
          "423948 URLRequestJob::NotifyHeadersComplete"));

  if (!request_ || !request_->has_delegate())
    return;  // The request was destroyed, so there is no more work to do.

  if (has_handled_response_)
    return;

  DCHECK(!request_->status().is_io_pending());

  // Initialize to the current time, and let the subclass optionally override
  // the time stamps if it has that information. The default request_time is
  // set by URLRequest before it calls our Start method.
  request_->response_info_.response_time = base::Time::Now();
  GetResponseInfo(&request_->response_info_);

  // When notifying the delegate, the delegate can release the request
  // (and thus release 'this'). After calling to the delegate, we must
  // check the request pointer to see if it still exists, and return
  // immediately if it has been destroyed. self_preservation ensures our
  // survival until we can get out of this method.
  scoped_refptr<URLRequestJob> self_preservation(this);

  if (request_) {
    // TODO(vadimt): Remove ScopedTracker below once crbug.com/423948 is fixed.
    tracked_objects::ScopedTracker tracking_profile1(
        FROM_HERE_WITH_EXPLICIT_FUNCTION(
            "423948 URLRequestJob::NotifyHeadersComplete 1"));

    request_->OnHeadersComplete();
  }

  // TODO(vadimt): Remove ScopedTracker below once crbug.com/423948 is fixed.
  tracked_objects::ScopedTracker tracking_profile2(
      FROM_HERE_WITH_EXPLICIT_FUNCTION(
          "423948 URLRequestJob::NotifyHeadersComplete 2"));

  GURL new_location;
  int http_status_code;
  if (IsRedirectResponse(&new_location, &http_status_code)) {
    // TODO(vadimt): Remove ScopedTracker below once crbug.com/423948 is fixed.
    tracked_objects::ScopedTracker tracking_profile3(
        FROM_HERE_WITH_EXPLICIT_FUNCTION(
            "423948 URLRequestJob::NotifyHeadersComplete 3"));

    // Redirect response bodies are not read. Notify the transaction
    // so it does not treat being stopped as an error.
    DoneReadingRedirectResponse();

    // TODO(vadimt): Remove ScopedTracker below once crbug.com/423948 is fixed.
    tracked_objects::ScopedTracker tracking_profile4(
        FROM_HERE_WITH_EXPLICIT_FUNCTION(
            "423948 URLRequestJob::NotifyHeadersComplete 4"));

    RedirectInfo redirect_info =
        ComputeRedirectInfo(new_location, http_status_code);

    // TODO(vadimt): Remove ScopedTracker below once crbug.com/423948 is fixed.
    tracked_objects::ScopedTracker tracking_profile5(
        FROM_HERE_WITH_EXPLICIT_FUNCTION(
            "423948 URLRequestJob::NotifyHeadersComplete 5"));

    bool defer_redirect = false;
    request_->NotifyReceivedRedirect(redirect_info, &defer_redirect);

    // Ensure that the request wasn't detached or destroyed in
    // NotifyReceivedRedirect
    if (!request_ || !request_->has_delegate())
      return;

    // TODO(vadimt): Remove ScopedTracker below once crbug.com/423948 is fixed.
    tracked_objects::ScopedTracker tracking_profile6(
        FROM_HERE_WITH_EXPLICIT_FUNCTION(
            "423948 URLRequestJob::NotifyHeadersComplete 6"));

    // If we were not cancelled, then maybe follow the redirect.
    if (request_->status().is_success()) {
      if (defer_redirect) {
        // The delegate postponed the redirect; it resumes later via
        // FollowDeferredRedirect().
        deferred_redirect_info_ = redirect_info;
      } else {
        FollowRedirect(redirect_info);
      }
      return;
    }
  } else if (NeedsAuth()) {
    // TODO(vadimt): Remove ScopedTracker below once crbug.com/423948 is fixed.
    tracked_objects::ScopedTracker tracking_profile7(
        FROM_HERE_WITH_EXPLICIT_FUNCTION(
            "423948 URLRequestJob::NotifyHeadersComplete 7"));

    scoped_refptr<AuthChallengeInfo> auth_info;
    GetAuthChallengeInfo(&auth_info);

    // TODO(vadimt): Remove ScopedTracker below once crbug.com/423948 is fixed.
    tracked_objects::ScopedTracker tracking_profile8(
        FROM_HERE_WITH_EXPLICIT_FUNCTION(
            "423948 URLRequestJob::NotifyHeadersComplete 8"));

    // Need to check for a NULL auth_info because the server may have failed
    // to send a challenge with the 401 response.
    if (auth_info.get()) {
      request_->NotifyAuthRequired(auth_info.get());
      // Wait for SetAuth or CancelAuth to be called.
      return;
    }
  }

  // TODO(vadimt): Remove ScopedTracker below once crbug.com/423948 is fixed.
  tracked_objects::ScopedTracker tracking_profile9(
      FROM_HERE_WITH_EXPLICIT_FUNCTION(
          "423948 URLRequestJob::NotifyHeadersComplete 9"));

  has_handled_response_ = true;
  if (request_->status().is_success())
    filter_.reset(SetupFilter());

  if (!filter_.get()) {
    // No filter: Content-Length (when present) is the expected body size.
    std::string content_length;
    request_->GetResponseHeaderByName("content-length", &content_length);
    if (!content_length.empty())
      base::StringToInt64(content_length, &expected_content_size_);
  } else {
    // A filter changes the body size, so leave expected_content_size_ alone
    // and record which filters were installed in the net log.
    request_->net_log().AddEvent(
        NetLog::TYPE_URL_REQUEST_FILTERS_SET,
        base::Bind(&FiltersSetCallback, base::Unretained(filter_.get())));
  }

  // TODO(vadimt): Remove ScopedTracker below once crbug.com/423948 is fixed.
  tracked_objects::ScopedTracker tracking_profile10(
      FROM_HERE_WITH_EXPLICIT_FUNCTION(
          "423948 URLRequestJob::NotifyHeadersComplete 10"));

  request_->NotifyResponseStarted();
}
521
// Called by subclasses when a raw read completes (possibly asynchronously).
// Accounts for the raw bytes, runs them through |filter_| when one is
// installed, and forwards the (possibly filtered) byte count to the
// URLRequest.
void URLRequestJob::NotifyReadComplete(int bytes_read) {
  // TODO(vadimt): Remove ScopedTracker below once crbug.com/423948 is fixed.
  tracked_objects::ScopedTracker tracking_profile(
      FROM_HERE_WITH_EXPLICIT_FUNCTION(
          "423948 URLRequestJob::NotifyReadComplete"));

  if (!request_ || !request_->has_delegate())
    return;  // The request was destroyed, so there is no more work to do.

  // TODO(darin): Bug 1004233. Re-enable this test once all of the chrome
  // unit_tests have been fixed to not trip this.
#if 0
  DCHECK(!request_->status().is_io_pending());
#endif
  // The headers should be complete before reads complete
  DCHECK(has_handled_response_);

  OnRawReadComplete(bytes_read);

  // Don't notify if we had an error.
  if (!request_->status().is_success())
    return;

  // When notifying the delegate, the delegate can release the request
  // (and thus release 'this'). After calling to the delegate, we must
  // check the request pointer to see if it still exists, and return
  // immediately if it has been destroyed. self_preservation ensures our
  // survival until we can get out of this method.
  scoped_refptr<URLRequestJob> self_preservation(this);

  if (filter_.get()) {
    // Tell the filter that it has more data
    FilteredDataRead(bytes_read);

    // Filter the data.
    int filter_bytes_read = 0;
    if (ReadFilteredData(&filter_bytes_read)) {
      // Zero filtered bytes on a successful read means EOF.
      if (!filter_bytes_read)
        DoneReading();
      request_->NotifyReadCompleted(filter_bytes_read);
    }
  } else {
    request_->NotifyReadCompleted(bytes_read);
  }
  DVLOG(1) << __FUNCTION__ << "() "
           << "\"" << (request_ ? request_->url().spec() : "???") << "\""
           << " pre bytes read = " << bytes_read
           << " pre total = " << prefilter_bytes_read_
           << " post total = " << postfilter_bytes_read_;
}
572
// Reports a failure that occurred before any response was handled. Marks the
// response as handled so header notifications will not fire afterwards.
void URLRequestJob::NotifyStartError(const URLRequestStatus &status) {
  DCHECK(!has_handled_response_);
  has_handled_response_ = true;
  if (request_) {
    // There may be relevant information in the response info even in the
    // error case.
    GetResponseInfo(&request_->response_info_);

    request_->set_status(status);
    request_->NotifyResponseStarted();
    // We may have been deleted.
  }
}
586
// Records that the job has finished with |status| and schedules
// CompleteNotifyDone() asynchronously so the delegate is not re-entered from
// a synchronous completion. Must be called at most once per job.
void URLRequestJob::NotifyDone(const URLRequestStatus &status) {
  DCHECK(!done_) << "Job sending done notification twice";
  if (done_)
    return;
  done_ = true;

  // Unless there was an error, we should have at least tried to handle
  // the response before getting here.
  DCHECK(has_handled_response_ || !status.is_success());

  // As with NotifyReadComplete, we need to take care to notice if we were
  // destroyed during a delegate callback.
  if (request_) {
    request_->set_is_pending(false);
    // With async IO, it's quite possible to have a few outstanding
    // requests. We could receive a request to Cancel, followed shortly
    // by a successful IO. For tracking the status(), once there is
    // an error, we do not change the status back to success. To
    // enforce this, only set the status if the job is so far
    // successful.
    if (request_->status().is_success()) {
      if (status.status() == URLRequestStatus::FAILED) {
        request_->net_log().AddEventWithNetErrorCode(NetLog::TYPE_FAILED,
                                                     status.error());
      }
      request_->set_status(status);
    }
  }

  // Complete this notification later. This prevents us from re-entering the
  // delegate if we're done because of a synchronous call.
  base::MessageLoop::current()->PostTask(
      FROM_HERE,
      base::Bind(&URLRequestJob::CompleteNotifyDone,
                 weak_factory_.GetWeakPtr()));
}
623
// Posted by NotifyDone(); delivers the final error notification to the
// delegate outside the call stack that produced it.
void URLRequestJob::CompleteNotifyDone() {
  // Check if we should notify the delegate that we're done because of an error.
  if (request_ &&
      !request_->status().is_success() &&
      request_->has_delegate()) {
    // We report the error differently depending on whether we've called
    // OnResponseStarted yet.
    if (has_handled_response_) {
      // We signal the error by calling OnReadComplete with a bytes_read of -1.
      request_->NotifyReadCompleted(-1);
    } else {
      has_handled_response_ = true;
      request_->NotifyResponseStarted();
    }
  }
}
640
// Reports cancellation (ERR_ABORTED) unless a done notification has already
// been sent.
void URLRequestJob::NotifyCanceled() {
  if (!done_) {
    NotifyDone(URLRequestStatus(URLRequestStatus::CANCELED, ERR_ABORTED));
  }
}
646
// Asks the owning request to restart this job, unless the request has
// already been cancelled (GetStatus() reports CANCELED for detached requests
// too, so |request_| is safe to dereference here).
void URLRequestJob::NotifyRestartRequired() {
  DCHECK(!has_handled_response_);
  if (GetStatus().status() != URLRequestStatus::CANCELED)
    request_->Restart();
}
652
// Forwards delegate-entry bookkeeping to the owning request.
void URLRequestJob::OnCallToDelegate() {
  request_->OnCallToDelegate();
}
656
// Forwards delegate-exit bookkeeping to the owning request.
void URLRequestJob::OnCallToDelegateComplete() {
  request_->OnCallToDelegateComplete();
}
660
// Default raw read: reports immediate EOF (success with 0 bytes). Subclasses
// that produce response data override this.
bool URLRequestJob::ReadRawData(IOBuffer* buf, int buf_size,
                                int *bytes_read) {
  DCHECK(bytes_read);
  *bytes_read = 0;
  return true;
}
667
// Hook invoked when the response body has been fully read; base
// implementation has nothing to do.
void URLRequestJob::DoneReading() {
  // Do nothing.
}
671
// Hook invoked when a redirect response's body is being skipped (see
// NotifyHeadersComplete); base implementation has nothing to do.
void URLRequestJob::DoneReadingRedirectResponse() {
}
674
// Informs |filter_| that |bytes_read| fresh bytes are available in its input
// (stream) buffer.
void URLRequestJob::FilteredDataRead(int bytes_read) {
  DCHECK(filter_);
  filter_->FlushStreamBuffer(bytes_read);
}
679
// Pulls raw data through |filter_| into the caller's buffer previously saved
// by Read(). Returns true when output is available (|*bytes_read| == 0 means
// EOF) and releases the saved buffer; returns false when IO is pending or an
// error occurred.
bool URLRequestJob::ReadFilteredData(int* bytes_read) {
  DCHECK(filter_);
  DCHECK(filtered_read_buffer_.get());
  DCHECK_GT(filtered_read_buffer_len_, 0);
  DCHECK_LT(filtered_read_buffer_len_, 1000000);  // Sanity check.
  DCHECK(!raw_read_buffer_.get());

  *bytes_read = 0;
  bool rv = false;

  // Loop until the filter emits output, signals EOF/pending IO, or fails.
  for (;;) {
    if (is_done())
      return true;

    if (!filter_needs_more_output_space_ && !filter_->stream_data_len()) {
      // We don't have any raw data to work with, so read from the transaction.
      int filtered_data_read;
      if (ReadRawDataForFilter(&filtered_data_read)) {
        if (filtered_data_read > 0) {
          // Give data to filter.
          filter_->FlushStreamBuffer(filtered_data_read);
        } else {
          return true;  // EOF.
        }
      } else {
        return false;  // IO Pending (or error).
      }
    }

    if ((filter_->stream_data_len() || filter_needs_more_output_space_) &&
        !is_done()) {
      // Get filtered data.
      int filtered_data_len = filtered_read_buffer_len_;
      int output_buffer_size = filtered_data_len;
      Filter::FilterStatus status =
          filter_->ReadData(filtered_read_buffer_->data(), &filtered_data_len);

      if (filter_needs_more_output_space_ && !filtered_data_len) {
        // filter_needs_more_output_space_ was mistaken... there are no more
        // bytes and we should have at least tried to fill up the filter's input
        // buffer. Correct the state, and try again.
        filter_needs_more_output_space_ = false;
        continue;
      }
      // A completely full output buffer implies the filter may have more to
      // emit on the next call.
      filter_needs_more_output_space_ =
          (filtered_data_len == output_buffer_size);

      switch (status) {
        case Filter::FILTER_DONE: {
          filter_needs_more_output_space_ = false;
          *bytes_read = filtered_data_len;
          postfilter_bytes_read_ += filtered_data_len;
          rv = true;
          break;
        }
        case Filter::FILTER_NEED_MORE_DATA: {
          // We have finished filtering all data currently in the buffer.
          // There might be some space left in the output buffer. One can
          // consider reading more data from the stream to feed the filter
          // and filling up the output buffer. This leads to more complicated
          // buffer management and data notification mechanisms.
          // We can revisit this issue if there is a real perf need.
          if (filtered_data_len > 0) {
            *bytes_read = filtered_data_len;
            postfilter_bytes_read_ += filtered_data_len;
            rv = true;
          } else {
            // Read again since we haven't received enough data yet (e.g., we
            // may not have a complete gzip header yet).
            continue;
          }
          break;
        }
        case Filter::FILTER_OK: {
          *bytes_read = filtered_data_len;
          postfilter_bytes_read_ += filtered_data_len;
          rv = true;
          break;
        }
        case Filter::FILTER_ERROR: {
          DVLOG(1) << __FUNCTION__ << "() "
                   << "\"" << (request_ ? request_->url().spec() : "???")
                   << "\"" << " Filter Error";
          filter_needs_more_output_space_ = false;
          NotifyDone(URLRequestStatus(URLRequestStatus::FAILED,
                                      ERR_CONTENT_DECODING_FAILED));
          rv = false;
          break;
        }
        default: {
          NOTREACHED();
          filter_needs_more_output_space_ = false;
          rv = false;
          break;
        }
      }

      // If logging all bytes is enabled, log the filtered bytes read.
      if (rv && request() && request()->net_log().IsLoggingBytes() &&
          filtered_data_len > 0) {
        request()->net_log().AddByteTransferEvent(
            NetLog::TYPE_URL_REQUEST_JOB_FILTERED_BYTES_READ,
            filtered_data_len, filtered_read_buffer_->data());
      }
    } else {
      // we are done, or there is no data left.
      rv = true;
    }
    break;
  }

  if (rv) {
    // When we successfully finished a read, we no longer need to save the
    // caller's buffers. Release our reference.
    filtered_read_buffer_ = NULL;
    filtered_read_buffer_len_ = 0;
  }
  return rv;
}
799
// Releases the content-decoding filter chain, if any.
void URLRequestJob::DestroyFilters() {
  filter_.reset();
}
803
804 const URLRequestStatus URLRequestJob::GetStatus() {
805 if (request_)
806 return request_->status();
807 // If the request is gone, we must be cancelled.
808 return URLRequestStatus(URLRequestStatus::CANCELED,
809 ERR_ABORTED);
810 }
811
// Propagates |status| to the owning request, if it still exists.
void URLRequestJob::SetStatus(const URLRequestStatus &status) {
  if (request_)
    request_->set_status(status);
}
816
// Records the proxy server used directly on the request.
// NOTE(review): unlike the other setters here this does not null-check
// |request_| — confirm callers never invoke it after DetachRequest().
void URLRequestJob::SetProxyServer(const HostPortPair& proxy_server) {
  request_->proxy_server_ = proxy_server;
}
820
821 bool URLRequestJob::ReadRawDataForFilter(int* bytes_read) {
822 bool rv = false;
823
824 DCHECK(bytes_read);
825 DCHECK(filter_.get());
826
827 *bytes_read = 0;
828
829 // Get more pre-filtered data if needed.
830 // TODO(mbelshe): is it possible that the filter needs *MORE* data
831 // when there is some data already in the buffer?
832 if (!filter_->stream_data_len() && !is_done()) {
833 IOBuffer* stream_buffer = filter_->stream_buffer();
834 int stream_buffer_size = filter_->stream_buffer_size();
835 rv = ReadRawDataHelper(stream_buffer, stream_buffer_size, bytes_read);
836 }
837 return rv;
838 }
839
// Wraps ReadRawData(), remembering |buf| in |raw_read_buffer_| so that
// OnRawReadComplete() can log and account for the bytes whether the read
// finishes synchronously (handled below) or asynchronously (handled by
// NotifyReadComplete).
bool URLRequestJob::ReadRawDataHelper(IOBuffer* buf, int buf_size,
                                      int* bytes_read) {
  // TODO(vadimt): Remove ScopedTracker below once crbug.com/423948 is fixed.
  tracked_objects::ScopedTracker tracking_profile(
      FROM_HERE_WITH_EXPLICIT_FUNCTION(
          "423948 URLRequestJob::ReadRawDataHelper"));

  DCHECK(!request_->status().is_io_pending());
  DCHECK(raw_read_buffer_.get() == NULL);

  // Keep a pointer to the read buffer, so we have access to it in the
  // OnRawReadComplete() callback in the event that the read completes
  // asynchronously.
  raw_read_buffer_ = buf;
  bool rv = ReadRawData(buf, buf_size, bytes_read);

  if (!request_->status().is_io_pending()) {
    // TODO(vadimt): Remove ScopedTracker below once crbug.com/423948 is fixed.
    tracked_objects::ScopedTracker tracking_profile1(
        FROM_HERE_WITH_EXPLICIT_FUNCTION(
            "423948 URLRequestJob::ReadRawDataHelper1"));

    // If the read completes synchronously, either success or failure,
    // invoke the OnRawReadComplete callback so we can account for the
    // completed read.
    OnRawReadComplete(*bytes_read);
  }
  return rv;
}
869
870 void URLRequestJob::FollowRedirect(const RedirectInfo& redirect_info) {
871 int rv = request_->Redirect(redirect_info);
872 if (rv != OK)
873 NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, rv));
874 }
875
// Bookkeeping for a completed raw (pre-filter) read: optionally logs the
// bytes to the net log, updates byte counters, and drops the buffer
// reference saved by ReadRawDataHelper().
void URLRequestJob::OnRawReadComplete(int bytes_read) {
  DCHECK(raw_read_buffer_.get());
  // If |filter_| is non-NULL, bytes will be logged after it is applied instead.
  if (!filter_.get() && request() && request()->net_log().IsLoggingBytes() &&
      bytes_read > 0) {
    request()->net_log().AddByteTransferEvent(
        NetLog::TYPE_URL_REQUEST_JOB_BYTES_READ,
        bytes_read, raw_read_buffer_->data());
  }

  if (bytes_read > 0) {
    RecordBytesRead(bytes_read);
  }
  raw_read_buffer_ = NULL;
}
891
// Updates the pre-filter byte counters for |bytes_read| raw bytes and tells
// the network delegate. Without a filter, raw bytes are also counted as
// post-filter bytes (the filter path accounts for those separately).
void URLRequestJob::RecordBytesRead(int bytes_read) {
  filter_input_byte_count_ += bytes_read;
  prefilter_bytes_read_ += bytes_read;
  if (!filter_.get())
    postfilter_bytes_read_ += bytes_read;
  DVLOG(2) << __FUNCTION__ << "() "
           << "\"" << (request_ ? request_->url().spec() : "???") << "\""
           << " pre bytes read = " << bytes_read
           << " pre total = " << prefilter_bytes_read_
           << " post total = " << postfilter_bytes_read_;
  UpdatePacketReadTimes();  // Facilitate stats recording if it is active.
  if (network_delegate_) {
    // TODO(vadimt): Remove ScopedTracker below once crbug.com/423948 is fixed.
    tracked_objects::ScopedTracker tracking_profile(
        FROM_HERE_WITH_EXPLICIT_FUNCTION(
            "423948 URLRequestJob::RecordBytesRead NotifyRawBytesRead"));

    network_delegate_->NotifyRawBytesRead(*request_, bytes_read);
  }
}
912
913 bool URLRequestJob::FilterHasData() {
914 return filter_.get() && filter_->stream_data_len();
915 }
916
// No-op here; subclasses that collect per-packet timing statistics override
// this (called from RecordBytesRead).
void URLRequestJob::UpdatePacketReadTimes() {
}
919
// Builds the RedirectInfo for a redirect to |location| with
// |http_status_code|: adjusts the request method, carries over the URL
// fragment when permitted, and recomputes the first-party-for-cookies URL
// and the referrer.
RedirectInfo URLRequestJob::ComputeRedirectInfo(const GURL& location,
                                                int http_status_code) {
  const GURL& url = request_->url();

  RedirectInfo redirect_info;

  redirect_info.status_code = http_status_code;

  // The request method may change, depending on the status code.
  redirect_info.new_method =
      ComputeMethodForRedirect(request_->method(), http_status_code);

  // Move the reference fragment of the old location to the new one if the
  // new one has none. This duplicates mozilla's behavior.
  if (url.is_valid() && url.has_ref() && !location.has_ref() &&
      CopyFragmentOnRedirect(location)) {
    GURL::Replacements replacements;
    // Reference the |ref| directly out of the original URL to avoid a
    // malloc.
    replacements.SetRef(url.spec().data(),
                        url.parsed_for_possibly_invalid_spec().ref);
    redirect_info.new_url = location.ReplaceComponents(replacements);
  } else {
    redirect_info.new_url = location;
  }

  // Update the first-party URL if appropriate.
  if (request_->first_party_url_policy() ==
          URLRequest::UPDATE_FIRST_PARTY_URL_ON_REDIRECT) {
    redirect_info.new_first_party_for_cookies = redirect_info.new_url;
  } else {
    redirect_info.new_first_party_for_cookies =
        request_->first_party_for_cookies();
  }

  // Alter the referrer if redirecting cross-origin (especially HTTP->HTTPS).
  redirect_info.new_referrer =
      ComputeReferrerForRedirect(request_->referrer_policy(),
                                 request_->referrer(),
                                 redirect_info.new_url).spec();

  return redirect_info;
}
963
964 } // namespace net
OLDNEW
« no previous file with comments | « net/url_request/url_request_job.h ('k') | net/url_request/url_request_job_factory.h » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698