OLD | NEW |
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "net/url_request/url_request_job.h" | 5 #include "net/url_request/url_request_job.h" |
6 | 6 |
7 #include "base/bind.h" | 7 #include "base/bind.h" |
8 #include "base/compiler_specific.h" | 8 #include "base/compiler_specific.h" |
9 #include "base/location.h" | 9 #include "base/location.h" |
10 #include "base/metrics/histogram_macros.h" | 10 #include "base/metrics/histogram_macros.h" |
(...skipping 83 matching lines...)
94 // Make sure the request is notified that we are done. We assume that the | 94 // Make sure the request is notified that we are done. We assume that the |
95 // request took care of setting its error status before calling Kill. | 95 // request took care of setting its error status before calling Kill. |
96 if (request_) | 96 if (request_) |
97 NotifyCanceled(); | 97 NotifyCanceled(); |
98 } | 98 } |
99 | 99 |
100 void URLRequestJob::DetachRequest() { | 100 void URLRequestJob::DetachRequest() { |
101 request_ = NULL; | 101 request_ = NULL; |
102 } | 102 } |
103 | 103 |
104 // This function calls ReadData to get stream data. If a filter exists, passes | 104 // This function calls ReadRawData to get stream data. If a filter exists, it |
105 // the data to the attached filter. Then returns the output from filter back to | 105 // passes the data to the attached filter. It then returns the output from |
106 // the caller. | 106 // filter back to the caller. |
107 bool URLRequestJob::Read(IOBuffer* buf, int buf_size, int *bytes_read) { | 107 bool URLRequestJob::Read(IOBuffer* buf, int buf_size, int *bytes_read) { |
108 bool rv = false; | |
109 | |
110 DCHECK_LT(buf_size, 1000000); // Sanity check. | 108 DCHECK_LT(buf_size, 1000000); // Sanity check. |
111 DCHECK(buf); | 109 DCHECK(buf); |
112 DCHECK(bytes_read); | 110 DCHECK(bytes_read); |
113 DCHECK(filtered_read_buffer_.get() == NULL); | 111 DCHECK(filtered_read_buffer_.get() == NULL); |
114 DCHECK_EQ(0, filtered_read_buffer_len_); | 112 DCHECK_EQ(0, filtered_read_buffer_len_); |
115 | 113 |
| 114 Error error = OK; |
116 *bytes_read = 0; | 115 *bytes_read = 0; |
117 | 116 |
118 // Skip Filter if not present. | 117 // Skip Filter if not present. |
119 if (!filter_.get()) { | 118 if (!filter_) { |
120 rv = ReadRawDataHelper(buf, buf_size, bytes_read); | 119 error = ReadRawDataHelper(buf, buf_size, bytes_read); |
121 } else { | 120 } else { |
122 // Save the caller's buffers while we do IO | 121 // Save the caller's buffers while we do IO |
123 // in the filter's buffers. | 122 // in the filter's buffers. |
124 filtered_read_buffer_ = buf; | 123 filtered_read_buffer_ = buf; |
125 filtered_read_buffer_len_ = buf_size; | 124 filtered_read_buffer_len_ = buf_size; |
126 | 125 |
127 if (ReadFilteredData(bytes_read)) { | 126 error = ReadFilteredData(bytes_read); |
128 rv = true; // We have data to return. | |
129 | 127 |
130 // It is fine to call DoneReading even if ReadFilteredData receives 0 | 128 // Synchronous EOF from the filter. |
131 // bytes from the net, but we avoid making that call if we know for | 129 if (error == OK && *bytes_read == 0) |
132 // sure that's the case (ReadRawDataHelper path). | 130 DoneReading(); |
133 if (*bytes_read == 0) | |
134 DoneReading(); | |
135 } else { | |
136 rv = false; // Error, or a new IO is pending. | |
137 } | |
138 } | 131 } |
139 | 132 |
140 if (rv && *bytes_read == 0) | 133 if (error == OK) { |
141 NotifyDone(URLRequestStatus()); | 134 // If URLRequestJob read zero bytes, the job is at EOF. |
142 return rv; | 135 if (*bytes_read == 0) |
| 136 NotifyDone(URLRequestStatus()); |
| 137 } else if (error == ERR_IO_PENDING) { |
| 138 SetStatus(URLRequestStatus::FromError(ERR_IO_PENDING)); |
| 139 } else { |
| 140 NotifyDone(URLRequestStatus::FromError(error)); |
| 141 *bytes_read = -1; |
| 142 } |
| 143 return error == OK; |
143 } | 144 } |
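
The rewrite above collapses the old bool-plus-URLRequestStatus protocol into a single net::Error: OK with a positive *bytes_read means data, OK with zero bytes means EOF (reported through NotifyDone), ERR_IO_PENDING means the result arrives later via ReadRawDataComplete, and any other error is surfaced through NotifyDone before Read returns false. A minimal sketch of how the Error folds into the bool return value, using stand-in Error values rather than the real net/ headers:

enum Error { OK = 0, ERR_IO_PENDING = -1, ERR_FAILED = -2 };

// Mirrors the tail of Read(): true only ever means "synchronous success"
// (data or EOF); pending and failure both come back as false and are
// distinguished by the request's status, not by the return value.
bool FoldErrorToBool(Error error, int* bytes_read) {
  if (error == OK)
    return true;                 // *bytes_read > 0: data; == 0: EOF.
  if (error == ERR_IO_PENDING)
    return false;                // Completion arrives asynchronously.
  *bytes_read = -1;              // Failure was already sent via NotifyDone().
  return false;
}
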
144 | 145 |
145 void URLRequestJob::StopCaching() { | 146 void URLRequestJob::StopCaching() { |
146 // Nothing to do here. | 147 // Nothing to do here. |
147 } | 148 } |
148 | 149 |
149 bool URLRequestJob::GetFullRequestHeaders(HttpRequestHeaders* headers) const { | 150 bool URLRequestJob::GetFullRequestHeaders(HttpRequestHeaders* headers) const { |
150 // Most job types don't send request headers. | 151 // Most job types don't send request headers. |
151 return false; | 152 return false; |
152 } | 153 } |
(...skipping 320 matching lines...)
473 base::StringToInt64(content_length, &expected_content_size_); | 474 base::StringToInt64(content_length, &expected_content_size_); |
474 } else { | 475 } else { |
475 request_->net_log().AddEvent( | 476 request_->net_log().AddEvent( |
476 NetLog::TYPE_URL_REQUEST_FILTERS_SET, | 477 NetLog::TYPE_URL_REQUEST_FILTERS_SET, |
477 base::Bind(&FiltersSetCallback, base::Unretained(filter_.get()))); | 478 base::Bind(&FiltersSetCallback, base::Unretained(filter_.get()))); |
478 } | 479 } |
479 | 480 |
480 request_->NotifyResponseStarted(); | 481 request_->NotifyResponseStarted(); |
481 } | 482 } |
482 | 483 |
483 void URLRequestJob::NotifyReadComplete(int bytes_read) { | 484 void URLRequestJob::ConvertResultToError(int result, Error* error, int* count) { |
| 485 if (result >= 0) { |
| 486 *error = OK; |
| 487 *count = result; |
| 488 } else { |
| 489 *error = static_cast<Error>(result); |
| 490 *count = 0; |
| 491 } |
| 492 } |
| 493 |
| 494 void URLRequestJob::ReadRawDataComplete(int result) { |
484 // TODO(cbentzel): Remove ScopedTracker below once crbug.com/475755 is fixed. | 495 // TODO(cbentzel): Remove ScopedTracker below once crbug.com/475755 is fixed. |
485 tracked_objects::ScopedTracker tracking_profile( | 496 tracked_objects::ScopedTracker tracking_profile( |
486 FROM_HERE_WITH_EXPLICIT_FUNCTION( | 497 FROM_HERE_WITH_EXPLICIT_FUNCTION( |
487 "475755 URLRequestJob::NotifyReadComplete")); | 498 "475755 URLRequestJob::RawReadCompleted")); |
| 499 |
| 500 Error error; |
| 501 int bytes_read; |
| 502 ConvertResultToError(result, &error, &bytes_read); |
488 | 503 |
489 if (!request_ || !request_->has_delegate()) | 504 if (!request_ || !request_->has_delegate()) |
490 return; // The request was destroyed, so there is no more work to do. | 505 return; // The request was destroyed, so there is no more work to do. |
491 | 506 |
492 // TODO(darin): Bug 1004233. Re-enable this test once all of the chrome | 507 // TODO(darin): Bug 1004233. Re-enable this test once all of the chrome |
493 // unit_tests have been fixed to not trip this. | 508 // unit_tests have been fixed to not trip this. |
494 #if 0 | 509 #if 0 |
495 DCHECK(!request_->status().is_io_pending()); | 510 DCHECK(!request_->status().is_io_pending()); |
496 #endif | 511 #endif |
497 // The headers should be complete before reads complete | 512 // The headers should be complete before reads complete |
498 DCHECK(has_handled_response_); | 513 DCHECK(has_handled_response_); |
499 | 514 |
500 OnRawReadComplete(bytes_read); | 515 GatherRawReadStats(error, bytes_read); |
501 | 516 |
502 // Don't notify if we had an error. | 517 if (filter_.get() && error == OK) { |
503 if (!request_->status().is_success()) | 518 int filter_bytes_read = 0; |
504 return; | 519 // Tell the filter that it has more data. |
| 520 PushInputToFilter(bytes_read); |
| 521 |
| 522 // Filter the data. |
| 523 error = ReadFilteredData(&filter_bytes_read); |
| 524 |
| 525 if (!filter_bytes_read) |
| 526 DoneReading(); |
| 527 |
| 528 DVLOG(1) << __FUNCTION__ << "() " |
| 529 << "\"" << (request_ ? request_->url().spec() : "???") << "\"" |
| 530 << " pre bytes read = " << bytes_read |
| 531 << " pre total = " << prefilter_bytes_read_ |
| 532 << " post total = " << postfilter_bytes_read_; |
| 533 bytes_read = filter_bytes_read; |
| 534 } else { |
| 535 DVLOG(1) << __FUNCTION__ << "() " |
| 536 << "\"" << (request_ ? request_->url().spec() : "???") << "\"" |
| 537 << " pre bytes read = " << bytes_read |
| 538 << " pre total = " << prefilter_bytes_read_ |
| 539 << " post total = " << postfilter_bytes_read_; |
| 540 } |
505 | 541 |
506 // When notifying the delegate, the delegate can release the request | 542 // When notifying the delegate, the delegate can release the request |
507 // (and thus release 'this'). After calling to the delegate, we must | 543 // (and thus release 'this'). After calling to the delegate, we must |
508 // check the request pointer to see if it still exists, and return | 544 // check the request pointer to see if it still exists, and return |
509 // immediately if it has been destroyed. self_preservation ensures our | 545 // immediately if it has been destroyed. self_preservation ensures our |
510 // survival until we can get out of this method. | 546 // survival until we can get out of this method. |
511 scoped_refptr<URLRequestJob> self_preservation(this); | 547 scoped_refptr<URLRequestJob> self_preservation(this); |
512 | 548 |
513 if (filter_.get()) { | 549 // Synchronize the URLRequest state machine with the URLRequestJob state |
514 // Tell the filter that it has more data | 550 // machine. If this read succeeded, either the request is at EOF and the |
515 FilteredDataRead(bytes_read); | 551 // URLRequest state machine goes to 'finished', or it is not and the |
| 552 // URLRequest state machine goes to 'success'. If the read failed, the |
| 553 // URLRequest state machine goes directly to 'finished'. |
| 554 // |
| 555 // Update the URLRequest's status first, so that NotifyReadCompleted has an |
| 556 // accurate view of the request. |
| 557 if (error == OK && bytes_read > 0) { |
| 558 SetStatus(URLRequestStatus()); |
| 559 } else { |
| 560 NotifyDone(URLRequestStatus::FromError(error)); |
| 561 } |
516 | 562 |
517 // Filter the data. | 563 // NotifyReadCompleted should be called after SetStatus or NotifyDone updates |
518 int filter_bytes_read = 0; | 564 // the status. |
519 if (ReadFilteredData(&filter_bytes_read)) { | 565 if (error == OK) |
520 if (!filter_bytes_read) | |
521 DoneReading(); | |
522 request_->NotifyReadCompleted(filter_bytes_read); | |
523 } | |
524 } else { | |
525 request_->NotifyReadCompleted(bytes_read); | 566 request_->NotifyReadCompleted(bytes_read); |
526 } | |
527 DVLOG(1) << __FUNCTION__ << "() " | |
528 << "\"" << (request_ ? request_->url().spec() : "???") << "\"" | |
529 << " pre bytes read = " << bytes_read | |
530 << " pre total = " << prefilter_bytes_read_ | |
531 << " post total = " << postfilter_bytes_read_; | |
532 } | 567 } |
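
The tail of the new ReadRawDataComplete keeps the two state machines in lockstep: a successful read that produced data resets the request to a success state, while EOF and failures both finish the request through NotifyDone, and only then is the delegate told the read completed. A compact restatement of that transition, with stand-in types rather than the real net/ classes:

enum Error { OK = 0, ERR_FAILED = -2 };
enum class RequestState { SUCCESS, FINISHED };

// Mirrors the status update at the end of ReadRawDataComplete():
// SetStatus(URLRequestStatus()) when more data may come, NotifyDone otherwise.
RequestState NextState(Error error, int bytes_read) {
  if (error == OK && bytes_read > 0)
    return RequestState::SUCCESS;  // More reads may follow.
  return RequestState::FINISHED;   // EOF or failure: the job is done.
}
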
533 | 568 |
534 void URLRequestJob::NotifyStartError(const URLRequestStatus &status) { | 569 void URLRequestJob::NotifyStartError(const URLRequestStatus &status) { |
535 DCHECK(!has_handled_response_); | 570 DCHECK(!has_handled_response_); |
536 has_handled_response_ = true; | 571 has_handled_response_ = true; |
537 if (request_) { | 572 if (request_) { |
538 // There may be relevant information in the response info even in the | 573 // There may be relevant information in the response info even in the |
539 // error case. | 574 // error case. |
540 GetResponseInfo(&request_->response_info_); | 575 GetResponseInfo(&request_->response_info_); |
541 | 576 |
542 request_->set_status(status); | 577 request_->set_status(status); |
543 request_->NotifyResponseStarted(); | 578 request_->NotifyResponseStarted(); |
544 // We may have been deleted. | 579 // We may have been deleted. |
545 } | 580 } |
546 } | 581 } |
547 | 582 |
548 void URLRequestJob::NotifyDone(const URLRequestStatus &status) { | 583 void URLRequestJob::NotifyDone(const URLRequestStatus &status) { |
549 DCHECK(!done_) << "Job sending done notification twice"; | 584 DCHECK(!done_) << "Job sending done notification twice"; |
550 if (done_) | 585 if (done_) |
551 return; | 586 return; |
552 done_ = true; | 587 done_ = true; |
553 | 588 |
554 // Unless there was an error, we should have at least tried to handle | 589 // Unless there was an error, we should have at least tried to handle |
555 // the response before getting here. | 590 // the response before getting here. |
556 DCHECK(has_handled_response_ || !status.is_success()); | 591 DCHECK(has_handled_response_ || !status.is_success()); |
557 | 592 |
558 // As with NotifyReadComplete, we need to take care to notice if we were | 593 // As with ReadRawDataComplete, we need to take care to notice if we were |
559 // destroyed during a delegate callback. | 594 // destroyed during a delegate callback. |
560 if (request_) { | 595 if (request_) { |
561 request_->set_is_pending(false); | 596 request_->set_is_pending(false); |
562 // With async IO, it's quite possible to have a few outstanding | 597 // With async IO, it's quite possible to have a few outstanding |
563 // requests. We could receive a request to Cancel, followed shortly | 598 // requests. We could receive a request to Cancel, followed shortly |
564 // by a successful IO. For tracking the status(), once there is | 599 // by a successful IO. For tracking the status(), once there is |
565 // an error, we do not change the status back to success. To | 600 // an error, we do not change the status back to success. To |
566 // enforce this, only set the status if the job is so far | 601 // enforce this, only set the status if the job is so far |
567 // successful. | 602 // successful. |
568 if (request_->status().is_success()) { | 603 if (request_->status().is_success()) { |
(...skipping 62 matching lines...)
631 } | 666 } |
632 | 667 |
633 void URLRequestJob::OnCallToDelegate() { | 668 void URLRequestJob::OnCallToDelegate() { |
634 request_->OnCallToDelegate(); | 669 request_->OnCallToDelegate(); |
635 } | 670 } |
636 | 671 |
637 void URLRequestJob::OnCallToDelegateComplete() { | 672 void URLRequestJob::OnCallToDelegateComplete() { |
638 request_->OnCallToDelegateComplete(); | 673 request_->OnCallToDelegateComplete(); |
639 } | 674 } |
640 | 675 |
641 bool URLRequestJob::ReadRawData(IOBuffer* buf, int buf_size, | 676 int URLRequestJob::ReadRawData(IOBuffer* buf, int buf_size) { |
642 int *bytes_read) { | 677 return 0; |
643 DCHECK(bytes_read); | |
644 *bytes_read = 0; | |
645 return true; | |
646 } | 678 } |
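
Under the new signature a subclass reports everything through the int return value: a positive byte count, 0 for EOF, ERR_IO_PENDING for an asynchronous read (answered later with ReadRawDataComplete), or another negative net::Error for failure. A hypothetical, purely synchronous in-memory job written against that contract; IOBuffer here is a minimal stand-in, and data_ and offset_ are invented members:

#include <algorithm>
#include <cstring>
#include <string>
#include <utility>
#include <vector>

// Stand-in for net::IOBuffer, just enough for the sketch.
struct IOBuffer {
  explicit IOBuffer(int size) : storage(size) {}
  char* data() { return storage.data(); }
  std::vector<char> storage;
};

class InMemoryJob /* : public URLRequestJob */ {
 public:
  explicit InMemoryJob(std::string body) : data_(std::move(body)) {}

  // New-style ReadRawData(): the return value carries the whole result.
  int ReadRawData(IOBuffer* buf, int buf_size) {
    int remaining = static_cast<int>(data_.size()) - offset_;
    int n = std::min(buf_size, remaining);
    if (n <= 0)
      return 0;  // 0 means EOF; a negative net::Error would mean failure.
    std::memcpy(buf->data(), data_.data() + offset_, n);
    offset_ += n;
    return n;    // Positive return is the byte count; nothing else to set.
  }

 private:
  std::string data_;  // Invented member: the canned response body.
  int offset_ = 0;    // Invented member: current read position.
};
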
647 | 679 |
648 void URLRequestJob::DoneReading() { | 680 void URLRequestJob::DoneReading() { |
649 // Do nothing. | 681 // Do nothing. |
650 } | 682 } |
651 | 683 |
652 void URLRequestJob::DoneReadingRedirectResponse() { | 684 void URLRequestJob::DoneReadingRedirectResponse() { |
653 } | 685 } |
654 | 686 |
655 void URLRequestJob::FilteredDataRead(int bytes_read) { | 687 void URLRequestJob::PushInputToFilter(int bytes_read) { |
656 DCHECK(filter_); | 688 DCHECK(filter_); |
657 filter_->FlushStreamBuffer(bytes_read); | 689 filter_->FlushStreamBuffer(bytes_read); |
658 } | 690 } |
659 | 691 |
660 bool URLRequestJob::ReadFilteredData(int* bytes_read) { | 692 Error URLRequestJob::ReadFilteredData(int* bytes_read) { |
661 DCHECK(filter_); | 693 DCHECK(filter_); |
662 DCHECK(filtered_read_buffer_.get()); | 694 DCHECK(filtered_read_buffer_.get()); |
663 DCHECK_GT(filtered_read_buffer_len_, 0); | 695 DCHECK_GT(filtered_read_buffer_len_, 0); |
664 DCHECK_LT(filtered_read_buffer_len_, 1000000); // Sanity check. | 696 DCHECK_LT(filtered_read_buffer_len_, 1000000); // Sanity check. |
665 DCHECK(!raw_read_buffer_.get()); | 697 DCHECK(!raw_read_buffer_); |
666 | 698 |
667 *bytes_read = 0; | 699 *bytes_read = 0; |
668 bool rv = false; | 700 Error error = ERR_FAILED; |
669 | 701 |
670 for (;;) { | 702 for (;;) { |
671 if (is_done()) | 703 if (is_done()) |
672 return true; | 704 return OK; |
673 | 705 |
674 if (!filter_needs_more_output_space_ && !filter_->stream_data_len()) { | 706 if (!filter_needs_more_output_space_ && !filter_->stream_data_len()) { |
675 // We don't have any raw data to work with, so read from the transaction. | 707 // We don't have any raw data to work with, so read from the transaction. |
676 int filtered_data_read; | 708 int filtered_data_read; |
677 if (ReadRawDataForFilter(&filtered_data_read)) { | 709 error = ReadRawDataForFilter(&filtered_data_read); |
678 if (filtered_data_read > 0) { | 710 // If ReadRawDataForFilter returned some data, fall through to the case |
679 // Give data to filter. | 711 // below; otherwise, return early. |
680 filter_->FlushStreamBuffer(filtered_data_read); | 712 if (error != OK || filtered_data_read == 0) |
681 } else { | 713 return error; |
682 return true; // EOF. | 714 filter_->FlushStreamBuffer(filtered_data_read); |
683 } | |
684 } else { | |
685 return false; // IO Pending (or error). | |
686 } | |
687 } | 715 } |
688 | 716 |
689 if ((filter_->stream_data_len() || filter_needs_more_output_space_) && | 717 if ((filter_->stream_data_len() || filter_needs_more_output_space_) && |
690 !is_done()) { | 718 !is_done()) { |
691 // Get filtered data. | 719 // Get filtered data. |
692 int filtered_data_len = filtered_read_buffer_len_; | 720 int filtered_data_len = filtered_read_buffer_len_; |
693 int output_buffer_size = filtered_data_len; | 721 int output_buffer_size = filtered_data_len; |
694 Filter::FilterStatus status = | 722 Filter::FilterStatus status = |
695 filter_->ReadData(filtered_read_buffer_->data(), &filtered_data_len); | 723 filter_->ReadData(filtered_read_buffer_->data(), &filtered_data_len); |
696 | 724 |
697 if (filter_needs_more_output_space_ && !filtered_data_len) { | 725 if (filter_needs_more_output_space_ && !filtered_data_len) { |
698 // filter_needs_more_output_space_ was mistaken... there are no more | 726 // filter_needs_more_output_space_ was mistaken... there are no more |
699 // bytes and we should have at least tried to fill up the filter's input | 727 // bytes and we should have at least tried to fill up the filter's input |
700 // buffer. Correct the state, and try again. | 728 // buffer. Correct the state, and try again. |
701 filter_needs_more_output_space_ = false; | 729 filter_needs_more_output_space_ = false; |
702 continue; | 730 continue; |
703 } | 731 } |
704 filter_needs_more_output_space_ = | 732 filter_needs_more_output_space_ = |
705 (filtered_data_len == output_buffer_size); | 733 (filtered_data_len == output_buffer_size); |
706 | 734 |
707 switch (status) { | 735 switch (status) { |
708 case Filter::FILTER_DONE: { | 736 case Filter::FILTER_DONE: { |
709 filter_needs_more_output_space_ = false; | 737 filter_needs_more_output_space_ = false; |
710 *bytes_read = filtered_data_len; | 738 *bytes_read = filtered_data_len; |
711 postfilter_bytes_read_ += filtered_data_len; | 739 postfilter_bytes_read_ += filtered_data_len; |
712 rv = true; | 740 error = OK; |
713 break; | 741 break; |
714 } | 742 } |
715 case Filter::FILTER_NEED_MORE_DATA: { | 743 case Filter::FILTER_NEED_MORE_DATA: { |
716 // We have finished filtering all data currently in the buffer. | 744 // We have finished filtering all data currently in the buffer. |
717 // There might be some space left in the output buffer. One can | 745 // There might be some space left in the output buffer. One can |
718 // consider reading more data from the stream to feed the filter | 746 // consider reading more data from the stream to feed the filter |
719 // and filling up the output buffer. This leads to more complicated | 747 // and filling up the output buffer. This leads to more complicated |
720 // buffer management and data notification mechanisms. | 748 // buffer management and data notification mechanisms. |
721 // We can revisit this issue if there is a real perf need. | 749 // We can revisit this issue if there is a real perf need. |
722 if (filtered_data_len > 0) { | 750 if (filtered_data_len > 0) { |
723 *bytes_read = filtered_data_len; | 751 *bytes_read = filtered_data_len; |
724 postfilter_bytes_read_ += filtered_data_len; | 752 postfilter_bytes_read_ += filtered_data_len; |
725 rv = true; | 753 error = OK; |
726 } else { | 754 } else { |
727 // Read again since we haven't received enough data yet (e.g., we | 755 // Read again since we haven't received enough data yet (e.g., we |
728 // may not have a complete gzip header yet). | 756 // may not have a complete gzip header yet). |
729 continue; | 757 continue; |
730 } | 758 } |
731 break; | 759 break; |
732 } | 760 } |
733 case Filter::FILTER_OK: { | 761 case Filter::FILTER_OK: { |
734 *bytes_read = filtered_data_len; | 762 *bytes_read = filtered_data_len; |
735 postfilter_bytes_read_ += filtered_data_len; | 763 postfilter_bytes_read_ += filtered_data_len; |
736 rv = true; | 764 error = OK; |
737 break; | 765 break; |
738 } | 766 } |
739 case Filter::FILTER_ERROR: { | 767 case Filter::FILTER_ERROR: { |
740 DVLOG(1) << __FUNCTION__ << "() " | 768 DVLOG(1) << __FUNCTION__ << "() " |
741 << "\"" << (request_ ? request_->url().spec() : "???") | 769 << "\"" << (request_ ? request_->url().spec() : "???") |
742 << "\"" << " Filter Error"; | 770 << "\"" << " Filter Error"; |
743 filter_needs_more_output_space_ = false; | 771 filter_needs_more_output_space_ = false; |
744 NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, | 772 error = ERR_CONTENT_DECODING_FAILED; |
745 ERR_CONTENT_DECODING_FAILED)); | |
746 rv = false; | |
747 break; | 773 break; |
748 } | 774 } |
749 default: { | 775 default: { |
750 NOTREACHED(); | 776 NOTREACHED(); |
751 filter_needs_more_output_space_ = false; | 777 filter_needs_more_output_space_ = false; |
752 rv = false; | 778 error = ERR_FAILED; |
753 break; | 779 break; |
754 } | 780 } |
755 } | 781 } |
756 | 782 |
757 // If logging all bytes is enabled, log the filtered bytes read. | 783 // If logging all bytes is enabled, log the filtered bytes read. |
758 if (rv && request() && filtered_data_len > 0 && | 784 if (error == OK && request() && filtered_data_len > 0 && |
759 request()->net_log().IsCapturing()) { | 785 request()->net_log().IsCapturing()) { |
760 request()->net_log().AddByteTransferEvent( | 786 request()->net_log().AddByteTransferEvent( |
761 NetLog::TYPE_URL_REQUEST_JOB_FILTERED_BYTES_READ, filtered_data_len, | 787 NetLog::TYPE_URL_REQUEST_JOB_FILTERED_BYTES_READ, filtered_data_len, |
762 filtered_read_buffer_->data()); | 788 filtered_read_buffer_->data()); |
763 } | 789 } |
764 } else { | 790 } else { |
765 // we are done, or there is no data left. | 791 // we are done, or there is no data left. |
766 rv = true; | 792 error = OK; |
767 } | 793 } |
768 break; | 794 break; |
769 } | 795 } |
770 | 796 |
771 if (rv) { | 797 if (error == OK) { |
772 // When we successfully finished a read, we no longer need to save the | 798 // When we successfully finished a read, we no longer need to save the |
773 // caller's buffers. Release our reference. | 799 // caller's buffers. Release our reference. |
774 filtered_read_buffer_ = NULL; | 800 filtered_read_buffer_ = NULL; |
775 filtered_read_buffer_len_ = 0; | 801 filtered_read_buffer_len_ = 0; |
776 } | 802 } |
777 return rv; | 803 return error; |
778 } | 804 } |
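
The loop above now folds Filter::ReadData's four statuses into a net::Error plus a byte count: FILTER_DONE and FILTER_OK surface their output as OK, FILTER_NEED_MORE_DATA either surfaces partial output or loops for more input, and FILTER_ERROR becomes ERR_CONTENT_DECODING_FAILED (which the callers now report via NotifyDone, instead of the loop doing it itself). The mapping in isolation, with stand-in enums and the loop elided; the -330 value is an assumption for this sketch:

enum Error { OK = 0, ERR_FAILED = -2, ERR_CONTENT_DECODING_FAILED = -330 };
enum class FilterStatus { FILTER_DONE, FILTER_NEED_MORE_DATA, FILTER_OK, FILTER_ERROR };

struct Outcome {
  Error error;
  bool read_more_input;  // True: loop again instead of returning.
};

// Mirrors the switch in ReadFilteredData().
Outcome MapFilterStatus(FilterStatus status, int filtered_data_len) {
  switch (status) {
    case FilterStatus::FILTER_DONE:
    case FilterStatus::FILTER_OK:
      return {OK, false};
    case FilterStatus::FILTER_NEED_MORE_DATA:
      // Zero output means e.g. an incomplete gzip header: read more input.
      return {OK, filtered_data_len == 0};
    case FilterStatus::FILTER_ERROR:
      return {ERR_CONTENT_DECODING_FAILED, false};
  }
  return {ERR_FAILED, false};  // NOTREACHED() in the real code.
}
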
779 | 805 |
780 void URLRequestJob::DestroyFilters() { | 806 void URLRequestJob::DestroyFilters() { |
781 filter_.reset(); | 807 filter_.reset(); |
782 } | 808 } |
783 | 809 |
784 const URLRequestStatus URLRequestJob::GetStatus() { | 810 const URLRequestStatus URLRequestJob::GetStatus() { |
785 if (request_) | 811 if (request_) |
786 return request_->status(); | 812 return request_->status(); |
787 // If the request is gone, we must be cancelled. | 813 // If the request is gone, we must be cancelled. |
(...skipping 12 matching lines...)
800 request_->status().is_success() || | 826 request_->status().is_success() || |
801 (!status.is_success() && !status.is_io_pending())); | 827 (!status.is_success() && !status.is_io_pending())); |
802 request_->set_status(status); | 828 request_->set_status(status); |
803 } | 829 } |
804 } | 830 } |
805 | 831 |
806 void URLRequestJob::SetProxyServer(const HostPortPair& proxy_server) { | 832 void URLRequestJob::SetProxyServer(const HostPortPair& proxy_server) { |
807 request_->proxy_server_ = proxy_server; | 833 request_->proxy_server_ = proxy_server; |
808 } | 834 } |
809 | 835 |
810 bool URLRequestJob::ReadRawDataForFilter(int* bytes_read) { | 836 Error URLRequestJob::ReadRawDataForFilter(int* bytes_read) { |
811 bool rv = false; | 837 Error error = ERR_FAILED; |
812 | |
813 DCHECK(bytes_read); | 838 DCHECK(bytes_read); |
814 DCHECK(filter_.get()); | 839 DCHECK(filter_.get()); |
815 | 840 |
816 *bytes_read = 0; | 841 *bytes_read = 0; |
817 | 842 |
818 // Get more pre-filtered data if needed. | 843 // Get more pre-filtered data if needed. |
819 // TODO(mbelshe): is it possible that the filter needs *MORE* data | 844 // TODO(mbelshe): is it possible that the filter needs *MORE* data |
820 // when there is some data already in the buffer? | 845 // when there is some data already in the buffer? |
821 if (!filter_->stream_data_len() && !is_done()) { | 846 if (!filter_->stream_data_len() && !is_done()) { |
822 IOBuffer* stream_buffer = filter_->stream_buffer(); | 847 IOBuffer* stream_buffer = filter_->stream_buffer(); |
823 int stream_buffer_size = filter_->stream_buffer_size(); | 848 int stream_buffer_size = filter_->stream_buffer_size(); |
824 rv = ReadRawDataHelper(stream_buffer, stream_buffer_size, bytes_read); | 849 error = ReadRawDataHelper(stream_buffer, stream_buffer_size, bytes_read); |
825 } | 850 } |
826 return rv; | 851 return error; |
827 } | 852 } |
828 | 853 |
829 bool URLRequestJob::ReadRawDataHelper(IOBuffer* buf, int buf_size, | 854 Error URLRequestJob::ReadRawDataHelper(IOBuffer* buf, |
830 int* bytes_read) { | 855 int buf_size, |
831 DCHECK(!request_->status().is_io_pending()); | 856 int* bytes_read) { |
832 DCHECK(raw_read_buffer_.get() == NULL); | 857 DCHECK(!raw_read_buffer_); |
833 | 858 |
834 // Keep a pointer to the read buffer, so we have access to it in the | 859 // Keep a pointer to the read buffer, so we have access to it in |
835 // OnRawReadComplete() callback in the event that the read completes | 860 // GatherRawReadStats() in the event that the read completes asynchronously. |
836 // asynchronously. | |
837 raw_read_buffer_ = buf; | 861 raw_read_buffer_ = buf; |
838 bool rv = ReadRawData(buf, buf_size, bytes_read); | 862 Error error; |
| 863 ConvertResultToError(ReadRawData(buf, buf_size), &error, bytes_read); |
839 | 864 |
840 if (!request_->status().is_io_pending()) { | 865 if (error != ERR_IO_PENDING) { |
841 // If the read completes synchronously, either success or failure, | 866 // If the read completes synchronously, either success or failure, invoke |
842 // invoke the OnRawReadComplete callback so we can account for the | 867 // GatherRawReadStats so we can account for the completed read. |
843 // completed read. | 868 GatherRawReadStats(error, *bytes_read); |
844 OnRawReadComplete(*bytes_read); | |
845 } | 869 } |
846 return rv; | 870 return error; |
847 } | 871 } |
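
ReadRawDataHelper pins |buf| in raw_read_buffer_ before issuing the read so the buffer survives an asynchronous completion; GatherRawReadStats is then the single accounting-and-release point on both paths. The branch in miniature, with a stub standing in for the real stats call:

enum Error { OK = 0, ERR_IO_PENDING = -1 };

void GatherRawReadStats(Error error, int bytes_read) {}  // Stub for the sketch.

// Mirrors the tail of ReadRawDataHelper(): only a genuinely pending read
// defers accounting (and the raw_read_buffer_ release) to
// ReadRawDataComplete(); every synchronous outcome is recorded immediately.
void AccountForRead(Error error, int bytes_read) {
  if (error != ERR_IO_PENDING)
    GatherRawReadStats(error, bytes_read);
}
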
848 | 872 |
849 void URLRequestJob::FollowRedirect(const RedirectInfo& redirect_info) { | 873 void URLRequestJob::FollowRedirect(const RedirectInfo& redirect_info) { |
850 int rv = request_->Redirect(redirect_info); | 874 int rv = request_->Redirect(redirect_info); |
851 if (rv != OK) | 875 if (rv != OK) |
852 NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, rv)); | 876 NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, rv)); |
853 } | 877 } |
854 | 878 |
855 void URLRequestJob::OnRawReadComplete(int bytes_read) { | 879 void URLRequestJob::GatherRawReadStats(Error error, int bytes_read) { |
856 DCHECK(raw_read_buffer_.get()); | 880 DCHECK(raw_read_buffer_ || bytes_read == 0); |
857 // If |filter_| is non-NULL, bytes will be logged after it is applied instead. | 881 DCHECK_NE(ERR_IO_PENDING, error); |
| 882 |
| 883 if (error != OK) { |
| 884 raw_read_buffer_ = nullptr; |
| 885 return; |
| 886 } |
| 887 // If |filter_| is non-NULL, bytes will be logged after it is applied |
| 888 // instead. |
858 if (!filter_.get() && request() && bytes_read > 0 && | 889 if (!filter_.get() && request() && bytes_read > 0 && |
859 request()->net_log().IsCapturing()) { | 890 request()->net_log().IsCapturing()) { |
860 request()->net_log().AddByteTransferEvent( | 891 request()->net_log().AddByteTransferEvent( |
861 NetLog::TYPE_URL_REQUEST_JOB_BYTES_READ, | 892 NetLog::TYPE_URL_REQUEST_JOB_BYTES_READ, bytes_read, |
862 bytes_read, raw_read_buffer_->data()); | 893 raw_read_buffer_->data()); |
863 } | 894 } |
864 | 895 |
865 if (bytes_read > 0) { | 896 if (bytes_read > 0) { |
866 RecordBytesRead(bytes_read); | 897 RecordBytesRead(bytes_read); |
867 } | 898 } |
868 raw_read_buffer_ = NULL; | 899 raw_read_buffer_ = nullptr; |
869 } | 900 } |
870 | 901 |
871 void URLRequestJob::RecordBytesRead(int bytes_read) { | 902 void URLRequestJob::RecordBytesRead(int bytes_read) { |
872 DCHECK_GT(bytes_read, 0); | 903 DCHECK_GT(bytes_read, 0); |
873 prefilter_bytes_read_ += bytes_read; | 904 prefilter_bytes_read_ += bytes_read; |
874 | 905 |
875 // On first read, notify NetworkQualityEstimator that response headers have | 906 // On first read, notify NetworkQualityEstimator that response headers have |
876 // been received. | 907 // been received. |
877 // TODO(tbansal): Move this to url_request_http_job.cc. This may catch | 908 // TODO(tbansal): Move this to url_request_http_job.cc. This may catch |
878 // Service Worker jobs twice. | 909 // Service Worker jobs twice. |
(...skipping 91 matching lines...)
970 int64_t total_sent_bytes = GetTotalSentBytes(); | 1001 int64_t total_sent_bytes = GetTotalSentBytes(); |
971 DCHECK_GE(total_sent_bytes, last_notified_total_sent_bytes_); | 1002 DCHECK_GE(total_sent_bytes, last_notified_total_sent_bytes_); |
972 if (total_sent_bytes > last_notified_total_sent_bytes_) { | 1003 if (total_sent_bytes > last_notified_total_sent_bytes_) { |
973 network_delegate_->NotifyNetworkBytesSent( | 1004 network_delegate_->NotifyNetworkBytesSent( |
974 *request_, total_sent_bytes - last_notified_total_sent_bytes_); | 1005 *request_, total_sent_bytes - last_notified_total_sent_bytes_); |
975 } | 1006 } |
976 last_notified_total_sent_bytes_ = total_sent_bytes; | 1007 last_notified_total_sent_bytes_ = total_sent_bytes; |
977 } | 1008 } |
978 | 1009 |
979 } // namespace net | 1010 } // namespace net |