OLD | NEW |
---|---|
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "net/url_request/url_request_job.h" | 5 #include "net/url_request/url_request_job.h" |
6 | 6 |
7 #include "base/bind.h" | 7 #include "base/bind.h" |
8 #include "base/compiler_specific.h" | 8 #include "base/compiler_specific.h" |
9 #include "base/location.h" | 9 #include "base/location.h" |
10 #include "base/metrics/histogram_macros.h" | 10 #include "base/metrics/histogram_macros.h" |
(...skipping 83 matching lines...) | |
94 // Make sure the request is notified that we are done. We assume that the | 94 // Make sure the request is notified that we are done. We assume that the |
95 // request took care of setting its error status before calling Kill. | 95 // request took care of setting its error status before calling Kill. |
96 if (request_) | 96 if (request_) |
97 NotifyCanceled(); | 97 NotifyCanceled(); |
98 } | 98 } |
99 | 99 |
100 void URLRequestJob::DetachRequest() { | 100 void URLRequestJob::DetachRequest() { |
101 request_ = NULL; | 101 request_ = NULL; |
102 } | 102 } |
103 | 103 |
104 // This function calls ReadData to get stream data. If a filter exists, passes | 104 // This function calls ReadRawData to get stream data. If a filter exists, it |
105 // the data to the attached filter. Then returns the output from filter back to | 105 // passes the data to the attached filter. It then returns the output from |
106 // the caller. | 106 // filter back to the caller. |
107 bool URLRequestJob::Read(IOBuffer* buf, int buf_size, int *bytes_read) { | 107 bool URLRequestJob::Read(IOBuffer* buf, int buf_size, int *bytes_read) { |
108 bool rv = false; | |
109 | |
110 DCHECK_LT(buf_size, 1000000); // Sanity check. | 108 DCHECK_LT(buf_size, 1000000); // Sanity check. |
111 DCHECK(buf); | 109 DCHECK(buf); |
112 DCHECK(bytes_read); | 110 DCHECK(bytes_read); |
113 DCHECK(filtered_read_buffer_.get() == NULL); | 111 DCHECK(filtered_read_buffer_.get() == NULL); |
114 DCHECK_EQ(0, filtered_read_buffer_len_); | 112 DCHECK_EQ(0, filtered_read_buffer_len_); |
115 | 113 |
114 Error error = OK; | |
116 *bytes_read = 0; | 115 *bytes_read = 0; |
117 | 116 |
118 // Skip Filter if not present. | 117 // Skip Filter if not present. |
119 if (!filter_.get()) { | 118 if (!filter_) { |
120 rv = ReadRawDataHelper(buf, buf_size, bytes_read); | 119 error = ReadRawDataHelper(buf, buf_size, bytes_read); |
121 } else { | 120 } else { |
122 // Save the caller's buffers while we do IO | 121 // Save the caller's buffers while we do IO |
123 // in the filter's buffers. | 122 // in the filter's buffers. |
124 filtered_read_buffer_ = buf; | 123 filtered_read_buffer_ = buf; |
125 filtered_read_buffer_len_ = buf_size; | 124 filtered_read_buffer_len_ = buf_size; |
126 | 125 |
127 if (ReadFilteredData(bytes_read)) { | 126 error = ReadFilteredData(bytes_read); |
128 rv = true; // We have data to return. | |
129 | 127 |
130 // It is fine to call DoneReading even if ReadFilteredData receives 0 | 128 // Synchronous EOF from the filter. |
131 // bytes from the net, but we avoid making that call if we know for | 129 if (error == OK && *bytes_read == 0) |
132 // sure that's the case (ReadRawDataHelper path). | 130 DoneReading(); |
mmenke
2015/10/22 18:58:10
Randy: Did we ever figure out why this isn't call
Randy Smith (Not in Mondays)
2015/10/22 20:38:45
So the comment in url_request_job.h says:
// Ca
mmenke
2015/10/22 20:42:46
I thought transaction_->DoneReading() was used to
Randy Smith (Not in Mondays)
2015/10/26 21:38:03
So the comment in HttpTransaction suggests why we
| |
133 if (*bytes_read == 0) | |
134 DoneReading(); | |
135 } else { | |
136 rv = false; // Error, or a new IO is pending. | |
137 } | |
138 } | 131 } |
139 | 132 |
140 if (rv && *bytes_read == 0) | 133 if (error == OK) { |
141 NotifyDone(URLRequestStatus()); | 134 // If URLRequestJob read zero bytes, the job is at EOF. |
142 return rv; | 135 if (*bytes_read == 0) |
136 NotifyDone(URLRequestStatus()); | |
137 } else if (error == ERR_IO_PENDING) { | |
138 SetStatus(URLRequestStatus::FromError(ERR_IO_PENDING)); | |
139 } else { | |
140 NotifyDone(URLRequestStatus::FromError(error)); | |
141 *bytes_read = -1; | |
142 } | |
143 return error == OK; | |
143 } | 144 } |
144 | 145 |
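A caller-side view of the reworked Read() contract, purely as an illustrative sketch (the caller loop, |job|, |buffer|, and kBufSize are hypothetical; the semantics follow the new code above):

  int bytes_read = 0;
  if (job->Read(buffer.get(), kBufSize, &bytes_read)) {
    // Success: either |bytes_read| > 0 bytes are ready in |buffer|, or
    // bytes_read == 0, in which case the job has already signalled EOF via
    // NotifyDone(URLRequestStatus()).
  } else {
    // Pending or failed. On a pending read the status was set to
    // ERR_IO_PENDING; on a real error NotifyDone() carried the error code
    // and *bytes_read was set to -1.
  }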
145 void URLRequestJob::StopCaching() { | 146 void URLRequestJob::StopCaching() { |
146 // Nothing to do here. | 147 // Nothing to do here. |
147 } | 148 } |
148 | 149 |
149 bool URLRequestJob::GetFullRequestHeaders(HttpRequestHeaders* headers) const { | 150 bool URLRequestJob::GetFullRequestHeaders(HttpRequestHeaders* headers) const { |
150 // Most job types don't send request headers. | 151 // Most job types don't send request headers. |
151 return false; | 152 return false; |
152 } | 153 } |
(...skipping 320 matching lines...) | |
473 base::StringToInt64(content_length, &expected_content_size_); | 474 base::StringToInt64(content_length, &expected_content_size_); |
474 } else { | 475 } else { |
475 request_->net_log().AddEvent( | 476 request_->net_log().AddEvent( |
476 NetLog::TYPE_URL_REQUEST_FILTERS_SET, | 477 NetLog::TYPE_URL_REQUEST_FILTERS_SET, |
477 base::Bind(&FiltersSetCallback, base::Unretained(filter_.get()))); | 478 base::Bind(&FiltersSetCallback, base::Unretained(filter_.get()))); |
478 } | 479 } |
479 | 480 |
480 request_->NotifyResponseStarted(); | 481 request_->NotifyResponseStarted(); |
481 } | 482 } |
482 | 483 |
483 void URLRequestJob::NotifyReadComplete(int bytes_read) { | 484 void URLRequestJob::ConvertResultToError(int result, Error* error, int* count) { |
485 if (result >= 0) { | |
486 *error = OK; | |
487 *count = result; | |
488 } else { | |
489 *error = static_cast<Error>(result); | |
490 *count = 0; | |
491 } | |
492 } | |
493 | |
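For illustration only: the helper splits a packed //net result into the (error, byte count) pair used by the call sites below, following the usual convention that non-negative results are byte counts and negative results are Error codes.

  Error error;
  int count;
  ConvertResultToError(1024, &error, &count);  // error == OK, count == 1024
  ConvertResultToError(ERR_CONTENT_DECODING_FAILED, &error, &count);
  // error == ERR_CONTENT_DECODING_FAILED, count == 0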
494 void URLRequestJob::ReadRawDataComplete(int result) { | |
484 // TODO(cbentzel): Remove ScopedTracker below once crbug.com/475755 is fixed. | 495 // TODO(cbentzel): Remove ScopedTracker below once crbug.com/475755 is fixed. |
485 tracked_objects::ScopedTracker tracking_profile( | 496 tracked_objects::ScopedTracker tracking_profile( |
486 FROM_HERE_WITH_EXPLICIT_FUNCTION( | 497 FROM_HERE_WITH_EXPLICIT_FUNCTION( |
487 "475755 URLRequestJob::NotifyReadComplete")); | 498 "475755 URLRequestJob::RawReadCompleted")); |
499 | |
500 Error error; | |
501 int bytes_read; | |
502 ConvertResultToError(result, &error, &bytes_read); | |
488 | 503 |
489 if (!request_ || !request_->has_delegate()) | 504 if (!request_ || !request_->has_delegate()) |
490 return; // The request was destroyed, so there is no more work to do. | 505 return; // The request was destroyed, so there is no more work to do. |
491 | 506 |
492 // TODO(darin): Bug 1004233. Re-enable this test once all of the chrome | 507 // TODO(darin): Bug 1004233. Re-enable this test once all of the chrome |
493 // unit_tests have been fixed to not trip this. | 508 // unit_tests have been fixed to not trip this. |
494 #if 0 | 509 #if 0 |
495 DCHECK(!request_->status().is_io_pending()); | 510 DCHECK(!request_->status().is_io_pending()); |
496 #endif | 511 #endif |
497 // The headers should be complete before reads complete | 512 // The headers should be complete before reads complete |
498 DCHECK(has_handled_response_); | 513 DCHECK(has_handled_response_); |
499 | 514 |
500 OnRawReadComplete(bytes_read); | 515 GatherRawReadStats(error, bytes_read); |
501 | 516 |
502 // Don't notify if we had an error. | 517 bool notify = false; |
503 if (!request_->status().is_success()) | 518 if (filter_.get() && error == OK) { |
504 return; | 519 int filter_bytes_read = 0; |
520 // Tell the filter that it has more data | |
mmenke
2015/10/22 18:58:10
nit: +.
xunjieli
2015/10/23 13:43:08
Done.
| |
521 PushInputToFilter(bytes_read); | |
522 | |
523 // Filter the data. | |
524 error = ReadFilteredData(&filter_bytes_read); | |
525 if (error == OK) { | |
526 if (!filter_bytes_read) | |
527 DoneReading(); | |
528 notify = true; | |
mmenke
2015/10/22 18:58:11
Keep comment from before? ("Don't notify if we ha
xunjieli
2015/10/23 13:43:08
Done.
| |
529 } | |
530 DVLOG(1) << __FUNCTION__ << "() " | |
531 << "\"" << (request_ ? request_->url().spec() : "???") << "\"" | |
532 << " pre bytes read = " << bytes_read | |
533 << " pre total = " << prefilter_bytes_read_ | |
534 << " post total = " << postfilter_bytes_read_; | |
535 bytes_read = filter_bytes_read; | |
536 } else { | |
537 notify = true; | |
538 DVLOG(1) << __FUNCTION__ << "() " | |
539 << "\"" << (request_ ? request_->url().spec() : "???") << "\"" | |
540 << " pre bytes read = " << bytes_read | |
541 << " pre total = " << prefilter_bytes_read_ | |
542 << " post total = " << postfilter_bytes_read_; | |
543 } | |
505 | 544 |
506 // When notifying the delegate, the delegate can release the request | 545 // When notifying the delegate, the delegate can release the request |
507 // (and thus release 'this'). After calling to the delegate, we must | 546 // (and thus release 'this'). After calling to the delegate, we must |
508 // check the request pointer to see if it still exists, and return | 547 // check the request pointer to see if it still exists, and return |
509 // immediately if it has been destroyed. self_preservation ensures our | 548 // immediately if it has been destroyed. self_preservation ensures our |
510 // survival until we can get out of this method. | 549 // survival until we can get out of this method. |
511 scoped_refptr<URLRequestJob> self_preservation(this); | 550 scoped_refptr<URLRequestJob> self_preservation(this); |
512 | 551 |
513 if (filter_.get()) { | 552 // Synchronize the URLRequest state machine with the URLRequestJob state |
514 // Tell the filter that it has more data | 553 // machine. If this read succeeded, either the request is at EOF and the |
515 FilteredDataRead(bytes_read); | 554 // URLRequest state machine goes to 'finished', or it is not and the |
555 // URLRequest state machine goes to 'success'. If the read failed, the | |
556 // URLRequest state machine goes directly to 'finished'. | |
557 // | |
558 // Update the URLRequest's status first, so that NotifyReadCompleted has an | |
559 // accurate view of the request. | |
560 if (error == OK) { | |
561 if (bytes_read == 0) | |
562 NotifyDone(URLRequestStatus()); | |
563 else | |
564 SetStatus(URLRequestStatus()); | |
565 } else { | |
566 NotifyDone(URLRequestStatus::FromError(error)); | |
567 } | |
mmenke
2015/10/22 18:58:10
Maybe:
if (error == OK && bytes_read > 0) {
Set
xunjieli
2015/10/23 13:43:08
Done.
| |
516 | 568 |
517 // Filter the data. | 569 // TODO(ellyjones): why does this method only call NotifyReadComplete when |
518 int filter_bytes_read = 0; | 570 // there isn't a filter error? How do filter errors get notified? |
519 if (ReadFilteredData(&filter_bytes_read)) { | 571 if (notify) |
520 if (!filter_bytes_read) | |
521 DoneReading(); | |
522 request_->NotifyReadCompleted(filter_bytes_read); | |
523 } | |
524 } else { | |
525 request_->NotifyReadCompleted(bytes_read); | 572 request_->NotifyReadCompleted(bytes_read); |
526 } | |
527 DVLOG(1) << __FUNCTION__ << "() " | |
528 << "\"" << (request_ ? request_->url().spec() : "???") << "\"" | |
529 << " pre bytes read = " << bytes_read | |
530 << " pre total = " << prefilter_bytes_read_ | |
531 << " post total = " << postfilter_bytes_read_; | |
532 } | 573 } |
533 | 574 |
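To show how a job subclass is expected to drive this path, here is a minimal sketch of an asynchronous completion callback; DemoJob and its backend callback are hypothetical and not part of this CL:

  void DemoJob::OnBackendReadDone(int result) {
    // |result| follows the usual //net convention: >= 0 is a byte count,
    // < 0 is a net::Error. ERR_IO_PENDING must not be forwarded here.
    DCHECK_NE(ERR_IO_PENDING, result);
    ReadRawDataComplete(result);
  }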
534 void URLRequestJob::NotifyStartError(const URLRequestStatus &status) { | 575 void URLRequestJob::NotifyStartError(const URLRequestStatus &status) { |
535 DCHECK(!has_handled_response_); | 576 DCHECK(!has_handled_response_); |
536 has_handled_response_ = true; | 577 has_handled_response_ = true; |
537 if (request_) { | 578 if (request_) { |
538 // There may be relevant information in the response info even in the | 579 // There may be relevant information in the response info even in the |
539 // error case. | 580 // error case. |
540 GetResponseInfo(&request_->response_info_); | 581 GetResponseInfo(&request_->response_info_); |
541 | 582 |
542 request_->set_status(status); | 583 request_->set_status(status); |
543 request_->NotifyResponseStarted(); | 584 request_->NotifyResponseStarted(); |
544 // We may have been deleted. | 585 // We may have been deleted. |
545 } | 586 } |
546 } | 587 } |
547 | 588 |
548 void URLRequestJob::NotifyDone(const URLRequestStatus &status) { | 589 void URLRequestJob::NotifyDone(const URLRequestStatus &status) { |
549 DCHECK(!done_) << "Job sending done notification twice"; | 590 DCHECK(!done_) << "Job sending done notification twice"; |
550 if (done_) | 591 if (done_) |
551 return; | 592 return; |
552 done_ = true; | 593 done_ = true; |
553 | 594 |
554 // Unless there was an error, we should have at least tried to handle | 595 // Unless there was an error, we should have at least tried to handle |
555 // the response before getting here. | 596 // the response before getting here. |
556 DCHECK(has_handled_response_ || !status.is_success()); | 597 DCHECK(has_handled_response_ || !status.is_success()); |
557 | 598 |
558 // As with NotifyReadComplete, we need to take care to notice if we were | 599 // As with RawReadCompleted, we need to take care to notice if we were |
559 // destroyed during a delegate callback. | 600 // destroyed during a delegate callback. |
560 if (request_) { | 601 if (request_) { |
561 request_->set_is_pending(false); | 602 request_->set_is_pending(false); |
562 // With async IO, it's quite possible to have a few outstanding | 603 // With async IO, it's quite possible to have a few outstanding |
563 // requests. We could receive a request to Cancel, followed shortly | 604 // requests. We could receive a request to Cancel, followed shortly |
564 // by a successful IO. For tracking the status(), once there is | 605 // by a successful IO. For tracking the status(), once there is |
565 // an error, we do not change the status back to success. To | 606 // an error, we do not change the status back to success. To |
566 // enforce this, only set the status if the job is so far | 607 // enforce this, only set the status if the job is so far |
567 // successful. | 608 // successful. |
568 if (request_->status().is_success()) { | 609 if (request_->status().is_success()) { |
(...skipping 62 matching lines...) | |
631 } | 672 } |
632 | 673 |
633 void URLRequestJob::OnCallToDelegate() { | 674 void URLRequestJob::OnCallToDelegate() { |
634 request_->OnCallToDelegate(); | 675 request_->OnCallToDelegate(); |
635 } | 676 } |
636 | 677 |
637 void URLRequestJob::OnCallToDelegateComplete() { | 678 void URLRequestJob::OnCallToDelegateComplete() { |
638 request_->OnCallToDelegateComplete(); | 679 request_->OnCallToDelegateComplete(); |
639 } | 680 } |
640 | 681 |
641 bool URLRequestJob::ReadRawData(IOBuffer* buf, int buf_size, | 682 int URLRequestJob::ReadRawData(IOBuffer* buf, int buf_size) { |
642 int *bytes_read) { | 683 return 0; |
643 DCHECK(bytes_read); | |
644 *bytes_read = 0; | |
645 return true; | |
646 } | 684 } |
647 | 685 |
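A synchronous override under the new signature might look like this sketch (DemoJob, data_, and offset_ are hypothetical; the return convention is: positive = bytes read, 0 = EOF, negative = net::Error, with ERR_IO_PENDING meaning the result will arrive later via ReadRawDataComplete):

  int DemoJob::ReadRawData(IOBuffer* buf, int buf_size) {
    int remaining = static_cast<int>(data_.size()) - offset_;
    int to_copy = std::min(buf_size, remaining);
    if (to_copy <= 0)
      return 0;  // EOF.
    memcpy(buf->data(), data_.data() + offset_, to_copy);
    offset_ += to_copy;
    return to_copy;  // Bytes read synchronously.
  }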
648 void URLRequestJob::DoneReading() { | 686 void URLRequestJob::DoneReading() { |
649 // Do nothing. | 687 // Do nothing. |
650 } | 688 } |
651 | 689 |
652 void URLRequestJob::DoneReadingRedirectResponse() { | 690 void URLRequestJob::DoneReadingRedirectResponse() { |
653 } | 691 } |
654 | 692 |
655 void URLRequestJob::FilteredDataRead(int bytes_read) { | 693 void URLRequestJob::PushInputToFilter(int bytes_read) { |
656 DCHECK(filter_); | 694 DCHECK(filter_); |
657 filter_->FlushStreamBuffer(bytes_read); | 695 filter_->FlushStreamBuffer(bytes_read); |
658 } | 696 } |
659 | 697 |
660 bool URLRequestJob::ReadFilteredData(int* bytes_read) { | 698 Error URLRequestJob::ReadFilteredData(int* bytes_read) { |
661 DCHECK(filter_); | 699 DCHECK(filter_); |
662 DCHECK(filtered_read_buffer_.get()); | 700 DCHECK(filtered_read_buffer_.get()); |
663 DCHECK_GT(filtered_read_buffer_len_, 0); | 701 DCHECK_GT(filtered_read_buffer_len_, 0); |
664 DCHECK_LT(filtered_read_buffer_len_, 1000000); // Sanity check. | 702 DCHECK_LT(filtered_read_buffer_len_, 1000000); // Sanity check. |
665 DCHECK(!raw_read_buffer_.get()); | 703 DCHECK(!raw_read_buffer_); |
666 | 704 |
667 *bytes_read = 0; | 705 *bytes_read = 0; |
668 bool rv = false; | 706 Error error = ERR_FAILED; |
669 | 707 |
670 for (;;) { | 708 for (;;) { |
671 if (is_done()) | 709 if (is_done()) |
672 return true; | 710 return OK; |
673 | 711 |
674 if (!filter_needs_more_output_space_ && !filter_->stream_data_len()) { | 712 if (!filter_needs_more_output_space_ && !filter_->stream_data_len()) { |
675 // We don't have any raw data to work with, so read from the transaction. | 713 // We don't have any raw data to work with, so read from the transaction. |
676 int filtered_data_read; | 714 int filtered_data_read; |
677 if (ReadRawDataForFilter(&filtered_data_read)) { | 715 error = ReadRawDataForFilter(&filtered_data_read); |
678 if (filtered_data_read > 0) { | 716 // If ReadRawDataForFilter returned some data, fall through to the case |
679 // Give data to filter. | 717 // below; otherwise, return early. |
680 filter_->FlushStreamBuffer(filtered_data_read); | 718 if (error != OK || filtered_data_read == 0) |
681 } else { | 719 return error; |
682 return true; // EOF. | 720 filter_->FlushStreamBuffer(filtered_data_read); |
683 } | |
684 } else { | |
685 return false; // IO Pending (or error). | |
686 } | |
687 } | 721 } |
688 | 722 |
689 if ((filter_->stream_data_len() || filter_needs_more_output_space_) && | 723 if ((filter_->stream_data_len() || filter_needs_more_output_space_) && |
690 !is_done()) { | 724 !is_done()) { |
691 // Get filtered data. | 725 // Get filtered data. |
692 int filtered_data_len = filtered_read_buffer_len_; | 726 int filtered_data_len = filtered_read_buffer_len_; |
693 int output_buffer_size = filtered_data_len; | 727 int output_buffer_size = filtered_data_len; |
694 Filter::FilterStatus status = | 728 Filter::FilterStatus status = |
695 filter_->ReadData(filtered_read_buffer_->data(), &filtered_data_len); | 729 filter_->ReadData(filtered_read_buffer_->data(), &filtered_data_len); |
696 | 730 |
697 if (filter_needs_more_output_space_ && !filtered_data_len) { | 731 if (filter_needs_more_output_space_ && !filtered_data_len) { |
698 // filter_needs_more_output_space_ was mistaken... there are no more | 732 // filter_needs_more_output_space_ was mistaken... there are no more |
699 // bytes and we should have at least tried to fill up the filter's input | 733 // bytes and we should have at least tried to fill up the filter's input |
700 // buffer. Correct the state, and try again. | 734 // buffer. Correct the state, and try again. |
701 filter_needs_more_output_space_ = false; | 735 filter_needs_more_output_space_ = false; |
702 continue; | 736 continue; |
703 } | 737 } |
704 filter_needs_more_output_space_ = | 738 filter_needs_more_output_space_ = |
705 (filtered_data_len == output_buffer_size); | 739 (filtered_data_len == output_buffer_size); |
706 | 740 |
707 switch (status) { | 741 switch (status) { |
708 case Filter::FILTER_DONE: { | 742 case Filter::FILTER_DONE: { |
709 filter_needs_more_output_space_ = false; | 743 filter_needs_more_output_space_ = false; |
710 *bytes_read = filtered_data_len; | 744 *bytes_read = filtered_data_len; |
711 postfilter_bytes_read_ += filtered_data_len; | 745 postfilter_bytes_read_ += filtered_data_len; |
712 rv = true; | 746 error = OK; |
713 break; | 747 break; |
714 } | 748 } |
715 case Filter::FILTER_NEED_MORE_DATA: { | 749 case Filter::FILTER_NEED_MORE_DATA: { |
716 // We have finished filtering all data currently in the buffer. | 750 // We have finished filtering all data currently in the buffer. |
717 // There might be some space left in the output buffer. One can | 751 // There might be some space left in the output buffer. One can |
718 // consider reading more data from the stream to feed the filter | 752 // consider reading more data from the stream to feed the filter |
719 // and filling up the output buffer. This leads to more complicated | 753 // and filling up the output buffer. This leads to more complicated |
720 // buffer management and data notification mechanisms. | 754 // buffer management and data notification mechanisms. |
721 // We can revisit this issue if there is a real perf need. | 755 // We can revisit this issue if there is a real perf need. |
722 if (filtered_data_len > 0) { | 756 if (filtered_data_len > 0) { |
723 *bytes_read = filtered_data_len; | 757 *bytes_read = filtered_data_len; |
724 postfilter_bytes_read_ += filtered_data_len; | 758 postfilter_bytes_read_ += filtered_data_len; |
725 rv = true; | 759 error = OK; |
726 } else { | 760 } else { |
727 // Read again since we haven't received enough data yet (e.g., we | 761 // Read again since we haven't received enough data yet (e.g., we |
728 // may not have a complete gzip header yet). | 762 // may not have a complete gzip header yet). |
729 continue; | 763 continue; |
730 } | 764 } |
731 break; | 765 break; |
732 } | 766 } |
733 case Filter::FILTER_OK: { | 767 case Filter::FILTER_OK: { |
734 *bytes_read = filtered_data_len; | 768 *bytes_read = filtered_data_len; |
735 postfilter_bytes_read_ += filtered_data_len; | 769 postfilter_bytes_read_ += filtered_data_len; |
736 rv = true; | 770 error = OK; |
737 break; | 771 break; |
738 } | 772 } |
739 case Filter::FILTER_ERROR: { | 773 case Filter::FILTER_ERROR: { |
740 DVLOG(1) << __FUNCTION__ << "() " | 774 DVLOG(1) << __FUNCTION__ << "() " |
741 << "\"" << (request_ ? request_->url().spec() : "???") | 775 << "\"" << (request_ ? request_->url().spec() : "???") |
742 << "\"" << " Filter Error"; | 776 << "\"" << " Filter Error"; |
743 filter_needs_more_output_space_ = false; | 777 filter_needs_more_output_space_ = false; |
744 NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, | 778 error = ERR_CONTENT_DECODING_FAILED; |
745 ERR_CONTENT_DECODING_FAILED)); | |
746 rv = false; | |
747 break; | 779 break; |
748 } | 780 } |
749 default: { | 781 default: { |
750 NOTREACHED(); | 782 NOTREACHED(); |
751 filter_needs_more_output_space_ = false; | 783 filter_needs_more_output_space_ = false; |
752 rv = false; | 784 error = ERR_FAILED; |
753 break; | 785 break; |
754 } | 786 } |
755 } | 787 } |
756 | 788 |
757 // If logging all bytes is enabled, log the filtered bytes read. | 789 // If logging all bytes is enabled, log the filtered bytes read. |
758 if (rv && request() && filtered_data_len > 0 && | 790 if (error == OK && request() && filtered_data_len > 0 && |
759 request()->net_log().IsCapturing()) { | 791 request()->net_log().IsCapturing()) { |
760 request()->net_log().AddByteTransferEvent( | 792 request()->net_log().AddByteTransferEvent( |
761 NetLog::TYPE_URL_REQUEST_JOB_FILTERED_BYTES_READ, filtered_data_len, | 793 NetLog::TYPE_URL_REQUEST_JOB_FILTERED_BYTES_READ, filtered_data_len, |
762 filtered_read_buffer_->data()); | 794 filtered_read_buffer_->data()); |
763 } | 795 } |
764 } else { | 796 } else { |
765 // we are done, or there is no data left. | 797 // we are done, or there is no data left. |
766 rv = true; | 798 error = OK; |
767 } | 799 } |
768 break; | 800 break; |
769 } | 801 } |
770 | 802 |
771 if (rv) { | 803 if (error == OK) { |
772 // When we successfully finished a read, we no longer need to save the | 804 // When we successfully finished a read, we no longer need to save the |
773 // caller's buffers. Release our reference. | 805 // caller's buffers. Release our reference. |
774 filtered_read_buffer_ = NULL; | 806 filtered_read_buffer_ = NULL; |
775 filtered_read_buffer_len_ = 0; | 807 filtered_read_buffer_len_ = 0; |
776 } | 808 } |
777 return rv; | 809 return error; |
778 } | 810 } |
779 | 811 |
780 void URLRequestJob::DestroyFilters() { | 812 void URLRequestJob::DestroyFilters() { |
781 filter_.reset(); | 813 filter_.reset(); |
782 } | 814 } |
783 | 815 |
784 const URLRequestStatus URLRequestJob::GetStatus() { | 816 const URLRequestStatus URLRequestJob::GetStatus() { |
785 if (request_) | 817 if (request_) |
786 return request_->status(); | 818 return request_->status(); |
787 // If the request is gone, we must be cancelled. | 819 // If the request is gone, we must be cancelled. |
(...skipping 12 matching lines...) | |
800 request_->status().is_success() || | 832 request_->status().is_success() || |
801 (!status.is_success() && !status.is_io_pending())); | 833 (!status.is_success() && !status.is_io_pending())); |
802 request_->set_status(status); | 834 request_->set_status(status); |
803 } | 835 } |
804 } | 836 } |
805 | 837 |
806 void URLRequestJob::SetProxyServer(const HostPortPair& proxy_server) { | 838 void URLRequestJob::SetProxyServer(const HostPortPair& proxy_server) { |
807 request_->proxy_server_ = proxy_server; | 839 request_->proxy_server_ = proxy_server; |
808 } | 840 } |
809 | 841 |
810 bool URLRequestJob::ReadRawDataForFilter(int* bytes_read) { | 842 Error URLRequestJob::ReadRawDataForFilter(int* bytes_read) { |
811 bool rv = false; | 843 Error error = ERR_FAILED; |
812 | |
813 DCHECK(bytes_read); | 844 DCHECK(bytes_read); |
814 DCHECK(filter_.get()); | 845 DCHECK(filter_.get()); |
815 | 846 |
816 *bytes_read = 0; | 847 *bytes_read = 0; |
817 | 848 |
818 // Get more pre-filtered data if needed. | 849 // Get more pre-filtered data if needed. |
819 // TODO(mbelshe): is it possible that the filter needs *MORE* data | 850 // TODO(mbelshe): is it possible that the filter needs *MORE* data |
820 // when there is some data already in the buffer? | 851 // when there is some data already in the buffer? |
821 if (!filter_->stream_data_len() && !is_done()) { | 852 if (!filter_->stream_data_len() && !is_done()) { |
822 IOBuffer* stream_buffer = filter_->stream_buffer(); | 853 IOBuffer* stream_buffer = filter_->stream_buffer(); |
823 int stream_buffer_size = filter_->stream_buffer_size(); | 854 int stream_buffer_size = filter_->stream_buffer_size(); |
824 rv = ReadRawDataHelper(stream_buffer, stream_buffer_size, bytes_read); | 855 error = ReadRawDataHelper(stream_buffer, stream_buffer_size, bytes_read); |
825 } | 856 } |
826 return rv; | 857 return error; |
827 } | 858 } |
828 | 859 |
829 bool URLRequestJob::ReadRawDataHelper(IOBuffer* buf, int buf_size, | 860 Error URLRequestJob::ReadRawDataHelper(IOBuffer* buf, |
830 int* bytes_read) { | 861 int buf_size, |
862 int* bytes_read) { | |
831 DCHECK(!request_->status().is_io_pending()); | 863 DCHECK(!request_->status().is_io_pending()); |
832 DCHECK(raw_read_buffer_.get() == NULL); | 864 DCHECK(!raw_read_buffer_); |
833 | 865 |
834 // Keep a pointer to the read buffer, so we have access to it in the | 866 // Keep a pointer to the read buffer, so we have access to it in |
835 // OnRawReadComplete() callback in the event that the read completes | 867 // GatherRawReadStats() in the event that the read completes asynchronously. |
836 // asynchronously. | |
837 raw_read_buffer_ = buf; | 868 raw_read_buffer_ = buf; |
838 bool rv = ReadRawData(buf, buf_size, bytes_read); | 869 Error error; |
870 ConvertResultToError(ReadRawData(buf, buf_size), &error, bytes_read); | |
839 | 871 |
840 if (!request_->status().is_io_pending()) { | 872 if (error != ERR_IO_PENDING) { |
841 // If the read completes synchronously, either success or failure, | 873 // If the read completes synchronously, either success or failure, invoke |
842 // invoke the OnRawReadComplete callback so we can account for the | 874 // GatherRawReadStats so we can account for the completed read. |
843 // completed read. | 875 GatherRawReadStats(error, *bytes_read); |
844 OnRawReadComplete(*bytes_read); | |
845 } | 876 } |
846 return rv; | 877 return error; |
847 } | 878 } |
848 | 879 |
849 void URLRequestJob::FollowRedirect(const RedirectInfo& redirect_info) { | 880 void URLRequestJob::FollowRedirect(const RedirectInfo& redirect_info) { |
850 int rv = request_->Redirect(redirect_info); | 881 int rv = request_->Redirect(redirect_info); |
851 if (rv != OK) | 882 if (rv != OK) |
852 NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, rv)); | 883 NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, rv)); |
853 } | 884 } |
854 | 885 |
855 void URLRequestJob::OnRawReadComplete(int bytes_read) { | 886 void URLRequestJob::GatherRawReadStats(Error error, int bytes_read) { |
856 DCHECK(raw_read_buffer_.get()); | 887 DCHECK(raw_read_buffer_ || bytes_read == 0); |
888 DCHECK_NE(ERR_IO_PENDING, error); | |
889 if (error != OK) | |
890 return; | |
Randy Smith (Not in Mondays)
2015/10/22 20:38:45
It looks to me (you should verify) like this will
xunjieli
2015/10/23 13:43:08
Done. I believe you are right.
Randy Smith (Not in Mondays)
2015/10/26 21:38:03
nit, suggestion: I think the code would be a bit c
xunjieli
2015/10/27 14:17:21
Done.
| |
857 // If |filter_| is non-NULL, bytes will be logged after it is applied instead. | 891 // If |filter_| is non-NULL, bytes will be logged after it is applied instead. |
858 if (!filter_.get() && request() && bytes_read > 0 && | 892 if (!filter_.get() && request() && bytes_read > 0 && |
859 request()->net_log().IsCapturing()) { | 893 request()->net_log().IsCapturing()) { |
860 request()->net_log().AddByteTransferEvent( | 894 request()->net_log().AddByteTransferEvent( |
861 NetLog::TYPE_URL_REQUEST_JOB_BYTES_READ, | 895 NetLog::TYPE_URL_REQUEST_JOB_BYTES_READ, |
862 bytes_read, raw_read_buffer_->data()); | 896 bytes_read, raw_read_buffer_->data()); |
863 } | 897 } |
864 | 898 |
865 if (bytes_read > 0) { | 899 if (bytes_read > 0) { |
866 RecordBytesRead(bytes_read); | 900 RecordBytesRead(bytes_read); |
(...skipping 103 matching lines...) | |
970 int64_t total_sent_bytes = GetTotalSentBytes(); | 1004 int64_t total_sent_bytes = GetTotalSentBytes(); |
971 DCHECK_GE(total_sent_bytes, last_notified_total_sent_bytes_); | 1005 DCHECK_GE(total_sent_bytes, last_notified_total_sent_bytes_); |
972 if (total_sent_bytes > last_notified_total_sent_bytes_) { | 1006 if (total_sent_bytes > last_notified_total_sent_bytes_) { |
973 network_delegate_->NotifyNetworkBytesSent( | 1007 network_delegate_->NotifyNetworkBytesSent( |
974 *request_, total_sent_bytes - last_notified_total_sent_bytes_); | 1008 *request_, total_sent_bytes - last_notified_total_sent_bytes_); |
975 } | 1009 } |
976 last_notified_total_sent_bytes_ = total_sent_bytes; | 1010 last_notified_total_sent_bytes_ = total_sent_bytes; |
977 } | 1011 } |
978 | 1012 |
979 } // namespace net | 1013 } // namespace net |