Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "net/url_request/url_request_job.h" | 5 #include "net/url_request/url_request_job.h" |
| 6 | 6 |
| 7 #include "base/bind.h" | 7 #include "base/bind.h" |
| 8 #include "base/compiler_specific.h" | 8 #include "base/compiler_specific.h" |
| 9 #include "base/message_loop/message_loop.h" | 9 #include "base/message_loop/message_loop.h" |
| 10 #include "base/power_monitor/power_monitor.h" | 10 #include "base/power_monitor/power_monitor.h" |
| (...skipping 523 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 534 } | 534 } |
| 535 | 535 |
| 536 void URLRequestJob::DoneReading() { | 536 void URLRequestJob::DoneReading() { |
| 537 // Do nothing. | 537 // Do nothing. |
| 538 } | 538 } |
| 539 | 539 |
| 540 void URLRequestJob::DoneReadingRedirectResponse() { | 540 void URLRequestJob::DoneReadingRedirectResponse() { |
| 541 } | 541 } |
| 542 | 542 |
| 543 void URLRequestJob::FilteredDataRead(int bytes_read) { | 543 void URLRequestJob::FilteredDataRead(int bytes_read) { |
| 544 DCHECK(filter_.get()); // don't add data if there is no filter | 544 DCHECK(filter_); |
| 545 filter_->FlushStreamBuffer(bytes_read); | 545 filter_->FlushStreamBuffer(bytes_read); |
| 546 } | 546 } |
| 547 | 547 |
| 548 bool URLRequestJob::ReadFilteredData(int* bytes_read) { | 548 bool URLRequestJob::ReadFilteredData(int* bytes_read) { |
| 549 DCHECK(filter_.get()); // don't add data if there is no filter | 549 DCHECK(filter_); |
| 550 DCHECK(filtered_read_buffer_.get() != | 550 DCHECK(filtered_read_buffer_); |
| 551 NULL); // we need to have a buffer to fill | 551 DCHECK_GT(filtered_read_buffer_len_, 0); |
| 552 DCHECK_GT(filtered_read_buffer_len_, 0); // sanity check | 552 DCHECK_LT(filtered_read_buffer_len_, 1000000); // Sanity check. |
| 553 DCHECK_LT(filtered_read_buffer_len_, 1000000); // sanity check | 553 DCHECK(!raw_read_buffer_); |
| 554 DCHECK(raw_read_buffer_.get() == | |
| 555 NULL); // there should be no raw read buffer yet | |
| 556 | 554 |
| 555 *bytes_read = 0; | |
| 557 bool rv = false; | 556 bool rv = false; |
| 558 *bytes_read = 0; | 557 bool read_again; |
| 559 | 558 |
| 560 if (is_done()) | 559 do { |
| 561 return true; | 560 read_again = false; |
| 561 if (is_done()) | |
| 562 return true; | |
| 562 | 563 |
| 563 if (!filter_needs_more_output_space_ && !filter_->stream_data_len()) { | 564 if (!filter_needs_more_output_space_ && !filter_->stream_data_len()) { |
| 564 // We don't have any raw data to work with, so | 565 // We don't have any raw data to work with, so read from the transaction. |
| 565 // read from the socket. | 566 int filtered_data_read; |
| 566 int filtered_data_read; | 567 if (ReadRawDataForFilter(&filtered_data_read)) { |
| 567 if (ReadRawDataForFilter(&filtered_data_read)) { | 568 if (filtered_data_read > 0) { |
| 568 if (filtered_data_read > 0) { | 569 // Give data to filter. |
| 569 filter_->FlushStreamBuffer(filtered_data_read); // Give data to filter. | 570 filter_->FlushStreamBuffer(filtered_data_read); |
| 571 } else { | |
| 572 return true; // EOF. | |
| 573 } | |
| 570 } else { | 574 } else { |
| 571 return true; // EOF | 575 return false; // IO Pending (or error). |
| 572 } | 576 } |
| 573 } else { | |
| 574 return false; // IO Pending (or error) | |
| 575 } | |
| 576 } | |
| 577 | |
| 578 if ((filter_->stream_data_len() || filter_needs_more_output_space_) | |
| 579 && !is_done()) { | |
| 580 // Get filtered data. | |
| 581 int filtered_data_len = filtered_read_buffer_len_; | |
| 582 Filter::FilterStatus status; | |
| 583 int output_buffer_size = filtered_data_len; | |
| 584 status = filter_->ReadData(filtered_read_buffer_->data(), | |
| 585 &filtered_data_len); | |
| 586 | |
| 587 if (filter_needs_more_output_space_ && 0 == filtered_data_len) { | |
| 588 // filter_needs_more_output_space_ was mistaken... there are no more bytes | |
| 589 // and we should have at least tried to fill up the filter's input buffer. | |
| 590 // Correct the state, and try again. | |
| 591 filter_needs_more_output_space_ = false; | |
| 592 return ReadFilteredData(bytes_read); | |
| 593 } | 577 } |
| 594 | 578 |
| 595 switch (status) { | 579 if ((filter_->stream_data_len() || filter_needs_more_output_space_) && |
| 596 case Filter::FILTER_DONE: { | 580 !is_done()) { |
| 581 // Get filtered data. | |
| 582 int filtered_data_len = filtered_read_buffer_len_; | |
| 583 int output_buffer_size = filtered_data_len; | |
| 584 Filter::FilterStatus status = | |
| 585 filter_->ReadData(filtered_read_buffer_->data(), &filtered_data_len); | |
| 586 | |
| 587 if (filter_needs_more_output_space_ && !filtered_data_len) { | |
| 588 // filter_needs_more_output_space_ was mistaken... there are no more | |
| 589 // bytes and we should have at least tried to fill up the filter's input | |
| 590 // buffer. Correct the state, and try again. | |
| 597 filter_needs_more_output_space_ = false; | 591 filter_needs_more_output_space_ = false; |
| 598 *bytes_read = filtered_data_len; | 592 read_again = true; |
| 599 postfilter_bytes_read_ += filtered_data_len; | 593 continue; |
| 600 rv = true; | |
| 601 break; | |
| 602 } | 594 } |
| 603 case Filter::FILTER_NEED_MORE_DATA: { | 595 filter_needs_more_output_space_ = |
| 604 filter_needs_more_output_space_ = | 596 (filtered_data_len == output_buffer_size); |
| 605 (filtered_data_len == output_buffer_size); | 597 |
| 606 // We have finished filtering all data currently in the buffer. | 598 switch (status) { |
| 607 // There might be some space left in the output buffer. One can | 599 case Filter::FILTER_DONE: { |
| 608 // consider reading more data from the stream to feed the filter | 600 filter_needs_more_output_space_ = false; |
| 609 // and filling up the output buffer. This leads to more complicated | |
| 610 // buffer management and data notification mechanisms. | |
| 611 // We can revisit this issue if there is a real perf need. | |
| 612 if (filtered_data_len > 0) { | |
| 613 *bytes_read = filtered_data_len; | 601 *bytes_read = filtered_data_len; |
| 614 postfilter_bytes_read_ += filtered_data_len; | 602 postfilter_bytes_read_ += filtered_data_len; |
| 615 rv = true; | 603 rv = true; |
| 616 } else { | 604 break; |
| 617 // Read again since we haven't received enough data yet (e.g., we may | |
| 618 // not have a complete gzip header yet) | |
| 619 rv = ReadFilteredData(bytes_read); | |
| 620 } | 605 } |
| 621 break; | 606 case Filter::FILTER_NEED_MORE_DATA: { |
| 607 // We have finished filtering all data currently in the buffer. | |
| 608 // There might be some space left in the output buffer. One can | |
| 609 // consider reading more data from the stream to feed the filter | |
| 610 // and filling up the output buffer. This leads to more complicated | |
| 611 // buffer management and data notification mechanisms. | |
| 612 // We can revisit this issue if there is a real perf need. | |
| 613 if (filtered_data_len > 0) { | |
| 614 *bytes_read = filtered_data_len; | |
| 615 postfilter_bytes_read_ += filtered_data_len; | |
| 616 rv = true; | |
| 617 } else { | |
| 618 // Read again since we haven't received enough data yet (e.g., we | |
| 619 // may not have a complete gzip header yet). | |
| 620 read_again = true; | |
| 621 continue; | |
| 622 } | |
| 623 break; | |
| 624 } | |
| 625 case Filter::FILTER_OK: { | |
| 626 *bytes_read = filtered_data_len; | |
| 627 postfilter_bytes_read_ += filtered_data_len; | |
| 628 rv = true; | |
| 629 break; | |
| 630 } | |
| 631 case Filter::FILTER_ERROR: { | |
| 632 DVLOG(1) << __FUNCTION__ << "() " | |
| 633 << "\"" << (request_ ? request_->url().spec() : "???") | |
| 634 << "\"" << " Filter Error"; | |
| 635 filter_needs_more_output_space_ = false; | |
| 636 NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, | |
| 637 ERR_CONTENT_DECODING_FAILED)); | |
| 638 rv = false; | |
| 639 break; | |
| 640 } | |
| 641 default: { | |
| 642 NOTREACHED(); | |
| 643 filter_needs_more_output_space_ = false; | |
| 644 rv = false; | |
| 645 break; | |
| 646 } | |
| 622 } | 647 } |
| 623 case Filter::FILTER_OK: { | 648 |
| 624 filter_needs_more_output_space_ = | 649 // If logging all bytes is enabled, log the filtered bytes read. |
| 625 (filtered_data_len == output_buffer_size); | 650 if (rv && request() && request()->net_log().IsLoggingBytes() && |
| 626 *bytes_read = filtered_data_len; | 651 filtered_data_len > 0) { |
| 627 postfilter_bytes_read_ += filtered_data_len; | 652 request()->net_log().AddByteTransferEvent( |
| 628 rv = true; | 653 NetLog::TYPE_URL_REQUEST_JOB_FILTERED_BYTES_READ, |
| 629 break; | 654 filtered_data_len, filtered_read_buffer_->data()); |
| 630 } | 655 } |
| 631 case Filter::FILTER_ERROR: { | 656 } else { |
| 632 DVLOG(1) << __FUNCTION__ << "() " | 657 // we are done, or there is no data left. |
| 633 << "\"" << (request_ ? request_->url().spec() : "???") << "\"" | 658 rv = true; |
| 634 << " Filter Error"; | |
| 635 filter_needs_more_output_space_ = false; | |
| 636 NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, | |
| 637 ERR_CONTENT_DECODING_FAILED)); | |
| 638 rv = false; | |
| 639 break; | |
| 640 } | |
| 641 default: { | |
| 642 NOTREACHED(); | |
| 643 filter_needs_more_output_space_ = false; | |
| 644 rv = false; | |
| 645 break; | |
| 646 } | |
| 647 } | 659 } |
| 648 DVLOG(2) << __FUNCTION__ << "() " | 660 } while (read_again); |
|
wtc
2014/05/02 22:59:13
Just wanted to confirm that you realize you deleted the DVLOG(2) trace below.
rvargas (doing something else)
2014/05/03 00:19:33
Yes, I'm not a fan of leaving debug info forever in the code.
|
wtc
2014/05/02 22:59:13
1. Nit: add a space between "while" and "(".
2. S
rvargas (doing something else)
2014/05/03 00:19:33
I thought about that before... they are pretty much the same.
|
| 649 << "\"" << (request_ ? request_->url().spec() : "???") << "\"" | |
| 650 << " rv = " << rv | |
| 651 << " post bytes read = " << filtered_data_len | |
| 652 << " pre total = " << prefilter_bytes_read_ | |
| 653 << " post total = " | |
| 654 << postfilter_bytes_read_; | |
| 655 // If logging all bytes is enabled, log the filtered bytes read. | |
| 656 if (rv && request() && request()->net_log().IsLoggingBytes() && | |
| 657 filtered_data_len > 0) { | |
| 658 request()->net_log().AddByteTransferEvent( | |
| 659 NetLog::TYPE_URL_REQUEST_JOB_FILTERED_BYTES_READ, | |
| 660 filtered_data_len, filtered_read_buffer_->data()); | |
| 661 } | |
| 662 } else { | |
| 663 // we are done, or there is no data left. | |
| 664 rv = true; | |
| 665 } | |
| 666 | 661 |
| 667 if (rv) { | 662 if (rv) { |
| 668 // When we successfully finished a read, we no longer need to | 663 // When we successfully finished a read, we no longer need to save the |
| 669 // save the caller's buffers. Release our reference. | 664 // caller's buffers. Release our reference. |
| 670 filtered_read_buffer_ = NULL; | 665 filtered_read_buffer_ = NULL; |
| 671 filtered_read_buffer_len_ = 0; | 666 filtered_read_buffer_len_ = 0; |
| 672 } | 667 } |
| 673 return rv; | 668 return rv; |
| 674 } | 669 } |
| 675 | 670 |
| 676 void URLRequestJob::DestroyFilters() { | 671 void URLRequestJob::DestroyFilters() { |
| 677 filter_.reset(); | 672 filter_.reset(); |
| 678 } | 673 } |
| 679 | 674 |
| (...skipping 87 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 767 } | 762 } |
| 768 | 763 |
| 769 bool URLRequestJob::FilterHasData() { | 764 bool URLRequestJob::FilterHasData() { |
| 770 return filter_.get() && filter_->stream_data_len(); | 765 return filter_.get() && filter_->stream_data_len(); |
| 771 } | 766 } |
| 772 | 767 |
| 773 void URLRequestJob::UpdatePacketReadTimes() { | 768 void URLRequestJob::UpdatePacketReadTimes() { |
| 774 } | 769 } |
| 775 | 770 |
| 776 } // namespace net | 771 } // namespace net |
| OLD | NEW |