Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(999)

Side by Side Diff: net/url_request/url_request_job.cc

Issue 100004: Hand craft an A/B test of SDCH compression... (Closed) Base URL: svn://chrome-svn/chrome/trunk/src/
Patch Set: '' Created 11 years, 7 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « net/url_request/url_request_job.h ('k') | no next file » | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright (c) 2006-2008 The Chromium Authors. All rights reserved. 1 // Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "net/url_request/url_request_job.h" 5 #include "net/url_request/url_request_job.h"
6 6
7 #include "base/message_loop.h" 7 #include "base/message_loop.h"
8 #include "base/string_util.h" 8 #include "base/string_util.h"
9 #include "googleurl/src/gurl.h" 9 #include "googleurl/src/gurl.h"
10 #include "net/base/auth.h" 10 #include "net/base/auth.h"
(...skipping 12 matching lines...) Expand all
23 const int URLRequestJob::kFilterBufSize = 32 * 1024; 23 const int URLRequestJob::kFilterBufSize = 32 * 1024;
24 24
25 URLRequestJob::URLRequestJob(URLRequest* request) 25 URLRequestJob::URLRequestJob(URLRequest* request)
26 : request_(request), 26 : request_(request),
27 done_(false), 27 done_(false),
28 filter_needs_more_output_space_(false), 28 filter_needs_more_output_space_(false),
29 read_buffer_(NULL), 29 read_buffer_(NULL),
30 read_buffer_len_(0), 30 read_buffer_len_(0),
31 has_handled_response_(false), 31 has_handled_response_(false),
32 expected_content_size_(-1), 32 expected_content_size_(-1),
33 filter_input_byte_count_(0) { 33 packet_timing_enabled_(false),
34 filter_input_byte_count_(0),
35 bytes_observed_in_packets_(0),
36 max_packets_timed_(0),
37 observed_packet_count_(0) {
34 load_flags_ = request_->load_flags(); 38 load_flags_ = request_->load_flags();
35 is_profiling_ = request->enable_profiling(); 39 is_profiling_ = request->enable_profiling();
36 if (is_profiling()) { 40 if (is_profiling()) {
37 metrics_.reset(new URLRequestJobMetrics()); 41 metrics_.reset(new URLRequestJobMetrics());
38 metrics_->start_time_ = TimeTicks::Now(); 42 metrics_->start_time_ = TimeTicks::Now();
39 } 43 }
40 g_url_request_job_tracker.AddNewJob(this); 44 g_url_request_job_tracker.AddNewJob(this);
41 } 45 }
42 46
43 URLRequestJob::~URLRequestJob() { 47 URLRequestJob::~URLRequestJob() {
48 // Cause filter chain to be destroyed now, so that any histogram requests can
49 // be made before we are torn down.
50 filter_.reset(NULL);
51
44 g_url_request_job_tracker.RemoveJob(this); 52 g_url_request_job_tracker.RemoveJob(this);
45 } 53 }
46 54
47 void URLRequestJob::Kill() { 55 void URLRequestJob::Kill() {
48 // Make sure the request is notified that we are done. We assume that the 56 // Make sure the request is notified that we are done. We assume that the
49 // request took care of setting its error status before calling Kill. 57 // request took care of setting its error status before calling Kill.
50 if (request_) 58 if (request_)
51 NotifyCanceled(); 59 NotifyCanceled();
52 } 60 }
53 61
(...skipping 33 matching lines...) Expand 10 before | Expand all | Expand 10 after
87 } 95 }
88 96
89 void URLRequestJob::ContinueDespiteLastError() { 97 void URLRequestJob::ContinueDespiteLastError() {
90 // Implementations should know how to recover from errors they generate. 98 // Implementations should know how to recover from errors they generate.
91 // If this code was reached, we are trying to recover from an error that 99 // If this code was reached, we are trying to recover from an error that
92 // we don't know how to recover from. 100 // we don't know how to recover from.
93 NOTREACHED(); 101 NOTREACHED();
94 } 102 }
95 103
96 int64 URLRequestJob::GetByteReadCount() const { 104 int64 URLRequestJob::GetByteReadCount() const {
97 return filter_input_byte_count_ ; 105 return filter_input_byte_count_;
98 } 106 }
99 107
100 bool URLRequestJob::GetURL(GURL* gurl) const { 108 bool URLRequestJob::GetURL(GURL* gurl) const {
101 if (!request_) 109 if (!request_)
102 return false; 110 return false;
103 *gurl = request_->url(); 111 *gurl = request_->url();
104 return true; 112 return true;
105 } 113 }
106 114
107 base::Time URLRequestJob::GetRequestTime() const { 115 base::Time URLRequestJob::GetRequestTime() const {
(...skipping 407 matching lines...) Expand 10 before | Expand all | Expand 10 after
515 if (rv != net::OK) 523 if (rv != net::OK)
516 NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, rv)); 524 NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, rv));
517 } 525 }
518 526
519 void URLRequestJob::RecordBytesRead(int bytes_read) { 527 void URLRequestJob::RecordBytesRead(int bytes_read) {
520 if (is_profiling()) { 528 if (is_profiling()) {
521 ++(metrics_->number_of_read_IO_); 529 ++(metrics_->number_of_read_IO_);
522 metrics_->total_bytes_read_ += bytes_read; 530 metrics_->total_bytes_read_ += bytes_read;
523 } 531 }
524 filter_input_byte_count_ += bytes_read; 532 filter_input_byte_count_ += bytes_read;
533 UpdatePacketReadTimes(); // Facilitate stats recording if it is active.
525 g_url_request_job_tracker.OnBytesRead(this, bytes_read); 534 g_url_request_job_tracker.OnBytesRead(this, bytes_read);
526 } 535 }
527 536
528 const URLRequestStatus URLRequestJob::GetStatus() { 537 const URLRequestStatus URLRequestJob::GetStatus() {
529 if (request_) 538 if (request_)
530 return request_->status(); 539 return request_->status();
531 // If the request is gone, we must be cancelled. 540 // If the request is gone, we must be cancelled.
532 return URLRequestStatus(URLRequestStatus::CANCELED, 541 return URLRequestStatus(URLRequestStatus::CANCELED,
533 net::ERR_ABORTED); 542 net::ERR_ABORTED);
534 } 543 }
535 544
536 void URLRequestJob::SetStatus(const URLRequestStatus &status) { 545 void URLRequestJob::SetStatus(const URLRequestStatus &status) {
537 if (request_) 546 if (request_)
538 request_->set_status(status); 547 request_->set_status(status);
539 } 548 }
549
550 void URLRequestJob::UpdatePacketReadTimes() {
551 if (!packet_timing_enabled_)
552 return;
553
554 if (filter_input_byte_count_ <= bytes_observed_in_packets_) {
555 DCHECK(filter_input_byte_count_ == bytes_observed_in_packets_);
556 return; // No new bytes have arrived.
557 }
558
559 if (!bytes_observed_in_packets_)
560 request_time_snapshot_ = GetRequestTime();
561
562 final_packet_time_ = base::Time::Now();
563 const size_t kTypicalPacketSize = 1430;
564 while (filter_input_byte_count_ > bytes_observed_in_packets_) {
565 ++observed_packet_count_;
566 if (max_packets_timed_ > packet_times_.size()) {
567 packet_times_.push_back(final_packet_time_);
568 DCHECK(static_cast<size_t>(observed_packet_count_) ==
569 packet_times_.size());
570 }
571 bytes_observed_in_packets_ += kTypicalPacketSize;
572 }
573 // Since packets may not be full, we'll remember the number of bytes we've
574 // accounted for in packets thus far.
575 bytes_observed_in_packets_ = filter_input_byte_count_;
576 }
577
578 void URLRequestJob::EnablePacketCounting(size_t max_packets_timed) {
579 if (max_packets_timed_ < max_packets_timed)
580 max_packets_timed_ = max_packets_timed;
581 packet_timing_enabled_ = true;
582 }
583
// Emit UMA histograms summarizing the synthetic packet timing gathered by
// UpdatePacketReadTimes(), selected by |statistic| (which SDCH A/B arm the
// response belonged to). No-op unless packet timing was enabled and at
// least one packet was observed. The histogram names are literals because
// the UMA_HISTOGRAM_* macros statically cache per call site.
void URLRequestJob::RecordPacketStats(StatisticSelector statistic) const {
  // final_packet_time_ is default-constructed until the first packet, so a
  // default base::Time means no data was ever recorded.
  if (!packet_timing_enabled_ || (final_packet_time_ == base::Time()))
    return;
  // Total time from the request start snapshot to the last observed packet.
  base::TimeDelta duration = final_packet_time_ - request_time_snapshot_;
  switch (statistic) {
    case SDCH_DECODE: {
      UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Network_Decode_Latency_F_a", duration,
                                  base::TimeDelta::FromMilliseconds(20),
                                  base::TimeDelta::FromMinutes(10), 100);
      UMA_HISTOGRAM_COUNTS_100("Sdch.Network_Decode_Packets_b",
                               static_cast<int>(observed_packet_count_));
      UMA_HISTOGRAM_COUNTS("Sdch.Network_Decode_Bytes_Processed_a",
                           static_cast<int>(bytes_observed_in_packets_));
      if (packet_times_.empty())
        return;
      UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Network_Decode_1st_To_Last_a",
                                  final_packet_time_ - packet_times_[0],
                                  base::TimeDelta::FromMilliseconds(20),
                                  base::TimeDelta::FromMinutes(10), 100);

      // Inter-packet histograms below index packet_times_[0..4], so we need
      // at least 5 recorded timestamps (and a cap that allows that many).
      DCHECK(max_packets_timed_ >= kSdchPacketHistogramCount);
      DCHECK(kSdchPacketHistogramCount > 4);
      if (packet_times_.size() <= 4)
        return;
      UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Network_Decode_1st_To_2nd_c",
                                  packet_times_[1] - packet_times_[0],
                                  base::TimeDelta::FromMilliseconds(1),
                                  base::TimeDelta::FromSeconds(10), 100);
      UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Network_Decode_2nd_To_3rd_c",
                                  packet_times_[2] - packet_times_[1],
                                  base::TimeDelta::FromMilliseconds(1),
                                  base::TimeDelta::FromSeconds(10), 100);
      UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Network_Decode_3rd_To_4th_c",
                                  packet_times_[3] - packet_times_[2],
                                  base::TimeDelta::FromMilliseconds(1),
                                  base::TimeDelta::FromSeconds(10), 100);
      UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Network_Decode_4th_To_5th_c",
                                  packet_times_[4] - packet_times_[3],
                                  base::TimeDelta::FromMilliseconds(1),
                                  base::TimeDelta::FromSeconds(10), 100);
      return;
    }
    case SDCH_PASSTHROUGH: {
      // Despite advertising a dictionary, we handled non-sdch compressed
      // content.
      UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Network_Pass-through_Latency_F_a",
                                  duration,
                                  base::TimeDelta::FromMilliseconds(20),
                                  base::TimeDelta::FromMinutes(10), 100);
      UMA_HISTOGRAM_COUNTS_100("Sdch.Network_Pass-through_Packets_b",
                               observed_packet_count_);
      if (packet_times_.empty())
        return;
      UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Network_Pass-through_1st_To_Last_a",
                                  final_packet_time_ - packet_times_[0],
                                  base::TimeDelta::FromMilliseconds(20),
                                  base::TimeDelta::FromMinutes(10), 100);
      // Same precondition as the SDCH_DECODE inter-packet histograms above.
      DCHECK(max_packets_timed_ >= kSdchPacketHistogramCount);
      DCHECK(kSdchPacketHistogramCount > 4);
      if (packet_times_.size() <= 4)
        return;
      UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Network_Pass-through_1st_To_2nd_c",
                                  packet_times_[1] - packet_times_[0],
                                  base::TimeDelta::FromMilliseconds(1),
                                  base::TimeDelta::FromSeconds(10), 100);
      UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Network_Pass-through_2nd_To_3rd_c",
                                  packet_times_[2] - packet_times_[1],
                                  base::TimeDelta::FromMilliseconds(1),
                                  base::TimeDelta::FromSeconds(10), 100);
      UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Network_Pass-through_3rd_To_4th_c",
                                  packet_times_[3] - packet_times_[2],
                                  base::TimeDelta::FromMilliseconds(1),
                                  base::TimeDelta::FromSeconds(10), 100);
      UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Network_Pass-through_4th_To_5th_c",
                                  packet_times_[4] - packet_times_[3],
                                  base::TimeDelta::FromMilliseconds(1),
                                  base::TimeDelta::FromSeconds(10), 100);
      return;
    }

    case SDCH_EXPERIMENT_DECODE: {
      UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Experiment_Decode",
                                  duration,
                                  base::TimeDelta::FromMilliseconds(20),
                                  base::TimeDelta::FromMinutes(10), 100);
      // We already provided interpacket histograms above in the SDCH_DECODE
      // case, so we don't need them here.
      return;
    }
    case SDCH_EXPERIMENT_HOLDBACK: {
      UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Experiment_Holdback",
                                  duration,
                                  base::TimeDelta::FromMilliseconds(20),
                                  base::TimeDelta::FromMinutes(10), 100);
      // Same precondition as the SDCH_DECODE inter-packet histograms above.
      DCHECK(max_packets_timed_ >= kSdchPacketHistogramCount);
      DCHECK(kSdchPacketHistogramCount > 4);
      if (packet_times_.size() <= 4)
        return;
      UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Experiment_Holdback_1st_To_2nd_c",
                                  packet_times_[1] - packet_times_[0],
                                  base::TimeDelta::FromMilliseconds(1),
                                  base::TimeDelta::FromSeconds(10), 100);
      UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Experiment_Holdback_2nd_To_3rd_c",
                                  packet_times_[2] - packet_times_[1],
                                  base::TimeDelta::FromMilliseconds(1),
                                  base::TimeDelta::FromSeconds(10), 100);
      UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Experiment_Holdback_3rd_To_4th_c",
                                  packet_times_[3] - packet_times_[2],
                                  base::TimeDelta::FromMilliseconds(1),
                                  base::TimeDelta::FromSeconds(10), 100);
      UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Experiment_Holdback_4th_To_5th_c",
                                  packet_times_[4] - packet_times_[3],
                                  base::TimeDelta::FromMilliseconds(1),
                                  base::TimeDelta::FromSeconds(10), 100);
      return;
    }
    default:
      NOTREACHED();
      return;
  }
}
OLDNEW
« no previous file with comments | « net/url_request/url_request_job.h ('k') | no next file » | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698