OLD | NEW |
(Empty) | |
| 1 // Copyright (c) 2010 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. |
| 4 |
| 5 #include "chrome/browser/net/load_timing_observer.h" |
| 6 |
| 7 #include "base/compiler_specific.h" |
| 8 #include "base/time.h" |
| 9 #include "net/base/load_flags.h" |
| 10 #include "net/url_request/url_request_netlog_params.h" |
| 11 |
| 12 using base::Time; |
| 13 using base::TimeTicks; |
| 14 using webkit_glue::ResourceLoaderBridge; |
| 15 |
// Upper bound on the number of records each tracking map may hold; when a
// map grows past this the observer logs a warning and resets it, so a lost
// END event cannot cause unbounded memory growth.
const size_t kMaxNumEntries = 1000;
| 17 |
| 18 namespace { |
| 19 |
| 20 // We know that this conversion is not solid and suffers from world clock |
| 21 // changes, but it should be good enough for the load timing info. |
| 22 static Time TimeTicksToTime(const TimeTicks& time_ticks) { |
| 23 static int64 tick_to_time_offset; |
| 24 static bool tick_to_time_offset_available = false; |
| 25 if (!tick_to_time_offset_available) { |
| 26 int64 cur_time = (Time::Now() - Time()).InMicroseconds(); |
| 27 int64 cur_time_ticks = (TimeTicks::Now() - TimeTicks()).InMicroseconds(); |
| 28 // If we add this number to a time tick value, it gives the timestamp. |
| 29 tick_to_time_offset = cur_time - cur_time_ticks; |
| 30 tick_to_time_offset_available = true; |
| 31 } |
| 32 return Time::FromInternalValue(time_ticks.ToInternalValue() + |
| 33 tick_to_time_offset); |
| 34 } |
| 35 |
| 36 static int32 TimeTicksToOffset( |
| 37 const TimeTicks& time_ticks, |
| 38 LoadTimingObserver::URLRequestRecord* record) { |
| 39 return static_cast<int32>( |
| 40 (time_ticks - record->base_ticks).InMillisecondsRoundedUp()); |
| 41 } |
| 42 |
| 43 } |
| 44 |
// Starts a record with invalid connect-job and socket source ids; the
// timing struct and base_ticks are left at their default-constructed
// (zero) values until TYPE_URL_REQUEST_START_JOB fills them in.
LoadTimingObserver::URLRequestRecord::URLRequestRecord()
    : connect_job_id(net::NetLog::Source::kInvalidId),
      socket_log_id(net::NetLog::Source::kInvalidId),
      socket_reused(false) {
}
| 50 |
// Nothing to initialize; the tracking maps start empty.
LoadTimingObserver::LoadTimingObserver() {
}
| 53 |
// The maps hold values (no owned pointers), so default cleanup suffices.
LoadTimingObserver::~LoadTimingObserver() {
}
| 56 |
| 57 LoadTimingObserver::URLRequestRecord* |
| 58 LoadTimingObserver::GetURLRequestRecord(uint32 source_id) { |
| 59 URLRequestToRecordMap::iterator it = url_request_to_record_.find(source_id); |
| 60 if (it != url_request_to_record_.end()) |
| 61 return &it->second; |
| 62 return NULL; |
| 63 } |
| 64 |
| 65 void LoadTimingObserver::OnAddEntry(net::NetLog::EventType type, |
| 66 const base::TimeTicks& time, |
| 67 const net::NetLog::Source& source, |
| 68 net::NetLog::EventPhase phase, |
| 69 net::NetLog::EventParameters* params) { |
| 70 if (source.type == net::NetLog::SOURCE_URL_REQUEST) |
| 71 OnAddURLRequestEntry(type, time, source, phase, params); |
| 72 else if (source.type == net::NetLog::SOURCE_CONNECT_JOB) |
| 73 OnAddConnectJobEntry(type, time, source, phase, params); |
| 74 } |
| 75 |
// Handles a net log event attributed to a URL_REQUEST source.  Creates the
// per-request record when the request's job starts (if timing was requested
// via load flags), deletes it when the request dies, and translates the
// interesting intermediate events into millisecond offsets stored in the
// record's LoadTimingInfo.
void LoadTimingObserver::OnAddURLRequestEntry(
    net::NetLog::EventType type,
    const base::TimeTicks& time,
    const net::NetLog::Source& source,
    net::NetLog::EventPhase phase,
    net::NetLog::EventParameters* params) {
  bool is_begin = phase == net::NetLog::PHASE_BEGIN;
  bool is_end = phase == net::NetLog::PHASE_END;

  if (type == net::NetLog::TYPE_URL_REQUEST_START_JOB) {
    if (is_begin) {
      // Only record timing for entries with corresponding flag.
      int load_flags = static_cast<URLRequestStartEventParameters*>(params)->
          load_flags();
      if (!(load_flags & net::LOAD_ENABLE_LOAD_TIMING))
        return;

      // Prevents us from passively growing the memory unbounded in case
      // something went wrong. Should not happen.
      if (url_request_to_record_.size() > kMaxNumEntries) {
        LOG(WARNING) << "The load timing observer url request count has grown "
                        "larger than expected, resetting";
        url_request_to_record_.clear();
      }

      // operator[] creates the record on first use.  NOTE(review): if the
      // same request starts another job (e.g. after a redirect) this reuses
      // the existing record, resetting only the base times while earlier
      // offsets remain — confirm that is intended.
      URLRequestRecord& record = url_request_to_record_[source.id];
      record.base_ticks = time;
      record.timing.base_time = TimeTicksToTime(time);
    }
    return;
  } else if (type == net::NetLog::TYPE_REQUEST_ALIVE) {
    // Cleanup records based on the TYPE_REQUEST_ALIVE entry.
    if (is_end)
      url_request_to_record_.erase(source.id);
    return;
  }

  // Remaining events only matter for requests we decided to track above.
  URLRequestRecord* record = GetURLRequestRecord(source.id);
  if (!record)
    return;

  ResourceLoaderBridge::LoadTimingInfo& timing = record->timing;

  // All offsets below are milliseconds relative to record->base_ticks.
  switch(type) {
    case net::NetLog::TYPE_PROXY_SERVICE:
      if (is_begin)
        timing.proxy_start = TimeTicksToOffset(time, record);
      else if (is_end)
        timing.proxy_end = TimeTicksToOffset(time, record);
      break;
    case net::NetLog::TYPE_SOCKET_POOL:
      if (is_begin)
        timing.connect_start = TimeTicksToOffset(time, record);
      else if (is_end)
        timing.connect_end = TimeTicksToOffset(time, record);
      break;
    case net::NetLog::TYPE_SOCKET_POOL_BOUND_TO_CONNECT_JOB:
      {
        // The request was bound to a connect job; copy any DNS resolution
        // times that job recorded (see OnAddConnectJobEntry).
        uint32 connect_job_id = static_cast<net::NetLogSourceParameter*>(
            params)->value().id;
        ConnectJobToRecordMap::iterator it =
            connect_job_to_record_.find(connect_job_id);
        if (it != connect_job_to_record_.end() &&
            !it->second.dns_start.is_null()) {
          timing.dns_start = TimeTicksToOffset(it->second.dns_start, record);
          timing.dns_end = TimeTicksToOffset(it->second.dns_end, record);
        }
      }
      break;
    case net::NetLog::TYPE_SOCKET_POOL_REUSED_AN_EXISTING_SOCKET:
      record->socket_reused = true;
      break;
    case net::NetLog::TYPE_SOCKET_POOL_BOUND_TO_SOCKET:
      // Remember which socket served this request.
      record->socket_log_id = static_cast<net::NetLogSourceParameter*>(
          params)->value().id;
      break;
    case net::NetLog::TYPE_HTTP_TRANSACTION_SEND_REQUEST:
    case net::NetLog::TYPE_SPDY_TRANSACTION_SEND_REQUEST:
      if (is_begin)
        timing.send_start = TimeTicksToOffset(time, record);
      else if (is_end)
        timing.send_end = TimeTicksToOffset(time, record);
      break;
    case net::NetLog::TYPE_HTTP_TRANSACTION_READ_HEADERS:
    case net::NetLog::TYPE_SPDY_TRANSACTION_READ_HEADERS:
      if (is_begin)
        timing.receive_headers_start = TimeTicksToOffset(time, record);
      else if (is_end)
        timing.receive_headers_end = TimeTicksToOffset(time, record);
      break;
    default:
      break;
  }
}
| 170 |
| 171 void LoadTimingObserver::OnAddConnectJobEntry( |
| 172 net::NetLog::EventType type, |
| 173 const base::TimeTicks& time, |
| 174 const net::NetLog::Source& source, |
| 175 net::NetLog::EventPhase phase, |
| 176 net::NetLog::EventParameters* params) { |
| 177 bool is_begin = phase == net::NetLog::PHASE_BEGIN; |
| 178 bool is_end = phase == net::NetLog::PHASE_END; |
| 179 |
| 180 // Manage record lifetime based on the SOCKET_POOL_CONNECT_JOB entry. |
| 181 if (type == net::NetLog::TYPE_SOCKET_POOL_CONNECT_JOB) { |
| 182 if (is_begin) { |
| 183 // Prevents us from passively growing the memory memory unbounded in case |
| 184 // something went wrong. Should not happen. |
| 185 if (connect_job_to_record_.size() > kMaxNumEntries) { |
| 186 LOG(WARNING) << "The load timing observer connect job count has grown " |
| 187 "larger than expected, resetting"; |
| 188 connect_job_to_record_.clear(); |
| 189 } |
| 190 |
| 191 connect_job_to_record_.insert( |
| 192 std::make_pair(source.id, ConnectJobRecord())); |
| 193 } else if (is_end) { |
| 194 connect_job_to_record_.erase(source.id); |
| 195 } |
| 196 } else if (type == net::NetLog::TYPE_HOST_RESOLVER_IMPL) { |
| 197 ConnectJobToRecordMap::iterator it = |
| 198 connect_job_to_record_.find(source.id); |
| 199 if (it != connect_job_to_record_.end()) { |
| 200 if (is_begin) |
| 201 it->second.dns_start = time; |
| 202 else if (is_end) |
| 203 it->second.dns_end = time; |
| 204 } |
| 205 } |
| 206 } |
OLD | NEW |