Chromium Code Reviews

Unified Diff: chrome/browser/net/passive_log_collector.cc

Issue 1556018: Add support for attaching custom parameters to NetLog events. (Closed) Base URL: svn://chrome-svn/chrome/trunk/src/
Patch Set: Address willchan's comments (created 10 years, 8 months ago)
 // Copyright (c) 2010 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "chrome/browser/net/passive_log_collector.h"

 #include <algorithm>

 #include "base/string_util.h"
 #include "chrome/browser/chrome_thread.h"

 namespace {
 const size_t kMaxNumEntriesPerLog = 50;
 const size_t kMaxConnectJobGraveyardSize = 3;
 const size_t kMaxRequestGraveyardSize = 25;
 const size_t kMaxLiveRequests = 200;

 // Sort function on source ID.
 bool OrderBySourceID(const PassiveLogCollector::RequestInfo& a,
                      const PassiveLogCollector::RequestInfo& b) {
   return a.entries[0].source.id < b.entries[0].source.id;
 }

-void AddEntryToRequestInfo(const net::NetLog::Entry& entry,
+void AddEntryToRequestInfo(const net::CapturingNetLog::Entry& entry,
                            bool is_unbounded,
                            PassiveLogCollector::RequestInfo* out_info) {
   // Start dropping new entries when the log has gotten too big.
   if (out_info->entries.size() + 1 <= kMaxNumEntriesPerLog || is_unbounded) {
     out_info->entries.push_back(entry);
   } else {
     out_info->num_entries_truncated += 1;
     out_info->entries[kMaxNumEntriesPerLog - 1] = entry;
   }
 }
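
The bounded-log policy above keeps the first kMaxNumEntriesPerLog entries for a source; once that cap is hit (and the tracker is not unbounded), every newer entry overwrites the final slot while num_entries_truncated records how much was dropped. A minimal standalone sketch of the same policy, using plain ints instead of the real Entry type:

#include <cstddef>
#include <vector>

// Illustration only: mirrors AddEntryToRequestInfo's truncation behaviour.
void AddBounded(int entry, bool is_unbounded, std::vector<int>* out,
                size_t* num_truncated) {
  const size_t kMax = 50;  // stands in for kMaxNumEntriesPerLog
  if (out->size() + 1 <= kMax || is_unbounded) {
    out->push_back(entry);
  } else {
    ++(*num_truncated);        // remember that entries were dropped
    (*out)[kMax - 1] = entry;  // the last slot always holds the newest entry
  }
}
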
(...skipping 12 matching lines...)
 //----------------------------------------------------------------------------

 PassiveLogCollector::PassiveLogCollector()
     : url_request_tracker_(&connect_job_tracker_),
       socket_stream_tracker_(&connect_job_tracker_) {
 }

 PassiveLogCollector::~PassiveLogCollector() {
 }

-void PassiveLogCollector::OnAddEntry(const net::NetLog::Entry& entry) {
+void PassiveLogCollector::OnAddEntry(
+    net::NetLog::EventType type,
+    const base::TimeTicks& time,
+    const net::NetLog::Source& source,
+    net::NetLog::EventPhase phase,
+    net::NetLog::EventParameters* extra_parameters) {
+  // Package the parameters into a single struct for convenience.
+  net::CapturingNetLog::Entry entry(type, time, source, phase,
+                                    extra_parameters);
+
   switch (entry.source.type) {
     case net::NetLog::SOURCE_URL_REQUEST:
       url_request_tracker_.OnAddEntry(entry);
       break;
     case net::NetLog::SOURCE_SOCKET_STREAM:
       socket_stream_tracker_.OnAddEntry(entry);
       break;
     case net::NetLog::SOURCE_CONNECT_JOB:
       connect_job_tracker_.OnAddEntry(entry);
       break;
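
With the new signature, observers receive the event's type, time, source, phase, and an optional EventParameters pointer, and this collector simply repackages them into a net::CapturingNetLog::Entry before dispatching on the source type. Below is a hedged sketch of handing such an event, with a custom string parameter attached, to the collector; the direct construction of NetLog::Source is an assumption (only its .type and .id fields appear in this file), and the includes are assumed to match this file plus net/base/net_log.h:

// Sketch only, not the production emit path.
void EmitStartEventForIllustration(PassiveLogCollector* collector) {
  net::NetLog::Source source;
  source.type = net::NetLog::SOURCE_URL_REQUEST;
  source.id = 1;  // arbitrary id for the sketch
  collector->OnAddEntry(
      net::NetLog::TYPE_URL_REQUEST_START,
      base::TimeTicks::Now(),
      source,
      net::NetLog::PHASE_BEGIN,
      new net::NetLogStringParameter("http://www.example.com/"));
}
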
(...skipping 17 matching lines...)
 //----------------------------------------------------------------------------

 PassiveLogCollector::RequestTrackerBase::RequestTrackerBase(
     size_t max_graveyard_size)
     : max_graveyard_size_(max_graveyard_size),
       next_graveyard_index_(0),
       is_unbounded_(false) {
 }

 void PassiveLogCollector::RequestTrackerBase::OnAddEntry(
-    const net::NetLog::Entry& entry) {
+    const net::CapturingNetLog::Entry& entry) {
   RequestInfo& info = live_requests_[entry.source.id];
   Action result = DoAddEntry(entry, &info);

   switch (result) {
     case ACTION_MOVE_TO_GRAVEYARD:
       InsertIntoGraveyard(info);
       // (fall-through)
     case ACTION_DELETE:
       RemoveFromLiveRequests(entry.source.id);
       break;
(...skipping 109 matching lines...)
 //----------------------------------------------------------------------------

 const size_t PassiveLogCollector::ConnectJobTracker::kMaxGraveyardSize = 3;

 PassiveLogCollector::ConnectJobTracker::ConnectJobTracker()
     : RequestTrackerBase(kMaxGraveyardSize) {
 }

 PassiveLogCollector::RequestTrackerBase::Action
 PassiveLogCollector::ConnectJobTracker::DoAddEntry(
-    const net::NetLog::Entry& entry,
+    const net::CapturingNetLog::Entry& entry,
     RequestInfo* out_info) {
   // Save the entry (possibly truncating).
   AddEntryToRequestInfo(entry, is_unbounded(), out_info);

   // If this is the end of the connect job, move the request to the graveyard.
-  if (entry.type == net::NetLog::Entry::TYPE_EVENT &&
-      entry.event.type == net::NetLog::TYPE_SOCKET_POOL_CONNECT_JOB &&
-      entry.event.phase == net::NetLog::PHASE_END) {
+  if (entry.type == net::NetLog::TYPE_SOCKET_POOL_CONNECT_JOB &&
+      entry.phase == net::NetLog::PHASE_END) {
     return ACTION_MOVE_TO_GRAVEYARD;
   }

   return ACTION_NONE;
 }
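
As a rough lifecycle sketch (reusing the assumed NetLog::Source construction from the note above; this is not the real unit test), a ConnectJob's entries accumulate under its source id until the TYPE_SOCKET_POOL_CONNECT_JOB / PHASE_END event arrives, at which point the whole RequestInfo moves to the graveyard and becomes reachable through GetRequestInfoFromGraveyard():

void ConnectJobLifecycleSketch() {
  PassiveLogCollector::ConnectJobTracker tracker;
  net::NetLog::Source job;
  job.type = net::NetLog::SOURCE_CONNECT_JOB;
  job.id = 42;  // arbitrary id for the sketch
  tracker.OnAddEntry(net::CapturingNetLog::Entry(
      net::NetLog::TYPE_SOCKET_POOL_CONNECT_JOB, base::TimeTicks::Now(),
      job, net::NetLog::PHASE_BEGIN, NULL));  // job is tracked as live
  tracker.OnAddEntry(net::CapturingNetLog::Entry(
      net::NetLog::TYPE_SOCKET_POOL_CONNECT_JOB, base::TimeTicks::Now(),
      job, net::NetLog::PHASE_END, NULL));    // moved to the graveyard
  // A later bind notification can now splice this log into a request via
  // tracker.GetRequestInfoFromGraveyard(42).
}
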

 //----------------------------------------------------------------------------
 // RequestTracker
 //----------------------------------------------------------------------------

 const size_t PassiveLogCollector::RequestTracker::kMaxGraveyardSize = 25;
 const size_t PassiveLogCollector::RequestTracker::kMaxGraveyardURLSize = 1000;

 PassiveLogCollector::RequestTracker::RequestTracker(
     ConnectJobTracker* connect_job_tracker)
     : RequestTrackerBase(kMaxGraveyardSize),
       connect_job_tracker_(connect_job_tracker) {
 }

 PassiveLogCollector::RequestTrackerBase::Action
 PassiveLogCollector::RequestTracker::DoAddEntry(
-    const net::NetLog::Entry& entry,
+    const net::CapturingNetLog::Entry& entry,
     RequestInfo* out_info) {

-  if (entry.type == net::NetLog::Entry::TYPE_EVENT &&
-      entry.event.type == net::NetLog::TYPE_SOCKET_POOL_CONNECT_JOB_ID) {
+  if (entry.type == net::NetLog::TYPE_SOCKET_POOL_CONNECT_JOB_ID) {
     // If this was notification that a ConnectJob was bound to the request,
     // copy all the logged data for that ConnectJob.
     AddConnectJobInfo(entry, out_info);
   } else {
     // Otherwise just append this entry to the request info.
     AddEntryToRequestInfo(entry, is_unbounded(), out_info);
   }

   // If this was the start of a URLRequest/SocketStream, extract the URL.
-  if (out_info->entries.size() == 1 &&
-      entry.type == net::NetLog::Entry::TYPE_EVENT &&
-      entry.event.type == net::NetLog::TYPE_REQUEST_ALIVE &&
-      entry.event.phase == net::NetLog::PHASE_BEGIN) {
-    out_info->url = entry.string;
-    out_info->entries[0].string = std::string();
-
-    // Paranoia check: truncate the URL if it is really big.
-    if (out_info->url.size() > kMaxGraveyardURLSize)
-      out_info->url = out_info->url.substr(0, kMaxGraveyardURLSize);
+  // Note: we look at the first *two* entries, since the outer REQUEST_ALIVE
+  // doesn't actually contain any data.
+  if (out_info->url.empty() && out_info->entries.size() <= 2 &&
+      entry.phase == net::NetLog::PHASE_BEGIN && entry.extra_parameters &&
+      (entry.type == net::NetLog::TYPE_URL_REQUEST_START ||
+       entry.type == net::NetLog::TYPE_SOCKET_STREAM_CONNECT)) {
+    out_info->url = static_cast<net::NetLogStringParameter*>(
+        entry.extra_parameters.get())->value();
   }

   // If the request has ended, move it to the graveyard.
-  if (entry.type == net::NetLog::Entry::TYPE_EVENT &&
-      entry.event.type == net::NetLog::TYPE_REQUEST_ALIVE &&
-      entry.event.phase == net::NetLog::PHASE_END) {
+  if (entry.type == net::NetLog::TYPE_REQUEST_ALIVE &&
+      entry.phase == net::NetLog::PHASE_END) {
     if (StartsWithASCII(out_info->url, "chrome://", false)) {
       // Avoid sending "chrome://" requests to the graveyard, since it just
       // adds to clutter.
       return ACTION_DELETE;
     }
     return ACTION_MOVE_TO_GRAVEYARD;
   }

   return ACTION_NONE;
 }
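
For orientation, the sequence of events this tracker expects for a single URLRequest can be summarized as below; the ordering is the scenario assumed by the checks above rather than something enforced in this file, and a SocketStream follows the same shape with TYPE_SOCKET_STREAM_CONNECT in place of TYPE_URL_REQUEST_START:

// 1. TYPE_REQUEST_ALIVE, PHASE_BEGIN, no parameters  -> appended as entry #1.
// 2. TYPE_URL_REQUEST_START, PHASE_BEGIN, with a
//    NetLogStringParameter holding the URL           -> URL extracted while entries.size() <= 2.
// 3. Intermediate events                             -> appended, truncated past kMaxNumEntriesPerLog.
// 4. TYPE_SOCKET_POOL_CONNECT_JOB_ID, with a
//    NetLogIntegerParameter                          -> ConnectJob log spliced in (see below).
// 5. TYPE_REQUEST_ALIVE, PHASE_END                   -> ACTION_DELETE for chrome:// URLs,
//                                                       otherwise ACTION_MOVE_TO_GRAVEYARD.
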

 void PassiveLogCollector::RequestTracker::AddConnectJobInfo(
-    const net::NetLog::Entry& entry,
+    const net::CapturingNetLog::Entry& entry,
     RequestInfo* live_entry) {
   // We have just been notified of which ConnectJob the
   // URLRequest/SocketStream was assigned. Lookup all the data we captured
   // for the ConnectJob, and append it to the URLRequest/SocketStream's
   // RequestInfo.

-  // TODO(eroman): This should NOT be plumbed through via |error_code| !
-  int connect_job_id = entry.error_code;
+  int connect_job_id = static_cast<net::NetLogIntegerParameter*>(
+      entry.extra_parameters.get())->value();

   const RequestInfo* connect_job_info =
       connect_job_tracker_->GetRequestInfoFromGraveyard(connect_job_id);

   if (connect_job_info) {
     // Append the ConnectJob information we found.
     AppendToRequestInfo(*connect_job_info, is_unbounded(), live_entry);
   } else {
     // If we couldn't find the information for the ConnectJob, append a
     // generic message instead.
-    net::NetLog::Entry e(entry);
-    e.type = net::NetLog::Entry::TYPE_STRING;
-    e.string = StringPrintf("Used ConnectJob id=%d", connect_job_id);
+    net::CapturingNetLog::Entry e(entry);
+    e.type = net::NetLog::TYPE_TODO_STRING;
+    e.extra_parameters = new net::NetLogStringParameter(
+        StringPrintf("Used ConnectJob id=%d", connect_job_id));
     AddEntryToRequestInfo(e, is_unbounded(), live_entry);
   }
 }

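The bind notification consumed above is expected to carry the ConnectJob's source id as a NetLogIntegerParameter; only its value() accessor is used in this file, so the emitting side below is a hedged guess, with |collector|, |request_source| and |connect_job_id| as placeholders and with both the NetLogIntegerParameter(int) constructor and the use of PHASE_NONE assumed:

// Hypothetical emit side of the TYPE_SOCKET_POOL_CONNECT_JOB_ID notification.
collector->OnAddEntry(
    net::NetLog::TYPE_SOCKET_POOL_CONNECT_JOB_ID,
    base::TimeTicks::Now(),
    request_source,           // the URLRequest/SocketStream's NetLog::Source
    net::NetLog::PHASE_NONE,  // the phase is not inspected for this event type
    new net::NetLogIntegerParameter(connect_job_id));
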
 //----------------------------------------------------------------------------
 // InitProxyResolverTracker
 //----------------------------------------------------------------------------

 PassiveLogCollector::InitProxyResolverTracker::InitProxyResolverTracker() {}

 void PassiveLogCollector::InitProxyResolverTracker::OnAddEntry(
-    const net::NetLog::Entry& entry) {
-  if (entry.type == net::NetLog::Entry::TYPE_EVENT &&
-      entry.event.type == net::NetLog::TYPE_INIT_PROXY_RESOLVER &&
-      entry.event.phase == net::NetLog::PHASE_BEGIN) {
+    const net::CapturingNetLog::Entry& entry) {
+  if (entry.type == net::NetLog::TYPE_INIT_PROXY_RESOLVER &&
+      entry.phase == net::NetLog::PHASE_BEGIN) {
     // If this is the start of a new InitProxyResolver, overwrite the old data.
     entries_.clear();
     entries_.push_back(entry);
   } else {
     // Otherwise append it to the log for the latest InitProxyResolver.
     if (!entries_.empty() && entries_[0].source.id != entry.source.id) {
       // If this entry doesn't match what we think was the latest
       // InitProxyResolver, drop it. (This shouldn't happen, but we will guard
       // against it).
       return;
     }
     entries_.push_back(entry);
   }

   // Safety net: INIT_PROXY_RESOLVER shouldn't generate many messages, but in
   // case something goes wrong, avoid exploding the memory usage.
   if (entries_.size() > kMaxNumEntriesPerLog)
     entries_.clear();
 }
