| Index: chrome/browser/net/passive_log_collector.cc
|
| ===================================================================
|
| --- chrome/browser/net/passive_log_collector.cc (revision 41560)
|
| +++ chrome/browser/net/passive_log_collector.cc (working copy)
|
| @@ -1,192 +1,318 @@
|
| -// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
|
| +// Copyright (c) 2010 The Chromium Authors. All rights reserved.
|
| // Use of this source code is governed by a BSD-style license that can be
|
| // found in the LICENSE file.
|
|
|
| -#ifndef NET_URL_REQUEST_REQUEST_TRACKER_H_
|
| -#define NET_URL_REQUEST_REQUEST_TRACKER_H_
|
| +#include "chrome/browser/net/passive_log_collector.h"
|
|
|
| -#include <vector>
|
| +#include <algorithm>
|
|
|
| -#include "base/ref_counted.h"
|
| -#include "base/linked_list.h"
|
| -#include "base/logging.h"
|
| -#include "googleurl/src/gurl.h"
|
| -#include "net/base/load_log.h"
|
| +#include "base/string_util.h"
|
| +#include "chrome/browser/chrome_thread.h"
|
|
|
| -// Class to track all of the live instances of Request associated with a
|
| -// particular URLRequestContext. It keeps a circular queue of the LoadLogs
|
| -// for recently deceased requests.
|
| -template<typename Request>
|
| -class RequestTracker {
|
| - public:
|
| - struct RecentRequestInfo {
|
| - GURL original_url;
|
| - scoped_refptr<net::LoadLog> load_log;
|
| - };
|
| +namespace {
|
| +const size_t kMaxNumEntriesPerLog = 50;
|
| +const size_t kMaxConnectJobGraveyardSize = 3;
|
| +const size_t kMaxRequestGraveyardSize = 25;
|
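| +// Safety cap on |live_requests_|; RequestTrackerBase::OnAddEntry clears all
|
| +// live request data if this is ever exceeded.
|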
| +const size_t kMaxLiveRequests = 200;
|
|
|
| - // Helper class to make Request insertable into a base::LinkedList,
|
| - // without making the public interface expose base::LinkNode.
|
| - class Node : public base::LinkNode<Node> {
|
| - public:
|
| - Node(Request* request) : request_(request) {}
|
| - ~Node() {}
|
| +// Comparator used to order RequestInfos by the source ID of their first entry.
|
| +bool OrderBySourceID(const PassiveLogCollector::RequestInfo& a,
|
| + const PassiveLogCollector::RequestInfo& b) {
|
| + return a.entries[0].source.id < b.entries[0].source.id;
|
| +}
|
|
|
| - Request* request() const { return request_; }
|
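| +// Appends |entry| to |out_info|. Once the log already holds
|
| +// kMaxNumEntriesPerLog entries (and |is_unbounded| is false), each new entry
|
| +// overwrites the final slot and bumps |num_entries_truncated|, so the first
|
| +// kMaxNumEntriesPerLog - 1 entries plus the most recent one are retained.
|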
| +void AddEntryToRequestInfo(const net::NetLog::Entry& entry,
|
| + bool is_unbounded,
|
| + PassiveLogCollector::RequestInfo* out_info) {
|
| + // Once the log has grown too big, keep overwriting the final slot with the
|
| + // newest entry, counting how many entries have been displaced.
|
| + if (out_info->entries.size() + 1 <= kMaxNumEntriesPerLog || is_unbounded) {
|
| + out_info->entries.push_back(entry);
|
| + } else {
|
| + out_info->num_entries_truncated += 1;
|
| + out_info->entries[kMaxNumEntriesPerLog - 1] = entry;
|
| + }
|
| +}
|
|
|
| - private:
|
| - Request* request_;
|
| - };
|
| +void AppendToRequestInfo(const PassiveLogCollector::RequestInfo& info,
|
| + bool is_unbounded,
|
| + PassiveLogCollector::RequestInfo* out_info) {
|
| + for (size_t i = 0; i < info.entries.size(); ++i)
|
| + AddEntryToRequestInfo(info.entries[i], is_unbounded, out_info);
|
| +}
|
|
|
| - typedef std::vector<RecentRequestInfo> RecentRequestInfoList;
|
| - typedef bool (*RecentRequestsFilterFunc)(const GURL&);
|
| +} // namespace
|
|
|
| - // The maximum number of entries for |graveyard_|, when in bounded mode.
|
| - static const size_t kMaxGraveyardSize;
|
| +//----------------------------------------------------------------------------
|
| +// PassiveLogCollector
|
| +//----------------------------------------------------------------------------
|
|
|
| - // The maximum size of URLs to stuff into RecentRequestInfo, when in bounded
|
| - // mode.
|
| - static const size_t kMaxGraveyardURLSize;
|
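| +// Both the URLRequest and SocketStream trackers are given a pointer to the
|
| +// ConnectJob tracker, so that a bound ConnectJob's entries can be spliced
|
| +// into their own logs (see RequestTracker::AddConnectJobInfo).
|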
| +PassiveLogCollector::PassiveLogCollector()
|
| + : url_request_tracker_(&connect_job_tracker_),
|
| + socket_stream_tracker_(&connect_job_tracker_) {
|
| +}
|
|
|
| - // The maximum number of entries to use for LoadLogs when in bounded mode.
|
| - static const size_t kBoundedLoadLogMaxEntries;
|
| +PassiveLogCollector::~PassiveLogCollector() {
|
| +}
|
|
|
| - RequestTracker()
|
| - : next_graveyard_index_(0),
|
| - graveyard_filter_func_(NULL),
|
| - is_unbounded_(false) {
|
| +void PassiveLogCollector::OnAddEntry(const net::NetLog::Entry& entry) {
|
| + switch (entry.source.type) {
|
| + case net::NetLog::SOURCE_URL_REQUEST:
|
| + url_request_tracker_.OnAddEntry(entry);
|
| + break;
|
| + case net::NetLog::SOURCE_SOCKET_STREAM:
|
| + socket_stream_tracker_.OnAddEntry(entry);
|
| + break;
|
| + case net::NetLog::SOURCE_CONNECT_JOB:
|
| + connect_job_tracker_.OnAddEntry(entry);
|
| + break;
|
| + default:
|
| + // Drop all other logged events.
|
| + break;
|
| }
|
| +}
|
|
|
| - ~RequestTracker() {}
|
| +void PassiveLogCollector::Clear() {
|
| + connect_job_tracker_.Clear();
|
| + url_request_tracker_.Clear();
|
| + socket_stream_tracker_.Clear();
|
| +}
|
|
|
| - // Returns a list of Requests that are alive.
|
| - std::vector<Request*> GetLiveRequests() {
|
| - std::vector<Request*> list;
|
| - for (base::LinkNode<Node>* node = live_instances_.head();
|
| - node != live_instances_.end();
|
| - node = node->next()) {
|
| - Request* request = node->value()->request();
|
| - list.push_back(request);
|
| - }
|
| - return list;
|
| +//----------------------------------------------------------------------------
|
| +// RequestTrackerBase
|
| +//----------------------------------------------------------------------------
|
| +
|
| +PassiveLogCollector::RequestTrackerBase::RequestTrackerBase(
|
| + size_t max_graveyard_size)
|
| + : max_graveyard_size_(max_graveyard_size),
|
| + next_graveyard_index_(0),
|
| + is_unbounded_(false) {
|
| +}
|
| +
|
| +void PassiveLogCollector::RequestTrackerBase::OnAddEntry(
|
| + const net::NetLog::Entry& entry) {
|
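| + // operator[] default-constructs a RequestInfo the first time a source ID
|
| + // is seen, which implicitly starts tracking a new live request.
|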
| + RequestInfo& info = live_requests_[entry.source.id];
|
| + Action result = DoAddEntry(entry, &info);
|
| +
|
| + switch (result) {
|
| + case ACTION_MOVE_TO_GRAVEYARD:
|
| + InsertIntoGraveyard(info);
|
| + // (fall-through)
|
| + case ACTION_DELETE:
|
| + RemoveFromLiveRequests(info);
|
| + break;
|
| + default:
|
| + break;
|
| }
|
|
|
| - // Clears the circular buffer of RecentRequestInfos.
|
| - void ClearRecentlyDeceased() {
|
| - next_graveyard_index_ = 0;
|
| - graveyard_.clear();
|
| + if (live_requests_.size() > kMaxLiveRequests) {
|
| + // This is a safety net in case something went wrong, to prevent unbounded
|
| + // memory growth.
|
| + LOG(WARNING) << "The passive log data has grown larger "
|
| + "than expected, resetting";
|
| + live_requests_.clear();
|
| }
|
| +}
|
|
|
| - // Returns a list of recently completed Requests.
|
| - const RecentRequestInfoList GetRecentlyDeceased() {
|
| - RecentRequestInfoList list;
|
| +PassiveLogCollector::RequestInfoList
|
| +PassiveLogCollector::RequestTrackerBase::GetLiveRequests() const {
|
| + RequestInfoList list;
|
|
|
| - // Copy the items from |graveyard_| (our circular queue of recently
|
| - // deceased request infos) into a vector, ordered from oldest to newest.
|
| - for (size_t i = 0; i < graveyard_.size(); ++i) {
|
| - size_t index = (next_graveyard_index_ + i) % graveyard_.size();
|
| - list.push_back(graveyard_[index]);
|
| - }
|
| - return list;
|
| + // Copy all of the live requests into the vector.
|
| + for (SourceIDToInfoMap::const_iterator it = live_requests_.begin();
|
| + it != live_requests_.end();
|
| + ++it) {
|
| + list.push_back(it->second);
|
| }
|
|
|
| - void Add(Request* request) {
|
| - live_instances_.Append(&request->request_tracker_node_);
|
| - }
|
| + std::sort(list.begin(), list.end(), OrderBySourceID);
|
| + return list;
|
| +}
|
|
|
| - void Remove(Request* request) {
|
| - // Remove from |live_instances_|.
|
| - request->request_tracker_node_.RemoveFromList();
|
| +void PassiveLogCollector::RequestTrackerBase::ClearRecentlyDeceased() {
|
| + next_graveyard_index_ = 0;
|
| + graveyard_.clear();
|
| +}
|
|
|
| - RecentRequestInfo info;
|
| - request->GetInfoForTracker(&info);
|
| +// Returns a list of recently completed Requests.
|
| +PassiveLogCollector::RequestInfoList
|
| +PassiveLogCollector::RequestTrackerBase::GetRecentlyDeceased() const {
|
| + RequestInfoList list;
|
|
|
| - if (!is_unbounded_) {
|
| - // Paranoia check: truncate |info.original_url| if it is really big.
|
| - const std::string& spec = info.original_url.possibly_invalid_spec();
|
| - if (spec.size() > kMaxGraveyardURLSize)
|
| - info.original_url = GURL(spec.substr(0, kMaxGraveyardURLSize));
|
| - }
|
| + // Copy the items from |graveyard_| (our circular queue of recently
|
| + // deceased request infos) into a vector, ordered from oldest to newest.
|
| + for (size_t i = 0; i < graveyard_.size(); ++i) {
|
| + size_t index = (next_graveyard_index_ + i) % graveyard_.size();
|
| + list.push_back(graveyard_[index]);
|
| + }
|
| + return list;
|
| +}
|
|
|
| - if (ShouldInsertIntoGraveyard(info)) {
|
| - // Add into |graveyard_|.
|
| - InsertIntoGraveyard(info);
|
| +const PassiveLogCollector::RequestInfo*
|
| +PassiveLogCollector::RequestTrackerBase::GetRequestInfoFromGraveyard(
|
| + int source_id) const {
|
| + // Scan through the graveyard to find an entry for |source_id|.
|
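| + // (A linear scan is fine at current sizes; e.g. the ConnectJob graveyard
|
| + // holds at most kMaxGraveyardSize == 3 entries.)
|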
| + for (size_t i = 0; i < graveyard_.size(); ++i) {
|
| + if (graveyard_[i].entries[0].source.id == source_id) {
|
| + return &graveyard_[i];
|
| }
|
| }
|
| + return NULL;
|
| +}
|
|
|
| - // This function lets you exclude requests from being saved to the graveyard.
|
| - // The graveyard is a circular buffer of the most recently completed
|
| - // requests. Pass NULL turn off filtering. Otherwise pass in a function
|
| - // returns false to exclude requests, true otherwise.
|
| - void SetGraveyardFilter(RecentRequestsFilterFunc filter_func) {
|
| - graveyard_filter_func_ = filter_func;
|
| +void PassiveLogCollector::RequestTrackerBase::RemoveFromLiveRequests(
|
| + const RequestInfo& info) {
|
| + // Remove from |live_requests_|.
|
| + SourceIDToInfoMap::iterator it = live_requests_.find(
|
| + info.entries[0].source.id);
|
| + DCHECK(it != live_requests_.end());
|
| + live_requests_.erase(it);
|
| +}
|
| +
|
| +void PassiveLogCollector::RequestTrackerBase::SetUnbounded(
|
| + bool unbounded) {
|
| + // No change.
|
| + if (is_unbounded_ == unbounded)
|
| + return;
|
| +
|
| + // If we are going from unbounded to bounded, we need to trim the
|
| + // graveyard. For simplicity, we just clear it.
|
| + if (is_unbounded_ && !unbounded)
|
| + ClearRecentlyDeceased();
|
| +
|
| + is_unbounded_ = unbounded;
|
| +}
|
| +
|
| +void PassiveLogCollector::RequestTrackerBase::Clear() {
|
| + ClearRecentlyDeceased();
|
| + live_requests_.clear();
|
| +}
|
| +
|
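| +// Worked example: with a bound of 3, inserting A, B, C, D leaves |graveyard_|
|
| +// as [D, B, C] and |next_graveyard_index_| as 1, so GetRecentlyDeceased()
|
| +// returns B, C, D (oldest to newest).
|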
| +void PassiveLogCollector::RequestTrackerBase::InsertIntoGraveyard(
|
| + const RequestInfo& info) {
|
| + if (is_unbounded_) {
|
| + graveyard_.push_back(info);
|
| + return;
|
| }
|
|
|
| - bool IsUnbounded() const {
|
| - return is_unbounded_;
|
| + // Otherwise enforce a bound on the graveyard size, by treating it as a
|
| + // circular buffer.
|
| + if (graveyard_.size() < max_graveyard_size_) {
|
| + // Still growing to maximum capacity.
|
| + DCHECK_EQ(next_graveyard_index_, graveyard_.size());
|
| + graveyard_.push_back(info);
|
| + } else {
|
| + // At maximum capacity, overwrite the oldest entry.
|
| + graveyard_[next_graveyard_index_] = info;
|
| }
|
| + next_graveyard_index_ = (next_graveyard_index_ + 1) % max_graveyard_size_;
|
| +}
|
|
|
| - void SetUnbounded(bool unbounded) {
|
| - // No change.
|
| - if (is_unbounded_ == unbounded)
|
| - return;
|
| +//----------------------------------------------------------------------------
|
| +// ConnectJobTracker
|
| +//----------------------------------------------------------------------------
|
|
|
| - // If we are going from unbounded to bounded, we need to trim the
|
| - // graveyard. For simplicity we will simply clear it.
|
| - if (is_unbounded_ && !unbounded)
|
| - ClearRecentlyDeceased();
|
| +const size_t PassiveLogCollector::ConnectJobTracker::kMaxGraveyardSize = 3;
|
|
|
| - is_unbounded_ = unbounded;
|
| +PassiveLogCollector::ConnectJobTracker::ConnectJobTracker()
|
| + : RequestTrackerBase(kMaxGraveyardSize) {
|
| +}
|
| +
|
| +PassiveLogCollector::RequestTrackerBase::Action
|
| +PassiveLogCollector::ConnectJobTracker::DoAddEntry(
|
| + const net::NetLog::Entry& entry,
|
| + RequestInfo* out_info) {
|
| + // Save the entry (possibly truncating).
|
| + AddEntryToRequestInfo(entry, is_unbounded(), out_info);
|
| +
|
| + // If this is the end of the connect job, move the request to the graveyard.
|
| + if (entry.type == net::NetLog::Entry::TYPE_EVENT &&
|
| + entry.event.type == net::NetLog::TYPE_SOCKET_POOL_CONNECT_JOB &&
|
| + entry.event.phase == net::NetLog::PHASE_END) {
|
| + return ACTION_MOVE_TO_GRAVEYARD;
|
| }
|
|
|
| - // Creates a LoadLog using the unbounded/bounded constraints that
|
| - // apply to this tracker.
|
| - net::LoadLog* CreateLoadLog() {
|
| - if (IsUnbounded())
|
| - return new net::LoadLog(net::LoadLog::kUnbounded);
|
| - return new net::LoadLog(kBoundedLoadLogMaxEntries);
|
| + return ACTION_NONE;
|
| +}
|
| +
|
| +//----------------------------------------------------------------------------
|
| +// RequestTracker
|
| +//----------------------------------------------------------------------------
|
| +
|
| +const size_t PassiveLogCollector::RequestTracker::kMaxGraveyardSize = 25;
|
| +const size_t PassiveLogCollector::RequestTracker::kMaxGraveyardURLSize = 1000;
|
| +
|
| +PassiveLogCollector::RequestTracker::RequestTracker(
|
| + ConnectJobTracker* connect_job_tracker)
|
| + : RequestTrackerBase(kMaxGraveyardSize),
|
| + connect_job_tracker_(connect_job_tracker) {
|
| +}
|
| +
|
| +PassiveLogCollector::RequestTrackerBase::Action
|
| +PassiveLogCollector::RequestTracker::DoAddEntry(
|
| + const net::NetLog::Entry& entry,
|
| + RequestInfo* out_info) {
|
| + if (entry.type == net::NetLog::Entry::TYPE_EVENT &&
|
| + entry.event.type == net::NetLog::TYPE_SOCKET_POOL_CONNECT_JOB_ID) {
|
| + // If this was notification that a ConnectJob was bound to the request,
|
| + // copy all the logged data for that ConnectJob.
|
| + AddConnectJobInfo(entry, out_info);
|
| + } else {
|
| + // Otherwise just append this entry to the request info.
|
| + AddEntryToRequestInfo(entry, is_unbounded(), out_info);
|
| }
|
|
|
| - private:
|
| - bool ShouldInsertIntoGraveyard(const RecentRequestInfo& info) {
|
| - if (!graveyard_filter_func_)
|
| - return true;
|
| - return graveyard_filter_func_(info.original_url);
|
| + // If this was the start of a URLRequest/SocketStream, extract the URL.
|
| + if (out_info->entries.size() == 1 &&
|
| + entry.type == net::NetLog::Entry::TYPE_EVENT &&
|
| + entry.event.type == net::NetLog::TYPE_REQUEST_ALIVE &&
|
| + entry.event.phase == net::NetLog::PHASE_BEGIN) {
|
| + out_info->url = entry.string;
|
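| + // The URL is now saved in |url|, so clear the copy held by the first
|
| + // entry rather than storing it twice.
|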
| + out_info->entries[0].string = std::string();
|
| +
|
| + // Paranoia check: truncate the URL if it is really big.
|
| + if (out_info->url.size() > kMaxGraveyardURLSize)
|
| + out_info->url = out_info->url.substr(0, kMaxGraveyardURLSize);
|
| }
|
|
|
| - void InsertIntoGraveyard(const RecentRequestInfo& info) {
|
| - if (is_unbounded_) {
|
| - graveyard_.push_back(info);
|
| - return;
|
| + // If the request has ended, move it to the graveyard.
|
| + if (entry.type == net::NetLog::Entry::TYPE_EVENT &&
|
| + entry.event.type == net::NetLog::TYPE_REQUEST_ALIVE &&
|
| + entry.event.phase == net::NetLog::PHASE_END) {
|
| + if (StartsWithASCII(out_info->url, "chrome://", false)) {
|
| + // Avoid sending "chrome://" requests to the graveyard, since it just
|
| + // adds to clutter.
|
| + return ACTION_DELETE;
|
| }
|
| -
|
| - // Otherwise enforce a bound on the graveyard size, by treating it as a
|
| - // circular buffer.
|
| - if (graveyard_.size() < kMaxGraveyardSize) {
|
| - // Still growing to maximum capacity.
|
| - DCHECK_EQ(next_graveyard_index_, graveyard_.size());
|
| - graveyard_.push_back(info);
|
| - } else {
|
| - // At maximum capacity, overwite the oldest entry.
|
| - graveyard_[next_graveyard_index_] = info;
|
| - }
|
| - next_graveyard_index_ = (next_graveyard_index_ + 1) % kMaxGraveyardSize;
|
| + return ACTION_MOVE_TO_GRAVEYARD;
|
| }
|
|
|
| - base::LinkedList<Node> live_instances_;
|
| + return ACTION_NONE;
|
| +}
|
|
|
| - size_t next_graveyard_index_;
|
| - RecentRequestInfoList graveyard_;
|
| - RecentRequestsFilterFunc graveyard_filter_func_;
|
| - bool is_unbounded_;
|
| -};
|
| +void PassiveLogCollector::RequestTracker::AddConnectJobInfo(
|
| + const net::NetLog::Entry& entry,
|
| + RequestInfo* live_entry) {
|
| + // We have just been notified of the ConnectJob that was bound to this
|
| + // URLRequest/SocketStream. Look up all the data we captured for that
|
| + // ConnectJob, and append it to the URLRequest/SocketStream's
|
| + // RequestInfo.
|
|
|
| -template<typename Request>
|
| -const size_t RequestTracker<Request>::kMaxGraveyardSize = 25;
|
| + // TODO(eroman): This should NOT be plumbed through via |error_code|!
|
| + int connect_job_id = entry.error_code;
|
|
|
| -template<typename Request>
|
| -const size_t RequestTracker<Request>::kMaxGraveyardURLSize = 1000;
|
| + const RequestInfo* connect_job_info =
|
| + connect_job_tracker_->GetRequestInfoFromGraveyard(connect_job_id);
|
|
|
| -template<typename Request>
|
| -const size_t RequestTracker<Request>::kBoundedLoadLogMaxEntries = 50;
|
| -
|
| -#endif // NET_URL_REQUEST_REQUEST_TRACKER_H_
|
| + if (connect_job_info) {
|
| + // Append the ConnectJob information we found.
|
| + AppendToRequestInfo(*connect_job_info, is_unbounded(), live_entry);
|
| + } else {
|
| + // If we couldn't find the information for the ConnectJob, append a
|
| + // generic message instead.
|
| + net::NetLog::Entry e(entry);
|
| + e.type = net::NetLog::Entry::TYPE_STRING;
|
| + e.string = StringPrintf("Used ConnectJob id=%d", connect_job_id);
|
| + AddEntryToRequestInfo(e, is_unbounded(), live_entry);
|
| + }
|
| +}
|
|
|