| OLD | NEW |
| (Empty) |
| 1 // Copyright (c) 2006-2009 The Chromium Authors. All rights reserved. | |
| 2 // Use of this source code is governed by a BSD-style license that can be | |
| 3 // found in the LICENSE file. | |
| 4 | |
| 5 #ifndef NET_URL_REQUEST_REQUEST_TRACKER_H_ | |
| 6 #define NET_URL_REQUEST_REQUEST_TRACKER_H_ | |
| 7 | |
| 8 #include <vector> | |
| 9 | |
| 10 #include "base/ref_counted.h" | |
| 11 #include "base/linked_list.h" | |
| 12 #include "base/logging.h" | |
| 13 #include "googleurl/src/gurl.h" | |
| 14 #include "net/base/load_log.h" | |
| 15 | |
| 16 // Class to track all of the live instances of Request associated with a | |
| 17 // particular URLRequestContext. It keeps a circular queue of the LoadLogs | |
| 18 // for recently deceased requests. | |
| 19 template<typename Request> | |
| 20 class RequestTracker { | |
| 21 public: | |
| 22 struct RecentRequestInfo { | |
| 23 GURL original_url; | |
| 24 scoped_refptr<net::LoadLog> load_log; | |
| 25 }; | |
| 26 | |
| 27 // Helper class to make Request insertable into a base::LinkedList, | |
| 28 // without making the public interface expose base::LinkNode. | |
| 29 class Node : public base::LinkNode<Node> { | |
| 30 public: | |
| 31 Node(Request* request) : request_(request) {} | |
| 32 ~Node() {} | |
| 33 | |
| 34 Request* request() const { return request_; } | |
| 35 | |
| 36 private: | |
| 37 Request* request_; | |
| 38 }; | |
| 39 | |
| 40 typedef std::vector<RecentRequestInfo> RecentRequestInfoList; | |
| 41 typedef bool (*RecentRequestsFilterFunc)(const GURL&); | |
| 42 | |
| 43 // The maximum number of entries for |graveyard_|, when in bounded mode. | |
| 44 static const size_t kMaxGraveyardSize; | |
| 45 | |
| 46 // The maximum size of URLs to stuff into RecentRequestInfo, when in bounded | |
| 47 // mode. | |
| 48 static const size_t kMaxGraveyardURLSize; | |
| 49 | |
| 50 // The maximum number of entries to use for LoadLogs when in bounded mode. | |
| 51 static const size_t kBoundedLoadLogMaxEntries; | |
| 52 | |
| 53 RequestTracker() | |
| 54 : next_graveyard_index_(0), | |
| 55 graveyard_filter_func_(NULL), | |
| 56 is_unbounded_(false) { | |
| 57 } | |
| 58 | |
| 59 ~RequestTracker() {} | |
| 60 | |
| 61 // Returns a list of Requests that are alive. | |
| 62 std::vector<Request*> GetLiveRequests() { | |
| 63 std::vector<Request*> list; | |
| 64 for (base::LinkNode<Node>* node = live_instances_.head(); | |
| 65 node != live_instances_.end(); | |
| 66 node = node->next()) { | |
| 67 Request* request = node->value()->request(); | |
| 68 list.push_back(request); | |
| 69 } | |
| 70 return list; | |
| 71 } | |
| 72 | |
| 73 // Clears the circular buffer of RecentRequestInfos. | |
| 74 void ClearRecentlyDeceased() { | |
| 75 next_graveyard_index_ = 0; | |
| 76 graveyard_.clear(); | |
| 77 } | |
| 78 | |
| 79 // Returns a list of recently completed Requests. | |
| 80 const RecentRequestInfoList GetRecentlyDeceased() { | |
| 81 RecentRequestInfoList list; | |
| 82 | |
| 83 // Copy the items from |graveyard_| (our circular queue of recently | |
| 84 // deceased request infos) into a vector, ordered from oldest to newest. | |
| 85 for (size_t i = 0; i < graveyard_.size(); ++i) { | |
| 86 size_t index = (next_graveyard_index_ + i) % graveyard_.size(); | |
| 87 list.push_back(graveyard_[index]); | |
| 88 } | |
| 89 return list; | |
| 90 } | |
| 91 | |
| 92 void Add(Request* request) { | |
| 93 live_instances_.Append(&request->request_tracker_node_); | |
| 94 } | |
| 95 | |
| 96 void Remove(Request* request) { | |
| 97 // Remove from |live_instances_|. | |
| 98 request->request_tracker_node_.RemoveFromList(); | |
| 99 | |
| 100 RecentRequestInfo info; | |
| 101 request->GetInfoForTracker(&info); | |
| 102 | |
| 103 if (!is_unbounded_) { | |
| 104 // Paranoia check: truncate |info.original_url| if it is really big. | |
| 105 const std::string& spec = info.original_url.possibly_invalid_spec(); | |
| 106 if (spec.size() > kMaxGraveyardURLSize) | |
| 107 info.original_url = GURL(spec.substr(0, kMaxGraveyardURLSize)); | |
| 108 } | |
| 109 | |
| 110 if (ShouldInsertIntoGraveyard(info)) { | |
| 111 // Add into |graveyard_|. | |
| 112 InsertIntoGraveyard(info); | |
| 113 } | |
| 114 } | |
| 115 | |
| 116 // This function lets you exclude requests from being saved to the graveyard. | |
| 117 // The graveyard is a circular buffer of the most recently completed | |
| 118 // requests. Pass NULL turn off filtering. Otherwise pass in a function | |
| 119 // returns false to exclude requests, true otherwise. | |
| 120 void SetGraveyardFilter(RecentRequestsFilterFunc filter_func) { | |
| 121 graveyard_filter_func_ = filter_func; | |
| 122 } | |
| 123 | |
| 124 bool IsUnbounded() const { | |
| 125 return is_unbounded_; | |
| 126 } | |
| 127 | |
| 128 void SetUnbounded(bool unbounded) { | |
| 129 // No change. | |
| 130 if (is_unbounded_ == unbounded) | |
| 131 return; | |
| 132 | |
| 133 // If we are going from unbounded to bounded, we need to trim the | |
| 134 // graveyard. For simplicity we will simply clear it. | |
| 135 if (is_unbounded_ && !unbounded) | |
| 136 ClearRecentlyDeceased(); | |
| 137 | |
| 138 is_unbounded_ = unbounded; | |
| 139 } | |
| 140 | |
| 141 // Creates a LoadLog using the unbounded/bounded constraints that | |
| 142 // apply to this tracker. | |
| 143 net::LoadLog* CreateLoadLog() { | |
| 144 if (IsUnbounded()) | |
| 145 return new net::LoadLog(net::LoadLog::kUnbounded); | |
| 146 return new net::LoadLog(kBoundedLoadLogMaxEntries); | |
| 147 } | |
| 148 | |
| 149 private: | |
| 150 bool ShouldInsertIntoGraveyard(const RecentRequestInfo& info) { | |
| 151 if (!graveyard_filter_func_) | |
| 152 return true; | |
| 153 return graveyard_filter_func_(info.original_url); | |
| 154 } | |
| 155 | |
| 156 void InsertIntoGraveyard(const RecentRequestInfo& info) { | |
| 157 if (is_unbounded_) { | |
| 158 graveyard_.push_back(info); | |
| 159 return; | |
| 160 } | |
| 161 | |
| 162 // Otherwise enforce a bound on the graveyard size, by treating it as a | |
| 163 // circular buffer. | |
| 164 if (graveyard_.size() < kMaxGraveyardSize) { | |
| 165 // Still growing to maximum capacity. | |
| 166 DCHECK_EQ(next_graveyard_index_, graveyard_.size()); | |
| 167 graveyard_.push_back(info); | |
| 168 } else { | |
| 169 // At maximum capacity, overwite the oldest entry. | |
| 170 graveyard_[next_graveyard_index_] = info; | |
| 171 } | |
| 172 next_graveyard_index_ = (next_graveyard_index_ + 1) % kMaxGraveyardSize; | |
| 173 } | |
| 174 | |
| 175 base::LinkedList<Node> live_instances_; | |
| 176 | |
| 177 size_t next_graveyard_index_; | |
| 178 RecentRequestInfoList graveyard_; | |
| 179 RecentRequestsFilterFunc graveyard_filter_func_; | |
| 180 bool is_unbounded_; | |
| 181 }; | |
| 182 | |
// Out-of-line definitions for the static class constants declared in
// RequestTracker (required since these are non-inline static data members).

// Bounded mode keeps at most 25 recently deceased request infos.
template<typename Request>
const size_t RequestTracker<Request>::kMaxGraveyardSize = 25;

// URLs longer than 1000 characters are truncated before being stored.
template<typename Request>
const size_t RequestTracker<Request>::kMaxGraveyardURLSize = 1000;

// LoadLogs created via CreateLoadLog() hold at most 50 entries in bounded
// mode.
template<typename Request>
const size_t RequestTracker<Request>::kBoundedLoadLogMaxEntries = 50;
| 191 | |
| 192 #endif // NET_URL_REQUEST_REQUEST_TRACKER_H_ | |
| OLD | NEW |