1 // Copyright (c) 2006-2009 The Chromium Authors. All rights reserved. | |
2 // Use of this source code is governed by a BSD-style license that can be | |
3 // found in the LICENSE file. | |
4 | |
5 #include "net/url_request/request_tracker.h" | |
6 | |
7 #include "base/compiler_specific.h" | |
8 #include "base/format_macros.h" | |
9 #include "base/string_util.h" | |
10 #include "testing/gtest/include/gtest/gtest.h" | |
11 | |
12 namespace { | |
13 | |
// Cap each test request's LoadLog at one entry -- these tests only check
// that a log is attached to the graveyard info, not what it contains.
static const int kMaxNumLoadLogEntries = 1;
15 | |
16 class TestRequest { | |
17 public: | |
18 explicit TestRequest(const GURL& url) | |
19 : url_(url), | |
20 load_log_(new net::LoadLog(kMaxNumLoadLogEntries)), | |
21 ALLOW_THIS_IN_INITIALIZER_LIST(request_tracker_node_(this)) {} | |
22 ~TestRequest() {} | |
23 | |
24 // This method is used in RequestTrackerTest::Basic test. | |
25 const GURL& original_url() const { return url_; } | |
26 | |
27 private: | |
28 // RequestTracker<T> will access GetRecentRequestInfo() and | |
29 // |request_tracker_node_|. | |
30 friend class RequestTracker<TestRequest>; | |
31 | |
32 void GetInfoForTracker( | |
33 RequestTracker<TestRequest>::RecentRequestInfo* info) const { | |
34 info->original_url = url_; | |
35 info->load_log = load_log_; | |
36 } | |
37 | |
38 const GURL url_; | |
39 scoped_refptr<net::LoadLog> load_log_; | |
40 | |
41 RequestTracker<TestRequest>::Node request_tracker_node_; | |
42 | |
43 DISALLOW_COPY_AND_ASSIGN(TestRequest); | |
44 }; | |
45 | |
46 | |
47 TEST(RequestTrackerTest, BasicBounded) { | |
48 RequestTracker<TestRequest> tracker; | |
49 EXPECT_FALSE(tracker.IsUnbounded()); | |
50 EXPECT_EQ(0u, tracker.GetLiveRequests().size()); | |
51 EXPECT_EQ(0u, tracker.GetRecentlyDeceased().size()); | |
52 | |
53 TestRequest req1(GURL("http://req1")); | |
54 TestRequest req2(GURL("http://req2")); | |
55 TestRequest req3(GURL("http://req3")); | |
56 TestRequest req4(GURL("http://req4")); | |
57 TestRequest req5(GURL("http://req5")); | |
58 | |
59 tracker.Add(&req1); | |
60 tracker.Add(&req2); | |
61 tracker.Add(&req3); | |
62 tracker.Add(&req4); | |
63 tracker.Add(&req5); | |
64 | |
65 std::vector<TestRequest*> live_reqs = tracker.GetLiveRequests(); | |
66 | |
67 ASSERT_EQ(5u, live_reqs.size()); | |
68 EXPECT_EQ(GURL("http://req1"), live_reqs[0]->original_url()); | |
69 EXPECT_EQ(GURL("http://req2"), live_reqs[1]->original_url()); | |
70 EXPECT_EQ(GURL("http://req3"), live_reqs[2]->original_url()); | |
71 EXPECT_EQ(GURL("http://req4"), live_reqs[3]->original_url()); | |
72 EXPECT_EQ(GURL("http://req5"), live_reqs[4]->original_url()); | |
73 | |
74 tracker.Remove(&req1); | |
75 tracker.Remove(&req5); | |
76 tracker.Remove(&req3); | |
77 | |
78 ASSERT_EQ(3u, tracker.GetRecentlyDeceased().size()); | |
79 | |
80 live_reqs = tracker.GetLiveRequests(); | |
81 | |
82 ASSERT_EQ(2u, live_reqs.size()); | |
83 EXPECT_EQ(GURL("http://req2"), live_reqs[0]->original_url()); | |
84 EXPECT_EQ(GURL("http://req4"), live_reqs[1]->original_url()); | |
85 } | |
86 | |
87 TEST(RequestTrackerTest, GraveyardBounded) { | |
88 RequestTracker<TestRequest> tracker; | |
89 EXPECT_FALSE(tracker.IsUnbounded()); | |
90 EXPECT_EQ(0u, tracker.GetLiveRequests().size()); | |
91 EXPECT_EQ(0u, tracker.GetRecentlyDeceased().size()); | |
92 | |
93 // Add twice as many requests as will fit in the graveyard. | |
94 for (size_t i = 0; | |
95 i < RequestTracker<TestRequest>::kMaxGraveyardSize * 2; | |
96 ++i) { | |
97 TestRequest req(GURL(StringPrintf("http://req%" PRIuS, i).c_str())); | |
98 tracker.Add(&req); | |
99 tracker.Remove(&req); | |
100 } | |
101 | |
102 // Check that only the last |kMaxGraveyardSize| requests are in-memory. | |
103 | |
104 RequestTracker<TestRequest>::RecentRequestInfoList recent_reqs = | |
105 tracker.GetRecentlyDeceased(); | |
106 | |
107 ASSERT_EQ(RequestTracker<TestRequest>::kMaxGraveyardSize, recent_reqs.size()); | |
108 | |
109 for (size_t i = 0; i < RequestTracker<TestRequest>::kMaxGraveyardSize; ++i) { | |
110 size_t req_number = i + RequestTracker<TestRequest>::kMaxGraveyardSize; | |
111 GURL url(StringPrintf("http://req%" PRIuS, req_number).c_str()); | |
112 EXPECT_EQ(url, recent_reqs[i].original_url); | |
113 } | |
114 } | |
115 | |
116 TEST(RequestTrackerTest, GraveyardUnbounded) { | |
117 RequestTracker<TestRequest> tracker; | |
118 EXPECT_FALSE(tracker.IsUnbounded()); | |
119 EXPECT_EQ(0u, tracker.GetLiveRequests().size()); | |
120 EXPECT_EQ(0u, tracker.GetRecentlyDeceased().size()); | |
121 | |
122 tracker.SetUnbounded(true); | |
123 | |
124 EXPECT_TRUE(tracker.IsUnbounded()); | |
125 | |
126 // Add twice as many requests as would fit in the bounded graveyard. | |
127 | |
128 size_t kMaxSize = RequestTracker<TestRequest>::kMaxGraveyardSize * 2; | |
129 for (size_t i = 0; i < kMaxSize; ++i) { | |
130 TestRequest req(GURL(StringPrintf("http://req%" PRIuS, i).c_str())); | |
131 tracker.Add(&req); | |
132 tracker.Remove(&req); | |
133 } | |
134 | |
135 // Check that all of them got saved. | |
136 | |
137 RequestTracker<TestRequest>::RecentRequestInfoList recent_reqs = | |
138 tracker.GetRecentlyDeceased(); | |
139 | |
140 ASSERT_EQ(kMaxSize, recent_reqs.size()); | |
141 | |
142 for (size_t i = 0; i < kMaxSize; ++i) { | |
143 GURL url(StringPrintf("http://req%" PRIuS, i).c_str()); | |
144 EXPECT_EQ(url, recent_reqs[i].original_url); | |
145 } | |
146 } | |
147 | |
148 // Check that very long URLs are truncated. | |
149 TEST(RequestTrackerTest, GraveyardURLBounded) { | |
150 RequestTracker<TestRequest> tracker; | |
151 EXPECT_FALSE(tracker.IsUnbounded()); | |
152 | |
153 std::string big_url_spec("http://"); | |
154 big_url_spec.resize(2 * RequestTracker<TestRequest>::kMaxGraveyardURLSize, | |
155 'x'); | |
156 GURL big_url(big_url_spec); | |
157 TestRequest req(big_url); | |
158 | |
159 tracker.Add(&req); | |
160 tracker.Remove(&req); | |
161 | |
162 ASSERT_EQ(1u, tracker.GetRecentlyDeceased().size()); | |
163 // The +1 is because GURL canonicalizes with a trailing '/' ... maybe | |
164 // we should just save the std::string rather than the GURL. | |
165 EXPECT_EQ(RequestTracker<TestRequest>::kMaxGraveyardURLSize + 1, | |
166 tracker.GetRecentlyDeceased()[0].original_url.spec().size()); | |
167 } | |
168 | |
169 // Test the doesn't fail if the URL was invalid. http://crbug.com/21423. | |
170 TEST(URLRequestTrackerTest, TrackingInvalidURL) { | |
171 RequestTracker<TestRequest> tracker; | |
172 EXPECT_FALSE(tracker.IsUnbounded()); | |
173 | |
174 EXPECT_EQ(0u, tracker.GetLiveRequests().size()); | |
175 EXPECT_EQ(0u, tracker.GetRecentlyDeceased().size()); | |
176 | |
177 GURL invalid_url("xabc"); | |
178 EXPECT_FALSE(invalid_url.is_valid()); | |
179 TestRequest req(invalid_url); | |
180 | |
181 tracker.Add(&req); | |
182 tracker.Remove(&req); | |
183 | |
184 // Check that the invalid URL made it into graveyard. | |
185 ASSERT_EQ(1u, tracker.GetRecentlyDeceased().size()); | |
186 EXPECT_FALSE(tracker.GetRecentlyDeceased()[0].original_url.is_valid()); | |
187 } | |
188 | |
189 bool ShouldRequestBeAddedToGraveyard(const GURL& url) { | |
190 return !url.SchemeIs("chrome") && !url.SchemeIs("data"); | |
191 } | |
192 | |
193 // Check that we can exclude "chrome://" URLs and "data:" URLs from being | |
194 // saved into the recent requests list (graveyard), by using a filter. | |
195 TEST(RequestTrackerTest, GraveyardCanBeFiltered) { | |
196 RequestTracker<TestRequest> tracker; | |
197 EXPECT_FALSE(tracker.IsUnbounded()); | |
198 | |
199 tracker.SetGraveyardFilter(ShouldRequestBeAddedToGraveyard); | |
200 | |
201 // This will be excluded. | |
202 GURL url1("chrome://dontcare/"); | |
203 TestRequest req1(url1); | |
204 tracker.Add(&req1); | |
205 tracker.Remove(&req1); | |
206 | |
207 // This will be be added to graveyard. | |
208 GURL url2("chrome2://dontcare/"); | |
209 TestRequest req2(url2); | |
210 tracker.Add(&req2); | |
211 tracker.Remove(&req2); | |
212 | |
213 // This will be be added to graveyard. | |
214 GURL url3("http://foo/"); | |
215 TestRequest req3(url3); | |
216 tracker.Add(&req3); | |
217 tracker.Remove(&req3); | |
218 | |
219 // This will be be excluded. | |
220 GURL url4("data:sup"); | |
221 TestRequest req4(url4); | |
222 tracker.Add(&req4); | |
223 tracker.Remove(&req4); | |
224 | |
225 ASSERT_EQ(2u, tracker.GetRecentlyDeceased().size()); | |
226 EXPECT_EQ(url2, tracker.GetRecentlyDeceased()[0].original_url); | |
227 EXPECT_EQ(url3, tracker.GetRecentlyDeceased()[1].original_url); | |
228 } | |
229 | |
230 // Convert an unbounded tracker back to being bounded. | |
231 TEST(RequestTrackerTest, ConvertUnboundedToBounded) { | |
232 RequestTracker<TestRequest> tracker; | |
233 EXPECT_FALSE(tracker.IsUnbounded()); | |
234 EXPECT_EQ(0u, tracker.GetLiveRequests().size()); | |
235 EXPECT_EQ(0u, tracker.GetRecentlyDeceased().size()); | |
236 | |
237 tracker.SetUnbounded(true); | |
238 EXPECT_TRUE(tracker.IsUnbounded()); | |
239 | |
240 // Add twice as many requests as would fit in the bounded graveyard. | |
241 | |
242 size_t kMaxSize = RequestTracker<TestRequest>::kMaxGraveyardSize * 2; | |
243 for (size_t i = 0; i < kMaxSize; ++i) { | |
244 TestRequest req(GURL(StringPrintf("http://req%" PRIuS, i).c_str())); | |
245 tracker.Add(&req); | |
246 tracker.Remove(&req); | |
247 } | |
248 | |
249 // Check that all of them got saved. | |
250 ASSERT_EQ(kMaxSize, tracker.GetRecentlyDeceased().size()); | |
251 | |
252 // Now make the tracker bounded, and add more entries to its graveyard. | |
253 tracker.SetUnbounded(false); | |
254 | |
255 kMaxSize = RequestTracker<TestRequest>::kMaxGraveyardSize; | |
256 for (size_t i = 0; i < kMaxSize; ++i) { | |
257 TestRequest req(GURL(StringPrintf("http://req%" PRIuS, i).c_str())); | |
258 tracker.Add(&req); | |
259 tracker.Remove(&req); | |
260 } | |
261 | |
262 // We should only have kMaxGraveyardSize entries now. | |
263 ASSERT_EQ(kMaxSize, tracker.GetRecentlyDeceased().size()); | |
264 } | |
265 | |
266 } // namespace | |