Chromium Code Reviews

Unified Diff: chrome/browser/net/passive_log_collector_unittest.cc

Issue 848006: Generalize the net module's LoadLog facility from a passive container, to an event stream (NetLog). (Closed) Base URL: svn://chrome-svn/chrome/trunk/src/
Patch Set: Split up RequestTracker into ConnectJobTracker+RequestTracker+RequestTrackerBase, address comments. Created 10 years, 9 months ago.
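
The tests in this patch illustrate the change named in the issue title: rather than calling Add()/Remove() with request objects, the tracker now passively observes a stream of begin/end log entries keyed by a numeric source id, and moves finished requests into a bounded "graveyard" of recently deceased requests. As a rough orientation for readers outside the Chromium tree, here is a minimal self-contained C++ sketch of that pattern. All names in it (SketchTracker, Entry, Phase, and the value of kMaxGraveyardSize) are simplified stand-ins invented for illustration, not the actual net::NetLog or PassiveLogCollector API.

// A minimal sketch of the begin/end event-stream pattern these tests
// exercise. Requests are keyed by a source id, stay "live" while a BEGIN
// event is outstanding, and move to a bounded graveyard on END.
#include <cstddef>
#include <deque>
#include <map>
#include <string>
#include <vector>

enum class Phase { kBegin, kEnd };  // Stand-in for net::NetLog::PHASE_*.

struct Entry {                      // Simplified stand-in for net::NetLog::Entry.
  int source_id;
  Phase phase;
  std::string url;                  // Only meaningful for kBegin entries.
};

class SketchTracker {
 public:
  // Assumed bound for illustration; the real constant lives on the tracker.
  static constexpr std::size_t kMaxGraveyardSize = 25;

  void OnAddEntry(const Entry& entry) {
    if (entry.phase == Phase::kBegin) {
      live_[entry.source_id] = entry.url;   // Request is now live.
      return;
    }
    auto it = live_.find(entry.source_id);
    if (it == live_.end())
      return;                               // END without a matching BEGIN; ignore.
    graveyard_.push_back(it->second);       // Move URL to the graveyard.
    live_.erase(it);
    if (graveyard_.size() > kMaxGraveyardSize)
      graveyard_.pop_front();               // Evict the oldest finished request.
  }

  // Live URLs, ordered by source id (std::map iteration order).
  std::vector<std::string> GetLiveRequests() const {
    std::vector<std::string> urls;
    for (const auto& pair : live_)
      urls.push_back(pair.second);
    return urls;
  }

  // Recently finished URLs, oldest first, bounded by kMaxGraveyardSize.
  const std::deque<std::string>& GetRecentlyDeceased() const {
    return graveyard_;
  }

 private:
  std::map<int, std::string> live_;     // source id -> URL.
  std::deque<std::string> graveyard_;
};
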
Index: chrome/browser/net/passive_log_collector_unittest.cc
===================================================================
--- chrome/browser/net/passive_log_collector_unittest.cc (revision 41560)
+++ chrome/browser/net/passive_log_collector_unittest.cc (working copy)
@@ -1,8 +1,8 @@
-// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "net/url_request/request_tracker.h"
+#include "chrome/browser/net/passive_log_collector.h"
#include "base/compiler_specific.h"
#include "base/format_macros.h"
@@ -11,110 +11,101 @@
namespace {
-static const int kMaxNumLoadLogEntries = 1;
+typedef PassiveLogCollector::RequestTracker RequestTracker;
+typedef PassiveLogCollector::RequestInfoList RequestInfoList;
-class TestRequest {
- public:
- explicit TestRequest(const GURL& url)
- : url_(url),
- load_log_(new net::LoadLog(kMaxNumLoadLogEntries)),
- ALLOW_THIS_IN_INITIALIZER_LIST(request_tracker_node_(this)) {}
- ~TestRequest() {}
+const net::NetLog::SourceType kSourceType = net::NetLog::SOURCE_NONE;
- // This method is used in RequestTrackerTest::Basic test.
- const GURL& original_url() const { return url_; }
+net::NetLog::Entry MakeStartLogEntryWithURL(int source_id,
+ const std::string& url) {
+ net::NetLog::Entry entry;
+ entry.source.type = kSourceType;
+ entry.source.id = source_id;
+ entry.type = net::NetLog::Entry::TYPE_EVENT;
+ entry.event = net::NetLog::Event(net::NetLog::TYPE_REQUEST_ALIVE,
+ net::NetLog::PHASE_BEGIN);
+ entry.string = url;
+ return entry;
+}
- private:
- // RequestTracker<T> will access GetRecentRequestInfo() and
- // |request_tracker_node_|.
- friend class RequestTracker<TestRequest>;
+net::NetLog::Entry MakeStartLogEntry(int source_id) {
+ return MakeStartLogEntryWithURL(source_id,
+ StringPrintf("http://req%d", source_id));
+}
- void GetInfoForTracker(
- RequestTracker<TestRequest>::RecentRequestInfo* info) const {
- info->original_url = url_;
- info->load_log = load_log_;
- }
+net::NetLog::Entry MakeEndLogEntry(int source_id) {
+ net::NetLog::Entry entry;
+ entry.source.type = kSourceType;
+ entry.source.id = source_id;
+ entry.type = net::NetLog::Entry::TYPE_EVENT;
+ entry.event = net::NetLog::Event(net::NetLog::TYPE_REQUEST_ALIVE,
+ net::NetLog::PHASE_END);
+ return entry;
+}
- const GURL url_;
- scoped_refptr<net::LoadLog> load_log_;
+static const int kMaxNumLoadLogEntries = 1;
- RequestTracker<TestRequest>::Node request_tracker_node_;
-
- DISALLOW_COPY_AND_ASSIGN(TestRequest);
-};
-
-
TEST(RequestTrackerTest, BasicBounded) {
- RequestTracker<TestRequest> tracker;
+ RequestTracker tracker(NULL);
EXPECT_FALSE(tracker.IsUnbounded());
EXPECT_EQ(0u, tracker.GetLiveRequests().size());
EXPECT_EQ(0u, tracker.GetRecentlyDeceased().size());
- TestRequest req1(GURL("http://req1"));
- TestRequest req2(GURL("http://req2"));
- TestRequest req3(GURL("http://req3"));
- TestRequest req4(GURL("http://req4"));
- TestRequest req5(GURL("http://req5"));
+ tracker.OnAddEntry(MakeStartLogEntry(1));
+ tracker.OnAddEntry(MakeStartLogEntry(2));
+ tracker.OnAddEntry(MakeStartLogEntry(3));
+ tracker.OnAddEntry(MakeStartLogEntry(4));
+ tracker.OnAddEntry(MakeStartLogEntry(5));
- tracker.Add(&req1);
- tracker.Add(&req2);
- tracker.Add(&req3);
- tracker.Add(&req4);
- tracker.Add(&req5);
+ RequestInfoList live_reqs = tracker.GetLiveRequests();
- std::vector<TestRequest*> live_reqs = tracker.GetLiveRequests();
-
ASSERT_EQ(5u, live_reqs.size());
- EXPECT_EQ(GURL("http://req1"), live_reqs[0]->original_url());
- EXPECT_EQ(GURL("http://req2"), live_reqs[1]->original_url());
- EXPECT_EQ(GURL("http://req3"), live_reqs[2]->original_url());
- EXPECT_EQ(GURL("http://req4"), live_reqs[3]->original_url());
- EXPECT_EQ(GURL("http://req5"), live_reqs[4]->original_url());
+ EXPECT_EQ("http://req1", live_reqs[0].url);
+ EXPECT_EQ("http://req2", live_reqs[1].url);
+ EXPECT_EQ("http://req3", live_reqs[2].url);
+ EXPECT_EQ("http://req4", live_reqs[3].url);
+ EXPECT_EQ("http://req5", live_reqs[4].url);
- tracker.Remove(&req1);
- tracker.Remove(&req5);
- tracker.Remove(&req3);
+ tracker.OnAddEntry(MakeEndLogEntry(1));
+ tracker.OnAddEntry(MakeEndLogEntry(5));
+ tracker.OnAddEntry(MakeEndLogEntry(3));
ASSERT_EQ(3u, tracker.GetRecentlyDeceased().size());
live_reqs = tracker.GetLiveRequests();
ASSERT_EQ(2u, live_reqs.size());
- EXPECT_EQ(GURL("http://req2"), live_reqs[0]->original_url());
- EXPECT_EQ(GURL("http://req4"), live_reqs[1]->original_url());
+ EXPECT_EQ("http://req2", live_reqs[0].url);
+ EXPECT_EQ("http://req4", live_reqs[1].url);
}
TEST(RequestTrackerTest, GraveyardBounded) {
- RequestTracker<TestRequest> tracker;
+ RequestTracker tracker(NULL);
EXPECT_FALSE(tracker.IsUnbounded());
EXPECT_EQ(0u, tracker.GetLiveRequests().size());
EXPECT_EQ(0u, tracker.GetRecentlyDeceased().size());
// Add twice as many requests as will fit in the graveyard.
- for (size_t i = 0;
- i < RequestTracker<TestRequest>::kMaxGraveyardSize * 2;
- ++i) {
- TestRequest req(GURL(StringPrintf("http://req%" PRIuS, i).c_str()));
- tracker.Add(&req);
- tracker.Remove(&req);
+ for (size_t i = 0; i < RequestTracker::kMaxGraveyardSize * 2; ++i) {
+ tracker.OnAddEntry(MakeStartLogEntry(i));
+ tracker.OnAddEntry(MakeEndLogEntry(i));
}
// Check that only the last |kMaxGraveyardSize| requests are in-memory.
- RequestTracker<TestRequest>::RecentRequestInfoList recent_reqs =
- tracker.GetRecentlyDeceased();
+ RequestInfoList recent_reqs = tracker.GetRecentlyDeceased();
- ASSERT_EQ(RequestTracker<TestRequest>::kMaxGraveyardSize, recent_reqs.size());
+ ASSERT_EQ(RequestTracker::kMaxGraveyardSize, recent_reqs.size());
- for (size_t i = 0; i < RequestTracker<TestRequest>::kMaxGraveyardSize; ++i) {
- size_t req_number = i + RequestTracker<TestRequest>::kMaxGraveyardSize;
- GURL url(StringPrintf("http://req%" PRIuS, req_number).c_str());
- EXPECT_EQ(url, recent_reqs[i].original_url);
+ for (size_t i = 0; i < RequestTracker::kMaxGraveyardSize; ++i) {
+ size_t req_number = i + RequestTracker::kMaxGraveyardSize;
+ std::string url = StringPrintf("http://req%" PRIuS, req_number);
+ EXPECT_EQ(url, recent_reqs[i].url);
}
}
TEST(RequestTrackerTest, GraveyardUnbounded) {
- RequestTracker<TestRequest> tracker;
+ RequestTracker tracker(NULL);
EXPECT_FALSE(tracker.IsUnbounded());
EXPECT_EQ(0u, tracker.GetLiveRequests().size());
EXPECT_EQ(0u, tracker.GetRecentlyDeceased().size());
@@ -125,111 +116,69 @@
// Add twice as many requests as would fit in the bounded graveyard.
- size_t kMaxSize = RequestTracker<TestRequest>::kMaxGraveyardSize * 2;
+ size_t kMaxSize = RequestTracker::kMaxGraveyardSize * 2;
for (size_t i = 0; i < kMaxSize; ++i) {
- TestRequest req(GURL(StringPrintf("http://req%" PRIuS, i).c_str()));
- tracker.Add(&req);
- tracker.Remove(&req);
+ tracker.OnAddEntry(MakeStartLogEntry(i));
+ tracker.OnAddEntry(MakeEndLogEntry(i));
}
// Check that all of them got saved.
- RequestTracker<TestRequest>::RecentRequestInfoList recent_reqs =
- tracker.GetRecentlyDeceased();
+ RequestInfoList recent_reqs = tracker.GetRecentlyDeceased();
ASSERT_EQ(kMaxSize, recent_reqs.size());
for (size_t i = 0; i < kMaxSize; ++i) {
- GURL url(StringPrintf("http://req%" PRIuS, i).c_str());
- EXPECT_EQ(url, recent_reqs[i].original_url);
+ std::string url = StringPrintf("http://req%" PRIuS, i);
+ EXPECT_EQ(url, recent_reqs[i].url);
}
}
// Check that very long URLs are truncated.
TEST(RequestTrackerTest, GraveyardURLBounded) {
- RequestTracker<TestRequest> tracker;
+ RequestTracker tracker(NULL);
EXPECT_FALSE(tracker.IsUnbounded());
- std::string big_url_spec("http://");
- big_url_spec.resize(2 * RequestTracker<TestRequest>::kMaxGraveyardURLSize,
- 'x');
- GURL big_url(big_url_spec);
- TestRequest req(big_url);
+ std::string big_url("http://");
+ big_url.resize(2 * RequestTracker::kMaxGraveyardURLSize, 'x');
- tracker.Add(&req);
- tracker.Remove(&req);
+ tracker.OnAddEntry(MakeStartLogEntryWithURL(1, big_url));
+ tracker.OnAddEntry(MakeEndLogEntry(1));
ASSERT_EQ(1u, tracker.GetRecentlyDeceased().size());
- // The +1 is because GURL canonicalizes with a trailing '/' ... maybe
- // we should just save the std::string rather than the GURL.
- EXPECT_EQ(RequestTracker<TestRequest>::kMaxGraveyardURLSize + 1,
- tracker.GetRecentlyDeceased()[0].original_url.spec().size());
+ EXPECT_EQ(RequestTracker::kMaxGraveyardURLSize,
+ tracker.GetRecentlyDeceased()[0].url.size());
}
-// Test the doesn't fail if the URL was invalid. http://crbug.com/21423.
-TEST(URLRequestTrackerTest, TrackingInvalidURL) {
- RequestTracker<TestRequest> tracker;
+// Check that we exclude "chrome://" URLs from being saved into the recent
+// requests list (graveyard).
+TEST(RequestTrackerTest, GraveyardIsFiltered) {
+ RequestTracker tracker(NULL);
EXPECT_FALSE(tracker.IsUnbounded());
- EXPECT_EQ(0u, tracker.GetLiveRequests().size());
- EXPECT_EQ(0u, tracker.GetRecentlyDeceased().size());
-
- GURL invalid_url("xabc");
- EXPECT_FALSE(invalid_url.is_valid());
- TestRequest req(invalid_url);
-
- tracker.Add(&req);
- tracker.Remove(&req);
-
- // Check that the invalid URL made it into graveyard.
- ASSERT_EQ(1u, tracker.GetRecentlyDeceased().size());
- EXPECT_FALSE(tracker.GetRecentlyDeceased()[0].original_url.is_valid());
-}
-
-bool ShouldRequestBeAddedToGraveyard(const GURL& url) {
- return !url.SchemeIs("chrome") && !url.SchemeIs("data");
-}
-
-// Check that we can exclude "chrome://" URLs and "data:" URLs from being
-// saved into the recent requests list (graveyard), by using a filter.
-TEST(RequestTrackerTest, GraveyardCanBeFiltered) {
- RequestTracker<TestRequest> tracker;
- EXPECT_FALSE(tracker.IsUnbounded());
-
- tracker.SetGraveyardFilter(ShouldRequestBeAddedToGraveyard);
-
// This will be excluded.
- GURL url1("chrome://dontcare/");
- TestRequest req1(url1);
- tracker.Add(&req1);
- tracker.Remove(&req1);
+ std::string url1 = "chrome://dontcare/";
+ tracker.OnAddEntry(MakeStartLogEntryWithURL(1, url1));
+ tracker.OnAddEntry(MakeEndLogEntry(1));
  // This will be added to the graveyard.
- GURL url2("chrome2://dontcare/");
- TestRequest req2(url2);
- tracker.Add(&req2);
- tracker.Remove(&req2);
+ std::string url2 = "chrome2://dontcare/";
+ tracker.OnAddEntry(MakeStartLogEntryWithURL(2, url2));
+ tracker.OnAddEntry(MakeEndLogEntry(2));
  // This will be added to the graveyard.
- GURL url3("http://foo/");
- TestRequest req3(url3);
- tracker.Add(&req3);
- tracker.Remove(&req3);
+ std::string url3 = "http://foo/";
+ tracker.OnAddEntry(MakeStartLogEntryWithURL(3, url3));
+ tracker.OnAddEntry(MakeEndLogEntry(3));
- // This will be be excluded.
- GURL url4("data:sup");
- TestRequest req4(url4);
- tracker.Add(&req4);
- tracker.Remove(&req4);
-
ASSERT_EQ(2u, tracker.GetRecentlyDeceased().size());
- EXPECT_EQ(url2, tracker.GetRecentlyDeceased()[0].original_url);
- EXPECT_EQ(url3, tracker.GetRecentlyDeceased()[1].original_url);
+ EXPECT_EQ(url2, tracker.GetRecentlyDeceased()[0].url);
+ EXPECT_EQ(url3, tracker.GetRecentlyDeceased()[1].url);
}
// Convert an unbounded tracker back to being bounded.
TEST(RequestTrackerTest, ConvertUnboundedToBounded) {
- RequestTracker<TestRequest> tracker;
+ RequestTracker tracker(NULL);
EXPECT_FALSE(tracker.IsUnbounded());
EXPECT_EQ(0u, tracker.GetLiveRequests().size());
EXPECT_EQ(0u, tracker.GetRecentlyDeceased().size());
@@ -239,11 +188,10 @@
// Add twice as many requests as would fit in the bounded graveyard.
- size_t kMaxSize = RequestTracker<TestRequest>::kMaxGraveyardSize * 2;
+ size_t kMaxSize = RequestTracker::kMaxGraveyardSize * 2;
for (size_t i = 0; i < kMaxSize; ++i) {
- TestRequest req(GURL(StringPrintf("http://req%" PRIuS, i).c_str()));
- tracker.Add(&req);
- tracker.Remove(&req);
+ tracker.OnAddEntry(MakeStartLogEntry(i));
+ tracker.OnAddEntry(MakeEndLogEntry(i));
}
// Check that all of them got saved.
@@ -252,11 +200,10 @@
// Now make the tracker bounded, and add more entries to its graveyard.
tracker.SetUnbounded(false);
- kMaxSize = RequestTracker<TestRequest>::kMaxGraveyardSize;
- for (size_t i = 0; i < kMaxSize; ++i) {
- TestRequest req(GURL(StringPrintf("http://req%" PRIuS, i).c_str()));
- tracker.Add(&req);
- tracker.Remove(&req);
+ kMaxSize = RequestTracker::kMaxGraveyardSize;
+ for (size_t i = kMaxSize; i < 2 * kMaxSize; ++i) {
+ tracker.OnAddEntry(MakeStartLogEntry(i));
+ tracker.OnAddEntry(MakeEndLogEntry(i));
}
// We should only have kMaxGraveyardSize entries now.
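
For completeness, a short usage sketch that drives the hypothetical SketchTracker from the block above the same way the BasicBounded test drives the real tracker; it compiles when appended to that sketch.

#include <cassert>

int main() {
  SketchTracker tracker;

  // Five requests begin, mirroring MakeStartLogEntry(1..5) in the test.
  for (int id = 1; id <= 5; ++id)
    tracker.OnAddEntry({id, Phase::kBegin, "http://req" + std::to_string(id)});
  assert(tracker.GetLiveRequests().size() == 5u);

  // Two requests end and move to the graveyard.
  tracker.OnAddEntry({3, Phase::kEnd, ""});
  tracker.OnAddEntry({5, Phase::kEnd, ""});
  assert(tracker.GetLiveRequests().size() == 3u);
  assert(tracker.GetRecentlyDeceased().size() == 2u);
  return 0;
}
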