Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(285)

Unified Diff: net/socket/client_socket_pool_base_unittest.cc

Issue 2533063002: Fix Net.Socket.IdleSocketTimeSaving histogram (Closed)
Patch Set: self review Created 4 years, 1 month ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View side-by-side diff with in-line comments
Download patch
« no previous file with comments | « net/socket/client_socket_pool_base.cc ('k') | net/socket/socket_test_util.h » ('j') | no next file with comments »
Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
Index: net/socket/client_socket_pool_base_unittest.cc
diff --git a/net/socket/client_socket_pool_base_unittest.cc b/net/socket/client_socket_pool_base_unittest.cc
index a4cc312bb97adf2de2f4dd72e9b1a62d6276b57f..b9e85e46b1b08e6aedd6ef3bc5a93d716d40a988 100644
--- a/net/socket/client_socket_pool_base_unittest.cc
+++ b/net/socket/client_socket_pool_base_unittest.cc
@@ -63,6 +63,8 @@ namespace {
const int kDefaultMaxSockets = 4;
const int kDefaultMaxSocketsPerGroup = 2;
const char kIdleSocketFateHistogram[] = "Net.Socket.IdleSocketFate";
+const char kIdleSocketTimeSavingHistogram[] =
+ "Net.Socket.IdleSocketTimeSaving2";
// Make sure |handle| sets load times correctly when it has been assigned a
// reused socket.
@@ -536,10 +538,12 @@ class TestClientSocketPool : public ClientSocketPool {
base_.CancelRequest(group_name, handle);
}
- void ReleaseSocket(const std::string& group_name,
- std::unique_ptr<StreamSocket> socket,
- int id) override {
- base_.ReleaseSocket(group_name, std::move(socket), id);
+ void ReleaseSocket(
+ const std::string& group_name,
+ std::unique_ptr<StreamSocket> socket,
+ int id,
+ const LoadTimingInfo::ConnectTiming& connect_timing) override {
+ base_.ReleaseSocket(group_name, std::move(socket), id, connect_timing);
}
void FlushWithError(int error) override { base_.FlushWithError(error); }
@@ -893,6 +897,7 @@ TEST_F(ClientSocketPoolBaseTest, TotalLimit) {
EXPECT_THAT(histograms.GetAllSamples(kIdleSocketFateHistogram),
testing::ElementsAre(
base::Bucket(/*IDLE_SOCKET_FATE_RELEASE_UNUSABLE=*/3, 7)));
+ EXPECT_TRUE(histograms.GetAllSamples(kIdleSocketTimeSavingHistogram).empty());
EXPECT_EQ(static_cast<int>(requests_size()),
client_socket_factory_.allocation_count());
@@ -934,6 +939,8 @@ TEST_F(ClientSocketPoolBaseTest, TotalLimitReachedNewGroup) {
testing::ElementsAre(
base::Bucket(/*IDLE_SOCKET_FATE_RELEASE_UNUSABLE=*/3, 5)));
+ EXPECT_TRUE(histograms.GetAllSamples(kIdleSocketTimeSavingHistogram).empty());
+
EXPECT_EQ(static_cast<int>(requests_size()),
client_socket_factory_.allocation_count());
EXPECT_EQ(requests_size() - kDefaultMaxSockets, completion_count());
@@ -968,6 +975,7 @@ TEST_F(ClientSocketPoolBaseTest, TotalLimitRespectsPriority) {
EXPECT_THAT(histograms.GetAllSamples(kIdleSocketFateHistogram),
testing::ElementsAre(
base::Bucket(/*IDLE_SOCKET_FATE_RELEASE_UNUSABLE=*/3, 7)));
+ EXPECT_TRUE(histograms.GetAllSamples(kIdleSocketTimeSavingHistogram).empty());
EXPECT_EQ(requests_size() - kDefaultMaxSockets, completion_count());
@@ -1007,6 +1015,7 @@ TEST_F(ClientSocketPoolBaseTest, TotalLimitRespectsGroupLimit) {
EXPECT_THAT(histograms.GetAllSamples(kIdleSocketFateHistogram),
testing::ElementsAre(
base::Bucket(/*IDLE_SOCKET_FATE_RELEASE_UNUSABLE=*/3, 7)));
+ EXPECT_TRUE(histograms.GetAllSamples(kIdleSocketTimeSavingHistogram).empty());
EXPECT_EQ(static_cast<int>(requests_size()),
client_socket_factory_.allocation_count());
@@ -1058,6 +1067,7 @@ TEST_F(ClientSocketPoolBaseTest, TotalLimitCountsConnectingSockets) {
EXPECT_THAT(histograms.GetAllSamples(kIdleSocketFateHistogram),
testing::ElementsAre(
base::Bucket(/*IDLE_SOCKET_FATE_RELEASE_UNUSABLE=*/3, 5)));
+ EXPECT_TRUE(histograms.GetAllSamples(kIdleSocketTimeSavingHistogram).empty());
EXPECT_EQ(static_cast<int>(requests_size()),
client_socket_factory_.allocation_count());
@@ -1107,6 +1117,7 @@ TEST_F(ClientSocketPoolBaseTest, CorrectlyCountStalledGroups) {
EXPECT_THAT(
histograms.GetAllSamples(kIdleSocketFateHistogram),
testing::ElementsAre(base::Bucket(/*IDLE_SOCKET_FATE_CLOSE_ONE=*/8, 2)));
+ EXPECT_TRUE(histograms.GetAllSamples(kIdleSocketTimeSavingHistogram).empty());
}
TEST_F(ClientSocketPoolBaseTest, StallAndThenCancelAndTriggerAvailableSocket) {
@@ -1302,6 +1313,7 @@ TEST_F(ClientSocketPoolBaseTest, CloseIdleSocketAtSocketLimitDeleteGroup) {
EXPECT_THAT(histograms.GetAllSamples(kIdleSocketFateHistogram),
testing::ElementsAre(
base::Bucket(/*IDLE_SOCKET_FATE_REUSE_UNUSED=*/1, 1)));
+ EXPECT_TRUE(histograms.GetAllSamples(kIdleSocketTimeSavingHistogram).empty());
EXPECT_EQ(kDefaultMaxSockets, client_socket_factory_.allocation_count());
EXPECT_EQ(kDefaultMaxSockets - 1, pool_->IdleSocketCount());
}
@@ -1324,6 +1336,7 @@ TEST_F(ClientSocketPoolBaseTest, PendingRequests) {
EXPECT_THAT(histograms.GetAllSamples(kIdleSocketFateHistogram),
testing::ElementsAre(
base::Bucket(/*IDLE_SOCKET_FATE_REUSE_UNUSED=*/1, 6)));
+ EXPECT_TRUE(histograms.GetAllSamples(kIdleSocketTimeSavingHistogram).empty());
EXPECT_EQ(kDefaultMaxSocketsPerGroup,
client_socket_factory_.allocation_count());
@@ -1363,6 +1376,7 @@ TEST_F(ClientSocketPoolBaseTest, PendingRequests_NoKeepAlive) {
EXPECT_THAT(histograms.GetAllSamples(kIdleSocketFateHistogram),
testing::ElementsAre(
base::Bucket(/*IDLE_SOCKET_FATE_RELEASE_UNUSABLE=*/3, 7)));
+ EXPECT_TRUE(histograms.GetAllSamples(kIdleSocketTimeSavingHistogram).empty());
EXPECT_EQ(static_cast<int>(requests_size()),
client_socket_factory_.allocation_count());
EXPECT_EQ(requests_size() - kDefaultMaxSocketsPerGroup,
@@ -1433,6 +1447,7 @@ TEST_F(ClientSocketPoolBaseTest, CancelRequest) {
EXPECT_THAT(histograms.GetAllSamples(kIdleSocketFateHistogram),
testing::ElementsAre(
base::Bucket(/*IDLE_SOCKET_FATE_REUSE_UNUSED=*/1, 4)));
+ EXPECT_TRUE(histograms.GetAllSamples(kIdleSocketTimeSavingHistogram).empty());
EXPECT_EQ(kDefaultMaxSocketsPerGroup,
client_socket_factory_.allocation_count());
@@ -1617,6 +1632,7 @@ TEST_F(ClientSocketPoolBaseTest, CloseIdleSocketsForced) {
EXPECT_THAT(histograms.GetAllSamples(kIdleSocketFateHistogram),
testing::ElementsAre(
base::Bucket(/*IDLE_SOCKET_FATE_CLEAN_UP_FORCED=*/4, 1)));
+ EXPECT_TRUE(histograms.GetAllSamples(kIdleSocketTimeSavingHistogram).empty());
}
TEST_F(ClientSocketPoolBaseTest, CleanUpUnusableIdleSockets) {
@@ -1645,6 +1661,7 @@ TEST_F(ClientSocketPoolBaseTest, CleanUpUnusableIdleSockets) {
EXPECT_THAT(histograms.GetAllSamples(kIdleSocketFateHistogram),
testing::ElementsAre(
base::Bucket(/*IDLE_SOCKET_FATE_CLEAN_UP_UNUSABLE=*/7, 1)));
+ EXPECT_TRUE(histograms.GetAllSamples(kIdleSocketTimeSavingHistogram).empty());
}
// Regression test for http://crbug.com/17985.
@@ -1694,6 +1711,7 @@ TEST_F(ClientSocketPoolBaseTest, GroupWithPendingRequestsIsNotEmpty) {
histograms.GetAllSamples(kIdleSocketFateHistogram),
testing::ElementsAre(base::Bucket(/*IDLE_SOCKET_FATE_REUSE_UNUSED=*/1, 1),
base::Bucket(/*IDLE_SOCKET_FATE_CLOSE_ONE=*/8, 1)));
+ EXPECT_TRUE(histograms.GetAllSamples(kIdleSocketTimeSavingHistogram).empty());
}
TEST_F(ClientSocketPoolBaseTest, BasicAsynchronous) {
@@ -2216,6 +2234,89 @@ TEST_F(ClientSocketPoolBaseTest, CleanupTimedOutIdleSocketsReuse) {
EXPECT_THAT(histograms.GetAllSamples(kIdleSocketFateHistogram),
testing::ElementsAre(
base::Bucket(/*IDLE_SOCKET_FATE_REUSE_REUSED=*/0, 1)));
+ auto buckets = histograms.GetAllSamples(kIdleSocketTimeSavingHistogram);
+ EXPECT_EQ(1u, buckets.size());
+ // The sample should be strictly greater than 0 because kPendingConnectDelay
+ // is used.
+ EXPECT_LT(0, buckets[0].min);
+ EXPECT_EQ(1, buckets[0].count);
+
+ TestNetLogEntry::List entries;
+ log.GetEntries(&entries);
+ EXPECT_TRUE(LogContainsEntryWithType(
+ entries, 1, NetLogEventType::SOCKET_POOL_REUSED_AN_EXISTING_SOCKET));
+}
+
+// Tests that when idle sockets are reused multiple times, histograms are
+// correct.
+TEST_F(ClientSocketPoolBaseTest, IdleSocketsReusedTwice) {
+ base::HistogramTester histograms;
+ CreatePoolWithIdleTimeouts(
+ kDefaultMaxSockets, kDefaultMaxSocketsPerGroup,
+ base::TimeDelta(), // Time out unused sockets immediately.
+ base::TimeDelta::FromDays(1)); // Don't time out used sockets.
+
+ connect_job_factory_->set_job_type(TestConnectJob::kMockPendingJob);
+
+ ClientSocketHandle handle;
+ TestCompletionCallback callback;
+ int rv = handle.Init("a", params_, LOWEST,
+ ClientSocketPool::RespectLimits::ENABLED,
+ callback.callback(), pool_.get(), NetLogWithSource());
+ ASSERT_THAT(rv, IsError(ERR_IO_PENDING));
+ EXPECT_EQ(LOAD_STATE_CONNECTING, pool_->GetLoadState("a", &handle));
+ ASSERT_THAT(callback.WaitForResult(), IsOk());
+
+ // Use and release the socket.
+ EXPECT_EQ(1, handle.socket()->Write(NULL, 1, CompletionCallback()));
+ TestLoadTimingInfoConnectedNotReused(handle);
+ handle.Reset();
+
+ // Should now have one idle socket.
+ ASSERT_EQ(1, pool_->IdleSocketCount());
+
+ // Request a new socket. This should reuse the old socket and complete
+ // synchronously.
+ BoundTestNetLog log;
+ rv = handle.Init("a", params_, LOWEST,
+ ClientSocketPool::RespectLimits::ENABLED,
+ CompletionCallback(), pool_.get(), log.bound());
+ ASSERT_THAT(rv, IsOk());
+ EXPECT_TRUE(handle.is_reused());
+ TestLoadTimingInfoConnectedReused(handle);
+ // Both reuses should fall into the same bucket since they share the same
+ // connect timing.
+ auto buckets = histograms.GetAllSamples(kIdleSocketTimeSavingHistogram);
+ EXPECT_EQ(1u, buckets.size());
+ // The sample should be strictly greater than 0 because kPendingConnectDelay
+ // is used.
+ EXPECT_LT(0, buckets[0].min);
+ EXPECT_EQ(1, buckets[0].count);
+
+ // Releases socket.
+ handle.Reset();
+
+ // Request a socket again. This should reuse the old socket and complete
+ // synchronously.
+ ClientSocketHandle handle2;
+ rv = handle2.Init("a", params_, LOWEST,
+ ClientSocketPool::RespectLimits::ENABLED,
+ CompletionCallback(), pool_.get(), log.bound());
+ ASSERT_THAT(rv, IsOk());
+ EXPECT_TRUE(handle2.is_reused());
+ TestLoadTimingInfoConnectedReused(handle2);
+
+ ASSERT_TRUE(pool_->HasGroup("a"));
+ EXPECT_EQ(0, pool_->IdleSocketCountInGroup("a"));
+ EXPECT_EQ(1, pool_->NumActiveSocketsInGroup("a"));
+ EXPECT_THAT(histograms.GetAllSamples(kIdleSocketFateHistogram),
+ testing::ElementsAre(
+ base::Bucket(/*IDLE_SOCKET_FATE_REUSE_REUSED=*/0, 2)));
+ // Both reuses should fall into the same bucket since they share the same
+ // connect timing.
+ buckets = histograms.GetAllSamples(kIdleSocketTimeSavingHistogram);
+ EXPECT_EQ(1u, buckets.size());
+ EXPECT_LT(0, buckets[0].min);
+ EXPECT_EQ(2, buckets[0].count);
TestNetLogEntry::List entries;
log.GetEntries(&entries);
@@ -2296,6 +2397,7 @@ TEST_F(ClientSocketPoolBaseTest, MAYBE_CleanupTimedOutIdleSocketsNoReuse) {
testing::ElementsAre(
base::Bucket(/*IDLE_SOCKET_FATE_CLEAN_UP_TIMED_OUT_REUSED=*/5, 1),
base::Bucket(/*IDLE_SOCKET_FATE_CLEAN_UP_TIMED_OUT_UNUSED=*/6, 1)));
+ EXPECT_TRUE(histograms.GetAllSamples(kIdleSocketTimeSavingHistogram).empty());
// Make sure the idle socket is closed.
ASSERT_TRUE(pool_->HasGroup("a"));
@@ -2458,6 +2560,7 @@ TEST_F(ClientSocketPoolBaseTest,
EXPECT_THAT(histograms.GetAllSamples(kIdleSocketFateHistogram),
testing::ElementsAre(
base::Bucket(/*IDLE_SOCKET_FATE_RELEASE_UNUSABLE=*/3, 2)));
+ EXPECT_TRUE(histograms.GetAllSamples(kIdleSocketTimeSavingHistogram).empty());
EXPECT_EQ(1, GetOrderOfRequest(1));
EXPECT_EQ(2, GetOrderOfRequest(2));
@@ -3672,6 +3775,7 @@ TEST_F(ClientSocketPoolBaseTest, ForciblyCloseIdleSocketsHeldByLayeredPool) {
EXPECT_THAT(histograms.GetAllSamples(kIdleSocketFateHistogram),
testing::ElementsAre(
base::Bucket(/*IDLE_SOCKET_FATE_RELEASE_UNUSABLE=*/3, 1)));
+ EXPECT_TRUE(histograms.GetAllSamples(kIdleSocketTimeSavingHistogram).empty());
}
// Tests the basic case of closing an idle socket in a higher layered pool when
« no previous file with comments | « net/socket/client_socket_pool_base.cc ('k') | net/socket/socket_test_util.h » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698