| Index: net/quic/core/congestion_control/cubic_bytes_test.cc
|
| diff --git a/net/quic/core/congestion_control/cubic_bytes_test.cc b/net/quic/core/congestion_control/cubic_bytes_test.cc
|
| index 0ee3b43c993f7b3595471c53fd7ef72ba094325e..529af4f9b0d9fce4a9c5f5abd6685eb828bab4bd 100644
|
| --- a/net/quic/core/congestion_control/cubic_bytes_test.cc
|
| +++ b/net/quic/core/congestion_control/cubic_bytes_test.cc
|
| @@ -20,14 +20,16 @@ const float kNConnectionBeta = (kNumConnections - 1 + kBeta) / kNumConnections;
|
| const float kNConnectionAlpha = 3 * kNumConnections * kNumConnections *
|
| (1 - kNConnectionBeta) / (1 + kNConnectionBeta);
|
|
|
| -class CubicBytesTest : public ::testing::TestWithParam<bool> {
|
| +class CubicBytesTest : public ::testing::Test {
|
| protected:
|
| CubicBytesTest()
|
| : one_ms_(QuicTime::Delta::FromMilliseconds(1)),
|
| hundred_ms_(QuicTime::Delta::FromMilliseconds(100)),
|
| - cubic_(&clock_),
|
| - fix_convex_mode_(GetParam()) {
|
| - cubic_.SetFixConvexMode(fix_convex_mode_);
|
| + cubic_(&clock_) {
|
| + cubic_.SetFixConvexMode(
|
| + FLAGS_quic_fix_cubic_convex_mode);
|
| + cubic_.SetFixCubicQuantization(
|
| + FLAGS_quic_fix_cubic_bytes_quantization);
|
| }
|
|
|
| QuicByteCount RenoCwndInBytes(QuicByteCount current_cwnd) {
|
| @@ -43,11 +45,14 @@ class CubicBytesTest : public ::testing::TestWithParam<bool> {
|
| }
|
|
|
| QuicByteCount CubicConvexCwndInBytes(QuicByteCount initial_cwnd,
|
| - int64_t rtt_ms,
|
| - int64_t elapsed_time_ms) {
|
| - const int64_t offset = ((elapsed_time_ms + rtt_ms) << 10) / 1000;
|
| + QuicTime::Delta rtt,
|
| + QuicTime::Delta elapsed_time) {
|
| + const int64_t offset =
|
| + ((elapsed_time + rtt).ToMicroseconds() << 10) / 1000000;
|
| const QuicByteCount delta_congestion_window =
|
| - ((410 * offset * offset * offset) >> 40) * kDefaultTCPMSS;
|
| + FLAGS_quic_fix_cubic_bytes_quantization
|
| + ? ((410 * offset * offset * offset) * kDefaultTCPMSS >> 40)
|
| + : ((410 * offset * offset * offset) >> 40) * kDefaultTCPMSS;
|
| const QuicByteCount cubic_cwnd = initial_cwnd + delta_congestion_window;
|
| return cubic_cwnd;
|
| }
|
| @@ -56,17 +61,14 @@ class CubicBytesTest : public ::testing::TestWithParam<bool> {
|
| const QuicTime::Delta hundred_ms_;
|
| MockClock clock_;
|
| CubicBytes cubic_;
|
| - bool fix_convex_mode_;
|
| };
|
|
|
| -INSTANTIATE_TEST_CASE_P(CubicBytesTests, CubicBytesTest, testing::Bool());
|
| -
|
| // TODO(jokulik): The original "AboveOrigin" test, below, is very
|
| // loose. It's nearly impossible to make the test tighter without
|
| // deploying the fix for convex mode. Once cubic convex is deployed,
|
| // replace "AboveOrigin" with this test.
|
| -TEST_P(CubicBytesTest, AboveOriginWithTighterBounds) {
|
| - if (!fix_convex_mode_) {
|
| +TEST_F(CubicBytesTest, AboveOriginWithTighterBounds) {
|
| + if (!FLAGS_quic_fix_cubic_convex_mode) {
|
| // Without convex mode fixed, the behavior of the algorithm is so
|
| // far from expected, there's no point in doing a tighter test.
|
| return;
|
| @@ -89,8 +91,13 @@ TEST_P(CubicBytesTest, AboveOriginWithTighterBounds) {
|
| // The maximum number of expected Reno RTTs is calculated by
|
| // finding the point where the cubic curve and the reno curve meet.
|
| const int max_reno_rtts =
|
| - std::sqrt(kNConnectionAlpha / (.4 * rtt_min_s * rtt_min_s * rtt_min_s)) -
|
| - 1;
|
| + FLAGS_quic_fix_cubic_bytes_quantization
|
| + ? std::sqrt(kNConnectionAlpha /
|
| + (.4 * rtt_min_s * rtt_min_s * rtt_min_s)) -
|
| + 2
|
| + : std::sqrt(kNConnectionAlpha /
|
| + (.4 * rtt_min_s * rtt_min_s * rtt_min_s)) -
|
| + 1;
|
| for (int i = 0; i < max_reno_rtts; ++i) {
|
| // Alternatively, we expect it to increase by one, every time we
|
| // receive current_cwnd/Alpha acks back. (This is another way of
|
| @@ -116,23 +123,23 @@ TEST_P(CubicBytesTest, AboveOriginWithTighterBounds) {
|
| clock_.AdvanceTime(hundred_ms_);
|
| }
|
|
|
| - // Because our byte-wise Reno under-estimates the cwnd, we switch to
|
| - // conservative increases for a few acks before switching to true
|
| - // cubic increases.
|
| - for (int i = 0; i < 3; ++i) {
|
| - const QuicByteCount next_expected_cwnd =
|
| - ConservativeCwndInBytes(current_cwnd);
|
| - current_cwnd =
|
| - cubic_.CongestionWindowAfterAck(kDefaultTCPMSS, current_cwnd, rtt_min);
|
| - ASSERT_EQ(next_expected_cwnd, current_cwnd);
|
| + if (!FLAGS_quic_fix_cubic_bytes_quantization) {
|
| + // Because our byte-wise Reno under-estimates the cwnd, we switch to
|
| + // conservative increases for a few acks before switching to true
|
| + // cubic increases.
|
| + for (int i = 0; i < 3; ++i) {
|
| + const QuicByteCount next_expected_cwnd =
|
| + ConservativeCwndInBytes(current_cwnd);
|
| + current_cwnd = cubic_.CongestionWindowAfterAck(kDefaultTCPMSS,
|
| + current_cwnd, rtt_min);
|
| + ASSERT_EQ(next_expected_cwnd, current_cwnd);
|
| + }
|
| }
|
|
|
| for (int i = 0; i < 54; ++i) {
|
| const uint64_t max_acks_this_epoch = current_cwnd / kDefaultTCPMSS;
|
| - const int elapsed_time_ms =
|
| - (clock_.ApproximateNow() - initial_time).ToMilliseconds();
|
| const QuicByteCount expected_cwnd = CubicConvexCwndInBytes(
|
| - initial_cwnd, rtt_min.ToMilliseconds(), elapsed_time_ms);
|
| + initial_cwnd, rtt_min, (clock_.ApproximateNow() - initial_time));
|
| current_cwnd =
|
| cubic_.CongestionWindowAfterAck(kDefaultTCPMSS, current_cwnd, rtt_min);
|
| ASSERT_EQ(expected_cwnd, current_cwnd);
|
| @@ -144,24 +151,32 @@ TEST_P(CubicBytesTest, AboveOriginWithTighterBounds) {
|
| }
|
| clock_.AdvanceTime(hundred_ms_);
|
| }
|
| - const int elapsed_time_ms =
|
| - (clock_.ApproximateNow() - initial_time).ToMilliseconds();
|
| const QuicByteCount expected_cwnd = CubicConvexCwndInBytes(
|
| - initial_cwnd, rtt_min.ToMilliseconds(), elapsed_time_ms);
|
| + initial_cwnd, rtt_min, (clock_.ApproximateNow() - initial_time));
|
| current_cwnd =
|
| cubic_.CongestionWindowAfterAck(kDefaultTCPMSS, current_cwnd, rtt_min);
|
| ASSERT_EQ(expected_cwnd, current_cwnd);
|
| }
|
|
|
| -TEST_P(CubicBytesTest, AboveOrigin) {
|
| +TEST_F(CubicBytesTest, AboveOrigin) {
|
| + if (!FLAGS_quic_fix_cubic_convex_mode &&
|
| + FLAGS_quic_fix_cubic_bytes_quantization) {
|
| + // Without convex mode fixed, the behavior of the algorithm does
|
| + // not fit the exact pattern of this test.
|
| + // TODO(jokulik): Once the convex mode fix becomes default, this
|
| + // test can be replaced with the better AboveOriginWithTighterBounds
|
| + // test.
|
| + return;
|
| + }
|
| // Convex growth.
|
| const QuicTime::Delta rtt_min = hundred_ms_;
|
| QuicByteCount current_cwnd = 10 * kDefaultTCPMSS;
|
| // Without the signed-integer, cubic-convex fix, we start out in the
|
| // wrong mode.
|
| - QuicPacketCount expected_cwnd = fix_convex_mode_
|
| - ? RenoCwndInBytes(current_cwnd)
|
| - : ConservativeCwndInBytes(current_cwnd);
|
| + QuicPacketCount expected_cwnd =
|
| + FLAGS_quic_fix_cubic_convex_mode
|
| + ? RenoCwndInBytes(current_cwnd)
|
| + : ConservativeCwndInBytes(current_cwnd);
|
| // Initialize the state.
|
| clock_.AdvanceTime(one_ms_);
|
| ASSERT_EQ(expected_cwnd, cubic_.CongestionWindowAfterAck(
|
| @@ -180,7 +195,7 @@ TEST_P(CubicBytesTest, AboveOrigin) {
|
| clock_.AdvanceTime(hundred_ms_);
|
| current_cwnd =
|
| cubic_.CongestionWindowAfterAck(kDefaultTCPMSS, current_cwnd, rtt_min);
|
| - if (fix_convex_mode_) {
|
| + if (FLAGS_quic_fix_cubic_convex_mode) {
|
| // When we fix convex mode and the uint64 arithmetic, we
|
| // increase the expected_cwnd only after after the first 100ms,
|
| // rather than after the initial 1ms.
|
| @@ -210,20 +225,72 @@ TEST_P(CubicBytesTest, AboveOrigin) {
|
| initial_cwnd / kDefaultTCPMSS +
|
| (elapsed_time_s * elapsed_time_s * elapsed_time_s * 410) / 1024;
|
| // Without the convex mode fix, the result is off by one.
|
| - if (!fix_convex_mode_) {
|
| + if (!FLAGS_quic_fix_cubic_convex_mode) {
|
| ++expected_cwnd;
|
| }
|
| EXPECT_EQ(expected_cwnd, current_cwnd / kDefaultTCPMSS);
|
| }
|
|
|
| -TEST_P(CubicBytesTest, LossEvents) {
|
| +// Constructs an artificial scenario to ensure that cubic-convex
|
| +// increases are truly fine-grained:
|
| +//
|
| +// - After starting the epoch, this test advances the elapsed time
|
| +// sufficiently far that cubic will do small increases at less than
|
| +// MaxCubicTimeInterval() intervals.
|
| +//
|
| + // - Sets an artificially large initial cwnd to prevent Reno growth
|
| + // from over-taking the fine-grained cubic-convex increases on every ack.
|
| +TEST_F(CubicBytesTest, AboveOriginFineGrainedCubing) {
|
| + if (!FLAGS_quic_fix_cubic_convex_mode ||
|
| + !FLAGS_quic_fix_cubic_bytes_quantization) {
|
| + // Without these two fixes, this test cannot pass.
|
| + return;
|
| + }
|
| +
|
| + // Start the test with an artificially large cwnd to prevent Reno
|
| + // from over-taking cubic.
|
| + QuicByteCount current_cwnd = 1000 * kDefaultTCPMSS;
|
| + const QuicByteCount initial_cwnd = current_cwnd;
|
| + const QuicTime::Delta rtt_min = hundred_ms_;
|
| + clock_.AdvanceTime(one_ms_);
|
| + QuicTime initial_time = clock_.ApproximateNow();
|
| +
|
| + // Start the epoch and then artificially advance the time.
|
| + current_cwnd =
|
| + cubic_.CongestionWindowAfterAck(kDefaultTCPMSS, current_cwnd, rtt_min);
|
| + clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(600));
|
| + current_cwnd =
|
| + cubic_.CongestionWindowAfterAck(kDefaultTCPMSS, current_cwnd, rtt_min);
|
| +
|
| + // We expect the algorithm to perform only non-zero, fine-grained cubic
|
| + // increases on every ack in this case.
|
| + for (int i = 0; i < 100; ++i) {
|
| + clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(10));
|
| + const QuicByteCount expected_cwnd = CubicConvexCwndInBytes(
|
| + initial_cwnd, rtt_min, (clock_.ApproximateNow() - initial_time));
|
| + const QuicByteCount next_cwnd =
|
| + cubic_.CongestionWindowAfterAck(kDefaultTCPMSS, current_cwnd, rtt_min);
|
| + // Make sure we are performing cubic increases.
|
| + ASSERT_EQ(expected_cwnd, next_cwnd);
|
| + // Make sure that these are non-zero increases of well under a
|
| + // tenth of a packet (kDefaultTCPMSS * .1) each.
|
| + ASSERT_GT(next_cwnd, current_cwnd);
|
| + const QuicByteCount cwnd_delta = next_cwnd - current_cwnd;
|
| + ASSERT_GT(kDefaultTCPMSS * .1, cwnd_delta);
|
| +
|
| + current_cwnd = next_cwnd;
|
| + }
|
| +}
|
| +
|
| +TEST_F(CubicBytesTest, LossEvents) {
|
| const QuicTime::Delta rtt_min = hundred_ms_;
|
| QuicByteCount current_cwnd = 422 * kDefaultTCPMSS;
|
| // Without the signed-integer, cubic-convex fix, we mistakenly
|
| // increment cwnd after only one_ms_ and a single ack.
|
| - QuicPacketCount expected_cwnd = fix_convex_mode_
|
| - ? RenoCwndInBytes(current_cwnd)
|
| - : current_cwnd + kDefaultTCPMSS / 2;
|
| + QuicPacketCount expected_cwnd =
|
| + FLAGS_quic_fix_cubic_convex_mode
|
| + ? RenoCwndInBytes(current_cwnd)
|
| + : current_cwnd + kDefaultTCPMSS / 2;
|
| // Initialize the state.
|
| clock_.AdvanceTime(one_ms_);
|
| EXPECT_EQ(expected_cwnd, cubic_.CongestionWindowAfterAck(
|
| @@ -236,15 +303,16 @@ TEST_P(CubicBytesTest, LossEvents) {
|
| cubic_.CongestionWindowAfterPacketLoss(current_cwnd));
|
| }
|
|
|
| -TEST_P(CubicBytesTest, BelowOrigin) {
|
| +TEST_F(CubicBytesTest, BelowOrigin) {
|
| // Concave growth.
|
| const QuicTime::Delta rtt_min = hundred_ms_;
|
| QuicByteCount current_cwnd = 422 * kDefaultTCPMSS;
|
| // Without the signed-integer, cubic-convex fix, we mistakenly
|
| // increment cwnd after only one_ms_ and a single ack.
|
| - QuicPacketCount expected_cwnd = fix_convex_mode_
|
| - ? RenoCwndInBytes(current_cwnd)
|
| - : current_cwnd + kDefaultTCPMSS / 2;
|
| + QuicPacketCount expected_cwnd =
|
| + FLAGS_quic_fix_cubic_convex_mode
|
| + ? RenoCwndInBytes(current_cwnd)
|
| + : current_cwnd + kDefaultTCPMSS / 2;
|
| // Initialize the state.
|
| clock_.AdvanceTime(one_ms_);
|
| EXPECT_EQ(expected_cwnd, cubic_.CongestionWindowAfterAck(
|
|
|