| OLD | NEW |
| 1 // Copyright (c) 2015 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2015 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "net/quic/core/congestion_control/cubic_bytes.h" | 5 #include "net/quic/core/congestion_control/cubic_bytes.h" |
| 6 | 6 |
| 7 #include <cstdint> | 7 #include <cstdint> |
| 8 | 8 |
| 9 #include "net/quic/core/quic_flags.h" | 9 #include "net/quic/core/quic_flags.h" |
| 10 #include "net/quic/platform/api/quic_str_cat.h" |
| 10 #include "net/quic/test_tools/mock_clock.h" | 11 #include "net/quic/test_tools/mock_clock.h" |
| 11 #include "testing/gtest/include/gtest/gtest.h" | 12 #include "testing/gtest/include/gtest/gtest.h" |
| 12 | 13 |
| 14 using std::string; |
| 15 |
| 13 namespace net { | 16 namespace net { |
| 14 namespace test { | 17 namespace test { |
| 18 namespace { |
| 15 | 19 |
| 16 const float kBeta = 0.7f; // Default Cubic backoff factor. | 20 const float kBeta = 0.7f; // Default Cubic backoff factor. |
| 17 const float kBetaLastMax = 0.85f; // Backoff factor for last max cwnd. | 21 const float kBetaLastMax = 0.85f; // Backoff factor for last max cwnd. |
| 18 const uint32_t kNumConnections = 2; | 22 const uint32_t kNumConnections = 2; |
| 19 const float kNConnectionBeta = (kNumConnections - 1 + kBeta) / kNumConnections; | 23 const float kNConnectionBeta = (kNumConnections - 1 + kBeta) / kNumConnections; |
| 20 const float kNConnectionBetaLastMax = | 24 const float kNConnectionBetaLastMax = |
| 21 (kNumConnections - 1 + kBetaLastMax) / kNumConnections; | 25 (kNumConnections - 1 + kBetaLastMax) / kNumConnections; |
| 22 const float kNConnectionAlpha = 3 * kNumConnections * kNumConnections * | 26 const float kNConnectionAlpha = 3 * kNumConnections * kNumConnections * |
| 23 (1 - kNConnectionBeta) / (1 + kNConnectionBeta); | 27 (1 - kNConnectionBeta) / (1 + kNConnectionBeta); |
| 24 | 28 |
| 25 class CubicBytesTest : public ::testing::Test { | 29 struct TestParams { |
| 30 TestParams(bool fix_convex_mode, |
| 31 bool fix_cubic_quantization, |
| 32 bool fix_beta_last_max) |
| 33 : fix_convex_mode(fix_convex_mode), |
| 34 fix_cubic_quantization(fix_cubic_quantization), |
| 35 fix_beta_last_max(fix_beta_last_max) {} |
| 36 |
| 37 friend std::ostream& operator<<(std::ostream& os, const TestParams& p) { |
| 38 os << "{ fix_convex_mode: " << p.fix_convex_mode |
| 39 << " fix_cubic_quantization: " << p.fix_cubic_quantization |
| 40 << " fix_beta_last_max: " << p.fix_beta_last_max; |
| 41 os << " }"; |
| 42 return os; |
| 43 } |
| 44 |
| 45 bool fix_convex_mode; |
| 46 bool fix_cubic_quantization; |
| 47 bool fix_beta_last_max; |
| 48 }; |
| 49 |
| 50 string TestParamToString(const testing::TestParamInfo<TestParams>& params) { |
| 51 return QuicStrCat("convex_mode_", params.param.fix_convex_mode, "_", |
| 52 "cubic_quantization_", params.param.fix_cubic_quantization, |
| 53 "_", "beta_last_max_", params.param.fix_beta_last_max); |
| 54 } |
| 55 |
| 56 std::vector<TestParams> GetTestParams() { |
| 57 std::vector<TestParams> params; |
| 58 for (bool fix_convex_mode : {true, false}) { |
| 59 for (bool fix_cubic_quantization : {true, false}) { |
| 60 for (bool fix_beta_last_max : {true, false}) { |
| 61 if (!FLAGS_quic_reloadable_flag_quic_fix_cubic_convex_mode && |
| 62 fix_convex_mode) { |
| 63 continue; |
| 64 } |
| 65 if (!FLAGS_quic_reloadable_flag_quic_fix_cubic_bytes_quantization && |
| 66 fix_cubic_quantization) { |
| 67 continue; |
| 68 } |
| 69 if (!FLAGS_quic_reloadable_flag_quic_fix_beta_last_max && |
| 70 fix_beta_last_max) { |
| 71 continue; |
| 72 } |
| 73 TestParams param(fix_convex_mode, fix_cubic_quantization, |
| 74 fix_beta_last_max); |
| 75 params.push_back(param); |
| 76 } |
| 77 } |
| 78 } |
| 79 return params; |
| 80 } |
| 81 |
| 82 } // namespace |
| 83 |
| 84 class CubicBytesTest : public ::testing::TestWithParam<TestParams> { |
| 26 protected: | 85 protected: |
| 27 CubicBytesTest() | 86 CubicBytesTest() |
| 28 : one_ms_(QuicTime::Delta::FromMilliseconds(1)), | 87 : one_ms_(QuicTime::Delta::FromMilliseconds(1)), |
| 29 hundred_ms_(QuicTime::Delta::FromMilliseconds(100)), | 88 hundred_ms_(QuicTime::Delta::FromMilliseconds(100)), |
| 30 cubic_(&clock_) { | 89 cubic_(&clock_) { |
| 31 cubic_.SetFixConvexMode( | 90 cubic_.SetFixConvexMode(GetParam().fix_convex_mode); |
| 32 FLAGS_quic_reloadable_flag_quic_fix_cubic_convex_mode); | 91 cubic_.SetFixCubicQuantization(GetParam().fix_cubic_quantization); |
| 33 cubic_.SetFixCubicQuantization( | 92 cubic_.SetFixBetaLastMax(GetParam().fix_beta_last_max); |
| 34 FLAGS_quic_reloadable_flag_quic_fix_cubic_bytes_quantization); | |
| 35 cubic_.SetFixBetaLastMax(FLAGS_quic_reloadable_flag_quic_fix_beta_last_max); | |
| 36 } | 93 } |
| 37 | 94 |
| 38 QuicByteCount RenoCwndInBytes(QuicByteCount current_cwnd) { | 95 QuicByteCount RenoCwndInBytes(QuicByteCount current_cwnd) { |
| 39 QuicByteCount reno_estimated_cwnd = | 96 QuicByteCount reno_estimated_cwnd = |
| 40 current_cwnd + | 97 current_cwnd + |
| 41 kDefaultTCPMSS * (kNConnectionAlpha * kDefaultTCPMSS) / current_cwnd; | 98 kDefaultTCPMSS * (kNConnectionAlpha * kDefaultTCPMSS) / current_cwnd; |
| 42 return reno_estimated_cwnd; | 99 return reno_estimated_cwnd; |
| 43 } | 100 } |
| 44 | 101 |
| 45 QuicByteCount ConservativeCwndInBytes(QuicByteCount current_cwnd) { | 102 QuicByteCount ConservativeCwndInBytes(QuicByteCount current_cwnd) { |
| 46 QuicByteCount conservative_cwnd = current_cwnd + kDefaultTCPMSS / 2; | 103 QuicByteCount conservative_cwnd = current_cwnd + kDefaultTCPMSS / 2; |
| 47 return conservative_cwnd; | 104 return conservative_cwnd; |
| 48 } | 105 } |
| 49 | 106 |
| 50 QuicByteCount CubicConvexCwndInBytes(QuicByteCount initial_cwnd, | 107 QuicByteCount CubicConvexCwndInBytes(QuicByteCount initial_cwnd, |
| 51 QuicTime::Delta rtt, | 108 QuicTime::Delta rtt, |
| 52 QuicTime::Delta elapsed_time) { | 109 QuicTime::Delta elapsed_time) { |
| 53 const int64_t offset = | 110 const int64_t offset = |
| 54 ((elapsed_time + rtt).ToMicroseconds() << 10) / 1000000; | 111 ((elapsed_time + rtt).ToMicroseconds() << 10) / 1000000; |
| 55 const QuicByteCount delta_congestion_window = | 112 const QuicByteCount delta_congestion_window = |
| 56 FLAGS_quic_reloadable_flag_quic_fix_cubic_bytes_quantization | 113 GetParam().fix_cubic_quantization |
| 57 ? ((410 * offset * offset * offset) * kDefaultTCPMSS >> 40) | 114 ? ((410 * offset * offset * offset) * kDefaultTCPMSS >> 40) |
| 58 : ((410 * offset * offset * offset) >> 40) * kDefaultTCPMSS; | 115 : ((410 * offset * offset * offset) >> 40) * kDefaultTCPMSS; |
| 59 const QuicByteCount cubic_cwnd = initial_cwnd + delta_congestion_window; | 116 const QuicByteCount cubic_cwnd = initial_cwnd + delta_congestion_window; |
| 60 return cubic_cwnd; | 117 return cubic_cwnd; |
| 61 } | 118 } |
| 62 | 119 |
| 63 QuicByteCount LastMaxCongestionWindow() { | 120 QuicByteCount LastMaxCongestionWindow() { |
| 64 return cubic_.last_max_congestion_window(); | 121 return cubic_.last_max_congestion_window(); |
| 65 } | 122 } |
| 66 | 123 |
| 67 const QuicTime::Delta one_ms_; | 124 const QuicTime::Delta one_ms_; |
| 68 const QuicTime::Delta hundred_ms_; | 125 const QuicTime::Delta hundred_ms_; |
| 69 MockClock clock_; | 126 MockClock clock_; |
| 70 CubicBytes cubic_; | 127 CubicBytes cubic_; |
| 71 }; | 128 }; |
| 72 | 129 |
| 130 INSTANTIATE_TEST_CASE_P(CubicBytesTests, |
| 131 CubicBytesTest, |
| 132 ::testing::ValuesIn(GetTestParams()), |
| 133 TestParamToString); |
| 134 |
| 73 // TODO(jokulik): The original "AboveOrigin" test, below, is very | 135 // TODO(jokulik): The original "AboveOrigin" test, below, is very |
| 74 // loose. It's nearly impossible to make the test tighter without | 136 // loose. It's nearly impossible to make the test tighter without |
| 75 // deploying the fix for convex mode. Once cubic convex is deployed, | 137 // deploying the fix for convex mode. Once cubic convex is deployed, |
| 76 // replace "AboveOrigin" with this test. | 138 // replace "AboveOrigin" with this test. |
| 77 TEST_F(CubicBytesTest, AboveOriginWithTighterBounds) { | 139 TEST_P(CubicBytesTest, AboveOriginWithTighterBounds) { |
| 78 if (!FLAGS_quic_reloadable_flag_quic_fix_cubic_convex_mode) { | 140 if (!GetParam().fix_convex_mode) { |
| 79 // Without convex mode fixed, the behavior of the algorithm is so | 141 // Without convex mode fixed, the behavior of the algorithm is so |
| 80 // far from expected, there's no point in doing a tighter test. | 142 // far from expected, there's no point in doing a tighter test. |
| 81 return; | 143 return; |
| 82 } | 144 } |
| 83 // Convex growth. | 145 // Convex growth. |
| 84 const QuicTime::Delta rtt_min = hundred_ms_; | 146 const QuicTime::Delta rtt_min = hundred_ms_; |
| 85 int64_t rtt_min_ms = rtt_min.ToMilliseconds(); | 147 int64_t rtt_min_ms = rtt_min.ToMilliseconds(); |
| 86 float rtt_min_s = rtt_min_ms / 1000.0; | 148 float rtt_min_s = rtt_min_ms / 1000.0; |
| 87 QuicByteCount current_cwnd = 10 * kDefaultTCPMSS; | 149 QuicByteCount current_cwnd = 10 * kDefaultTCPMSS; |
| 88 const QuicByteCount initial_cwnd = current_cwnd; | 150 const QuicByteCount initial_cwnd = current_cwnd; |
| 89 | 151 |
| 90 clock_.AdvanceTime(one_ms_); | 152 clock_.AdvanceTime(one_ms_); |
| 91 const QuicTime initial_time = clock_.ApproximateNow(); | 153 const QuicTime initial_time = clock_.ApproximateNow(); |
| 92 const QuicByteCount expected_first_cwnd = RenoCwndInBytes(current_cwnd); | 154 const QuicByteCount expected_first_cwnd = RenoCwndInBytes(current_cwnd); |
| 93 current_cwnd = cubic_.CongestionWindowAfterAck(kDefaultTCPMSS, current_cwnd, | 155 current_cwnd = cubic_.CongestionWindowAfterAck(kDefaultTCPMSS, current_cwnd, |
| 94 rtt_min, initial_time); | 156 rtt_min, initial_time); |
| 95 ASSERT_EQ(expected_first_cwnd, current_cwnd); | 157 ASSERT_EQ(expected_first_cwnd, current_cwnd); |
| 96 | 158 |
| 97 // Normal TCP phase. | 159 // Normal TCP phase. |
| 98 // The maximum number of expected Reno RTTs is calculated by | 160 // The maximum number of expected Reno RTTs is calculated by |
| 99 // finding the point where the cubic curve and the reno curve meet. | 161 // finding the point where the cubic curve and the reno curve meet. |
| 100 const int max_reno_rtts = | 162 const int max_reno_rtts = |
| 101 FLAGS_quic_reloadable_flag_quic_fix_cubic_bytes_quantization | 163 GetParam().fix_cubic_quantization |
| 102 ? std::sqrt(kNConnectionAlpha / | 164 ? std::sqrt(kNConnectionAlpha / |
| 103 (.4 * rtt_min_s * rtt_min_s * rtt_min_s)) - | 165 (.4 * rtt_min_s * rtt_min_s * rtt_min_s)) - |
| 104 2 | 166 2 |
| 105 : std::sqrt(kNConnectionAlpha / | 167 : std::sqrt(kNConnectionAlpha / |
| 106 (.4 * rtt_min_s * rtt_min_s * rtt_min_s)) - | 168 (.4 * rtt_min_s * rtt_min_s * rtt_min_s)) - |
| 107 1; | 169 1; |
| 108 for (int i = 0; i < max_reno_rtts; ++i) { | 170 for (int i = 0; i < max_reno_rtts; ++i) { |
| 109 // Alternatively, we expect it to increase by one, every time we | 171 // Alternatively, we expect it to increase by one, every time we |
| 110 // receive current_cwnd/Alpha acks back. (This is another way of | 172 // receive current_cwnd/Alpha acks back. (This is another way of |
| 111 // saying we expect cwnd to increase by approximately Alpha once | 173 // saying we expect cwnd to increase by approximately Alpha once |
| (...skipping 11 matching lines...) Expand all Loading... |
| 123 // Our byte-wise Reno implementation is an estimate. We expect | 185 // Our byte-wise Reno implementation is an estimate. We expect |
| 124 // the cwnd to increase by approximately one MSS every | 186 // the cwnd to increase by approximately one MSS every |
| 125 // cwnd/kDefaultTCPMSS/Alpha acks, but it may be off by as much as | 187 // cwnd/kDefaultTCPMSS/Alpha acks, but it may be off by as much as |
| 126 // half a packet for smaller values of current_cwnd. | 188 // half a packet for smaller values of current_cwnd. |
| 127 const QuicByteCount cwnd_change_this_epoch = | 189 const QuicByteCount cwnd_change_this_epoch = |
| 128 current_cwnd - initial_cwnd_this_epoch; | 190 current_cwnd - initial_cwnd_this_epoch; |
| 129 ASSERT_NEAR(kDefaultTCPMSS, cwnd_change_this_epoch, kDefaultTCPMSS / 2); | 191 ASSERT_NEAR(kDefaultTCPMSS, cwnd_change_this_epoch, kDefaultTCPMSS / 2); |
| 130 clock_.AdvanceTime(hundred_ms_); | 192 clock_.AdvanceTime(hundred_ms_); |
| 131 } | 193 } |
| 132 | 194 |
| 133 if (!FLAGS_quic_reloadable_flag_quic_fix_cubic_bytes_quantization) { | 195 if (!GetParam().fix_cubic_quantization) { |
| 134 // Because our byte-wise Reno under-estimates the cwnd, we switch to | 196 // Because our byte-wise Reno under-estimates the cwnd, we switch to |
| 135 // conservative increases for a few acks before switching to true | 197 // conservative increases for a few acks before switching to true |
| 136 // cubic increases. | 198 // cubic increases. |
| 137 for (int i = 0; i < 3; ++i) { | 199 for (int i = 0; i < 3; ++i) { |
| 138 const QuicByteCount next_expected_cwnd = | 200 const QuicByteCount next_expected_cwnd = |
| 139 ConservativeCwndInBytes(current_cwnd); | 201 ConservativeCwndInBytes(current_cwnd); |
| 140 current_cwnd = cubic_.CongestionWindowAfterAck( | 202 current_cwnd = cubic_.CongestionWindowAfterAck( |
| 141 kDefaultTCPMSS, current_cwnd, rtt_min, clock_.ApproximateNow()); | 203 kDefaultTCPMSS, current_cwnd, rtt_min, clock_.ApproximateNow()); |
| 142 ASSERT_EQ(next_expected_cwnd, current_cwnd); | 204 ASSERT_EQ(next_expected_cwnd, current_cwnd); |
| 143 } | 205 } |
| (...skipping 15 matching lines...) Expand all Loading... |
| 159 } | 221 } |
| 160 clock_.AdvanceTime(hundred_ms_); | 222 clock_.AdvanceTime(hundred_ms_); |
| 161 } | 223 } |
| 162 const QuicByteCount expected_cwnd = CubicConvexCwndInBytes( | 224 const QuicByteCount expected_cwnd = CubicConvexCwndInBytes( |
| 163 initial_cwnd, rtt_min, (clock_.ApproximateNow() - initial_time)); | 225 initial_cwnd, rtt_min, (clock_.ApproximateNow() - initial_time)); |
| 164 current_cwnd = cubic_.CongestionWindowAfterAck( | 226 current_cwnd = cubic_.CongestionWindowAfterAck( |
| 165 kDefaultTCPMSS, current_cwnd, rtt_min, clock_.ApproximateNow()); | 227 kDefaultTCPMSS, current_cwnd, rtt_min, clock_.ApproximateNow()); |
| 166 ASSERT_EQ(expected_cwnd, current_cwnd); | 228 ASSERT_EQ(expected_cwnd, current_cwnd); |
| 167 } | 229 } |
| 168 | 230 |
| 169 TEST_F(CubicBytesTest, AboveOrigin) { | 231 TEST_P(CubicBytesTest, AboveOrigin) { |
| 170 if (!FLAGS_quic_reloadable_flag_quic_fix_cubic_convex_mode && | 232 if (!GetParam().fix_convex_mode && GetParam().fix_cubic_quantization) { |
| 171 FLAGS_quic_reloadable_flag_quic_fix_cubic_bytes_quantization) { | |
| 172 // Without convex mode fixed, the behavior of the algorithm does | 233 // Without convex mode fixed, the behavior of the algorithm does |
| 173 // not fit the exact pattern of this test. | 234 // not fit the exact pattern of this test. |
| 174 // TODO(jokulik): Once the convex mode fix becomes default, this | 235 // TODO(jokulik): Once the convex mode fix becomes default, this |
| 175 // test can be replaced with the better AboveOriginTighterBounds | 236 // test can be replaced with the better AboveOriginTighterBounds |
| 176 // test. | 237 // test. |
| 177 return; | 238 return; |
| 178 } | 239 } |
| 179 // Convex growth. | 240 // Convex growth. |
| 180 const QuicTime::Delta rtt_min = hundred_ms_; | 241 const QuicTime::Delta rtt_min = hundred_ms_; |
| 181 QuicByteCount current_cwnd = 10 * kDefaultTCPMSS; | 242 QuicByteCount current_cwnd = 10 * kDefaultTCPMSS; |
| 182 // Without the signed-integer, cubic-convex fix, we start out in the | 243 // Without the signed-integer, cubic-convex fix, we start out in the |
| 183 // wrong mode. | 244 // wrong mode. |
| 184 QuicPacketCount expected_cwnd = | 245 QuicPacketCount expected_cwnd = GetParam().fix_convex_mode |
| 185 FLAGS_quic_reloadable_flag_quic_fix_cubic_convex_mode | 246 ? RenoCwndInBytes(current_cwnd) |
| 186 ? RenoCwndInBytes(current_cwnd) | 247 : ConservativeCwndInBytes(current_cwnd); |
| 187 : ConservativeCwndInBytes(current_cwnd); | |
| 188 // Initialize the state. | 248 // Initialize the state. |
| 189 clock_.AdvanceTime(one_ms_); | 249 clock_.AdvanceTime(one_ms_); |
| 190 ASSERT_EQ(expected_cwnd, | 250 ASSERT_EQ(expected_cwnd, |
| 191 cubic_.CongestionWindowAfterAck(kDefaultTCPMSS, current_cwnd, | 251 cubic_.CongestionWindowAfterAck(kDefaultTCPMSS, current_cwnd, |
| 192 rtt_min, clock_.ApproximateNow())); | 252 rtt_min, clock_.ApproximateNow())); |
| 193 current_cwnd = expected_cwnd; | 253 current_cwnd = expected_cwnd; |
| 194 const QuicPacketCount initial_cwnd = expected_cwnd; | 254 const QuicPacketCount initial_cwnd = expected_cwnd; |
| 195 // Normal TCP phase. | 255 // Normal TCP phase. |
| 196 for (int i = 0; i < 48; ++i) { | 256 for (int i = 0; i < 48; ++i) { |
| 197 for (QuicPacketCount n = 1; | 257 for (QuicPacketCount n = 1; |
| 198 n < current_cwnd / kDefaultTCPMSS / kNConnectionAlpha; ++n) { | 258 n < current_cwnd / kDefaultTCPMSS / kNConnectionAlpha; ++n) { |
| 199 // Call once per ACK. | 259 // Call once per ACK. |
| 200 ASSERT_NEAR(current_cwnd, cubic_.CongestionWindowAfterAck( | 260 ASSERT_NEAR(current_cwnd, cubic_.CongestionWindowAfterAck( |
| 201 kDefaultTCPMSS, current_cwnd, rtt_min, | 261 kDefaultTCPMSS, current_cwnd, rtt_min, |
| 202 clock_.ApproximateNow()), | 262 clock_.ApproximateNow()), |
| 203 kDefaultTCPMSS); | 263 kDefaultTCPMSS); |
| 204 } | 264 } |
| 205 clock_.AdvanceTime(hundred_ms_); | 265 clock_.AdvanceTime(hundred_ms_); |
| 206 current_cwnd = cubic_.CongestionWindowAfterAck( | 266 current_cwnd = cubic_.CongestionWindowAfterAck( |
| 207 kDefaultTCPMSS, current_cwnd, rtt_min, clock_.ApproximateNow()); | 267 kDefaultTCPMSS, current_cwnd, rtt_min, clock_.ApproximateNow()); |
| 208 if (FLAGS_quic_reloadable_flag_quic_fix_cubic_convex_mode) { | 268 if (GetParam().fix_convex_mode) { |
| 209 // When we fix convex mode and the uint64 arithmetic, we | 269 // When we fix convex mode and the uint64 arithmetic, we |
| 210 // increase the expected_cwnd only after the first 100ms, | 270 // increase the expected_cwnd only after the first 100ms, |
| 211 // rather than after the initial 1ms. | 271 // rather than after the initial 1ms. |
| 212 expected_cwnd += kDefaultTCPMSS; | 272 expected_cwnd += kDefaultTCPMSS; |
| 213 ASSERT_NEAR(expected_cwnd, current_cwnd, kDefaultTCPMSS); | 273 ASSERT_NEAR(expected_cwnd, current_cwnd, kDefaultTCPMSS); |
| 214 } else { | 274 } else { |
| 215 ASSERT_NEAR(expected_cwnd, current_cwnd, kDefaultTCPMSS); | 275 ASSERT_NEAR(expected_cwnd, current_cwnd, kDefaultTCPMSS); |
| 216 expected_cwnd += kDefaultTCPMSS; | 276 expected_cwnd += kDefaultTCPMSS; |
| 217 } | 277 } |
| 218 } | 278 } |
| (...skipping 10 matching lines...) Expand all Loading... |
| 229 current_cwnd = cubic_.CongestionWindowAfterAck( | 289 current_cwnd = cubic_.CongestionWindowAfterAck( |
| 230 kDefaultTCPMSS, current_cwnd, rtt_min, clock_.ApproximateNow()); | 290 kDefaultTCPMSS, current_cwnd, rtt_min, clock_.ApproximateNow()); |
| 231 } | 291 } |
| 232 // Total time elapsed so far; add min_rtt (0.1s) here as well. | 292 // Total time elapsed so far; add min_rtt (0.1s) here as well. |
| 233 float elapsed_time_s = 10.0f + 0.1f; | 293 float elapsed_time_s = 10.0f + 0.1f; |
| 234 // |expected_cwnd| is initial value of cwnd + K * t^3, where K = 0.4. | 294 // |expected_cwnd| is initial value of cwnd + K * t^3, where K = 0.4. |
| 235 expected_cwnd = | 295 expected_cwnd = |
| 236 initial_cwnd / kDefaultTCPMSS + | 296 initial_cwnd / kDefaultTCPMSS + |
| 237 (elapsed_time_s * elapsed_time_s * elapsed_time_s * 410) / 1024; | 297 (elapsed_time_s * elapsed_time_s * elapsed_time_s * 410) / 1024; |
| 238 // Without the convex mode fix, the result is off by one. | 298 // Without the convex mode fix, the result is off by one. |
| 239 if (!FLAGS_quic_reloadable_flag_quic_fix_cubic_convex_mode) { | 299 if (!GetParam().fix_convex_mode) { |
| 240 ++expected_cwnd; | 300 ++expected_cwnd; |
| 241 } | 301 } |
| 242 EXPECT_EQ(expected_cwnd, current_cwnd / kDefaultTCPMSS); | 302 EXPECT_EQ(expected_cwnd, current_cwnd / kDefaultTCPMSS); |
| 243 } | 303 } |
| 244 | 304 |
| 245 // Constructs an artificial scenario to ensure that cubic-convex | 305 // Constructs an artificial scenario to ensure that cubic-convex |
| 246 // increases are truly fine-grained: | 306 // increases are truly fine-grained: |
| 247 // | 307 // |
| 248 // - After starting the epoch, this test advances the elapsed time | 308 // - After starting the epoch, this test advances the elapsed time |
| 249 // sufficiently far that cubic will do small increases at less than | 309 // sufficiently far that cubic will do small increases at less than |
| 250 // MaxCubicTimeInterval() intervals. | 310 // MaxCubicTimeInterval() intervals. |
| 251 // | 311 // |
| 252 // - Sets an artificially large initial cwnd to prevent Reno from the | 312 // - Sets an artificially large initial cwnd to prevent Reno from the |
| 253 // convex increases on every ack. | 313 // convex increases on every ack. |
| 254 TEST_F(CubicBytesTest, AboveOriginFineGrainedCubing) { | 314 TEST_P(CubicBytesTest, AboveOriginFineGrainedCubing) { |
| 255 if (!FLAGS_quic_reloadable_flag_quic_fix_cubic_convex_mode || | 315 if (!GetParam().fix_convex_mode || !GetParam().fix_cubic_quantization) { |
| 256 !FLAGS_quic_reloadable_flag_quic_fix_cubic_bytes_quantization) { | |
| 257 // Without these two fixes, this test cannot pass. | 316 // Without these two fixes, this test cannot pass. |
| 258 return; | 317 return; |
| 259 } | 318 } |
| 260 | 319 |
| 261 // Start the test with an artificially large cwnd to prevent Reno | 320 // Start the test with an artificially large cwnd to prevent Reno |
| 262 // from over-taking cubic. | 321 // from over-taking cubic. |
| 263 QuicByteCount current_cwnd = 1000 * kDefaultTCPMSS; | 322 QuicByteCount current_cwnd = 1000 * kDefaultTCPMSS; |
| 264 const QuicByteCount initial_cwnd = current_cwnd; | 323 const QuicByteCount initial_cwnd = current_cwnd; |
| 265 const QuicTime::Delta rtt_min = hundred_ms_; | 324 const QuicTime::Delta rtt_min = hundred_ms_; |
| 266 clock_.AdvanceTime(one_ms_); | 325 clock_.AdvanceTime(one_ms_); |
| (...skipping 19 matching lines...) Expand all Loading... |
| 286 // Make sure that these are non-zero, less-than-packet sized | 345 // Make sure that these are non-zero, less-than-packet sized |
| 287 // increases. | 346 // increases. |
| 288 ASSERT_GT(next_cwnd, current_cwnd); | 347 ASSERT_GT(next_cwnd, current_cwnd); |
| 289 const QuicByteCount cwnd_delta = next_cwnd - current_cwnd; | 348 const QuicByteCount cwnd_delta = next_cwnd - current_cwnd; |
| 290 ASSERT_GT(kDefaultTCPMSS * .1, cwnd_delta); | 349 ASSERT_GT(kDefaultTCPMSS * .1, cwnd_delta); |
| 291 | 350 |
| 292 current_cwnd = next_cwnd; | 351 current_cwnd = next_cwnd; |
| 293 } | 352 } |
| 294 } | 353 } |
| 295 | 354 |
| 296 TEST_F(CubicBytesTest, LossEvents) { | 355 TEST_P(CubicBytesTest, LossEvents) { |
| 297 const QuicTime::Delta rtt_min = hundred_ms_; | 356 const QuicTime::Delta rtt_min = hundred_ms_; |
| 298 QuicByteCount current_cwnd = 422 * kDefaultTCPMSS; | 357 QuicByteCount current_cwnd = 422 * kDefaultTCPMSS; |
| 299 // Without the signed-integer, cubic-convex fix, we mistakenly | 358 // Without the signed-integer, cubic-convex fix, we mistakenly |
| 300 // increment cwnd after only one_ms_ and a single ack. | 359 // increment cwnd after only one_ms_ and a single ack. |
| 301 QuicPacketCount expected_cwnd = | 360 QuicPacketCount expected_cwnd = GetParam().fix_convex_mode |
| 302 FLAGS_quic_reloadable_flag_quic_fix_cubic_convex_mode | 361 ? RenoCwndInBytes(current_cwnd) |
| 303 ? RenoCwndInBytes(current_cwnd) | 362 : current_cwnd + kDefaultTCPMSS / 2; |
| 304 : current_cwnd + kDefaultTCPMSS / 2; | |
| 305 // Initialize the state. | 363 // Initialize the state. |
| 306 clock_.AdvanceTime(one_ms_); | 364 clock_.AdvanceTime(one_ms_); |
| 307 EXPECT_EQ(expected_cwnd, | 365 EXPECT_EQ(expected_cwnd, |
| 308 cubic_.CongestionWindowAfterAck(kDefaultTCPMSS, current_cwnd, | 366 cubic_.CongestionWindowAfterAck(kDefaultTCPMSS, current_cwnd, |
| 309 rtt_min, clock_.ApproximateNow())); | 367 rtt_min, clock_.ApproximateNow())); |
| 310 | 368 |
| 311 // On the first loss, the last max congestion window is set to the | 369 // On the first loss, the last max congestion window is set to the |
| 312 // congestion window before the loss. | 370 // congestion window before the loss. |
| 313 QuicByteCount pre_loss_cwnd = current_cwnd; | 371 QuicByteCount pre_loss_cwnd = current_cwnd; |
| 314 ASSERT_EQ(0u, LastMaxCongestionWindow()); | 372 ASSERT_EQ(0u, LastMaxCongestionWindow()); |
| 315 expected_cwnd = static_cast<QuicByteCount>(current_cwnd * kNConnectionBeta); | 373 expected_cwnd = static_cast<QuicByteCount>(current_cwnd * kNConnectionBeta); |
| 316 EXPECT_EQ(expected_cwnd, | 374 EXPECT_EQ(expected_cwnd, |
| 317 cubic_.CongestionWindowAfterPacketLoss(current_cwnd)); | 375 cubic_.CongestionWindowAfterPacketLoss(current_cwnd)); |
| 318 ASSERT_EQ(pre_loss_cwnd, LastMaxCongestionWindow()); | 376 ASSERT_EQ(pre_loss_cwnd, LastMaxCongestionWindow()); |
| 319 current_cwnd = expected_cwnd; | 377 current_cwnd = expected_cwnd; |
| 320 | 378 |
| 321 // On the second loss, the current congestion window has not yet | 379 // On the second loss, the current congestion window has not yet |
| 322 // reached the last max congestion window. The last max congestion | 380 // reached the last max congestion window. The last max congestion |
| 323 // window will be reduced by an additional backoff factor to allow | 381 // window will be reduced by an additional backoff factor to allow |
| 324 // for competition. | 382 // for competition. |
| 325 pre_loss_cwnd = current_cwnd; | 383 pre_loss_cwnd = current_cwnd; |
| 326 expected_cwnd = static_cast<QuicByteCount>(current_cwnd * kNConnectionBeta); | 384 expected_cwnd = static_cast<QuicByteCount>(current_cwnd * kNConnectionBeta); |
| 327 ASSERT_EQ(expected_cwnd, | 385 ASSERT_EQ(expected_cwnd, |
| 328 cubic_.CongestionWindowAfterPacketLoss(current_cwnd)); | 386 cubic_.CongestionWindowAfterPacketLoss(current_cwnd)); |
| 329 current_cwnd = expected_cwnd; | 387 current_cwnd = expected_cwnd; |
| 330 EXPECT_GT(pre_loss_cwnd, LastMaxCongestionWindow()); | 388 EXPECT_GT(pre_loss_cwnd, LastMaxCongestionWindow()); |
| 331 QuicByteCount expected_last_max = | 389 QuicByteCount expected_last_max = |
| 332 FLAGS_quic_reloadable_flag_quic_fix_beta_last_max | 390 GetParam().fix_beta_last_max |
| 333 ? static_cast<QuicByteCount>(pre_loss_cwnd * kNConnectionBetaLastMax) | 391 ? static_cast<QuicByteCount>(pre_loss_cwnd * kNConnectionBetaLastMax) |
| 334 : static_cast<QuicByteCount>(pre_loss_cwnd * kBetaLastMax); | 392 : static_cast<QuicByteCount>(pre_loss_cwnd * kBetaLastMax); |
| 335 EXPECT_EQ(expected_last_max, LastMaxCongestionWindow()); | 393 EXPECT_EQ(expected_last_max, LastMaxCongestionWindow()); |
| 336 if (FLAGS_quic_reloadable_flag_quic_fix_beta_last_max) { | 394 if (GetParam().fix_beta_last_max) { |
| 337 EXPECT_LT(expected_cwnd, LastMaxCongestionWindow()); | 395 EXPECT_LT(expected_cwnd, LastMaxCongestionWindow()); |
| 338 } else { | 396 } else { |
| 339 // If we don't scale kLastBetaMax, the current window is exactly | 397 // If we don't scale kLastBetaMax, the current window is exactly |
| 340 // equal to the last max congestion window, which would cause us | 398 // equal to the last max congestion window, which would cause us |
| 341 // to land above the origin on the next increase. | 399 // to land above the origin on the next increase. |
| 342 EXPECT_EQ(expected_cwnd, LastMaxCongestionWindow()); | 400 EXPECT_EQ(expected_cwnd, LastMaxCongestionWindow()); |
| 343 } | 401 } |
| 344 // Simulate an increase, and check that we are below the origin. | 402 // Simulate an increase, and check that we are below the origin. |
| 345 current_cwnd = cubic_.CongestionWindowAfterAck( | 403 current_cwnd = cubic_.CongestionWindowAfterAck( |
| 346 kDefaultTCPMSS, current_cwnd, rtt_min, clock_.ApproximateNow()); | 404 kDefaultTCPMSS, current_cwnd, rtt_min, clock_.ApproximateNow()); |
| 347 if (FLAGS_quic_reloadable_flag_quic_fix_beta_last_max) { | 405 if (GetParam().fix_beta_last_max) { |
| 348 EXPECT_GT(LastMaxCongestionWindow(), current_cwnd); | 406 EXPECT_GT(LastMaxCongestionWindow(), current_cwnd); |
| 349 } else { | 407 } else { |
| 350 // Without the bug fix, we will be at or above the origin. | 408 // Without the bug fix, we will be at or above the origin. |
| 351 EXPECT_LE(LastMaxCongestionWindow(), current_cwnd); | 409 EXPECT_LE(LastMaxCongestionWindow(), current_cwnd); |
| 352 } | 410 } |
| 353 | 411 |
| 354 // On the final loss, simulate the condition where the congestion | 412 // On the final loss, simulate the condition where the congestion |
| 355 // window had a chance to grow nearly to the last congestion window. | 413 // window had a chance to grow nearly to the last congestion window. |
| 356 current_cwnd = LastMaxCongestionWindow() - 1; | 414 current_cwnd = LastMaxCongestionWindow() - 1; |
| 357 pre_loss_cwnd = current_cwnd; | 415 pre_loss_cwnd = current_cwnd; |
| 358 expected_cwnd = static_cast<QuicByteCount>(current_cwnd * kNConnectionBeta); | 416 expected_cwnd = static_cast<QuicByteCount>(current_cwnd * kNConnectionBeta); |
| 359 EXPECT_EQ(expected_cwnd, | 417 EXPECT_EQ(expected_cwnd, |
| 360 cubic_.CongestionWindowAfterPacketLoss(current_cwnd)); | 418 cubic_.CongestionWindowAfterPacketLoss(current_cwnd)); |
| 361 expected_last_max = | 419 expected_last_max = |
| 362 FLAGS_quic_reloadable_flag_quic_fix_beta_last_max | 420 GetParam().fix_beta_last_max |
| 363 ? pre_loss_cwnd | 421 ? pre_loss_cwnd |
| 364 : static_cast<QuicByteCount>(pre_loss_cwnd * kBetaLastMax); | 422 : static_cast<QuicByteCount>(pre_loss_cwnd * kBetaLastMax); |
| 365 ASSERT_EQ(expected_last_max, LastMaxCongestionWindow()); | 423 ASSERT_EQ(expected_last_max, LastMaxCongestionWindow()); |
| 366 } | 424 } |
| 367 | 425 |
| 368 TEST_F(CubicBytesTest, BelowOrigin) { | 426 TEST_P(CubicBytesTest, BelowOrigin) { |
| 369 // Concave growth. | 427 // Concave growth. |
| 370 const QuicTime::Delta rtt_min = hundred_ms_; | 428 const QuicTime::Delta rtt_min = hundred_ms_; |
| 371 QuicByteCount current_cwnd = 422 * kDefaultTCPMSS; | 429 QuicByteCount current_cwnd = 422 * kDefaultTCPMSS; |
| 372 // Without the signed-integer, cubic-convex fix, we mistakenly | 430 // Without the signed-integer, cubic-convex fix, we mistakenly |
| 373 // increment cwnd after only one_ms_ and a single ack. | 431 // increment cwnd after only one_ms_ and a single ack. |
| 374 QuicPacketCount expected_cwnd = | 432 QuicPacketCount expected_cwnd = GetParam().fix_convex_mode |
| 375 FLAGS_quic_reloadable_flag_quic_fix_cubic_convex_mode | 433 ? RenoCwndInBytes(current_cwnd) |
| 376 ? RenoCwndInBytes(current_cwnd) | 434 : current_cwnd + kDefaultTCPMSS / 2; |
| 377 : current_cwnd + kDefaultTCPMSS / 2; | |
| 378 // Initialize the state. | 435 // Initialize the state. |
| 379 clock_.AdvanceTime(one_ms_); | 436 clock_.AdvanceTime(one_ms_); |
| 380 EXPECT_EQ(expected_cwnd, | 437 EXPECT_EQ(expected_cwnd, |
| 381 cubic_.CongestionWindowAfterAck(kDefaultTCPMSS, current_cwnd, | 438 cubic_.CongestionWindowAfterAck(kDefaultTCPMSS, current_cwnd, |
| 382 rtt_min, clock_.ApproximateNow())); | 439 rtt_min, clock_.ApproximateNow())); |
| 383 expected_cwnd = static_cast<QuicPacketCount>(current_cwnd * kNConnectionBeta); | 440 expected_cwnd = static_cast<QuicPacketCount>(current_cwnd * kNConnectionBeta); |
| 384 EXPECT_EQ(expected_cwnd, | 441 EXPECT_EQ(expected_cwnd, |
| 385 cubic_.CongestionWindowAfterPacketLoss(current_cwnd)); | 442 cubic_.CongestionWindowAfterPacketLoss(current_cwnd)); |
| 386 current_cwnd = expected_cwnd; | 443 current_cwnd = expected_cwnd; |
| 387 // First update after loss to initialize the epoch. | 444 // First update after loss to initialize the epoch. |
| 388 current_cwnd = cubic_.CongestionWindowAfterAck( | 445 current_cwnd = cubic_.CongestionWindowAfterAck( |
| 389 kDefaultTCPMSS, current_cwnd, rtt_min, clock_.ApproximateNow()); | 446 kDefaultTCPMSS, current_cwnd, rtt_min, clock_.ApproximateNow()); |
| 390 // Cubic phase. | 447 // Cubic phase. |
| 391 for (int i = 0; i < 40; ++i) { | 448 for (int i = 0; i < 40; ++i) { |
| 392 clock_.AdvanceTime(hundred_ms_); | 449 clock_.AdvanceTime(hundred_ms_); |
| 393 current_cwnd = cubic_.CongestionWindowAfterAck( | 450 current_cwnd = cubic_.CongestionWindowAfterAck( |
| 394 kDefaultTCPMSS, current_cwnd, rtt_min, clock_.ApproximateNow()); | 451 kDefaultTCPMSS, current_cwnd, rtt_min, clock_.ApproximateNow()); |
| 395 } | 452 } |
| 396 expected_cwnd = 553632; | 453 expected_cwnd = 553632; |
| 397 EXPECT_EQ(expected_cwnd, current_cwnd); | 454 EXPECT_EQ(expected_cwnd, current_cwnd); |
| 398 } | 455 } |
| 399 | 456 |
| 400 } // namespace test | 457 } // namespace test |
| 401 } // namespace net | 458 } // namespace net |
| OLD | NEW |