| OLD | NEW |
| 1 // Copyright (c) 2015 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2015 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "net/quic/core/congestion_control/cubic_bytes.h" | 5 #include "net/quic/core/congestion_control/cubic_bytes.h" |
| 6 | 6 |
| 7 #include <cstdint> | 7 #include <cstdint> |
| 8 | 8 |
| 9 #include "base/logging.h" | 9 #include "base/logging.h" |
| 10 #include "net/quic/core/quic_flags.h" | 10 #include "net/quic/core/quic_flags.h" |
| (...skipping 65 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 76 // Convex growth. | 76 // Convex growth. |
| 77 const QuicTime::Delta rtt_min = hundred_ms_; | 77 const QuicTime::Delta rtt_min = hundred_ms_; |
| 78 int64_t rtt_min_ms = rtt_min.ToMilliseconds(); | 78 int64_t rtt_min_ms = rtt_min.ToMilliseconds(); |
| 79 float rtt_min_s = rtt_min_ms / 1000.0; | 79 float rtt_min_s = rtt_min_ms / 1000.0; |
| 80 QuicByteCount current_cwnd = 10 * kDefaultTCPMSS; | 80 QuicByteCount current_cwnd = 10 * kDefaultTCPMSS; |
| 81 const QuicByteCount initial_cwnd = current_cwnd; | 81 const QuicByteCount initial_cwnd = current_cwnd; |
| 82 | 82 |
| 83 clock_.AdvanceTime(one_ms_); | 83 clock_.AdvanceTime(one_ms_); |
| 84 const QuicTime initial_time = clock_.ApproximateNow(); | 84 const QuicTime initial_time = clock_.ApproximateNow(); |
| 85 const QuicByteCount expected_first_cwnd = RenoCwndInBytes(current_cwnd); | 85 const QuicByteCount expected_first_cwnd = RenoCwndInBytes(current_cwnd); |
| 86 current_cwnd = | 86 current_cwnd = cubic_.CongestionWindowAfterAck(kDefaultTCPMSS, current_cwnd, |
| 87 cubic_.CongestionWindowAfterAck(kDefaultTCPMSS, current_cwnd, rtt_min); | 87 rtt_min, initial_time); |
| 88 ASSERT_EQ(expected_first_cwnd, current_cwnd); | 88 ASSERT_EQ(expected_first_cwnd, current_cwnd); |
| 89 | 89 |
| 90 // Normal TCP phase. | 90 // Normal TCP phase. |
| 91 // The maximum number of expected Reno RTTs is calculated by | 91 // The maximum number of expected Reno RTTs is calculated by |
| 92 // finding the point where the cubic curve and the reno curve meet. | 92 // finding the point where the cubic curve and the reno curve meet. |
| 93 const int max_reno_rtts = | 93 const int max_reno_rtts = |
| 94 FLAGS_quic_fix_cubic_bytes_quantization | 94 FLAGS_quic_fix_cubic_bytes_quantization |
| 95 ? std::sqrt(kNConnectionAlpha / | 95 ? std::sqrt(kNConnectionAlpha / |
| 96 (.4 * rtt_min_s * rtt_min_s * rtt_min_s)) - | 96 (.4 * rtt_min_s * rtt_min_s * rtt_min_s)) - |
| 97 2 | 97 2 |
| 98 : std::sqrt(kNConnectionAlpha / | 98 : std::sqrt(kNConnectionAlpha / |
| 99 (.4 * rtt_min_s * rtt_min_s * rtt_min_s)) - | 99 (.4 * rtt_min_s * rtt_min_s * rtt_min_s)) - |
| 100 1; | 100 1; |
| 101 for (int i = 0; i < max_reno_rtts; ++i) { | 101 for (int i = 0; i < max_reno_rtts; ++i) { |
| 102 // Alternatively, we expect it to increase by one, every time we | 102 // Alternatively, we expect it to increase by one, every time we |
| 103 // receive current_cwnd/Alpha acks back. (This is another way of | 103 // receive current_cwnd/Alpha acks back. (This is another way of |
| 104 // saying we expect cwnd to increase by approximately Alpha once | 104 // saying we expect cwnd to increase by approximately Alpha once |
| 105 // we receive current_cwnd number of acks back). | 105 // we receive current_cwnd number of acks back). |
| 106 const uint64_t num_acks_this_epoch = | 106 const uint64_t num_acks_this_epoch = |
| 107 current_cwnd / kDefaultTCPMSS / kNConnectionAlpha; | 107 current_cwnd / kDefaultTCPMSS / kNConnectionAlpha; |
| 108 const QuicByteCount initial_cwnd_this_epoch = current_cwnd; | 108 const QuicByteCount initial_cwnd_this_epoch = current_cwnd; |
| 109 for (QuicPacketCount n = 0; n < num_acks_this_epoch; ++n) { | 109 for (QuicPacketCount n = 0; n < num_acks_this_epoch; ++n) { |
| 110 // Call once per ACK. | 110 // Call once per ACK. |
| 111 const QuicByteCount expected_next_cwnd = RenoCwndInBytes(current_cwnd); | 111 const QuicByteCount expected_next_cwnd = RenoCwndInBytes(current_cwnd); |
| 112 current_cwnd = cubic_.CongestionWindowAfterAck(kDefaultTCPMSS, | 112 current_cwnd = cubic_.CongestionWindowAfterAck( |
| 113 current_cwnd, rtt_min); | 113 kDefaultTCPMSS, current_cwnd, rtt_min, clock_.ApproximateNow()); |
| 114 ASSERT_EQ(expected_next_cwnd, current_cwnd); | 114 ASSERT_EQ(expected_next_cwnd, current_cwnd); |
| 115 } | 115 } |
| 116 // Our byte-wise Reno implementation is an estimate. We expect | 116 // Our byte-wise Reno implementation is an estimate. We expect |
| 117 // the cwnd to increase by approximately one MSS every | 117 // the cwnd to increase by approximately one MSS every |
| 118 // cwnd/kDefaultTCPMSS/Alpha acks, but it may be off by as much as | 118 // cwnd/kDefaultTCPMSS/Alpha acks, but it may be off by as much as |
| 119 // half a packet for smaller values of current_cwnd. | 119 // half a packet for smaller values of current_cwnd. |
| 120 const QuicByteCount cwnd_change_this_epoch = | 120 const QuicByteCount cwnd_change_this_epoch = |
| 121 current_cwnd - initial_cwnd_this_epoch; | 121 current_cwnd - initial_cwnd_this_epoch; |
| 122 ASSERT_NEAR(kDefaultTCPMSS, cwnd_change_this_epoch, kDefaultTCPMSS / 2); | 122 ASSERT_NEAR(kDefaultTCPMSS, cwnd_change_this_epoch, kDefaultTCPMSS / 2); |
| 123 clock_.AdvanceTime(hundred_ms_); | 123 clock_.AdvanceTime(hundred_ms_); |
| 124 } | 124 } |
| 125 | 125 |
| 126 if (!FLAGS_quic_fix_cubic_bytes_quantization) { | 126 if (!FLAGS_quic_fix_cubic_bytes_quantization) { |
| 127 // Because our byte-wise Reno under-estimates the cwnd, we switch to | 127 // Because our byte-wise Reno under-estimates the cwnd, we switch to |
| 128 // conservative increases for a few acks before switching to true | 128 // conservative increases for a few acks before switching to true |
| 129 // cubic increases. | 129 // cubic increases. |
| 130 for (int i = 0; i < 3; ++i) { | 130 for (int i = 0; i < 3; ++i) { |
| 131 const QuicByteCount next_expected_cwnd = | 131 const QuicByteCount next_expected_cwnd = |
| 132 ConservativeCwndInBytes(current_cwnd); | 132 ConservativeCwndInBytes(current_cwnd); |
| 133 current_cwnd = cubic_.CongestionWindowAfterAck(kDefaultTCPMSS, | 133 current_cwnd = cubic_.CongestionWindowAfterAck( |
| 134 current_cwnd, rtt_min); | 134 kDefaultTCPMSS, current_cwnd, rtt_min, clock_.ApproximateNow()); |
| 135 ASSERT_EQ(next_expected_cwnd, current_cwnd); | 135 ASSERT_EQ(next_expected_cwnd, current_cwnd); |
| 136 } | 136 } |
| 137 } | 137 } |
| 138 | 138 |
| 139 for (int i = 0; i < 54; ++i) { | 139 for (int i = 0; i < 54; ++i) { |
| 140 const uint64_t max_acks_this_epoch = current_cwnd / kDefaultTCPMSS; | 140 const uint64_t max_acks_this_epoch = current_cwnd / kDefaultTCPMSS; |
| 141 const QuicByteCount expected_cwnd = CubicConvexCwndInBytes( | 141 const QuicByteCount expected_cwnd = CubicConvexCwndInBytes( |
| 142 initial_cwnd, rtt_min, (clock_.ApproximateNow() - initial_time)); | 142 initial_cwnd, rtt_min, (clock_.ApproximateNow() - initial_time)); |
| 143 current_cwnd = | 143 current_cwnd = cubic_.CongestionWindowAfterAck( |
| 144 cubic_.CongestionWindowAfterAck(kDefaultTCPMSS, current_cwnd, rtt_min); | 144 kDefaultTCPMSS, current_cwnd, rtt_min, clock_.ApproximateNow()); |
| 145 ASSERT_EQ(expected_cwnd, current_cwnd); | 145 ASSERT_EQ(expected_cwnd, current_cwnd); |
| 146 | 146 |
| 147 for (QuicPacketCount n = 1; n < max_acks_this_epoch; ++n) { | 147 for (QuicPacketCount n = 1; n < max_acks_this_epoch; ++n) { |
| 148 // Call once per ACK. | 148 // Call once per ACK. |
| 149 ASSERT_EQ(current_cwnd, cubic_.CongestionWindowAfterAck( | 149 ASSERT_EQ( |
| 150 kDefaultTCPMSS, current_cwnd, rtt_min)); | 150 current_cwnd, |
| 151 cubic_.CongestionWindowAfterAck(kDefaultTCPMSS, current_cwnd, rtt_min, |
| 152 clock_.ApproximateNow())); |
| 151 } | 153 } |
| 152 clock_.AdvanceTime(hundred_ms_); | 154 clock_.AdvanceTime(hundred_ms_); |
| 153 } | 155 } |
| 154 const QuicByteCount expected_cwnd = CubicConvexCwndInBytes( | 156 const QuicByteCount expected_cwnd = CubicConvexCwndInBytes( |
| 155 initial_cwnd, rtt_min, (clock_.ApproximateNow() - initial_time)); | 157 initial_cwnd, rtt_min, (clock_.ApproximateNow() - initial_time)); |
| 156 current_cwnd = | 158 current_cwnd = cubic_.CongestionWindowAfterAck( |
| 157 cubic_.CongestionWindowAfterAck(kDefaultTCPMSS, current_cwnd, rtt_min); | 159 kDefaultTCPMSS, current_cwnd, rtt_min, clock_.ApproximateNow()); |
| 158 ASSERT_EQ(expected_cwnd, current_cwnd); | 160 ASSERT_EQ(expected_cwnd, current_cwnd); |
| 159 } | 161 } |
| 160 | 162 |
| 161 TEST_F(CubicBytesTest, AboveOrigin) { | 163 TEST_F(CubicBytesTest, AboveOrigin) { |
| 162 if (!FLAGS_quic_fix_cubic_convex_mode && | 164 if (!FLAGS_quic_fix_cubic_convex_mode && |
| 163 FLAGS_quic_fix_cubic_bytes_quantization) { | 165 FLAGS_quic_fix_cubic_bytes_quantization) { |
| 164 // Without convex mode fixed, the behavior of the algorithm does | 166 // Without convex mode fixed, the behavior of the algorithm does |
| 165 // not fit the exact pattern of this test. | 167 // not fit the exact pattern of this test. |
| 166 // TODO(jokulik): Once the convex mode fix becomes default, this | 168 // TODO(jokulik): Once the convex mode fix becomes default, this |
| 167 // test can be replaced with the better AboveOriginTighterBounds | 169 // test can be replaced with the better AboveOriginTighterBounds |
| 168 // test. | 170 // test. |
| 169 return; | 171 return; |
| 170 } | 172 } |
| 171 // Convex growth. | 173 // Convex growth. |
| 172 const QuicTime::Delta rtt_min = hundred_ms_; | 174 const QuicTime::Delta rtt_min = hundred_ms_; |
| 173 QuicByteCount current_cwnd = 10 * kDefaultTCPMSS; | 175 QuicByteCount current_cwnd = 10 * kDefaultTCPMSS; |
| 174 // Without the signed-integer, cubic-convex fix, we start out in the | 176 // Without the signed-integer, cubic-convex fix, we start out in the |
| 175 // wrong mode. | 177 // wrong mode. |
| 176 QuicPacketCount expected_cwnd = | 178 QuicPacketCount expected_cwnd = |
| 177 FLAGS_quic_fix_cubic_convex_mode | 179 FLAGS_quic_fix_cubic_convex_mode |
| 178 ? RenoCwndInBytes(current_cwnd) | 180 ? RenoCwndInBytes(current_cwnd) |
| 179 : ConservativeCwndInBytes(current_cwnd); | 181 : ConservativeCwndInBytes(current_cwnd); |
| 180 // Initialize the state. | 182 // Initialize the state. |
| 181 clock_.AdvanceTime(one_ms_); | 183 clock_.AdvanceTime(one_ms_); |
| 182 ASSERT_EQ(expected_cwnd, cubic_.CongestionWindowAfterAck( | 184 ASSERT_EQ(expected_cwnd, |
| 183 kDefaultTCPMSS, current_cwnd, rtt_min)); | 185 cubic_.CongestionWindowAfterAck(kDefaultTCPMSS, current_cwnd, |
| 186 rtt_min, clock_.ApproximateNow())); |
| 184 current_cwnd = expected_cwnd; | 187 current_cwnd = expected_cwnd; |
| 185 const QuicPacketCount initial_cwnd = expected_cwnd; | 188 const QuicPacketCount initial_cwnd = expected_cwnd; |
| 186 // Normal TCP phase. | 189 // Normal TCP phase. |
| 187 for (int i = 0; i < 48; ++i) { | 190 for (int i = 0; i < 48; ++i) { |
| 188 for (QuicPacketCount n = 1; | 191 for (QuicPacketCount n = 1; |
| 189 n < current_cwnd / kDefaultTCPMSS / kNConnectionAlpha; ++n) { | 192 n < current_cwnd / kDefaultTCPMSS / kNConnectionAlpha; ++n) { |
| 190 // Call once per ACK. | 193 // Call once per ACK. |
| 191 ASSERT_NEAR(current_cwnd, cubic_.CongestionWindowAfterAck( | 194 ASSERT_NEAR( |
| 192 kDefaultTCPMSS, current_cwnd, rtt_min), | 195 current_cwnd, |
| 193 kDefaultTCPMSS); | 196 cubic_.CongestionWindowAfterAck(kDefaultTCPMSS, current_cwnd, rtt_min, |
| 197 clock_.ApproximateNow()), |
| 198 kDefaultTCPMSS); |
| 194 } | 199 } |
| 195 clock_.AdvanceTime(hundred_ms_); | 200 clock_.AdvanceTime(hundred_ms_); |
| 196 current_cwnd = | 201 current_cwnd = cubic_.CongestionWindowAfterAck( |
| 197 cubic_.CongestionWindowAfterAck(kDefaultTCPMSS, current_cwnd, rtt_min); | 202 kDefaultTCPMSS, current_cwnd, rtt_min, clock_.ApproximateNow()); |
| 198 if (FLAGS_quic_fix_cubic_convex_mode) { | 203 if (FLAGS_quic_fix_cubic_convex_mode) { |
| 199 // When we fix convex mode and the uint64 arithmetic, we | 204 // When we fix convex mode and the uint64 arithmetic, we |
| 200 // increase the expected_cwnd only after the first 100ms, | 205 // increase the expected_cwnd only after the first 100ms, |
| 201 // rather than after the initial 1ms. | 206 // rather than after the initial 1ms. |
| 202 expected_cwnd += kDefaultTCPMSS; | 207 expected_cwnd += kDefaultTCPMSS; |
| 203 ASSERT_NEAR(expected_cwnd, current_cwnd, kDefaultTCPMSS); | 208 ASSERT_NEAR(expected_cwnd, current_cwnd, kDefaultTCPMSS); |
| 204 } else { | 209 } else { |
| 205 ASSERT_NEAR(expected_cwnd, current_cwnd, kDefaultTCPMSS); | 210 ASSERT_NEAR(expected_cwnd, current_cwnd, kDefaultTCPMSS); |
| 206 expected_cwnd += kDefaultTCPMSS; | 211 expected_cwnd += kDefaultTCPMSS; |
| 207 } | 212 } |
| 208 } | 213 } |
| 209 // Cubic phase. | 214 // Cubic phase. |
| 210 for (int i = 0; i < 52; ++i) { | 215 for (int i = 0; i < 52; ++i) { |
| 211 for (QuicPacketCount n = 1; n < current_cwnd / kDefaultTCPMSS; ++n) { | 216 for (QuicPacketCount n = 1; n < current_cwnd / kDefaultTCPMSS; ++n) { |
| 212 // Call once per ACK. | 217 // Call once per ACK. |
| 213 ASSERT_NEAR(current_cwnd, cubic_.CongestionWindowAfterAck( | 218 ASSERT_NEAR( |
| 214 kDefaultTCPMSS, current_cwnd, rtt_min), | 219 current_cwnd, |
| 215 kDefaultTCPMSS); | 220 cubic_.CongestionWindowAfterAck(kDefaultTCPMSS, current_cwnd, rtt_min, |
| 221 clock_.ApproximateNow()), |
| 222 kDefaultTCPMSS); |
| 216 } | 223 } |
| 217 clock_.AdvanceTime(hundred_ms_); | 224 clock_.AdvanceTime(hundred_ms_); |
| 218 current_cwnd = | 225 current_cwnd = cubic_.CongestionWindowAfterAck( |
| 219 cubic_.CongestionWindowAfterAck(kDefaultTCPMSS, current_cwnd, rtt_min); | 226 kDefaultTCPMSS, current_cwnd, rtt_min, clock_.ApproximateNow()); |
| 220 } | 227 } |
| 221 // Total time elapsed so far; add min_rtt (0.1s) here as well. | 228 // Total time elapsed so far; add min_rtt (0.1s) here as well. |
| 222 float elapsed_time_s = 10.0f + 0.1f; | 229 float elapsed_time_s = 10.0f + 0.1f; |
| 223 // |expected_cwnd| is initial value of cwnd + K * t^3, where K = 0.4. | 230 // |expected_cwnd| is initial value of cwnd + K * t^3, where K = 0.4. |
| 224 expected_cwnd = | 231 expected_cwnd = |
| 225 initial_cwnd / kDefaultTCPMSS + | 232 initial_cwnd / kDefaultTCPMSS + |
| 226 (elapsed_time_s * elapsed_time_s * elapsed_time_s * 410) / 1024; | 233 (elapsed_time_s * elapsed_time_s * elapsed_time_s * 410) / 1024; |
| 227 // Without the convex mode fix, the result is off by one. | 234 // Without the convex mode fix, the result is off by one. |
| 228 if (!FLAGS_quic_fix_cubic_convex_mode) { | 235 if (!FLAGS_quic_fix_cubic_convex_mode) { |
| 229 ++expected_cwnd; | 236 ++expected_cwnd; |
| (...skipping 19 matching lines...) Expand all Loading... |
| 249 | 256 |
| 250 // Start the test with an artificially large cwnd to prevent Reno | 257 // Start the test with an artificially large cwnd to prevent Reno |
| 251 // from over-taking cubic. | 258 // from over-taking cubic. |
| 252 QuicByteCount current_cwnd = 1000 * kDefaultTCPMSS; | 259 QuicByteCount current_cwnd = 1000 * kDefaultTCPMSS; |
| 253 const QuicByteCount initial_cwnd = current_cwnd; | 260 const QuicByteCount initial_cwnd = current_cwnd; |
| 254 const QuicTime::Delta rtt_min = hundred_ms_; | 261 const QuicTime::Delta rtt_min = hundred_ms_; |
| 255 clock_.AdvanceTime(one_ms_); | 262 clock_.AdvanceTime(one_ms_); |
| 256 QuicTime initial_time = clock_.ApproximateNow(); | 263 QuicTime initial_time = clock_.ApproximateNow(); |
| 257 | 264 |
| 258 // Start the epoch and then artificially advance the time. | 265 // Start the epoch and then artificially advance the time. |
| 259 current_cwnd = | 266 current_cwnd = cubic_.CongestionWindowAfterAck( |
| 260 cubic_.CongestionWindowAfterAck(kDefaultTCPMSS, current_cwnd, rtt_min); | 267 kDefaultTCPMSS, current_cwnd, rtt_min, clock_.ApproximateNow()); |
| 261 clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(600)); | 268 clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(600)); |
| 262 current_cwnd = | 269 current_cwnd = cubic_.CongestionWindowAfterAck( |
| 263 cubic_.CongestionWindowAfterAck(kDefaultTCPMSS, current_cwnd, rtt_min); | 270 kDefaultTCPMSS, current_cwnd, rtt_min, clock_.ApproximateNow()); |
| 264 | 271 |
| 265 // We expect the algorithm to perform only non-zero, fine-grained cubic | 272 // We expect the algorithm to perform only non-zero, fine-grained cubic |
| 266 // increases on every ack in this case. | 273 // increases on every ack in this case. |
| 267 for (int i = 0; i < 100; ++i) { | 274 for (int i = 0; i < 100; ++i) { |
| 268 clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(10)); | 275 clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(10)); |
| 269 const QuicByteCount expected_cwnd = CubicConvexCwndInBytes( | 276 const QuicByteCount expected_cwnd = CubicConvexCwndInBytes( |
| 270 initial_cwnd, rtt_min, (clock_.ApproximateNow() - initial_time)); | 277 initial_cwnd, rtt_min, (clock_.ApproximateNow() - initial_time)); |
| 271 const QuicByteCount next_cwnd = | 278 const QuicByteCount next_cwnd = cubic_.CongestionWindowAfterAck( |
| 272 cubic_.CongestionWindowAfterAck(kDefaultTCPMSS, current_cwnd, rtt_min); | 279 kDefaultTCPMSS, current_cwnd, rtt_min, clock_.ApproximateNow()); |
| 273 // Make sure we are performing cubic increases. | 280 // Make sure we are performing cubic increases. |
| 274 ASSERT_EQ(expected_cwnd, next_cwnd); | 281 ASSERT_EQ(expected_cwnd, next_cwnd); |
| 275 // Make sure that these are non-zero, less-than-packet sized | 282 // Make sure that these are non-zero, less-than-packet sized |
| 276 // increases. | 283 // increases. |
| 277 ASSERT_GT(next_cwnd, current_cwnd); | 284 ASSERT_GT(next_cwnd, current_cwnd); |
| 278 const QuicByteCount cwnd_delta = next_cwnd - current_cwnd; | 285 const QuicByteCount cwnd_delta = next_cwnd - current_cwnd; |
| 279 ASSERT_GT(kDefaultTCPMSS * .1, cwnd_delta); | 286 ASSERT_GT(kDefaultTCPMSS * .1, cwnd_delta); |
| 280 | 287 |
| 281 current_cwnd = next_cwnd; | 288 current_cwnd = next_cwnd; |
| 282 } | 289 } |
| 283 } | 290 } |
| 284 | 291 |
| 285 TEST_F(CubicBytesTest, LossEvents) { | 292 TEST_F(CubicBytesTest, LossEvents) { |
| 286 const QuicTime::Delta rtt_min = hundred_ms_; | 293 const QuicTime::Delta rtt_min = hundred_ms_; |
| 287 QuicByteCount current_cwnd = 422 * kDefaultTCPMSS; | 294 QuicByteCount current_cwnd = 422 * kDefaultTCPMSS; |
| 288 // Without the signed-integer, cubic-convex fix, we mistakenly | 295 // Without the signed-integer, cubic-convex fix, we mistakenly |
| 289 // increment cwnd after only one_ms_ and a single ack. | 296 // increment cwnd after only one_ms_ and a single ack. |
| 290 QuicPacketCount expected_cwnd = | 297 QuicPacketCount expected_cwnd = |
| 291 FLAGS_quic_fix_cubic_convex_mode | 298 FLAGS_quic_fix_cubic_convex_mode |
| 292 ? RenoCwndInBytes(current_cwnd) | 299 ? RenoCwndInBytes(current_cwnd) |
| 293 : current_cwnd + kDefaultTCPMSS / 2; | 300 : current_cwnd + kDefaultTCPMSS / 2; |
| 294 // Initialize the state. | 301 // Initialize the state. |
| 295 clock_.AdvanceTime(one_ms_); | 302 clock_.AdvanceTime(one_ms_); |
| 296 EXPECT_EQ(expected_cwnd, cubic_.CongestionWindowAfterAck( | 303 EXPECT_EQ(expected_cwnd, |
| 297 kDefaultTCPMSS, current_cwnd, rtt_min)); | 304 cubic_.CongestionWindowAfterAck(kDefaultTCPMSS, current_cwnd, |
| 305 rtt_min, clock_.ApproximateNow())); |
| 298 expected_cwnd = static_cast<QuicPacketCount>(current_cwnd * kNConnectionBeta); | 306 expected_cwnd = static_cast<QuicPacketCount>(current_cwnd * kNConnectionBeta); |
| 299 EXPECT_EQ(expected_cwnd, | 307 EXPECT_EQ(expected_cwnd, |
| 300 cubic_.CongestionWindowAfterPacketLoss(current_cwnd)); | 308 cubic_.CongestionWindowAfterPacketLoss(current_cwnd)); |
| 301 expected_cwnd = static_cast<QuicPacketCount>(current_cwnd * kNConnectionBeta); | 309 expected_cwnd = static_cast<QuicPacketCount>(current_cwnd * kNConnectionBeta); |
| 302 EXPECT_EQ(expected_cwnd, | 310 EXPECT_EQ(expected_cwnd, |
| 303 cubic_.CongestionWindowAfterPacketLoss(current_cwnd)); | 311 cubic_.CongestionWindowAfterPacketLoss(current_cwnd)); |
| 304 } | 312 } |
| 305 | 313 |
| 306 TEST_F(CubicBytesTest, BelowOrigin) { | 314 TEST_F(CubicBytesTest, BelowOrigin) { |
| 307 // Concave growth. | 315 // Concave growth. |
| 308 const QuicTime::Delta rtt_min = hundred_ms_; | 316 const QuicTime::Delta rtt_min = hundred_ms_; |
| 309 QuicByteCount current_cwnd = 422 * kDefaultTCPMSS; | 317 QuicByteCount current_cwnd = 422 * kDefaultTCPMSS; |
| 310 // Without the signed-integer, cubic-convex fix, we mistakenly | 318 // Without the signed-integer, cubic-convex fix, we mistakenly |
| 311 // increment cwnd after only one_ms_ and a single ack. | 319 // increment cwnd after only one_ms_ and a single ack. |
| 312 QuicPacketCount expected_cwnd = | 320 QuicPacketCount expected_cwnd = |
| 313 FLAGS_quic_fix_cubic_convex_mode | 321 FLAGS_quic_fix_cubic_convex_mode |
| 314 ? RenoCwndInBytes(current_cwnd) | 322 ? RenoCwndInBytes(current_cwnd) |
| 315 : current_cwnd + kDefaultTCPMSS / 2; | 323 : current_cwnd + kDefaultTCPMSS / 2; |
| 316 // Initialize the state. | 324 // Initialize the state. |
| 317 clock_.AdvanceTime(one_ms_); | 325 clock_.AdvanceTime(one_ms_); |
| 318 EXPECT_EQ(expected_cwnd, cubic_.CongestionWindowAfterAck( | 326 EXPECT_EQ(expected_cwnd, |
| 319 kDefaultTCPMSS, current_cwnd, rtt_min)); | 327 cubic_.CongestionWindowAfterAck(kDefaultTCPMSS, current_cwnd, |
| 328 rtt_min, clock_.ApproximateNow())); |
| 320 expected_cwnd = static_cast<QuicPacketCount>(current_cwnd * kNConnectionBeta); | 329 expected_cwnd = static_cast<QuicPacketCount>(current_cwnd * kNConnectionBeta); |
| 321 EXPECT_EQ(expected_cwnd, | 330 EXPECT_EQ(expected_cwnd, |
| 322 cubic_.CongestionWindowAfterPacketLoss(current_cwnd)); | 331 cubic_.CongestionWindowAfterPacketLoss(current_cwnd)); |
| 323 current_cwnd = expected_cwnd; | 332 current_cwnd = expected_cwnd; |
| 324 // First update after loss to initialize the epoch. | 333 // First update after loss to initialize the epoch. |
| 325 current_cwnd = | 334 current_cwnd = cubic_.CongestionWindowAfterAck( |
| 326 cubic_.CongestionWindowAfterAck(kDefaultTCPMSS, current_cwnd, rtt_min); | 335 kDefaultTCPMSS, current_cwnd, rtt_min, clock_.ApproximateNow()); |
| 327 // Cubic phase. | 336 // Cubic phase. |
| 328 for (int i = 0; i < 40; ++i) { | 337 for (int i = 0; i < 40; ++i) { |
| 329 clock_.AdvanceTime(hundred_ms_); | 338 clock_.AdvanceTime(hundred_ms_); |
| 330 current_cwnd = | 339 current_cwnd = cubic_.CongestionWindowAfterAck( |
| 331 cubic_.CongestionWindowAfterAck(kDefaultTCPMSS, current_cwnd, rtt_min); | 340 kDefaultTCPMSS, current_cwnd, rtt_min, clock_.ApproximateNow()); |
| 332 } | 341 } |
| 333 expected_cwnd = 553632; | 342 expected_cwnd = 553632; |
| 334 EXPECT_EQ(expected_cwnd, current_cwnd); | 343 EXPECT_EQ(expected_cwnd, current_cwnd); |
| 335 } | 344 } |
| 336 | 345 |
| 337 } // namespace test | 346 } // namespace test |
| 338 } // namespace net | 347 } // namespace net |
| OLD | NEW |