Index: net/quic/congestion_control/tcp_cubic_sender_test.cc
diff --git a/net/quic/congestion_control/tcp_cubic_sender_test.cc b/net/quic/congestion_control/tcp_cubic_sender_test.cc
index 2d0f61c2c015cbf9b6f6619559989ff959a99ee0..ab5fcdbc36b53660b760289d383ad6cb17d5aeba 100644
--- a/net/quic/congestion_control/tcp_cubic_sender_test.cc
+++ b/net/quic/congestion_control/tcp_cubic_sender_test.cc
@@ -30,13 +30,12 @@ const float kRenoBeta = 0.7f;  // Reno backoff factor.
 
 class TcpCubicSenderPeer : public TcpCubicSender {
  public:
-  TcpCubicSenderPeer(const QuicClock* clock,
-                     bool reno,
-                     QuicPacketCount max_tcp_congestion_window)
-      : TcpCubicSender(
-            clock, &rtt_stats_, reno, kInitialCongestionWindowPackets,
-            max_tcp_congestion_window, &stats_) {
-  }
+  TcpCubicSenderPeer(const QuicClock* clock, bool reno)
+      : TcpCubicSender(clock,
+                       &rtt_stats_,
+                       reno,
+                       kInitialCongestionWindowPackets,
+                       &stats_) {}
 
   QuicPacketCount congestion_window() {
     return congestion_window_;
@@ -62,8 +61,7 @@ class TcpCubicSenderTest : public ::testing::Test {
  protected:
   TcpCubicSenderTest()
       : one_ms_(QuicTime::Delta::FromMilliseconds(1)),
-        sender_(new TcpCubicSenderPeer(&clock_, true,
-                                       kMaxTcpCongestionWindow)),
+        sender_(new TcpCubicSenderPeer(&clock_, true)),
         sequence_number_(1),
         acked_sequence_number_(0),
         bytes_in_flight_(0) {
@@ -208,8 +206,6 @@ TEST_F(TcpCubicSenderTest, ExponentialSlowStart) {
 
 TEST_F(TcpCubicSenderTest, SlowStartAckTrain) {
   sender_->SetNumEmulatedConnections(1);
-  EXPECT_EQ(kMaxTcpCongestionWindow * kDefaultTCPMSS,
-            sender_->GetSlowStartThreshold());
 
   // Make sure that we fall out of slow start when we send ACK train longer
   // than half the RTT, in this test case 30ms, which is more than 30 calls to
@@ -418,8 +414,6 @@ TEST_F(TcpCubicSenderTest, SlowStartBurstPacketLossPRR) {
 
 TEST_F(TcpCubicSenderTest, RTOCongestionWindow) {
   EXPECT_EQ(kDefaultWindowTCP, sender_->GetCongestionWindow());
-  EXPECT_EQ(kMaxTcpCongestionWindow, sender_->slowstart_threshold());
-
   // Expect the window to decrease to the minimum once the RTO fires
   // and slow start threshold to be set to 1/2 of the CWND.
   sender_->OnRetransmissionTimeout(true);
@@ -472,68 +466,6 @@ TEST_F(TcpCubicSenderTest, RetransmissionDelay) {
             sender_->BandwidthEstimate().ToBytesPerSecond());
 }
 
-TEST_F(TcpCubicSenderTest, SlowStartMaxSendWindow) {
-  const QuicPacketCount kMaxCongestionWindowTCP = 50;
-  const int kNumberOfAcks = 100;
-  sender_.reset(
-      new TcpCubicSenderPeer(&clock_, false, kMaxCongestionWindowTCP));
-
-  for (int i = 0; i < kNumberOfAcks; ++i) {
-    // Send our full send window.
-    SendAvailableSendWindow();
-    AckNPackets(2);
-  }
-  QuicByteCount expected_send_window =
-      kMaxCongestionWindowTCP * kDefaultTCPMSS;
-  EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());
-}
-
-TEST_F(TcpCubicSenderTest, TcpRenoMaxCongestionWindow) {
-  const QuicPacketCount kMaxCongestionWindowTCP = 50;
-  const int kNumberOfAcks = 1000;
-  sender_.reset(
-      new TcpCubicSenderPeer(&clock_, true, kMaxCongestionWindowTCP));
-
-  SendAvailableSendWindow();
-  AckNPackets(2);
-  // Make sure we fall out of slow start.
-  LoseNPackets(1);
-
-  for (int i = 0; i < kNumberOfAcks; ++i) {
-    // Send our full send window.
-    SendAvailableSendWindow();
-    AckNPackets(2);
-  }
-
-  QuicByteCount expected_send_window =
-      kMaxCongestionWindowTCP * kDefaultTCPMSS;
-  EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());
-}
-
-TEST_F(TcpCubicSenderTest, TcpCubicMaxCongestionWindow) {
-  const QuicPacketCount kMaxCongestionWindowTCP = 50;
-  // Set to 10000 to compensate for small cubic alpha.
-  const int kNumberOfAcks = 10000;
-
-  sender_.reset(
-      new TcpCubicSenderPeer(&clock_, false, kMaxCongestionWindowTCP));
-
-  SendAvailableSendWindow();
-  AckNPackets(2);
-  // Make sure we fall out of slow start.
-  LoseNPackets(1);
-
-  for (int i = 0; i < kNumberOfAcks; ++i) {
-    // Send our full send window.
-    SendAvailableSendWindow();
-    AckNPackets(2);
-  }
-
-  QuicByteCount expected_send_window =
-      kMaxCongestionWindowTCP * kDefaultTCPMSS;
-  EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());
-}
-
 TEST_F(TcpCubicSenderTest, MultipleLossesInOneWindow) {
   SendAvailableSendWindow();
   const QuicByteCount initial_window = sender_->GetCongestionWindow();
@@ -715,9 +647,10 @@ TEST_F(TcpCubicSenderTest, BandwidthResumption) {
 
   // Resumed CWND is limited to be in a sensible range.
   cached_network_params.set_bandwidth_estimate_bytes_per_second(
-      (kMaxTcpCongestionWindow + 1) * kMaxPacketSize);
+      (kMaxCongestionWindowForBandwidthResumption + 1) * kMaxPacketSize);
   EXPECT_TRUE(sender_->ResumeConnectionState(cached_network_params));
-  EXPECT_EQ(kMaxTcpCongestionWindow, sender_->congestion_window());
+  EXPECT_EQ(kMaxCongestionWindowForBandwidthResumption,
+            sender_->congestion_window());
 
   cached_network_params.set_bandwidth_estimate_bytes_per_second(
       (kMinCongestionWindowForBandwidthResumption - 1) * kMaxPacketSize);