Index: net/quic/congestion_control/tcp_cubic_sender.cc
diff --git a/net/quic/congestion_control/tcp_cubic_sender.cc b/net/quic/congestion_control/tcp_cubic_sender.cc
index 256126f644b132d8cb7e254d81fe7cea374cd835..ac94217f8f2ad153b26ad1b40ac7585364131c4a 100644
--- a/net/quic/congestion_control/tcp_cubic_sender.cc
+++ b/net/quic/congestion_control/tcp_cubic_sender.cc
@@ -50,7 +50,8 @@ TcpCubicSender::TcpCubicSender(const QuicClock* clock,
       last_cutback_exited_slowstart_(false),
       max_tcp_congestion_window_(max_tcp_congestion_window),
       initial_tcp_congestion_window_(initial_tcp_congestion_window),
-      initial_max_tcp_congestion_window_(max_tcp_congestion_window) {}
+      initial_max_tcp_congestion_window_(max_tcp_congestion_window),
+      slow_start_large_reduction_(false) {}
 
 TcpCubicSender::~TcpCubicSender() {
   UMA_HISTOGRAM_COUNTS("Net.QuicSession.FinalTcpCwnd", congestion_window_);
@@ -90,6 +91,11 @@ void TcpCubicSender::SetFromConfig(const QuicConfig& config,
       min4_mode_ = true;
       min_congestion_window_ = 1;
     }
+    if (config.HasReceivedConnectionOptions() &&
+        ContainsQuicTag(config.ReceivedConnectionOptions(), kSSLR)) {
+      // Slow Start Fast Exit experiment.
+      slow_start_large_reduction_ = true;
+    }
   }
 }
 
@@ -170,6 +176,12 @@ void TcpCubicSender::OnPacketLost(QuicPacketNumber packet_number,
   if (packet_number <= largest_sent_at_last_cutback_) {
     if (last_cutback_exited_slowstart_) {
       ++stats_->slowstart_packets_lost;
+      if (slow_start_large_reduction_) {
+        // Reduce congestion window by 1 for every loss.
+        congestion_window_ =
+            max(congestion_window_ - 1, min_congestion_window_);
+        slowstart_threshold_ = congestion_window_;
+      }
     }
     DVLOG(1) << "Ignoring loss for largest_missing:" << packet_number
              << " because it was sent prior to the last CWND cutback.";
@@ -183,17 +195,21 @@ void TcpCubicSender::OnPacketLost(QuicPacketNumber packet_number,
 
   prr_.OnPacketLost(bytes_in_flight);
 
-  if (reno_) {
+  // TODO(jri): Separate out all of slow start into a separate class.
+  if (slow_start_large_reduction_) {
+    DCHECK_LT(1u, congestion_window_);
+    congestion_window_ = congestion_window_ - 1;
+  } else if (reno_) {
     congestion_window_ = congestion_window_ * RenoBeta();
   } else {
     congestion_window_ =
         cubic_.CongestionWindowAfterPacketLoss(congestion_window_);
   }
-  slowstart_threshold_ = congestion_window_;
   // Enforce a minimum congestion window.
   if (congestion_window_ < min_congestion_window_) {
     congestion_window_ = min_congestion_window_;
   }
+  slowstart_threshold_ = congestion_window_;
   largest_sent_at_last_cutback_ = largest_sent_packet_number_;
   // reset packet count from congestion avoidance mode. We start
   // counting again when we're out of recovery.
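For readers who want the behaviour of the patch in isolation: with the kSSLR connection option enabled, a loss that exits slow start shrinks the congestion window by one packet per loss (clamped to the minimum window) instead of applying the multiplicative Reno/CUBIC backoff, and ssthresh is now set only after the minimum-window clamp. The following is a minimal standalone sketch of that decision, not the Chromium code itself; the LossResponse struct, the free function, and the Reno halving shown for contrast are illustrative assumptions.

```cpp
// Simplified sketch of the loss response this patch introduces (assumed
// names; the real logic lives in TcpCubicSender::OnPacketLost).
#include <algorithm>
#include <cstdint>
#include <iostream>

using QuicPacketCount = uint64_t;

struct LossResponse {
  QuicPacketCount congestion_window;
  QuicPacketCount slowstart_threshold;
};

LossResponse OnLossExitingSlowStart(QuicPacketCount congestion_window,
                                    QuicPacketCount min_congestion_window,
                                    bool slow_start_large_reduction) {
  QuicPacketCount cwnd = congestion_window;
  if (slow_start_large_reduction) {
    // SSLR: back off by one packet per loss event.
    cwnd = cwnd - 1;
  } else {
    // Reno-style multiplicative backoff (beta = 0.5) shown for contrast.
    cwnd = cwnd / 2;
  }
  // Enforce the minimum window first, then derive ssthresh from the
  // clamped value, mirroring the reordering in the patch.
  cwnd = std::max(cwnd, min_congestion_window);
  return {cwnd, cwnd};
}

int main() {
  // Three back-to-back losses starting from cwnd = 20 packets:
  // SSLR yields 19, 18, 17 rather than 10, 5, 2.
  QuicPacketCount cwnd = 20;
  for (int i = 1; i <= 3; ++i) {
    cwnd = OnLossExitingSlowStart(cwnd, /*min_congestion_window=*/2,
                                  /*slow_start_large_reduction=*/true)
               .congestion_window;
    std::cout << "cwnd after loss " << i << ": " << cwnd << "\n";
  }
  return 0;
}
```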