// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/quic/congestion_control/tcp_cubic_sender.h"

#include <algorithm>

#include "base/metrics/histogram_macros.h"
#include "net/quic/congestion_control/prr_sender.h"
#include "net/quic/congestion_control/rtt_stats.h"
#include "net/quic/crypto/crypto_protocol.h"
#include "net/quic/proto/cached_network_parameters.pb.h"
#include "net/quic/quic_bug_tracker.h"
#include "net/quic/quic_flags.h"

using std::max;
using std::min;

namespace net {

namespace {
// Constants based on TCP defaults.
// The minimum cwnd based on RFC 3782 (TCP NewReno) for cwnd reductions on a
// fast retransmission. The cwnd after a timeout is still 1.
const QuicPacketCount kDefaultMinimumCongestionWindow = 2;
const QuicByteCount kMaxBurstBytes = 3 * kDefaultTCPMSS;
const float kRenoBeta = 0.7f;               // Reno backoff factor.
const uint32_t kDefaultNumConnections = 2;  // N-connection emulation.
}  // namespace

TcpCubicSender::TcpCubicSender(const QuicClock* clock,
                               const RttStats* rtt_stats,
                               bool reno,
                               QuicPacketCount initial_tcp_congestion_window,
                               QuicPacketCount max_tcp_congestion_window,
                               QuicConnectionStats* stats)
    : cubic_(clock),
      rtt_stats_(rtt_stats),
      stats_(stats),
      reno_(reno),
      num_connections_(kDefaultNumConnections),
      congestion_window_count_(0),
      largest_sent_packet_number_(0),
      largest_acked_packet_number_(0),
      largest_sent_at_last_cutback_(0),
      congestion_window_(initial_tcp_congestion_window),
      min_congestion_window_(kDefaultMinimumCongestionWindow),
      min4_mode_(false),
      slowstart_threshold_(max_tcp_congestion_window),
      last_cutback_exited_slowstart_(false),
      max_tcp_congestion_window_(max_tcp_congestion_window),
      initial_tcp_congestion_window_(initial_tcp_congestion_window),
      initial_max_tcp_congestion_window_(max_tcp_congestion_window),
      slow_start_large_reduction_(false) {}

TcpCubicSender::~TcpCubicSender() {
  UMA_HISTOGRAM_COUNTS("Net.QuicSession.FinalTcpCwnd", congestion_window_);
}

void TcpCubicSender::SetFromConfig(const QuicConfig& config,
                                   Perspective perspective) {
  if (perspective == Perspective::IS_SERVER) {
    if (config.HasReceivedConnectionOptions() &&
        ContainsQuicTag(config.ReceivedConnectionOptions(), kIW03)) {
      // Initial window experiment.
      congestion_window_ = 3;
    }
    if (config.HasReceivedConnectionOptions() &&
        ContainsQuicTag(config.ReceivedConnectionOptions(), kIW10)) {
      // Initial window experiment.
      congestion_window_ = 10;
    }
    if (config.HasReceivedConnectionOptions() &&
        ContainsQuicTag(config.ReceivedConnectionOptions(), kIW20)) {
      // Initial window experiment.
      congestion_window_ = 20;
    }
    if (config.HasReceivedConnectionOptions() &&
        ContainsQuicTag(config.ReceivedConnectionOptions(), kIW50)) {
      // Initial window experiment.
      congestion_window_ = 50;
    }
    if (config.HasReceivedConnectionOptions() &&
        ContainsQuicTag(config.ReceivedConnectionOptions(), kMIN1)) {
      // Min CWND experiment.
      min_congestion_window_ = 1;
    }
    if (config.HasReceivedConnectionOptions() &&
        ContainsQuicTag(config.ReceivedConnectionOptions(), kMIN4)) {
      // Min CWND of 4 experiment.
      min4_mode_ = true;
      min_congestion_window_ = 1;
    }
    if (config.HasReceivedConnectionOptions() &&
        ContainsQuicTag(config.ReceivedConnectionOptions(), kSSLR)) {
      // Slow Start Fast Exit experiment.
      slow_start_large_reduction_ = true;
    }
  }
}
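
// Note on SetFromConfig(): kIW03, kIW10, kIW20, kIW50, kMIN1, kMIN4 and kSSLR
// are connection-option tags received during the handshake. Because the checks
// run in sequence, a later matching option overrides an earlier one; for
// example, a client sending both IW10 and IW50 ends up with an initial window
// of 50 packets.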

void TcpCubicSender::ResumeConnectionState(
    const CachedNetworkParameters& cached_network_params,
    bool max_bandwidth_resumption) {
  QuicBandwidth bandwidth = QuicBandwidth::FromBytesPerSecond(
      max_bandwidth_resumption
          ? cached_network_params.max_bandwidth_estimate_bytes_per_second()
          : cached_network_params.bandwidth_estimate_bytes_per_second());
  QuicTime::Delta rtt_ms =
      QuicTime::Delta::FromMilliseconds(cached_network_params.min_rtt_ms());

  // Make sure CWND is in appropriate range (in case of bad data).
  QuicPacketCount new_congestion_window =
      bandwidth.ToBytesPerPeriod(rtt_ms) / kDefaultTCPMSS;
  congestion_window_ = max(min(new_congestion_window, kMaxCongestionWindow),
                           kMinCongestionWindowForBandwidthResumption);
}
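
// Worked example for ResumeConnectionState(): the resumed window is the
// bandwidth-delay product expressed in packets. Assuming kDefaultTCPMSS is
// 1460 bytes, a cached estimate of 1,460,000 bytes/s with a 100 ms min RTT
// gives 146,000 bytes per RTT, i.e. a window of 100 packets, before clamping
// to [kMinCongestionWindowForBandwidthResumption, kMaxCongestionWindow].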

void TcpCubicSender::SetNumEmulatedConnections(int num_connections) {
  num_connections_ = max(1, num_connections);
  cubic_.SetNumConnections(num_connections_);
}

void TcpCubicSender::SetMaxCongestionWindow(
    QuicByteCount max_congestion_window) {
  max_tcp_congestion_window_ = max_congestion_window / kDefaultTCPMSS;
}

float TcpCubicSender::RenoBeta() const {
  // kNConnectionBeta is the backoff factor after loss for our N-connection
  // emulation, which emulates the effective backoff of an ensemble of N
  // TCP-Reno connections on a single loss event. The effective multiplier is
  // computed as:
  return (num_connections_ - 1 + kRenoBeta) / num_connections_;
}
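
// Worked example for RenoBeta(): with the default of 2 emulated connections
// and kRenoBeta = 0.7, the multiplier is (2 - 1 + 0.7) / 2 = 0.85, so a single
// loss event shrinks the window to 85% instead of Reno's 70%. This mirrors the
// aggregate behavior of two independent Reno flows where only one of them
// backs off on the loss.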

void TcpCubicSender::OnCongestionEvent(bool rtt_updated,
                                       QuicByteCount bytes_in_flight,
                                       const CongestionVector& acked_packets,
                                       const CongestionVector& lost_packets) {
  if (rtt_updated && InSlowStart() &&
      hybrid_slow_start_.ShouldExitSlowStart(rtt_stats_->latest_rtt(),
                                             rtt_stats_->min_rtt(),
                                             congestion_window_)) {
    slowstart_threshold_ = congestion_window_;
  }
  for (CongestionVector::const_iterator it = lost_packets.begin();
       it != lost_packets.end(); ++it) {
    OnPacketLost(it->first, bytes_in_flight);
  }
  for (CongestionVector::const_iterator it = acked_packets.begin();
       it != acked_packets.end(); ++it) {
    OnPacketAcked(it->first, it->second, bytes_in_flight);
  }
}
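
// Ordering note for OnCongestionEvent(): losses are processed before acks, so
// the window cutback (and the switch into PRR-governed recovery) is in place
// before acks from the same event are handled; acks that arrive while in
// recovery then feed PRR rather than growing the congestion window.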

void TcpCubicSender::OnPacketAcked(QuicPacketNumber acked_packet_number,
                                   QuicByteCount acked_bytes,
                                   QuicByteCount bytes_in_flight) {
  largest_acked_packet_number_ =
      max(acked_packet_number, largest_acked_packet_number_);
  if (InRecovery()) {
    // PRR is used when in recovery.
    prr_.OnPacketAcked(acked_bytes);
    return;
  }
  MaybeIncreaseCwnd(acked_packet_number, bytes_in_flight);
  if (InSlowStart()) {
    hybrid_slow_start_.OnPacketAcked(acked_packet_number);
  }
}

void TcpCubicSender::OnPacketLost(QuicPacketNumber packet_number,
                                  QuicByteCount bytes_in_flight) {
  // TCP NewReno (RFC6582) says that once a loss occurs, any losses in packets
  // already sent should be treated as a single loss event, since it's expected.
  if (packet_number <= largest_sent_at_last_cutback_) {
    if (last_cutback_exited_slowstart_) {
      ++stats_->slowstart_packets_lost;
      if (slow_start_large_reduction_) {
        // Reduce congestion window by 1 for every loss.
        congestion_window_ =
            max(congestion_window_ - 1, min_congestion_window_);
        slowstart_threshold_ = congestion_window_;
      }
    }
    DVLOG(1) << "Ignoring loss for largest_missing:" << packet_number
             << " because it was sent prior to the last CWND cutback.";
    return;
  }
  ++stats_->tcp_loss_events;
  last_cutback_exited_slowstart_ = InSlowStart();
  if (InSlowStart()) {
    ++stats_->slowstart_packets_lost;
  }

  prr_.OnPacketLost(bytes_in_flight);

  // TODO(jri): Separate out all of slow start into a separate class.
  if (slow_start_large_reduction_ && InSlowStart()) {
    DCHECK_LT(1u, congestion_window_);
    congestion_window_ = congestion_window_ - 1;
  } else if (reno_) {
    congestion_window_ = congestion_window_ * RenoBeta();
  } else {
    congestion_window_ =
        cubic_.CongestionWindowAfterPacketLoss(congestion_window_);
  }
  // Enforce a minimum congestion window.
  if (congestion_window_ < min_congestion_window_) {
    congestion_window_ = min_congestion_window_;
  }
  slowstart_threshold_ = congestion_window_;
  largest_sent_at_last_cutback_ = largest_sent_packet_number_;
  // Reset the packet count from congestion avoidance mode. We start counting
  // again when we're out of recovery.
  congestion_window_count_ = 0;
  DVLOG(1) << "Incoming loss; congestion window: " << congestion_window_
           << " slowstart threshold: " << slowstart_threshold_;
}
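
// Worked example for OnPacketLost(): with a 100-packet window and the default
// two emulated connections, the Reno branch cuts the window to
// 100 * RenoBeta() = 85 packets; in Cubic mode the new window comes from
// cubic_.CongestionWindowAfterPacketLoss() instead. Either way the slow start
// threshold is set to the reduced window, so growth resumes in congestion
// avoidance, and losses of packets sent before this cutback cannot trigger
// another reduction.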

bool TcpCubicSender::OnPacketSent(QuicTime /*sent_time*/,
                                  QuicByteCount /*bytes_in_flight*/,
                                  QuicPacketNumber packet_number,
                                  QuicByteCount bytes,
                                  HasRetransmittableData is_retransmittable) {
  if (InSlowStart()) {
    ++(stats_->slowstart_packets_sent);
  }

  // Only update bytes_in_flight_ for data packets.
  if (is_retransmittable != HAS_RETRANSMITTABLE_DATA) {
    return false;
  }
  if (InRecovery()) {
    // PRR is used when in recovery.
    prr_.OnPacketSent(bytes);
  }
  DCHECK_LT(largest_sent_packet_number_, packet_number);
  largest_sent_packet_number_ = packet_number;
  hybrid_slow_start_.OnPacketSent(packet_number);
  return true;
}

QuicTime::Delta TcpCubicSender::TimeUntilSend(
    QuicTime /* now */,
    QuicByteCount bytes_in_flight,
    HasRetransmittableData has_retransmittable_data) const {
  if (has_retransmittable_data == NO_RETRANSMITTABLE_DATA) {
    DCHECK(!FLAGS_quic_respect_send_alarm2);
    // For TCP we can always send an ACK immediately.
    return QuicTime::Delta::Zero();
  }
  if (InRecovery()) {
    // PRR is used when in recovery.
    return prr_.TimeUntilSend(GetCongestionWindow(), bytes_in_flight,
                              slowstart_threshold_ * kDefaultTCPMSS);
  }
  if (GetCongestionWindow() > bytes_in_flight) {
    return QuicTime::Delta::Zero();
  }
  if (min4_mode_ && bytes_in_flight < 4 * kDefaultTCPMSS) {
    return QuicTime::Delta::Zero();
  }
  return QuicTime::Delta::Infinite();
}
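
// Semantics of TimeUntilSend(): Zero() means a packet may be sent immediately,
// Infinite() means sending is blocked until more acks arrive. With the kMIN4
// option (min4_mode_), a packet may still be sent whenever fewer than four
// full-sized packets are in flight, even if the congestion window is full.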

QuicBandwidth TcpCubicSender::PacingRate() const {
  // We pace at twice the rate of the underlying sender's bandwidth estimate
  // during slow start and 1.25x during congestion avoidance to ensure pacing
  // doesn't prevent us from filling the window.
  QuicTime::Delta srtt = rtt_stats_->smoothed_rtt();
  if (srtt.IsZero()) {
    srtt = QuicTime::Delta::FromMicroseconds(rtt_stats_->initial_rtt_us());
  }
  const QuicBandwidth bandwidth =
      QuicBandwidth::FromBytesAndTimeDelta(GetCongestionWindow(), srtt);
  return bandwidth.Scale(InSlowStart() ? 2 : 1.25);
}
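
// Worked example for PacingRate(): assuming kDefaultTCPMSS is 1460 bytes, a
// 10-packet window (14,600 bytes) with a 100 ms smoothed RTT gives a base rate
// of 146,000 bytes/s, so packets are paced at 292,000 bytes/s during slow
// start and 182,500 bytes/s during congestion avoidance.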

QuicBandwidth TcpCubicSender::BandwidthEstimate() const {
  QuicTime::Delta srtt = rtt_stats_->smoothed_rtt();
  if (srtt.IsZero()) {
    // If we haven't measured an rtt, the bandwidth estimate is unknown.
    return QuicBandwidth::Zero();
  }
  return QuicBandwidth::FromBytesAndTimeDelta(GetCongestionWindow(), srtt);
}

QuicTime::Delta TcpCubicSender::RetransmissionDelay() const {
  if (rtt_stats_->smoothed_rtt().IsZero()) {
    return QuicTime::Delta::Zero();
  }
  return rtt_stats_->smoothed_rtt().Add(
      rtt_stats_->mean_deviation().Multiply(4));
}
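
// RetransmissionDelay() follows the classic TCP retransmission timeout formula
// (RFC 6298 style): RTO = SRTT + 4 * RTTVAR, with the mean deviation used as
// the variation estimate. For example, a 100 ms smoothed RTT with a 10 ms mean
// deviation yields a 140 ms delay.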

QuicByteCount TcpCubicSender::GetCongestionWindow() const {
  return congestion_window_ * kDefaultTCPMSS;
}

bool TcpCubicSender::InSlowStart() const {
  return congestion_window_ < slowstart_threshold_;
}

QuicByteCount TcpCubicSender::GetSlowStartThreshold() const {
  return slowstart_threshold_ * kDefaultTCPMSS;
}

bool TcpCubicSender::IsCwndLimited(QuicByteCount bytes_in_flight) const {
  const QuicByteCount congestion_window_bytes = GetCongestionWindow();
  if (bytes_in_flight >= congestion_window_bytes) {
    return true;
  }
  const QuicByteCount available_bytes =
      congestion_window_bytes - bytes_in_flight;
  const bool slow_start_limited =
      InSlowStart() && bytes_in_flight > congestion_window_bytes / 2;
  return slow_start_limited || available_bytes <= kMaxBurstBytes;
}

bool TcpCubicSender::InRecovery() const {
  return largest_acked_packet_number_ <= largest_sent_at_last_cutback_ &&
         largest_acked_packet_number_ != 0;
}
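
// Recovery note: InRecovery() holds while the largest acked packet was sent no
// later than the last window cutback; recovery ends once a packet sent after
// that cutback is acked. IsCwndLimited() treats the sender as window-limited
// if the window is full, if slow start already has more than half the window
// in flight, or if less than kMaxBurstBytes (three full-sized packets) of the
// window remains unused.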

// Called when we receive an ack. Normal TCP tracks how many packets one ack
// represents, but quic has a separate ack for each packet.
void TcpCubicSender::MaybeIncreaseCwnd(QuicPacketNumber acked_packet_number,
                                       QuicByteCount bytes_in_flight) {
  QUIC_BUG_IF(InRecovery()) << "Never increase the CWND during recovery.";
  // Do not increase the congestion window unless the sender is close to using
  // the current window.
  if (!IsCwndLimited(bytes_in_flight)) {
    cubic_.OnApplicationLimited();
    return;
  }
  if (congestion_window_ >= max_tcp_congestion_window_) {
    return;
  }
  if (InSlowStart()) {
    // TCP slow start, exponential growth, increase by one for each ACK.
    ++congestion_window_;
    DVLOG(1) << "Slow start; congestion window: " << congestion_window_
             << " slowstart threshold: " << slowstart_threshold_;
    return;
  }
  // Congestion avoidance.
  if (reno_) {
    // Classic Reno congestion avoidance.
    ++congestion_window_count_;
    // Divide by num_connections to smoothly increase the CWND at a faster
    // rate than conventional Reno.
    if (congestion_window_count_ * num_connections_ >= congestion_window_) {
      ++congestion_window_;
      congestion_window_count_ = 0;
    }

    DVLOG(1) << "Reno; congestion window: " << congestion_window_
             << " slowstart threshold: " << slowstart_threshold_
             << " congestion window count: " << congestion_window_count_;
  } else {
    congestion_window_ = min(max_tcp_congestion_window_,
                             cubic_.CongestionWindowAfterAck(
                                 congestion_window_, rtt_stats_->min_rtt()));
    DVLOG(1) << "Cubic; congestion window: " << congestion_window_
             << " slowstart threshold: " << slowstart_threshold_;
  }
}
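
// Growth-rate note for MaybeIncreaseCwnd(): in slow start the window grows by
// one packet per ack, so it roughly doubles every round trip. In Reno
// congestion avoidance it grows by one packet for every
// congestion_window_ / num_connections_ acks, so with the default of two
// emulated connections it gains about two packets per round trip rather than
// the single packet per RTT of a standard Reno flow.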

void TcpCubicSender::OnRetransmissionTimeout(bool packets_retransmitted) {
  largest_sent_at_last_cutback_ = 0;
  if (!packets_retransmitted) {
    return;
  }
  cubic_.Reset();
  hybrid_slow_start_.Restart();
  slowstart_threshold_ = congestion_window_ / 2;
  congestion_window_ = min_congestion_window_;
}

void TcpCubicSender::OnConnectionMigration() {
  hybrid_slow_start_.Restart();
  cubic_.Reset();
  prr_ = PrrSender();
  congestion_window_count_ = 0;
  largest_sent_packet_number_ = 0;
  largest_acked_packet_number_ = 0;
  largest_sent_at_last_cutback_ = 0;
  congestion_window_ = initial_tcp_congestion_window_;
  slowstart_threshold_ = initial_max_tcp_congestion_window_;
  last_cutback_exited_slowstart_ = false;
  max_tcp_congestion_window_ = initial_max_tcp_congestion_window_;
}

CongestionControlType TcpCubicSender::GetCongestionControlType() const {
  return reno_ ? kReno : kCubic;
}

}  // namespace net