OLD | NEW |
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2015 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "net/quic/congestion_control/tcp_cubic_sender.h" | 5 #include "net/quic/congestion_control/tcp_cubic_bytes_sender.h" |
6 | 6 |
7 #include <algorithm> | 7 #include <algorithm> |
8 | 8 |
9 #include "base/metrics/histogram.h" | |
10 #include "net/quic/congestion_control/prr_sender.h" | 9 #include "net/quic/congestion_control/prr_sender.h" |
11 #include "net/quic/congestion_control/rtt_stats.h" | 10 #include "net/quic/congestion_control/rtt_stats.h" |
12 #include "net/quic/crypto/crypto_protocol.h" | 11 #include "net/quic/crypto/crypto_protocol.h" |
13 | 12 |
14 using std::max; | 13 using std::max; |
15 using std::min; | 14 using std::min; |
16 | 15 |
17 namespace net { | 16 namespace net { |
18 | 17 |
19 namespace { | 18 namespace { |
20 // Constants based on TCP defaults. | 19 // Constants based on TCP defaults. |
21 // The minimum cwnd based on RFC 3782 (TCP NewReno) for cwnd reductions on a | 20 // The minimum cwnd based on RFC 3782 (TCP NewReno) for cwnd reductions on a |
22 // fast retransmission. The cwnd after a timeout is still 1. | 21 // fast retransmission. |
23 const QuicPacketCount kDefaultMinimumCongestionWindow = 2; | 22 const QuicByteCount kMinimumCongestionWindow = 2 * kDefaultTCPMSS; |
24 const QuicByteCount kMaxSegmentSize = kDefaultTCPMSS; | 23 const QuicByteCount kMaxSegmentSize = kDefaultTCPMSS; |
25 const int kMaxBurstLength = 3; | 24 const int kMaxBurstLength = 3; |
26 const float kRenoBeta = 0.7f; // Reno backoff factor. | 25 const float kRenoBeta = 0.7f; // Reno backoff factor. |
27 const uint32 kDefaultNumConnections = 2; // N-connection emulation. | 26 const uint32 kDefaultNumConnections = 2; // N-connection emulation. |
28 } // namespace | 27 } // namespace |
29 | 28 |
30 TcpCubicSender::TcpCubicSender(const QuicClock* clock, | 29 TcpCubicBytesSender::TcpCubicBytesSender( |
31 const RttStats* rtt_stats, | 30 const QuicClock* clock, |
32 bool reno, | 31 const RttStats* rtt_stats, |
33 QuicPacketCount initial_tcp_congestion_window, | 32 bool reno, |
34 QuicConnectionStats* stats) | 33 QuicPacketCount initial_tcp_congestion_window, |
| 34 QuicConnectionStats* stats) |
35 : hybrid_slow_start_(clock), | 35 : hybrid_slow_start_(clock), |
36 cubic_(clock), | 36 cubic_(clock), |
37 rtt_stats_(rtt_stats), | 37 rtt_stats_(rtt_stats), |
38 stats_(stats), | 38 stats_(stats), |
39 reno_(reno), | 39 reno_(reno), |
40 num_connections_(kDefaultNumConnections), | 40 num_connections_(kDefaultNumConnections), |
41 num_acked_packets_(0), | 41 num_acked_packets_(0), |
42 largest_sent_sequence_number_(0), | 42 largest_sent_sequence_number_(0), |
43 largest_acked_sequence_number_(0), | 43 largest_acked_sequence_number_(0), |
44 largest_sent_at_last_cutback_(0), | 44 largest_sent_at_last_cutback_(0), |
45 congestion_window_(initial_tcp_congestion_window), | 45 congestion_window_(initial_tcp_congestion_window * kMaxSegmentSize), |
46 min_congestion_window_(kDefaultMinimumCongestionWindow), | |
47 slowstart_threshold_(std::numeric_limits<uint64>::max()), | 46 slowstart_threshold_(std::numeric_limits<uint64>::max()), |
48 last_cutback_exited_slowstart_(false), | 47 last_cutback_exited_slowstart_(false), |
49 clock_(clock) { | 48 clock_(clock) { |
50 } | 49 } |
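The central change in this sender is that congestion_window_ is now tracked in bytes rather than packets, so the packet-count initial window is converted once at construction. A minimal standalone sketch of that conversion, assuming QUIC's 1460-byte kDefaultTCPMSS and a 10-packet initial window purely as example inputs:

  #include <cstdint>
  #include <cstdio>

  int main() {
    const uint64_t kMaxSegmentSize = 1460;              // assumed kDefaultTCPMSS
    const uint64_t initial_tcp_congestion_window = 10;  // example, in packets
    const uint64_t congestion_window_bytes =
        initial_tcp_congestion_window * kMaxSegmentSize;
    std::printf("initial cwnd: %llu packets -> %llu bytes\n",
                (unsigned long long)initial_tcp_congestion_window,
                (unsigned long long)congestion_window_bytes);
    return 0;
  }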
51 | 50 |
52 TcpCubicSender::~TcpCubicSender() { | 51 TcpCubicBytesSender::~TcpCubicBytesSender() { |
53 UMA_HISTOGRAM_COUNTS("Net.QuicSession.FinalTcpCwnd", congestion_window_); | |
54 } | 52 } |
55 | 53 |
56 void TcpCubicSender::SetFromConfig(const QuicConfig& config, | 54 void TcpCubicBytesSender::SetFromConfig(const QuicConfig& config, |
57 Perspective perspective, | 55 Perspective perspective, |
58 bool using_pacing) { | 56 bool using_pacing) { |
59 if (perspective == Perspective::IS_SERVER) { | 57 if (perspective == Perspective::IS_SERVER) { |
60 if (config.HasReceivedConnectionOptions() && | 58 if (config.HasReceivedConnectionOptions() && |
61 ContainsQuicTag(config.ReceivedConnectionOptions(), kIW10)) { | 59 ContainsQuicTag(config.ReceivedConnectionOptions(), kIW10)) { |
62 // Initial window experiment. | 60 // Initial window experiment. |
63 congestion_window_ = 10; | 61 congestion_window_ = 10 * kMaxSegmentSize; |
64 } | |
65 if (config.HasReceivedConnectionOptions() && | |
66 ContainsQuicTag(config.ReceivedConnectionOptions(), kMIN1)) { | |
67 // Min CWND experiment. | |
68 min_congestion_window_ = 1; | |
69 } | 62 } |
70 if (using_pacing) { | 63 if (using_pacing) { |
71 // Disable the ack train mode in hystart when pacing is enabled, since it | 64 // Disable the ack train mode in hystart when pacing is enabled, since it |
72 // may be falsely triggered. | 65 // may be falsely triggered. |
73 hybrid_slow_start_.set_ack_train_detection(false); | 66 hybrid_slow_start_.set_ack_train_detection(false); |
74 } | 67 } |
75 } | 68 } |
76 } | 69 } |
77 | 70 |
78 bool TcpCubicSender::ResumeConnectionState( | 71 bool TcpCubicBytesSender::ResumeConnectionState( |
79 const CachedNetworkParameters& cached_network_params) { | 72 const CachedNetworkParameters& cached_network_params) { |
80 // If the previous bandwidth estimate is less than an hour old, store in | 73 // If the previous bandwidth estimate is less than an hour old, store in |
81 // preparation for doing bandwidth resumption. | 74 // preparation for doing bandwidth resumption. |
82 int64 seconds_since_estimate = | 75 int64 seconds_since_estimate = |
83 clock_->WallNow().ToUNIXSeconds() - cached_network_params.timestamp(); | 76 clock_->WallNow().ToUNIXSeconds() - cached_network_params.timestamp(); |
84 if (seconds_since_estimate > kNumSecondsPerHour) { | 77 if (seconds_since_estimate > kNumSecondsPerHour) { |
85 return false; | 78 return false; |
86 } | 79 } |
87 | 80 |
88 QuicBandwidth bandwidth = QuicBandwidth::FromBytesPerSecond( | 81 QuicBandwidth bandwidth = QuicBandwidth::FromBytesPerSecond( |
89 cached_network_params.bandwidth_estimate_bytes_per_second()); | 82 cached_network_params.bandwidth_estimate_bytes_per_second()); |
90 QuicTime::Delta rtt_ms = | 83 QuicTime::Delta rtt_ms = |
91 QuicTime::Delta::FromMilliseconds(cached_network_params.min_rtt_ms()); | 84 QuicTime::Delta::FromMilliseconds(cached_network_params.min_rtt_ms()); |
92 | 85 |
93 // Make sure CWND is in appropriate range (in case of bad data). | 86 // Make sure CWND is in appropriate range (in case of bad data). |
94 QuicPacketCount new_congestion_window = | 87 QuicByteCount new_congestion_window = bandwidth.ToBytesPerPeriod(rtt_ms); |
95 bandwidth.ToBytesPerPeriod(rtt_ms) / kMaxPacketSize; | 88 congestion_window_ = |
96 congestion_window_ = max( | 89 max(min(new_congestion_window, |
97 min(new_congestion_window, kMaxCongestionWindowForBandwidthResumption), | 90 kMaxCongestionWindowForBandwidthResumption * kMaxSegmentSize), |
98 kMinCongestionWindowForBandwidthResumption); | 91 kMinCongestionWindowForBandwidthResumption * kMaxSegmentSize); |
99 | 92 |
100 // TODO(rjshade): Set appropriate CWND when previous connection was in slow | 93 // TODO(rjshade): Set appropriate CWND when previous connection was in slow |
101 // start at time of estimate. | 94 // start at time of estimate. |
102 return true; | 95 return true; |
103 } | 96 } |
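The resumed window is the bandwidth-delay product of the cached estimate, clamped to the resumption limits, all now expressed in bytes. A standalone sketch under assumed inputs; the clamp constants used here (10 and 200 packets) are illustrative stand-ins, not values taken from this file:

  #include <algorithm>
  #include <cstdint>
  #include <cstdio>

  int main() {
    const uint64_t kMaxSegmentSize = 1460;                    // assumed kDefaultTCPMSS
    const uint64_t kMinResumptionPackets = 10;                // assumed clamp values
    const uint64_t kMaxResumptionPackets = 200;
    const uint64_t bandwidth_bytes_per_second = 5000000 / 8;  // 5 Mbps estimate
    const double min_rtt_seconds = 0.1;                       // 100 ms estimate

    // Bandwidth-delay product in bytes, then clamped to [min, max] in bytes.
    const uint64_t bdp = static_cast<uint64_t>(
        bandwidth_bytes_per_second * min_rtt_seconds);
    const uint64_t cwnd = std::max(
        std::min(bdp, kMaxResumptionPackets * kMaxSegmentSize),
        kMinResumptionPackets * kMaxSegmentSize);
    std::printf("BDP = %llu bytes, resumed cwnd = %llu bytes\n",
                (unsigned long long)bdp, (unsigned long long)cwnd);
    return 0;
  }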
104 | 97 |
105 void TcpCubicSender::SetNumEmulatedConnections(int num_connections) { | 98 void TcpCubicBytesSender::SetNumEmulatedConnections(int num_connections) { |
106 num_connections_ = max(1, num_connections); | 99 num_connections_ = max(1, num_connections); |
107 cubic_.SetNumConnections(num_connections_); | 100 cubic_.SetNumConnections(num_connections_); |
108 } | 101 } |
109 | 102 |
110 float TcpCubicSender::RenoBeta() const { | 103 float TcpCubicBytesSender::RenoBeta() const { |
111 // kNConnectionBeta is the backoff factor after loss for our N-connection | 104 // kNConnectionBeta is the backoff factor after loss for our N-connection |
112 // emulation, which emulates the effective backoff of an ensemble of N | 105 // emulation, which emulates the effective backoff of an ensemble of N |
113 // TCP-Reno connections on a single loss event. The effective multiplier is | 106 // TCP-Reno connections on a single loss event. The effective multiplier is |
114 // computed as: | 107 // computed as: |
115 return (num_connections_ - 1 + kRenoBeta) / num_connections_; | 108 return (num_connections_ - 1 + kRenoBeta) / num_connections_; |
116 } | 109 } |
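With the default two-connection emulation (num_connections_ = 2, kRenoBeta = 0.7), the multiplier works out to (2 - 1 + 0.7) / 2 = 0.85, so a loss cuts the window to 85% rather than single-connection Reno's 70%. A standalone check of that arithmetic:

  #include <cstdio>

  int main() {
    const float kRenoBeta = 0.7f;   // single-connection Reno backoff
    const int num_connections = 2;  // kDefaultNumConnections
    const float effective_beta =
        (num_connections - 1 + kRenoBeta) / num_connections;
    std::printf("effective beta = %.2f\n", effective_beta);  // prints 0.85
    return 0;
  }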
117 | 110 |
118 void TcpCubicSender::OnCongestionEvent( | 111 void TcpCubicBytesSender::OnCongestionEvent( |
119 bool rtt_updated, | 112 bool rtt_updated, |
120 QuicByteCount bytes_in_flight, | 113 QuicByteCount bytes_in_flight, |
121 const CongestionVector& acked_packets, | 114 const CongestionVector& acked_packets, |
122 const CongestionVector& lost_packets) { | 115 const CongestionVector& lost_packets) { |
123 if (rtt_updated && InSlowStart() && | 116 if (rtt_updated && InSlowStart() && |
124 hybrid_slow_start_.ShouldExitSlowStart(rtt_stats_->latest_rtt(), | 117 hybrid_slow_start_.ShouldExitSlowStart( |
125 rtt_stats_->min_rtt(), | 118 rtt_stats_->latest_rtt(), rtt_stats_->min_rtt(), |
126 congestion_window_)) { | 119 congestion_window_ / kMaxSegmentSize)) { |
127 slowstart_threshold_ = congestion_window_; | 120 slowstart_threshold_ = congestion_window_; |
128 } | 121 } |
129 for (CongestionVector::const_iterator it = lost_packets.begin(); | 122 for (CongestionVector::const_iterator it = lost_packets.begin(); |
130 it != lost_packets.end(); ++it) { | 123 it != lost_packets.end(); ++it) { |
131 OnPacketLost(it->first, bytes_in_flight); | 124 OnPacketLost(it->first, bytes_in_flight); |
132 } | 125 } |
133 for (CongestionVector::const_iterator it = acked_packets.begin(); | 126 for (CongestionVector::const_iterator it = acked_packets.begin(); |
134 it != acked_packets.end(); ++it) { | 127 it != acked_packets.end(); ++it) { |
135 OnPacketAcked(it->first, it->second.bytes_sent, bytes_in_flight); | 128 OnPacketAcked(it->first, it->second.bytes_sent, bytes_in_flight); |
136 } | 129 } |
137 } | 130 } |
138 | 131 |
139 void TcpCubicSender::OnPacketAcked( | 132 void TcpCubicBytesSender::OnPacketAcked( |
140 QuicPacketSequenceNumber acked_sequence_number, | 133 QuicPacketSequenceNumber acked_sequence_number, |
141 QuicByteCount acked_bytes, | 134 QuicByteCount acked_bytes, |
142 QuicByteCount bytes_in_flight) { | 135 QuicByteCount bytes_in_flight) { |
143 largest_acked_sequence_number_ = max(acked_sequence_number, | 136 largest_acked_sequence_number_ = |
144 largest_acked_sequence_number_); | 137 max(acked_sequence_number, largest_acked_sequence_number_); |
145 if (InRecovery()) { | 138 if (InRecovery()) { |
146 // PRR is used when in recovery. | 139 // PRR is used when in recovery. |
147 prr_.OnPacketAcked(acked_bytes); | 140 prr_.OnPacketAcked(acked_bytes); |
148 return; | 141 return; |
149 } | 142 } |
150 MaybeIncreaseCwnd(acked_sequence_number, bytes_in_flight); | 143 MaybeIncreaseCwnd(acked_sequence_number, acked_bytes, bytes_in_flight); |
151 // TODO(ianswett): Should this even be called when not in slow start? | 144 // TODO(ianswett): Should this even be called when not in slow start? |
152 hybrid_slow_start_.OnPacketAcked(acked_sequence_number, InSlowStart()); | 145 hybrid_slow_start_.OnPacketAcked(acked_sequence_number, InSlowStart()); |
153 } | 146 } |
154 | 147 |
155 void TcpCubicSender::OnPacketLost(QuicPacketSequenceNumber sequence_number, | 148 void TcpCubicBytesSender::OnPacketLost(QuicPacketSequenceNumber sequence_number, |
156 QuicByteCount bytes_in_flight) { | 149 QuicByteCount bytes_in_flight) { |
157 // TCP NewReno (RFC6582) says that once a loss occurs, any losses in packets | 150 // TCP NewReno (RFC6582) says that once a loss occurs, any losses in packets |
158 // already sent should be treated as a single loss event, since it's expected. | 151 // already sent should be treated as a single loss event, since it's expected. |
159 if (sequence_number <= largest_sent_at_last_cutback_) { | 152 if (sequence_number <= largest_sent_at_last_cutback_) { |
160 if (last_cutback_exited_slowstart_) { | 153 if (last_cutback_exited_slowstart_) { |
161 ++stats_->slowstart_packets_lost; | 154 ++stats_->slowstart_packets_lost; |
162 } | 155 } |
163 DVLOG(1) << "Ignoring loss for largest_missing:" << sequence_number | 156 DVLOG(1) << "Ignoring loss for largest_missing:" << sequence_number |
164 << " because it was sent prior to the last CWND cutback."; | 157 << " because it was sent prior to the last CWND cutback."; |
165 return; | 158 return; |
166 } | 159 } |
167 ++stats_->tcp_loss_events; | 160 ++stats_->tcp_loss_events; |
168 last_cutback_exited_slowstart_ = InSlowStart(); | 161 last_cutback_exited_slowstart_ = InSlowStart(); |
169 if (InSlowStart()) { | 162 if (InSlowStart()) { |
170 ++stats_->slowstart_packets_lost; | 163 ++stats_->slowstart_packets_lost; |
171 } | 164 } |
172 | 165 |
173 prr_.OnPacketLost(bytes_in_flight); | 166 prr_.OnPacketLost(bytes_in_flight); |
174 | 167 |
175 if (reno_) { | 168 if (reno_) { |
176 congestion_window_ = congestion_window_ * RenoBeta(); | 169 congestion_window_ = congestion_window_ * RenoBeta(); |
177 } else { | 170 } else { |
178 congestion_window_ = | 171 congestion_window_ = |
179 cubic_.CongestionWindowAfterPacketLoss(congestion_window_); | 172 cubic_.CongestionWindowAfterPacketLoss(congestion_window_); |
180 } | 173 } |
181 slowstart_threshold_ = congestion_window_; | 174 slowstart_threshold_ = congestion_window_; |
182 // Enforce a minimum congestion window. | 175 // Enforce TCP's minimum congestion window of 2*MSS. |
183 if (congestion_window_ < min_congestion_window_) { | 176 if (congestion_window_ < kMinimumCongestionWindow) { |
184 congestion_window_ = min_congestion_window_; | 177 congestion_window_ = kMinimumCongestionWindow; |
185 } | 178 } |
186 largest_sent_at_last_cutback_ = largest_sent_sequence_number_; | 179 largest_sent_at_last_cutback_ = largest_sent_sequence_number_; |
187 // reset packet count from congestion avoidance mode. We start | 180 // Reset packet count from congestion avoidance mode. We start counting again |
188 // counting again when we're out of recovery. | 181 // when we're out of recovery. |
189 num_acked_packets_ = 0; | 182 num_acked_packets_ = 0; |
190 DVLOG(1) << "Incoming loss; congestion window: " << congestion_window_ | 183 DVLOG(1) << "Incoming loss; congestion window: " << congestion_window_ |
191 << " slowstart threshold: " << slowstart_threshold_; | 184 << " slowstart threshold: " << slowstart_threshold_; |
192 } | 185 } |
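Putting the Reno branch of this loss handler together: with the 0.85 multiplier, a 100,000-byte window drops to 85,000 bytes, the slow start threshold follows it, and the 2 * MSS floor (2,920 bytes at a 1460-byte MSS) only kicks in once the window is already very small. A standalone sketch with example numbers:

  #include <algorithm>
  #include <cstdint>
  #include <cstdio>

  int main() {
    const uint64_t kMaxSegmentSize = 1460;  // assumed kDefaultTCPMSS
    const uint64_t kMinimumCongestionWindow = 2 * kMaxSegmentSize;
    const float effective_beta = 0.85f;     // two-connection RenoBeta()

    uint64_t cwnd = 100000;  // example window, in bytes
    cwnd = static_cast<uint64_t>(cwnd * effective_beta);
    const uint64_t ssthresh = cwnd;
    cwnd = std::max(cwnd, kMinimumCongestionWindow);
    std::printf("cwnd after loss = %llu bytes, ssthresh = %llu bytes\n",
                (unsigned long long)cwnd, (unsigned long long)ssthresh);
    return 0;
  }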
193 | 186 |
194 bool TcpCubicSender::OnPacketSent(QuicTime /*sent_time*/, | 187 bool TcpCubicBytesSender::OnPacketSent( |
195 QuicByteCount /*bytes_in_flight*/, | 188 QuicTime /*sent_time*/, |
196 QuicPacketSequenceNumber sequence_number, | 189 QuicByteCount /*bytes_in_flight*/, |
197 QuicByteCount bytes, | 190 QuicPacketSequenceNumber sequence_number, |
198 HasRetransmittableData is_retransmittable) { | 191 QuicByteCount bytes, |
| 192 HasRetransmittableData is_retransmittable) { |
199 // Only update bytes_in_flight_ for data packets. | 193 // Only update bytes_in_flight_ for data packets. |
200 if (is_retransmittable != HAS_RETRANSMITTABLE_DATA) { | 194 if (is_retransmittable != HAS_RETRANSMITTABLE_DATA) { |
201 return false; | 195 return false; |
202 } | 196 } |
203 if (InRecovery()) { | 197 if (InRecovery()) { |
204 // PRR is used when in recovery. | 198 // PRR is used when in recovery. |
205 prr_.OnPacketSent(bytes); | 199 prr_.OnPacketSent(bytes); |
206 } | 200 } |
207 DCHECK_LT(largest_sent_sequence_number_, sequence_number); | 201 DCHECK_LT(largest_sent_sequence_number_, sequence_number); |
208 largest_sent_sequence_number_ = sequence_number; | 202 largest_sent_sequence_number_ = sequence_number; |
209 hybrid_slow_start_.OnPacketSent(sequence_number); | 203 hybrid_slow_start_.OnPacketSent(sequence_number); |
210 return true; | 204 return true; |
211 } | 205 } |
212 | 206 |
213 QuicTime::Delta TcpCubicSender::TimeUntilSend( | 207 QuicTime::Delta TcpCubicBytesSender::TimeUntilSend( |
214 QuicTime /* now */, | 208 QuicTime /* now */, |
215 QuicByteCount bytes_in_flight, | 209 QuicByteCount bytes_in_flight, |
216 HasRetransmittableData has_retransmittable_data) const { | 210 HasRetransmittableData has_retransmittable_data) const { |
217 if (has_retransmittable_data == NO_RETRANSMITTABLE_DATA) { | 211 if (has_retransmittable_data == NO_RETRANSMITTABLE_DATA) { |
218 // For TCP we can always send an ACK immediately. | 212 // For TCP we can always send an ACK immediately. |
219 return QuicTime::Delta::Zero(); | 213 return QuicTime::Delta::Zero(); |
220 } | 214 } |
221 if (InRecovery()) { | 215 if (InRecovery()) { |
222 // PRR is used when in recovery. | 216 // PRR is used when in recovery. |
223 return prr_.TimeUntilSend(GetCongestionWindow(), bytes_in_flight, | 217 return prr_.TimeUntilSend(GetCongestionWindow(), bytes_in_flight, |
224 slowstart_threshold_ * kMaxSegmentSize); | 218 slowstart_threshold_); |
225 } | 219 } |
226 if (GetCongestionWindow() > bytes_in_flight) { | 220 if (GetCongestionWindow() > bytes_in_flight) { |
227 return QuicTime::Delta::Zero(); | 221 return QuicTime::Delta::Zero(); |
228 } | 222 } |
229 return QuicTime::Delta::Infinite(); | 223 return QuicTime::Delta::Infinite(); |
230 } | 224 } |
231 | 225 |
232 QuicBandwidth TcpCubicSender::PacingRate() const { | 226 QuicBandwidth TcpCubicBytesSender::PacingRate() const { |
233 // We pace at twice the rate of the underlying sender's bandwidth estimate | 227 // We pace at twice the rate of the underlying sender's bandwidth estimate |
234 // during slow start and 1.25x during congestion avoidance to ensure pacing | 228 // during slow start and 1.25x during congestion avoidance to ensure pacing |
235 // doesn't prevent us from filling the window. | 229 // doesn't prevent us from filling the window. |
236 QuicTime::Delta srtt = rtt_stats_->smoothed_rtt(); | 230 QuicTime::Delta srtt = rtt_stats_->smoothed_rtt(); |
237 if (srtt.IsZero()) { | 231 if (srtt.IsZero()) { |
238 srtt = QuicTime::Delta::FromMicroseconds(rtt_stats_->initial_rtt_us()); | 232 srtt = QuicTime::Delta::FromMicroseconds(rtt_stats_->initial_rtt_us()); |
239 } | 233 } |
240 const QuicBandwidth bandwidth = | 234 const QuicBandwidth bandwidth = |
241 QuicBandwidth::FromBytesAndTimeDelta(GetCongestionWindow(), srtt); | 235 QuicBandwidth::FromBytesAndTimeDelta(GetCongestionWindow(), srtt); |
242 return bandwidth.Scale(InSlowStart() ? 2 : 1.25); | 236 return bandwidth.Scale(InSlowStart() ? 2 : 1.25); |
243 } | 237 } |
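As a worked example of the scaling above: a 14,600-byte window over a 50 ms smoothed RTT gives a base rate of 292,000 bytes/s, paced at 584,000 bytes/s in slow start or 365,000 bytes/s in congestion avoidance. A minimal sketch with those example inputs:

  #include <cstdint>
  #include <cstdio>

  int main() {
    const uint64_t cwnd_bytes = 14600;  // example window (10 * 1460)
    const double srtt_seconds = 0.05;   // example smoothed RTT
    const bool in_slow_start = true;
    const double base_rate = cwnd_bytes / srtt_seconds;
    const double pacing_rate = base_rate * (in_slow_start ? 2.0 : 1.25);
    std::printf("base %.0f B/s, paced %.0f B/s\n", base_rate, pacing_rate);
    return 0;
  }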
244 | 238 |
245 QuicBandwidth TcpCubicSender::BandwidthEstimate() const { | 239 QuicBandwidth TcpCubicBytesSender::BandwidthEstimate() const { |
246 QuicTime::Delta srtt = rtt_stats_->smoothed_rtt(); | 240 QuicTime::Delta srtt = rtt_stats_->smoothed_rtt(); |
247 if (srtt.IsZero()) { | 241 if (srtt.IsZero()) { |
248 // If we haven't measured an rtt, the bandwidth estimate is unknown. | 242 // If we haven't measured an rtt, the bandwidth estimate is unknown. |
249 return QuicBandwidth::Zero(); | 243 return QuicBandwidth::Zero(); |
250 } | 244 } |
251 return QuicBandwidth::FromBytesAndTimeDelta(GetCongestionWindow(), srtt); | 245 return QuicBandwidth::FromBytesAndTimeDelta(GetCongestionWindow(), srtt); |
252 } | 246 } |
253 | 247 |
254 bool TcpCubicSender::HasReliableBandwidthEstimate() const { | 248 bool TcpCubicBytesSender::HasReliableBandwidthEstimate() const { |
255 return !InSlowStart() && !InRecovery() && | 249 return !InSlowStart() && !InRecovery() && |
256 !rtt_stats_->smoothed_rtt().IsZero();; | 250 !rtt_stats_->smoothed_rtt().IsZero(); |
257 } | 251 } |
258 | 252 |
259 QuicTime::Delta TcpCubicSender::RetransmissionDelay() const { | 253 QuicTime::Delta TcpCubicBytesSender::RetransmissionDelay() const { |
260 if (rtt_stats_->smoothed_rtt().IsZero()) { | 254 if (rtt_stats_->smoothed_rtt().IsZero()) { |
261 return QuicTime::Delta::Zero(); | 255 return QuicTime::Delta::Zero(); |
262 } | 256 } |
263 return rtt_stats_->smoothed_rtt().Add( | 257 return rtt_stats_->smoothed_rtt().Add( |
264 rtt_stats_->mean_deviation().Multiply(4)); | 258 rtt_stats_->mean_deviation().Multiply(4)); |
265 } | 259 } |
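This is the classic TCP retransmission timeout of srtt + 4 * rttvar; with a 60 ms smoothed RTT and a 10 ms mean deviation it comes to 100 ms. A quick standalone check with those example values:

  #include <cstdio>

  int main() {
    const double srtt_ms = 60.0;            // example smoothed RTT
    const double mean_deviation_ms = 10.0;  // example RTT mean deviation
    const double rto_ms = srtt_ms + 4 * mean_deviation_ms;
    std::printf("retransmission delay = %.0f ms\n", rto_ms);  // 100 ms
    return 0;
  }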
266 | 260 |
267 QuicByteCount TcpCubicSender::GetCongestionWindow() const { | 261 QuicByteCount TcpCubicBytesSender::GetCongestionWindow() const { |
268 return congestion_window_ * kMaxSegmentSize; | 262 return congestion_window_; |
269 } | 263 } |
270 | 264 |
271 bool TcpCubicSender::InSlowStart() const { | 265 bool TcpCubicBytesSender::InSlowStart() const { |
272 return congestion_window_ < slowstart_threshold_; | 266 return congestion_window_ < slowstart_threshold_; |
273 } | 267 } |
274 | 268 |
275 QuicByteCount TcpCubicSender::GetSlowStartThreshold() const { | 269 QuicByteCount TcpCubicBytesSender::GetSlowStartThreshold() const { |
276 return slowstart_threshold_ * kMaxSegmentSize; | 270 return slowstart_threshold_; |
277 } | 271 } |
278 | 272 |
279 bool TcpCubicSender::IsCwndLimited(QuicByteCount bytes_in_flight) const { | 273 bool TcpCubicBytesSender::IsCwndLimited(QuicByteCount bytes_in_flight) const { |
280 const QuicByteCount congestion_window_bytes = congestion_window_ * | 274 if (bytes_in_flight >= congestion_window_) { |
281 kMaxSegmentSize; | |
282 if (bytes_in_flight >= congestion_window_bytes) { | |
283 return true; | 275 return true; |
284 } | 276 } |
285 const QuicByteCount max_burst = kMaxBurstLength * kMaxSegmentSize; | 277 const QuicByteCount max_burst = kMaxBurstLength * kMaxSegmentSize; |
286 const QuicByteCount available_bytes = | 278 const QuicByteCount available_bytes = congestion_window_ - bytes_in_flight; |
287 congestion_window_bytes - bytes_in_flight; | 279 const bool slow_start_limited = |
288 const bool slow_start_limited = InSlowStart() && | 280 InSlowStart() && bytes_in_flight > congestion_window_ / 2; |
289 bytes_in_flight > congestion_window_bytes / 2; | |
290 return slow_start_limited || available_bytes <= max_burst; | 281 return slow_start_limited || available_bytes <= max_burst; |
291 } | 282 } |
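The sender treats itself as window-limited not only when in-flight data reaches the window, but also when less than a three-segment burst of headroom remains, or when slow start has already consumed more than half the window. A standalone sketch of those conditions (slow-start state is passed in as a plain flag, since this sketch keeps no threshold of its own):

  #include <cstdint>
  #include <cstdio>

  bool IsCwndLimited(uint64_t bytes_in_flight, uint64_t cwnd, bool in_slow_start) {
    const uint64_t kMaxSegmentSize = 1460;  // assumed kDefaultTCPMSS
    const uint64_t kMaxBurstLength = 3;
    if (bytes_in_flight >= cwnd) {
      return true;
    }
    const uint64_t available_bytes = cwnd - bytes_in_flight;
    const bool slow_start_limited = in_slow_start && bytes_in_flight > cwnd / 2;
    return slow_start_limited || available_bytes <= kMaxBurstLength * kMaxSegmentSize;
  }

  int main() {
    // Less than 3 MSS of headroom left: limited.
    std::printf("%d\n", IsCwndLimited(11000, 14600, false));  // 1
    // Plenty of headroom and not in slow start: not limited.
    std::printf("%d\n", IsCwndLimited(2000, 14600, false));   // 0
    return 0;
  }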
292 | 283 |
293 bool TcpCubicSender::InRecovery() const { | 284 bool TcpCubicBytesSender::InRecovery() const { |
294 return largest_acked_sequence_number_ <= largest_sent_at_last_cutback_ && | 285 return largest_acked_sequence_number_ <= largest_sent_at_last_cutback_ && |
295 largest_acked_sequence_number_ != 0; | 286 largest_acked_sequence_number_ != 0; |
296 } | 287 } |
297 | 288 |
298 // Called when we receive an ack. Normal TCP tracks how many packets one ack | 289 // Called when we receive an ack. Normal TCP tracks how many packets one ack |
299 // represents, but quic has a separate ack for each packet. | 290 // represents, but quic has a separate ack for each packet. |
300 void TcpCubicSender::MaybeIncreaseCwnd( | 291 void TcpCubicBytesSender::MaybeIncreaseCwnd( |
301 QuicPacketSequenceNumber acked_sequence_number, | 292 QuicPacketSequenceNumber acked_sequence_number, |
| 293 QuicByteCount acked_bytes, |
302 QuicByteCount bytes_in_flight) { | 294 QuicByteCount bytes_in_flight) { |
303 LOG_IF(DFATAL, InRecovery()) << "Never increase the CWND during recovery."; | 295 LOG_IF(DFATAL, InRecovery()) << "Never increase the CWND during recovery."; |
304 if (!IsCwndLimited(bytes_in_flight)) { | 296 if (!IsCwndLimited(bytes_in_flight)) { |
305 // We don't update the congestion window unless we are close to using the | 297 // We don't update the congestion window unless we are close to using the |
306 // window we have available. | 298 // window we have available. |
307 return; | 299 return; |
308 } | 300 } |
309 if (InSlowStart()) { | 301 if (InSlowStart()) { |
310 // TCP slow start, exponential growth, increase by one for each ACK. | 302 // TCP slow start, exponential growth, increase by one for each ACK. |
311 ++congestion_window_; | 303 congestion_window_ += kMaxSegmentSize; |
312 DVLOG(1) << "Slow start; congestion window: " << congestion_window_ | 304 DVLOG(1) << "Slow start; congestion window: " << congestion_window_ |
313 << " slowstart threshold: " << slowstart_threshold_; | 305 << " slowstart threshold: " << slowstart_threshold_; |
314 return; | 306 return; |
315 } | 307 } |
316 // Congestion avoidance | 308 // Congestion avoidance. |
317 if (reno_) { | 309 if (reno_) { |
318 // Classic Reno congestion avoidance. | 310 // Classic Reno congestion avoidance. |
319 ++num_acked_packets_; | 311 ++num_acked_packets_; |
320 // Divide by num_connections to smoothly increase the CWND at a faster | 312 // Divide by num_connections to smoothly increase the CWND at a faster rate |
321 // rate than conventional Reno. | 313 // than conventional Reno. |
322 if (num_acked_packets_ * num_connections_ >= congestion_window_) { | 314 if (num_acked_packets_ * num_connections_ >= |
323 ++congestion_window_; | 315 congestion_window_ / kMaxSegmentSize) { |
| 316 congestion_window_ += kMaxSegmentSize; |
324 num_acked_packets_ = 0; | 317 num_acked_packets_ = 0; |
325 } | 318 } |
326 | 319 |
327 DVLOG(1) << "Reno; congestion window: " << congestion_window_ | 320 DVLOG(1) << "Reno; congestion window: " << congestion_window_ |
328 << " slowstart threshold: " << slowstart_threshold_ | 321 << " slowstart threshold: " << slowstart_threshold_ |
329 << " congestion window count: " << num_acked_packets_; | 322 << " congestion window count: " << num_acked_packets_; |
330 } else { | 323 } else { |
331 congestion_window_ = cubic_.CongestionWindowAfterAck(congestion_window_, | 324 congestion_window_ = cubic_.CongestionWindowAfterAck( |
332 rtt_stats_->min_rtt()); | 325 acked_bytes, congestion_window_, rtt_stats_->min_rtt()); |
333 DVLOG(1) << "Cubic; congestion window: " << congestion_window_ | 326 DVLOG(1) << "Cubic; congestion window: " << congestion_window_ |
334 << " slowstart threshold: " << slowstart_threshold_; | 327 << " slowstart threshold: " << slowstart_threshold_; |
335 } | 328 } |
336 } | 329 } |
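In the Reno branch the byte window grows by one MSS once about cwnd / (num_connections * MSS) packets have been acked, i.e. roughly num_connections MSS of growth per round trip. A standalone simulation of that accounting over a few round trips, assuming a 1460-byte MSS and one ack per packet as in QUIC:

  #include <cstdint>
  #include <cstdio>

  int main() {
    const uint64_t kMaxSegmentSize = 1460;  // assumed kDefaultTCPMSS
    const uint64_t num_connections = 2;     // kDefaultNumConnections
    uint64_t cwnd = 10 * kMaxSegmentSize;   // example starting window, in bytes
    uint64_t num_acked_packets = 0;

    for (int rtt = 1; rtt <= 4; ++rtt) {
      // Each round trip delivers one window's worth of acks.
      const uint64_t packets_this_rtt = cwnd / kMaxSegmentSize;
      for (uint64_t i = 0; i < packets_this_rtt; ++i) {
        ++num_acked_packets;
        if (num_acked_packets * num_connections >= cwnd / kMaxSegmentSize) {
          cwnd += kMaxSegmentSize;  // grow by one MSS
          num_acked_packets = 0;
        }
      }
      std::printf("after RTT %d: cwnd = %llu bytes\n", rtt,
                  (unsigned long long)cwnd);
    }
    return 0;
  }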
337 | 330 |
338 void TcpCubicSender::OnRetransmissionTimeout(bool packets_retransmitted) { | 331 void TcpCubicBytesSender::OnRetransmissionTimeout(bool packets_retransmitted) { |
339 largest_sent_at_last_cutback_ = 0; | 332 largest_sent_at_last_cutback_ = 0; |
340 if (!packets_retransmitted) { | 333 if (!packets_retransmitted) { |
341 return; | 334 return; |
342 } | 335 } |
343 cubic_.Reset(); | 336 cubic_.Reset(); |
344 hybrid_slow_start_.Restart(); | 337 hybrid_slow_start_.Restart(); |
345 slowstart_threshold_ = congestion_window_ / 2; | 338 slowstart_threshold_ = congestion_window_ / 2; |
346 congestion_window_ = min_congestion_window_; | 339 congestion_window_ = kMinimumCongestionWindow; |
347 } | 340 } |
348 | 341 |
349 CongestionControlType TcpCubicSender::GetCongestionControlType() const { | 342 CongestionControlType TcpCubicBytesSender::GetCongestionControlType() const { |
350 return reno_ ? kReno : kCubic; | 343 return reno_ ? kReno : kCubic; |
351 } | 344 } |
352 | 345 |
353 } // namespace net | 346 } // namespace net |