// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/quic/congestion_control/cubic.h"

#include <stdint.h>
#include <algorithm>
#include <cmath>

#include "base/logging.h"
#include "net/quic/quic_flags.h"
#include "net/quic/quic_protocol.h"
#include "net/quic/quic_time.h"

using std::max;

namespace net {

namespace {

// Constants based on TCP defaults.
// The following constants are in 2^10 fractions of a second instead of ms to
// allow a right shift by 10 to be used as the divide operation.
const int kCubeScale = 40;  // 1024*1024^3 (first 1024 is from 0.100^3)
                            // where 0.100 is 100 ms which is the scaling
                            // round trip time.
const int kCubeCongestionWindowScale = 410;
const uint64_t kCubeFactor =
    (UINT64_C(1) << kCubeScale) / kCubeCongestionWindowScale;
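// With time measured in 2^10 fractions of a second, the window growth
// computed below, (410 * offset^3) >> 40, works out to roughly
// 0.4 * t^3 with t in seconds: the standard CUBIC polynomial
// W(t) = C*(t - K)^3 + W_max with C = 410/1024 ~= 0.4. kCubeFactor is 1/C in
// the same units and is used later to recover K = cbrt((W_max - cwnd) / C)
// from a window deficit.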

const uint32_t kDefaultNumConnections = 2;
const float kBeta = 0.7f;  // Default Cubic backoff factor.
// Additional backoff factor when loss occurs in the concave part of the Cubic
// curve. This additional backoff factor is expected to give up bandwidth to
// new concurrent flows and speed up convergence.
const float kBetaLastMax = 0.85f;

}  // namespace

Cubic::Cubic(const QuicClock* clock)
    : clock_(clock),
      num_connections_(kDefaultNumConnections),
      epoch_(QuicTime::Zero()),
      app_limited_start_time_(QuicTime::Zero()),
      last_update_time_(QuicTime::Zero()) {
  Reset();
}

void Cubic::SetNumConnections(int num_connections) {
  num_connections_ = num_connections;
}

float Cubic::Alpha() const {
  // TCPFriendly alpha is described in Section 3.3 of the CUBIC paper. Note
  // that beta here is a cwnd multiplier, and is equal to 1-beta from the
  // paper. We derive the equivalent alpha for an N-connection emulation as:
  const float beta = Beta();
  return 3 * num_connections_ * num_connections_ * (1 - beta) / (1 + beta);
}

float Cubic::Beta() const {
  // kNConnectionBeta is the backoff factor after loss for our N-connection
  // emulation, which emulates the effective backoff of an ensemble of N
  // TCP-Reno connections on a single loss event. The effective multiplier is
  // computed as:
  return (num_connections_ - 1 + kBeta) / num_connections_;
}
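// For the default two-connection emulation, Beta() is (2 - 1 + 0.7) / 2 =
// 0.85, i.e. a loss backs the window off to 85% of its prior value, and
// Alpha() is 3 * 4 * 0.15 / 1.85 ~= 0.97 packets of window growth per round
// trip for the Reno emulation below.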

void Cubic::Reset() {
  epoch_ = QuicTime::Zero();  // Reset time.
  app_limited_start_time_ = QuicTime::Zero();
  last_update_time_ = QuicTime::Zero();  // Reset time.
  last_congestion_window_ = 0;
  last_max_congestion_window_ = 0;
  acked_packets_count_ = 0;
  estimated_tcp_congestion_window_ = 0;
  origin_point_congestion_window_ = 0;
  time_to_origin_point_ = 0;
  last_target_congestion_window_ = 0;
}

void Cubic::OnApplicationLimited() {
  if (FLAGS_shift_quic_cubic_epoch_when_app_limited) {
    // When the sender is not using the available congestion window, Cubic's
    // epoch should not continue growing. Record the time when the sender goes
    // into an app-limited period here, to compensate later when cwnd growth
    // resumes.
    if (app_limited_start_time_ == QuicTime::Zero()) {
      app_limited_start_time_ = clock_->ApproximateNow();
    }
  } else {
    // When the sender is not using the available congestion window, Cubic's
    // epoch should not continue growing. Reset the epoch when in such a
    // period.
    epoch_ = QuicTime::Zero();
  }
}
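// The two branches differ in effect: resetting the epoch restarts the cubic
// curve from the current window on the next ack, while shifting the epoch
// resumes the curve where it left off, since the elapsed time used below then
// excludes the app-limited period.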

QuicPacketCount Cubic::CongestionWindowAfterPacketLoss(
    QuicPacketCount current_congestion_window) {
  if (current_congestion_window < last_max_congestion_window_) {
    // We never reached the old max, so assume we are competing with another
    // flow. Use our extra backoff factor to allow the other flow to go up.
    last_max_congestion_window_ =
        static_cast<int>(kBetaLastMax * current_congestion_window);
  } else {
    last_max_congestion_window_ = current_congestion_window;
  }
  epoch_ = QuicTime::Zero();  // Reset time.
  return static_cast<int>(current_congestion_window * Beta());
}
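// For example, with two emulated connections a loss at a congestion window of
// 100 packets returns 100 * Beta() = 85 packets; if that loss struck before
// the old maximum was regained, last_max_congestion_window_ is also lowered
// to 100 * kBetaLastMax = 85 packets, ceding bandwidth to competing flows.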

QuicPacketCount Cubic::CongestionWindowAfterAck(
    QuicPacketCount current_congestion_window,
    QuicTime::Delta delay_min) {
  acked_packets_count_ += 1;  // Packets acked.
  QuicTime current_time = clock_->ApproximateNow();

  // Cubic is "independent" of RTT; the update is limited by the time elapsed.
  if (last_congestion_window_ == current_congestion_window &&
      (current_time - last_update_time_ <= MaxCubicTimeInterval())) {
    return max(last_target_congestion_window_,
               estimated_tcp_congestion_window_);
  }
  last_congestion_window_ = current_congestion_window;
  last_update_time_ = current_time;

  if (!epoch_.IsInitialized()) {
    // First ACK after a loss event.
    epoch_ = current_time;     // Start of epoch.
    acked_packets_count_ = 1;  // Reset count.
    // Reset estimated_tcp_congestion_window_ to be in sync with cubic.
    estimated_tcp_congestion_window_ = current_congestion_window;
    if (last_max_congestion_window_ <= current_congestion_window) {
      time_to_origin_point_ = 0;
      origin_point_congestion_window_ = current_congestion_window;
    } else {
      // K = cbrt((W_max - cwnd) / C), in 2^10 fractions of a second: the time
      // the cubic curve takes to climb back to the pre-loss maximum.
      time_to_origin_point_ = static_cast<uint32_t>(
          cbrt(kCubeFactor *
               (last_max_congestion_window_ - current_congestion_window)));
      origin_point_congestion_window_ = last_max_congestion_window_;
    }
  } else {
    // If the sender was app-limited, freeze congestion window growth during
    // the app-limited period. Continue growth now by shifting the epoch start
    // through the app-limited period.
    if (FLAGS_shift_quic_cubic_epoch_when_app_limited &&
        app_limited_start_time_ != QuicTime::Zero()) {
      QuicTime::Delta shift = current_time - app_limited_start_time_;
      DVLOG(1) << "Shifting epoch for quiescence by " << shift.ToMicroseconds();
      epoch_ = epoch_ + shift;
      app_limited_start_time_ = QuicTime::Zero();
    }
  }

  // Change the time unit from microseconds to 2^10 fractions of a second.
  // Take the round trip time into account. This is done to allow us to use a
  // shift as the divide operator.
  int64_t elapsed_time =
      ((current_time + delay_min - epoch_).ToMicroseconds() << 10) /
      kNumMicrosPerSecond;
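  // The shift by 10 followed by division by kNumMicrosPerSecond converts
  // microseconds into 2^10 fractions of a second: e.g. 500 ms (500000 us)
  // becomes exactly 512 units. Adding delay_min means the target computed
  // below is the cubic window one min-RTT in the future, as in the CUBIC
  // paper's W_cubic(t + RTT), so the window reaches it after about one RTT
  // of growth.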

  int64_t offset = time_to_origin_point_ - elapsed_time;
  QuicPacketCount delta_congestion_window =
      (kCubeCongestionWindowScale * offset * offset * offset) >> kCubeScale;

  QuicPacketCount target_congestion_window =
      origin_point_congestion_window_ - delta_congestion_window;
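  // The above evaluates the cubic: offset is K - t, so delta_congestion_window
  // is C*(K - t)^3 and the target is W_max - C*(K - t)^3, which equals
  // W(t) = C*(t - K)^3 + W_max. Once elapsed_time exceeds
  // time_to_origin_point_, offset turns negative; delta then wraps as an
  // unsigned value and the subtraction lands above the origin point, tracing
  // the convex part of the curve.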

  DCHECK_LT(0u, estimated_tcp_congestion_window_);
  // With dynamic beta/alpha based on the number of active streams, it is
  // possible for required_ack_count to become much lower than
  // acked_packets_count_ suddenly, leading to more than one iteration through
  // the following loop.
  while (true) {
    // Update the estimated TCP congestion_window.
    QuicPacketCount required_ack_count = static_cast<QuicPacketCount>(
        estimated_tcp_congestion_window_ / Alpha());
    if (acked_packets_count_ < required_ack_count) {
      break;
    }
    acked_packets_count_ -= required_ack_count;
    estimated_tcp_congestion_window_++;
  }
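  // The loop above is the Reno emulation: the estimated window grows by one
  // packet per estimated_tcp_congestion_window_ / Alpha() acks, equivalent to
  // Alpha() packets per round trip. With Alpha() ~= 0.97 and a window of 100
  // packets, an increment requires roughly 102 acks.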

  // We have a new cubic congestion window.
  last_target_congestion_window_ = target_congestion_window;

  // Compute the target congestion_window based on the cubic target and the
  // estimated TCP congestion_window; use the highest (fastest).
  if (target_congestion_window < estimated_tcp_congestion_window_) {
    target_congestion_window = estimated_tcp_congestion_window_;
  }

  DVLOG(1) << "Final target congestion_window: " << target_congestion_window;
  return target_congestion_window;
}

}  // namespace net