OLD | NEW |
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "net/quic/congestion_control/cubic.h" | 5 #include "net/quic/congestion_control/cubic.h" |
6 | 6 |
7 #include <algorithm> | 7 #include <algorithm> |
8 | 8 |
9 #include "base/basictypes.h" | 9 #include "base/basictypes.h" |
10 #include "base/logging.h" | 10 #include "base/logging.h" |
11 #include "base/time/time.h" | 11 #include "base/time/time.h" |
12 #include "net/quic/congestion_control/cube_root.h" | 12 #include "net/quic/congestion_control/cube_root.h" |
13 #include "net/quic/quic_protocol.h" | 13 #include "net/quic/quic_protocol.h" |
14 | 14 |
15 using std::max; | 15 using std::max; |
16 | 16 |
17 namespace net { | 17 namespace net { |
18 | 18 |
19 namespace { | 19 namespace { |
20 // Constants based on TCP defaults. | 20 // Constants based on TCP defaults. |
21 // The following constants are in 2^10 fractions of a second instead of ms to | 21 // The following constants are in 2^10 fractions of a second instead of ms to |
22 // allow a 10 shift right to divide. | 22 // allow a 10 shift right to divide. |
23 const int kCubeScale = 40; // 1024*1024^3 (first 1024 is from 0.100^3) | 23 const int kCubeScale = 40; // 1024*1024^3 (first 1024 is from 0.100^3) |
24 // where 0.100 is 100 ms which is the scaling | 24 // where 0.100 is 100 ms which is the scaling |
25 // round trip time. | 25 // round trip time. |
26 const int kCubeCongestionWindowScale = 410; | 26 const int kCubeCongestionWindowScale = 410; |
27 const uint64 kCubeFactor = (GG_UINT64_C(1) << kCubeScale) / | 27 const uint64 kCubeFactor = (GG_UINT64_C(1) << kCubeScale) / |
28 kCubeCongestionWindowScale; | 28 kCubeCongestionWindowScale; |
29 const uint32 kBetaSPDY = 939; // Back off factor after loss for SPDY, reduces | 29 |
30 // the CWND by 1/12th. | 30 const uint32 kNumConnections = 2; |
31 const uint32 kBetaLastMax = 871; // Additional back off factor after loss for | 31 const float kBeta = static_cast<float>(0.7); // Default Cubic backoff factor. |
32 // the stored max value. | 32 // Additional backoff factor when loss occurs in the concave part of the Cubic |
| 33 // curve. This additional backoff factor is expected to give up bandwidth to |
| 34 // new concurrent flows and speed up convergence. |
| 35 const float kBetaLastMax = static_cast<float>(0.85); |
| 36 |
| 37 // kNConnectionBeta is the backoff factor after loss for our N-connection |
| 38 // emulation, which emulates the effective backoff of an ensemble of N TCP-Reno |
| 39 // connections on a single loss event. The effective multiplier is computed as: |
| 40 const float kNConnectionBeta = (kNumConnections - 1 + kBeta) / kNumConnections; |
| 41 |
| 42 // TCPFriendly alpha is described in Section 3.3 of the CUBIC paper. Note that |
| 43 // kBeta here is a cwnd multiplier, and is equal to 1-beta from the CUBIC paper. |
| 44 // We derive the equivalent kNConnectionAlpha for an N-connection emulation as: |
| 45 const float kNConnectionAlpha = 3 * kNumConnections * kNumConnections * |
| 46 (1 - kNConnectionBeta) / (1 + kNConnectionBeta); |
| 47 // TODO(jri): Compute kNConnectionBeta and kNConnectionAlpha from |
| 48 // number of active streams. |
33 } // namespace | 49 } // namespace |
34 | 50 |
35 Cubic::Cubic(const QuicClock* clock) | 51 Cubic::Cubic(const QuicClock* clock) |
36 : clock_(clock), | 52 : clock_(clock), |
37 epoch_(QuicTime::Zero()), | 53 epoch_(QuicTime::Zero()), |
38 last_update_time_(QuicTime::Zero()) { | 54 last_update_time_(QuicTime::Zero()) { |
39 Reset(); | 55 Reset(); |
40 } | 56 } |
41 | 57 |
42 void Cubic::Reset() { | 58 void Cubic::Reset() { |
43 epoch_ = QuicTime::Zero(); // Reset time. | 59 epoch_ = QuicTime::Zero(); // Reset time. |
44 last_update_time_ = QuicTime::Zero(); // Reset time. | 60 last_update_time_ = QuicTime::Zero(); // Reset time. |
45 last_congestion_window_ = 0; | 61 last_congestion_window_ = 0; |
46 last_max_congestion_window_ = 0; | 62 last_max_congestion_window_ = 0; |
47 acked_packets_count_ = 0; | 63 acked_packets_count_ = 0; |
48 estimated_tcp_congestion_window_ = 0; | 64 estimated_tcp_congestion_window_ = 0; |
49 origin_point_congestion_window_ = 0; | 65 origin_point_congestion_window_ = 0; |
50 time_to_origin_point_ = 0; | 66 time_to_origin_point_ = 0; |
51 last_target_congestion_window_ = 0; | 67 last_target_congestion_window_ = 0; |
52 } | 68 } |
53 | 69 |
54 QuicTcpCongestionWindow Cubic::CongestionWindowAfterPacketLoss( | 70 QuicTcpCongestionWindow Cubic::CongestionWindowAfterPacketLoss( |
55 QuicTcpCongestionWindow current_congestion_window) { | 71 QuicTcpCongestionWindow current_congestion_window) { |
56 if (current_congestion_window < last_max_congestion_window_) { | 72 if (current_congestion_window < last_max_congestion_window_) { |
57 // We never reached the old max, so assume we are competing with another | 73 // We never reached the old max, so assume we are competing with another |
58 // flow. Use our extra back off factor to allow the other flow to go up. | 74 // flow. Use our extra back off factor to allow the other flow to go up. |
59 last_max_congestion_window_ = | 75 last_max_congestion_window_ = |
60 (kBetaLastMax * current_congestion_window) >> 10; | 76 static_cast<int>(kBetaLastMax * current_congestion_window); |
61 } else { | 77 } else { |
62 last_max_congestion_window_ = current_congestion_window; | 78 last_max_congestion_window_ = current_congestion_window; |
63 } | 79 } |
64 epoch_ = QuicTime::Zero(); // Reset time. | 80 epoch_ = QuicTime::Zero(); // Reset time. |
65 return (current_congestion_window * kBetaSPDY) >> 10; | 81 return static_cast<int>(current_congestion_window * kNConnectionBeta); |
66 } | 82 } |
67 | 83 |
68 QuicTcpCongestionWindow Cubic::CongestionWindowAfterAck( | 84 QuicTcpCongestionWindow Cubic::CongestionWindowAfterAck( |
69 QuicTcpCongestionWindow current_congestion_window, | 85 QuicTcpCongestionWindow current_congestion_window, |
70 QuicTime::Delta delay_min) { | 86 QuicTime::Delta delay_min) { |
71 acked_packets_count_ += 1; // Packets acked. | 87 acked_packets_count_ += 1; // Packets acked. |
72 QuicTime current_time = clock_->ApproximateNow(); | 88 QuicTime current_time = clock_->ApproximateNow(); |
73 | 89 |
74 // Cubic is "independent" of RTT, the update is limited by the time elapsed. | 90 // Cubic is "independent" of RTT, the update is limited by the time elapsed. |
75 if (last_congestion_window_ == current_congestion_window && | 91 if (last_congestion_window_ == current_congestion_window && |
(...skipping 31 matching lines...) |
107 int64 offset = time_to_origin_point_ - elapsed_time; | 123 int64 offset = time_to_origin_point_ - elapsed_time; |
108 QuicTcpCongestionWindow delta_congestion_window = (kCubeCongestionWindowScale | 124 QuicTcpCongestionWindow delta_congestion_window = (kCubeCongestionWindowScale |
109 * offset * offset * offset) >> kCubeScale; | 125 * offset * offset * offset) >> kCubeScale; |
110 | 126 |
111 QuicTcpCongestionWindow target_congestion_window = | 127 QuicTcpCongestionWindow target_congestion_window = |
112 origin_point_congestion_window_ - delta_congestion_window; | 128 origin_point_congestion_window_ - delta_congestion_window; |
113 | 129 |
114 // We have a new cubic congestion window. | 130 // We have a new cubic congestion window. |
115 last_target_congestion_window_ = target_congestion_window; | 131 last_target_congestion_window_ = target_congestion_window; |
116 | 132 |
117 // Update estimated TCP congestion_window. | 133 DCHECK_LT(0u, estimated_tcp_congestion_window_); |
118 // Note: we do a normal Reno congestion avoidance calculation not the | 134 // With dynamic beta/alpha based on number of active streams, it is possible |
119 // calculation described in section 3.3 TCP-friendly region of the document. | 135 // for the required_ack_count to become much lower than acked_packets_count_ |
120 while (acked_packets_count_ >= estimated_tcp_congestion_window_) { | 136 // suddenly, leading to more than one iteration through the following loop. |
121 acked_packets_count_ -= estimated_tcp_congestion_window_; | 137 while (true) { |
| 138 // Update estimated TCP congestion_window. |
| 139 uint32 required_ack_count = |
| 140 estimated_tcp_congestion_window_ / kNConnectionAlpha; |
| 141 if (acked_packets_count_ < required_ack_count) { |
| 142 break; |
| 143 } |
| 144 acked_packets_count_ -= required_ack_count; |
122 estimated_tcp_congestion_window_++; | 145 estimated_tcp_congestion_window_++; |
123 } | 146 } |
| 147 |
124 // Compute target congestion_window based on cubic target and estimated TCP | 148 // Compute target congestion_window based on cubic target and estimated TCP |
125 // congestion_window, use highest (fastest). | 149 // congestion_window, use highest (fastest). |
126 if (target_congestion_window < estimated_tcp_congestion_window_) { | 150 if (target_congestion_window < estimated_tcp_congestion_window_) { |
127 target_congestion_window = estimated_tcp_congestion_window_; | 151 target_congestion_window = estimated_tcp_congestion_window_; |
128 } | 152 } |
129 DVLOG(1) << "Target congestion_window:" << target_congestion_window; | 153 DVLOG(1) << "Target congestion_window:" << target_congestion_window; |
130 return target_congestion_window; | 154 return target_congestion_window; |
131 } | 155 } |
132 | 156 |
133 } // namespace net | 157 } // namespace net |
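
The new constants replace the old fixed-point backoff factors (kBetaSPDY = 939, kBetaLastMax = 871, both out of 1024) with floating-point factors derived from an N-connection TCP-Reno emulation. The following standalone sketch is not part of the change under review; it only reproduces the arithmetic behind the new constants, using the kNumConnections and kBeta values shown in the diff, so the resulting values can be sanity-checked against the old ones.

// Illustrative sketch only: reproduces the constant arithmetic from the diff.
#include <cstdio>

int main() {
  const int kNumConnections = 2;    // From the diff: emulate 2 TCP-Reno flows.
  const float kBeta = 0.7f;         // Default Cubic backoff (cwnd multiplier).

  // Effective multiplier after a single loss shared across N emulated
  // connections: one flow backs off by kBeta, the others keep their share.
  const float kNConnectionBeta =
      (kNumConnections - 1 + kBeta) / kNumConnections;

  // TCP-friendly alpha from Section 3.3 of the CUBIC paper, expressed in terms
  // of the cwnd multiplier (kNConnectionBeta == 1 - beta_paper).
  const float kNConnectionAlpha = 3 * kNumConnections * kNumConnections *
      (1 - kNConnectionBeta) / (1 + kNConnectionBeta);

  std::printf("kNConnectionBeta  = %.3f\n", kNConnectionBeta);   // 0.850
  std::printf("kNConnectionAlpha = %.3f\n", kNConnectionAlpha);  // ~0.973
  // For comparison, the old fixed-point factors removed by this change:
  std::printf("old kBetaSPDY     = %.3f\n", 939.0f / 1024);      // ~0.917
  std::printf("old kBetaLastMax  = %.3f\n", 871.0f / 1024);      // ~0.851
  return 0;
}

With kNumConnections = 2 the loss backoff moves from roughly 0.917 to 0.85 of the current window, while kBetaLastMax keeps essentially the same value it had in fixed point.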
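
The rewritten ack-counting loop also changes the Reno-mode growth rate: instead of adding one packet per window of acks, the estimate now grows by roughly kNConnectionAlpha packets per window. The sketch below is illustrative only (it mirrors the loop structure and variable names from the diff, but the ack feed and starting window are arbitrary) and simply demonstrates that growth rate.

// Illustrative sketch only: mimics the new ack-counting loop from the diff.
#include <cstdint>
#include <cstdio>

int main() {
  const float kNConnectionAlpha = 0.973f;  // Value derived above.
  uint32_t estimated_tcp_congestion_window = 100;  // Packets; arbitrary start.
  uint32_t acked_packets_count = 0;

  // Feed roughly ten congestion windows' worth of acks through the loop.
  for (uint32_t ack = 0; ack < 1000; ++ack) {
    ++acked_packets_count;
    while (true) {
      const uint32_t required_ack_count = static_cast<uint32_t>(
          estimated_tcp_congestion_window / kNConnectionAlpha);
      if (acked_packets_count < required_ack_count) {
        break;
      }
      acked_packets_count -= required_ack_count;
      ++estimated_tcp_congestion_window;
    }
  }
  // Prints 109: about kNConnectionAlpha (~0.97) packets of growth per window
  // of acks, versus exactly 1 per window with the old Reno-style loop.
  std::printf("window after ~10 windows of acks: %u\n",
              estimated_tcp_congestion_window);
  return 0;
}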