| OLD | NEW |
| (Empty) |
| 1 // Copyright (c) 2015 The Chromium Authors. All rights reserved. | |
| 2 // Use of this source code is governed by a BSD-style license that can be | |
| 3 // found in the LICENSE file. | |
| 4 | |
| 5 #include "net/quic/congestion_control/cubic_bytes.h" | |
| 6 | |
| 7 #include <stdint.h> | |
| 8 #include <algorithm> | |
| 9 #include <cmath> | |
| 10 | |
| 11 #include "base/logging.h" | |
| 12 #include "net/quic/quic_protocol.h" | |
| 13 | |
| 14 using std::max; | |
| 15 | |
| 16 namespace net { | |
| 17 | |
| 18 namespace { | |
| 19 | |
| 20 // Constants based on TCP defaults. | |
| 21 // The following constants are in 2^10 fractions of a second instead of ms to | |
| 22 // allow a 10 shift right to divide. | |
| 23 const int kCubeScale = 40; // 1024*1024^3 (first 1024 is from 0.100^3) | |
| 24 // where 0.100 is 100 ms which is the scaling | |
| 25 // round trip time. | |
| 26 const int kCubeCongestionWindowScale = 410; | |
| 27 // The cube factor for packets in bytes. | |
| 28 const uint64_t kCubeFactor = | |
| 29 (UINT64_C(1) << kCubeScale) / kCubeCongestionWindowScale / kDefaultTCPMSS; | |
| 30 | |
| 31 const uint32_t kDefaultNumConnections = 2; | |
| 32 const float kBeta = 0.7f; // Default Cubic backoff factor. | |
| 33 // Additional backoff factor when loss occurs in the concave part of the Cubic | |
| 34 // curve. This additional backoff factor is expected to give up bandwidth to | |
| 35 // new concurrent flows and speed up convergence. | |
| 36 const float kBetaLastMax = 0.85f; | |
| 37 | |
| 38 } // namespace | |
| 39 | |
// Constructs a CUBIC congestion-avoidance state machine operating on byte
// counts. |clock| is borrowed, not owned, and must outlive this object; it
// supplies the timestamps used to rate-limit window updates. Starts in the
// default two-connection emulation mode and calls Reset() to zero all
// per-epoch state.
CubicBytes::CubicBytes(const QuicClock* clock)
    : clock_(clock),
      num_connections_(kDefaultNumConnections),
      epoch_(QuicTime::Zero()),
      last_update_time_(QuicTime::Zero()) {
  Reset();
}
| 47 | |
// Sets the number of TCP-Reno connections this CUBIC instance emulates.
// The value feeds Alpha() and Beta(): more emulated connections yield a
// larger additive-increase factor and a shallower multiplicative backoff.
void CubicBytes::SetNumConnections(int num_connections) {
  num_connections_ = num_connections;
}
| 51 | |
| 52 float CubicBytes::Alpha() const { | |
| 53 // TCPFriendly alpha is described in Section 3.3 of the CUBIC paper. Note that | |
| 54 // beta here is a cwnd multiplier, and is equal to 1-beta from the paper. | |
| 55 // We derive the equivalent alpha for an N-connection emulation as: | |
| 56 const float beta = Beta(); | |
| 57 return 3 * num_connections_ * num_connections_ * (1 - beta) / (1 + beta); | |
| 58 } | |
| 59 | |
| 60 float CubicBytes::Beta() const { | |
| 61 // kNConnectionBeta is the backoff factor after loss for our N-connection | |
| 62 // emulation, which emulates the effective backoff of an ensemble of N | |
| 63 // TCP-Reno connections on a single loss event. The effective multiplier is | |
| 64 // computed as: | |
| 65 return (num_connections_ - 1 + kBeta) / num_connections_; | |
| 66 } | |
| 67 | |
| 68 void CubicBytes::Reset() { | |
| 69 epoch_ = QuicTime::Zero(); // Reset time. | |
| 70 last_update_time_ = QuicTime::Zero(); // Reset time. | |
| 71 last_congestion_window_ = 0; | |
| 72 last_max_congestion_window_ = 0; | |
| 73 acked_bytes_count_ = 0; | |
| 74 estimated_tcp_congestion_window_ = 0; | |
| 75 origin_point_congestion_window_ = 0; | |
| 76 time_to_origin_point_ = 0; | |
| 77 last_target_congestion_window_ = 0; | |
| 78 } | |
| 79 | |
// Called when the sender is application-limited (not using the whole
// available congestion window).
void CubicBytes::OnApplicationLimited() {
  // When sender is not using the available congestion window, the window does
  // not grow. But to be RTT-independent, Cubic assumes that the sender has been
  // using the entire window during the time since the beginning of the current
  // "epoch" (the end of the last loss recovery period). Since
  // application-limited periods break this assumption, we reset the epoch when
  // in such a period. This reset effectively freezes congestion window growth
  // through application-limited periods and allows Cubic growth to continue
  // when the entire window is being used.
  epoch_ = QuicTime::Zero();
}
| 91 | |
| 92 QuicByteCount CubicBytes::CongestionWindowAfterPacketLoss( | |
| 93 QuicByteCount current_congestion_window) { | |
| 94 if (current_congestion_window < last_max_congestion_window_) { | |
| 95 // We never reached the old max, so assume we are competing with another | |
| 96 // flow. Use our extra back off factor to allow the other flow to go up. | |
| 97 last_max_congestion_window_ = | |
| 98 static_cast<int>(kBetaLastMax * current_congestion_window); | |
| 99 } else { | |
| 100 last_max_congestion_window_ = current_congestion_window; | |
| 101 } | |
| 102 epoch_ = QuicTime::Zero(); // Reset time. | |
| 103 return static_cast<int>(current_congestion_window * Beta()); | |
| 104 } | |
| 105 | |
// Computes the new congestion window after an ack of |acked_bytes|, as the
// max of the cubic-curve target and an emulated Reno ("TCP-friendly")
// window. |current_congestion_window| is the current window in bytes;
// |delay_min| is the minimum observed RTT, used to project the window one
// RTT ahead on the cubic curve. Returns the new window in bytes.
QuicByteCount CubicBytes::CongestionWindowAfterAck(
    QuicByteCount acked_bytes,
    QuicByteCount current_congestion_window,
    QuicTime::Delta delay_min) {
  acked_bytes_count_ += acked_bytes;
  QuicTime current_time = clock_->ApproximateNow();

  // Cubic is "independent" of RTT, the update is limited by the time elapsed.
  // If neither the window nor (sufficient) time has changed, return the
  // previously computed target instead of recomputing.
  if (last_congestion_window_ == current_congestion_window &&
      (current_time - last_update_time_ <= MaxCubicTimeInterval())) {
    return max(last_target_congestion_window_,
               estimated_tcp_congestion_window_);
  }
  last_congestion_window_ = current_congestion_window;
  last_update_time_ = current_time;

  if (!epoch_.IsInitialized()) {
    // First ACK after a loss event.
    DVLOG(1) << "Start of epoch";
    epoch_ = current_time;            // Start of epoch.
    acked_bytes_count_ = acked_bytes; // Reset count.
    // Reset estimated_tcp_congestion_window_ to be in sync with cubic.
    estimated_tcp_congestion_window_ = current_congestion_window;
    if (last_max_congestion_window_ <= current_congestion_window) {
      // Already at/above the previous max: the curve's origin is here.
      time_to_origin_point_ = 0;
      origin_point_congestion_window_ = current_congestion_window;
    } else {
      // Below the previous max: K = cbrt(C * (W_max - cwnd)), the time on
      // the cubic curve needed to climb back to the previous max.
      time_to_origin_point_ = static_cast<uint32_t>(
          cbrt(kCubeFactor *
               (last_max_congestion_window_ - current_congestion_window)));
      origin_point_congestion_window_ = last_max_congestion_window_;
    }
  }
  // Change the time unit from microseconds to 2^10 fractions per second. Take
  // the round trip time in account. This is done to allow us to use shift as a
  // divide operator.
  int64_t elapsed_time =
      ((current_time + delay_min - epoch_).ToMicroseconds() << 10) /
      kNumMicrosPerSecond;

  // Offset from the curve's inflection point; negative once past the
  // origin, which makes delta negative and the target grow convexly.
  int64_t offset = time_to_origin_point_ - elapsed_time;
  QuicByteCount delta_congestion_window =
      ((kCubeCongestionWindowScale * offset * offset * offset) >> kCubeScale) *
      kDefaultTCPMSS;

  QuicByteCount target_congestion_window =
      origin_point_congestion_window_ - delta_congestion_window;

  DCHECK_LT(0u, estimated_tcp_congestion_window_);
  // Increase the window by Alpha * 1 MSS of bytes every time we ack an
  // estimated tcp window of bytes.
  estimated_tcp_congestion_window_ += acked_bytes_count_ *
                                      (Alpha() * kDefaultTCPMSS) /
                                      estimated_tcp_congestion_window_;
  acked_bytes_count_ = 0;

  // We have a new cubic congestion window.
  last_target_congestion_window_ = target_congestion_window;

  // Compute target congestion_window based on cubic target and estimated TCP
  // congestion_window, use highest (fastest).
  if (target_congestion_window < estimated_tcp_congestion_window_) {
    target_congestion_window = estimated_tcp_congestion_window_;
  }

  DVLOG(1) << "Final target congestion_window: " << target_congestion_window;
  return target_congestion_window;
}
| 174 | |
| 175 } // namespace net | |
| OLD | NEW |