| Index: net/quic/core/congestion_control/cubic_bytes.cc
|
| diff --git a/net/quic/core/congestion_control/cubic_bytes.cc b/net/quic/core/congestion_control/cubic_bytes.cc
|
| index 89ea5e9eaa1c028c926a437094da59b563701922..0c9972d0227229f376b884d183c341a1e7ea4795 100644
|
| --- a/net/quic/core/congestion_control/cubic_bytes.cc
|
| +++ b/net/quic/core/congestion_control/cubic_bytes.cc
|
| @@ -42,7 +42,8 @@ CubicBytes::CubicBytes(const QuicClock* clock)
|
| epoch_(QuicTime::Zero()),
|
| last_update_time_(QuicTime::Zero()),
|
| fix_convex_mode_(false),
|
| - fix_cubic_quantization_(false) {
|
| + fix_cubic_quantization_(false),
|
| + fix_beta_last_max_(false) {
|
| Reset();
|
| }
|
|
|
| @@ -66,6 +67,16 @@ float CubicBytes::Beta() const {
|
| return (num_connections_ - 1 + kBeta) / num_connections_;
|
| }
|
|
|
| +float CubicBytes::BetaLastMax() const {
|
| + // BetaLastMax is the additional backoff factor after loss for our
|
| + // N-connection emulation, which emulates the additional backoff of
|
| + // an ensemble of N TCP-Reno connections on a single loss event. With
|
| + // the fix enabled, the effective multiplier is (N - 1 + kBetaLastMax) / N.
|
| + return fix_beta_last_max_
|
| + ? (num_connections_ - 1 + kBetaLastMax) / num_connections_
|
| + : kBetaLastMax;
|
| +}
|
| +
|
| void CubicBytes::Reset() {
|
| epoch_ = QuicTime::Zero(); // Reset time.
|
| last_update_time_ = QuicTime::Zero(); // Reset time.
|
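
As a quick sanity check of the new multiplier, the standalone sketch below evaluates the same expression BetaLastMax() returns when the fix is enabled. It is not part of the patch, and it assumes kBetaLastMax = 0.85f, the constant defined near the top of cubic_bytes.cc.

    // Standalone sketch, not part of the CL; kBetaLastMax is an assumed value.
    #include <cstdio>

    const float kBetaLastMax = 0.85f;

    float BetaLastMaxForConnections(int num_connections) {
      // Same expression as CubicBytes::BetaLastMax() with the fix enabled.
      return (num_connections - 1 + kBetaLastMax) / num_connections;
    }

    int main() {
      for (int n = 1; n <= 4; ++n) {
        // N=1 -> 0.850, N=2 -> 0.925, N=3 -> 0.950, N=4 -> 0.963 (approx.)
        std::printf("N=%d -> %.3f\n", n, BetaLastMaxForConnections(n));
      }
      return 0;
    }

With a single emulated connection the behavior is unchanged (0.85); as N grows, the extra back-off shrinks toward 1, matching how Beta() already scales the regular multiplicative decrease for the N-connection emulation.
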
| @@ -77,7 +88,6 @@ void CubicBytes::Reset() {
|
| time_to_origin_point_ = 0;
|
| last_target_congestion_window_ = 0;
|
| fix_convex_mode_ = false;
|
| - fix_cubic_quantization_ = false;
|
| }
|
|
|
| void CubicBytes::SetFixConvexMode(bool fix_convex_mode) {
|
| @@ -88,6 +98,10 @@ void CubicBytes::SetFixCubicQuantization(bool fix_cubic_quantization) {
|
| fix_cubic_quantization_ = fix_cubic_quantization;
|
| }
|
|
|
| +void CubicBytes::SetFixBetaLastMax(bool fix_beta_last_max) {
|
| + fix_beta_last_max_ = fix_beta_last_max;
|
| +}
|
| +
|
| void CubicBytes::OnApplicationLimited() {
|
| // When sender is not using the available congestion window, the window does
|
| // not grow. But to be RTT-independent, Cubic assumes that the sender has been
|
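
For reference, the new setter is plumbed exactly like the two existing fix setters. A hypothetical call site might look like the sketch below; the CubicBytes setters shown come from this patch or the existing class (SetNumConnections is assumed from cubic_bytes.h), while how and where the sender decides to enable the fix is outside this CL and purely illustrative here.

    // Illustrative wiring only; the mechanism for enabling the fix
    // (reloadable flag, connection option, etc.) is not specified by this CL.
    CubicBytes cubic(clock);              // clock is a const QuicClock*.
    cubic.SetNumConnections(2);           // Existing N-connection emulation knob.
    cubic.SetFixConvexMode(true);         // Existing fix setters, for comparison.
    cubic.SetFixCubicQuantization(true);
    cubic.SetFixBetaLastMax(true);        // New setter added by this patch.
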
| @@ -102,11 +116,18 @@ void CubicBytes::OnApplicationLimited() {
|
|
|
| QuicByteCount CubicBytes::CongestionWindowAfterPacketLoss(
|
| QuicByteCount current_congestion_window) {
|
| - if (current_congestion_window < last_max_congestion_window_) {
|
| - // We never reached the old max, so assume we are competing with another
|
| - // flow. Use our extra back off factor to allow the other flow to go up.
|
| + // Since bytes-mode Reno slightly under-estimates the cwnd, we
|
| + // may never reach precisely the last cwnd over the course of an
|
| + // RTT. Do not interpret a slight under-estimation as competing traffic.
|
| + const QuicByteCount last_window_delta =
|
| + fix_beta_last_max_ ? kDefaultTCPMSS : 0;
|
| + if (current_congestion_window + last_window_delta <
|
| + last_max_congestion_window_) {
|
| + // We never reached the old max, so assume we are competing with
|
| + // another flow. Use our extra back off factor to allow the other
|
| + // flow to go up.
|
| last_max_congestion_window_ =
|
| - static_cast<int>(kBetaLastMax * current_congestion_window);
|
| + static_cast<int>(BetaLastMax() * current_congestion_window);
|
| } else {
|
| last_max_congestion_window_ = current_congestion_window;
|
| }
|
|
|
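
To make the effect of the one-MSS tolerance concrete, the sketch below mirrors the new branch with plain numbers. It is not part of the patch and assumes kDefaultTCPMSS = 1460 and kBetaLastMax = 0.85f, the values used elsewhere in the QUIC code.

    // Standalone sketch of the fixed CongestionWindowAfterPacketLoss() branch.
    #include <cstdint>
    #include <cstdio>

    typedef uint64_t QuicByteCount;
    const QuicByteCount kDefaultTCPMSS = 1460;   // Assumed value.
    const float kBetaLastMax = 0.85f;            // Assumed value.

    QuicByteCount NewLastMax(QuicByteCount current, QuicByteCount last_max,
                             int num_connections) {
      const QuicByteCount last_window_delta = kDefaultTCPMSS;  // Fix enabled.
      if (current + last_window_delta < last_max) {
        // Genuinely short of the old max: assume competing traffic and take
        // the extra back-off, now scaled for the N-connection emulation.
        const float beta_last_max =
            (num_connections - 1 + kBetaLastMax) / num_connections;
        return static_cast<QuicByteCount>(beta_last_max * current);
      }
      // Within one MSS of the old max: record the current window as the max.
      return current;
    }

    int main() {
      const QuicByteCount last_max = 100 * kDefaultTCPMSS;  // 146000 bytes.
      // 500 bytes short of the old max: the old code applied the extra
      // back-off; with the tolerance the window is kept as-is (145500).
      std::printf("%llu\n", static_cast<unsigned long long>(
                                NewLastMax(last_max - 500, last_max, 1)));
      // Well below the old max: the extra back-off still applies
      // (73000 -> 62050 for a single connection).
      std::printf("%llu\n", static_cast<unsigned long long>(
                                NewLastMax(last_max / 2, last_max, 1)));
      return 0;
    }

The first case is exactly the bytes-mode Reno under-estimation the new comment describes: a window that fell one MSS or less short of its previous maximum is no longer treated as if it were losing capacity to a competing flow.
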