Index: net/quic/congestion_control/cubic_bytes.cc
diff --git a/net/quic/congestion_control/cubic.cc b/net/quic/congestion_control/cubic_bytes.cc
similarity index 74%
copy from net/quic/congestion_control/cubic.cc
copy to net/quic/congestion_control/cubic_bytes.cc
index 175ede610062744dc47c30424eb868ce29e94356..77e90f46c3b2564bee6f6ffc44bff6ce27bd1340 100644
--- a/net/quic/congestion_control/cubic.cc
+++ b/net/quic/congestion_control/cubic_bytes.cc
@@ -1,16 +1,14 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright (c) 2015 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

-#include "net/quic/congestion_control/cubic.h"
+#include "net/quic/congestion_control/cubic_bytes.h"

 #include <algorithm>
 #include <cmath>

 #include "base/basictypes.h"
 #include "base/logging.h"
-#include "base/time/time.h"
-#include "net/quic/quic_flags.h"
 #include "net/quic/quic_protocol.h"

 using std::max;
@@ -26,8 +24,9 @@ const int kCubeScale = 40;  // 1024*1024^3 (first 1024 is from 0.100^3)
                             // where 0.100 is 100 ms which is the scaling
                             // round trip time.
 const int kCubeCongestionWindowScale = 410;
+// The cube factor for packets in bytes.
 const uint64 kCubeFactor = (GG_UINT64_C(1) << kCubeScale) /
-    kCubeCongestionWindowScale;
+    kCubeCongestionWindowScale / kDefaultTCPMSS;

 const uint32 kDefaultNumConnections = 2;
 const float kBeta = 0.7f;  // Default Cubic backoff factor.
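
Note: the only change to this constant is the extra division by kDefaultTCPMSS. A quick sanity check of the scaling, assuming kDefaultTCPMSS is 1460 bytes as defined in net/quic/quic_protocol.h:

    // CUBIC grows the window by C * t^3 with C ~= 0.4. In the 2^10-fractions-
    // per-second time unit this is (410 * t^3) >> 40 packets, so the packets
    // version uses kCubeFactor = 2^40 / 410 ~= 2.68e9. Dividing once more by
    // the MSS keeps the time unit unchanged when windows are kept in bytes:
    //   kCubeFactor = 2^40 / 410 / 1460 ~= 1.84e6
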
@@ -38,7 +37,7 @@ const float kBetaLastMax = 0.85f;

 }  // namespace

-Cubic::Cubic(const QuicClock* clock)
+CubicBytes::CubicBytes(const QuicClock* clock)
     : clock_(clock),
       num_connections_(kDefaultNumConnections),
       epoch_(QuicTime::Zero()),
@@ -46,11 +45,11 @@ Cubic::Cubic(const QuicClock* clock)
   Reset();
 }

-void Cubic::SetNumConnections(int num_connections) {
+void CubicBytes::SetNumConnections(int num_connections) {
   num_connections_ = num_connections;
 }

-float Cubic::Alpha() const {
+float CubicBytes::Alpha() const {
   // TCPFriendly alpha is described in Section 3.3 of the CUBIC paper. Note that
   // beta here is a cwnd multiplier, and is equal to 1-beta from the paper.
   // We derive the equivalent alpha for an N-connection emulation as:
@@ -58,7 +57,7 @@ float Cubic::Alpha() const {
   return 3 * num_connections_ * num_connections_ * (1 - beta) / (1 + beta);
 }

-float Cubic::Beta() const {
+float CubicBytes::Beta() const {
   // kNConnectionBeta is the backoff factor after loss for our N-connection
   // emulation, which emulates the effective backoff of an ensemble of N
   // TCP-Reno connections on a single loss event. The effective multiplier is
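
Note: with the defaults, the emulation adds close to one MSS per acked window. Plugging in kDefaultNumConnections = 2 (the line elided between these two hunks is `const float beta = Beta();` in the original cubic.cc):

    //   beta  = (2 - 1 + 0.7) / 2                     = 0.85
    //   alpha = 3 * 2 * 2 * (1 - 0.85) / (1 + 0.85)  ~= 0.97
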
@@ -66,20 +65,20 @@ float Cubic::Beta() const {
   return (num_connections_ - 1 + kBeta) / num_connections_;
 }

-void Cubic::Reset() {
-  epoch_ = QuicTime::Zero();  // Reset time.
+void CubicBytes::Reset() {
+  epoch_ = QuicTime::Zero();             // Reset time.
   last_update_time_ = QuicTime::Zero();  // Reset time.
   last_congestion_window_ = 0;
   last_max_congestion_window_ = 0;
-  acked_packets_count_ = 0;
+  acked_bytes_count_ = 0;
   estimated_tcp_congestion_window_ = 0;
   origin_point_congestion_window_ = 0;
   time_to_origin_point_ = 0;
   last_target_congestion_window_ = 0;
 }

-QuicPacketCount Cubic::CongestionWindowAfterPacketLoss(
-    QuicPacketCount current_congestion_window) {
+QuicByteCount CubicBytes::CongestionWindowAfterPacketLoss(
+    QuicByteCount current_congestion_window) {
   if (current_congestion_window < last_max_congestion_window_) {
     // We never reached the old max, so assume we are competing with another
     // flow. Use our extra back off factor to allow the other flow to go up.
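
Note: apart from the window type, the loss response is untouched by this patch; resetting epoch_ below forces a fresh cubic epoch on the next ack. For reference, a reconstruction of the elided remainder of the function, taken from the unchanged body in cubic.cc:

        last_max_congestion_window_ =
            static_cast<int>(kBetaLastMax * current_congestion_window);
      } else {
        last_max_congestion_window_ = current_congestion_window;
      }
      epoch_ = QuicTime::Zero();  // Reset time.
      return static_cast<int>(current_congestion_window * Beta());
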
@@ -92,10 +91,11 @@ QuicPacketCount Cubic::CongestionWindowAfterPacketLoss(
   return static_cast<int>(current_congestion_window * Beta());
 }

-QuicPacketCount Cubic::CongestionWindowAfterAck(
-    QuicPacketCount current_congestion_window,
+QuicByteCount CubicBytes::CongestionWindowAfterAck(
+    QuicByteCount acked_bytes,
+    QuicByteCount current_congestion_window,
     QuicTime::Delta delay_min) {
-  acked_packets_count_ += 1;  // Packets acked.
+  acked_bytes_count_ += acked_bytes;
   QuicTime current_time = clock_->ApproximateNow();

   // Cubic is "independent" of RTT, the update is limited by the time elapsed.
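
Note: the ack hook now takes the number of newly acked bytes instead of counting calls. A hypothetical call site, purely for illustration (the sender member names below are assumed, not part of this patch):

    // In a bytes-based sender, on every ack:
    congestion_window_ = cubic_.CongestionWindowAfterAck(
        acked_packet_length,   // bytes acknowledged by this ack
        congestion_window_,    // current cwnd, now in bytes
        min_rtt_);             // delay_min: smallest observed RTT
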
@@ -110,8 +110,8 @@ QuicPacketCount Cubic::CongestionWindowAfterAck(
   if (!epoch_.IsInitialized()) {
     // First ACK after a loss event.
     DVLOG(1) << "Start of epoch";
-    epoch_ = current_time;  // Start of epoch.
-    acked_packets_count_ = 1;  // Reset count.
+    epoch_ = current_time;             // Start of epoch.
+    acked_bytes_count_ = acked_bytes;  // Reset count.
     // Reset estimated_tcp_congestion_window_ to be in sync with cubic.
     estimated_tcp_congestion_window_ = current_congestion_window;
     if (last_max_congestion_window_ <= current_congestion_window) {
@@ -121,8 +121,7 @@ QuicPacketCount Cubic::CongestionWindowAfterAck(
       time_to_origin_point_ =
           static_cast<uint32>(cbrt(kCubeFactor * (last_max_congestion_window_ -
                                                   current_congestion_window)));
-      origin_point_congestion_window_ =
-          last_max_congestion_window_;
+      origin_point_congestion_window_ = last_max_congestion_window_;
     }
   }
   // Change the time unit from microseconds to 2^10 fractions per second. Take
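
Note: time_to_origin_point_ is K from the CUBIC paper, the time at which the cubic curve regains the pre-loss maximum, in 2^10 fractions of a second. Because the per-byte kCubeFactor already divides out the MSS, the result matches the packet-count version. A worked example, again assuming kDefaultTCPMSS == 1460:

    // Window drop of 100 full-sized packets = 146,000 bytes:
    //   K = cbrt(kCubeFactor * drop) = cbrt(1.84e6 * 1.46e5)
    //     = cbrt(2.68e11) ~= 6450 units, i.e. 6450 / 1024 ~= 6.3 seconds.
    // Packets mode gives cbrt(2^40 / 410 * 100) = cbrt(2.68e11), the same.
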
@@ -130,29 +129,23 @@ QuicPacketCount Cubic::CongestionWindowAfterAck(
   // divide operator.
   int64 elapsed_time =
       (current_time.Add(delay_min).Subtract(epoch_).ToMicroseconds() << 10) /
-      base::Time::kMicrosecondsPerSecond;
+      kNumMicrosPerSecond;

   int64 offset = time_to_origin_point_ - elapsed_time;
-  QuicPacketCount delta_congestion_window = (kCubeCongestionWindowScale
-      * offset * offset * offset) >> kCubeScale;
+  QuicByteCount delta_congestion_window =
+      ((kCubeCongestionWindowScale * offset * offset * offset) >> kCubeScale) *
+      kDefaultTCPMSS;

-  QuicPacketCount target_congestion_window =
+  QuicByteCount target_congestion_window =
       origin_point_congestion_window_ - delta_congestion_window;

   DCHECK_LT(0u, estimated_tcp_congestion_window_);
-  // With dynamic beta/alpha based on number of active streams, it is possible
-  // for the required_ack_count to become much lower than acked_packets_count_
-  // suddenly, leading to more than one iteration through the following loop.
-  while (true) {
-    // Update estimated TCP congestion_window.
-    QuicPacketCount required_ack_count = static_cast<QuicPacketCount>(
-        estimated_tcp_congestion_window_ / Alpha());
-    if (acked_packets_count_ < required_ack_count) {
-      break;
-    }
-    acked_packets_count_ -= required_ack_count;
-    estimated_tcp_congestion_window_++;
-  }
+  // Increase the window by Alpha * 1 MSS of bytes every time we ack an
+  // estimated tcp window of bytes.
+  estimated_tcp_congestion_window_ += acked_bytes_count_ *
+                                      (Alpha() * kDefaultTCPMSS) /
+                                      estimated_tcp_congestion_window_;
+  acked_bytes_count_ = 0;

   // We have a new cubic congestion window.
   last_target_congestion_window_ = target_congestion_window;
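
Note: the arithmetic above is the CUBIC window function W(t) = C * (t - K)^3 + W_max rearranged around the origin point. With offset = K - t (both in 2^10-per-second units):

    // delta  = ((410 * offset^3) >> 40) * MSS   -- C * offset^3, in bytes
    // target = W_max - delta                    -- origin_point_congestion_window_
    // Before the origin (t < K) offset is positive, so target < W_max; past it
    // offset and hence delta go negative and target rises above W_max, giving
    // the concave-then-convex cubic shape. The unsigned wrap of a "negative"
    // QuicByteCount delta cancels in the subtraction.

The replaced while-loop grew the Reno estimate by one packet per estimated-window of acks, and had to iterate when a dynamic Alpha() suddenly lowered the required ack count. The byte-counting form has the same slope, an Alpha() * kDefaultTCPMSS increase per estimated window of acked bytes, computed in a single expression with no iteration.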