Chromium Code Reviews

Unified Diff: net/quic/congestion_control/tcp_cubic_bytes_sender.cc

Issue 1014433002: Land Recent QUIC Changes until 03/09/2015. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@replaces_Perspective_enun_88006458
Patch Set: Rebase - added NET_EXPORT_PRIVATE to fix compiler error Created 5 years, 9 months ago
Index: net/quic/congestion_control/tcp_cubic_bytes_sender.cc
diff --git a/net/quic/congestion_control/tcp_cubic_sender.cc b/net/quic/congestion_control/tcp_cubic_bytes_sender.cc
similarity index 67%
copy from net/quic/congestion_control/tcp_cubic_sender.cc
copy to net/quic/congestion_control/tcp_cubic_bytes_sender.cc
index 5b24a1f25281a8ba952373c262e5ef38bb25de39..a5b2f68a25d4e5fed4b7037e37d37168bb509f79 100644
--- a/net/quic/congestion_control/tcp_cubic_sender.cc
+++ b/net/quic/congestion_control/tcp_cubic_bytes_sender.cc
@@ -1,12 +1,11 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright (c) 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "net/quic/congestion_control/tcp_cubic_sender.h"
+#include "net/quic/congestion_control/tcp_cubic_bytes_sender.h"
#include <algorithm>
-#include "base/metrics/histogram.h"
#include "net/quic/congestion_control/prr_sender.h"
#include "net/quic/congestion_control/rtt_stats.h"
#include "net/quic/crypto/crypto_protocol.h"
@@ -19,19 +18,20 @@ namespace net {
namespace {
// Constants based on TCP defaults.
// The minimum cwnd based on RFC 3782 (TCP NewReno) for cwnd reductions on a
-// fast retransmission. The cwnd after a timeout is still 1.
-const QuicPacketCount kMinimumCongestionWindow = 2;
+// fast retransmission.
+const QuicByteCount kMinimumCongestionWindow = 2 * kDefaultTCPMSS;
const QuicByteCount kMaxSegmentSize = kDefaultTCPMSS;
const int kMaxBurstLength = 3;
-const float kRenoBeta = 0.7f; // Reno backoff factor.
+const float kRenoBeta = 0.7f; // Reno backoff factor.
const uint32 kDefaultNumConnections = 2; // N-connection emulation.
} // namespace
-TcpCubicSender::TcpCubicSender(const QuicClock* clock,
- const RttStats* rtt_stats,
- bool reno,
- QuicPacketCount initial_tcp_congestion_window,
- QuicConnectionStats* stats)
+TcpCubicBytesSender::TcpCubicBytesSender(
+ const QuicClock* clock,
+ const RttStats* rtt_stats,
+ bool reno,
+ QuicPacketCount initial_tcp_congestion_window,
+ QuicConnectionStats* stats)
: hybrid_slow_start_(clock),
cubic_(clock),
rtt_stats_(rtt_stats),
@@ -42,24 +42,23 @@ TcpCubicSender::TcpCubicSender(const QuicClock* clock,
largest_sent_sequence_number_(0),
largest_acked_sequence_number_(0),
largest_sent_at_last_cutback_(0),
- congestion_window_(initial_tcp_congestion_window),
+ congestion_window_(initial_tcp_congestion_window * kMaxSegmentSize),
slowstart_threshold_(std::numeric_limits<uint64>::max()),
last_cutback_exited_slowstart_(false),
clock_(clock) {
}
-TcpCubicSender::~TcpCubicSender() {
- UMA_HISTOGRAM_COUNTS("Net.QuicSession.FinalTcpCwnd", congestion_window_);
+TcpCubicBytesSender::~TcpCubicBytesSender() {
}
-void TcpCubicSender::SetFromConfig(const QuicConfig& config,
- Perspective perspective,
- bool using_pacing) {
+void TcpCubicBytesSender::SetFromConfig(const QuicConfig& config,
+ Perspective perspective,
+ bool using_pacing) {
if (perspective == Perspective::IS_SERVER) {
if (config.HasReceivedConnectionOptions() &&
ContainsQuicTag(config.ReceivedConnectionOptions(), kIW10)) {
// Initial window experiment.
- congestion_window_ = 10;
+ congestion_window_ = 10 * kMaxSegmentSize;
}
if (using_pacing) {
// Disable the ack train mode in hystart when pacing is enabled, since it
@@ -69,7 +68,7 @@ void TcpCubicSender::SetFromConfig(const QuicConfig& config,
}
}
-bool TcpCubicSender::ResumeConnectionState(
+bool TcpCubicBytesSender::ResumeConnectionState(
const CachedNetworkParameters& cached_network_params) {
// If the previous bandwidth estimate is less than an hour old, store in
// preparation for doing bandwidth resumption.
@@ -85,23 +84,23 @@ bool TcpCubicSender::ResumeConnectionState(
QuicTime::Delta::FromMilliseconds(cached_network_params.min_rtt_ms());
// Make sure CWND is in appropriate range (in case of bad data).
- QuicPacketCount new_congestion_window =
- bandwidth.ToBytesPerPeriod(rtt_ms) / kMaxPacketSize;
- congestion_window_ = max(
- min(new_congestion_window, kMaxCongestionWindowForBandwidthResumption),
- kMinCongestionWindowForBandwidthResumption);
+ QuicByteCount new_congestion_window = bandwidth.ToBytesPerPeriod(rtt_ms);
+ congestion_window_ =
+ max(min(new_congestion_window,
+ kMaxCongestionWindowForBandwidthResumption * kMaxSegmentSize),
+ kMinCongestionWindowForBandwidthResumption * kMaxSegmentSize);
// TODO(rjshade): Set appropriate CWND when previous connection was in slow
// start at time of estimate.
return true;
}
-void TcpCubicSender::SetNumEmulatedConnections(int num_connections) {
+void TcpCubicBytesSender::SetNumEmulatedConnections(int num_connections) {
num_connections_ = max(1, num_connections);
cubic_.SetNumConnections(num_connections_);
}
-float TcpCubicSender::RenoBeta() const {
+float TcpCubicBytesSender::RenoBeta() const {
// kNConnectionBeta is the backoff factor after loss for our N-connection
// emulation, which emulates the effective backoff of an ensemble of N
// TCP-Reno connections on a single loss event. The effective multiplier is
@@ -109,15 +108,15 @@ float TcpCubicSender::RenoBeta() const {
return (num_connections_ - 1 + kRenoBeta) / num_connections_;
}
-void TcpCubicSender::OnCongestionEvent(
+void TcpCubicBytesSender::OnCongestionEvent(
bool rtt_updated,
QuicByteCount bytes_in_flight,
const CongestionVector& acked_packets,
const CongestionVector& lost_packets) {
if (rtt_updated && InSlowStart() &&
- hybrid_slow_start_.ShouldExitSlowStart(rtt_stats_->latest_rtt(),
- rtt_stats_->min_rtt(),
- congestion_window_)) {
+ hybrid_slow_start_.ShouldExitSlowStart(
+ rtt_stats_->latest_rtt(), rtt_stats_->min_rtt(),
+ congestion_window_ / kMaxSegmentSize)) {
slowstart_threshold_ = congestion_window_;
}
for (CongestionVector::const_iterator it = lost_packets.begin();
@@ -130,24 +129,24 @@ void TcpCubicSender::OnCongestionEvent(
}
}
-void TcpCubicSender::OnPacketAcked(
+void TcpCubicBytesSender::OnPacketAcked(
QuicPacketSequenceNumber acked_sequence_number,
QuicByteCount acked_bytes,
QuicByteCount bytes_in_flight) {
- largest_acked_sequence_number_ = max(acked_sequence_number,
- largest_acked_sequence_number_);
+ largest_acked_sequence_number_ =
+ max(acked_sequence_number, largest_acked_sequence_number_);
if (InRecovery()) {
// PRR is used when in recovery.
prr_.OnPacketAcked(acked_bytes);
return;
}
- MaybeIncreaseCwnd(acked_sequence_number, bytes_in_flight);
+ MaybeIncreaseCwnd(acked_sequence_number, acked_bytes, bytes_in_flight);
// TODO(ianswett): Should this even be called when not in slow start?
hybrid_slow_start_.OnPacketAcked(acked_sequence_number, InSlowStart());
}
-void TcpCubicSender::OnPacketLost(QuicPacketSequenceNumber sequence_number,
- QuicByteCount bytes_in_flight) {
+void TcpCubicBytesSender::OnPacketLost(QuicPacketSequenceNumber sequence_number,
+ QuicByteCount bytes_in_flight) {
// TCP NewReno (RFC6582) says that once a loss occurs, any losses in packets
// already sent should be treated as a single loss event, since it's expected.
if (sequence_number <= largest_sent_at_last_cutback_) {
@@ -178,18 +177,19 @@ void TcpCubicSender::OnPacketLost(QuicPacketSequenceNumber sequence_number,
congestion_window_ = kMinimumCongestionWindow;
}
largest_sent_at_last_cutback_ = largest_sent_sequence_number_;
- // reset packet count from congestion avoidance mode. We start
- // counting again when we're out of recovery.
+ // Reset packet count from congestion avoidance mode. We start counting again
+ // when we're out of recovery.
num_acked_packets_ = 0;
DVLOG(1) << "Incoming loss; congestion window: " << congestion_window_
<< " slowstart threshold: " << slowstart_threshold_;
}
-bool TcpCubicSender::OnPacketSent(QuicTime /*sent_time*/,
- QuicByteCount /*bytes_in_flight*/,
- QuicPacketSequenceNumber sequence_number,
- QuicByteCount bytes,
- HasRetransmittableData is_retransmittable) {
+bool TcpCubicBytesSender::OnPacketSent(
+ QuicTime /*sent_time*/,
+ QuicByteCount /*bytes_in_flight*/,
+ QuicPacketSequenceNumber sequence_number,
+ QuicByteCount bytes,
+ HasRetransmittableData is_retransmittable) {
// Only update bytes_in_flight_ for data packets.
if (is_retransmittable != HAS_RETRANSMITTABLE_DATA) {
return false;
@@ -204,7 +204,7 @@ bool TcpCubicSender::OnPacketSent(QuicTime /*sent_time*/,
return true;
}
-QuicTime::Delta TcpCubicSender::TimeUntilSend(
+QuicTime::Delta TcpCubicBytesSender::TimeUntilSend(
QuicTime /* now */,
QuicByteCount bytes_in_flight,
HasRetransmittableData has_retransmittable_data) const {
@@ -215,7 +215,7 @@ QuicTime::Delta TcpCubicSender::TimeUntilSend(
if (InRecovery()) {
// PRR is used when in recovery.
return prr_.TimeUntilSend(GetCongestionWindow(), bytes_in_flight,
- slowstart_threshold_ * kMaxSegmentSize);
+ slowstart_threshold_);
}
if (GetCongestionWindow() > bytes_in_flight) {
return QuicTime::Delta::Zero();
@@ -223,7 +223,7 @@ QuicTime::Delta TcpCubicSender::TimeUntilSend(
return QuicTime::Delta::Infinite();
}
-QuicBandwidth TcpCubicSender::PacingRate() const {
+QuicBandwidth TcpCubicBytesSender::PacingRate() const {
// We pace at twice the rate of the underlying sender's bandwidth estimate
// during slow start and 1.25x during congestion avoidance to ensure pacing
// doesn't prevent us from filling the window.
@@ -236,7 +236,7 @@ QuicBandwidth TcpCubicSender::PacingRate() const {
return bandwidth.Scale(InSlowStart() ? 2 : 1.25);
}
-QuicBandwidth TcpCubicSender::BandwidthEstimate() const {
+QuicBandwidth TcpCubicBytesSender::BandwidthEstimate() const {
QuicTime::Delta srtt = rtt_stats_->smoothed_rtt();
if (srtt.IsZero()) {
// If we haven't measured an rtt, the bandwidth estimate is unknown.
@@ -245,12 +245,12 @@ QuicBandwidth TcpCubicSender::BandwidthEstimate() const {
return QuicBandwidth::FromBytesAndTimeDelta(GetCongestionWindow(), srtt);
}
-bool TcpCubicSender::HasReliableBandwidthEstimate() const {
+bool TcpCubicBytesSender::HasReliableBandwidthEstimate() const {
return !InSlowStart() && !InRecovery() &&
- !rtt_stats_->smoothed_rtt().IsZero();;
+ !rtt_stats_->smoothed_rtt().IsZero();
}
-QuicTime::Delta TcpCubicSender::RetransmissionDelay() const {
+QuicTime::Delta TcpCubicBytesSender::RetransmissionDelay() const {
if (rtt_stats_->smoothed_rtt().IsZero()) {
return QuicTime::Delta::Zero();
}
@@ -258,41 +258,39 @@ QuicTime::Delta TcpCubicSender::RetransmissionDelay() const {
rtt_stats_->mean_deviation().Multiply(4));
}
-QuicByteCount TcpCubicSender::GetCongestionWindow() const {
- return congestion_window_ * kMaxSegmentSize;
+QuicByteCount TcpCubicBytesSender::GetCongestionWindow() const {
+ return congestion_window_;
}
-bool TcpCubicSender::InSlowStart() const {
+bool TcpCubicBytesSender::InSlowStart() const {
return congestion_window_ < slowstart_threshold_;
}
-QuicByteCount TcpCubicSender::GetSlowStartThreshold() const {
- return slowstart_threshold_ * kMaxSegmentSize;
+QuicByteCount TcpCubicBytesSender::GetSlowStartThreshold() const {
+ return slowstart_threshold_;
}
-bool TcpCubicSender::IsCwndLimited(QuicByteCount bytes_in_flight) const {
- const QuicByteCount congestion_window_bytes = congestion_window_ *
- kMaxSegmentSize;
- if (bytes_in_flight >= congestion_window_bytes) {
+bool TcpCubicBytesSender::IsCwndLimited(QuicByteCount bytes_in_flight) const {
+ if (bytes_in_flight >= congestion_window_) {
return true;
}
const QuicByteCount max_burst = kMaxBurstLength * kMaxSegmentSize;
- const QuicByteCount available_bytes =
- congestion_window_bytes - bytes_in_flight;
- const bool slow_start_limited = InSlowStart() &&
- bytes_in_flight > congestion_window_bytes / 2;
+ const QuicByteCount available_bytes = congestion_window_ - bytes_in_flight;
+ const bool slow_start_limited =
+ InSlowStart() && bytes_in_flight > congestion_window_ / 2;
return slow_start_limited || available_bytes <= max_burst;
}
-bool TcpCubicSender::InRecovery() const {
+bool TcpCubicBytesSender::InRecovery() const {
return largest_acked_sequence_number_ <= largest_sent_at_last_cutback_ &&
- largest_acked_sequence_number_ != 0;
+ largest_acked_sequence_number_ != 0;
}
// Called when we receive an ack. Normal TCP tracks how many packets one ack
// represents, but quic has a separate ack for each packet.
-void TcpCubicSender::MaybeIncreaseCwnd(
+void TcpCubicBytesSender::MaybeIncreaseCwnd(
QuicPacketSequenceNumber acked_sequence_number,
+ QuicByteCount acked_bytes,
QuicByteCount bytes_in_flight) {
LOG_IF(DFATAL, InRecovery()) << "Never increase the CWND during recovery.";
if (!IsCwndLimited(bytes_in_flight)) {
@@ -302,19 +300,20 @@ void TcpCubicSender::MaybeIncreaseCwnd(
}
if (InSlowStart()) {
// TCP slow start, exponential growth, increase by one for each ACK.
- ++congestion_window_;
+ congestion_window_ += kMaxSegmentSize;
DVLOG(1) << "Slow start; congestion window: " << congestion_window_
<< " slowstart threshold: " << slowstart_threshold_;
return;
}
- // Congestion avoidance
+ // Congestion avoidance.
if (reno_) {
// Classic Reno congestion avoidance.
++num_acked_packets_;
- // Divide by num_connections to smoothly increase the CWND at a faster
- // rate than conventional Reno.
- if (num_acked_packets_ * num_connections_ >= congestion_window_) {
- ++congestion_window_;
+ // Divide by num_connections to smoothly increase the CWND at a faster rate
+ // than conventional Reno.
+ if (num_acked_packets_ * num_connections_ >=
+ congestion_window_ / kMaxSegmentSize) {
+ congestion_window_ += kMaxSegmentSize;
num_acked_packets_ = 0;
}
@@ -322,14 +321,14 @@ void TcpCubicSender::MaybeIncreaseCwnd(
<< " slowstart threshold: " << slowstart_threshold_
<< " congestion window count: " << num_acked_packets_;
} else {
- congestion_window_ = cubic_.CongestionWindowAfterAck(congestion_window_,
- rtt_stats_->min_rtt());
+ congestion_window_ = cubic_.CongestionWindowAfterAck(
+ acked_bytes, congestion_window_, rtt_stats_->min_rtt());
DVLOG(1) << "Cubic; congestion window: " << congestion_window_
<< " slowstart threshold: " << slowstart_threshold_;
}
}
-void TcpCubicSender::OnRetransmissionTimeout(bool packets_retransmitted) {
+void TcpCubicBytesSender::OnRetransmissionTimeout(bool packets_retransmitted) {
largest_sent_at_last_cutback_ = 0;
if (!packets_retransmitted) {
return;
@@ -340,7 +339,7 @@ void TcpCubicSender::OnRetransmissionTimeout(bool packets_retransmitted) {
congestion_window_ = kMinimumCongestionWindow;
}
-CongestionControlType TcpCubicSender::GetCongestionControlType() const {
+CongestionControlType TcpCubicBytesSender::GetCongestionControlType() const {
return reno_ ? kReno : kCubic;
}
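
Note (illustrative, not part of the CL): the core of this change is that TcpCubicBytesSender tracks congestion_window_ in bytes rather than packets, so the initial window and the slow-start increment are scaled by kMaxSegmentSize. The standalone sketch below shows that conversion; the value 1460 for kDefaultTCPMSS is assumed here for the example.

// Minimal sketch of the packet-to-byte cwnd conversion, under the assumption
// that kDefaultTCPMSS is 1460 bytes. Names mirror the patch but this is not
// code from the CL.
#include <cstdint>
#include <iostream>

typedef uint64_t QuicByteCount;
typedef uint64_t QuicPacketCount;

const QuicByteCount kDefaultTCPMSS = 1460;  // Assumed MSS for illustration.
const QuicByteCount kMaxSegmentSize = kDefaultTCPMSS;

int main() {
  // The constructor converts the packet-count initial window to bytes.
  QuicPacketCount initial_tcp_congestion_window = 10;
  QuicByteCount congestion_window =
      initial_tcp_congestion_window * kMaxSegmentSize;  // 14600 bytes.

  // Slow start now grows the window by one MSS per ACK instead of by one
  // packet, keeping the growth rate equivalent in byte terms.
  congestion_window += kMaxSegmentSize;

  std::cout << "cwnd in bytes: " << congestion_window << std::endl;
  return 0;
}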