Index: net/quic/quic_connection.cc
diff --git a/net/quic/quic_connection.cc b/net/quic/quic_connection.cc
index 8b4f4dd4387ac06eeab83526006c9ed3fcd4b468..eed31e49865226f919e5243325fd4a9bc87a01d5 100644
--- a/net/quic/quic_connection.cc
+++ b/net/quic/quic_connection.cc
@@ -34,6 +34,7 @@
 #include "net/quic/quic_config.h"
 #include "net/quic/quic_flags.h"
 #include "net/quic/quic_packet_generator.h"
+#include "net/quic/quic_sent_packet_manager.h"
 #include "net/quic/quic_utils.h"
 
 using base::StringPiece;
@@ -292,14 +293,14 @@ QuicConnection::QuicConnection(QuicConnectionId connection_id,
       time_of_last_sent_new_packet_(clock_->ApproximateNow()),
       last_send_for_timeout_(clock_->ApproximateNow()),
       packet_number_of_last_sent_packet_(0),
-      sent_packet_manager_(
+      sent_packet_manager_(new QuicSentPacketManager(
           perspective,
           kDefaultPathId,
           clock_,
           &stats_,
           FLAGS_quic_use_bbr_congestion_control ? kBBR : kCubic,
           FLAGS_quic_use_time_loss_detection ? kTime : kNack,
-          /*delegate=*/nullptr),
+          /*delegate=*/nullptr)),
       version_negotiation_state_(START_NEGOTIATION),
       perspective_(perspective),
       connected_(true),
@@ -318,7 +319,7 @@ QuicConnection::QuicConnection(QuicConnectionId connection_id,
   framer_.set_received_entropy_calculator(&received_packet_manager_);
   last_stop_waiting_frame_.least_unacked = 0;
   stats_.connection_creation_time = clock_->ApproximateNow();
-  sent_packet_manager_.set_network_change_visitor(this);
+  sent_packet_manager_->SetNetworkChangeVisitor(this);
   // Allow the packet writer to potentially reduce the packet size to a value
   // even smaller than kDefaultMaxPacketSize.
   SetMaxPacketLength(perspective_ == Perspective::IS_SERVER
@@ -363,7 +364,7 @@ void QuicConnection::SetFromConfig(const QuicConfig& config) {
                        config.max_idle_time_before_crypto_handshake());
   }
 
-  sent_packet_manager_.SetFromConfig(config);
+  sent_packet_manager_->SetFromConfig(config);
   if (config.HasReceivedBytesForConnectionId() &&
       can_truncate_connection_ids_) {
     packet_generator_.SetConnectionIdLength(
@@ -416,16 +417,16 @@ void QuicConnection::OnReceiveConnectionState(
 void QuicConnection::ResumeConnectionState(
     const CachedNetworkParameters& cached_network_params,
     bool max_bandwidth_resumption) {
-  sent_packet_manager_.ResumeConnectionState(cached_network_params,
-                                             max_bandwidth_resumption);
+  sent_packet_manager_->ResumeConnectionState(cached_network_params,
+                                              max_bandwidth_resumption);
 }
 
 void QuicConnection::SetMaxPacingRate(QuicBandwidth max_pacing_rate) {
-  sent_packet_manager_.SetMaxPacingRate(max_pacing_rate);
+  sent_packet_manager_->SetMaxPacingRate(max_pacing_rate);
 }
 
 void QuicConnection::SetNumOpenStreams(size_t num_streams) {
-  sent_packet_manager_.SetNumOpenStreams(num_streams);
+  sent_packet_manager_->SetNumOpenStreams(num_streams);
 }
 
 bool QuicConnection::SelectMutualVersion(
@@ -674,7 +675,7 @@ bool QuicConnection::OnPacketHeader(const QuicPacketHeader& header) {
                                             last_packet_source_address_);
   if (active_peer_migration_type_ == NO_CHANGE &&
       peer_migration_type != NO_CHANGE) {
-    StartPeerMigration(peer_migration_type);
+    StartPeerMigration(header.path_id, peer_migration_type);
   }
 
   --stats_.packets_dropped;
@@ -749,7 +750,7 @@ bool QuicConnection::OnAckFrame(const QuicAckFrame& incoming_ack) {
   // acking packets which we never care about.
   // Send an ack to raise the high water mark.
   if (!incoming_ack.packets.Empty() &&
-      GetLeastUnacked() > incoming_ack.packets.Min()) {
+      GetLeastUnacked(incoming_ack.path_id) > incoming_ack.packets.Min()) {
     ++stop_waiting_count_;
   } else {
     stop_waiting_count_ = 0;
@@ -760,11 +761,13 @@ bool QuicConnection::OnAckFrame(const QuicAckFrame& incoming_ack) {
 
 void QuicConnection::ProcessAckFrame(const QuicAckFrame& incoming_ack) {
   largest_seen_packet_with_ack_ = last_header_.packet_number;
-  sent_packet_manager_.OnIncomingAck(incoming_ack,
-                                     time_of_last_received_packet_);
+  sent_packet_manager_->OnIncomingAck(incoming_ack,
+                                      time_of_last_received_packet_);
   if (version() <= QUIC_VERSION_33) {
     sent_entropy_manager_.ClearEntropyBefore(
-        sent_packet_manager_.least_packet_awaited_by_peer() - 1);
+        sent_packet_manager_->GetLeastPacketAwaitedByPeer(
+            incoming_ack.path_id) -
+        1);
   }
   // Always reset the retransmission alarm when an ack comes in, since we now
   // have a better estimate of the current rtt than when it was set.
@@ -826,10 +829,12 @@ const char* QuicConnection::ValidateAckFrame(const QuicAckFrame& incoming_ack) {
     return "Largest observed too high.";
   }
 
-  if (incoming_ack.largest_observed < sent_packet_manager_.largest_observed()) {
+  if (incoming_ack.largest_observed <
+      sent_packet_manager_->GetLargestObserved(incoming_ack.path_id)) {
     LOG(WARNING) << ENDPOINT << "Peer's largest_observed packet decreased:"
                  << incoming_ack.largest_observed << " vs "
-                 << sent_packet_manager_.largest_observed()
+                 << sent_packet_manager_->GetLargestObserved(
+                        incoming_ack.path_id)
                  << " packet_number:" << last_header_.packet_number
                  << " largest seen with ack:" << largest_seen_packet_with_ack_
                  << " connection_id: " << connection_id_;
@@ -850,11 +855,13 @@ const char* QuicConnection::ValidateAckFrame(const QuicAckFrame& incoming_ack) {
 
   if (!incoming_ack.packets.Empty() &&
       incoming_ack.packets.Min() <
-          sent_packet_manager_.least_packet_awaited_by_peer()) {
+          sent_packet_manager_->GetLeastPacketAwaitedByPeer(
+              incoming_ack.path_id)) {
     LOG(WARNING) << ENDPOINT
                  << "Peer sent missing packet: " << incoming_ack.packets.Min()
                  << " which is smaller than least_packet_awaited_by_peer_: "
-                 << sent_packet_manager_.least_packet_awaited_by_peer();
+                 << sent_packet_manager_->GetLeastPacketAwaitedByPeer(
+                        incoming_ack.path_id);
     return "Missing packet smaller than least awaited.";
   }
   if (!sent_entropy_manager_.IsValidEntropy(incoming_ack.largest_observed,
@@ -1051,8 +1058,8 @@ void QuicConnection::MaybeQueueAck(bool was_missing) {
     } else if (!ack_alarm_->IsSet()) {
       // Wait the minimum of a quarter min_rtt and the delayed ack time.
       QuicTime::Delta ack_delay = QuicTime::Delta::Min(
-          sent_packet_manager_.DelayedAckTime(),
-          sent_packet_manager_.GetRttStats()->min_rtt().Multiply(
+          DelayedAckTime(),
+          sent_packet_manager_->GetRttStats()->min_rtt().Multiply(
               ack_decimation_delay_));
       ack_alarm_->Set(clock_->ApproximateNow().Add(ack_delay));
     }
@@ -1062,8 +1069,7 @@ void QuicConnection::MaybeQueueAck(bool was_missing) {
         kDefaultRetransmittablePacketsBeforeAck) {
       ack_queued_ = true;
     } else if (!ack_alarm_->IsSet()) {
-      ack_alarm_->Set(clock_->ApproximateNow().Add(
-          sent_packet_manager_.DelayedAckTime()));
+      ack_alarm_->Set(clock_->ApproximateNow().Add(DelayedAckTime()));
     }
   }
 
@@ -1072,7 +1078,7 @@ void QuicConnection::MaybeQueueAck(bool was_missing) {
     if (ack_mode_ == ACK_DECIMATION_WITH_REORDERING) {
       // Wait the minimum of an eighth min_rtt and the existing ack time.
       QuicTime ack_time = clock_->ApproximateNow().Add(
-          sent_packet_manager_.GetRttStats()->min_rtt().Multiply(0.125));
+          sent_packet_manager_->GetRttStats()->min_rtt().Multiply(0.125));
       if (!ack_alarm_->IsSet() || ack_alarm_->deadline() > ack_time) {
         ack_alarm_->Cancel();
         ack_alarm_->Set(ack_time);
@@ -1099,8 +1105,9 @@ void QuicConnection::MaybeCloseIfTooManyOutstandingPackets() {
   }
   // This occurs if we don't discard old packets we've sent fast enough.
   // It's possible largest observed is less than least unacked.
-  if (sent_packet_manager_.largest_observed() >
-      (sent_packet_manager_.GetLeastUnacked() + kMaxTrackedPackets)) {
+  if (sent_packet_manager_->GetLargestObserved(last_header_.path_id) >
+      (sent_packet_manager_->GetLeastUnacked(last_header_.path_id) +
+       kMaxTrackedPackets)) {
     CloseConnection(
         QUIC_TOO_MANY_OUTSTANDING_SENT_PACKETS,
         StringPrintf("More than %" PRIu64 " outstanding.", kMaxTrackedPackets),
@@ -1122,15 +1129,15 @@ const QuicFrame QuicConnection::GetUpdatedAckFrame() {
 
 void QuicConnection::PopulateStopWaitingFrame(
     QuicStopWaitingFrame* stop_waiting) {
-  stop_waiting->least_unacked = GetLeastUnacked();
+  stop_waiting->least_unacked = GetLeastUnacked(stop_waiting->path_id);
   if (version() <= QUIC_VERSION_33) {
     stop_waiting->entropy_hash = sent_entropy_manager_.GetCumulativeEntropy(
         stop_waiting->least_unacked - 1);
   }
 }
 
-QuicPacketNumber QuicConnection::GetLeastUnacked() const {
-  return sent_packet_manager_.GetLeastUnacked();
+QuicPacketNumber QuicConnection::GetLeastUnacked(QuicPathId path_id) const {
+  return sent_packet_manager_->GetLeastUnacked(path_id);
 }
 
 void QuicConnection::MaybeSendInResponseToPacket() {
@@ -1221,7 +1228,7 @@ void QuicConnection::SendRstStream(QuicStreamId id,
     return;
   }
 
-  sent_packet_manager_.CancelRetransmissionsForStream(id);
+  sent_packet_manager_->CancelRetransmissionsForStream(id);
   // Remove all queued packets which only contain data for the reset stream.
   QueuedPacketList::iterator packet_iterator = queued_packets_.begin();
   while (packet_iterator != queued_packets_.end()) {
@@ -1264,7 +1271,7 @@ void QuicConnection::SendPathClose(QuicPathId path_id) {
 }
 
 const QuicConnectionStats& QuicConnection::GetStats() {
-  const RttStats* rtt_stats = sent_packet_manager_.GetRttStats();
+  const RttStats* rtt_stats = sent_packet_manager_->GetRttStats();
 
   // Update rtt and estimated bandwidth.
   QuicTime::Delta min_rtt = rtt_stats->min_rtt();
@@ -1281,7 +1288,7 @@ const QuicConnectionStats& QuicConnection::GetStats() {
   }
   stats_.srtt_us = srtt.ToMicroseconds();
 
-  stats_.estimated_bandwidth = sent_packet_manager_.BandwidthEstimate();
+  stats_.estimated_bandwidth = sent_packet_manager_->BandwidthEstimate();
   stats_.max_packet_size = packet_generator_.GetCurrentMaxPacketLength();
   stats_.max_received_packet_size = largest_received_packet_size_;
   return stats_;
@@ -1335,9 +1342,9 @@ void QuicConnection::ProcessUdpPacket(const IPEndPoint& self_address,
 
   ++stats_.packets_processed;
   if (active_peer_migration_type_ != NO_CHANGE &&
-      sent_packet_manager_.largest_observed() >
+      sent_packet_manager_->GetLargestObserved(last_header_.path_id) >
           highest_packet_sent_before_peer_migration_) {
-    OnPeerMigrationValidated();
+    OnPeerMigrationValidated(last_header_.path_id);
   }
   MaybeProcessUndecryptablePackets();
   MaybeSendInResponseToPacket();
@@ -1487,9 +1494,9 @@ void QuicConnection::WriteQueuedPackets() {
 void QuicConnection::WritePendingRetransmissions() {
   // Keep writing as long as there's a pending retransmission which can be
   // written.
-  while (sent_packet_manager_.HasPendingRetransmissions()) {
+  while (sent_packet_manager_->HasPendingRetransmissions()) {
     const PendingRetransmission pending =
-        sent_packet_manager_.NextPendingRetransmission();
+        sent_packet_manager_->NextPendingRetransmission();
     if (!CanWrite(HAS_RETRANSMITTABLE_DATA)) {
       break;
     }
@@ -1508,13 +1515,13 @@
 
 void QuicConnection::RetransmitUnackedPackets(
     TransmissionType retransmission_type) {
-  sent_packet_manager_.RetransmitUnackedPackets(retransmission_type);
+  sent_packet_manager_->RetransmitUnackedPackets(retransmission_type);
 
   WriteIfNotBlocked();
 }
 
 void QuicConnection::NeuterUnencryptedPackets() {
-  sent_packet_manager_.NeuterUnencryptedPackets();
+  sent_packet_manager_->NeuterUnencryptedPackets();
   // This may have changed the retransmission timer, so re-arm it.
   SetRetransmissionAlarm();
 }
@@ -1552,14 +1559,19 @@ bool QuicConnection::CanWrite(HasRetransmittableData retransmittable) {
     return false;
   }
 
+  // TODO(fayang): If delay is not infinite, the next packet will be created and
+  // sent on path_id.
+  QuicPathId path_id = kInvalidPathId;
   QuicTime now = clock_->Now();
   QuicTime::Delta delay =
-      sent_packet_manager_.TimeUntilSend(now, retransmittable);
+      sent_packet_manager_->TimeUntilSend(now, retransmittable, &path_id);
   if (delay.IsInfinite()) {
+    DCHECK_EQ(kInvalidPathId, path_id);
     send_alarm_->Cancel();
     return false;
   }
 
+  DCHECK_NE(kInvalidPathId, path_id);
   // If the scheduler requires a delay, then we can not send this packet now.
   if (!delay.IsZero()) {
     send_alarm_->Update(now.Add(delay), QuicTime::Delta::FromMilliseconds(1));
@@ -1571,9 +1583,10 @@
 }
 
 bool QuicConnection::WritePacket(SerializedPacket* packet) {
-  if (packet->packet_number < sent_packet_manager_.largest_sent_packet()) {
-    QUIC_BUG << "Attempt to write packet:" << packet->packet_number
-             << " after:" << sent_packet_manager_.largest_sent_packet();
+  if (packet->packet_number <
+      sent_packet_manager_->GetLargestSentPacket(packet->path_id)) {
+    QUIC_BUG << "Attempt to write packet:" << packet->packet_number << " after:"
+             << sent_packet_manager_->GetLargestSentPacket(packet->path_id);
     CloseConnection(QUIC_INTERNAL_ERROR, "Packet written out of order.",
                     ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET);
     return true;
@@ -1668,10 +1681,10 @@ bool QuicConnection::WritePacket(SerializedPacket* packet) {
   // options by a more explicit API than setting a struct value directly,
   // perhaps via the NetworkChangeVisitor.
   packet_generator_.UpdateSequenceNumberLength(
-      sent_packet_manager_.least_packet_awaited_by_peer(),
-      sent_packet_manager_.EstimateMaxPacketsInFlight(max_packet_length()));
+      sent_packet_manager_->GetLeastPacketAwaitedByPeer(packet->path_id),
+      sent_packet_manager_->EstimateMaxPacketsInFlight(max_packet_length()));
 
-  bool reset_retransmission_alarm = sent_packet_manager_.OnPacketSent(
+  bool reset_retransmission_alarm = sent_packet_manager_->OnPacketSent(
       packet, packet->original_path_id, packet->original_packet_number,
       packet_send_time, packet->transmission_type, IsRetransmittable(*packet));
 
@@ -1719,9 +1732,10 @@ bool QuicConnection::ShouldDiscardPacket(const SerializedPacket& packet) {
   // If a retransmission has been acked before sending, don't send it.
   // This occurs if a packet gets serialized, queued, then discarded.
   if (packet.transmission_type != NOT_RETRANSMISSION &&
-      (!sent_packet_manager_.IsUnacked(packet.original_packet_number) ||
-       !sent_packet_manager_.HasRetransmittableFrames(
-           packet.original_packet_number))) {
+      (!sent_packet_manager_->IsUnacked(packet.original_path_id,
+                                        packet.original_packet_number) ||
+       !sent_packet_manager_->HasRetransmittableFrames(
+           packet.original_path_id, packet.original_packet_number))) {
     DVLOG(1) << ENDPOINT << "Dropping unacked packet: " << packet_number
              << " A previous transmission was acked while write blocked.";
     return true;
@@ -1769,10 +1783,10 @@ void QuicConnection::OnCongestionChange() {
   visitor_->OnCongestionWindowChange(clock_->ApproximateNow());
 
   // Uses the connection's smoothed RTT. If zero, uses initial_rtt.
-  QuicTime::Delta rtt = sent_packet_manager_.GetRttStats()->smoothed_rtt();
+  QuicTime::Delta rtt = sent_packet_manager_->GetRttStats()->smoothed_rtt();
   if (rtt.IsZero()) {
     rtt = QuicTime::Delta::FromMicroseconds(
-        sent_packet_manager_.GetRttStats()->initial_rtt_us());
+        sent_packet_manager_->GetRttStats()->initial_rtt_us());
   }
 
   if (debug_visitor_)
@@ -1784,7 +1798,7 @@ void QuicConnection::OnPathDegrading() {
 }
 
 void QuicConnection::OnHandshakeComplete() {
-  sent_packet_manager_.SetHandshakeConfirmed();
+  sent_packet_manager_->SetHandshakeConfirmed();
   // The client should immediately ack the SHLO to confirm the handshake is
   // complete with the server.
   if (perspective_ == Perspective::IS_CLIENT && !ack_queued_ &&
@@ -1849,10 +1863,10 @@ void QuicConnection::SendAck() {
 }
 
 void QuicConnection::OnRetransmissionTimeout() {
-  DCHECK(sent_packet_manager_.HasUnackedPackets());
+  DCHECK(sent_packet_manager_->HasUnackedPackets());
 
   if (close_connection_after_five_rtos_ &&
-      sent_packet_manager_.consecutive_rto_count() >= 4) {
+      sent_packet_manager_->GetConsecutiveRtoCount() >= 4) {
     // Close on the 5th consecutive RTO, so after 4 previous RTOs have occurred.
     CloseConnection(QUIC_TOO_MANY_RTOS, "5 consecutive retransmission timeouts",
                     ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET);
@@ -1863,7 +1877,7 @@ void QuicConnection::OnRetransmissionTimeout() {
   if (FLAGS_quic_only_one_sending_alarm) {
     send_alarm_->Cancel();
   }
-  sent_packet_manager_.OnRetransmissionTimeout();
+  sent_packet_manager_->OnRetransmissionTimeout();
   WriteIfNotBlocked();
 
   // A write failure can result in the connection being closed, don't attempt to
@@ -1874,7 +1888,7 @@
 
   // In the TLP case, the SentPacketManager gives the connection the opportunity
   // to send new data before retransmitting.
-  if (sent_packet_manager_.MaybeRetransmitTailLossProbe()) {
+  if (sent_packet_manager_->MaybeRetransmitTailLossProbe()) {
     // Send the pending retransmission now that it's been queued.
     WriteIfNotBlocked();
   }
@@ -1899,7 +1913,7 @@ void QuicConnection::SetEncrypter(EncryptionLevel level,
         // about two full round trips worth of packets, which should be
         // sufficient.
         3 *
-            sent_packet_manager_.EstimateMaxPacketsInFlight(
+            sent_packet_manager_->EstimateMaxPacketsInFlight(
                 max_packet_length());
   }
 }
@@ -2187,17 +2201,17 @@ void QuicConnection::SetRetransmissionAlarm() {
   // Once the handshake has been confirmed, the retransmission alarm should
   // never fire before the send alarm.
   if (FLAGS_quic_only_one_sending_alarm &&
-      sent_packet_manager_.handshake_confirmed() && send_alarm_->IsSet()) {
-    DCHECK(!sent_packet_manager_.GetRetransmissionTime().IsInitialized() ||
-           sent_packet_manager_.GetRetransmissionTime() >=
+      sent_packet_manager_->IsHandshakeConfirmed() && send_alarm_->IsSet()) {
+    DCHECK(!sent_packet_manager_->GetRetransmissionTime().IsInitialized() ||
+           sent_packet_manager_->GetRetransmissionTime() >=
               send_alarm_->deadline())
         << " retransmission_time:"
-        << sent_packet_manager_.GetRetransmissionTime().ToDebuggingValue()
+        << sent_packet_manager_->GetRetransmissionTime().ToDebuggingValue()
         << " send_alarm:" << send_alarm_->deadline().ToDebuggingValue();
     retransmission_alarm_->Cancel();
     return;
   }
-  QuicTime retransmission_time = sent_packet_manager_.GetRetransmissionTime();
+  QuicTime retransmission_time = sent_packet_manager_->GetRetransmissionTime();
   retransmission_alarm_->Update(retransmission_time,
                                 QuicTime::Delta::FromMilliseconds(1));
 }
@@ -2387,7 +2401,7 @@ void QuicConnection::DiscoverMtu() {
   DCHECK(!mtu_discovery_alarm_->IsSet());
 }
 
-void QuicConnection::OnPeerMigrationValidated() {
+void QuicConnection::OnPeerMigrationValidated(QuicPathId path_id) {
   if (active_peer_migration_type_ == NO_CHANGE) {
     QUIC_BUG << "No migration underway.";
     return;
@@ -2401,6 +2415,7 @@
 // migration. This should happen even if a migration is underway, since the
 // most recent migration is the one that we should pay attention to.
 void QuicConnection::StartPeerMigration(
+    QuicPathId path_id,
     PeerAddressChangeType peer_migration_type) {
   // TODO(fayang): Currently, all peer address change type are allowed. Need to
   // add a method ShouldAllowPeerAddressChange(PeerAddressChangeType type) to
@@ -2423,7 +2438,7 @@ void QuicConnection::StartPeerMigration(
   // TODO(jri): Move these calls to OnPeerMigrationValidated. Rename
   // OnConnectionMigration methods to OnPeerMigration.
   visitor_->OnConnectionMigration(peer_migration_type);
-  sent_packet_manager_.OnConnectionMigration(peer_migration_type);
+  sent_packet_manager_->OnConnectionMigration(path_id, peer_migration_type);
 }
 
 void QuicConnection::OnPathClosed(QuicPathId path_id) {
@@ -2466,4 +2481,23 @@ bool QuicConnection::MaybeConsiderAsMemoryCorruption(
   return false;
 }
 
+// Uses a 25ms delayed ack timer. Also helps with better signaling
+// in low-bandwidth (< ~384 kbps), where an ack is sent per packet.
+// Ensures that the Delayed Ack timer is always set to a value lesser
+// than the retransmission timer's minimum value (MinRTO). We want the
+// delayed ack to get back to the QUIC peer before the sender's
+// retransmission timer triggers. Since we do not know the
+// reverse-path one-way delay, we assume equal delays for forward and
+// reverse paths, and ensure that the timer is set to less than half
+// of the MinRTO.
+// There may be a value in making this delay adaptive with the help of
+// the sender and a signaling mechanism -- if the sender uses a
+// different MinRTO, we may get spurious retransmissions. May not have
+// any benefits, but if the delayed ack becomes a significant source
+// of (likely, tail) latency, then consider such a mechanism.
+const QuicTime::Delta QuicConnection::DelayedAckTime() {
+  return QuicTime::Delta::FromMilliseconds(
+      min(kMaxDelayedAckTimeMs, kMinRetransmissionTimeMs / 2));
+}
+
 }  // namespace net