Index: media/cast/sender/audio_sender.cc
diff --git a/media/cast/sender/audio_sender.cc b/media/cast/sender/audio_sender.cc
index e79be079ce5e7dc8756d34b3aa39101a646fc98f..6c988d9722441bb061fafac07b46c3a284e7dec8 100644
--- a/media/cast/sender/audio_sender.cc
+++ b/media/cast/sender/audio_sender.cc
@@ -4,6 +4,8 @@
 
 #include "media/cast/sender/audio_sender.h"
 
+#include <algorithm>
+
 #include "base/bind.h"
 #include "base/logging.h"
 #include "base/message_loop/message_loop.h"
@@ -61,9 +63,6 @@ AudioSender::AudioSender(scoped_refptr<CastEnvironment> cast_environment,
   transport_config.ssrc = audio_config.ssrc;
   transport_config.feedback_ssrc = audio_config.incoming_feedback_ssrc;
   transport_config.rtp_payload_type = audio_config.rtp_payload_type;
-  // TODO(miu): AudioSender needs to be like VideoSender in providing an upper
-  // limit on the number of in-flight frames.
-  transport_config.stored_frames = max_unacked_frames_;
   transport_config.aes_key = audio_config.aes_key;
   transport_config.aes_iv_mask = audio_config.aes_iv_mask;
 
@@ -86,25 +85,40 @@ void AudioSender::InsertAudio(scoped_ptr<AudioBus> audio_bus,
   }
   DCHECK(audio_encoder_.get()) << "Invalid internal state";
 
-  // TODO(miu): An |audio_bus| that represents more duration than a single
-  // frame's duration can defeat our logic here, causing too much data to become
-  // enqueued. This will be addressed in a soon-upcoming change.
-  if (ShouldDropNextFrame(recorded_time)) {
-    VLOG(1) << "Dropping frame due to too many frames currently in-flight.";
+  // Check that enqueuing the samples in |audio_bus| won't cause more frames to
+  // become in-flight than the system's design limit.
+  const int count_unacked_frames = GetUnackedFrameCount();
+  const int64 samples_unacked =
+      count_unacked_frames * audio_encoder_->GetSamplesPerFrame();
+  const int64 samples_would_be_in_flight =
+      samples_unacked + samples_in_encoder_ + audio_bus->frames();
+  const int frames_would_be_in_flight =
+      samples_would_be_in_flight / audio_encoder_->GetSamplesPerFrame();
+  if (frames_would_be_in_flight > kMaxUnackedFrames) {
+    VLOG(1) << "Dropping audio: Too many frames would be in-flight.";
     return;
   }
 
-  samples_in_encoder_ += audio_bus->frames();
+  // Check that enqueuing the samples in |audio_bus| won't exceed the allowed
+  // in-flight media duration.
+  const int64 max_samples_in_flight =
+      TimeDeltaToRtpDelta(GetAllowedInFlightMediaDuration(), rtp_timebase());
+  VLOG(2) << "Audio samples in-flight: "
+          << samples_unacked << " unacked + "
+          << samples_in_encoder_ << " in encoder + "
+          << audio_bus->frames() << " additional would be "
+          << (max_samples_in_flight > 0 ?
+                  100 * samples_would_be_in_flight / max_samples_in_flight :
+                  kint64max) << "% of allowed in-flight.";
+  if (samples_would_be_in_flight > max_samples_in_flight) {
+    VLOG(1) << "Dropping audio: Too long an audio duration would be in-flight.";
+    return;
+  }
+  samples_in_encoder_ += audio_bus->frames();
 
   audio_encoder_->InsertAudio(audio_bus.Pass(), recorded_time);
 }
 
-int AudioSender::GetNumberOfFramesInEncoder() const {
-  // Note: It's possible for a partial frame to be in the encoder, but returning
-  // the floor() is good enough for the "design limit" check in FrameSender.
-  return samples_in_encoder_ / audio_encoder_->GetSamplesPerFrame();
-}
-
 void AudioSender::OnAck(uint32 frame_id) {
 }
 
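For reference, below is a minimal standalone sketch of the two admission checks this patch adds to AudioSender::InsertAudio(). It is plain C++ outside the Chromium tree: the constants (samples per frame, RTP timebase, frame and duration limits) and the helper WouldExceedInFlightLimits() are illustrative assumptions standing in for audio_encoder_->GetSamplesPerFrame(), kMaxUnackedFrames, GetAllowedInFlightMediaDuration(), and TimeDeltaToRtpDelta(); only the arithmetic mirrors the patch.

#include <cstdint>
#include <iostream>

// Illustrative stand-ins for values the real sender gets from its config,
// encoder, and congestion-control logic.
constexpr int kSamplesPerFrame = 480;        // one 10 ms frame at 48 kHz
constexpr int kRtpTimebase = 48000;          // RTP ticks (samples) per second
constexpr int kMaxUnackedFramesLimit = 120;  // assumed frame-count design limit
constexpr int64_t kAllowedInFlightMs = 400;  // assumed in-flight duration cap

// Returns true if enqueuing |new_samples| would violate either limit,
// mirroring the two checks added to AudioSender::InsertAudio().
bool WouldExceedInFlightLimits(int unacked_frames,
                               int64_t samples_in_encoder,
                               int new_samples) {
  const int64_t samples_unacked =
      static_cast<int64_t>(unacked_frames) * kSamplesPerFrame;
  const int64_t samples_would_be_in_flight =
      samples_unacked + samples_in_encoder + new_samples;

  // Check 1: total frames in flight versus the design limit.
  const int64_t frames_would_be_in_flight =
      samples_would_be_in_flight / kSamplesPerFrame;
  if (frames_would_be_in_flight > kMaxUnackedFramesLimit)
    return true;

  // Check 2: total in-flight duration, expressed as a sample count at the
  // RTP timebase (the analogue of TimeDeltaToRtpDelta() in the patch).
  const int64_t max_samples_in_flight =
      kAllowedInFlightMs * kRtpTimebase / 1000;
  return samples_would_be_in_flight > max_samples_in_flight;
}

int main() {
  // 10 unacked frames (100 ms) plus one 10 ms buffer: under both limits.
  std::cout << WouldExceedInFlightLimits(10, 0, 480) << "\n";  // prints 0
  // 45 unacked frames (450 ms) plus one 10 ms buffer: the 400 ms duration
  // cap trips even though the frame count (46) is under the frame limit.
  std::cout << WouldExceedInFlightLimits(45, 0, 480) << "\n";  // prints 1
  return 0;
}

Note that the frame-count check uses integer (floor) division, so a partial frame sitting in the encoder does not count toward the limit, matching the note in the removed GetNumberOfFramesInEncoder().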