| Index: media/filters/audio_renderer_impl.cc
|
| diff --git a/media/filters/audio_renderer_impl.cc b/media/filters/audio_renderer_impl.cc
|
| index b4a4b4125aeb1271069cc71f51d4f5745bfb58e8..55978cd22f4e60cfe9d83a14f85e3341f2aa373d 100644
|
| --- a/media/filters/audio_renderer_impl.cc
|
| +++ b/media/filters/audio_renderer_impl.cc
|
| @@ -147,6 +147,7 @@ void AudioRendererImpl::SetMediaTime(base::TimeDelta time) {
|
| DCHECK_EQ(state_, kFlushed);
|
|
|
| start_timestamp_ = time;
|
| + audio_clock_.reset(new AudioClock(time, audio_parameters_.sample_rate()));
|
| }
|
|
|
| base::TimeDelta AudioRendererImpl::CurrentMediaTime() {
|
| @@ -201,7 +202,7 @@ void AudioRendererImpl::ResetDecoderDone() {
|
| DCHECK_EQ(state_, kFlushed);
|
| DCHECK(!flush_cb_.is_null());
|
|
|
| - audio_clock_.reset(new AudioClock(audio_parameters_.sample_rate()));
|
| + audio_clock_.reset();
|
| received_end_of_stream_ = false;
|
| rendered_end_of_stream_ = false;
|
|
|
| @@ -294,7 +295,8 @@ void AudioRendererImpl::Initialize(DemuxerStream* stream,
|
| hardware_config_->GetHighLatencyBufferSize());
|
| }
|
|
|
| - audio_clock_.reset(new AudioClock(audio_parameters_.sample_rate()));
|
| + audio_clock_.reset(
|
| + new AudioClock(base::TimeDelta(), audio_parameters_.sample_rate()));
|
|
|
| audio_buffer_stream_->Initialize(
|
| stream,
|
| @@ -549,18 +551,21 @@ int AudioRendererImpl::Render(AudioBus* audio_bus,
|
|
|
| // Ensure Stop() hasn't destroyed our |algorithm_| on the pipeline thread.
|
| if (!algorithm_) {
|
| - audio_clock_->WroteSilence(requested_frames, delay_frames);
|
| + audio_clock_->WroteAudio(
|
| + 0, requested_frames, delay_frames, playback_rate_);
|
| return 0;
|
| }
|
|
|
| if (playback_rate_ == 0) {
|
| - audio_clock_->WroteSilence(requested_frames, delay_frames);
|
| + audio_clock_->WroteAudio(
|
| + 0, requested_frames, delay_frames, playback_rate_);
|
| return 0;
|
| }
|
|
|
| // Mute audio by returning 0 when not playing.
|
| if (state_ != kPlaying) {
|
| - audio_clock_->WroteSilence(requested_frames, delay_frames);
|
| + audio_clock_->WroteAudio(
|
| + 0, requested_frames, delay_frames, playback_rate_);
|
| return 0;
|
| }
|
|
|
| @@ -576,20 +581,16 @@ int AudioRendererImpl::Render(AudioBus* audio_bus,
|
| // 3) We are in the kPlaying state
|
| //
|
| // Otherwise the buffer has data we can send to the device.
|
| - const base::TimeDelta media_timestamp_before_filling =
|
| - audio_clock_->CurrentMediaTimestamp(base::TimeDelta());
|
| if (algorithm_->frames_buffered() > 0) {
|
| frames_written =
|
| algorithm_->FillBuffer(audio_bus, requested_frames, playback_rate_);
|
| - audio_clock_->WroteAudio(
|
| - frames_written, delay_frames, playback_rate_, algorithm_->GetTime());
|
| }
|
| - audio_clock_->WroteSilence(requested_frames - frames_written, delay_frames);
|
| + audio_clock_->WroteAudio(
|
| + frames_written, requested_frames, delay_frames, playback_rate_);
|
|
|
| if (frames_written == 0) {
|
| if (received_end_of_stream_ && !rendered_end_of_stream_ &&
|
| - audio_clock_->CurrentMediaTimestamp(base::TimeDelta()) ==
|
| - audio_clock_->last_endpoint_timestamp()) {
|
| + !audio_clock_->AudioDataBuffered()) {
|
| rendered_end_of_stream_ = true;
|
| task_runner_->PostTask(FROM_HERE, ended_cb_);
|
| } else if (!received_end_of_stream_ && state_ == kPlaying) {
|
| @@ -606,15 +607,18 @@ int AudioRendererImpl::Render(AudioBus* audio_bus,
|
| weak_factory_.GetWeakPtr()));
|
| }
|
|
|
| - // We only want to execute |time_cb_| if time has progressed and we haven't
|
| - // signaled end of stream yet.
|
| - if (media_timestamp_before_filling !=
|
| - audio_clock_->CurrentMediaTimestamp(base::TimeDelta()) &&
|
| - !rendered_end_of_stream_) {
|
| - time_cb =
|
| - base::Bind(time_cb_,
|
| - audio_clock_->CurrentMediaTimestamp(base::TimeDelta()),
|
| - audio_clock_->last_endpoint_timestamp());
|
| + // Firing |ended_cb_| means we no longer need to run |time_cb_|.
|
| + if (!rendered_end_of_stream_) {
|
| + // Since |max_time| uses linear interpolation, only provide an upper bound
|
| + // that is for audio data at the same playback rate. Failing to do so can
|
| + // make time jump backwards when the linearly interpolated time advances
|
| + // past buffered regions of audio at different rates.
|
| + base::TimeDelta current_timestamp =
|
| + audio_clock_->CurrentMediaTimestamp(base::TimeDelta());
|
| + base::TimeDelta max_time =
|
| + current_timestamp +
|
| + audio_clock_->ContiguousAudioDataBufferedAtSameRate();
|
| + time_cb = base::Bind(time_cb_, current_timestamp, max_time);
|
| }
|
| }
|
|
|
|
|