Index: media/filters/audio_renderer_impl.cc |
diff --git a/media/filters/audio_renderer_impl.cc b/media/filters/audio_renderer_impl.cc |
index 0436e9c237466cb55ed93bb38647acb7a8d4e4d1..d43169fa09d014de0cc4d8ef14412271f20f5d92 100644 |
--- a/media/filters/audio_renderer_impl.cc |
+++ b/media/filters/audio_renderer_impl.cc |
@@ -40,6 +40,14 @@ void HistogramRendererEvent(AudioRendererEvent event) { |
} // namespace |
+AudioRendererImpl::RenderResult::RenderResult() |
+ : requested_frames(0), |
+ delay_frames(0), |
+ frames_written(0), |
+ playback_rate(0), |
+ endpoint_timestamp(kNoTimestamp()) { |
+} |
+ |
AudioRendererImpl::AudioRendererImpl( |
const scoped_refptr<base::SingleThreadTaskRunner>& task_runner, |
media::AudioRendererSink* sink, |
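For orientation, the header-side declaration implied by this new constructor is not part of this file's diff; it presumably lives in audio_renderer_impl.h as a struct nested inside AudioRendererImpl, roughly like the sketch below (member types inferred from the initializer list and from how Render()/DidRender() use them later in the patch):

// Hypothetical sketch of AudioRendererImpl::RenderResult; the actual header
// is not shown in this diff.
struct RenderResult {
  RenderResult();
  int requested_frames;                // audio_bus->frames() at Render() time
  int delay_frames;                    // hardware delay converted to frames
  int frames_written;                  // frames actually filled by |algorithm_|
  float playback_rate;                 // algorithm_->playback_rate() snapshot
  base::TimeDelta endpoint_timestamp;  // algorithm_->GetTime() snapshot
};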
@@ -176,7 +184,7 @@ void AudioRendererImpl::ResetDecoderDone() { |
// Flush() may have been called while underflowed/not fully buffered. |
if (buffering_state_ != BUFFERING_HAVE_NOTHING) |
- SetBufferingState_Locked(BUFFERING_HAVE_NOTHING); |
+ SetBufferingState(BUFFERING_HAVE_NOTHING); |
splicer_->Reset(); |
if (buffer_converter_) |
@@ -417,7 +425,7 @@ void AudioRendererImpl::DecodedAudioReady( |
while (splicer_->HasNextBuffer()) |
need_another_buffer = HandleSplicerBuffer_Locked(splicer_->GetNextBuffer()); |
- if (!need_another_buffer && !CanRead_Locked()) |
+ if (!need_another_buffer) |
return; |
AttemptRead_Locked(); |
@@ -463,7 +471,7 @@ bool AudioRendererImpl::HandleSplicerBuffer_Locked( |
case kPlaying: |
if (buffer->end_of_stream() || algorithm_->IsQueueFull()) { |
if (buffering_state_ == BUFFERING_HAVE_NOTHING) |
- SetBufferingState_Locked(BUFFERING_HAVE_ENOUGH); |
+ SetBufferingState(BUFFERING_HAVE_ENOUGH); |
return false; |
} |
return true; |
@@ -474,40 +482,28 @@ bool AudioRendererImpl::HandleSplicerBuffer_Locked( |
return false; |
} |
-void AudioRendererImpl::AttemptRead() { |
- base::AutoLock auto_lock(lock_); |
- AttemptRead_Locked(); |
-} |
- |
void AudioRendererImpl::AttemptRead_Locked() { |
DCHECK(task_runner_->BelongsToCurrentThread()); |
lock_.AssertAcquired(); |
- if (!CanRead_Locked()) |
- return; |
- |
- pending_read_ = true; |
- audio_buffer_stream_.Read(base::Bind(&AudioRendererImpl::DecodedAudioReady, |
- weak_factory_.GetWeakPtr())); |
-} |
- |
-bool AudioRendererImpl::CanRead_Locked() { |
- lock_.AssertAcquired(); |
- |
switch (state_) { |
case kUninitialized: |
case kInitializing: |
case kFlushing: |
case kFlushed: |
case kStopped: |
- return false; |
+ return; |
case kPlaying: |
break; |
} |
- return !pending_read_ && !received_end_of_stream_ && |
- !algorithm_->IsQueueFull(); |
+ if (pending_read_ || received_end_of_stream_ || algorithm_->IsQueueFull()) |
+ return; |
+ |
+ pending_read_ = true; |
+ audio_buffer_stream_.Read(base::Bind(&AudioRendererImpl::DecodedAudioReady, |
+ weak_factory_.GetWeakPtr())); |
} |
void AudioRendererImpl::SetPlaybackRate(float playback_rate) { |
@@ -547,91 +543,86 @@ bool AudioRendererImpl::IsBeforeStartTime( |
int AudioRendererImpl::Render(AudioBus* audio_bus, |
int audio_delay_milliseconds) { |
- const int requested_frames = audio_bus->frames(); |
- base::TimeDelta playback_delay = base::TimeDelta::FromMilliseconds( |
- audio_delay_milliseconds); |
- const int delay_frames = static_cast<int>(playback_delay.InSecondsF() * |
- audio_parameters_.sample_rate()); |
- int frames_written = 0; |
- base::Closure time_cb; |
+ DVLOG(2) << __FUNCTION__; |
DaleCurtis (2014/07/10 23:37:00): Probably you should remove this, or make DVLOG(3).
+ base::TimeDelta playback_delay = |
+ base::TimeDelta::FromMilliseconds(audio_delay_milliseconds); |
+ |
+ RenderResult result; |
+ result.requested_frames = audio_bus->frames(); |
+ result.delay_frames = static_cast<int>(playback_delay.InSecondsF() * |
+ audio_parameters_.sample_rate()); |
+ |
{ |
base::AutoLock auto_lock(lock_); |
- |
- // Ensure Stop() hasn't destroyed our |algorithm_| on the pipeline thread. |
- if (!algorithm_) { |
- audio_clock_->WroteSilence(requested_frames, delay_frames); |
- return 0; |
+ if (state_ == kPlaying && algorithm_->frames_buffered() > 0) { |
+ result.frames_written = |
+ algorithm_->FillBuffer(audio_bus, result.requested_frames); |
+ result.playback_rate = algorithm_->playback_rate(); |
+ result.endpoint_timestamp = algorithm_->GetTime(); |
} |
+ } |
- float playback_rate = algorithm_->playback_rate(); |
- if (playback_rate == 0) { |
- audio_clock_->WroteSilence(requested_frames, delay_frames); |
- return 0; |
- } |
+ task_runner_->PostTask( |
+ FROM_HERE, |
+ base::Bind( |
+ &AudioRendererImpl::DidRender, weak_factory_.GetWeakPtr(), result)); |
- // Mute audio by returning 0 when not playing. |
- if (state_ != kPlaying) { |
- audio_clock_->WroteSilence(requested_frames, delay_frames); |
- return 0; |
- } |
+ return result.frames_written; |
+} |
- // We use the following conditions to determine end of playback: |
- // 1) Algorithm can not fill the audio callback buffer |
- // 2) We received an end of stream buffer |
- // 3) We haven't already signalled that we've ended |
- // 4) We've played all known audio data sent to hardware |
- // |
- // We use the following conditions to determine underflow: |
- // 1) Algorithm can not fill the audio callback buffer |
- // 2) We have NOT received an end of stream buffer |
- // 3) We are in the kPlaying state |
- // |
- // Otherwise the buffer has data we can send to the device. |
- const base::TimeDelta media_timestamp_before_filling = |
- audio_clock_->CurrentMediaTimestamp(); |
- if (algorithm_->frames_buffered() > 0) { |
- frames_written = algorithm_->FillBuffer(audio_bus, requested_frames); |
- audio_clock_->WroteAudio( |
- frames_written, delay_frames, playback_rate, algorithm_->GetTime()); |
- } |
- audio_clock_->WroteSilence(requested_frames - frames_written, delay_frames); |
+void AudioRendererImpl::DidRender(RenderResult result) { |
+ DCHECK(task_runner_->BelongsToCurrentThread()); |
+ DVLOG(2) << __FUNCTION__; |
DaleCurtis (2014/07/10 23:37:00): Ditto.
+ |
+ base::AutoLock auto_lock(lock_); |
DaleCurtis (2014/07/10 23:37:00): Only needed when accessing algorithm_ and reading?
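To make the reviewer's question concrete: a narrower critical section could look like the hypothetical sketch below. Whether it is actually safe depends on which of these members are only ever touched on the media thread, which this diff alone does not show.

// Hypothetical narrowing of the lock scope in DidRender(); not part of this
// patch.
{
  base::AutoLock auto_lock(lock_);
  // Touch |audio_clock_| and |algorithm_| and issue AttemptRead_Locked()
  // here, i.e. only the state that Render() also reads on the audio thread.
}
// State that is only ever used on the media thread would not need to sit
// inside the critical section.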
+ if (state_ == kStopped) |
+ return; |
- if (frames_written == 0) { |
- if (received_end_of_stream_ && !rendered_end_of_stream_ && |
+ base::TimeDelta previous_media_timestamp = |
+ audio_clock_->CurrentMediaTimestamp(); |
+ |
+ if (result.frames_written > 0) { |
+ audio_clock_->WroteAudio(result.frames_written, |
+ result.delay_frames, |
+ result.playback_rate, |
+ result.endpoint_timestamp); |
+ } |
+ audio_clock_->WroteSilence(result.requested_frames - result.frames_written, |
+ result.delay_frames); |
+ |
+ // We use the following conditions to determine end of playback: |
+ // 1) Algorithm can not fill the audio callback buffer |
+ // 2) We received an end of stream buffer |
+ // 3) We haven't already signalled that we've ended |
+ // 4) We've played all known audio data sent to hardware |
+ // |
+ // We use the following conditions to determine underflow: |
+ // 1) Algorithm can not fill the audio callback buffer |
+ // 2) We have NOT received an end of stream buffer |
+ // 3) We are in the kPlaying state |
+ if (result.frames_written == 0) { |
+ if (received_end_of_stream_ && !rendered_end_of_stream_ && |
audio_clock_->CurrentMediaTimestamp() == |
audio_clock_->last_endpoint_timestamp()) { |
- rendered_end_of_stream_ = true; |
- ended_cb_.Run(); |
- } else if (!received_end_of_stream_ && state_ == kPlaying) { |
- if (buffering_state_ != BUFFERING_HAVE_NOTHING) { |
- algorithm_->IncreaseQueueCapacity(); |
- SetBufferingState_Locked(BUFFERING_HAVE_NOTHING); |
- } |
+ rendered_end_of_stream_ = true; |
+ ended_cb_.Run(); |
+ } else if (!received_end_of_stream_ && state_ == kPlaying) { |
+ if (buffering_state_ != BUFFERING_HAVE_NOTHING) { |
+ algorithm_->IncreaseQueueCapacity(); |
+ SetBufferingState(BUFFERING_HAVE_NOTHING); |
} |
} |
- |
- if (CanRead_Locked()) { |
- task_runner_->PostTask(FROM_HERE, |
- base::Bind(&AudioRendererImpl::AttemptRead, |
- weak_factory_.GetWeakPtr())); |
- } |
- |
- // We only want to execute |time_cb_| if time has progressed and we haven't |
- // signaled end of stream yet. |
- if (media_timestamp_before_filling != |
- audio_clock_->CurrentMediaTimestamp() && |
- !rendered_end_of_stream_) { |
- time_cb = base::Bind(time_cb_, |
- audio_clock_->CurrentMediaTimestamp(), |
- audio_clock_->last_endpoint_timestamp()); |
- } |
} |
- if (!time_cb.is_null()) |
- task_runner_->PostTask(FROM_HERE, time_cb); |
+ // Don't fire time updates if we don't need to (e.g., time hasn't changed, |
+ // playback has ended). |
+ if (previous_media_timestamp != audio_clock_->CurrentMediaTimestamp() && |
+ !rendered_end_of_stream_) { |
+ time_cb_.Run(audio_clock_->CurrentMediaTimestamp(), |
+ audio_clock_->last_endpoint_timestamp()); |
+ } |
- DCHECK_LE(frames_written, requested_frames); |
- return frames_written; |
+ AttemptRead_Locked(); |
} |
void AudioRendererImpl::OnRenderError() { |
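The net effect of the hunk above is that the real-time audio thread no longer touches |audio_clock_| or buffering state at all: Render() just fills the AudioBus, snapshots a RenderResult under |lock_|, and posts it by value to the media task runner, where DidRender() does the clock, buffering, and read bookkeeping. Condensed from the code above, for orientation only, the handoff is:

// Condensed sketch of the Render() -> DidRender() handoff introduced here.
int AudioRendererImpl::Render(AudioBus* audio_bus,
                              int audio_delay_milliseconds) {
  RenderResult result;
  {
    base::AutoLock auto_lock(lock_);
    // Fill |audio_bus| and snapshot |result| from |algorithm_|.
  }
  // |result| is copied into the closure, so no mutable state is shared with
  // the media thread; the WeakPtr makes the callback a no-op if the renderer
  // has been torn down in the meantime.
  task_runner_->PostTask(FROM_HERE,
                         base::Bind(&AudioRendererImpl::DidRender,
                                    weak_factory_.GetWeakPtr(),
                                    result));
  return result.frames_written;
}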
@@ -694,12 +685,11 @@ void AudioRendererImpl::OnConfigChange() { |
CHECK(splicer_->AddInput(buffer_converter_->GetNextBuffer())); |
} |
-void AudioRendererImpl::SetBufferingState_Locked( |
- BufferingState buffering_state) { |
+void AudioRendererImpl::SetBufferingState(BufferingState buffering_state) { |
DVLOG(1) << __FUNCTION__ << " : " << buffering_state_ << " -> " |
<< buffering_state; |
+ DCHECK(task_runner_->BelongsToCurrentThread()); |
DCHECK_NE(buffering_state_, buffering_state); |
- lock_.AssertAcquired(); |
buffering_state_ = buffering_state; |
task_runner_->PostTask(FROM_HERE, |