Chromium Code Reviews

Side by Side Diff: media/filters/audio_renderer_impl.cc

Issue 256163005: Introduce AudioClock to improve playback delay calculations. (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Created 6 years, 7 months ago
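The calls added in this patch (AudioCallbackFired(), WroteAudio(), WroteSilence(), BufferedTime()) suggest a tracker that is told, on every sink callback, how many frames of real audio and silence were written and what the sink's delay is, and that reports how much media time is still buffered downstream. The sketch below is only an illustration inferred from those call sites; the class name BufferedAudioTrackerSketch and its internals are assumptions, and the real class in media/filters/buffered_audio_tracker.h may be implemented quite differently.

// Hypothetical sketch only, inferred from the call sites in this patch; the
// real class lives in media/filters/buffered_audio_tracker.h and may differ.
#include <algorithm>
#include <deque>

#include "base/time/time.h"

namespace media {

class BufferedAudioTrackerSketch {
 public:
  explicit BufferedAudioTrackerSketch(int sample_rate)
      : sample_rate_(sample_rate) {}

  // Called at the start of each sink callback with the sink's delay in
  // frames; frames played out since the last callback are dropped from the
  // front of the buffered history.
  void AudioCallbackFired(int delay_frames) {
    int buffered = 0;
    for (const Block& block : blocks_)
      buffered += block.frames;
    int played = buffered - delay_frames;
    while (played > 0 && !blocks_.empty()) {
      const int trimmed = std::min(played, blocks_.front().frames);
      blocks_.front().frames -= trimmed;
      played -= trimmed;
      if (blocks_.front().frames == 0)
        blocks_.pop_front();
    }
  }

  // Frames of real audio written to the sink at the given playback rate.
  void WroteAudio(int frames, float playback_rate) {
    blocks_.push_back(Block(frames, playback_rate));
  }

  // Frames of silence written to the sink; silence occupies sink delay but
  // represents zero media time.
  void WroteSilence(int frames) { WroteAudio(frames, 0.0f); }

  // Media time still buffered downstream: each block of frames covers
  // frames * playback_rate / sample_rate seconds of media time, which is the
  // rate-aware accounting the old raw-delay math approximated.
  base::TimeDelta BufferedTime() const {
    double seconds = 0.0;
    for (const Block& block : blocks_)
      seconds += static_cast<double>(block.frames) * block.playback_rate /
                 sample_rate_;
    return base::TimeDelta::FromMicroseconds(
        static_cast<int64_t>(seconds * base::Time::kMicrosecondsPerSecond));
  }

 private:
  struct Block {
    Block(int frames, float playback_rate)
        : frames(frames), playback_rate(playback_rate) {}
    int frames;
    float playback_rate;
  };

  const int sample_rate_;
  std::deque<Block> blocks_;
};

}  // namespace media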
 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "media/filters/audio_renderer_impl.h"

 #include <math.h>

 #include <algorithm>

 #include "base/bind.h"
 #include "base/callback.h"
 #include "base/callback_helpers.h"
 #include "base/logging.h"
 #include "base/metrics/histogram.h"
 #include "base/single_thread_task_runner.h"
 #include "media/base/audio_buffer.h"
 #include "media/base/audio_buffer_converter.h"
 #include "media/base/audio_hardware_config.h"
 #include "media/base/audio_splicer.h"
 #include "media/base/bind_to_current_loop.h"
 #include "media/base/demuxer_stream.h"
+#include "media/filters/buffered_audio_tracker.h"
 #include "media/filters/decrypting_demuxer_stream.h"

 namespace media {

 namespace {

 enum AudioRendererEvent {
   INITIALIZED,
   RENDER_ERROR,
   RENDER_EVENT_MAX = RENDER_ERROR,
(...skipping 87 matching lines...)

 void AudioRendererImpl::DoPause_Locked() {
   DCHECK(task_runner_->BelongsToCurrentThread());
   lock_.AssertAcquired();

   if (sink_playing_) {
     {
       base::AutoUnlock auto_unlock(lock_);
       sink_->Pause();
     }
+    buffered_audio_tracker_->AudioCallbackFired(0);
     sink_playing_ = false;
   }
 }

 void AudioRendererImpl::Flush(const base::Closure& callback) {
   DCHECK(task_runner_->BelongsToCurrentThread());

   base::AutoLock auto_lock(lock_);
   DCHECK_EQ(state_, kPaused);
   DCHECK(flush_cb_.is_null());
(...skipping 141 matching lines...)
     const AudioParameters& hw_params = hardware_config_->GetOutputConfig();
     audio_parameters_.Reset(hw_params.format(),
                             hw_params.channel_layout(),
                             hw_params.channels(),
                             hw_params.input_channels(),
                             hw_params.sample_rate(),
                             hw_params.bits_per_sample(),
                             hardware_config_->GetHighLatencyBufferSize());
   }

+  buffered_audio_tracker_.reset(
+      new BufferedAudioTracker(audio_parameters_.sample_rate()));
+
   audio_buffer_stream_.Initialize(
       stream,
       statistics_cb,
       base::Bind(&AudioRendererImpl::OnAudioBufferStreamInitialized,
                  weak_factory_.GetWeakPtr()));
 }

 void AudioRendererImpl::OnAudioBufferStreamInitialized(bool success) {
   DCHECK(task_runner_->BelongsToCurrentThread());

(...skipping 258 matching lines...)
   DCHECK_EQ(state_, kPrerolling);
   return buffer && !buffer->end_of_stream() &&
          (buffer->timestamp() + buffer->duration()) < preroll_timestamp_;
 }

 int AudioRendererImpl::Render(AudioBus* audio_bus,
                               int audio_delay_milliseconds) {
   const int requested_frames = audio_bus->frames();
   base::TimeDelta current_time = kNoTimestamp();
   base::TimeDelta max_time = kNoTimestamp();
-  base::TimeDelta playback_delay = base::TimeDelta::FromMilliseconds(
-      audio_delay_milliseconds);

   int frames_written = 0;
   base::Closure underflow_cb;
   {
     base::AutoLock auto_lock(lock_);

+    // Convert milliseconds of delay into frames of delay.
+    buffered_audio_tracker_->AudioCallbackFired(static_cast<int>(
+        static_cast<float>(audio_delay_milliseconds) /
+        base::Time::kMillisecondsPerSecond * audio_parameters_.sample_rate()));
+
     // Ensure Stop() hasn't destroyed our |algorithm_| on the pipeline thread.
-    if (!algorithm_)
+    if (!algorithm_) {
+      buffered_audio_tracker_->WroteSilence(requested_frames);
scherkus (not reviewing) 2014/04/29 17:22:06: alternatively, we can delete buffered_audio_tracker_ …
DaleCurtis 2014/04/29 17:55:54: I don't think you need to track this. algorithm_ s…
       return 0;
+    }

     float playback_rate = algorithm_->playback_rate();
-    if (playback_rate == 0)
+    if (playback_rate == 0) {
+      buffered_audio_tracker_->WroteSilence(requested_frames);
       return 0;
+    }

     // Mute audio by returning 0 when not playing.
-    if (state_ != kPlaying)
+    if (state_ != kPlaying) {
+      buffered_audio_tracker_->WroteSilence(requested_frames);
       return 0;
+    }

     // We use the following conditions to determine end of playback:
     //   1) Algorithm can not fill the audio callback buffer
     //   2) We received an end of stream buffer
     //   3) We haven't already signalled that we've ended
     //   4) Our estimated earliest end time has expired
     //
     // TODO(enal): we should replace (4) with a check that the browser has no
     // more audio data or at least use a delayed callback.
     //
(...skipping 22 matching lines...)
         // |earliest_end_time_|.
       }
     }

     if (CanRead_Locked()) {
       task_runner_->PostTask(FROM_HERE,
                              base::Bind(&AudioRendererImpl::AttemptRead,
                                         weak_factory_.GetWeakPtr()));
     }

-    // Adjust the delay according to playback rate.
-    base::TimeDelta adjusted_playback_delay = base::TimeDelta::FromMicroseconds(
-        ceil(playback_delay.InMicroseconds() * playback_rate));
+    // Update buffered audio stats.
+    base::TimeDelta playback_delay = buffered_audio_tracker_->BufferedTime();
+    if (frames_written > 0)
+      buffered_audio_tracker_->WroteAudio(frames_written, playback_rate);
+    if (frames_written < requested_frames)
+      buffered_audio_tracker_->WroteSilence(requested_frames - frames_written);

     // The |audio_time_buffered_| is the ending timestamp of the last frame
     // buffered at the audio device. |playback_delay| is the amount of time
     // buffered at the audio device. The current time can be computed by their
     // difference.
     if (audio_time_buffered_ != kNoTimestamp()) {
       base::TimeDelta previous_time = current_time_;
-      current_time_ = audio_time_buffered_ - adjusted_playback_delay;
+      current_time_ = audio_time_buffered_ - playback_delay;

       // Time can change in one of two ways:
       //   1) The time of the audio data at the audio device changed, or
       //   2) The playback delay value has changed
       //
       // We only want to set |current_time| (and thus execute |time_cb_|) if
       // time has progressed and we haven't signaled end of stream yet.
       //
       // Why? The current latency of the system results in getting the last call
       // to FillBuffer() later than we'd like, which delays firing the 'ended'
       // event, which delays the looping/trigging performance of short sound
       // effects.
       //
       // TODO(scherkus): revisit this and switch back to relying on playback
       // delay after we've revamped our audio IPC subsystem.
       if (current_time_ > previous_time && !rendered_end_of_stream_) {
         current_time = current_time_;
       }
     } else if (frames_written > 0) {
       // Nothing has been buffered yet, so use the first buffer's timestamp.
       DCHECK(time_before_filling != kNoTimestamp());
-      current_time_ = current_time =
-          time_before_filling - adjusted_playback_delay;
+      current_time_ = current_time = time_before_filling - playback_delay;
     }

     // The call to FillBuffer() on |algorithm_| has increased the amount of
     // buffered audio data. Update the new amount of time buffered.
     max_time = algorithm_->GetTime();
     audio_time_buffered_ = max_time;

     if (frames_written > 0) {
       UpdateEarliestEndTime_Locked(
           frames_written, playback_delay, now_cb_.Run());
(...skipping 89 matching lines...)
   DCHECK(expecting_config_changes_);
   buffer_converter_->ResetTimestampState();
   // Drain flushed buffers from the converter so the AudioSplicer receives all
   // data ahead of any OnNewSpliceBuffer() calls. Since discontinuities should
   // only appear after config changes, AddInput() should never fail here.
   while (buffer_converter_->HasNextBuffer())
     CHECK(splicer_->AddInput(buffer_converter_->GetNextBuffer()));
 }

 }  // namespace media
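For reference, the arithmetic the new Render() code relies on: the sink reports its delay in milliseconds, which is converted to frames before being passed to AudioCallbackFired(), and the current media time is then the end timestamp of the buffered audio minus the tracker's BufferedTime(). The standalone illustration below uses made-up numbers (48 kHz sample rate, 20 ms sink delay, 1.5 s of media buffered); only the arithmetic mirrors the patch.

// Standalone illustration of the two conversions in Render(); the constants
// below are assumptions chosen for the example, not values from the patch.
#include <cstdio>

int main() {
  const int sample_rate = 48000;            // audio_parameters_.sample_rate()
  const int audio_delay_milliseconds = 20;  // reported by the audio sink

  // Milliseconds of sink delay -> frames of delay, as passed to
  // AudioCallbackFired() in the patch: 20 / 1000 * 48000 = 960 frames.
  const int delay_frames = static_cast<int>(
      static_cast<float>(audio_delay_milliseconds) / 1000 * sample_rate);

  // If the last frame handed to the sink has media timestamp 1.5 s and those
  // 960 frames were written at 1.0x, the buffered time is 960 / 48000 = 20 ms,
  // so the frame currently playing is at 1.5 s - 0.02 s = 1.48 s.
  const double audio_time_buffered_sec = 1.5;
  const double buffered_time_sec =
      static_cast<double>(delay_frames) / sample_rate;
  std::printf("delay_frames=%d current_time=%.3f s\n", delay_frames,
              audio_time_buffered_sec - buffered_time_sec);
  return 0;
}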