| OLD | NEW |
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "media/renderers/audio_renderer_impl.h" | 5 #include "media/renderers/audio_renderer_impl.h" |
| 6 | 6 |
| 7 #include <math.h> | 7 #include <math.h> |
| 8 #include <stddef.h> | 8 #include <stddef.h> |
| 9 #include <algorithm> | 9 #include <algorithm> |
| 10 #include <utility> | 10 #include <utility> |
| (...skipping 616 matching lines...) |
| 627 } | 627 } |
| 628 | 628 |
| 629 bool AudioRendererImpl::IsBeforeStartTime( | 629 bool AudioRendererImpl::IsBeforeStartTime( |
| 630 const scoped_refptr<AudioBuffer>& buffer) { | 630 const scoped_refptr<AudioBuffer>& buffer) { |
| 631 DCHECK_EQ(state_, kPlaying); | 631 DCHECK_EQ(state_, kPlaying); |
| 632 return buffer.get() && !buffer->end_of_stream() && | 632 return buffer.get() && !buffer->end_of_stream() && |
| 633 (buffer->timestamp() + buffer->duration()) < start_timestamp_; | 633 (buffer->timestamp() + buffer->duration()) < start_timestamp_; |
| 634 } | 634 } |
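Reviewer sketch: IsBeforeStartTime() above drops a buffer only when its entire span ends strictly before start_timestamp_. A standalone illustration of that check, with microsecond values and a helper name chosen here purely for the example:

  bool EndsBeforeStart(int64_t timestamp_us, int64_t duration_us,
                       int64_t start_timestamp_us) {
    // Mirrors the condition above: the buffer's end must fall strictly before
    // the configured start timestamp for the buffer to be skipped.
    return timestamp_us + duration_us < start_timestamp_us;
  }
  // A 20 ms buffer starting at 0 with a 30 ms start time ends before start and
  // is skipped; the same buffer starting at 20 ms overlaps the start and is kept.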
| 635 | 635 |
| 636 int AudioRendererImpl::Render(AudioBus* audio_bus, | 636 int AudioRendererImpl::Render(AudioBus* audio_bus, |
| 637 uint32_t audio_delay_milliseconds, | 637 uint32_t frames_delayed, |
| 638 uint32_t frames_skipped) { | 638 uint32_t frames_skipped) { |
| 639 const int requested_frames = audio_bus->frames(); | 639 const int frames_requested = audio_bus->frames(); |
| 640 base::TimeDelta playback_delay = base::TimeDelta::FromMilliseconds( | 640 DVLOG(4) << __FUNCTION__ << " frames_delayed:" << frames_delayed |
| 641 audio_delay_milliseconds); | 641 << " frames_skipped:" << frames_skipped |
| 642 const int delay_frames = static_cast<int>(playback_delay.InSecondsF() * | 642 << " frames_requested:" << frames_requested; |
| 643 audio_parameters_.sample_rate()); | 643 |
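Reviewer note: the sink now reports the delay directly in frames, so the millisecond-to-frame conversion on the OLD side above disappears and Render() just logs the values it receives. For reference, the deleted arithmetic was equivalent to this sketch (48000 Hz and 20 ms are assumed example values, not taken from this CL):

  // OLD behaviour, reconstructed: convert a millisecond delay into frames.
  const int sample_rate = 48000;                 // assumed example value
  const uint32_t audio_delay_milliseconds = 20;  // assumed example value
  const int delay_frames =
      static_cast<int>((audio_delay_milliseconds / 1000.0) * sample_rate);
  // 20 ms at 48000 Hz -> 960 frames; the NEW code receives that count directly
  // as frames_delayed and avoids the conversion (and its rounding).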
| 644 int frames_written = 0; | 644 int frames_written = 0; |
| 645 { | 645 { |
| 646 base::AutoLock auto_lock(lock_); | 646 base::AutoLock auto_lock(lock_); |
| 647 last_render_time_ = tick_clock_->NowTicks(); | 647 last_render_time_ = tick_clock_->NowTicks(); |
| 648 | 648 |
| 649 if (!stop_rendering_time_.is_null()) { | 649 if (!stop_rendering_time_.is_null()) { |
| 650 audio_clock_->CompensateForSuspendedWrites( | 650 audio_clock_->CompensateForSuspendedWrites( |
| 651 last_render_time_ - stop_rendering_time_, delay_frames); | 651 last_render_time_ - stop_rendering_time_, frames_delayed); |
| 652 stop_rendering_time_ = base::TimeTicks(); | 652 stop_rendering_time_ = base::TimeTicks(); |
| 653 } | 653 } |
| 654 | 654 |
| 655 // Ensure Stop() hasn't destroyed our |algorithm_| on the pipeline thread. | 655 // Ensure Stop() hasn't destroyed our |algorithm_| on the pipeline thread. |
| 656 if (!algorithm_) { | 656 if (!algorithm_) { |
| 657 audio_clock_->WroteAudio( | 657 audio_clock_->WroteAudio(0, frames_requested, frames_delayed, |
| 658 0, requested_frames, delay_frames, playback_rate_); | 658 playback_rate_); |
| 659 return 0; | 659 return 0; |
| 660 } | 660 } |
| 661 | 661 |
| 662 if (playback_rate_ == 0) { | 662 if (playback_rate_ == 0) { |
| 663 audio_clock_->WroteAudio( | 663 audio_clock_->WroteAudio(0, frames_requested, frames_delayed, |
| 664 0, requested_frames, delay_frames, playback_rate_); | 664 playback_rate_); |
| 665 return 0; | 665 return 0; |
| 666 } | 666 } |
| 667 | 667 |
| 668 // Mute audio by returning 0 when not playing. | 668 // Mute audio by returning 0 when not playing. |
| 669 if (state_ != kPlaying) { | 669 if (state_ != kPlaying) { |
| 670 audio_clock_->WroteAudio( | 670 audio_clock_->WroteAudio(0, frames_requested, frames_delayed, |
| 671 0, requested_frames, delay_frames, playback_rate_); | 671 playback_rate_); |
| 672 return 0; | 672 return 0; |
| 673 } | 673 } |
| 674 | 674 |
| 675 // Delay playback by writing silence if we haven't reached the first | 675 // Delay playback by writing silence if we haven't reached the first |
| 676 // timestamp yet; this can occur if the video starts before the audio. | 676 // timestamp yet; this can occur if the video starts before the audio. |
| 677 if (algorithm_->frames_buffered() > 0) { | 677 if (algorithm_->frames_buffered() > 0) { |
| 678 CHECK_NE(first_packet_timestamp_, kNoTimestamp()); | 678 CHECK_NE(first_packet_timestamp_, kNoTimestamp()); |
| 679 CHECK_GE(first_packet_timestamp_, base::TimeDelta()); | 679 CHECK_GE(first_packet_timestamp_, base::TimeDelta()); |
| 680 const base::TimeDelta play_delay = | 680 const base::TimeDelta play_delay = |
| 681 first_packet_timestamp_ - audio_clock_->back_timestamp(); | 681 first_packet_timestamp_ - audio_clock_->back_timestamp(); |
| 682 CHECK_LT(play_delay.InSeconds(), 1000) | 682 CHECK_LT(play_delay.InSeconds(), 1000) |
| 683 << "first_packet_timestamp_ = " << first_packet_timestamp_ | 683 << "first_packet_timestamp_ = " << first_packet_timestamp_ |
| 684 << ", audio_clock_->back_timestamp() = " | 684 << ", audio_clock_->back_timestamp() = " |
| 685 << audio_clock_->back_timestamp(); | 685 << audio_clock_->back_timestamp(); |
| 686 if (play_delay > base::TimeDelta()) { | 686 if (play_delay > base::TimeDelta()) { |
| 687 DCHECK_EQ(frames_written, 0); | 687 DCHECK_EQ(frames_written, 0); |
| 688 frames_written = | 688 frames_written = |
| 689 std::min(static_cast<int>(play_delay.InSecondsF() * | 689 std::min(static_cast<int>(play_delay.InSecondsF() * |
| 690 audio_parameters_.sample_rate()), | 690 audio_parameters_.sample_rate()), |
| 691 requested_frames); | 691 frames_requested); |
| 692 audio_bus->ZeroFramesPartial(0, frames_written); | 692 audio_bus->ZeroFramesPartial(0, frames_written); |
| 693 } | 693 } |
| 694 | 694 |
| 695 // If there's any space left, actually render the audio; this is where the | 695 // If there's any space left, actually render the audio; this is where the |
| 696 // aural magic happens. | 696 // aural magic happens. |
| 697 if (frames_written < requested_frames) { | 697 if (frames_written < frames_requested) { |
| 698 frames_written += algorithm_->FillBuffer( | 698 frames_written += algorithm_->FillBuffer( |
| 699 audio_bus, frames_written, requested_frames - frames_written, | 699 audio_bus, frames_written, frames_requested - frames_written, |
| 700 playback_rate_); | 700 playback_rate_); |
| 701 } | 701 } |
| 702 } | 702 } |
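Reviewer sketch of the pre-roll silence clamp above: the number of silent frames written is the play delay converted to frames, capped at what the callback requested; any remaining frames are filled from the algorithm. A standalone illustration (the helper name and values are mine, not part of the CL):

  #include <algorithm>

  int SilenceFramesToWrite(double play_delay_seconds, int sample_rate,
                           int frames_requested) {
    // Same math as the std::min() above: delay expressed in frames, clamped to
    // the size of the current callback buffer.
    const int delay_frames =
        static_cast<int>(play_delay_seconds * sample_rate);
    return std::min(delay_frames, frames_requested);
  }
  // e.g. a 60 ms delay at 48000 Hz needs 2880 silent frames, but a 512-frame
  // request only gets 512 of them; the rest are written on later callbacks.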
| 703 | 703 |
| 704 // We use the following conditions to determine end of playback: | 704 // We use the following conditions to determine end of playback: |
| 705 // 1) Algorithm can not fill the audio callback buffer | 705 // 1) Algorithm can not fill the audio callback buffer |
| 706 // 2) We received an end of stream buffer | 706 // 2) We received an end of stream buffer |
| 707 // 3) We haven't already signalled that we've ended | 707 // 3) We haven't already signalled that we've ended |
| 708 // 4) We've played all known audio data sent to hardware | 708 // 4) We've played all known audio data sent to hardware |
| 709 // | 709 // |
| (...skipping 10 matching lines...) |
| 720 // data has ended. | 720 // data has ended. |
| 721 // | 721 // |
| 722 // That being said, we don't want to advance time when underflowed as we | 722 // That being said, we don't want to advance time when underflowed as we |
| 723 // know more decoded frames will eventually arrive. If we did, we would | 723 // know more decoded frames will eventually arrive. If we did, we would |
| 724 // throw things out of sync when said decoded frames arrive. | 724 // throw things out of sync when said decoded frames arrive. |
| 725 int frames_after_end_of_stream = 0; | 725 int frames_after_end_of_stream = 0; |
| 726 if (frames_written == 0) { | 726 if (frames_written == 0) { |
| 727 if (received_end_of_stream_) { | 727 if (received_end_of_stream_) { |
| 728 if (ended_timestamp_ == kInfiniteDuration()) | 728 if (ended_timestamp_ == kInfiniteDuration()) |
| 729 ended_timestamp_ = audio_clock_->back_timestamp(); | 729 ended_timestamp_ = audio_clock_->back_timestamp(); |
| 730 frames_after_end_of_stream = requested_frames; | 730 frames_after_end_of_stream = frames_requested; |
| 731 } else if (state_ == kPlaying && | 731 } else if (state_ == kPlaying && |
| 732 buffering_state_ != BUFFERING_HAVE_NOTHING) { | 732 buffering_state_ != BUFFERING_HAVE_NOTHING) { |
| 733 algorithm_->IncreaseQueueCapacity(); | 733 algorithm_->IncreaseQueueCapacity(); |
| 734 SetBufferingState_Locked(BUFFERING_HAVE_NOTHING); | 734 SetBufferingState_Locked(BUFFERING_HAVE_NOTHING); |
| 735 } | 735 } |
| 736 } | 736 } |
| 737 | 737 |
| 738 audio_clock_->WroteAudio(frames_written + frames_after_end_of_stream, | 738 audio_clock_->WroteAudio(frames_written + frames_after_end_of_stream, |
| 739 requested_frames, | 739 frames_requested, frames_delayed, playback_rate_); |
| 740 delay_frames, | |
| 741 playback_rate_); | |
| 742 | 740 |
| 743 if (CanRead_Locked()) { | 741 if (CanRead_Locked()) { |
| 744 task_runner_->PostTask(FROM_HERE, | 742 task_runner_->PostTask(FROM_HERE, |
| 745 base::Bind(&AudioRendererImpl::AttemptRead, | 743 base::Bind(&AudioRendererImpl::AttemptRead, |
| 746 weak_factory_.GetWeakPtr())); | 744 weak_factory_.GetWeakPtr())); |
| 747 } | 745 } |
| 748 | 746 |
| 749 if (audio_clock_->front_timestamp() >= ended_timestamp_ && | 747 if (audio_clock_->front_timestamp() >= ended_timestamp_ && |
| 750 !rendered_end_of_stream_) { | 748 !rendered_end_of_stream_) { |
| 751 rendered_end_of_stream_ = true; | 749 rendered_end_of_stream_ = true; |
| 752 task_runner_->PostTask(FROM_HERE, ended_cb_); | 750 task_runner_->PostTask(FROM_HERE, ended_cb_); |
| 753 } | 751 } |
| 754 } | 752 } |
| 755 | 753 |
| 756 DCHECK_LE(frames_written, requested_frames); | 754 DCHECK_LE(frames_written, frames_requested); |
| 757 return frames_written; | 755 return frames_written; |
| 758 } | 756 } |
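Reviewer sketch: the end-of-playback logic spread across the comment block and code above boils down to the four listed conditions. A condensed, hypothetical restatement (the real code splits this across two steps, first latching ended_timestamp_ on underflow with end-of-stream, then firing ended_cb_ once the clock's front timestamp catches up):

  bool PlaybackEnded(bool algorithm_underflowed, bool received_end_of_stream,
                     bool already_signalled_ended,
                     int64_t front_timestamp_us, int64_t ended_timestamp_us) {
    // Conditions 1-4 from the comment block: underflow, end-of-stream received,
    // ended not yet signalled, and all audio written to hardware has played out.
    return algorithm_underflowed && received_end_of_stream &&
           !already_signalled_ended &&
           front_timestamp_us >= ended_timestamp_us;
  }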
| 759 | 757 |
| 760 void AudioRendererImpl::OnRenderError() { | 758 void AudioRendererImpl::OnRenderError() { |
| 761 // UMA data tells us this happens ~0.01% of the time. Trigger an error instead | 759 // UMA data tells us this happens ~0.01% of the time. Trigger an error instead |
| 762 // of trying to gracefully fall back to a fake sink. It's very likely | 760 // of trying to gracefully fall back to a fake sink. It's very likely |
| 763 // OnRenderError() should be removed and the audio stack handle errors without | 761 // OnRenderError() should be removed and the audio stack handle errors without |
| 764 // notifying clients. See http://crbug.com/234708 for details. | 762 // notifying clients. See http://crbug.com/234708 for details. |
| 765 HistogramRendererEvent(RENDER_ERROR); | 763 HistogramRendererEvent(RENDER_ERROR); |
| 766 | 764 |
| (...skipping 64 matching lines...) |
| 831 << buffering_state; | 829 << buffering_state; |
| 832 DCHECK_NE(buffering_state_, buffering_state); | 830 DCHECK_NE(buffering_state_, buffering_state); |
| 833 lock_.AssertAcquired(); | 831 lock_.AssertAcquired(); |
| 834 buffering_state_ = buffering_state; | 832 buffering_state_ = buffering_state; |
| 835 | 833 |
| 836 task_runner_->PostTask(FROM_HERE, | 834 task_runner_->PostTask(FROM_HERE, |
| 837 base::Bind(buffering_state_cb_, buffering_state_)); | 835 base::Bind(buffering_state_cb_, buffering_state_)); |
| 838 } | 836 } |
| 839 | 837 |
| 840 } // namespace media | 838 } // namespace media |