Chromium Code Reviews| Index: content/renderer/media/webrtc_audio_renderer.cc |
| diff --git a/content/renderer/media/webrtc_audio_renderer.cc b/content/renderer/media/webrtc_audio_renderer.cc |
| index 640d8dc4ad689ba6db96781017231662b532d0be..6689222262dac1be09a3c8330de50cddbee3c557 100644 |
| --- a/content/renderer/media/webrtc_audio_renderer.cc |
| +++ b/content/renderer/media/webrtc_audio_renderer.cc |
| @@ -187,7 +187,6 @@ WebRtcAudioRenderer::WebRtcAudioRenderer( |
| play_ref_count_(0), |
| start_ref_count_(0), |
| audio_delay_milliseconds_(0), |
| - fifo_delay_milliseconds_(0), |
| sink_params_(media::AudioParameters::AUDIO_PCM_LOW_LATENCY, |
| media::CHANNEL_LAYOUT_STEREO, |
| 0, |
| @@ -434,6 +433,26 @@ int WebRtcAudioRenderer::Render(media::AudioBus* audio_bus, |
| DCHECK_LE(audio_delay_milliseconds, static_cast<uint32_t>(INT_MAX)); |
| audio_delay_milliseconds_ = static_cast<int>(audio_delay_milliseconds); |
| + // If there are skipped frames, pull and throw away the same amount. |
| + if (frames_skipped > 0) { |
| + const uint32_t frames_per_10ms = |
| + static_cast<uint32_t>(sink_params_.sample_rate() / 100); |
| + if (!audio_fifo_ && frames_skipped != frames_per_10ms) { |
| + audio_fifo_.reset(new media::AudioPullFifo( |
| + sink_params_.channels(), frames_per_10ms, |
| + base::Bind(&WebRtcAudioRenderer::SourceCallback, |
| + base::Unretained(this)))); |
| + } |
| + |
| + scoped_ptr<media::AudioBus> drop_bus = |
| + media::AudioBus::Create(audio_bus->channels(), frames_skipped); |
| + if (audio_fifo_) |
| + audio_fifo_->Consume(drop_bus.get(), drop_bus->frames()); |
| + else |
| + SourceCallback(0, drop_bus.get()); |
| + } |
| + |
| + // Pull the data we shall deliver. |
|
tommi (sloooow) - chröme
2016/01/15 18:50:03
nit: s/shall/will
Henrik Grunell
2016/01/18 13:00:54
Done.
|
| if (audio_fifo_) |
| audio_fifo_->Consume(audio_bus, audio_bus->frames()); |
| else |
| @@ -457,7 +476,9 @@ void WebRtcAudioRenderer::SourceCallback( |
| << audio_bus->frames() << ")"; |
| int output_delay_milliseconds = audio_delay_milliseconds_; |
| - output_delay_milliseconds += fifo_delay_milliseconds_; |
| + output_delay_milliseconds += fifo_frame_delay * |
| + base::Time::kMillisecondsPerSecond / |
| + sink_params_.sample_rate(); |
| DVLOG(2) << "output_delay_milliseconds: " << output_delay_milliseconds; |
| // We need to keep render data for the |source_| regardless of |state_|, |
| @@ -627,34 +648,28 @@ void WebRtcAudioRenderer::PrepareSink() { |
| // Create a FIFO if re-buffering is required to match the source input with |
| // the sink request. The source acts as provider here and the sink as |
| // consumer. |
| - int new_fifo_delay_milliseconds = 0; |
| scoped_ptr<media::AudioPullFifo> new_audio_fifo; |
| - if (source_params.frames_per_buffer() != |
| - new_sink_params.frames_per_buffer()) { |
| + DCHECK_EQ(source_params.channels(), |
| + media::ChannelLayoutToChannelCount(media::CHANNEL_LAYOUT_STEREO)); |
|
tommi (sloooow) - chröme
2016/01/15 18:50:03
new assumption or did the previous code assume this?
Henrik Grunell
2016/01/18 13:00:54
Actually, the assumption is that source and sink operate with the same (stereo) channel count.
|
| + const bool different_source_sink_frames = |
| + source_params.frames_per_buffer() != new_sink_params.frames_per_buffer(); |
| + if (different_source_sink_frames) { |
| DVLOG(1) << "Rebuffering from " << source_params.frames_per_buffer() |
| << " to " << new_sink_params.frames_per_buffer(); |
| - new_audio_fifo.reset(new media::AudioPullFifo( |
| - source_params.channels(), source_params.frames_per_buffer(), |
| - base::Bind(&WebRtcAudioRenderer::SourceCallback, |
| - base::Unretained(this)))); |
| - |
| - if (new_sink_params.frames_per_buffer() > |
| - source_params.frames_per_buffer()) { |
| - int frame_duration_milliseconds = |
| - base::Time::kMillisecondsPerSecond / |
| - static_cast<double>(source_params.sample_rate()); |
| - new_fifo_delay_milliseconds = (new_sink_params.frames_per_buffer() - |
| - source_params.frames_per_buffer()) * |
| - frame_duration_milliseconds; |
| - } |
| } |
| { |
| base::AutoLock lock(lock_); |
| + const bool source_frames_changed = |
| + audio_fifo_->SizeInFrames() != source_params.frames_per_buffer(); |
| + if ((!audio_fifo_ && different_source_sink_frames) || |
| + (audio_fifo_ && source_frames_changed)) { |
| + audio_fifo_.reset(new media::AudioPullFifo( |
| + source_params.channels(), source_params.frames_per_buffer(), |
| + base::Bind(&WebRtcAudioRenderer::SourceCallback, |
| + base::Unretained(this)))); |
| + } |
| sink_params_ = new_sink_params; |
| - fifo_delay_milliseconds_ = new_fifo_delay_milliseconds; |
| - if (new_audio_fifo.get()) |
| - audio_fifo_ = std::move(new_audio_fifo); |
| } |
| sink_->Initialize(new_sink_params, this); |