Chromium Code Reviews

Unified Diff: content/renderer/media/webrtc_audio_renderer.cc

Issue 1596523005: Drop WebRTC audio data if OS has skipped frames. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Code review (tommi@) Created 4 years, 11 months ago
Index: content/renderer/media/webrtc_audio_renderer.cc
diff --git a/content/renderer/media/webrtc_audio_renderer.cc b/content/renderer/media/webrtc_audio_renderer.cc
index 640d8dc4ad689ba6db96781017231662b532d0be..bd2a4a3c5e8576f98778b1e180b68ffcb8a394c5 100644
--- a/content/renderer/media/webrtc_audio_renderer.cc
+++ b/content/renderer/media/webrtc_audio_renderer.cc
@@ -39,6 +39,13 @@ namespace {
// between each callback leads to one UMA update each 100ms.
const int kNumCallbacksBetweenRenderTimeHistograms = 10;
+// Audio parameters that don't change.
+const media::AudioParameters::Format kFormat =
+ media::AudioParameters::AUDIO_PCM_LOW_LATENCY;
+const media::ChannelLayout kChannelLayout = media::CHANNEL_LAYOUT_STEREO;
+const int kChannels = 2;
+const int kBitsPerSample = 16;
+
// This is a simple wrapper class that's handed out to users of a shared
// WebRtcAudioRenderer instance. This class maintains the per-user 'playing'
// and 'started' states to avoid problems related to incorrect usage which
@@ -187,12 +194,7 @@ WebRtcAudioRenderer::WebRtcAudioRenderer(
play_ref_count_(0),
start_ref_count_(0),
audio_delay_milliseconds_(0),
- fifo_delay_milliseconds_(0),
- sink_params_(media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
- media::CHANNEL_LAYOUT_STEREO,
- 0,
- 16,
- 0),
+ sink_params_(kFormat, kChannelLayout, 0, kBitsPerSample, 0),
output_device_id_(device_id),
security_origin_(security_origin),
render_callback_count_(0) {
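
For reference, the new constants feed a two-phase parameter setup: the fixed fields are set at construction, while sample rate and frames per buffer stay 0 until PrepareSink() fills them in. A minimal standalone sketch of that pattern, using a hypothetical AudioParams stand-in rather than the real media::AudioParameters API:

#include <cassert>

// Hypothetical stand-in for media::AudioParameters, with the field order
// matching the constructor call in the patch; not the real Chromium API.
struct AudioParams {
  int format;
  int channel_layout;
  int sample_rate;        // 0 = unknown until the sink is prepared
  int bits_per_sample;
  int frames_per_buffer;  // 0 = unknown until the sink is prepared
};

int main() {
  const int kFormat = 0;         // assumed enum value for PCM low latency
  const int kChannelLayout = 0;  // assumed enum value for stereo
  const int kBitsPerSample = 16;

  // Construction: fixed fields set, rate and buffer size left as placeholders.
  AudioParams sink_params{kFormat, kChannelLayout, 0, kBitsPerSample, 0};

  // Later, in something like PrepareSink(), the hardware values arrive.
  sink_params.sample_rate = 48000;
  sink_params.frames_per_buffer = 480;

  assert(sink_params.sample_rate > 0);
  assert(sink_params.frames_per_buffer > 0);
  return 0;
}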
@@ -434,6 +436,28 @@ int WebRtcAudioRenderer::Render(media::AudioBus* audio_bus,
DCHECK_LE(audio_delay_milliseconds, static_cast<uint32_t>(INT_MAX));
audio_delay_milliseconds_ = static_cast<int>(audio_delay_milliseconds);
+ // If there are skipped frames, pull and throw away the same amount. We always
+ // pull 10 ms of data from the source (see PrepareSink()), so the fifo is only
+ // required if the number of frames to drop doesn't correspond to 10 ms.
+ if (frames_skipped > 0) {
+ const uint32_t source_frames_per_buffer =
+ static_cast<uint32_t>(sink_params_.sample_rate() / 100);
+ if (!audio_fifo_ && frames_skipped != source_frames_per_buffer) {
+ audio_fifo_.reset(new media::AudioPullFifo(
+ kChannels, source_frames_per_buffer,
+ base::Bind(&WebRtcAudioRenderer::SourceCallback,
+ base::Unretained(this))));
+ }
+
+ scoped_ptr<media::AudioBus> drop_bus =
+ media::AudioBus::Create(audio_bus->channels(), frames_skipped);
+ if (audio_fifo_)
+ audio_fifo_->Consume(drop_bus.get(), drop_bus->frames());
+ else
+ SourceCallback(0, drop_bus.get());
+ }
+
+ // Pull the data we will deliver.
if (audio_fifo_)
audio_fifo_->Consume(audio_bus, audio_bus->frames());
else
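
The fifo path in this hunk is only needed when the skip size differs from one 10 ms source buffer (sample_rate / 100 frames); a skip of exactly one source buffer can be discarded with a direct SourceCallback(). A standalone sketch of that decision with assumed values, not Chromium code:

#include <cstdint>
#include <iostream>

int main() {
  const int sample_rate = 48000;  // assumed hardware sample rate
  const uint32_t source_frames_per_buffer =
      static_cast<uint32_t>(sample_rate / 100);  // 10 ms = 480 frames

  const uint32_t skip_cases[] = {480, 512, 960};
  for (uint32_t frames_skipped : skip_cases) {
    // Same test the patch applies before creating the drop fifo in Render().
    const bool needs_fifo = frames_skipped != source_frames_per_buffer;
    std::cout << "skipped " << frames_skipped << " frames -> "
              << (needs_fifo ? "drop via fifo" : "drop via direct pull")
              << "\n";
  }
  return 0;
}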
@@ -457,7 +481,9 @@ void WebRtcAudioRenderer::SourceCallback(
<< audio_bus->frames() << ")";
int output_delay_milliseconds = audio_delay_milliseconds_;
- output_delay_milliseconds += fifo_delay_milliseconds_;
+ output_delay_milliseconds += fifo_frame_delay *
+ base::Time::kMillisecondsPerSecond /
+ sink_params_.sample_rate();
DVLOG(2) << "output_delay_milliseconds: " << output_delay_milliseconds;
// We need to keep render data for the |source_| regardless of |state_|,
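
The fixed fifo_delay_milliseconds_ member is replaced by converting the fifo's current frame delay on every callback: delay_ms = fifo_frame_delay * 1000 / sample_rate. A standalone arithmetic sketch with assumed values, not Chromium code:

#include <iostream>

int main() {
  const int kMillisecondsPerSecond = 1000;  // mirrors the base::Time constant
  const int sample_rate = 48000;            // assumed sink sample rate
  const int audio_delay_ms = 40;            // assumed OS-reported delay

  // One queued 10 ms source buffer (480 frames) adds 10 ms of delay.
  const int fifo_frame_delay = 480;
  const int output_delay_ms =
      audio_delay_ms + fifo_frame_delay * kMillisecondsPerSecond / sample_rate;
  std::cout << output_delay_ms << " ms\n";  // prints 50 ms
  return 0;
}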
@@ -606,55 +632,38 @@ void WebRtcAudioRenderer::PrepareSink() {
UMA_HISTOGRAM_COUNTS("WebRTC.AudioOutputSampleRateUnexpected", sample_rate);
}
- // Set up audio parameters for the source, i.e., the WebRTC client.
+ // Calculate the frames per buffer for the source, i.e. the WebRTC client.
+ // We use 10 ms of data since the WebRTC client only supports buffer sizes
+ // that are multiples of 10 ms, with 10 ms preferred for the lowest possible
+ // delay.
+ const int source_frames_per_buffer = (sample_rate / 100);
+ DVLOG(1) << "Using WebRTC output buffer size: " << source_frames_per_buffer;
- // The WebRTC client only supports multiples of 10ms as buffer size where
- // 10ms is preferred for lowest possible delay.
- const int frames_per_10ms = (sample_rate / 100);
- DVLOG(1) << "Using WebRTC output buffer size: " << frames_per_10ms;
- media::AudioParameters source_params(
- media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
- new_sink_params.channel_layout(), sample_rate, 16, frames_per_10ms);
- source_params.set_channels_for_discrete(new_sink_params.channels());
-
- const int frames_per_buffer = GetOptimalBufferSize(
+ // Set up sink parameters.
+ const int sink_frames_per_buffer = GetOptimalBufferSize(
sample_rate, sink_->GetOutputParameters().frames_per_buffer());
-
- new_sink_params.Reset(
- new_sink_params.format(), new_sink_params.channel_layout(),
- sample_rate, 16, frames_per_buffer);
+ new_sink_params.set_sample_rate(sample_rate);
+ new_sink_params.set_frames_per_buffer(sink_frames_per_buffer);
// Create a FIFO if re-buffering is required to match the source input with
// the sink request. The source acts as provider here and the sink as
// consumer.
- int new_fifo_delay_milliseconds = 0;
- scoped_ptr<media::AudioPullFifo> new_audio_fifo;
- if (source_params.frames_per_buffer() !=
- new_sink_params.frames_per_buffer()) {
- DVLOG(1) << "Rebuffering from " << source_params.frames_per_buffer()
- << " to " << new_sink_params.frames_per_buffer();
- new_audio_fifo.reset(new media::AudioPullFifo(
- source_params.channels(), source_params.frames_per_buffer(),
- base::Bind(&WebRtcAudioRenderer::SourceCallback,
- base::Unretained(this))));
-
- if (new_sink_params.frames_per_buffer() >
- source_params.frames_per_buffer()) {
- int frame_duration_milliseconds =
- base::Time::kMillisecondsPerSecond /
- static_cast<double>(source_params.sample_rate());
- new_fifo_delay_milliseconds = (new_sink_params.frames_per_buffer() -
- source_params.frames_per_buffer()) *
- frame_duration_milliseconds;
- }
+ const bool different_source_sink_frames =
+ source_frames_per_buffer != new_sink_params.frames_per_buffer();
+ if (different_source_sink_frames) {
+ DVLOG(1) << "Rebuffering from " << source_frames_per_buffer << " to "
+ << new_sink_params.frames_per_buffer();
}
-
{
base::AutoLock lock(lock_);
+ if ((!audio_fifo_ && different_source_sink_frames) ||
+ (audio_fifo_ &&
+ audio_fifo_->SizeInFrames() != source_frames_per_buffer)) {
+ audio_fifo_.reset(new media::AudioPullFifo(
+ kChannels, source_frames_per_buffer,
+ base::Bind(&WebRtcAudioRenderer::SourceCallback,
+ base::Unretained(this))));
+ }
sink_params_ = new_sink_params;
- fifo_delay_milliseconds_ = new_fifo_delay_milliseconds;
- if (new_audio_fifo.get())
- audio_fifo_ = std::move(new_audio_fifo);
}
sink_->Initialize(new_sink_params, this);
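
The condition above only rebuilds the fifo when it is missing or sized for a different 10 ms source buffer, so a sink change that keeps the sample rate reuses the existing fifo. A standalone sketch of that decision, using a hypothetical PullFifo stand-in rather than the real media::AudioPullFifo API:

#include <iostream>
#include <memory>

// Hypothetical stand-in for media::AudioPullFifo, just enough to show the
// (re)creation condition; not the real Chromium API.
struct PullFifo {
  explicit PullFifo(int frames) : size_in_frames(frames) {}
  int size_in_frames;
};

void MaybeRecreateFifo(std::unique_ptr<PullFifo>& fifo,
                       int source_frames,
                       int sink_frames) {
  const bool different = source_frames != sink_frames;
  if ((!fifo && different) ||
      (fifo && fifo->size_in_frames != source_frames)) {
    fifo = std::make_unique<PullFifo>(source_frames);
  }
}

int main() {
  std::unique_ptr<PullFifo> fifo;
  // 44.1 kHz: 441-frame source buffers vs. an assumed 512-frame sink buffer.
  MaybeRecreateFifo(fifo, 441, 512);
  std::cout << "fifo frames: " << (fifo ? fifo->size_in_frames : 0) << "\n";
  // Same rate again: the existing fifo is kept as-is.
  MaybeRecreateFifo(fifo, 441, 512);
  // Rate change to 48 kHz: the fifo is rebuilt for 480-frame buffers.
  MaybeRecreateFifo(fifo, 480, 512);
  std::cout << "fifo frames: " << (fifo ? fifo->size_in_frames : 0) << "\n";
  return 0;
}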