Chromium Code Reviews

Unified Diff: content/renderer/media/webrtc_audio_renderer.cc

Issue 139303016: Feed the render data to MediaStreamAudioProcessor and use AudioBus in the render callback (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: rebased and added the thread check in the destructor Created 6 years, 10 months ago
Index: content/renderer/media/webrtc_audio_renderer.cc
diff --git a/content/renderer/media/webrtc_audio_renderer.cc b/content/renderer/media/webrtc_audio_renderer.cc
index fb7a848910cabc5151d4b07d37eddbec323d7fd5..471f92aa49915a7d4dda0f491f5f7fdc0d9687d9 100644
--- a/content/renderer/media/webrtc_audio_renderer.cc
+++ b/content/renderer/media/webrtc_audio_renderer.cc
@@ -201,6 +201,7 @@ WebRtcAudioRenderer::WebRtcAudioRenderer(
audio_delay_milliseconds_(0),
fifo_delay_milliseconds_(0),
sample_rate_(sample_rate),
+ number_of_channels_(0),
frames_per_buffer_(frames_per_buffer) {
WebRtcLogMessage(base::StringPrintf(
"WAR::WAR. source_render_view_id=%d"
@@ -214,7 +215,6 @@ WebRtcAudioRenderer::WebRtcAudioRenderer(
WebRtcAudioRenderer::~WebRtcAudioRenderer() {
DCHECK(thread_checker_.CalledOnValidThread());
DCHECK_EQ(state_, UNINITIALIZED);
- buffer_.reset();
}
bool WebRtcAudioRenderer::Initialize(WebRtcAudioRendererSource* source) {
@@ -229,10 +229,7 @@ bool WebRtcAudioRenderer::Initialize(WebRtcAudioRendererSource* source) {
// Use stereo output on all platforms.
media::ChannelLayout channel_layout = media::CHANNEL_LAYOUT_STEREO;
- // TODO(tommi,henrika): Maybe we should just change |sample_rate_| to be
- // immutable and change its value instead of using a temporary?
- int sample_rate = sample_rate_;
- DVLOG(1) << "Audio output hardware sample rate: " << sample_rate;
+ DVLOG(1) << "Audio output hardware sample rate: " << sample_rate_;
// WebRTC does not yet support higher rates than 96000 on the client side
// and 48000 is the preferred sample rate. Therefore, if 192000 is detected,
@@ -240,25 +237,26 @@ bool WebRtcAudioRenderer::Initialize(WebRtcAudioRendererSource* source) {
// layer will be opened up at 192kHz but WebRTC will provide data at 48kHz
which will then be resampled by the audio converter on the browser side
// to match the native audio layer.
- if (sample_rate == 192000) {
+ if (sample_rate_ == 192000) {
DVLOG(1) << "Resampling from 48000 to 192000 is required";
- sample_rate = 48000;
+ sample_rate_ = 48000;
}
- media::AudioSampleRate asr = media::AsAudioSampleRate(sample_rate);
+ media::AudioSampleRate asr = media::AsAudioSampleRate(sample_rate_);
if (asr != media::kUnexpectedAudioSampleRate) {
UMA_HISTOGRAM_ENUMERATION(
"WebRTC.AudioOutputSampleRate", asr, media::kUnexpectedAudioSampleRate);
} else {
- UMA_HISTOGRAM_COUNTS("WebRTC.AudioOutputSampleRateUnexpected", sample_rate);
+ UMA_HISTOGRAM_COUNTS("WebRTC.AudioOutputSampleRateUnexpected",
+ sample_rate_);
}
// Verify that the reported output hardware sample rate is supported
// on the current platform.
if (std::find(&kValidOutputRates[0],
&kValidOutputRates[0] + arraysize(kValidOutputRates),
- sample_rate) ==
+ sample_rate_) ==
&kValidOutputRates[arraysize(kValidOutputRates)]) {
- DLOG(ERROR) << sample_rate << " is not a supported output rate.";
+ DLOG(ERROR) << sample_rate_ << " is not a supported output rate.";
return false;
}
@@ -267,13 +265,13 @@ bool WebRtcAudioRenderer::Initialize(WebRtcAudioRendererSource* source) {
// The WebRTC client only supports multiples of 10ms as buffer size where
// 10ms is preferred for lowest possible delay.
media::AudioParameters source_params;
- int buffer_size = (sample_rate / 100);
- DVLOG(1) << "Using WebRTC output buffer size: " << buffer_size;
+ const int frames_per_10ms = (sample_rate_ / 100);
+ DVLOG(1) << "Using WebRTC output buffer size: " << frames_per_10ms;
- int channels = ChannelLayoutToChannelCount(channel_layout);
+ number_of_channels_ = ChannelLayoutToChannelCount(channel_layout);
source_params.Reset(media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
- channel_layout, channels, 0,
- sample_rate, 16, buffer_size);
+ channel_layout, number_of_channels_, 0,
+ sample_rate_, 16, frames_per_10ms);
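
WebRTC produces and consumes audio in 10 ms chunks, so the source-side buffer is simply sample_rate / 100 frames. A one-line sketch of the arithmetic behind frames_per_10ms above:

  // Frames in one 10 ms chunk at the given rate, e.g.
  // 48000 / 100 = 480, 44100 / 100 = 441, 16000 / 100 = 160.
  int FramesPer10ms(int sample_rate) { return sample_rate / 100; }
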
// Set up audio parameters for the sink, i.e., the native audio output stream.
// We strive to open up using native parameters to achieve best possible
@@ -283,26 +281,25 @@ bool WebRtcAudioRenderer::Initialize(WebRtcAudioRendererSource* source) {
media::AudioParameters sink_params;
- // Use native output siz as default.
- buffer_size = frames_per_buffer_;
+ // Use native output size as default.
#if defined(OS_ANDROID)
// TODO(henrika): Keep tuning this scheme, especially for low-latency
// cases. Might not be possible to come up with the perfect solution using
// the render side only.
- const int frames_per_10ms = (sample_rate / 100);
- if (buffer_size < 2 * frames_per_10ms) {
+ if (frames_per_buffer_ < 2 * frames_per_10ms) {
// Examples of low-latency frame sizes and the resulting |buffer_size|:
// Nexus 7 : 240 audio frames => 2*480 = 960
// Nexus 10 : 256 => 2*441 = 882
// Galaxy Nexus: 144 => 2*441 = 882
- buffer_size = 2 * frames_per_10ms;
+ frames_per_buffer_ = 2 * frames_per_10ms;
DVLOG(1) << "Low-latency output detected on Android";
}
#endif
- DVLOG(1) << "Using sink output buffer size: " << buffer_size;
+ DVLOG(1) << "Using sink output buffer size: " << frames_per_buffer_;
sink_params.Reset(media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
- channel_layout, channels, 0, sample_rate, 16, buffer_size);
+ channel_layout, number_of_channels_, 0, sample_rate_, 16,
+ frames_per_buffer_);
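
The Android branch rounds small native buffers up to two 10 ms chunks (the exact scheme is still being tuned, per the TODO above). A sketch of the computation with the worked examples from the comment:

  // Nexus 7 (48000 Hz):      240 < 2 * 480 -> use 960 frames.
  // Nexus 10 (44100 Hz):     256 < 2 * 441 -> use 882 frames.
  // Galaxy Nexus (44100 Hz): 144 < 2 * 441 -> use 882 frames.
  int SinkBufferSize(int native_frames_per_buffer, int sample_rate) {
    const int frames_per_10ms = sample_rate / 100;
    if (native_frames_per_buffer < 2 * frames_per_10ms)
      return 2 * frames_per_10ms;
    return native_frames_per_buffer;
  }
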
// Create a FIFO if re-buffering is required to match the source input with
// the sink request. The source acts as provider here and the sink as
@@ -326,15 +323,7 @@ bool WebRtcAudioRenderer::Initialize(WebRtcAudioRendererSource* source) {
}
}
- // Allocate local audio buffers based on the parameters above.
- // It is assumed that each audio sample contains 16 bits and each
- // audio frame contains one or two audio samples depending on the
- // number of channels.
- buffer_.reset(
- new int16[source_params.frames_per_buffer() * source_params.channels()]);
-
source_ = source;
- source->SetRenderFormat(source_params);
// Configure the audio rendering client and start rendering.
sink_ = AudioDeviceFactory::NewOutputDevice(
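
Dropping SetRenderFormat() works because the new RenderData() call in the next hunk passes both the AudioBus and the sample rate on every invocation. A sketch of the interface change implied by the two call sites in this patch (parameter names are assumptions; the real declaration lives in WebRtcAudioRendererSource):

  // Before: interleaved int16 samples written into a renderer-owned
  // buffer, with the format communicated once via SetRenderFormat().
  virtual void RenderData(uint8* audio_data,
                          int number_of_channels,
                          int number_of_frames,
                          int audio_delay_milliseconds) = 0;

  // After: data is exchanged directly through media::AudioBus, and the
  // sample rate travels with every callback.
  virtual void RenderData(media::AudioBus* audio_bus,
                          int sample_rate,
                          int audio_delay_milliseconds) = 0;
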
@@ -515,22 +504,12 @@ void WebRtcAudioRenderer::SourceCallback(
// We need to keep render data for the |source_| regardless of |state_|,
// otherwise the data will be buffered up inside |source_|.
- source_->RenderData(reinterpret_cast<uint8*>(buffer_.get()),
- audio_bus->channels(), audio_bus->frames(),
- output_delay_milliseconds);
+ source_->RenderData(audio_bus, sample_rate_, output_delay_milliseconds);
// Avoid filling up the audio bus if we are not playing; instead
// return here and ensure that the returned value in Render() is 0.
- if (state_ != PLAYING) {
+ if (state_ != PLAYING)
audio_bus->Zero();
- return;
- }
-
- // De-interleave each channel and convert to 32-bit floating-point
- // with nominal range -1.0 -> +1.0 to match the callback format.
- audio_bus->FromInterleaved(buffer_.get(),
- audio_bus->frames(),
- sizeof(buffer_[0]));
}
void WebRtcAudioRenderer::UpdateSourceVolume(
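
The block deleted at the end of SourceCallback() converted interleaved 16-bit samples into the planar float format that media::AudioBus uses internally; with RenderData() now filling the AudioBus directly, no conversion remains in the renderer. Roughly what the removed FromInterleaved() call performed (illustrative only; the exact scaling inside media::AudioBus may differ):

  #include <cstdint>

  // Expand interleaved int16 samples into per-channel float arrays with
  // a nominal range of -1.0 to +1.0.
  void DeinterleaveToFloat(const int16_t* interleaved,
                           int frames,
                           int channels,
                           float* const* channel_data) {
    for (int f = 0; f < frames; ++f) {
      for (int ch = 0; ch < channels; ++ch)
        channel_data[ch][f] = interleaved[f * channels + ch] / 32768.0f;
    }
  }
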
