Index: content/renderer/media/webrtc_audio_renderer.cc
diff --git a/content/renderer/media/webrtc_audio_renderer.cc b/content/renderer/media/webrtc_audio_renderer.cc
index 825a4e8f456220e8b3bca75a77474df9763c135c..fe26c9a3f3cf0e70c042534c20f1afb52b0f6575 100644
--- a/content/renderer/media/webrtc_audio_renderer.cc
+++ b/content/renderer/media/webrtc_audio_renderer.cc
@@ -179,6 +179,7 @@ WebRtcAudioRenderer::WebRtcAudioRenderer(int source_render_view_id,
       audio_delay_milliseconds_(0),
       fifo_delay_milliseconds_(0),
       sample_rate_(sample_rate),
+      number_of_channels_(0),
       frames_per_buffer_(frames_per_buffer) {
   WebRtcLogMessage(base::StringPrintf(
       "WAR::WAR. source_render_view_id=%d"
@@ -192,7 +193,6 @@ WebRtcAudioRenderer::WebRtcAudioRenderer(int source_render_view_id,
 WebRtcAudioRenderer::~WebRtcAudioRenderer() {
   DCHECK(thread_checker_.CalledOnValidThread());
   DCHECK_EQ(state_, UNINITIALIZED);
-  buffer_.reset();
 }

 bool WebRtcAudioRenderer::Initialize(WebRtcAudioRendererSource* source) {
@@ -207,10 +207,7 @@ bool WebRtcAudioRenderer::Initialize(WebRtcAudioRendererSource* source) {
   // Use stereo output on all platforms.
   media::ChannelLayout channel_layout = media::CHANNEL_LAYOUT_STEREO;
-  // TODO(tommi,henrika): Maybe we should just change |sample_rate_| to be
-  // immutable and change its value instead of using a temporary?
-  int sample_rate = sample_rate_;
-  DVLOG(1) << "Audio output hardware sample rate: " << sample_rate;
+  DVLOG(1) << "Audio output hardware sample rate: " << sample_rate_;

   // WebRTC does not yet support higher rates than 96000 on the client side
   // and 48000 is the preferred sample rate. Therefore, if 192000 is detected,
@@ -218,25 +215,26 @@ bool WebRtcAudioRenderer::Initialize(WebRtcAudioRendererSource* source) {
   // layer will be opened up at 192kHz but WebRTC will provide data at 48kHz
   // which will then be resampled by the audio converter on the browser side
   // to match the native audio layer.
-  if (sample_rate == 192000) {
+  if (sample_rate_ == 192000) {
     DVLOG(1) << "Resampling from 48000 to 192000 is required";
-    sample_rate = 48000;
+    sample_rate_ = 48000;
   }
-  media::AudioSampleRate asr = media::AsAudioSampleRate(sample_rate);
+  media::AudioSampleRate asr = media::AsAudioSampleRate(sample_rate_);
   if (asr != media::kUnexpectedAudioSampleRate) {
     UMA_HISTOGRAM_ENUMERATION(
         "WebRTC.AudioOutputSampleRate", asr, media::kUnexpectedAudioSampleRate);
   } else {
-    UMA_HISTOGRAM_COUNTS("WebRTC.AudioOutputSampleRateUnexpected", sample_rate);
+    UMA_HISTOGRAM_COUNTS("WebRTC.AudioOutputSampleRateUnexpected",
+                         sample_rate_);
   }

   // Verify that the reported output hardware sample rate is supported
   // on the current platform.
   if (std::find(&kValidOutputRates[0],
                 &kValidOutputRates[0] + arraysize(kValidOutputRates),
-                sample_rate) ==
+                sample_rate_) ==
       &kValidOutputRates[arraysize(kValidOutputRates)]) {
-    DLOG(ERROR) << sample_rate << " is not a supported output rate.";
+    DLOG(ERROR) << sample_rate_ << " is not a supported output rate.";
     return false;
   }
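Note for readers: the hunk above folds the old local |sample_rate| into the member |sample_rate_|, but the logic is unchanged — clamp 192 kHz down to WebRTC's preferred 48 kHz, then check the rate against a whitelist. A minimal standalone sketch of that flow (the whitelist values below are illustrative assumptions, not the actual kValidOutputRates table in this file):

    #include <algorithm>
    #include <iterator>

    // Assumed values for illustration only; the real kValidOutputRates
    // table lives in webrtc_audio_renderer.cc and may differ.
    static const int kValidOutputRates[] = {192000, 96000, 48000,
                                            44100,  32000, 16000};

    bool ClampAndValidateOutputRate(int* sample_rate) {
      // The native layer may open at 192 kHz, but WebRTC renders at 48 kHz;
      // a browser-side converter resamples 48 kHz up to the native rate.
      if (*sample_rate == 192000)
        *sample_rate = 48000;
      return std::find(std::begin(kValidOutputRates),
                       std::end(kValidOutputRates),
                       *sample_rate) != std::end(kValidOutputRates);
    }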
@@ -245,13 +243,13 @@ bool WebRtcAudioRenderer::Initialize(WebRtcAudioRendererSource* source) {
   // The WebRTC client only supports multiples of 10ms as buffer size where
   // 10ms is preferred for lowest possible delay.
   media::AudioParameters source_params;
-  int buffer_size = (sample_rate / 100);
-  DVLOG(1) << "Using WebRTC output buffer size: " << buffer_size;
+  const int frames_per_10ms = (sample_rate_ / 100);
+  DVLOG(1) << "Using WebRTC output buffer size: " << frames_per_10ms;

-  int channels = ChannelLayoutToChannelCount(channel_layout);
+  number_of_channels_ = ChannelLayoutToChannelCount(channel_layout);
   source_params.Reset(media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
-                      channel_layout, channels, 0,
-                      sample_rate, 16, buffer_size);
+                      channel_layout, number_of_channels_, 0,
+                      sample_rate_, 16, frames_per_10ms);

   // Set up audio parameters for the sink, i.e., the native audio output stream.
   // We strive to open up using native parameters to achieve best possible
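A quick sanity check on the 10 ms arithmetic above, with an assumed 48 kHz rate (any of the valid rates works the same way):

    // Illustrative numbers only; not part of the patch.
    const int sample_rate = 48000;
    const int frames_per_10ms = sample_rate / 100;  // 480 frames per block
    const int channels = 2;                         // CHANNEL_LAYOUT_STEREO
    const int bytes_per_sample = 16 / 8;            // 16-bit PCM
    const int bytes_per_10ms =
        frames_per_10ms * channels * bytes_per_sample;  // 1920 bytes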
@@ -261,26 +259,25 @@ bool WebRtcAudioRenderer::Initialize(WebRtcAudioRendererSource* source) {
   media::AudioParameters sink_params;
-  // Use native output siz as default.
-  buffer_size = frames_per_buffer_;
+  // Use native output size as default.
 #if defined(OS_ANDROID)
   // TODO(henrika): Keep tuning this scheme, especially for low-latency
   // cases. Might not be possible to come up with the perfect solution using
   // the render side only.
-  const int frames_per_10ms = (sample_rate / 100);
-  if (buffer_size < 2 * frames_per_10ms) {
+  if (frames_per_buffer_ < 2 * frames_per_10ms) {
     // Examples of low-latency frame sizes and the resulting |buffer_size|:
     // Nexus 7     : 240 audio frames => 2*480 = 960
     // Nexus 10    : 256              => 2*441 = 882
     // Galaxy Nexus: 144              => 2*441 = 882
-    buffer_size = 2 * frames_per_10ms;
+    frames_per_buffer_ = 2 * frames_per_10ms;
     DVLOG(1) << "Low-latency output detected on Android";
   }
 #endif
-  DVLOG(1) << "Using sink output buffer size: " << buffer_size;
+  DVLOG(1) << "Using sink output buffer size: " << frames_per_buffer_;
   sink_params.Reset(media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
-                    channel_layout, channels, 0, sample_rate, 16, buffer_size);
+                    channel_layout, number_of_channels_, 0, sample_rate_, 16,
+                    frames_per_buffer_);

   // Create a FIFO if re-buffering is required to match the source input with
   // the sink request. The source acts as provider here and the sink as
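The FIFO mentioned in the comment above exists because the source always produces fixed 10 ms blocks while the sink may request a different buffer size (e.g. 960 frames on a low-latency Android device versus 480-frame blocks at 48 kHz). A minimal sketch of the pull-FIFO idea, using assumed names rather than the real media FIFO class Chromium uses here:

    #include <algorithm>
    #include <deque>
    #include <functional>
    #include <vector>

    // Hypothetical pull FIFO: the sink asks for |frames| samples; whenever
    // the queue runs short, pull one more fixed-size block from the source.
    class PullFifo {
     public:
      using SourceCallback = std::function<void(std::vector<float>*)>;
      explicit PullFifo(SourceCallback source) : source_(std::move(source)) {}

      void Consume(float* dest, size_t frames) {
        while (fifo_.size() < frames) {
          std::vector<float> block;
          source_(&block);  // The source always delivers one 10 ms block.
          fifo_.insert(fifo_.end(), block.begin(), block.end());
        }
        std::copy(fifo_.begin(), fifo_.begin() + frames, dest);
        fifo_.erase(fifo_.begin(), fifo_.begin() + frames);
      }

     private:
      SourceCallback source_;
      std::deque<float> fifo_;  // Mono for brevity; real audio is per-channel.
    };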
@@ -304,15 +301,7 @@ bool WebRtcAudioRenderer::Initialize(WebRtcAudioRendererSource* source) {
     }
   }

-  // Allocate local audio buffers based on the parameters above.
-  // It is assumed that each audio sample contains 16 bits and each
-  // audio frame contains one or two audio samples depending on the
-  // number of channels.
-  buffer_.reset(
-      new int16[source_params.frames_per_buffer() * source_params.channels()]);
-
   source_ = source;
-  source->SetRenderFormat(source_params);

   // Configure the audio rendering client and start rendering.
   sink_ = AudioDeviceFactory::NewOutputDevice(
@@ -468,9 +457,7 @@ void WebRtcAudioRenderer::SourceCallback(
   // We need to keep render data for the |source_| regardless of |state_|,
   // otherwise the data will be buffered up inside |source_|.
-  source_->RenderData(reinterpret_cast<uint8*>(buffer_.get()),
-                      audio_bus->channels(), audio_bus->frames(),
-                      output_delay_milliseconds);
+  source_->RenderData(audio_bus, sample_rate_, output_delay_milliseconds);

   // Avoid filling up the audio bus if we are not playing; instead
   // return here and ensure that the returned value in Render() is 0.
@@ -478,12 +465,6 @@ void WebRtcAudioRenderer::SourceCallback(
     audio_bus->Zero();
     return;

tommi (sloooow) - chröme  2014/01/31 13:58:32
> this return statement is not needed now

no longer working on chromium  2014/02/02 16:50:16
> Done.

   }
-
-  // De-interleave each channel and convert to 32-bit floating-point
-  // with nominal range -1.0 -> +1.0 to match the callback format.
-  audio_bus->FromInterleaved(buffer_.get(),
-                             audio_bus->frames(),
-                             sizeof(buffer_[0]));
 }

 }  // namespace content
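For context on the deleted tail of SourceCallback(): previously RenderData() filled |buffer_| with interleaved int16 samples and FromInterleaved() de-interleaved them into the float AudioBus; with the new RenderData(audio_bus, ...) signature the source writes into the bus directly, so both |buffer_| and the conversion step disappear. What the removed conversion did, in spirit (an illustrative sketch, not the media::AudioBus implementation):

    #include <cstdint>

    // Hypothetical helper: split interleaved 16-bit PCM [L R L R ...] into
    // per-channel float arrays with nominal range -1.0 to +1.0.
    void DeinterleaveToFloat(const int16_t* in, int frames, int channels,
                             float* const* out) {
      for (int f = 0; f < frames; ++f) {
        for (int ch = 0; ch < channels; ++ch)
          out[ch][f] = in[f * channels + ch] / 32768.0f;
      }
    }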