Index: content/renderer/media/webrtc_audio_device_impl.cc
diff --git a/content/renderer/media/webrtc_audio_device_impl.cc b/content/renderer/media/webrtc_audio_device_impl.cc
index 820a40901c42028c96f80b4ba8e309cfee3654ac..501cf35bb8fbb32a2b01b7f60de640c642d55547 100644
--- a/content/renderer/media/webrtc_audio_device_impl.cc
+++ b/content/renderer/media/webrtc_audio_device_impl.cc
@@ -128,7 +128,7 @@ static void AddHistogramFramesPerBuffer(HistogramDirection dir, int param) {
   }
 }
 
-WebRtcAudioDeviceImpl::WebRtcAudioDeviceImpl()
+WebRtcAudioDeviceImpl::WebRtcAudioDeviceImpl(int render_view_id)
     : ref_count_(0),
       render_loop_(base::MessageLoopProxy::current()),
       audio_transport_callback_(NULL),
@@ -147,7 +147,9 @@ WebRtcAudioDeviceImpl::WebRtcAudioDeviceImpl()
   // input side as well.
   DCHECK(RenderThreadImpl::current()) <<
       "WebRtcAudioDeviceImpl must be constructed on the render thread";
-  audio_output_device_ = AudioDeviceFactory::NewOutputDevice();
+  audio_input_device_ = AudioDeviceFactory::NewInputDevice(render_view_id);
+  DCHECK(audio_input_device_);
+  audio_output_device_ = AudioDeviceFactory::NewOutputDevice(render_view_id);
   DCHECK(audio_output_device_);
 }
 
@@ -407,7 +409,6 @@ int32_t WebRtcAudioDeviceImpl::Init() {
   if (initialized_)
     return 0;
 
-  DCHECK(!audio_input_device_);
   DCHECK(!input_buffer_.get());
   DCHECK(!output_buffer_.get());
 
@@ -571,7 +572,6 @@ int32_t WebRtcAudioDeviceImpl::Init() {
       16, in_buffer_size);
 
   // Create and configure the audio capturing client.
-  audio_input_device_ = AudioDeviceFactory::NewInputDevice();
   audio_input_device_->Initialize(input_audio_parameters_, this, this);
 
   UMA_HISTOGRAM_ENUMERATION("WebRTC.AudioOutputChannelLayout",
@@ -584,8 +584,6 @@ int32_t WebRtcAudioDeviceImpl::Init() {
   // Configure the audio rendering client.
   audio_output_device_->Initialize(output_audio_parameters_, this);
 
-  DCHECK(audio_input_device_);
-
   // Allocate local audio buffers based on the parameters above.
   // It is assumed that each audio sample contains 16 bits and each
   // audio frame contains one or two audio samples depending on the
@@ -623,12 +621,10 @@ int32_t WebRtcAudioDeviceImpl::Terminate() {
   if (!initialized_)
     return 0;
 
-  DCHECK(audio_input_device_);
   DCHECK(input_buffer_.get());
   DCHECK(output_buffer_.get());
 
   // Release all resources allocated in Init().
-  audio_input_device_ = NULL;
   input_buffer_.reset();
   output_buffer_.reset();
 
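Note (not part of the patch): with this change the capture and render devices are created in the constructor, bound to a specific render view id, instead of lazily inside Init(). A minimal call-site sketch follows; the variable render_view_id_for_this_frame and the surrounding setup are placeholders for illustration only, not code from this CL.

  // Hypothetical caller: pass the routing id of the render view that owns
  // the audio session so AudioDeviceFactory can create devices tied to it.
  WebRtcAudioDeviceImpl* adm =
      new WebRtcAudioDeviceImpl(render_view_id_for_this_frame);
  adm->Init();       // Sets up parameters and buffers; devices already exist.
  // ... use the module through the webrtc::AudioDeviceModule interface ...
  adm->Terminate();  // Releases the buffers allocated in Init().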