Index: content/renderer/media/webrtc_audio_device_impl.cc
diff --git a/content/renderer/media/webrtc_audio_device_impl.cc b/content/renderer/media/webrtc_audio_device_impl.cc
index b5dc5008ad7ff831a8de3c05ff9d9795c005d950..018716d30651124d523c561086bd20b80b3fc4f1 100644
--- a/content/renderer/media/webrtc_audio_device_impl.cc
+++ b/content/renderer/media/webrtc_audio_device_impl.cc
@@ -9,7 +9,7 @@
 #include "base/strings/string_util.h"
 #include "base/win/windows_version.h"
 #include "content/renderer/media/media_stream_audio_processor.h"
-#include "content/renderer/media/webrtc_audio_capturer.h"
+#include "content/renderer/media/webrtc/processed_local_audio_source.h"
 #include "content/renderer/media/webrtc_audio_renderer.h"
 #include "content/renderer/render_thread_impl.h"
 #include "media/audio/audio_parameters.h"
@@ -170,15 +170,9 @@ int32_t WebRtcAudioDeviceImpl::Terminate() {
   DCHECK(!renderer_.get() || !renderer_->IsStarted())
       << "The shared audio renderer shouldn't be running";
 
-  // Stop all the capturers to ensure no further OnData() and
-  // RemoveAudioCapturer() callback.
-  // Cache the capturers in a local list since WebRtcAudioCapturer::Stop()
-  // will trigger RemoveAudioCapturer() callback.
-  CapturerList capturers;
-  capturers.swap(capturers_);
-  for (CapturerList::const_iterator iter = capturers.begin();
-       iter != capturers.end(); ++iter) {
-    (*iter)->Stop();
+  {
+    base::AutoLock auto_lock(lock_);
+    capturers_.clear();
   }
 
   initialized_ = false;
@@ -294,11 +288,10 @@ int32_t WebRtcAudioDeviceImpl::SetMicrophoneVolume(uint32_t volume) {
 
   // Only one microphone is supported at the moment, which is represented by
   // the default capturer.
-  scoped_refptr<WebRtcAudioCapturer> capturer(GetDefaultCapturer());
-  if (!capturer.get())
+  base::AutoLock auto_lock(lock_);
+  if (capturers_.empty())
     return -1;
-
-  capturer->SetVolume(volume);
+  capturers_.back()->SetVolume(volume);
   return 0;
 }
 
@@ -309,12 +302,10 @@ int32_t WebRtcAudioDeviceImpl::MicrophoneVolume(uint32_t* volume) const {
   // We only support one microphone now, which is accessed via the default
   // capturer.
   DCHECK(initialized_);
-  scoped_refptr<WebRtcAudioCapturer> capturer(GetDefaultCapturer());
-  if (!capturer.get())
+  base::AutoLock auto_lock(lock_);
+  if (capturers_.empty())
     return -1;
-
-  *volume = static_cast<uint32_t>(capturer->Volume());
-
+  *volume = static_cast<uint32_t>(capturers_.back()->Volume());
   return 0;
 }
 
@@ -352,11 +343,10 @@ int32_t WebRtcAudioDeviceImpl::StereoRecordingIsAvailable(
 
   // TODO(xians): These kind of hardware methods do not make much sense since we
   // support multiple sources. Remove or figure out new APIs for such methods.
-  scoped_refptr<WebRtcAudioCapturer> capturer(GetDefaultCapturer());
-  if (!capturer.get())
+  base::AutoLock auto_lock(lock_);
+  if (capturers_.empty())
     return -1;
-
-  *available = (capturer->source_audio_parameters().channels() == 2);
+  *available = (capturers_.back()->GetInputAudioParameters().channels() == 2);
   return 0;
 }
 
@@ -371,7 +361,7 @@ int32_t WebRtcAudioDeviceImpl::RecordingDelay(uint16_t* delay_ms) const {
   DCHECK(signaling_thread_checker_.CalledOnValidThread());
 
   // There is no way to report a correct delay value to WebRTC since there
-  // might be multiple WebRtcAudioCapturer instances.
+  // might be multiple ProcessedLocalAudioSource instances.
   NOTREACHED();
   return -1;
 }
@@ -380,12 +370,14 @@ int32_t WebRtcAudioDeviceImpl::RecordingSampleRate(
     uint32_t* sample_rate) const {
   DCHECK(signaling_thread_checker_.CalledOnValidThread());
   // We use the default capturer as the recording sample rate.
-  scoped_refptr<WebRtcAudioCapturer> capturer(GetDefaultCapturer());
-  if (!capturer.get())
+  base::AutoLock auto_lock(lock_);
+  if (capturers_.empty())
     return -1;
-
-  *sample_rate = static_cast<uint32_t>(
-      capturer->source_audio_parameters().sample_rate());
+  const media::AudioParameters& params =
+      capturers_.back()->GetInputAudioParameters();
+  if (!params.IsValid())
+    return -1;
+  *sample_rate = static_cast<uint32_t>(params.sample_rate());
   return 0;
 }
 
@@ -434,11 +426,10 @@ bool WebRtcAudioDeviceImpl::SetAudioRenderer(WebRtcAudioRenderer* renderer) {
 }
 
 void WebRtcAudioDeviceImpl::AddAudioCapturer(
-    const scoped_refptr<WebRtcAudioCapturer>& capturer) {
+    ProcessedLocalAudioSource* capturer) {
   DCHECK(main_thread_checker_.CalledOnValidThread());
   DVLOG(1) << "WebRtcAudioDeviceImpl::AddAudioCapturer()";
-  DCHECK(capturer.get());
-  DCHECK(!capturer->device_id().empty());
+  DCHECK(capturer);
 
   base::AutoLock auto_lock(lock_);
   DCHECK(std::find(capturers_.begin(), capturers_.end(), capturer) ==
@@ -447,28 +438,14 @@ void WebRtcAudioDeviceImpl::AddAudioCapturer(
 }
 
 void WebRtcAudioDeviceImpl::RemoveAudioCapturer(
-    const scoped_refptr<WebRtcAudioCapturer>& capturer) {
+    ProcessedLocalAudioSource* capturer) {
   DCHECK(main_thread_checker_.CalledOnValidThread());
   DVLOG(1) << "WebRtcAudioDeviceImpl::AddAudioCapturer()";
-  DCHECK(capturer.get());
+  DCHECK(capturer);
   base::AutoLock auto_lock(lock_);
   capturers_.remove(capturer);
 }
 
-scoped_refptr<WebRtcAudioCapturer>
-WebRtcAudioDeviceImpl::GetDefaultCapturer() const {
-  // Called on the signaling thread (during initialization), worker
-  // thread during capture or main thread for a WebAudio source.
-  // We can't DCHECK on those three checks here since GetDefaultCapturer
-  // may be the first call and therefore could incorrectly initialize the
-  // thread checkers.
-  DCHECK(initialized_);
-  base::AutoLock auto_lock(lock_);
-  // Use the last |capturer| which is from the latest getUserMedia call as
-  // the default capture device.
-  return capturers_.empty() ? NULL : capturers_.back();
-}
-
 void WebRtcAudioDeviceImpl::AddPlayoutSink(
     WebRtcPlayoutDataSource::Sink* sink) {
   DCHECK(main_thread_checker_.CalledOnValidThread());
@@ -498,8 +475,19 @@ bool WebRtcAudioDeviceImpl::GetAuthorizedDeviceInfoForAudioRenderer(
   if (capturers_.size() != 1)
     return false;
 
-  return capturers_.back()->GetPairedOutputParameters(
-      session_id, output_sample_rate, output_frames_per_buffer);
+  // Don't set output parameters unless all of them are valid.
+  const StreamDeviceInfo& device_info = capturers_.back()->device_info();
+  if (device_info.session_id <= 0 ||
+      !device_info.device.matched_output.sample_rate ||
+      !device_info.device.matched_output.frames_per_buffer)
+    return false;
+
+  *session_id = device_info.session_id;
+  *output_sample_rate = device_info.device.matched_output.sample_rate;
+  *output_frames_per_buffer =
+      device_info.device.matched_output.frames_per_buffer;
+
+  return true;
 }
 
 }  // namespace content