Index: content/renderer/media/webrtc_audio_capturer.cc
diff --git a/content/renderer/media/webrtc_audio_capturer.cc b/content/renderer/media/webrtc_audio_capturer.cc
index f396813387a821176df21a827af07ed2e550ab58..3339a21fbc2fb8e9dacbb37a9f941c031a169273 100644
--- a/content/renderer/media/webrtc_audio_capturer.cc
+++ b/content/renderer/media/webrtc_audio_capturer.cc
@@ -194,9 +194,7 @@ bool WebRtcAudioCapturer::Initialize() {
   // Create and configure the default audio capturing source.
   SetCapturerSource(AudioDeviceFactory::NewInputDevice(render_view_id_),
                     channel_layout,
-                    static_cast<float>(device_info_.device.input.sample_rate),
-                    device_info_.device.input.effects,
-                    constraints_);
+                    static_cast<float>(device_info_.device.input.sample_rate));
 
   // Add the capturer to the WebRtcAudioDeviceImpl since it needs some hardware
   // information from the capturer.
@@ -212,6 +210,9 @@ WebRtcAudioCapturer::WebRtcAudioCapturer(
     const blink::WebMediaConstraints& constraints,
     WebRtcAudioDeviceImpl* audio_device)
     : constraints_(constraints),
+      audio_processor_(
+          new talk_base::RefCountedObject<MediaStreamAudioProcessor>(
+              constraints, device_info.device.input.effects, audio_device)),
       running_(false),
       render_view_id_(render_view_id),
       device_info_(device_info),
@@ -268,9 +269,7 @@ void WebRtcAudioCapturer::RemoveTrack(WebRtcLocalAudioTrack* track) {
 void WebRtcAudioCapturer::SetCapturerSource(
     const scoped_refptr<media::AudioCapturerSource>& source,
     media::ChannelLayout channel_layout,
-    float sample_rate,
-    int effects,
-    const blink::WebMediaConstraints& constraints) {
+    float sample_rate) {
   DCHECK(thread_checker_.CalledOnValidThread());
   DVLOG(1) << "SetCapturerSource(channel_layout=" << channel_layout << ","
            << "sample_rate=" << sample_rate << ")";
@@ -301,15 +300,16 @@ void WebRtcAudioCapturer::SetCapturerSource(
   int buffer_size = GetBufferSize(sample_rate);
   media::AudioParameters params(media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
                                 channel_layout, 0, sample_rate,
-                                16, buffer_size, effects);
-  scoped_refptr<MediaStreamAudioProcessor> new_audio_processor(
-      new talk_base::RefCountedObject<MediaStreamAudioProcessor>(
-          params, constraints, effects, audio_device_));
+                                16, buffer_size,
+                                device_info_.device.input.effects);
+
   {
     base::AutoLock auto_lock(lock_);
-    audio_processor_ = new_audio_processor;
-    need_audio_processing_ = NeedsAudioProcessing(constraints, effects);
+    // Notify the |audio_processor_| of the new format.
+    audio_processor_->OnCaptureFormatChanged(params);
 
+    need_audio_processing_ = NeedsAudioProcessing(
+        constraints_, device_info_.device.input.effects);
     // Notify all tracks about the new format.
     tracks_.TagAll();
   }
@@ -352,12 +352,11 @@ void WebRtcAudioCapturer::EnablePeerConnectionMode() {
   // WebRtc native buffer size.
   SetCapturerSource(AudioDeviceFactory::NewInputDevice(render_view_id),
                     input_params.channel_layout(),
-                    static_cast<float>(input_params.sample_rate()),
-                    input_params.effects(),
-                    constraints_);
+                    static_cast<float>(input_params.sample_rate()));
 }
 
 void WebRtcAudioCapturer::Start() {
+  DCHECK(thread_checker_.CalledOnValidThread());
   DVLOG(1) << "WebRtcAudioCapturer::Start()";
   base::AutoLock auto_lock(lock_);
   if (running_ || !source_)
@@ -371,6 +370,7 @@ void WebRtcAudioCapturer::Start() {
 }
 
 void WebRtcAudioCapturer::Stop() {
+  DCHECK(thread_checker_.CalledOnValidThread());
   DVLOG(1) << "WebRtcAudioCapturer::Stop()";
   scoped_refptr<media::AudioCapturerSource> source;
   TrackList::ItemList tracks;
@@ -389,6 +389,9 @@ void WebRtcAudioCapturer::Stop() {
   if (audio_device_)
     audio_device_->RemoveAudioCapturer(this);
 
+  // Stop the Aec dump.
+  StopAecDump();
+
   for (TrackList::ItemList::const_iterator it = tracks.begin();
        it != tracks.end();
        ++it) {
@@ -439,7 +442,6 @@ void WebRtcAudioCapturer::Capture(media::AudioBus* audio_source,
   TrackList::ItemList tracks_to_notify_format;
   int current_volume = 0;
   base::TimeDelta audio_delay;
-  scoped_refptr<MediaStreamAudioProcessor> audio_processor;
   bool need_audio_processing = true;
   {
     base::AutoLock auto_lock(lock_);
@@ -456,38 +458,37 @@ void WebRtcAudioCapturer::Capture(media::AudioBus* audio_source,
     key_pressed_ = key_pressed;
     tracks = tracks_.Items();
     tracks_.RetrieveAndClearTags(&tracks_to_notify_format);
-    audio_processor = audio_processor_;
 
     // Set the flag to turn on the audio processing in PeerConnection level.
     // Note that, we turn off the audio processing in PeerConnection if the
     // processor has already processed the data.
     need_audio_processing = need_audio_processing_ ?
-        !audio_processor->has_audio_processing() : false;
+        !audio_processor_->has_audio_processing() : false;
   }
 
-  DCHECK(audio_processor->InputFormat().IsValid());
+  DCHECK(audio_processor_->InputFormat().IsValid());
   DCHECK_EQ(audio_source->channels(),
-            audio_processor->InputFormat().channels());
+            audio_processor_->InputFormat().channels());
   DCHECK_EQ(audio_source->frames(),
-            audio_processor->InputFormat().frames_per_buffer());
+            audio_processor_->InputFormat().frames_per_buffer());
 
   // Notify the tracks on when the format changes. This will do nothing if
   // |tracks_to_notify_format| is empty.
-  media::AudioParameters output_params = audio_processor->OutputFormat();
+  media::AudioParameters output_params = audio_processor_->OutputFormat();
   for (TrackList::ItemList::const_iterator it = tracks_to_notify_format.begin();
        it != tracks_to_notify_format.end(); ++it) {
     (*it)->OnSetFormat(output_params);
-    (*it)->SetAudioProcessor(audio_processor);
+    (*it)->SetAudioProcessor(audio_processor_);
   }
 
   // Push the data to the processor for processing.
-  audio_processor->PushCaptureData(audio_source);
+  audio_processor_->PushCaptureData(audio_source);
 
   // Process and consume the data in the processor until there is not enough
   // data in the processor.
   int16* output = NULL;
   int new_volume = 0;
-  while (audio_processor->ProcessAndConsumeData(
+  while (audio_processor_->ProcessAndConsumeData(
       audio_delay, current_volume, key_pressed, &new_volume, &output)) {
     // Feed the post-processed data to the tracks.
     for (TrackList::ItemList::const_iterator it = tracks.begin();
@@ -569,9 +570,19 @@ void WebRtcAudioCapturer::SetCapturerSourceForTesting(
     media::AudioParameters params) {
   // Create a new audio stream as source which uses the new source.
   SetCapturerSource(source, params.channel_layout(),
-                    static_cast<float>(params.sample_rate()),
-                    params.effects(),
-                    constraints_);
+                    static_cast<float>(params.sample_rate()));
+}
+
+void WebRtcAudioCapturer::StartAecDump(
+    const base::PlatformFile& aec_dump_file) {
+  DCHECK(thread_checker_.CalledOnValidThread());
+  DCHECK_NE(aec_dump_file, base::kInvalidPlatformFileValue);
+  audio_processor_->StartAecDump(aec_dump_file);
+}
+
+void WebRtcAudioCapturer::StopAecDump() {
+  DCHECK(thread_checker_.CalledOnValidThread());
+  audio_processor_->StopAecDump();
 }
 
 }  // namespace content
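// ---------------------------------------------------------------------------
// Illustrative sketch (plain standard C++, not Chromium code). The patch makes
// WebRtcAudioCapturer own a single MediaStreamAudioProcessor for its whole
// lifetime: the processor is created once in the constructor,
// SetCapturerSource() only notifies it of the new capture format under the
// capturer's lock, and Stop() also stops any in-progress AEC dump via the new
// StopAecDump(). The sketch below mirrors that ownership pattern using
// std::shared_ptr and std::mutex in place of talk_base::RefCountedObject and
// base::AutoLock; the names AudioProcessor, OnCaptureFormatChanged, StartDump
// and StopDump are stand-ins, not the actual Chromium API.

#include <cstdio>
#include <memory>
#include <mutex>

struct AudioFormat {
  int sample_rate = 0;
  int channels = 0;
};

class AudioProcessor {
 public:
  // Called by the capturer whenever the capture format changes; the processor
  // reconfigures itself instead of being destroyed and recreated.
  void OnCaptureFormatChanged(const AudioFormat& format) {
    format_ = format;
    std::printf("processor: new format %d Hz, %d ch\n",
                format_.sample_rate, format_.channels);
  }

  // Stand-ins for StartAecDump()/StopAecDump() on MediaStreamAudioProcessor.
  void StartDump() { dumping_ = true; }
  void StopDump() { dumping_ = false; }

 private:
  AudioFormat format_;
  bool dumping_ = false;
};

class Capturer {
 public:
  // The processor is created exactly once and shared (ref-counted) with any
  // consumers for the capturer's entire lifetime.
  Capturer() : processor_(std::make_shared<AudioProcessor>()) {}

  // Analogue of SetCapturerSource(): rather than swapping in a new processor,
  // tell the existing one about the new format while holding the lock.
  void SetCaptureFormat(const AudioFormat& format) {
    std::lock_guard<std::mutex> lock(lock_);
    processor_->OnCaptureFormatChanged(format);
  }

  // Analogue of Stop(): make sure any in-progress dump is stopped as well.
  void Stop() { processor_->StopDump(); }

  std::shared_ptr<AudioProcessor> processor() const { return processor_; }

 private:
  std::mutex lock_;
  std::shared_ptr<AudioProcessor> processor_;
};

int main() {
  Capturer capturer;
  capturer.SetCaptureFormat({48000, 2});  // initial capture source
  capturer.SetCaptureFormat({44100, 1});  // source swapped; same processor
  capturer.processor()->StartDump();      // analogue of StartAecDump()
  capturer.Stop();                        // stops the dump, like StopAecDump()
  return 0;
}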