Index: content/renderer/media/webrtc_audio_capturer.cc
diff --git a/content/renderer/media/webrtc_audio_capturer.cc b/content/renderer/media/webrtc_audio_capturer.cc
index 822c13aea90d8552fdcddb93c49a78c9e2822976..12206bd165f80d53ad363c696721f2096df0a576 100644
--- a/content/renderer/media/webrtc_audio_capturer.cc
+++ b/content/renderer/media/webrtc_audio_capturer.cc
@@ -104,7 +104,8 @@ scoped_refptr<WebRtcAudioCapturer> WebRtcAudioCapturer::CreateCapturer() {
 }
 
 void WebRtcAudioCapturer::Reconfigure(int sample_rate,
-                                      media::ChannelLayout channel_layout) {
+                                      media::ChannelLayout channel_layout,
+                                      bool use_platform_aec) {
   DCHECK(thread_checker_.CalledOnValidThread());
   int buffer_size = GetBufferSize(sample_rate);
   DVLOG(1) << "Using WebRTC input buffer size: " << buffer_size;
@@ -116,6 +117,7 @@ void WebRtcAudioCapturer::Reconfigure(int sample_rate,
   int bits_per_sample = 16;
   media::AudioParameters params(format, channel_layout, sample_rate,
                                 bits_per_sample, buffer_size);
+  params.set_use_platform_aec(use_platform_aec);
 
   {
     base::AutoLock auto_lock(lock_);
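
Note: set_use_platform_aec() is not declared anywhere in this file's diff, so the hunk above only compiles together with a companion change to media::AudioParameters (presumably in media/audio/audio_parameters.h, which is not shown here). Purely as an assumption for orientation, such an extension could look roughly like this sketch:

  // Sketch only -- not part of this patch.  Assumes the companion change adds
  // a plain boolean to media::AudioParameters with trivial accessors.
  class AudioParameters {
   public:
    AudioParameters() : use_platform_aec_(false) {}

    void set_use_platform_aec(bool use_platform_aec) {
      use_platform_aec_ = use_platform_aec;
    }
    bool use_platform_aec() const { return use_platform_aec_; }

    // ... existing fields and methods elided ...

   private:
    bool use_platform_aec_;  // Whether the OS-provided echo canceller is requested.
  };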
@@ -134,7 +136,8 @@ bool WebRtcAudioCapturer::Initialize(int render_view_id,
                                      int session_id,
                                      const std::string& device_id,
                                      int paired_output_sample_rate,
-                                     int paired_output_frames_per_buffer) {
+                                     int paired_output_frames_per_buffer,
+                                     bool use_platform_aec) {
   DCHECK(thread_checker_.CalledOnValidThread());
   DVLOG(1) << "WebRtcAudioCapturer::Initialize()";
 
@@ -200,7 +203,8 @@ bool WebRtcAudioCapturer::Initialize(int render_view_id,
   // providing an alternative media::AudioCapturerSource.
   SetCapturerSource(AudioDeviceFactory::NewInputDevice(render_view_id),
                     channel_layout,
-                    static_cast<float>(sample_rate));
+                    static_cast<float>(sample_rate),
+                    use_platform_aec);
 
   return true;
 }
@@ -286,10 +290,12 @@ void WebRtcAudioCapturer::RemoveTrack(WebRtcLocalAudioTrack* track) {
 void WebRtcAudioCapturer::SetCapturerSource(
     const scoped_refptr<media::AudioCapturerSource>& source,
     media::ChannelLayout channel_layout,
-    float sample_rate) {
+    float sample_rate,
+    bool use_platform_aec) {
   DCHECK(thread_checker_.CalledOnValidThread());
   DVLOG(1) << "SetCapturerSource(channel_layout=" << channel_layout << ","
-           << "sample_rate=" << sample_rate << ")";
+           << "sample_rate=" << sample_rate << "," << "use_platform_aec="
+           << use_platform_aec << ")";
   scoped_refptr<media::AudioCapturerSource> old_source;
   bool restart_source = false;
   {
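
For reference, with the extra field appended, the trace emitted at verbosity 1 would read along these lines (values assumed; a bool streams as 1 or 0, and the channel layout streams as its enum value):

  SetCapturerSource(channel_layout=3,sample_rate=48000,use_platform_aec=1)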
@@ -312,7 +318,7 @@ void WebRtcAudioCapturer::SetCapturerSource(
   // Dispatch the new parameters both to the sink(s) and to the new source.
   // The idea is to get rid of any dependency of the microphone parameters
   // which would normally be used by default.
-  Reconfigure(sample_rate, channel_layout);
+  Reconfigure(sample_rate, channel_layout, use_platform_aec);
 
   // Make sure to grab the new parameters in case they were reconfigured.
   media::AudioParameters params = audio_parameters();
@@ -351,7 +357,8 @@ void WebRtcAudioCapturer::EnablePeerConnectionMode() {
   // WebRtc native buffer size.
   SetCapturerSource(AudioDeviceFactory::NewInputDevice(render_view_id),
                     params.channel_layout(),
-                    static_cast<float>(params.sample_rate()));
+                    static_cast<float>(params.sample_rate()),
+                    params.use_platform_aec());
 }
 
 void WebRtcAudioCapturer::Start() {
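
Taken together, the boolean now travels Initialize() -> SetCapturerSource() -> Reconfigure() and ends up on the media::AudioParameters handed to the capture source, so the backend that consumes those parameters can decide whether to engage the platform echo canceller; EnablePeerConnectionMode() preserves it by reading the flag back from the cached parameters. A rough illustration of that plumbing from a caller's point of view (the render view id and format values are invented and this particular call site is hypothetical; only the signatures match the patch):

  // Illustration only: made-up values, hypothetical call site.
  const int render_view_id = 1;
  scoped_refptr<WebRtcAudioCapturer> capturer =
      WebRtcAudioCapturer::CreateCapturer();
  capturer->SetCapturerSource(
      AudioDeviceFactory::NewInputDevice(render_view_id),
      media::CHANNEL_LAYOUT_STEREO,
      48000.0f,
      true /* use_platform_aec */);
  // Internally this forwards to Reconfigure(48000, CHANNEL_LAYOUT_STEREO, true),
  // which stamps the flag onto the AudioParameters via set_use_platform_aec().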
|