Chromium Code Reviews

Diff: trunk/src/content/renderer/media/media_stream_audio_processor.cc

Issue 148213002: Revert 246905 "Revert 246894 "Wire up AGC to the MediaStreamAudi..." (Closed) Base URL: svn://svn.chromium.org/chrome/
Patch Set: Created 6 years, 10 months ago
 // Copyright 2013 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "content/renderer/media/media_stream_audio_processor.h"

 #include "base/command_line.h"
 #include "base/debug/trace_event.h"
 #include "content/public/common/content_switches.h"
 #include "content/renderer/media/media_stream_audio_processor_options.h"
 #include "content/renderer/media/rtc_media_constraints.h"
 #include "media/audio/audio_parameters.h"
 #include "media/base/audio_converter.h"
 #include "media/base/audio_fifo.h"
 #include "media/base/channel_layout.h"
 #include "third_party/WebKit/public/platform/WebMediaConstraints.h"
 #include "third_party/libjingle/source/talk/app/webrtc/mediaconstraintsinterface.h"

 namespace content {

 namespace {

 using webrtc::AudioProcessing;
 using webrtc::MediaConstraintsInterface;

-#if defined(ANDROID)
+#if defined(OS_ANDROID)
 const int kAudioProcessingSampleRate = 16000;
 #else
 const int kAudioProcessingSampleRate = 32000;
 #endif
 const int kAudioProcessingNumberOfChannel = 1;

 const int kMaxNumberOfBuffersInFifo = 2;

 }  // namespace

(...skipping 98 matching lines...)
   // Handles mixing and resampling between input and output parameters.
   media::AudioConverter audio_converter_;
   scoped_ptr<media::AudioBus> audio_wrapper_;
   scoped_ptr<media::AudioFifo> fifo_;
 };

 MediaStreamAudioProcessor::MediaStreamAudioProcessor(
     const media::AudioParameters& source_params,
     const blink::WebMediaConstraints& constraints,
     int effects)
-    : render_delay_ms_(0) {
+    : render_delay_ms_(0),
+      audio_mirroring_(false) {
   capture_thread_checker_.DetachFromThread();
   render_thread_checker_.DetachFromThread();
   InitializeAudioProcessingModule(constraints, effects);
   InitializeCaptureConverter(source_params);
 }

 MediaStreamAudioProcessor::~MediaStreamAudioProcessor() {
   DCHECK(main_thread_checker_.CalledOnValidThread());
   StopAudioProcessing();
 }
(...skipping 28 matching lines...)
   render_data_bus_->FromInterleaved(render_audio,
                                     render_data_bus_->frames(),
                                     sizeof(render_audio[0]));
   render_converter_->Push(render_data_bus_.get());
   while (render_converter_->Convert(&render_frame_))
     audio_processing_->AnalyzeReverseStream(&render_frame_);
 }

 bool MediaStreamAudioProcessor::ProcessAndConsumeData(
     base::TimeDelta capture_delay, int volume, bool key_pressed,
-    int16** out) {
+    int* new_volume, int16** out) {
   DCHECK(capture_thread_checker_.CalledOnValidThread());
   TRACE_EVENT0("audio",
                "MediaStreamAudioProcessor::ProcessAndConsumeData");

   if (!capture_converter_->Convert(&capture_frame_))
     return false;

-  ProcessData(&capture_frame_, capture_delay, volume, key_pressed);
+  *new_volume = ProcessData(&capture_frame_, capture_delay, volume,
+                            key_pressed);
   *out = capture_frame_.data_;

   return true;
 }

 const media::AudioParameters& MediaStreamAudioProcessor::InputFormat() const {
   return capture_converter_->source_parameters();
 }

 const media::AudioParameters& MediaStreamAudioProcessor::OutputFormat() const {
   return capture_converter_->sink_parameters();
 }

 void MediaStreamAudioProcessor::InitializeAudioProcessingModule(
     const blink::WebMediaConstraints& constraints, int effects) {
   DCHECK(!audio_processing_);
   if (!CommandLine::ForCurrentProcess()->HasSwitch(
           switches::kEnableAudioTrackProcessing)) {
     return;
   }

   RTCMediaConstraints native_constraints(constraints);
   ApplyFixedAudioConstraints(&native_constraints);
   if (effects & media::AudioParameters::ECHO_CANCELLER) {
-    // If platform echo cancellator is enabled, disable the software AEC.
+    // If platform echo canceller is enabled, disable the software AEC.
     native_constraints.AddMandatory(
         MediaConstraintsInterface::kEchoCancellation,
         MediaConstraintsInterface::kValueFalse, true);
   }

+#if defined(OS_IOS)
+  // On iOS, VPIO provides built-in AEC and AGC.
+  const bool enable_aec = false;
+  const bool enable_agc = false;
+#else
   const bool enable_aec = GetPropertyFromConstraints(
       &native_constraints, MediaConstraintsInterface::kEchoCancellation);
-  const bool enable_ns = GetPropertyFromConstraints(
-      &native_constraints, MediaConstraintsInterface::kNoiseSuppression);
-  const bool enable_high_pass_filter = GetPropertyFromConstraints(
-      &native_constraints, MediaConstraintsInterface::kHighpassFilter);
-#if defined(IOS) || defined(ANDROID)
+  const bool enable_agc = GetPropertyFromConstraints(
+      &native_constraints, webrtc::MediaConstraintsInterface::kAutoGainControl);
+#endif
+
+#if defined(OS_IOS) || defined(OS_ANDROID)
   const bool enable_experimental_aec = false;
   const bool enable_typing_detection = false;
 #else
   const bool enable_experimental_aec = GetPropertyFromConstraints(
       &native_constraints,
       MediaConstraintsInterface::kExperimentalEchoCancellation);
   const bool enable_typing_detection = GetPropertyFromConstraints(
       &native_constraints, MediaConstraintsInterface::kTypingNoiseDetection);
 #endif

+  const bool enable_ns = GetPropertyFromConstraints(
+      &native_constraints, MediaConstraintsInterface::kNoiseSuppression);
+  const bool enable_high_pass_filter = GetPropertyFromConstraints(
+      &native_constraints, MediaConstraintsInterface::kHighpassFilter);
+
+  audio_mirroring_ = GetPropertyFromConstraints(
+      &native_constraints, webrtc::MediaConstraintsInterface::kAudioMirroring);
+
   // Return immediately if no audio processing component is enabled.
   if (!enable_aec && !enable_experimental_aec && !enable_ns &&
-      !enable_high_pass_filter && !enable_typing_detection) {
+      !enable_high_pass_filter && !enable_typing_detection && !enable_agc) {
     return;
   }

   // Create and configure the webrtc::AudioProcessing.
   audio_processing_.reset(webrtc::AudioProcessing::Create(0));

   // Enable the audio processing components.
   if (enable_aec) {
     EnableEchoCancellation(audio_processing_.get());
     if (enable_experimental_aec)
       EnableExperimentalEchoCancellation(audio_processing_.get());
   }

   if (enable_ns)
     EnableNoiseSuppression(audio_processing_.get());

   if (enable_high_pass_filter)
     EnableHighPassFilter(audio_processing_.get());

   if (enable_typing_detection)
     EnableTypingDetection(audio_processing_.get());

+  if (enable_agc)
+    EnableAutomaticGainControl(audio_processing_.get());

   // Configure the audio format the audio processing is running on. This
   // has to be done after all the needed components are enabled.
   CHECK_EQ(audio_processing_->set_sample_rate_hz(kAudioProcessingSampleRate),
            0);
   CHECK_EQ(audio_processing_->set_num_channels(kAudioProcessingNumberOfChannel,
                                                kAudioProcessingNumberOfChannel),
            0);
 }

(...skipping 49 matching lines...)
   media::AudioParameters sink_params(
       media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
       media::CHANNEL_LAYOUT_MONO, kAudioProcessingSampleRate, 16,
       kAudioProcessingSampleRate / 100);
   render_converter_.reset(
       new MediaStreamAudioConverter(source_params, sink_params));
   render_data_bus_ = media::AudioBus::Create(number_of_channels,
                                              frames_per_buffer);
 }

-void MediaStreamAudioProcessor::ProcessData(webrtc::AudioFrame* audio_frame,
+int MediaStreamAudioProcessor::ProcessData(webrtc::AudioFrame* audio_frame,
                                             base::TimeDelta capture_delay,
                                             int volume,
                                             bool key_pressed) {
   DCHECK(capture_thread_checker_.CalledOnValidThread());
   if (!audio_processing_)
-    return;
+    return 0;

-  TRACE_EVENT0("audio", "MediaStreamAudioProcessor::Process10MsData");
+  TRACE_EVENT0("audio", "MediaStreamAudioProcessor::ProcessData");
   DCHECK_EQ(audio_processing_->sample_rate_hz(),
             capture_converter_->sink_parameters().sample_rate());
   DCHECK_EQ(audio_processing_->num_input_channels(),
             capture_converter_->sink_parameters().channels());
   DCHECK_EQ(audio_processing_->num_output_channels(),
             capture_converter_->sink_parameters().channels());

   base::subtle::Atomic32 render_delay_ms =
       base::subtle::Acquire_Load(&render_delay_ms_);
   int64 capture_delay_ms = capture_delay.InMilliseconds();
   DCHECK_LT(capture_delay_ms,
             std::numeric_limits<base::subtle::Atomic32>::max());
   int total_delay_ms = capture_delay_ms + render_delay_ms;
-  if (total_delay_ms > 1000) {
+  if (total_delay_ms > 300) {
     LOG(WARNING) << "Large audio delay, capture delay: " << capture_delay_ms
                  << "ms; render delay: " << render_delay_ms << "ms";
   }

   audio_processing_->set_stream_delay_ms(total_delay_ms);
   webrtc::GainControl* agc = audio_processing_->gain_control();
   int err = agc->set_stream_analog_level(volume);
   DCHECK_EQ(err, 0) << "set_stream_analog_level() error: " << err;
   err = audio_processing_->ProcessStream(audio_frame);
   DCHECK_EQ(err, 0) << "ProcessStream() error: " << err;

-  // TODO(xians): Add support for AGC, typing detection, audio level
-  // calculation, stereo swapping.
+  // TODO(xians): Add support for typing detection, audio level calculation.
+
+  if (audio_mirroring_ && audio_frame->num_channels_ == 2) {
+    // TODO(xians): Swap the stereo channels after switching to media::AudioBus.
+  }
+
+  // Return 0 if the volume has not been changed, otherwise return the new
+  // volume.
+  return (agc->stream_analog_level() == volume) ?
+      0 : agc->stream_analog_level();
 }

 void MediaStreamAudioProcessor::StopAudioProcessing() {
   if (!audio_processing_.get())
     return;

   audio_processing_.reset();
 }

 }  // namespace content
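For reference on how the widened ProcessAndConsumeData() signature is meant to be used: the new |new_volume| out-parameter carries the AGC-recommended analog microphone level, with 0 meaning "leave the volume unchanged". The sketch below is illustrative only and not part of this CL; SetMicrophoneVolume() and DeliverProcessedAudio() are hypothetical stand-ins for whatever the capture-side caller actually uses.

// Illustrative caller-side sketch (not part of this CL). It drains the
// processor's FIFO and applies any AGC-recommended analog volume.
void DeliverProcessedAudio(MediaStreamAudioProcessor* processor,
                           base::TimeDelta capture_delay,
                           int current_volume,
                           bool key_pressed) {
  int new_volume = 0;
  int16* processed_data = NULL;
  while (processor->ProcessAndConsumeData(capture_delay, current_volume,
                                          key_pressed, &new_volume,
                                          &processed_data)) {
    // Hand |processed_data| (one processed 10 ms chunk) to the sinks here.
    if (new_volume != 0) {
      // AGC asked for a different analog mic level; 0 means "keep as is".
      SetMicrophoneVolume(new_volume);  // Hypothetical helper.
      current_volume = new_volume;
    }
  }
}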