Chromium Code Reviews

Unified Diff: content/renderer/media/media_stream_audio_processor.cc

Issue 1275783003: Add a virtual beamforming audio device on ChromeOS. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: aluebs comments. Created 5 years, 3 months ago

 // Copyright 2013 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "content/renderer/media/media_stream_audio_processor.h"

 #include "base/command_line.h"
 #include "base/metrics/field_trial.h"
 #include "base/metrics/histogram.h"
 #include "base/trace_event/trace_event.h"
 #include "content/public/common/content_switches.h"
 #include "content/renderer/media/media_stream_audio_processor_options.h"
 #include "content/renderer/media/rtc_media_constraints.h"
 #include "content/renderer/media/webrtc_audio_device_impl.h"
 #include "media/audio/audio_parameters.h"
 #include "media/base/audio_converter.h"
 #include "media/base/audio_fifo.h"
 #include "media/base/channel_layout.h"
 #include "third_party/WebKit/public/platform/WebMediaConstraints.h"
 #include "third_party/libjingle/source/talk/app/webrtc/mediaconstraintsinterface.h"
 #include "third_party/webrtc/modules/audio_processing/typing_detection.h"

-#if defined(OS_CHROMEOS)
-#include "base/sys_info.h"
-#endif
-
 namespace content {

 namespace {

 using webrtc::AudioProcessing;
 using webrtc::NoiseSuppression;

 const int kAudioProcessingNumberOfChannels = 1;

 AudioProcessing::ChannelLayout MapLayout(media::ChannelLayout media_layout) {
(...skipping 49 matching lines...)

   return (group_name == "Enabled" || group_name == "DefaultEnabled");
 }

 bool IsBeamformingEnabled(const MediaAudioConstraints& audio_constraints) {
   return base::FieldTrialList::FindFullName("ChromebookBeamforming") ==
              "Enabled" ||
          audio_constraints.GetProperty(MediaAudioConstraints::kGoogBeamforming);
 }

-void ConfigureBeamforming(webrtc::Config* config,
-                          const std::string& geometry_str) {
-  std::vector<webrtc::Point> geometry = ParseArrayGeometry(geometry_str);
-#if defined(OS_CHROMEOS)
-  if (geometry.empty()) {
-    const std::string& board = base::SysInfo::GetLsbReleaseBoard();
-    if (board.find("nyan_kitty") != std::string::npos) {
-      geometry.push_back(webrtc::Point(-0.03f, 0.f, 0.f));
-      geometry.push_back(webrtc::Point(0.03f, 0.f, 0.f));
-    } else if (board.find("peach_pi") != std::string::npos) {
-      geometry.push_back(webrtc::Point(-0.025f, 0.f, 0.f));
-      geometry.push_back(webrtc::Point(0.025f, 0.f, 0.f));
-    } else if (board.find("samus") != std::string::npos) {
-      geometry.push_back(webrtc::Point(-0.032f, 0.f, 0.f));
-      geometry.push_back(webrtc::Point(0.032f, 0.f, 0.f));
-    } else if (board.find("swanky") != std::string::npos) {
-      geometry.push_back(webrtc::Point(-0.026f, 0.f, 0.f));
-      geometry.push_back(webrtc::Point(0.026f, 0.f, 0.f));
-    }
-  }
-#endif
-  config->Set<webrtc::Beamforming>(
-      new webrtc::Beamforming(geometry.size() > 1, geometry));
-}
-
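The removed helper relied on ParseArrayGeometry() (declared alongside the options in media_stream_audio_processor_options) to turn the kGoogArrayGeometry constraint string into webrtc::Point values. A minimal sketch of such a parser, assuming the whitespace-separated "x1 y1 z1 x2 y2 z2 ..." format (both the format and the stand-in Point3 type are assumptions here, not the real API):

// Sketch only: not the real ParseArrayGeometry() implementation.
#include <sstream>
#include <string>
#include <vector>

struct Point3 { float x, y, z; };  // stand-in for webrtc::Point

std::vector<Point3> ParseArrayGeometrySketch(const std::string& geometry_str) {
  std::vector<Point3> geometry;
  std::istringstream stream(geometry_str);
  float x, y, z;
  // Read coordinates in groups of three; a trailing partial triple is dropped.
  while (stream >> x >> y >> z)
    geometry.push_back(Point3{x, y, z});
  return geometry;
}

On this reading, the removed ChromeOS block was a fallback table: when no geometry was supplied, it hard-coded the two-mic positions (in meters, along the x-axis) for the known boards.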
 }  // namespace

 // Wraps AudioBus to provide access to the array of channel pointers, since this
 // is the type webrtc::AudioProcessing deals in. The array is refreshed on every
 // channel_ptrs() call, and will be valid until the underlying AudioBus pointers
 // are changed, e.g. through calls to SetChannelData() or SwapChannels().
 //
 // All methods are called on one of the capture or render audio threads
 // exclusively.
 class MediaStreamAudioBus {
(...skipping 133 matching lines...)
   // delay of the first sample in |destination_|.
   base::TimeDelta next_audio_delay_;

   // True when |destination_| contains the data to be returned by the next call
   // to Consume(). Only used when the FIFO is disabled.
   bool data_available_;
 };

 MediaStreamAudioProcessor::MediaStreamAudioProcessor(
     const blink::WebMediaConstraints& constraints,
-    int effects,
+    const MediaStreamDevice::AudioDeviceParameters& input_params,
     WebRtcPlayoutDataSource* playout_data_source)
     : render_delay_ms_(0),
       playout_data_source_(playout_data_source),
       audio_mirroring_(false),
       typing_detected_(false),
       stopped_(false) {
   capture_thread_checker_.DetachFromThread();
   render_thread_checker_.DetachFromThread();
-  InitializeAudioProcessingModule(constraints, effects);
+  InitializeAudioProcessingModule(constraints, input_params);

   aec_dump_message_filter_ = AecDumpMessageFilter::Get();
   // In unit tests that do not create a message filter, |aec_dump_message_filter_|
   // will be NULL. We can just ignore that. Other unit tests and browser tests
   // ensure that we do get the filter when we should.
   if (aec_dump_message_filter_.get())
     aec_dump_message_filter_->AddDelegate(this);
 }

 MediaStreamAudioProcessor::~MediaStreamAudioProcessor() {
(...skipping 154 matching lines...)
   render_fifo_.reset();
 }

 void MediaStreamAudioProcessor::GetStats(AudioProcessorStats* stats) {
   stats->typing_noise_detected =
       (base::subtle::Acquire_Load(&typing_detected_) != false);
   GetAecStats(audio_processing_.get()->echo_cancellation(), stats);
 }

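GetStats() reads |typing_detected_| with an acquire-load so the flag can be published lock-free from the capture thread. A minimal sketch of that pairing, assuming the producer side uses a matching base::subtle::Release_Store (the producer call site is outside this diff, and the function names below are hypothetical):

// Sketch only; |typing_detected| stands in for the member flag.
#include "base/atomicops.h"

base::subtle::Atomic32 typing_detected = 0;

// Capture thread: publish the latest typing-detection result.
void SetTypingDetected(bool detected) {
  base::subtle::Release_Store(&typing_detected, detected ? 1 : 0);
}

// Stats thread: the acquire-load pairs with the release-store above.
bool TypingNoiseDetected() {
  return base::subtle::Acquire_Load(&typing_detected) != 0;
}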
 void MediaStreamAudioProcessor::InitializeAudioProcessingModule(
-    const blink::WebMediaConstraints& constraints, int effects) {
+    const blink::WebMediaConstraints& constraints,
+    const MediaStreamDevice::AudioDeviceParameters& input_params) {
   DCHECK(main_thread_checker_.CalledOnValidThread());
   DCHECK(!audio_processing_);

-  MediaAudioConstraints audio_constraints(constraints, effects);
+  MediaAudioConstraints audio_constraints(constraints, input_params.effects);

   // Audio mirroring can be enabled even though audio processing is otherwise
   // disabled.
   audio_mirroring_ = audio_constraints.GetProperty(
       MediaAudioConstraints::kGoogAudioMirroring);

 #if defined(OS_IOS)
   // On iOS, VPIO provides built-in AGC and AEC.
   const bool echo_cancellation = false;
   const bool goog_agc = false;
(...skipping 31 matching lines...)

   // Experimental options provided at creation.
   webrtc::Config config;
   if (goog_experimental_aec)
     config.Set<webrtc::ExtendedFilter>(new webrtc::ExtendedFilter(true));
   if (goog_experimental_ns)
     config.Set<webrtc::ExperimentalNs>(new webrtc::ExperimentalNs(true));
   if (IsDelayAgnosticAecEnabled())
     config.Set<webrtc::DelayAgnostic>(new webrtc::DelayAgnostic(true));
   if (goog_beamforming) {
-    ConfigureBeamforming(&config,
-                         audio_constraints.GetPropertyAsString(
-                             MediaAudioConstraints::kGoogArrayGeometry));
+    const auto& geometry =
+        GetArrayGeometryPreferringConstraints(audio_constraints, input_params);
+
+    // Only enable beamforming if we have at least two mics.
+    config.Set<webrtc::Beamforming>(
+        new webrtc::Beamforming(geometry.size() > 1, geometry));
   }
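The new helper's name suggests that a geometry given explicitly via constraints takes precedence over the one reported for the device. A sketch of that preference logic, assuming input_params carries the device geometry as a mic_positions string (the field name is an assumption, the real helper lives in media_stream_audio_processor_options, and this sketch leans on the surrounding file's types):

// Sketch only; not the real GetArrayGeometryPreferringConstraints().
std::vector<webrtc::Point> GetArrayGeometryPreferringConstraintsSketch(
    const MediaAudioConstraints& audio_constraints,
    const MediaStreamDevice::AudioDeviceParameters& input_params) {
  const std::string constraints_geometry =
      audio_constraints.GetPropertyAsString(
          MediaAudioConstraints::kGoogArrayGeometry);
  // An explicit kGoogArrayGeometry constraint wins; otherwise fall back to
  // the geometry the browser reported for the capture device.
  return ParseArrayGeometry(constraints_geometry.empty()
                                ? input_params.mic_positions
                                : constraints_geometry);
}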

   // Create and configure the webrtc::AudioProcessing.
   audio_processing_.reset(webrtc::AudioProcessing::Create(config));

   // Enable the audio processing components.
   if (echo_cancellation) {
     EnableEchoCancellation(audio_processing_.get());

     if (playout_data_source_)
(...skipping 69 matching lines...)

   // webrtc::AudioProcessing requires a 10 ms chunk size. We use this native
   // size when processing is enabled. When disabled we use the same size as
   // the source if less than 10 ms.
   //
   // TODO(ajm): This conditional buffer size appears to be assuming knowledge of
   // the sink based on the source parameters. PeerConnection sinks seem to want
   // 10 ms chunks regardless, while WebAudio sinks want less, and we're assuming
   // we can identify WebAudio sinks by the input chunk size. Less fragile would
   // be to have the sink actually tell us how much it wants (as in the above
-  // TODO).
+  // todo).
   int processing_frames = input_format.sample_rate() / 100;
   int output_frames = output_sample_rate / 100;
   if (!audio_processing_ && input_format.frames_per_buffer() < output_frames) {
     processing_frames = input_format.frames_per_buffer();
     output_frames = processing_frames;
   }
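To make the buffer-size arithmetic concrete, here is a self-contained sketch assuming a 48 kHz source, WebAudio's typical 128-frame buffers, and (for simplicity) the same input and output sample rate:

#include <cstdio>

int main() {
  const int sample_rate = 48000;
  const int frames_per_buffer = 128;   // typical WebAudio source buffer
  const bool processing_enabled = false;

  // 10 ms at 48 kHz: 48000 / 100 = 480 frames.
  int processing_frames = sample_rate / 100;
  int output_frames = sample_rate / 100;
  if (!processing_enabled && frames_per_buffer < output_frames) {
    // The source delivers less than 10 ms per buffer, so keep its size
    // instead of buffering up to 10 ms.
    processing_frames = frames_per_buffer;
    output_frames = processing_frames;
  }
  std::printf("%d %d\n", processing_frames, output_frames);  // prints "128 128"
  return 0;
}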

   output_format_ = media::AudioParameters(
       media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
       output_channel_layout,
(...skipping 93 matching lines...)
   if (echo_information_) {
     echo_information_.get()->UpdateAecDelayStats(ap->echo_cancellation());
   }

   // Return 0 if the volume hasn't been changed, and otherwise the new volume.
   return (agc->stream_analog_level() == volume) ?
       0 : agc->stream_analog_level();
 }

 }  // namespace content
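For context on the return convention above (0 means the analog level is unchanged, anything else is the new level the AGC wants applied), a caller-side sketch with hypothetical names, not taken from the real code:

#include <iostream>

// Hypothetical host-side handling of the "0 if unchanged" convention.
void ApplyAgcRecommendation(int recommended_volume, int* current_volume) {
  if (recommended_volume != 0)
    *current_volume = recommended_volume;  // adopt the AGC's new analog level
}

int main() {
  int volume = 128;
  ApplyAgcRecommendation(0, &volume);    // unchanged: stays 128
  ApplyAgcRecommendation(150, &volume);  // AGC recommends 150: apply it
  std::cout << volume << "\n";           // prints 150
  return 0;
}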