OLD | NEW |
---|---|
1 // Copyright 2013 The Chromium Authors. All rights reserved. | 1 // Copyright 2013 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "content/renderer/media/media_stream_audio_processor.h" | 5 #include "content/renderer/media/media_stream_audio_processor.h" |
6 | 6 |
7 #include "base/command_line.h" | 7 #include "base/command_line.h" |
8 #include "base/metrics/field_trial.h" | 8 #include "base/metrics/field_trial.h" |
9 #include "base/metrics/histogram.h" | 9 #include "base/metrics/histogram.h" |
10 #include "base/trace_event/trace_event.h" | 10 #include "base/trace_event/trace_event.h" |
11 #include "content/public/common/content_switches.h" | 11 #include "content/public/common/content_switches.h" |
12 #include "content/renderer/media/media_stream_audio_processor_options.h" | 12 #include "content/renderer/media/media_stream_audio_processor_options.h" |
13 #include "content/renderer/media/rtc_media_constraints.h" | 13 #include "content/renderer/media/rtc_media_constraints.h" |
14 #include "content/renderer/media/webrtc_audio_device_impl.h" | 14 #include "content/renderer/media/webrtc_audio_device_impl.h" |
15 #include "media/audio/audio_parameters.h" | 15 #include "media/audio/audio_parameters.h" |
16 #include "media/base/audio_converter.h" | 16 #include "media/base/audio_converter.h" |
17 #include "media/base/audio_fifo.h" | 17 #include "media/base/audio_fifo.h" |
18 #include "media/base/channel_layout.h" | 18 #include "media/base/channel_layout.h" |
19 #include "third_party/WebKit/public/platform/WebMediaConstraints.h" | 19 #include "third_party/WebKit/public/platform/WebMediaConstraints.h" |
20 #include "third_party/libjingle/source/talk/app/webrtc/mediaconstraintsinterface.h" | 20 #include "third_party/libjingle/source/talk/app/webrtc/mediaconstraintsinterface.h" |
21 #include "third_party/webrtc/modules/audio_processing/typing_detection.h" | 21 #include "third_party/webrtc/modules/audio_processing/typing_detection.h" |
22 | 22 |
23 #if defined(OS_CHROMEOS) | 23 #if defined(OS_CHROMEOS) |
24 #include "base/sys_info.h" | 24 #include "base/sys_info.h" |
aluebs-chromium
2015/08/28 19:14:30
This is not needed anymore, right?
aluebs-chromium
2015/09/10 02:41:39
right?
ajm
2015/09/10 21:56:57
Thanks for the reminder; removed.
| |
25 #endif | 25 #endif |
26 | 26 |
27 namespace content { | 27 namespace content { |
28 | 28 |
29 namespace { | 29 namespace { |
30 | 30 |
31 using webrtc::AudioProcessing; | 31 using webrtc::AudioProcessing; |
32 using webrtc::NoiseSuppression; | 32 using webrtc::NoiseSuppression; |
33 | 33 |
34 const int kAudioProcessingNumberOfChannels = 1; | 34 const int kAudioProcessingNumberOfChannels = 1; |
(...skipping 51 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
86 | 86 |
87 return (group_name == "Enabled" || group_name == "DefaultEnabled"); | 87 return (group_name == "Enabled" || group_name == "DefaultEnabled"); |
88 } | 88 } |
89 | 89 |
90 bool IsBeamformingEnabled(const MediaAudioConstraints& audio_constraints) { | 90 bool IsBeamformingEnabled(const MediaAudioConstraints& audio_constraints) { |
91 return base::FieldTrialList::FindFullName("ChromebookBeamforming") == | 91 return base::FieldTrialList::FindFullName("ChromebookBeamforming") == |
92 "Enabled" || | 92 "Enabled" || |
93 audio_constraints.GetProperty(MediaAudioConstraints::kGoogBeamforming); | 93 audio_constraints.GetProperty(MediaAudioConstraints::kGoogBeamforming); |
94 } | 94 } |
95 | 95 |
96 void ConfigureBeamforming(webrtc::Config* config, | 96 void ConfigureBeamforming( |
97 const std::string& geometry_str) { | 97 webrtc::Config* config, |
98 std::vector<webrtc::Point> geometry = ParseArrayGeometry(geometry_str); | 98 const MediaAudioConstraints& audio_constraints, |
99 #if defined(OS_CHROMEOS) | 99 const MediaStreamDevice::AudioDeviceParameters& input_params) { |
100 if (geometry.empty()) { | 100 const std::string& constraints_geometry = |
101 const std::string& board = base::SysInfo::GetLsbReleaseBoard(); | 101 audio_constraints.GetPropertyAsString( |
102 if (board.find("nyan_kitty") != std::string::npos) { | 102 MediaAudioConstraints::kGoogArrayGeometry); |
103 geometry.push_back(webrtc::Point(-0.03f, 0.f, 0.f)); | 103 // Give preference to the audio constraint over the device-supplied mic |
104 geometry.push_back(webrtc::Point(0.03f, 0.f, 0.f)); | 104 // positions. This is mainly for testing purposes. |
105 } else if (board.find("peach_pi") != std::string::npos) { | 105 const std::string& geometry_str = constraints_geometry.empty() |
106 geometry.push_back(webrtc::Point(-0.025f, 0.f, 0.f)); | 106 ? input_params.mic_positions |
107 geometry.push_back(webrtc::Point(0.025f, 0.f, 0.f)); | 107 : constraints_geometry; |
108 } else if (board.find("samus") != std::string::npos) { | 108 const auto& geometry = ParseArrayGeometry(geometry_str); |
aluebs-chromium
2015/08/28 19:14:30
Personally I think in this case having the type spelled out explicitly would be clearer than auto.
| |
109 geometry.push_back(webrtc::Point(-0.032f, 0.f, 0.f)); | 109 |
110 geometry.push_back(webrtc::Point(0.032f, 0.f, 0.f)); | 110 // Only enable beamforming if we have at least two mics. |
111 } else if (board.find("swanky") != std::string::npos) { | |
112 geometry.push_back(webrtc::Point(-0.026f, 0.f, 0.f)); | |
113 geometry.push_back(webrtc::Point(0.026f, 0.f, 0.f)); | |
114 } | |
115 } | |
116 #endif | |
117 config->Set<webrtc::Beamforming>( | 111 config->Set<webrtc::Beamforming>( |
118 new webrtc::Beamforming(geometry.size() > 1, geometry)); | 112 new webrtc::Beamforming(geometry.size() > 1, geometry)); |
119 } | 113 } |
120 | 114 |
121 } // namespace | 115 } // namespace |
122 | 116 |
123 // Wraps AudioBus to provide access to the array of channel pointers, since this | 117 // Wraps AudioBus to provide access to the array of channel pointers, since this |
124 // is the type webrtc::AudioProcessing deals in. The array is refreshed on every | 118 // is the type webrtc::AudioProcessing deals in. The array is refreshed on every |
125 // channel_ptrs() call, and will be valid until the underlying AudioBus pointers | 119 // channel_ptrs() call, and will be valid until the underlying AudioBus pointers |
126 // are changed, e.g. through calls to SetChannelData() or SwapChannels(). | 120 // are changed, e.g. through calls to SetChannelData() or SwapChannels(). |
(...skipping 137 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
264 // delay of the first sample in |destination_|. | 258 // delay of the first sample in |destination_|. |
265 base::TimeDelta next_audio_delay_; | 259 base::TimeDelta next_audio_delay_; |
266 | 260 |
267 // True when |destination_| contains the data to be returned by the next call | 261 // True when |destination_| contains the data to be returned by the next call |
268 // to Consume(). Only used when the FIFO is disabled. | 262 // to Consume(). Only used when the FIFO is disabled. |
269 bool data_available_; | 263 bool data_available_; |
270 }; | 264 }; |
271 | 265 |
272 MediaStreamAudioProcessor::MediaStreamAudioProcessor( | 266 MediaStreamAudioProcessor::MediaStreamAudioProcessor( |
273 const blink::WebMediaConstraints& constraints, | 267 const blink::WebMediaConstraints& constraints, |
274 int effects, | 268 const MediaStreamDevice::AudioDeviceParameters& input_params, |
275 WebRtcPlayoutDataSource* playout_data_source) | 269 WebRtcPlayoutDataSource* playout_data_source) |
276 : render_delay_ms_(0), | 270 : render_delay_ms_(0), |
277 playout_data_source_(playout_data_source), | 271 playout_data_source_(playout_data_source), |
278 audio_mirroring_(false), | 272 audio_mirroring_(false), |
279 typing_detected_(false), | 273 typing_detected_(false), |
280 stopped_(false) { | 274 stopped_(false) { |
281 capture_thread_checker_.DetachFromThread(); | 275 capture_thread_checker_.DetachFromThread(); |
282 render_thread_checker_.DetachFromThread(); | 276 render_thread_checker_.DetachFromThread(); |
283 InitializeAudioProcessingModule(constraints, effects); | 277 InitializeAudioProcessingModule(constraints, input_params); |
284 | 278 |
285 aec_dump_message_filter_ = AecDumpMessageFilter::Get(); | 279 aec_dump_message_filter_ = AecDumpMessageFilter::Get(); |
286 // In unit tests not creating a message filter, |aec_dump_message_filter_| | 280 // In unit tests not creating a message filter, |aec_dump_message_filter_| |
287 // will be NULL. We can just ignore that. Other unit tests and browser tests | 281 // will be NULL. We can just ignore that. Other unit tests and browser tests |
288 // ensure that we do get the filter when we should. | 282 // ensure that we do get the filter when we should. |
289 if (aec_dump_message_filter_.get()) | 283 if (aec_dump_message_filter_.get()) |
290 aec_dump_message_filter_->AddDelegate(this); | 284 aec_dump_message_filter_->AddDelegate(this); |
291 } | 285 } |
292 | 286 |
293 MediaStreamAudioProcessor::~MediaStreamAudioProcessor() { | 287 MediaStreamAudioProcessor::~MediaStreamAudioProcessor() { |
(...skipping 154 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
448 render_fifo_.reset(); | 442 render_fifo_.reset(); |
449 } | 443 } |
450 | 444 |
451 void MediaStreamAudioProcessor::GetStats(AudioProcessorStats* stats) { | 445 void MediaStreamAudioProcessor::GetStats(AudioProcessorStats* stats) { |
452 stats->typing_noise_detected = | 446 stats->typing_noise_detected = |
453 (base::subtle::Acquire_Load(&typing_detected_) != false); | 447 (base::subtle::Acquire_Load(&typing_detected_) != false); |
454 GetAecStats(audio_processing_.get()->echo_cancellation(), stats); | 448 GetAecStats(audio_processing_.get()->echo_cancellation(), stats); |
455 } | 449 } |
456 | 450 |
457 void MediaStreamAudioProcessor::InitializeAudioProcessingModule( | 451 void MediaStreamAudioProcessor::InitializeAudioProcessingModule( |
458 const blink::WebMediaConstraints& constraints, int effects) { | 452 const blink::WebMediaConstraints& constraints, |
453 const MediaStreamDevice::AudioDeviceParameters& input_params) { | |
459 DCHECK(main_thread_checker_.CalledOnValidThread()); | 454 DCHECK(main_thread_checker_.CalledOnValidThread()); |
460 DCHECK(!audio_processing_); | 455 DCHECK(!audio_processing_); |
461 | 456 |
462 MediaAudioConstraints audio_constraints(constraints, effects); | 457 MediaAudioConstraints audio_constraints(constraints, input_params.effects); |
463 | 458 |
464 // Audio mirroring can be enabled even though audio processing is otherwise | 459 // Audio mirroring can be enabled even though audio processing is otherwise |
465 // disabled. | 460 // disabled. |
466 audio_mirroring_ = audio_constraints.GetProperty( | 461 audio_mirroring_ = audio_constraints.GetProperty( |
467 MediaAudioConstraints::kGoogAudioMirroring); | 462 MediaAudioConstraints::kGoogAudioMirroring); |
468 | 463 |
469 #if defined(OS_IOS) | 464 #if defined(OS_IOS) |
470 // On iOS, VPIO provides built-in AGC and AEC. | 465 // On iOS, VPIO provides built-in AGC and AEC. |
471 const bool echo_cancellation = false; | 466 const bool echo_cancellation = false; |
472 const bool goog_agc = false; | 467 const bool goog_agc = false; |
(...skipping 31 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
504 | 499 |
505 // Experimental options provided at creation. | 500 // Experimental options provided at creation. |
506 webrtc::Config config; | 501 webrtc::Config config; |
507 if (goog_experimental_aec) | 502 if (goog_experimental_aec) |
508 config.Set<webrtc::ExtendedFilter>(new webrtc::ExtendedFilter(true)); | 503 config.Set<webrtc::ExtendedFilter>(new webrtc::ExtendedFilter(true)); |
509 if (goog_experimental_ns) | 504 if (goog_experimental_ns) |
510 config.Set<webrtc::ExperimentalNs>(new webrtc::ExperimentalNs(true)); | 505 config.Set<webrtc::ExperimentalNs>(new webrtc::ExperimentalNs(true)); |
511 if (IsDelayAgnosticAecEnabled()) | 506 if (IsDelayAgnosticAecEnabled()) |
512 config.Set<webrtc::DelayAgnostic>(new webrtc::DelayAgnostic(true)); | 507 config.Set<webrtc::DelayAgnostic>(new webrtc::DelayAgnostic(true)); |
513 if (goog_beamforming) { | 508 if (goog_beamforming) { |
514 ConfigureBeamforming(&config, | 509 ConfigureBeamforming(&config, audio_constraints, input_params); |
515 audio_constraints.GetPropertyAsString( | |
516 MediaAudioConstraints::kGoogArrayGeometry)); | |
517 } | 510 } |
518 | 511 |
519 // Create and configure the webrtc::AudioProcessing. | 512 // Create and configure the webrtc::AudioProcessing. |
520 audio_processing_.reset(webrtc::AudioProcessing::Create(config)); | 513 audio_processing_.reset(webrtc::AudioProcessing::Create(config)); |
521 | 514 |
522 // Enable the audio processing components. | 515 // Enable the audio processing components. |
523 if (echo_cancellation) { | 516 if (echo_cancellation) { |
524 EnableEchoCancellation(audio_processing_.get()); | 517 EnableEchoCancellation(audio_processing_.get()); |
525 | 518 |
526 if (playout_data_source_) | 519 if (playout_data_source_) |
(...skipping 183 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
710 if (echo_information_) { | 703 if (echo_information_) { |
711 echo_information_.get()->UpdateAecDelayStats(ap->echo_cancellation()); | 704 echo_information_.get()->UpdateAecDelayStats(ap->echo_cancellation()); |
712 } | 705 } |
713 | 706 |
714 // Return 0 if the volume hasn't been changed, and otherwise the new volume. | 707 // Return 0 if the volume hasn't been changed, and otherwise the new volume. |
715 return (agc->stream_analog_level() == volume) ? | 708 return (agc->stream_analog_level() == volume) ? |
716 0 : agc->stream_analog_level(); | 709 0 : agc->stream_analog_level(); |
717 } | 710 } |
718 | 711 |
719 } // namespace content | 712 } // namespace content |
OLD | NEW |