Index: content/renderer/media/media_stream_audio_processor.cc
diff --git a/content/renderer/media/media_stream_audio_processor.cc b/content/renderer/media/media_stream_audio_processor.cc
index 168697e20b945e217c530e11acb237d000e04164..9e101fd8de08dd314ab4d1e2bf4b1c6e32ea34f1 100644
--- a/content/renderer/media/media_stream_audio_processor.cc
+++ b/content/renderer/media/media_stream_audio_processor.cc
@@ -93,27 +93,21 @@ bool IsBeamformingEnabled(const MediaAudioConstraints& audio_constraints) {
          audio_constraints.GetProperty(MediaAudioConstraints::kGoogBeamforming);
 }

-void ConfigureBeamforming(webrtc::Config* config,
-                          const std::string& geometry_str) {
-  std::vector<webrtc::Point> geometry = ParseArrayGeometry(geometry_str);
-#if defined(OS_CHROMEOS)
-  if (geometry.empty()) {
-    const std::string& board = base::SysInfo::GetLsbReleaseBoard();
-    if (board.find("nyan_kitty") != std::string::npos) {
-      geometry.push_back(webrtc::Point(-0.03f, 0.f, 0.f));
-      geometry.push_back(webrtc::Point(0.03f, 0.f, 0.f));
-    } else if (board.find("peach_pi") != std::string::npos) {
-      geometry.push_back(webrtc::Point(-0.025f, 0.f, 0.f));
-      geometry.push_back(webrtc::Point(0.025f, 0.f, 0.f));
-    } else if (board.find("samus") != std::string::npos) {
-      geometry.push_back(webrtc::Point(-0.032f, 0.f, 0.f));
-      geometry.push_back(webrtc::Point(0.032f, 0.f, 0.f));
-    } else if (board.find("swanky") != std::string::npos) {
-      geometry.push_back(webrtc::Point(-0.026f, 0.f, 0.f));
-      geometry.push_back(webrtc::Point(0.026f, 0.f, 0.f));
-    }
-  }
-#endif
+void ConfigureBeamforming(
+    webrtc::Config* config,
+    const MediaAudioConstraints& audio_constraints,
+    const MediaStreamDevice::AudioDeviceParameters& input_params) {
+  const std::string& constraints_geometry =
+      audio_constraints.GetPropertyAsString(
+          MediaAudioConstraints::kGoogArrayGeometry);
+  // Give preference to the audio constraint over the device-supplied mic
+  // positions. This is mainly for testing purposes.
+  const std::string& geometry_str = constraints_geometry.empty()
+      ? input_params.mic_positions
+      : constraints_geometry;
+  const auto& geometry = ParseArrayGeometry(geometry_str);
+
+  // Only enable beamforming if we have at least two mics.
   config->Set<webrtc::Beamforming>(
       new webrtc::Beamforming(geometry.size() > 1, geometry));
 }
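
[Note: annotation, not part of the patch] ParseArrayGeometry() (defined earlier in this file, outside the hunk) turns the geometry string into webrtc::Point values; the format is assumed here to be whitespace-separated x y z coordinates in meters, matching the values the removed ChromeOS table hard-coded. The sketch below mirrors the new precedence rule (constraint string wins over device-supplied mic positions) using stand-in types so it compiles on its own; it is an illustration, not the actual Chromium/WebRTC code.

    // Illustration only: stand-in types, not Chromium/WebRTC code.
    #include <iostream>
    #include <sstream>
    #include <string>
    #include <vector>

    struct Point {  // stand-in for webrtc::Point
      float x, y, z;
    };

    // Assumed format: "x1 y1 z1 x2 y2 z2 ..." in meters,
    // e.g. "-0.025 0 0 0.025 0 0" for two mics 5 cm apart.
    std::vector<Point> ParseGeometrySketch(const std::string& geometry_str) {
      std::vector<Point> geometry;
      std::istringstream stream(geometry_str);
      Point p;
      while (stream >> p.x >> p.y >> p.z)
        geometry.push_back(p);
      return geometry;
    }

    int main() {
      const std::string constraints_geometry;  // no goog_array_geometry constraint
      const std::string mic_positions = "-0.025 0 0 0.025 0 0";  // from the device

      // Constraint string wins when present; otherwise use the device data.
      const std::string& geometry_str =
          constraints_geometry.empty() ? mic_positions : constraints_geometry;

      const std::vector<Point> geometry = ParseGeometrySketch(geometry_str);
      // As in the patch, beamforming only makes sense with two or more mics.
      std::cout << "mics: " << geometry.size() << ", beamforming: "
                << (geometry.size() > 1 ? "on" : "off") << "\n";
    }
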
@@ -271,7 +265,7 @@ class MediaStreamAudioFifo {

 MediaStreamAudioProcessor::MediaStreamAudioProcessor(
     const blink::WebMediaConstraints& constraints,
-    int effects,
+    const MediaStreamDevice::AudioDeviceParameters& input_params,
     WebRtcPlayoutDataSource* playout_data_source)
     : render_delay_ms_(0),
       playout_data_source_(playout_data_source),
@@ -280,7 +274,7 @@ MediaStreamAudioProcessor::MediaStreamAudioProcessor(
       stopped_(false) {
   capture_thread_checker_.DetachFromThread();
   render_thread_checker_.DetachFromThread();
-  InitializeAudioProcessingModule(constraints, effects);
+  InitializeAudioProcessingModule(constraints, input_params);

   aec_dump_message_filter_ = AecDumpMessageFilter::Get();
   // In unit tests not creating a message filter, |aec_dump_message_filter_|
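
[Note: annotation, not part of the patch] The constructor now receives the full MediaStreamDevice::AudioDeviceParameters instead of only the effects bitmask, so both the platform effects and the device's microphone positions reach InitializeAudioProcessingModule(). For orientation, the two fields this change relies on are roughly the following sketch; the real struct lives with MediaStreamDevice (content/public/common/media_stream_request.h in this era) and carries further members such as sample rate and buffer size.

    // Sketch of the fields consumed by this change; not the real definition.
    #include <string>

    struct AudioDeviceParametersSketch {
      int effects = 0;            // platform audio effects bitmask
                                  // (the old |effects| constructor argument)
      std::string mic_positions;  // whitespace-separated "x y z" mic
                                  // coordinates, fed to ConfigureBeamforming
    };
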
@@ -455,11 +449,12 @@ void MediaStreamAudioProcessor::GetStats(AudioProcessorStats* stats) {
 }

 void MediaStreamAudioProcessor::InitializeAudioProcessingModule(
-    const blink::WebMediaConstraints& constraints, int effects) {
+    const blink::WebMediaConstraints& constraints,
+    const MediaStreamDevice::AudioDeviceParameters& input_params) {
   DCHECK(main_thread_checker_.CalledOnValidThread());
   DCHECK(!audio_processing_);

-  MediaAudioConstraints audio_constraints(constraints, effects);
+  MediaAudioConstraints audio_constraints(constraints, input_params.effects);

   // Audio mirroring can be enabled even though audio processing is otherwise
   // disabled.
@@ -511,9 +506,7 @@ void MediaStreamAudioProcessor::InitializeAudioProcessingModule(
   if (IsDelayAgnosticAecEnabled())
     config.Set<webrtc::DelayAgnostic>(new webrtc::DelayAgnostic(true));
   if (goog_beamforming) {
-    ConfigureBeamforming(&config,
-                         audio_constraints.GetPropertyAsString(
-                             MediaAudioConstraints::kGoogArrayGeometry));
+    ConfigureBeamforming(&config, audio_constraints, input_params);
   }

   // Create and configure the webrtc::AudioProcessing.
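
[Note: annotation, not part of the patch] webrtc::Config stores options keyed by type; the Beamforming setting written by ConfigureBeamforming() is later read back by the audio processing module via Get<>(). A minimal usage sketch against the WebRTC types this file already uses (assuming its existing base and WebRTC includes) follows; it only illustrates the round trip, it is not code from the patch.

    // Illustration of the webrtc::Config round trip used above.
    void BeamformingConfigSketch() {
      std::vector<webrtc::Point> geometry;
      geometry.push_back(webrtc::Point(-0.025f, 0.f, 0.f));
      geometry.push_back(webrtc::Point(0.025f, 0.f, 0.f));

      webrtc::Config config;
      // Two mics, so the same geometry.size() > 1 check as in the patch
      // enables beamforming. Set<>() takes ownership of the new object.
      config.Set<webrtc::Beamforming>(
          new webrtc::Beamforming(geometry.size() > 1, geometry));

      const webrtc::Beamforming& beamforming = config.Get<webrtc::Beamforming>();
      DCHECK(beamforming.enabled);
      DCHECK_EQ(2u, beamforming.array_geometry.size());
    }
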