Chromium Code Reviews

Index: content/renderer/media/media_stream_audio_processor.cc
diff --git a/content/renderer/media/media_stream_audio_processor.cc b/content/renderer/media/media_stream_audio_processor.cc
index f0471a9d9452f683d0efa2849a2aecec59b5080d..47ba49a05221e34d0a3e17b49eae517b0b035398 100644
--- a/content/renderer/media/media_stream_audio_processor.cc
+++ b/content/renderer/media/media_stream_audio_processor.cc
@@ -93,6 +93,27 @@ bool IsBeamformingEnabled(const MediaAudioConstraints& audio_constraints) {
          audio_constraints.GetProperty(MediaAudioConstraints::kGoogBeamforming);
 }

+void ConfigureBeamforming(webrtc::Config* config,
+                          const MediaAudioConstraints& constraints) {
+  std::string position_string = constraints.GetPropertyAsString(
+      MediaAudioConstraints::kGoogArrayGeometry);
+  if (position_string == "") {
aluebs-chromium (2015/07/07 15:40:54):
position_string.empty()?

ajm (2015/07/31 02:10:39):
Agreed, but now reverted.
+    // Give preference to the media constraint. Only consider the command-line
+    // switch if the constraint is not present.
+    position_string =
+        base::CommandLine::ForCurrentProcess()->GetSwitchValueASCII(
+            switches::kMicrophonePositions);
+  }
+
+  if (position_string != "") {
aluebs-chromium (2015/07/07 15:40:54):
!position_string.empty()?

ajm (2015/07/31 02:10:39):
Agreed, but now reverted.
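[Editor's note] For reference, the style the reviewer suggests in both comments above is to test emptiness with std::string::empty() rather than comparing against "". A minimal, hypothetical sketch of that style (not the landed Chromium code; the author notes the change was later reverted):

#include <string>

// Hypothetical helper illustrating the reviewer's suggestion: prefer
// std::string::empty() over comparison with "".
bool HasArrayGeometry(const std::string& position_string) {
  return !position_string.empty();  // Equivalent to position_string != "".
}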
+    const auto geometry = ParseArrayGeometry(position_string);
+    // Only enable beamforming when we have more than one mic.
+    const bool enable_beamforming = geometry.size() > 1;
+    config->Set<webrtc::Beamforming>(
+        new webrtc::Beamforming(enable_beamforming, geometry));
+  }
+}
+
 }  // namespace

 // Wraps AudioBus to provide access to the array of channel pointers, since this
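[Editor's note] For context on what ConfigureBeamforming consumes: judging from the ParseArrayGeometry helper removed in the last hunk, the kGoogArrayGeometry constraint (or the command-line switch it falls back to) carries a whitespace-separated list of x y z microphone coordinates, three floats per mic, presumably in meters given the centimeter-scale ChromeOS defaults also removed below. A hypothetical value for a two-mic array matching the removed "samus" defaults might look like this (sketch only, not taken from this CL):

#include <string>

// Hypothetical geometry string: two microphones on the x axis at -0.032 and
// +0.032, the values the removed code hard-coded for the "samus" board.
const char kExampleArrayGeometry[] = "-0.032 0 0 0.032 0 0";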
@@ -483,8 +504,7 @@ void MediaStreamAudioProcessor::InitializeAudioProcessingModule(
   if (IsDelayAgnosticAecEnabled())
     config.Set<webrtc::DelayAgnostic>(new webrtc::DelayAgnostic(true));
   if (goog_beamforming) {
-    ConfigureBeamforming(&config, audio_constraints.GetPropertyAsString(
-        MediaAudioConstraints::kGoogArrayGeometry));
+    ConfigureBeamforming(&config, audio_constraints);
   }

   // Create and configure the webrtc::AudioProcessing.
@@ -528,47 +548,6 @@ void MediaStreamAudioProcessor::InitializeAudioProcessingModule(
   RecordProcessingState(AUDIO_PROCESSING_ENABLED);
 }

-void MediaStreamAudioProcessor::ConfigureBeamforming(
-    webrtc::Config* config,
-    const std::string& geometry_str) const {
-  std::vector<webrtc::Point> geometry = ParseArrayGeometry(geometry_str);
-#if defined(OS_CHROMEOS)
-  if(geometry.size() == 0) {
-    const std::string board = base::SysInfo::GetLsbReleaseBoard();
-    if (board.find("peach_pi") != std::string::npos) {
-      geometry.push_back(webrtc::Point(-0.025f, 0.f, 0.f));
-      geometry.push_back(webrtc::Point(0.025f, 0.f, 0.f));
-    } else if (board.find("swanky") != std::string::npos) {
-      geometry.push_back(webrtc::Point(-0.026f, 0.f, 0.f));
-      geometry.push_back(webrtc::Point(0.026f, 0.f, 0.f));
-    } else if (board.find("samus") != std::string::npos) {
-      geometry.push_back(webrtc::Point(-0.032f, 0.f, 0.f));
-      geometry.push_back(webrtc::Point(0.032f, 0.f, 0.f));
-    }
-  }
-#endif
-  config->Set<webrtc::Beamforming>(new webrtc::Beamforming(geometry.size() > 1,
-                                                           geometry));
-}
-
-std::vector<webrtc::Point> MediaStreamAudioProcessor::ParseArrayGeometry(
-    const std::string& geometry_str) const {
-  std::vector<webrtc::Point> result;
-  std::vector<float> values;
-  std::istringstream str(geometry_str);
-  std::copy(std::istream_iterator<float>(str),
-            std::istream_iterator<float>(),
-            std::back_inserter(values));
-  if (values.size() % 3 == 0) {
-    for (size_t i = 0; i < values.size(); i += 3) {
-      result.push_back(webrtc::Point(values[i + 0],
-                                     values[i + 1],
-                                     values[i + 2]));
-    }
-  }
-  return result;
-}
-
 void MediaStreamAudioProcessor::InitializeCaptureFifo(
     const media::AudioParameters& input_format) {
   DCHECK(main_thread_checker_.CalledOnValidThread());
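[Editor's note] For readers tracking the removed helpers: ParseArrayGeometry reads whitespace-separated floats with std::istream_iterator and groups them into (x, y, z) points, returning an empty vector when the count is not a multiple of three, which in turn leaves beamforming disabled since the new ConfigureBeamforming only enables it for more than one mic. A self-contained sketch of that approach, with a plain Point struct standing in for webrtc::Point:

#include <algorithm>
#include <iostream>
#include <iterator>
#include <sstream>
#include <string>
#include <vector>

// Stand-in for webrtc::Point, which the removed code constructs from three
// floats per microphone.
struct Point {
  float x, y, z;
};

// Same approach as the removed MediaStreamAudioProcessor::ParseArrayGeometry:
// read every float in the string, then group them in threes. A count that is
// not a multiple of three yields an empty result.
std::vector<Point> ParseArrayGeometry(const std::string& geometry_str) {
  std::vector<float> values;
  std::istringstream str(geometry_str);
  std::copy(std::istream_iterator<float>(str), std::istream_iterator<float>(),
            std::back_inserter(values));
  std::vector<Point> result;
  if (values.size() % 3 == 0) {
    for (size_t i = 0; i < values.size(); i += 3)
      result.push_back({values[i], values[i + 1], values[i + 2]});
  }
  return result;
}

int main() {
  // Hypothetical input: two mics 5 cm apart on the x axis.
  const std::vector<Point> geometry =
      ParseArrayGeometry("-0.025 0 0 0.025 0 0");
  std::cout << "Parsed " << geometry.size() << " microphone positions\n";  // 2
  return 0;
}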