Chromium Code Reviews

Unified Diff: content/renderer/media/media_stream_audio_processor.cc

Issue 1224623014: Refactor ParseArrayGeometry to use standard Chromium facilities. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Created 5 years, 5 months ago
 // Copyright 2013 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "content/renderer/media/media_stream_audio_processor.h"

 #include "base/command_line.h"
 #include "base/metrics/field_trial.h"
 #include "base/metrics/histogram.h"
 #include "base/trace_event/trace_event.h"
(...skipping 75 matching lines...)

   return (group_name == "Enabled" || group_name == "DefaultEnabled");
 }

 bool IsBeamformingEnabled(const MediaAudioConstraints& audio_constraints) {
   return base::FieldTrialList::FindFullName("ChromebookBeamforming") ==
              "Enabled" ||
          audio_constraints.GetProperty(MediaAudioConstraints::kGoogBeamforming);
 }

+void ConfigureBeamforming(webrtc::Config* config,
+                          const MediaAudioConstraints& constraints) {
+  std::string position_string = constraints.GetPropertyAsString(
+      MediaAudioConstraints::kGoogArrayGeometry);
+  if (position_string == "") {
aluebs-chromium 2015/07/07 15:40:54 position_string.empty()?
ajm 2015/07/31 02:10:39 Agreed, but now reverted.
+    // Give preference to the media constraint. Only consider the command-line
+    // switch if the constraint is not present.
+    position_string =
+        base::CommandLine::ForCurrentProcess()->GetSwitchValueASCII(
+            switches::kMicrophonePositions);
+  }
+
+  if (position_string != "") {
aluebs-chromium 2015/07/07 15:40:54 !position_string.empty()?
ajm 2015/07/31 02:10:39 Agreed, but now reverted.
+    const auto geometry = ParseArrayGeometry(position_string);
+    // Only enable beamforming when we have more than one mic.
+    const bool enable_beamforming = geometry.size() > 1;
+    config->Set<webrtc::Beamforming>(
+        new webrtc::Beamforming(enable_beamforming, geometry));
+  }
+}
+
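The ParseArrayGeometry called by the new ConfigureBeamforming above is not visible in this chunk; per the issue title it now uses standard Chromium facilities rather than the std::istringstream approach deleted further down. A plausible sketch only, assuming the base::SplitString and base::StringToDouble APIs of this era (the actual implementation lands in the elided lines):

    // Hedged sketch, not the patch's actual code. Assumes base/strings
    // utilities replace the std::istringstream loop removed below.
    std::vector<webrtc::Point> ParseArrayGeometry(
        const std::string& geometry_str) {
      std::vector<float> values;
      for (const std::string& token :
           base::SplitString(geometry_str, " ", base::TRIM_WHITESPACE,
                             base::SPLIT_WANT_NONEMPTY)) {
        double value = 0;
        if (!base::StringToDouble(token, &value))
          return std::vector<webrtc::Point>();  // Reject malformed input.
        values.push_back(static_cast<float>(value));
      }
      // Three coordinates (x, y, z) per microphone position.
      if (values.size() % 3 != 0)
        return std::vector<webrtc::Point>();
      std::vector<webrtc::Point> result;
      for (size_t i = 0; i < values.size(); i += 3)
        result.push_back(webrtc::Point(values[i], values[i + 1], values[i + 2]));
      return result;
    }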
 }  // namespace

 // Wraps AudioBus to provide access to the array of channel pointers, since this
 // is the type webrtc::AudioProcessing deals in. The array is refreshed on every
 // channel_ptrs() call, and will be valid until the underlying AudioBus pointers
 // are changed, e.g. through calls to SetChannelData() or SwapChannels().
 //
 // All methods are called on one of the capture or render audio threads
 // exclusively.
 class MediaStreamAudioBus {
(...skipping 370 matching lines...)
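For orientation while the class body is elided: the wrapper described in the comment above could look roughly like this minimal sketch. The shape, names, and members are assumptions for illustration; the real MediaStreamAudioBus sits in the skipped lines.

    // Assumed minimal shape of the AudioBus wrapper idea (illustrative only).
    class AudioBusChannelPtrsSketch {
     public:
      explicit AudioBusChannelPtrsSketch(media::AudioBus* bus)
          : bus_(bus), channel_ptrs_(new float*[bus->channels()]) {}

      // Refreshed on every call, per the comment above; the pointers remain
      // valid until the AudioBus's channel data is reassigned or swapped.
      float* const* channel_ptrs() {
        for (int i = 0; i < bus_->channels(); ++i)
          channel_ptrs_[i] = bus_->channel(i);
        return channel_ptrs_.get();
      }

     private:
      media::AudioBus* bus_;  // Not owned.
      scoped_ptr<float*[]> channel_ptrs_;
    };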

   // Experimental options provided at creation.
   webrtc::Config config;
   if (goog_experimental_aec)
     config.Set<webrtc::ExtendedFilter>(new webrtc::ExtendedFilter(true));
   if (goog_experimental_ns)
     config.Set<webrtc::ExperimentalNs>(new webrtc::ExperimentalNs(true));
   if (IsDelayAgnosticAecEnabled())
     config.Set<webrtc::DelayAgnostic>(new webrtc::DelayAgnostic(true));
   if (goog_beamforming) {
-    ConfigureBeamforming(&config, audio_constraints.GetPropertyAsString(
-        MediaAudioConstraints::kGoogArrayGeometry));
+    ConfigureBeamforming(&config, audio_constraints);
   }
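A note on the pattern used here: webrtc::Config::Set<T>() stores the heap-allocated settings object keyed by its type and takes ownership, and AudioProcessing::Create(config) reads the options at construction. A minimal sketch (Get<T>() semantics per the WebRTC headers of this era; treat exact signatures as assumptions):

    webrtc::Config config;
    // Config takes ownership of the settings object, keyed by its type.
    config.Set<webrtc::ExtendedFilter>(new webrtc::ExtendedFilter(true));
    // Get<T>() returns the stored settings, or a default-constructed T if
    // nothing was set for that type.
    const bool extended_filter_on =
        config.Get<webrtc::ExtendedFilter>().enabled;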

   // Create and configure the webrtc::AudioProcessing.
   audio_processing_.reset(webrtc::AudioProcessing::Create(config));

   // Enable the audio processing components.
   if (echo_cancellation) {
     EnableEchoCancellation(audio_processing_.get());

     if (playout_data_source_)
(...skipping 23 matching lines...)
     typing_detector_.reset(new webrtc::TypingDetection());
     EnableTypingDetection(audio_processing_.get(), typing_detector_.get());
   }

   if (goog_agc)
     EnableAutomaticGainControl(audio_processing_.get());

   RecordProcessingState(AUDIO_PROCESSING_ENABLED);
 }

-void MediaStreamAudioProcessor::ConfigureBeamforming(
-    webrtc::Config* config,
-    const std::string& geometry_str) const {
-  std::vector<webrtc::Point> geometry = ParseArrayGeometry(geometry_str);
-#if defined(OS_CHROMEOS)
-  if(geometry.size() == 0) {
-    const std::string board = base::SysInfo::GetLsbReleaseBoard();
-    if (board.find("peach_pi") != std::string::npos) {
-      geometry.push_back(webrtc::Point(-0.025f, 0.f, 0.f));
-      geometry.push_back(webrtc::Point(0.025f, 0.f, 0.f));
-    } else if (board.find("swanky") != std::string::npos) {
-      geometry.push_back(webrtc::Point(-0.026f, 0.f, 0.f));
-      geometry.push_back(webrtc::Point(0.026f, 0.f, 0.f));
-    } else if (board.find("samus") != std::string::npos) {
-      geometry.push_back(webrtc::Point(-0.032f, 0.f, 0.f));
-      geometry.push_back(webrtc::Point(0.032f, 0.f, 0.f));
-    }
-  }
-#endif
-  config->Set<webrtc::Beamforming>(new webrtc::Beamforming(geometry.size() > 1,
-                                                           geometry));
-}
-
-std::vector<webrtc::Point> MediaStreamAudioProcessor::ParseArrayGeometry(
-    const std::string& geometry_str) const {
-  std::vector<webrtc::Point> result;
-  std::vector<float> values;
-  std::istringstream str(geometry_str);
-  std::copy(std::istream_iterator<float>(str),
-            std::istream_iterator<float>(),
-            std::back_inserter(values));
-  if (values.size() % 3 == 0) {
-    for (size_t i = 0; i < values.size(); i += 3) {
-      result.push_back(webrtc::Point(values[i + 0],
-                                     values[i + 1],
-                                     values[i + 2]));
-    }
-  }
-  return result;
-}
-
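The hunk above deletes the Chrome OS fallback that hard-coded per-board microphone geometries (peach_pi, swanky, samus). After this patch, geometry must arrive via the kGoogArrayGeometry constraint or the kMicrophonePositions switch; for example, the old peach_pi layout could be reproduced externally roughly as below (the switch value format, space-separated x y z coordinates in meters, is an assumption):

    // Hypothetical usage: supply the deleted peach_pi default through the
    // switch instead of hard-coding it (value format assumed).
    base::CommandLine::ForCurrentProcess()->AppendSwitchASCII(
        switches::kMicrophonePositions, "-0.025 0 0 0.025 0 0");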
 void MediaStreamAudioProcessor::InitializeCaptureFifo(
     const media::AudioParameters& input_format) {
   DCHECK(main_thread_checker_.CalledOnValidThread());
   DCHECK(input_format.IsValid());
   input_format_ = input_format;

   // TODO(ajm): For now, we assume fixed parameters for the output when audio
   // processing is enabled, to match the previous behavior. We should either
   // use the input parameters (in which case, audio processing will convert
   // at output) or ideally, have a backchannel from the sink to know what
(...skipping 140 matching lines...)
   if (echo_information_) {
     echo_information_.get()->UpdateAecDelayStats(ap->echo_cancellation());
   }

   // Return 0 if the volume hasn't been changed, and otherwise the new volume.
   return (agc->stream_analog_level() == volume) ?
       0 : agc->stream_analog_level();
 }

 }  // namespace content