OLD | NEW |
1 // Copyright 2013 The Chromium Authors. All rights reserved. | 1 // Copyright 2013 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "content/renderer/media/media_stream_audio_processor.h" | 5 #include "content/renderer/media/media_stream_audio_processor.h" |
6 | 6 |
7 #include <stddef.h> | 7 #include <stddef.h> |
8 #include <stdint.h> | 8 #include <stdint.h> |
| 9 #include <algorithm> |
| 10 #include <limits> |
| 11 #include <string> |
9 #include <utility> | 12 #include <utility> |
| 13 #include <vector> |
10 | 14 |
11 #include "base/command_line.h" | 15 #include "base/command_line.h" |
12 #include "base/feature_list.h" | 16 #include "base/feature_list.h" |
13 #include "base/metrics/field_trial.h" | 17 #include "base/metrics/field_trial.h" |
14 #include "base/metrics/histogram_macros.h" | 18 #include "base/metrics/histogram_macros.h" |
15 #include "base/optional.h" | 19 #include "base/optional.h" |
16 #include "base/single_thread_task_runner.h" | 20 #include "base/single_thread_task_runner.h" |
17 #include "base/strings/string_number_conversions.h" | 21 #include "base/strings/string_number_conversions.h" |
18 #include "base/threading/thread_task_runner_handle.h" | 22 #include "base/threading/thread_task_runner_handle.h" |
19 #include "base/trace_event/trace_event.h" | 23 #include "base/trace_event/trace_event.h" |
20 #include "build/build_config.h" | 24 #include "build/build_config.h" |
21 #include "content/public/common/content_features.h" | 25 #include "content/public/common/content_features.h" |
22 #include "content/public/common/content_switches.h" | 26 #include "content/public/common/content_switches.h" |
23 #include "content/renderer/media/media_stream_audio_processor_options.h" | |
24 #include "content/renderer/media/webrtc_audio_device_impl.h" | 27 #include "content/renderer/media/webrtc_audio_device_impl.h" |
25 #include "media/base/audio_converter.h" | 28 #include "media/base/audio_converter.h" |
26 #include "media/base/audio_fifo.h" | 29 #include "media/base/audio_fifo.h" |
27 #include "media/base/audio_parameters.h" | 30 #include "media/base/audio_parameters.h" |
28 #include "media/base/channel_layout.h" | 31 #include "media/base/channel_layout.h" |
29 #include "third_party/WebKit/public/platform/WebMediaConstraints.h" | |
30 #include "third_party/webrtc/api/mediaconstraintsinterface.h" | 32 #include "third_party/webrtc/api/mediaconstraintsinterface.h" |
31 #include "third_party/webrtc/modules/audio_processing/typing_detection.h" | 33 #include "third_party/webrtc/modules/audio_processing/typing_detection.h" |
32 | 34 |
33 namespace content { | 35 namespace content { |
34 | 36 |
35 namespace { | 37 namespace { |
36 | 38 |
37 using webrtc::AudioProcessing; | 39 using webrtc::AudioProcessing; |
38 using webrtc::NoiseSuppression; | 40 using webrtc::NoiseSuppression; |
39 | 41 |
(...skipping 81 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
121 return base::Optional<int>(startup_min_volume); | 123 return base::Optional<int>(startup_min_volume); |
122 } | 124 } |
123 | 125 |
124 // Checks if the AEC's refined adaptive filter tuning was enabled on the command | 126 // Checks if the AEC's refined adaptive filter tuning was enabled on the command |
125 // line. | 127 // line. |
126 bool UseAecRefinedAdaptiveFilter() { | 128 bool UseAecRefinedAdaptiveFilter() { |
127 return base::CommandLine::ForCurrentProcess()->HasSwitch( | 129 return base::CommandLine::ForCurrentProcess()->HasSwitch( |
128 switches::kAecRefinedAdaptiveFilter); | 130 switches::kAecRefinedAdaptiveFilter); |
129 } | 131 } |
130 | 132 |
| 133 webrtc::Point WebrtcPointFromMediaPoint(const media::Point& point) { |
| 134 return webrtc::Point(point.x(), point.y(), point.z()); |
| 135 } |
| 136 |
| 137 std::vector<webrtc::Point> WebrtcPointsFromMediaPoints( |
| 138 const std::vector<media::Point>& points) { |
| 139 std::vector<webrtc::Point> webrtc_points; |
 | 140 webrtc_points.reserve(points.size()); |
| 141 for (const auto& point : points) |
| 142 webrtc_points.push_back(WebrtcPointFromMediaPoint(point)); |
| 143 return webrtc_points; |
| 144 } |
| 145 |
131 } // namespace | 146 } // namespace |
132 | 147 |
133 // Wraps AudioBus to provide access to the array of channel pointers, since this | 148 // Wraps AudioBus to provide access to the array of channel pointers, since this |
134 // is the type webrtc::AudioProcessing deals in. The array is refreshed on every | 149 // is the type webrtc::AudioProcessing deals in. The array is refreshed on every |
135 // channel_ptrs() call, and will be valid until the underlying AudioBus pointers | 150 // channel_ptrs() call, and will be valid until the underlying AudioBus pointers |
136 // are changed, e.g. through calls to SetChannelData() or SwapChannels(). | 151 // are changed, e.g. through calls to SetChannelData() or SwapChannels(). |
137 // | 152 // |
138 // All methods are called on one of the capture or render audio threads | 153 // All methods are called on one of the capture or render audio threads |
139 // exclusively. | 154 // exclusively. |
140 class MediaStreamAudioBus { | 155 class MediaStreamAudioBus { |
(...skipping 144 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
285 // consumed next from the FIFO. When not using |fifo_|, this is the audio | 300 // consumed next from the FIFO. When not using |fifo_|, this is the audio |
286 // delay of the first sample in |destination_|. | 301 // delay of the first sample in |destination_|. |
287 base::TimeDelta next_audio_delay_; | 302 base::TimeDelta next_audio_delay_; |
288 | 303 |
289 // True when |destination_| contains the data to be returned by the next call | 304 // True when |destination_| contains the data to be returned by the next call |
290 // to Consume(). Only used when the FIFO is disabled. | 305 // to Consume(). Only used when the FIFO is disabled. |
291 bool data_available_; | 306 bool data_available_; |
292 }; | 307 }; |
293 | 308 |
294 MediaStreamAudioProcessor::MediaStreamAudioProcessor( | 309 MediaStreamAudioProcessor::MediaStreamAudioProcessor( |
295 const blink::WebMediaConstraints& constraints, | 310 const AudioProcessingProperties& properties, |
296 const MediaStreamDevice::AudioDeviceParameters& input_params, | |
297 WebRtcPlayoutDataSource* playout_data_source) | 311 WebRtcPlayoutDataSource* playout_data_source) |
298 : render_delay_ms_(0), | 312 : render_delay_ms_(0), |
299 has_echo_cancellation_(false), | 313 has_echo_cancellation_(false), |
300 playout_data_source_(playout_data_source), | 314 playout_data_source_(playout_data_source), |
301 main_thread_runner_(base::ThreadTaskRunnerHandle::Get()), | 315 main_thread_runner_(base::ThreadTaskRunnerHandle::Get()), |
302 audio_mirroring_(false), | 316 audio_mirroring_(false), |
303 typing_detected_(false), | 317 typing_detected_(false), |
304 stopped_(false) { | 318 stopped_(false) { |
305 DCHECK(main_thread_runner_); | 319 DCHECK(main_thread_runner_); |
306 capture_thread_checker_.DetachFromThread(); | 320 capture_thread_checker_.DetachFromThread(); |
307 render_thread_checker_.DetachFromThread(); | 321 render_thread_checker_.DetachFromThread(); |
308 InitializeAudioProcessingModule(constraints, input_params); | 322 InitializeAudioProcessingModule(properties); |
309 | 323 |
310 aec_dump_message_filter_ = AecDumpMessageFilter::Get(); | 324 aec_dump_message_filter_ = AecDumpMessageFilter::Get(); |
311 // In unit tests not creating a message filter, |aec_dump_message_filter_| | 325 // In unit tests not creating a message filter, |aec_dump_message_filter_| |
312 // will be NULL. We can just ignore that. Other unit tests and browser tests | 326 // will be NULL. We can just ignore that. Other unit tests and browser tests |
313 // ensure that we do get the filter when we should. | 327 // ensure that we do get the filter when we should. |
314 if (aec_dump_message_filter_.get()) | 328 if (aec_dump_message_filter_.get()) |
315 aec_dump_message_filter_->AddDelegate(this); | 329 aec_dump_message_filter_->AddDelegate(this); |
316 | 330 |
317 // Create and configure |audio_repetition_detector_|. | 331 // Create and configure |audio_repetition_detector_|. |
318 #if ENABLE_AUDIO_REPETITION_DETECTOR | 332 #if ENABLE_AUDIO_REPETITION_DETECTOR |
(...skipping 165 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
484 } | 498 } |
485 } | 499 } |
486 | 500 |
487 void MediaStreamAudioProcessor::OnIpcClosing() { | 501 void MediaStreamAudioProcessor::OnIpcClosing() { |
488 DCHECK(main_thread_runner_->BelongsToCurrentThread()); | 502 DCHECK(main_thread_runner_->BelongsToCurrentThread()); |
489 aec_dump_message_filter_ = NULL; | 503 aec_dump_message_filter_ = NULL; |
490 } | 504 } |
491 | 505 |
492 // static | 506 // static |
493 bool MediaStreamAudioProcessor::WouldModifyAudio( | 507 bool MediaStreamAudioProcessor::WouldModifyAudio( |
494 const blink::WebMediaConstraints& constraints, | 508 const AudioProcessingProperties& properties) { |
495 int effects_flags) { | |
496 // Note: This method should be kept in-sync with any changes to the logic in | 509 // Note: This method should be kept in-sync with any changes to the logic in |
497 // MediaStreamAudioProcessor::InitializeAudioProcessingModule(). | 510 // MediaStreamAudioProcessor::InitializeAudioProcessingModule(). |
498 | 511 |
499 const MediaAudioConstraints audio_constraints(constraints, effects_flags); | 512 if (properties.goog_audio_mirroring) |
500 | |
501 if (audio_constraints.GetGoogAudioMirroring()) | |
502 return true; | 513 return true; |
503 | 514 |
504 #if !defined(OS_IOS) | 515 #if !defined(OS_IOS) |
505 if (audio_constraints.GetEchoCancellationProperty() || | 516 if (properties.enable_sw_echo_cancellation || |
506 audio_constraints.GetGoogAutoGainControl()) { | 517 properties.goog_auto_gain_control) { |
507 return true; | 518 return true; |
508 } | 519 } |
509 #endif | 520 #endif |
510 | 521 |
511 #if !defined(OS_IOS) && !defined(OS_ANDROID) | 522 #if !defined(OS_IOS) && !defined(OS_ANDROID) |
512 if (audio_constraints.GetGoogExperimentalEchoCancellation() || | 523 if (properties.goog_experimental_echo_cancellation || |
513 audio_constraints.GetGoogTypingNoiseDetection()) { | 524 properties.goog_typing_noise_detection) { |
514 return true; | 525 return true; |
515 } | 526 } |
516 #endif | 527 #endif |
517 | 528 |
518 if (audio_constraints.GetGoogNoiseSuppression() || | 529 if (properties.goog_noise_suppression || |
519 audio_constraints.GetGoogExperimentalNoiseSuppression() || | 530 properties.goog_experimental_noise_suppression || |
520 audio_constraints.GetGoogBeamforming() || | 531 properties.goog_beamforming || properties.goog_highpass_filter) { |
521 audio_constraints.GetGoogHighpassFilter()) { | |
522 return true; | 532 return true; |
523 } | 533 } |
524 | 534 |
525 return false; | 535 return false; |
526 } | 536 } |
527 | 537 |
528 void MediaStreamAudioProcessor::OnPlayoutData(media::AudioBus* audio_bus, | 538 void MediaStreamAudioProcessor::OnPlayoutData(media::AudioBus* audio_bus, |
529 int sample_rate, | 539 int sample_rate, |
530 int audio_delay_milliseconds) { | 540 int audio_delay_milliseconds) { |
531 DCHECK(render_thread_checker_.CalledOnValidThread()); | 541 DCHECK(render_thread_checker_.CalledOnValidThread()); |
(...skipping 39 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
571 render_fifo_->ReattachThreadChecker(); | 581 render_fifo_->ReattachThreadChecker(); |
572 } | 582 } |
573 | 583 |
574 void MediaStreamAudioProcessor::GetStats(AudioProcessorStats* stats) { | 584 void MediaStreamAudioProcessor::GetStats(AudioProcessorStats* stats) { |
575 stats->typing_noise_detected = | 585 stats->typing_noise_detected = |
576 (base::subtle::Acquire_Load(&typing_detected_) != false); | 586 (base::subtle::Acquire_Load(&typing_detected_) != false); |
577 GetAudioProcessingStats(audio_processing_.get(), stats); | 587 GetAudioProcessingStats(audio_processing_.get(), stats); |
578 } | 588 } |
579 | 589 |
580 void MediaStreamAudioProcessor::InitializeAudioProcessingModule( | 590 void MediaStreamAudioProcessor::InitializeAudioProcessingModule( |
581 const blink::WebMediaConstraints& constraints, | 591 const AudioProcessingProperties& properties) { |
582 const MediaStreamDevice::AudioDeviceParameters& input_params) { | |
583 DCHECK(main_thread_runner_->BelongsToCurrentThread()); | 592 DCHECK(main_thread_runner_->BelongsToCurrentThread()); |
584 DCHECK(!audio_processing_); | 593 DCHECK(!audio_processing_); |
585 | 594 |
586 MediaAudioConstraints audio_constraints(constraints, input_params.effects); | |
587 | |
588 // Note: The audio mirroring constraint (i.e., swap left and right channels) | 595 // Note: The audio mirroring constraint (i.e., swap left and right channels) |
589 // is handled within this MediaStreamAudioProcessor and does not, by itself, | 596 // is handled within this MediaStreamAudioProcessor and does not, by itself, |
590 // require webrtc::AudioProcessing. | 597 // require webrtc::AudioProcessing. |
591 audio_mirroring_ = audio_constraints.GetGoogAudioMirroring(); | 598 audio_mirroring_ = properties.goog_audio_mirroring; |
592 | 599 has_echo_cancellation_ = properties.enable_sw_echo_cancellation; |
593 const bool echo_cancellation = | |
594 audio_constraints.GetEchoCancellationProperty(); | |
595 has_echo_cancellation_ = echo_cancellation; | |
596 const bool goog_agc = audio_constraints.GetGoogAutoGainControl(); | |
597 | 600 |
598 #if defined(OS_ANDROID) | 601 #if defined(OS_ANDROID) |
599 const bool goog_experimental_aec = false; | 602 const bool goog_experimental_aec = false; |
600 const bool goog_typing_detection = false; | 603 const bool goog_typing_detection = false; |
601 #else | 604 #else |
602 const bool goog_experimental_aec = | 605 const bool goog_experimental_aec = |
603 audio_constraints.GetGoogExperimentalEchoCancellation(); | 606 properties.goog_experimental_echo_cancellation; |
604 const bool goog_typing_detection = | 607 const bool goog_typing_detection = properties.goog_typing_noise_detection; |
605 audio_constraints.GetGoogTypingNoiseDetection(); | |
606 #endif | 608 #endif |
607 | 609 |
608 const bool goog_ns = audio_constraints.GetGoogNoiseSuppression(); | |
609 const bool goog_experimental_ns = | |
610 audio_constraints.GetGoogExperimentalNoiseSuppression(); | |
611 const bool goog_beamforming = audio_constraints.GetGoogBeamforming(); | |
612 const bool goog_high_pass_filter = audio_constraints.GetGoogHighpassFilter(); | |
613 | |
614 // Return immediately if none of the goog constraints requiring | 610 // Return immediately if none of the goog constraints requiring |
615 // webrtc::AudioProcessing are enabled. | 611 // webrtc::AudioProcessing are enabled. |
616 if (!echo_cancellation && !goog_experimental_aec && !goog_ns && | 612 if (!properties.enable_sw_echo_cancellation && !goog_experimental_aec && |
617 !goog_high_pass_filter && !goog_typing_detection && | 613 !properties.goog_noise_suppression && !properties.goog_highpass_filter && |
618 !goog_agc && !goog_experimental_ns && !goog_beamforming) { | 614 !goog_typing_detection && !properties.goog_auto_gain_control && |
| 615 !properties.goog_experimental_noise_suppression && |
| 616 !properties.goog_beamforming) { |
619 // Sanity-check: WouldModifyAudio() should return true iff | 617 // Sanity-check: WouldModifyAudio() should return true iff |
620 // |audio_mirroring_| is true. | 618 // |audio_mirroring_| is true. |
621 DCHECK_EQ(audio_mirroring_, WouldModifyAudio(constraints, | 619 DCHECK_EQ(audio_mirroring_, WouldModifyAudio(properties)); |
622 input_params.effects)); | |
623 RecordProcessingState(AUDIO_PROCESSING_DISABLED); | 620 RecordProcessingState(AUDIO_PROCESSING_DISABLED); |
624 return; | 621 return; |
625 } | 622 } |
626 | 623 |
627 // Sanity-check: WouldModifyAudio() should return true because the above logic | 624 // Sanity-check: WouldModifyAudio() should return true because the above logic |
628 // has determined webrtc::AudioProcessing will be used. | 625 // has determined webrtc::AudioProcessing will be used. |
629 DCHECK(WouldModifyAudio(constraints, input_params.effects)); | 626 DCHECK(WouldModifyAudio(properties)); |
630 | 627 |
631 // Experimental options provided at creation. | 628 // Experimental options provided at creation. |
632 webrtc::Config config; | 629 webrtc::Config config; |
633 config.Set<webrtc::ExtendedFilter>( | 630 config.Set<webrtc::ExtendedFilter>( |
634 new webrtc::ExtendedFilter(goog_experimental_aec)); | 631 new webrtc::ExtendedFilter(goog_experimental_aec)); |
635 config.Set<webrtc::ExperimentalNs>( | 632 config.Set<webrtc::ExperimentalNs>(new webrtc::ExperimentalNs( |
636 new webrtc::ExperimentalNs(goog_experimental_ns)); | 633 properties.goog_experimental_noise_suppression)); |
637 config.Set<webrtc::DelayAgnostic>(new webrtc::DelayAgnostic(true)); | 634 config.Set<webrtc::DelayAgnostic>(new webrtc::DelayAgnostic(true)); |
638 if (UseAecRefinedAdaptiveFilter()) { | 635 if (UseAecRefinedAdaptiveFilter()) { |
639 config.Set<webrtc::RefinedAdaptiveFilter>( | 636 config.Set<webrtc::RefinedAdaptiveFilter>( |
640 new webrtc::RefinedAdaptiveFilter(true)); | 637 new webrtc::RefinedAdaptiveFilter(true)); |
641 } | 638 } |
642 if (goog_beamforming) { | 639 if (properties.goog_beamforming) { |
643 const auto& geometry = | |
644 GetArrayGeometryPreferringConstraints(audio_constraints, input_params); | |
645 | |
646 // Only enable beamforming if we have at least two mics. | 640 // Only enable beamforming if we have at least two mics. |
647 config.Set<webrtc::Beamforming>( | 641 config.Set<webrtc::Beamforming>(new webrtc::Beamforming( |
648 new webrtc::Beamforming(geometry.size() > 1, geometry)); | 642 properties.goog_array_geometry.size() > 1, |
| 643 WebrtcPointsFromMediaPoints(properties.goog_array_geometry))); |
649 } | 644 } |
650 | 645 |
651 // If the experimental AGC is enabled, check for overridden config params. | 646 // If the experimental AGC is enabled, check for overridden config params. |
652 if (audio_constraints.GetGoogExperimentalAutoGainControl()) { | 647 if (properties.goog_experimental_auto_gain_control) { |
653 auto startup_min_volume = GetStartupMinVolumeForAgc(); | 648 auto startup_min_volume = GetStartupMinVolumeForAgc(); |
654 constexpr int kClippingLevelMin = 70; | 649 constexpr int kClippingLevelMin = 70; |
655 // TODO(hlundin) Make this value default in WebRTC and clean up here. | 650 // TODO(hlundin) Make this value default in WebRTC and clean up here. |
656 config.Set<webrtc::ExperimentalAgc>(new webrtc::ExperimentalAgc( | 651 config.Set<webrtc::ExperimentalAgc>(new webrtc::ExperimentalAgc( |
657 true, startup_min_volume.value_or(0), kClippingLevelMin)); | 652 true, startup_min_volume.value_or(0), kClippingLevelMin)); |
658 } | 653 } |
659 | 654 |
660 // Create and configure the webrtc::AudioProcessing. | 655 // Create and configure the webrtc::AudioProcessing. |
661 audio_processing_.reset(webrtc::AudioProcessing::Create(config)); | 656 audio_processing_.reset(webrtc::AudioProcessing::Create(config)); |
662 | 657 |
663 // Enable the audio processing components. | 658 // Enable the audio processing components. |
664 webrtc::AudioProcessing::Config apm_config; | 659 webrtc::AudioProcessing::Config apm_config; |
665 | 660 |
666 if (playout_data_source_) { | 661 if (playout_data_source_) { |
667 playout_data_source_->AddPlayoutSink(this); | 662 playout_data_source_->AddPlayoutSink(this); |
668 } | 663 } |
669 | 664 |
670 if (echo_cancellation) { | 665 if (properties.enable_sw_echo_cancellation) { |
671 EnableEchoCancellation(audio_processing_.get()); | 666 EnableEchoCancellation(audio_processing_.get()); |
672 | 667 |
673 apm_config.echo_canceller3.enabled = override_aec3_.value_or( | 668 apm_config.echo_canceller3.enabled = override_aec3_.value_or( |
674 base::FeatureList::IsEnabled(features::kWebRtcUseEchoCanceller3)); | 669 base::FeatureList::IsEnabled(features::kWebRtcUseEchoCanceller3)); |
675 | 670 |
676 if (!apm_config.echo_canceller3.enabled) { | 671 if (!apm_config.echo_canceller3.enabled) { |
677 // Prepare for logging echo information. If there are data remaining in | 672 // Prepare for logging echo information. If there are data remaining in |
678 // |echo_information_| we simply discard it. | 673 // |echo_information_| we simply discard it. |
679 echo_information_ = base::MakeUnique<EchoInformation>(); | 674 echo_information_ = base::MakeUnique<EchoInformation>(); |
680 } else { | 675 } else { |
681 // Do not log any echo information when AEC3 is active, as the echo | 676 // Do not log any echo information when AEC3 is active, as the echo |
682 // information then will not be properly updated. | 677 // information then will not be properly updated. |
683 echo_information_.reset(); | 678 echo_information_.reset(); |
684 } | 679 } |
685 } else { | 680 } else { |
686 apm_config.echo_canceller3.enabled = false; | 681 apm_config.echo_canceller3.enabled = false; |
687 } | 682 } |
688 | 683 |
689 if (goog_ns) { | 684 if (properties.goog_noise_suppression) { |
690 // The beamforming postfilter is effective at suppressing stationary noise, | 685 // The beamforming postfilter is effective at suppressing stationary noise, |
691 // so reduce the single-channel NS aggressiveness when enabled. | 686 // so reduce the single-channel NS aggressiveness when enabled. |
692 const NoiseSuppression::Level ns_level = | 687 const NoiseSuppression::Level ns_level = |
693 config.Get<webrtc::Beamforming>().enabled ? NoiseSuppression::kLow | 688 config.Get<webrtc::Beamforming>().enabled ? NoiseSuppression::kLow |
694 : NoiseSuppression::kHigh; | 689 : NoiseSuppression::kHigh; |
695 | 690 |
696 EnableNoiseSuppression(audio_processing_.get(), ns_level); | 691 EnableNoiseSuppression(audio_processing_.get(), ns_level); |
697 } | 692 } |
698 | 693 |
699 apm_config.high_pass_filter.enabled = goog_high_pass_filter; | 694 apm_config.high_pass_filter.enabled = properties.goog_highpass_filter; |
700 | 695 |
701 if (goog_typing_detection) { | 696 if (goog_typing_detection) { |
702 // TODO(xians): Remove this |typing_detector_| after the typing suppression | 697 // TODO(xians): Remove this |typing_detector_| after the typing suppression |
703 // is enabled by default. | 698 // is enabled by default. |
704 typing_detector_.reset(new webrtc::TypingDetection()); | 699 typing_detector_.reset(new webrtc::TypingDetection()); |
705 EnableTypingDetection(audio_processing_.get(), typing_detector_.get()); | 700 EnableTypingDetection(audio_processing_.get(), typing_detector_.get()); |
706 } | 701 } |
707 | 702 |
708 if (goog_agc) | 703 if (properties.goog_auto_gain_control) |
709 EnableAutomaticGainControl(audio_processing_.get()); | 704 EnableAutomaticGainControl(audio_processing_.get()); |
710 | 705 |
711 audio_processing_->ApplyConfig(apm_config); | 706 audio_processing_->ApplyConfig(apm_config); |
712 | 707 |
713 RecordProcessingState(AUDIO_PROCESSING_ENABLED); | 708 RecordProcessingState(AUDIO_PROCESSING_ENABLED); |
714 } | 709 } |
715 | 710 |
716 void MediaStreamAudioProcessor::InitializeCaptureFifo( | 711 void MediaStreamAudioProcessor::InitializeCaptureFifo( |
717 const media::AudioParameters& input_format) { | 712 const media::AudioParameters& input_format) { |
718 DCHECK(main_thread_runner_->BelongsToCurrentThread()); | 713 DCHECK(main_thread_runner_->BelongsToCurrentThread()); |
(...skipping 152 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
871 0 : agc->stream_analog_level(); | 866 0 : agc->stream_analog_level(); |
872 } | 867 } |
873 | 868 |
874 void MediaStreamAudioProcessor::UpdateAecStats() { | 869 void MediaStreamAudioProcessor::UpdateAecStats() { |
875 DCHECK(main_thread_runner_->BelongsToCurrentThread()); | 870 DCHECK(main_thread_runner_->BelongsToCurrentThread()); |
876 if (echo_information_) | 871 if (echo_information_) |
877 echo_information_->UpdateAecStats(audio_processing_->echo_cancellation()); | 872 echo_information_->UpdateAecStats(audio_processing_->echo_cancellation()); |
878 } | 873 } |
879 | 874 |
880 } // namespace content | 875 } // namespace content |
OLD | NEW |