| OLD | NEW |
| 1 // Copyright 2013 The Chromium Authors. All rights reserved. | 1 // Copyright 2013 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "content/renderer/media/media_stream_audio_processor.h" | 5 #include "content/renderer/media/media_stream_audio_processor.h" |
| 6 | 6 |
| 7 #include "base/command_line.h" | 7 #include "base/command_line.h" |
| 8 #include "base/metrics/field_trial.h" | 8 #include "base/metrics/field_trial.h" |
| 9 #include "base/metrics/histogram.h" | 9 #include "base/metrics/histogram.h" |
| 10 #include "base/trace_event/trace_event.h" | 10 #include "base/trace_event/trace_event.h" |
| (...skipping 75 matching lines...) |
| 86 | 86 |
| 87 return (group_name == "Enabled" || group_name == "DefaultEnabled"); | 87 return (group_name == "Enabled" || group_name == "DefaultEnabled"); |
| 88 } | 88 } |
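The head of this helper is elided above; a plausible reconstruction of the full field-trial gate, assuming (from the IsDelayAgnosticAecEnabled() call further down) that the trial name is "UseDelayAgnosticAEC":

    // Sketch only: opt in when the trial group is "Enabled" or "DefaultEnabled".
    bool IsDelayAgnosticAecEnabled() {
      const std::string group_name =
          base::FieldTrialList::FindFullName("UseDelayAgnosticAEC");
      return (group_name == "Enabled" || group_name == "DefaultEnabled");
    }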
| 89 | 89 |
| 90 bool IsBeamformingEnabled(const MediaAudioConstraints& audio_constraints) { | 90 bool IsBeamformingEnabled(const MediaAudioConstraints& audio_constraints) { |
| 91 return base::FieldTrialList::FindFullName("ChromebookBeamforming") == | 91 return base::FieldTrialList::FindFullName("ChromebookBeamforming") == |
| 92 "Enabled" || | 92 "Enabled" || |
| 93 audio_constraints.GetProperty(MediaAudioConstraints::kGoogBeamforming); | 93 audio_constraints.GetProperty(MediaAudioConstraints::kGoogBeamforming); |
| 94 } | 94 } |
| 95 | 95 |
| 96 void ConfigureBeamforming(webrtc::Config* config, | |
| 97 const std::string& geometry_str) { | |
| 98 std::vector<webrtc::Point> geometry = ParseArrayGeometry(geometry_str); | |
| 99 #if defined(OS_CHROMEOS) | |
| 100 if (geometry.empty()) { | |
| 101 const std::string& board = base::SysInfo::GetLsbReleaseBoard(); | |
| 102 if (board.find("nyan_kitty") != std::string::npos) { | |
| 103 geometry.push_back(webrtc::Point(-0.03f, 0.f, 0.f)); | |
| 104 geometry.push_back(webrtc::Point(0.03f, 0.f, 0.f)); | |
| 105 } else if (board.find("peach_pi") != std::string::npos) { | |
| 106 geometry.push_back(webrtc::Point(-0.025f, 0.f, 0.f)); | |
| 107 geometry.push_back(webrtc::Point(0.025f, 0.f, 0.f)); | |
| 108 } else if (board.find("samus") != std::string::npos) { | |
| 109 geometry.push_back(webrtc::Point(-0.032f, 0.f, 0.f)); | |
| 110 geometry.push_back(webrtc::Point(0.032f, 0.f, 0.f)); | |
| 111 } else if (board.find("swanky") != std::string::npos) { | |
| 112 geometry.push_back(webrtc::Point(-0.026f, 0.f, 0.f)); | |
| 113 geometry.push_back(webrtc::Point(0.026f, 0.f, 0.f)); | |
| 114 } | |
| 115 } | |
| 116 #endif | |
| 117 config->Set<webrtc::Beamforming>( | |
| 118 new webrtc::Beamforming(geometry.size() > 1, geometry)); | |
| 119 } | |
| 120 | |
| 121 } // namespace | 96 } // namespace |
| 122 | 97 |
| 123 // Wraps AudioBus to provide access to the array of channel pointers, since this | 98 // Wraps AudioBus to provide access to the array of channel pointers, since this |
| 124 // is the type webrtc::AudioProcessing deals in. The array is refreshed on every | 99 // is the type webrtc::AudioProcessing deals in. The array is refreshed on every |
| 125 // channel_ptrs() call, and will be valid until the underlying AudioBus pointers | 100 // channel_ptrs() call, and will be valid until the underlying AudioBus pointers |
| 126 // are changed, e.g. through calls to SetChannelData() or SwapChannels(). | 101 // are changed, e.g. through calls to SetChannelData() or SwapChannels(). |
| 127 // | 102 // |
| 128 // All methods are called on one of the capture or render audio threads | 103 // All methods are called on one of the capture or render audio threads |
| 129 // exclusively. | 104 // exclusively. |
| 130 class MediaStreamAudioBus { | 105 class MediaStreamAudioBus { |
| (...skipping 133 matching lines...) |
| 264 // delay of the first sample in |destination_|. | 239 // delay of the first sample in |destination_|. |
| 265 base::TimeDelta next_audio_delay_; | 240 base::TimeDelta next_audio_delay_; |
| 266 | 241 |
| 267 // True when |destination_| contains the data to be returned by the next call | 242 // True when |destination_| contains the data to be returned by the next call |
| 268 // to Consume(). Only used when the FIFO is disabled. | 243 // to Consume(). Only used when the FIFO is disabled. |
| 269 bool data_available_; | 244 bool data_available_; |
| 270 }; | 245 }; |
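The comment above describes the wrapper's contract: expose the float* const* channel array that webrtc::AudioProcessing consumes, refreshed on each channel_ptrs() call. A minimal sketch of that idea, assuming only media::AudioBus's public channels()/channel() accessors (the class and member names here are illustrative, not the CL's):

    #include "base/memory/scoped_ptr.h"
    #include "media/base/audio_bus.h"

    class AudioBusChannelPtrs {
     public:
      explicit AudioBusChannelPtrs(media::AudioBus* bus)
          : bus_(bus), ptrs_(new float*[bus->channels()]) {}

      // Refreshes the cached pointers on every call; the returned array stays
      // valid until the underlying AudioBus channel pointers change.
      float* const* channel_ptrs() {
        for (int i = 0; i < bus_->channels(); ++i)
          ptrs_[i] = bus_->channel(i);
        return ptrs_.get();
      }

     private:
      media::AudioBus* bus_;  // Not owned.
      scoped_ptr<float*[]> ptrs_;
    };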
| 271 | 246 |
| 272 MediaStreamAudioProcessor::MediaStreamAudioProcessor( | 247 MediaStreamAudioProcessor::MediaStreamAudioProcessor( |
| 273 const blink::WebMediaConstraints& constraints, | 248 const blink::WebMediaConstraints& constraints, |
| 274 int effects, | 249 const MediaStreamDevice::AudioDeviceParameters& input_params, |
| 275 WebRtcPlayoutDataSource* playout_data_source) | 250 WebRtcPlayoutDataSource* playout_data_source) |
| 276 : render_delay_ms_(0), | 251 : render_delay_ms_(0), |
| 277 playout_data_source_(playout_data_source), | 252 playout_data_source_(playout_data_source), |
| 278 audio_mirroring_(false), | 253 audio_mirroring_(false), |
| 279 typing_detected_(false), | 254 typing_detected_(false), |
| 280 stopped_(false) { | 255 stopped_(false) { |
| 281 capture_thread_checker_.DetachFromThread(); | 256 capture_thread_checker_.DetachFromThread(); |
| 282 render_thread_checker_.DetachFromThread(); | 257 render_thread_checker_.DetachFromThread(); |
| 283 InitializeAudioProcessingModule(constraints, effects); | 258 InitializeAudioProcessingModule(constraints, input_params); |
| 284 | 259 |
| 285 aec_dump_message_filter_ = AecDumpMessageFilter::Get(); | 260 aec_dump_message_filter_ = AecDumpMessageFilter::Get(); |
| 286 // In unit tests that don't create a message filter, |aec_dump_message_filter_| | 261 // In unit tests that don't create a message filter, |aec_dump_message_filter_| |
| 287 // will be NULL. We can just ignore that. Other unit tests and browser tests | 262 // will be NULL. We can just ignore that. Other unit tests and browser tests |
| 288 // ensure that we do get the filter when we should. | 263 // ensure that we do get the filter when we should. |
| 289 if (aec_dump_message_filter_.get()) | 264 if (aec_dump_message_filter_.get()) |
| 290 aec_dump_message_filter_->AddDelegate(this); | 265 aec_dump_message_filter_->AddDelegate(this); |
| 291 } | 266 } |
| 292 | 267 |
| 293 MediaStreamAudioProcessor::~MediaStreamAudioProcessor() { | 268 MediaStreamAudioProcessor::~MediaStreamAudioProcessor() { |
| (...skipping 154 matching lines...) |
| 448 render_fifo_.reset(); | 423 render_fifo_.reset(); |
| 449 } | 424 } |
| 450 | 425 |
| 451 void MediaStreamAudioProcessor::GetStats(AudioProcessorStats* stats) { | 426 void MediaStreamAudioProcessor::GetStats(AudioProcessorStats* stats) { |
| 452 stats->typing_noise_detected = | 427 stats->typing_noise_detected = |
| 453 (base::subtle::Acquire_Load(&typing_detected_) != false); | 428 (base::subtle::Acquire_Load(&typing_detected_) != false); |
| 454 GetAecStats(audio_processing_.get()->echo_cancellation(), stats); | 429 GetAecStats(audio_processing_.get()->echo_cancellation(), stats); |
| 455 } | 430 } |
| 456 | 431 |
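GetStats() reads |typing_detected_| with an acquire load so it can run off the capture thread. For context, a hedged sketch of the matching writer on the capture side (the real update site is outside this hunk, and the typing_detector_ member is an assumption):

    // Sketch only: the Release_Store pairs with the Acquire_Load in GetStats().
    void MediaStreamAudioProcessor::UpdateTypingDetected(bool key_pressed,
                                                         bool vad_activity) {
      const bool detected = typing_detector_->Process(key_pressed, vad_activity);
      base::subtle::Release_Store(&typing_detected_, detected);
    }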
| 457 void MediaStreamAudioProcessor::InitializeAudioProcessingModule( | 432 void MediaStreamAudioProcessor::InitializeAudioProcessingModule( |
| 458 const blink::WebMediaConstraints& constraints, int effects) { | 433 const blink::WebMediaConstraints& constraints, |
| 434 const MediaStreamDevice::AudioDeviceParameters& input_params) { |
| 459 DCHECK(main_thread_checker_.CalledOnValidThread()); | 435 DCHECK(main_thread_checker_.CalledOnValidThread()); |
| 460 DCHECK(!audio_processing_); | 436 DCHECK(!audio_processing_); |
| 461 | 437 |
| 462 MediaAudioConstraints audio_constraints(constraints, effects); | 438 MediaAudioConstraints audio_constraints(constraints, input_params.effects); |
| 463 | 439 |
| 464 // Audio mirroring can be enabled even though audio processing is otherwise | 440 // Audio mirroring can be enabled even though audio processing is otherwise |
| 465 // disabled. | 441 // disabled. |
| 466 audio_mirroring_ = audio_constraints.GetProperty( | 442 audio_mirroring_ = audio_constraints.GetProperty( |
| 467 MediaAudioConstraints::kGoogAudioMirroring); | 443 MediaAudioConstraints::kGoogAudioMirroring); |
| 468 | 444 |
| 469 #if defined(OS_IOS) | 445 #if defined(OS_IOS) |
| 470 // On iOS, VPIO provides built-in AGC and AEC. | 446 // On iOS, VPIO provides built-in AGC and AEC. |
| 471 const bool echo_cancellation = false; | 447 const bool echo_cancellation = false; |
| 472 const bool goog_agc = false; | 448 const bool goog_agc = false; |
| (...skipping 31 matching lines...) |
| 504 | 480 |
| 505 // Experimental options provided at creation. | 481 // Experimental options provided at creation. |
| 506 webrtc::Config config; | 482 webrtc::Config config; |
| 507 if (goog_experimental_aec) | 483 if (goog_experimental_aec) |
| 508 config.Set<webrtc::ExtendedFilter>(new webrtc::ExtendedFilter(true)); | 484 config.Set<webrtc::ExtendedFilter>(new webrtc::ExtendedFilter(true)); |
| 509 if (goog_experimental_ns) | 485 if (goog_experimental_ns) |
| 510 config.Set<webrtc::ExperimentalNs>(new webrtc::ExperimentalNs(true)); | 486 config.Set<webrtc::ExperimentalNs>(new webrtc::ExperimentalNs(true)); |
| 511 if (IsDelayAgnosticAecEnabled()) | 487 if (IsDelayAgnosticAecEnabled()) |
| 512 config.Set<webrtc::DelayAgnostic>(new webrtc::DelayAgnostic(true)); | 488 config.Set<webrtc::DelayAgnostic>(new webrtc::DelayAgnostic(true)); |
| 513 if (goog_beamforming) { | 489 if (goog_beamforming) { |
| 514 ConfigureBeamforming(&config, | 490 const auto& geometry = |
| 515 audio_constraints.GetPropertyAsString( | 491 GetArrayGeometryPreferringConstraints(audio_constraints, input_params); |
| 516 MediaAudioConstraints::kGoogArrayGeometry)); | 492 |
| 493 // Only enable beamforming if we have at least two mics. |
| 494 config.Set<webrtc::Beamforming>( |
| 495 new webrtc::Beamforming(geometry.size() > 1, geometry)); |
| 517 } | 496 } |
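This hunk drops the per-board geometry table in favor of GetArrayGeometryPreferringConstraints(). A sketch of the selection that call is expected to make, reusing ParseArrayGeometry() and kGoogArrayGeometry from the old code (the input_params.mic_positions field name is an assumption):

    // Sketch only: constraint-supplied geometry wins; otherwise fall back to
    // the geometry reported by the input device. Both are parsed as "x y z"
    // triplets in meters, one per microphone.
    std::vector<webrtc::Point> GetArrayGeometryPreferringConstraints(
        const MediaAudioConstraints& audio_constraints,
        const MediaStreamDevice::AudioDeviceParameters& input_params) {
      const std::string constraints_geometry =
          audio_constraints.GetPropertyAsString(
              MediaAudioConstraints::kGoogArrayGeometry);
      return constraints_geometry.empty()
                 ? ParseArrayGeometry(input_params.mic_positions)
                 : ParseArrayGeometry(constraints_geometry);
    }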
| 518 | 497 |
| 519 // Create and configure the webrtc::AudioProcessing. | 498 // Create and configure the webrtc::AudioProcessing. |
| 520 audio_processing_.reset(webrtc::AudioProcessing::Create(config)); | 499 audio_processing_.reset(webrtc::AudioProcessing::Create(config)); |
| 521 | 500 |
| 522 // Enable the audio processing components. | 501 // Enable the audio processing components. |
| 523 if (echo_cancellation) { | 502 if (echo_cancellation) { |
| 524 EnableEchoCancellation(audio_processing_.get()); | 503 EnableEchoCancellation(audio_processing_.get()); |
| 525 | 504 |
| 526 if (playout_data_source_) | 505 if (playout_data_source_) |
| (...skipping 69 matching lines...) |
| 596 | 575 |
| 597 // webrtc::AudioProcessing requires a 10 ms chunk size. We use this native | 576 // webrtc::AudioProcessing requires a 10 ms chunk size. We use this native |
| 598 // size when processing is enabled. When disabled we use the same size as | 577 // size when processing is enabled. When disabled we use the same size as |
| 599 // the source if less than 10 ms. | 578 // the source if less than 10 ms. |
| 600 // | 579 // |
| 601 // TODO(ajm): This conditional buffer size appears to be assuming knowledge of | 580 // TODO(ajm): This conditional buffer size appears to be assuming knowledge of |
| 602 // the sink based on the source parameters. PeerConnection sinks seem to want | 581 // the sink based on the source parameters. PeerConnection sinks seem to want |
| 603 // 10 ms chunks regardless, while WebAudio sinks want less, and we're assuming | 582 // 10 ms chunks regardless, while WebAudio sinks want less, and we're assuming |
| 604 // we can identify WebAudio sinks by the input chunk size. Less fragile would | 583 // we can identify WebAudio sinks by the input chunk size. Less fragile would |
| 605 // be to have the sink actually tell us how much it wants (as in the above | 584 // be to have the sink actually tell us how much it wants (as in the above |
| 606 // TODO). | 585 // TODO). |
| 607 int processing_frames = input_format.sample_rate() / 100; | 586 int processing_frames = input_format.sample_rate() / 100; |
| 608 int output_frames = output_sample_rate / 100; | 587 int output_frames = output_sample_rate / 100; |
| 609 if (!audio_processing_ && input_format.frames_per_buffer() < output_frames) { | 588 if (!audio_processing_ && input_format.frames_per_buffer() < output_frames) { |
| 610 processing_frames = input_format.frames_per_buffer(); | 589 processing_frames = input_format.frames_per_buffer(); |
| 611 output_frames = processing_frames; | 590 output_frames = processing_frames; |
| 612 } | 591 } |
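Worked example of the sizing above: at a 48 kHz input, sample_rate() / 100 = 480 frames, i.e. one 10 ms chunk. With processing disabled, a 44.1 kHz WebAudio-style source delivering 128-frame buffers hits the branch (128 < 441), so both processing_frames and output_frames drop to 128.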
| 613 | 592 |
| 614 output_format_ = media::AudioParameters( | 593 output_format_ = media::AudioParameters( |
| 615 media::AudioParameters::AUDIO_PCM_LOW_LATENCY, | 594 media::AudioParameters::AUDIO_PCM_LOW_LATENCY, |
| 616 output_channel_layout, | 595 output_channel_layout, |
| (...skipping 93 matching lines...) |
| 710 if (echo_information_) { | 689 if (echo_information_) { |
| 711 echo_information_.get()->UpdateAecDelayStats(ap->echo_cancellation()); | 690 echo_information_.get()->UpdateAecDelayStats(ap->echo_cancellation()); |
| 712 } | 691 } |
| 713 | 692 |
| 714 // Return 0 if the volume hasn't been changed, and otherwise the new volume. | 693 // Return 0 if the volume hasn't been changed, and otherwise the new volume. |
| 715 return (agc->stream_analog_level() == volume) ? | 694 return (agc->stream_analog_level() == volume) ? |
| 716 0 : agc->stream_analog_level(); | 695 0 : agc->stream_analog_level(); |
| 717 } | 696 } |
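A hypothetical caller of this return convention (OnProcessedCapture() and SetCaptureVolume() are stand-ins, not names from this CL):

    // Sketch only: apply the AGC's new mic level only when it changed;
    // a return of 0 means "leave the volume alone".
    void OnProcessedCapture(int new_volume) {
      if (new_volume != 0)
        SetCaptureVolume(new_volume);
    }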
| 718 | 697 |
| 719 } // namespace content | 698 } // namespace content |