OLD | NEW |
1 // Copyright 2013 The Chromium Authors. All rights reserved. | 1 // Copyright 2013 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "content/renderer/media/media_stream_audio_processor.h" | 5 #include "content/renderer/media/media_stream_audio_processor.h" |
6 | 6 |
7 #include "base/debug/trace_event.h" | 7 #include "base/debug/trace_event.h" |
8 #if defined(OS_MACOSX) | 8 #if defined(OS_MACOSX) |
9 #include "base/metrics/field_trial.h" | 9 #include "base/metrics/field_trial.h" |
10 #endif | 10 #endif |
11 #include "base/metrics/histogram.h" | 11 #include "base/metrics/histogram.h" |
12 #include "content/renderer/media/media_stream_audio_processor_options.h" | 12 #include "content/renderer/media/media_stream_audio_processor_options.h" |
13 #include "content/renderer/media/rtc_media_constraints.h" | 13 #include "content/renderer/media/rtc_media_constraints.h" |
14 #include "content/renderer/media/webrtc_audio_device_impl.h" | 14 #include "content/renderer/media/webrtc_audio_device_impl.h" |
15 #include "media/audio/audio_parameters.h" | 15 #include "media/audio/audio_parameters.h" |
16 #include "media/base/audio_converter.h" | 16 #include "media/base/audio_converter.h" |
17 #include "media/base/audio_fifo.h" | 17 #include "media/base/audio_fifo.h" |
18 #include "media/base/channel_layout.h" | 18 #include "media/base/channel_layout.h" |
19 #include "third_party/WebKit/public/platform/WebMediaConstraints.h" | 19 #include "third_party/WebKit/public/platform/WebMediaConstraints.h" |
20 #include "third_party/libjingle/source/talk/app/webrtc/mediaconstraintsinterface
.h" | 20 #include "third_party/libjingle/source/talk/app/webrtc/mediaconstraintsinterface
.h" |
21 #include "third_party/webrtc/modules/audio_processing/typing_detection.h" | 21 #include "third_party/webrtc/modules/audio_processing/typing_detection.h" |
22 | 22 |
23 #if defined(OS_CHROMEOS) | |
24 #include "base/sys_info.h" | |
25 #endif | |
26 | |
27 namespace content { | 23 namespace content { |
28 | 24 |
29 namespace { | 25 namespace { |
30 | 26 |
31 using webrtc::AudioProcessing; | 27 using webrtc::AudioProcessing; |
32 | 28 |
33 #if defined(OS_ANDROID) | 29 #if defined(OS_ANDROID) |
34 const int kAudioProcessingSampleRate = 16000; | 30 const int kAudioProcessingSampleRate = 16000; |
35 #else | 31 #else |
36 const int kAudioProcessingSampleRate = 32000; | 32 const int kAudioProcessingSampleRate = 32000; |
(...skipping 405 matching lines...)
442 const bool goog_experimental_aec = audio_constraints.GetProperty( | 438 const bool goog_experimental_aec = audio_constraints.GetProperty( |
443 MediaAudioConstraints::kGoogExperimentalEchoCancellation); | 439 MediaAudioConstraints::kGoogExperimentalEchoCancellation); |
444 const bool goog_typing_detection = audio_constraints.GetProperty( | 440 const bool goog_typing_detection = audio_constraints.GetProperty( |
445 MediaAudioConstraints::kGoogTypingNoiseDetection); | 441 MediaAudioConstraints::kGoogTypingNoiseDetection); |
446 #endif | 442 #endif |
447 | 443 |
448 const bool goog_ns = audio_constraints.GetProperty( | 444 const bool goog_ns = audio_constraints.GetProperty( |
449 MediaAudioConstraints::kGoogNoiseSuppression); | 445 MediaAudioConstraints::kGoogNoiseSuppression); |
450 const bool goog_experimental_ns = audio_constraints.GetProperty( | 446 const bool goog_experimental_ns = audio_constraints.GetProperty( |
451 MediaAudioConstraints::kGoogExperimentalNoiseSuppression); | 447 MediaAudioConstraints::kGoogExperimentalNoiseSuppression); |
452 const bool goog_beamforming = audio_constraints.GetProperty( | |
453 MediaAudioConstraints::kGoogBeamforming); | |
454 const bool goog_high_pass_filter = audio_constraints.GetProperty( | 448 const bool goog_high_pass_filter = audio_constraints.GetProperty( |
455 MediaAudioConstraints::kGoogHighpassFilter); | 449 MediaAudioConstraints::kGoogHighpassFilter); |
456 | 450 |
457 // Return immediately if no goog constraint is enabled. | 451 // Return immediately if no goog constraint is enabled. |
458 if (!echo_cancellation && !goog_experimental_aec && !goog_ns && | 452 if (!echo_cancellation && !goog_experimental_aec && !goog_ns && |
459 !goog_high_pass_filter && !goog_typing_detection && | 453 !goog_high_pass_filter && !goog_typing_detection && |
460 !goog_agc && !goog_experimental_ns && !goog_beamforming) { | 454 !goog_agc && !goog_experimental_ns) { |
461 RecordProcessingState(AUDIO_PROCESSING_DISABLED); | 455 RecordProcessingState(AUDIO_PROCESSING_DISABLED); |
462 return; | 456 return; |
463 } | 457 } |
464 | 458 |
465 // Experimental options provided at creation. | 459 // Experimental options provided at creation. |
466 webrtc::Config config; | 460 webrtc::Config config; |
467 if (goog_experimental_aec) | 461 if (goog_experimental_aec) |
468 config.Set<webrtc::DelayCorrection>(new webrtc::DelayCorrection(true)); | 462 config.Set<webrtc::DelayCorrection>(new webrtc::DelayCorrection(true)); |
469 if (goog_experimental_ns) | 463 if (goog_experimental_ns) |
470 config.Set<webrtc::ExperimentalNs>(new webrtc::ExperimentalNs(true)); | 464 config.Set<webrtc::ExperimentalNs>(new webrtc::ExperimentalNs(true)); |
471 #if defined(OS_MACOSX) | 465 #if defined(OS_MACOSX) |
472 if (base::FieldTrialList::FindFullName("NoReportedDelayOnMac") == "Enabled") | 466 if (base::FieldTrialList::FindFullName("NoReportedDelayOnMac") == "Enabled") |
473 config.Set<webrtc::ReportedDelay>(new webrtc::ReportedDelay(false)); | 467 config.Set<webrtc::ReportedDelay>(new webrtc::ReportedDelay(false)); |
474 #endif | 468 #endif |
475 if (goog_beamforming) { | |
476 ConfigureBeamforming(&config); | |
477 } | |
478 | 469 |
479 // Create and configure the webrtc::AudioProcessing. | 470 // Create and configure the webrtc::AudioProcessing. |
480 audio_processing_.reset(webrtc::AudioProcessing::Create(config)); | 471 audio_processing_.reset(webrtc::AudioProcessing::Create(config)); |
481 | 472 |
482 // Enable the audio processing components. | 473 // Enable the audio processing components. |
483 if (echo_cancellation) { | 474 if (echo_cancellation) { |
484 EnableEchoCancellation(audio_processing_.get()); | 475 EnableEchoCancellation(audio_processing_.get()); |
485 | 476 |
486 if (playout_data_source_) | 477 if (playout_data_source_) |
487 playout_data_source_->AddPlayoutSink(this); | 478 playout_data_source_->AddPlayoutSink(this); |
(...skipping 15 matching lines...)
503 typing_detector_.reset(new webrtc::TypingDetection()); | 494 typing_detector_.reset(new webrtc::TypingDetection()); |
504 EnableTypingDetection(audio_processing_.get(), typing_detector_.get()); | 495 EnableTypingDetection(audio_processing_.get(), typing_detector_.get()); |
505 } | 496 } |
506 | 497 |
507 if (goog_agc) | 498 if (goog_agc) |
508 EnableAutomaticGainControl(audio_processing_.get()); | 499 EnableAutomaticGainControl(audio_processing_.get()); |
509 | 500 |
510 RecordProcessingState(AUDIO_PROCESSING_ENABLED); | 501 RecordProcessingState(AUDIO_PROCESSING_ENABLED); |
511 } | 502 } |
512 | 503 |
513 void MediaStreamAudioProcessor::ConfigureBeamforming(webrtc::Config* config) { | |
514 bool enabled = false; | |
515 std::vector<webrtc::Point> geometry(1, webrtc::Point(0.f, 0.f, 0.f)); | |
516 #if defined(OS_CHROMEOS) | |
517 const std::string board = base::SysInfo::GetLsbReleaseBoard(); | |
518 if (board == "peach_pi") { | |
519 enabled = true; | |
520 geometry.push_back(webrtc::Point(0.050f, 0.f, 0.f)); | |
521 } else if (board == "swanky") { | |
522 // TODO(aluebs): Verify beamforming works on Swanky and enable. | |
523 enabled = false; | |
524 geometry.push_back(webrtc::Point(0.052f, 0.f, 0.f)); | |
525 } | |
526 #endif | |
527 config->Set<webrtc::Beamforming>(new webrtc::Beamforming(enabled, geometry)); | |
528 } | |
529 | |
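For reference, a minimal standalone sketch of the configuration the deleted ConfigureBeamforming() helper built for the "peach_pi" board, assuming the same webrtc::Beamforming and webrtc::Point API used in the removed code (this is illustrative only, not part of the CL):

    // Two-mic linear array: first mic at the origin, second 5 cm along the
    // x-axis, matching the removed peach_pi branch above.
    std::vector<webrtc::Point> geometry;
    geometry.push_back(webrtc::Point(0.f, 0.f, 0.f));
    geometry.push_back(webrtc::Point(0.050f, 0.f, 0.f));
    webrtc::Config config;
    config.Set<webrtc::Beamforming>(new webrtc::Beamforming(true, geometry));
    // The config is then handed to the APM at creation time, exactly as the
    // surviving code does:
    //   audio_processing_.reset(webrtc::AudioProcessing::Create(config));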
530 void MediaStreamAudioProcessor::InitializeCaptureFifo( | 504 void MediaStreamAudioProcessor::InitializeCaptureFifo( |
531 const media::AudioParameters& input_format) { | 505 const media::AudioParameters& input_format) { |
532 DCHECK(main_thread_checker_.CalledOnValidThread()); | 506 DCHECK(main_thread_checker_.CalledOnValidThread()); |
533 DCHECK(input_format.IsValid()); | 507 DCHECK(input_format.IsValid()); |
534 input_format_ = input_format; | 508 input_format_ = input_format; |
535 | 509 |
536 // TODO(ajm): For now, we assume fixed parameters for the output when audio | 510 // TODO(ajm): For now, we assume fixed parameters for the output when audio |
537 // processing is enabled, to match the previous behavior. We should either | 511 // processing is enabled, to match the previous behavior. We should either |
538 // use the input parameters (in which case, audio processing will convert | 512 // use the input parameters (in which case, audio processing will convert |
539 // at output) or ideally, have a backchannel from the sink to know what | 513 // at output) or ideally, have a backchannel from the sink to know what |
(...skipping 130 matching lines...)
670 vad->stream_has_voice()); | 644 vad->stream_has_voice()); |
671 base::subtle::Release_Store(&typing_detected_, detected); | 645 base::subtle::Release_Store(&typing_detected_, detected); |
672 } | 646 } |
673 | 647 |
674 // Return 0 if the volume hasn't been changed, and otherwise the new volume. | 648 // Return 0 if the volume hasn't been changed, and otherwise the new volume. |
675 return (agc->stream_analog_level() == volume) ? | 649 return (agc->stream_analog_level() == volume) ? |
676 0 : agc->stream_analog_level(); | 650 0 : agc->stream_analog_level(); |
677 } | 651 } |
678 | 652 |
679 } // namespace content | 653 } // namespace content |
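The Release_Store() of typing_detected_ shown above pairs with an Acquire_Load() on the reader side. A minimal sketch of such a reader, assuming typing_detected_ is a base::subtle::Atomic32 (the accessor name here is hypothetical):

    // May run on a different thread than the capture thread that performs
    // the Release_Store() in the diff above; no extra locking is needed.
    bool MediaStreamAudioProcessor::typing_detected() const {
      return base::subtle::Acquire_Load(&typing_detected_) != 0;
    }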