Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright 2013 The Chromium Authors. All rights reserved. | 1 // Copyright 2013 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "content/renderer/media/media_stream_audio_processor.h" | 5 #include "content/renderer/media/media_stream_audio_processor.h" |
| 6 | 6 |
| 7 #include "base/command_line.h" | 7 #include "base/command_line.h" |
| 8 #include "base/metrics/field_trial.h" | 8 #include "base/metrics/field_trial.h" |
| 9 #include "base/metrics/histogram.h" | 9 #include "base/metrics/histogram.h" |
| 10 #include "base/trace_event/trace_event.h" | 10 #include "base/trace_event/trace_event.h" |
| (...skipping 75 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 86 | 86 |
| 87 return (group_name == "Enabled" || group_name == "DefaultEnabled"); | 87 return (group_name == "Enabled" || group_name == "DefaultEnabled"); |
| 88 } | 88 } |
| 89 | 89 |
| 90 bool IsBeamformingEnabled(const MediaAudioConstraints& audio_constraints) { | 90 bool IsBeamformingEnabled(const MediaAudioConstraints& audio_constraints) { |
| 91 return base::FieldTrialList::FindFullName("ChromebookBeamforming") == | 91 return base::FieldTrialList::FindFullName("ChromebookBeamforming") == |
| 92 "Enabled" || | 92 "Enabled" || |
| 93 audio_constraints.GetProperty(MediaAudioConstraints::kGoogBeamforming); | 93 audio_constraints.GetProperty(MediaAudioConstraints::kGoogBeamforming); |
| 94 } | 94 } |
| 95 | 95 |
| 96 void ConfigureBeamforming(webrtc::Config* config, | |
| 97 const std::string& geometry_str) { | |
| 98 std::vector<webrtc::Point> geometry = ParseArrayGeometry(geometry_str); | |
| 99 #if defined(OS_CHROMEOS) | |
| 100 if (geometry.size() == 0) { | |
|
emircan
2015/08/03 18:06:22
std::vector::empty() can be used here.
ajm
2015/08/04 01:16:35
Done.
| |
| 101 const std::string board = base::SysInfo::GetLsbReleaseBoard(); | |
|
emircan
2015/08/03 18:06:22
const std::string& board can avoid a copy.
ajm
2015/08/04 01:16:35
I don't think this can avoid a copy. GetLsbRelease
| |
| 102 if (board.find("nyan_kitty") != std::string::npos) { | |
| 103 geometry.push_back(webrtc::Point(-0.03f, 0.f, 0.f)); | |
| 104 geometry.push_back(webrtc::Point(0.03f, 0.f, 0.f)); | |
| 105 } else if (board.find("peach_pi") != std::string::npos) { | |
| 106 geometry.push_back(webrtc::Point(-0.025f, 0.f, 0.f)); | |
| 107 geometry.push_back(webrtc::Point(0.025f, 0.f, 0.f)); | |
| 108 } else if (board.find("samus") != std::string::npos) { | |
| 109 geometry.push_back(webrtc::Point(-0.032f, 0.f, 0.f)); | |
| 110 geometry.push_back(webrtc::Point(0.032f, 0.f, 0.f)); | |
| 111 } else if (board.find("swanky") != std::string::npos) { | |
| 112 geometry.push_back(webrtc::Point(-0.026f, 0.f, 0.f)); | |
| 113 geometry.push_back(webrtc::Point(0.026f, 0.f, 0.f)); | |
| 114 } | |
| 115 } | |
| 116 #endif | |
| 117 config->Set<webrtc::Beamforming>( | |
| 118 new webrtc::Beamforming(geometry.size() > 1, geometry)); | |
| 119 } | |
| 120 | |
| 96 } // namespace | 121 } // namespace |
| 97 | 122 |
| 98 // Wraps AudioBus to provide access to the array of channel pointers, since this | 123 // Wraps AudioBus to provide access to the array of channel pointers, since this |
| 99 // is the type webrtc::AudioProcessing deals in. The array is refreshed on every | 124 // is the type webrtc::AudioProcessing deals in. The array is refreshed on every |
| 100 // channel_ptrs() call, and will be valid until the underlying AudioBus pointers | 125 // channel_ptrs() call, and will be valid until the underlying AudioBus pointers |
| 101 // are changed, e.g. through calls to SetChannelData() or SwapChannels(). | 126 // are changed, e.g. through calls to SetChannelData() or SwapChannels(). |
| 102 // | 127 // |
| 103 // All methods are called on one of the capture or render audio threads | 128 // All methods are called on one of the capture or render audio threads |
| 104 // exclusively. | 129 // exclusively. |
| 105 class MediaStreamAudioBus { | 130 class MediaStreamAudioBus { |
| (...skipping 371 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 477 | 502 |
| 478 // Experimental options provided at creation. | 503 // Experimental options provided at creation. |
| 479 webrtc::Config config; | 504 webrtc::Config config; |
| 480 if (goog_experimental_aec) | 505 if (goog_experimental_aec) |
| 481 config.Set<webrtc::ExtendedFilter>(new webrtc::ExtendedFilter(true)); | 506 config.Set<webrtc::ExtendedFilter>(new webrtc::ExtendedFilter(true)); |
| 482 if (goog_experimental_ns) | 507 if (goog_experimental_ns) |
| 483 config.Set<webrtc::ExperimentalNs>(new webrtc::ExperimentalNs(true)); | 508 config.Set<webrtc::ExperimentalNs>(new webrtc::ExperimentalNs(true)); |
| 484 if (IsDelayAgnosticAecEnabled()) | 509 if (IsDelayAgnosticAecEnabled()) |
| 485 config.Set<webrtc::DelayAgnostic>(new webrtc::DelayAgnostic(true)); | 510 config.Set<webrtc::DelayAgnostic>(new webrtc::DelayAgnostic(true)); |
| 486 if (goog_beamforming) { | 511 if (goog_beamforming) { |
| 487 ConfigureBeamforming(&config, audio_constraints.GetPropertyAsString( | 512 ConfigureBeamforming(&config, |
| 488 MediaAudioConstraints::kGoogArrayGeometry)); | 513 audio_constraints.GetPropertyAsString( |
| 514 MediaAudioConstraints::kGoogArrayGeometry)); | |
| 489 } | 515 } |
| 490 | 516 |
| 491 // Create and configure the webrtc::AudioProcessing. | 517 // Create and configure the webrtc::AudioProcessing. |
| 492 audio_processing_.reset(webrtc::AudioProcessing::Create(config)); | 518 audio_processing_.reset(webrtc::AudioProcessing::Create(config)); |
| 493 | 519 |
| 494 // Enable the audio processing components. | 520 // Enable the audio processing components. |
| 495 if (echo_cancellation) { | 521 if (echo_cancellation) { |
| 496 EnableEchoCancellation(audio_processing_.get()); | 522 EnableEchoCancellation(audio_processing_.get()); |
| 497 | 523 |
| 498 if (playout_data_source_) | 524 if (playout_data_source_) |
| (...skipping 23 matching lines...) Expand all Loading... | |
| 522 typing_detector_.reset(new webrtc::TypingDetection()); | 548 typing_detector_.reset(new webrtc::TypingDetection()); |
| 523 EnableTypingDetection(audio_processing_.get(), typing_detector_.get()); | 549 EnableTypingDetection(audio_processing_.get(), typing_detector_.get()); |
| 524 } | 550 } |
| 525 | 551 |
| 526 if (goog_agc) | 552 if (goog_agc) |
| 527 EnableAutomaticGainControl(audio_processing_.get()); | 553 EnableAutomaticGainControl(audio_processing_.get()); |
| 528 | 554 |
| 529 RecordProcessingState(AUDIO_PROCESSING_ENABLED); | 555 RecordProcessingState(AUDIO_PROCESSING_ENABLED); |
| 530 } | 556 } |
| 531 | 557 |
| 532 void MediaStreamAudioProcessor::ConfigureBeamforming( | |
| 533 webrtc::Config* config, | |
| 534 const std::string& geometry_str) const { | |
| 535 std::vector<webrtc::Point> geometry = ParseArrayGeometry(geometry_str); | |
| 536 #if defined(OS_CHROMEOS) | |
| 537 if (geometry.size() == 0) { | |
| 538 const std::string board = base::SysInfo::GetLsbReleaseBoard(); | |
| 539 if (board.find("nyan_kitty") != std::string::npos) { | |
| 540 geometry.push_back(webrtc::Point(-0.03f, 0.f, 0.f)); | |
| 541 geometry.push_back(webrtc::Point(0.03f, 0.f, 0.f)); | |
| 542 } else if (board.find("peach_pi") != std::string::npos) { | |
| 543 geometry.push_back(webrtc::Point(-0.025f, 0.f, 0.f)); | |
| 544 geometry.push_back(webrtc::Point(0.025f, 0.f, 0.f)); | |
| 545 } else if (board.find("samus") != std::string::npos) { | |
| 546 geometry.push_back(webrtc::Point(-0.032f, 0.f, 0.f)); | |
| 547 geometry.push_back(webrtc::Point(0.032f, 0.f, 0.f)); | |
| 548 } else if (board.find("swanky") != std::string::npos) { | |
| 549 geometry.push_back(webrtc::Point(-0.026f, 0.f, 0.f)); | |
| 550 geometry.push_back(webrtc::Point(0.026f, 0.f, 0.f)); | |
| 551 } | |
| 552 } | |
| 553 #endif | |
| 554 config->Set<webrtc::Beamforming>(new webrtc::Beamforming(geometry.size() > 1, | |
| 555 geometry)); | |
| 556 } | |
| 557 | |
| 558 std::vector<webrtc::Point> MediaStreamAudioProcessor::ParseArrayGeometry( | |
| 559 const std::string& geometry_str) const { | |
| 560 std::vector<webrtc::Point> result; | |
| 561 std::vector<float> values; | |
| 562 std::istringstream str(geometry_str); | |
| 563 std::copy(std::istream_iterator<float>(str), | |
| 564 std::istream_iterator<float>(), | |
| 565 std::back_inserter(values)); | |
| 566 if (values.size() % 3 == 0) { | |
| 567 for (size_t i = 0; i < values.size(); i += 3) { | |
| 568 result.push_back(webrtc::Point(values[i + 0], | |
| 569 values[i + 1], | |
| 570 values[i + 2])); | |
| 571 } | |
| 572 } | |
| 573 return result; | |
| 574 } | |
| 575 | |
| 576 void MediaStreamAudioProcessor::InitializeCaptureFifo( | 558 void MediaStreamAudioProcessor::InitializeCaptureFifo( |
| 577 const media::AudioParameters& input_format) { | 559 const media::AudioParameters& input_format) { |
| 578 DCHECK(main_thread_checker_.CalledOnValidThread()); | 560 DCHECK(main_thread_checker_.CalledOnValidThread()); |
| 579 DCHECK(input_format.IsValid()); | 561 DCHECK(input_format.IsValid()); |
| 580 input_format_ = input_format; | 562 input_format_ = input_format; |
| 581 | 563 |
| 582 // TODO(ajm): For now, we assume fixed parameters for the output when audio | 564 // TODO(ajm): For now, we assume fixed parameters for the output when audio |
| 583 // processing is enabled, to match the previous behavior. We should either | 565 // processing is enabled, to match the previous behavior. We should either |
| 584 // use the input parameters (in which case, audio processing will convert | 566 // use the input parameters (in which case, audio processing will convert |
| 585 // at output) or ideally, have a backchannel from the sink to know what | 567 // at output) or ideally, have a backchannel from the sink to know what |
| (...skipping 140 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 726 if (echo_information_) { | 708 if (echo_information_) { |
| 727 echo_information_.get()->UpdateAecDelayStats(ap->echo_cancellation()); | 709 echo_information_.get()->UpdateAecDelayStats(ap->echo_cancellation()); |
| 728 } | 710 } |
| 729 | 711 |
| 730 // Return 0 if the volume hasn't been changed, and otherwise the new volume. | 712 // Return 0 if the volume hasn't been changed, and otherwise the new volume. |
| 731 return (agc->stream_analog_level() == volume) ? | 713 return (agc->stream_analog_level() == volume) ? |
| 732 0 : agc->stream_analog_level(); | 714 0 : agc->stream_analog_level(); |
| 733 } | 715 } |
| 734 | 716 |
| 735 } // namespace content | 717 } // namespace content |
| OLD | NEW |