Chromium Code Reviews

Unified Diff: content/renderer/media/webrtc_audio_processor.cc

Issue 37793005: Move the APM to Chrome. (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Added a switch; the APM in WebRTC is used when the switch is off, otherwise the APM in Chrome is used. Created 7 years, 2 months ago
Index: content/renderer/media/webrtc_audio_processor.cc
diff --git a/content/renderer/media/webrtc_audio_processor.cc b/content/renderer/media/webrtc_audio_processor.cc
new file mode 100644
index 0000000000000000000000000000000000000000..b1456d3bc4aabfc078f0b5e29eaabbf39df9070c
--- /dev/null
+++ b/content/renderer/media/webrtc_audio_processor.cc
@@ -0,0 +1,424 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "content/renderer/media/webrtc_audio_processor.h"
+
+#include <algorithm>
+
+#include "base/command_line.h"
+#include "base/compiler_specific.h"
+#include "base/debug/trace_event.h"
+#include "base/logging.h"
+#include "build/build_config.h"
+#include "content/public/common/content_switches.h"
+#include "media/audio/audio_parameters.h"
+#include "media/base/audio_converter.h"
+#include "media/base/audio_fifo.h"
+#include "media/base/channel_layout.h"
+
+namespace content {
+
+namespace {
+
+using webrtc::AudioProcessing;
+using webrtc::MediaConstraintsInterface;
+
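+// The sample rate the audio processing module (APM) runs at. AECM on mobile
+// devices supports at most 16 kHz, while the desktop AEC runs at 32 kHz.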
+#if defined(OS_ANDROID)
+const int kAudioProcessingSampleRate = 16000;
+#else
+const int kAudioProcessingSampleRate = 32000;
+#endif
+const int kAudioProcessingNumberOfChannels = 1;
+
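+// The FIFO holds at most two buffers of whichever of the source and sink
+// buffer sizes is larger (see WebRtcAudioConverter below).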
+const int kMaxNumberOfBuffersInFifo = 2;
+
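+// Returns true if the constraint named |key| is present and set to true in
+// either the mandatory or the optional constraint set.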
+bool GetPropertyFromConstraints(const MediaConstraintsInterface* constraints,
+ const std::string& key) {
+ bool value = false;
+ return webrtc::FindConstraint(constraints, key, &value, NULL) && value;
+}
+
+// TODO(xians): Extract all these methods into a helper class.
+void EnableEchoCancellation(AudioProcessing* audio_processing) {
+ DCHECK(audio_processing);
+#if defined(OS_IOS) || defined(OS_ANDROID)
+  // Mobile devices use AECM.
+ if (audio_processing->echo_control_mobile()->Enable(true))
+ NOTREACHED();
+
+ if (audio_processing->echo_control_mobile()->set_routing_mode(
+ webrtc::EchoControlMobile::kSpeakerphone))
+ NOTREACHED();
+
+ return;
+#endif
+ if (audio_processing->echo_cancellation()->Enable(true))
+ NOTREACHED();
+ if (audio_processing->echo_cancellation()->set_suppression_level(
+ webrtc::EchoCancellation::kHighSuppression))
+ NOTREACHED();
+
+ // Enable the metrics for AEC.
+ if (audio_processing->echo_cancellation()->enable_metrics(true))
+ NOTREACHED();
+ if (audio_processing->echo_cancellation()->enable_delay_logging(true))
+ NOTREACHED();
+}
+
+void EnableNoiseSuppression(AudioProcessing* audio_processing) {
+ DCHECK(audio_processing);
+ if (audio_processing->noise_suppression()->set_level(
+ webrtc::NoiseSuppression::kHigh))
+ NOTREACHED();
+
+ if (audio_processing->noise_suppression()->Enable(true))
+ NOTREACHED();
+}
+
+void EnableHighPassFilter(AudioProcessing* audio_processing) {
+ DCHECK(audio_processing);
+ if (audio_processing->high_pass_filter()->Enable(true))
+ NOTREACHED();
+}
+
+// TODO(xians): Add support for stereo swapping.
+void EnableTypingDetection(AudioProcessing* audio_processing) {
+ DCHECK(audio_processing);
+ if (audio_processing->voice_detection()->Enable(true))
+ NOTREACHED();
+
+ if (audio_processing->voice_detection()->set_likelihood(
+ webrtc::VoiceDetection::kVeryLowLikelihood))
+ NOTREACHED();
+}
+
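+// The experimental AEC turns on delay correction in the APM through
+// SetExtraOptions().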
+void EnableExperimentalEchoCancellation(AudioProcessing* audio_processing) {
+ DCHECK(audio_processing);
+ webrtc::Config config;
+ config.Set<webrtc::DelayCorrection>(new webrtc::DelayCorrection(true));
+ audio_processing->SetExtraOptions(config);
+}
+
+void StartAecDump(AudioProcessing* audio_processing) {
+ static const char kAecDumpFilename[] = "/tmp/audio.aecdump";
Henrik Grunell 2013/10/31 11:56:12 This should be different for different platforms.
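+  // TODO(xians): Make the dump file location platform dependent;
+  // file_util::GetTempDir() may be a suitable starting point.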
+  if (audio_processing->StartDebugRecording(kAecDumpFilename))
+    LOG(ERROR) << "Failed to start AEC debug recording";
+}
+
+void StopAecDump(AudioProcessing* audio_processing) {
+  if (audio_processing->StopDebugRecording())
+    LOG(ERROR) << "Failed to stop AEC debug recording";
+}
+
+} // namespace
+
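+// Wraps a media::AudioConverter and an AudioFifo to buffer incoming audio
+// and chop it into 10 ms chunks in the sink format, which is the native
+// packet size the WebRTC audio processing module operates on.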
+class WebRtcAudioProcessor::WebRtcAudioConverter
+ : public media::AudioConverter::InputCallback {
+ public:
+  WebRtcAudioConverter(const media::AudioParameters& source_params,
+                       const media::AudioParameters& sink_params)
+      : source_params_(source_params),
+        sink_params_(sink_params) {
+
+ // Create the audio converter which is responsible for down-mixing and
+ // resampling.
+ audio_converter_.reset(
+ new media::AudioConverter(source_params, sink_params_, false));
+ audio_converter_->AddInput(this);
+
+    // Create and initialize the audio FIFO and audio bus wrapper.
+    // The FIFO should hold at least twice the source buffer size or twice
+    // the sink buffer size, whichever is larger.
+ int buffer_size = std::max(
+ kMaxNumberOfBuffersInFifo * source_params.frames_per_buffer(),
+ kMaxNumberOfBuffersInFifo * sink_params_.frames_per_buffer());
+ fifo_.reset(new media::AudioFifo(source_params.channels(), buffer_size));
+ // TODO(xians): Use CreateWrapper to save one memcpy.
+ audio_wrapper_ = media::AudioBus::Create(sink_params_.channels(),
+ sink_params_.frames_per_buffer());
+ }
+
+ ~WebRtcAudioConverter() {
+ audio_converter_->RemoveInput(this);
+ }
+
+ void Push(media::AudioBus* audio_source) {
+    DCHECK_LE(fifo_->frames() + audio_source->frames(), fifo_->max_frames());
+ fifo_->Push(audio_source);
+ }
+
+ bool Convert() {
+    // Return false if there is less than 10 ms of data in the FIFO.
+ if (fifo_->frames() < (source_params_.sample_rate() / 100))
+ return false;
+
+    // Convert 10 ms of data to the output format; this will trigger
+    // ProvideInput().
+ audio_converter_->Convert(audio_wrapper_.get());
+
+    // TODO(xians): Find a better way to handle the interleaved and
+    // deinterleaved format switching.
+    audio_wrapper_->ToInterleaved(audio_wrapper_->frames(),
+                                  sink_params_.bits_per_sample() / 8,
+                                  audio_frame_.data_);
+
+ audio_frame_.samples_per_channel_ = sink_params_.frames_per_buffer();
+ audio_frame_.sample_rate_hz_ = sink_params_.sample_rate();
+ audio_frame_.speech_type_ = webrtc::AudioFrame::kNormalSpeech;
+ audio_frame_.vad_activity_ = webrtc::AudioFrame::kVadUnknown;
+ audio_frame_.num_channels_ = sink_params_.channels();
+
+ return true;
+ }
+
+ webrtc::AudioFrame* audio_frame() { return &audio_frame_; }
+ const media::AudioParameters& source_parameters() const {
+ return source_params_;
+ }
+ const media::AudioParameters& sink_parameters() const {
+ return sink_params_;
+ }
+
+ private:
+ // AudioConverter::InputCallback implementation.
+  virtual double ProvideInput(media::AudioBus* audio_bus,
+                              base::TimeDelta buffer_delay) OVERRIDE {
+    // The first Convert() call can trigger ProvideInput() twice; return 0
+    // (silence) if the FIFO cannot yet satisfy the request.
+    // TODO(xians): Use SincResampler directly to avoid this.
+    if (fifo_->frames() < audio_bus->frames())
+      return 0;
+
+ fifo_->Consume(audio_bus, 0, audio_bus->frames());
+ return 1.0;
+ }
+
+ webrtc::AudioFrame audio_frame_;
+
+ // TODO(xians): consider using SincResampler to save some memcpy.
+ // Handles mixing and resampling between input and output parameters.
+ scoped_ptr<media::AudioConverter> audio_converter_;
+ scoped_ptr<media::AudioBus> audio_wrapper_;
+ scoped_ptr<media::AudioFifo> fifo_;
+
+ media::AudioParameters source_params_;
+ media::AudioParameters sink_params_;
+};
+
+WebRtcAudioProcessor::WebRtcAudioProcessor(
+ const webrtc::MediaConstraintsInterface* constraints)
+ : render_delay_ms_(0) {
+ InitializeAudioProcessingModule(constraints);
+}
+
+WebRtcAudioProcessor::~WebRtcAudioProcessor() {
+ StopAudioProcessing();
+}
+
+void WebRtcAudioProcessor::SetFormat(
+ const media::AudioParameters& source_params) {
+ DCHECK(source_params.IsValid());
+
+ // Create and initialize audio converter.
+ int sink_sample_rate = audio_processing_.get() ?
+ kAudioProcessingSampleRate : source_params.sample_rate();
+ media::ChannelLayout sink_channel_layout = audio_processing_.get() ?
+ media::CHANNEL_LAYOUT_MONO : source_params.channel_layout();
+
+  // WebRTC uses 10 ms of data as its native packet size.
+ media::AudioParameters sink_params(
+ media::AudioParameters::AUDIO_PCM_LOW_LATENCY, sink_channel_layout,
+ sink_sample_rate, 16, sink_sample_rate / 100);
+ capture_converter_.reset(
+ new WebRtcAudioConverter(source_params, sink_params));
+}
+
+void WebRtcAudioProcessor::Push(media::AudioBus* audio_source) {
+ DCHECK(capture_converter_.get());
+ capture_converter_->Push(audio_source);
+}
+
+bool WebRtcAudioProcessor::ProcessAndConsume10MsData(
+ int capture_audio_delay_ms, int volume, bool key_pressed) {
+ TRACE_EVENT0("audio",
+ "WebRtcAudioProcessor::ProcessAndConsume10MsData");
+
+ if (!capture_converter_->Convert())
+ return false;
+
+ Process10MsData(capture_audio_delay_ms, volume, key_pressed);
+
+ return true;
+}
+
+const int16* WebRtcAudioProcessor::OutputBuffer() const {
+ return &capture_converter_->audio_frame()->data_[0];
+}
+
+const media::AudioParameters&
+WebRtcAudioProcessor::OutputFormat() const {
+ return capture_converter_->sink_parameters();
+}
+
+void WebRtcAudioProcessor::Process10MsData(int capture_audio_delay_ms,
+ int volume,
+ bool key_pressed) {
+ if (!audio_processing_.get())
+ return;
+
+  // TODO(xians): Add a DCHECK that this is a 10 ms data chunk.
+
+ TRACE_EVENT0("audio", "WebRtcAudioProcessor::Process10MsData");
+ DCHECK_EQ(audio_processing_->sample_rate_hz(),
+ capture_converter_->sink_parameters().sample_rate());
+ DCHECK_EQ(audio_processing_->num_input_channels(),
+ capture_converter_->sink_parameters().channels());
+ DCHECK_EQ(audio_processing_->num_output_channels(),
+ capture_converter_->sink_parameters().channels());
+
+  // The total delay is the sum of the capture delay and the render delay.
+ int total_delay_ms = 0;
+ {
+ base::AutoLock auto_lock(lock_);
+ total_delay_ms = capture_audio_delay_ms + render_delay_ms_;
+ }
+
+ audio_processing_->set_stream_delay_ms(total_delay_ms);
+ webrtc::GainControl* agc = audio_processing_->gain_control();
+ if (agc->set_stream_analog_level(volume))
+ NOTREACHED();
+ int err = audio_processing_->ProcessStream(
+ capture_converter_->audio_frame());
+ if (err) {
+ NOTREACHED() << "ProcessStream() error: " << err;
+ }
+
+ // TODO(xians): Get the new volume and pass it to the capturer.
+// new_volume_ = agc->stream_analog_level();
+
+ // TODO(xians): Handle the typing detection event here.
+ // TypingDetection(key_pressed);
+}
+
+void WebRtcAudioProcessor::FeedRenderDataToAudioProcessing(
+ const int16* render_audio, int sample_rate, int number_of_channels,
+ int number_of_frames, int render_delay_ms) {
+ if (!audio_processing_.get())
+ return;
+
+ TRACE_EVENT0("audio",
+ "WebRtcAudioProcessor::FeedRenderDataToAudioProcessing");
+ {
+ base::AutoLock auto_lock(lock_);
+ render_delay_ms_ = render_delay_ms;
+ }
+
+ InitializeRenderConverterIfNeeded(sample_rate, number_of_channels,
+ number_of_frames);
+ DCHECK(render_converter_.get());
+
+  // TODO(xians): Avoid the extra copy and the interleave/deinterleave
+  // round trip here.
+ scoped_ptr<media::AudioBus> data_bus = media::AudioBus::Create(
+ number_of_channels, number_of_frames);
+ data_bus->FromInterleaved(render_audio,
+ data_bus->frames(),
+ sizeof(render_audio[0]));
+ render_converter_->Push(data_bus.get());
+ while (render_converter_->Convert()) {
+ audio_processing_->AnalyzeReverseStream(render_converter_->audio_frame());
+ }
+}
+
+void WebRtcAudioProcessor::InitializeAudioProcessingModule(
+ const webrtc::MediaConstraintsInterface* constraints) {
+ const CommandLine& command_line = *CommandLine::ForCurrentProcess();
+ if (!command_line.HasSwitch(switches::kEnableWebRtcAudioProcessor))
+ return;
+
+ if (!constraints)
+ return;
+
+ bool enable_aec = GetPropertyFromConstraints(
+ constraints, MediaConstraintsInterface::kEchoCancellation);
+ bool enable_experimental_aec = GetPropertyFromConstraints(
+ constraints, MediaConstraintsInterface::kExperimentalEchoCancellation);
+ bool enable_ns = GetPropertyFromConstraints(
+ constraints, MediaConstraintsInterface::kNoiseSuppression);
+ bool enable_high_pass_filter = GetPropertyFromConstraints(
+ constraints, MediaConstraintsInterface::kHighpassFilter);
+ bool enable_typing_detection = GetPropertyFromConstraints(
+ constraints, MediaConstraintsInterface::kTypingNoiseDetection);
+  // TODO(xians): Figure out how to start and stop the AEC dump.
+ bool start_aec_dump = GetPropertyFromConstraints(
+ constraints, MediaConstraintsInterface::kInternalAecDump);
+#if defined(OS_IOS) || defined(OS_ANDROID)
+ enable_typing_detection = false;
+ enable_experimental_aec = false;
+#endif
+
+  // Return without creating the APM if no audio processing component is
+  // enabled.
+ if (!enable_aec && !enable_experimental_aec && !enable_ns &&
+ !enable_high_pass_filter && !enable_typing_detection) {
+ return;
+ }
+
+ // Create and configure the audio processing if it does not exist.
+ if (!audio_processing_.get())
+ audio_processing_.reset(webrtc::AudioProcessing::Create(0));
+
+ // Enable the audio processing components.
+ if (enable_aec)
+ EnableEchoCancellation(audio_processing_.get());
+
+ if (enable_ns)
+ EnableNoiseSuppression(audio_processing_.get());
+
+ if (enable_high_pass_filter)
+ EnableHighPassFilter(audio_processing_.get());
+
+ if (enable_typing_detection)
+ EnableTypingDetection(audio_processing_.get());
+
+ if (enable_experimental_aec)
+ EnableExperimentalEchoCancellation(audio_processing_.get());
+
+ if (enable_aec && start_aec_dump)
+ StartAecDump(audio_processing_.get());
+
+  // Configure the audio format that the audio processing runs at. This
+  // has to be done after all the required components are enabled.
+ if (audio_processing_->set_sample_rate_hz(kAudioProcessingSampleRate))
+ NOTREACHED();
+  if (audio_processing_->set_num_channels(kAudioProcessingNumberOfChannels,
+                                          kAudioProcessingNumberOfChannels))
+ NOTREACHED();
+}
+
+void WebRtcAudioProcessor::InitializeRenderConverterIfNeeded(
+ int sample_rate, int number_of_channels, int frames_per_buffer) {
+  // TODO(xians): Figure out whether we need to handle buffer size changes.
+ if (render_converter_.get() &&
+ render_converter_->source_parameters().sample_rate() == sample_rate &&
+ render_converter_->source_parameters().channels() == number_of_channels) {
+    // Do nothing if |render_converter_| is already set up properly.
+ return;
+ }
+
+ media::AudioParameters source_params(
+ media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
+ media::GuessChannelLayout(number_of_channels), sample_rate, 16,
+ frames_per_buffer);
+ media::AudioParameters sink_params(
+ media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
+ media::CHANNEL_LAYOUT_MONO, kAudioProcessingSampleRate, 16,
+ kAudioProcessingSampleRate / 100);
+ render_converter_.reset(new WebRtcAudioConverter(source_params, sink_params));
+}
+
+void WebRtcAudioProcessor::StopAudioProcessing() {
+ if (!audio_processing_.get())
+ return;
+
+  // It is safe to stop the AEC dump even if it has not been started.
+ StopAecDump(audio_processing_.get());
+
+ audio_processing_.reset();
+}
+
+} // namespace content
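
A minimal capture-side usage sketch (hypothetical caller, not part of this
patch; assumes |constraints| and |source_params| are available and capture
buffers of arbitrary size are pushed in):

  WebRtcAudioProcessor processor(constraints);
  processor.SetFormat(source_params);
  // For each captured buffer:
  processor.Push(capture_bus);
  while (processor.ProcessAndConsume10MsData(capture_delay_ms, volume,
                                             key_pressed)) {
    const int16* processed = processor.OutputBuffer();
    // Deliver |processed| in processor.OutputFormat() to the WebRTC client.
  }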
