| Index: content/renderer/media/webrtc_audio_processing_wrapper.cc |
| diff --git a/content/renderer/media/webrtc_audio_processing_wrapper.cc b/content/renderer/media/webrtc_audio_processing_wrapper.cc |
| new file mode 100644 |
| index 0000000000000000000000000000000000000000..ea4c61162b4c52011465c7cb28ef4f39e7129604 |
| --- /dev/null |
| +++ b/content/renderer/media/webrtc_audio_processing_wrapper.cc |
| @@ -0,0 +1,405 @@ |
| +// Copyright 2013 The Chromium Authors. All rights reserved. |
| +// Use of this source code is governed by a BSD-style license that can be |
| +// found in the LICENSE file. |
| + |
| +#include "content/renderer/media/webrtc_audio_processing_wrapper.h" |
| + |
| +#include "base/debug/trace_event.h" |
| +#include "media/audio/audio_parameters.h" |
| +#include "media/base/audio_converter.h" |
| +#include "media/base/audio_fifo.h" |
| +#include "media/base/channel_layout.h" |
| + |
| +namespace content { |
| + |
| +namespace { |
| + |
| +using webrtc::AudioProcessing; |
| +using webrtc::MediaConstraintsInterface; |
| + |
| +#if defined(ANDROID) |
| +const int kAudioProcessingSampleRate = 16000; |
| +#else |
| +const int kAudioProcessingSampleRate = 32000; |
| +#endif |
| +const int kAudioProcessingNumberOfChannel = 1; |
| + |
| +const int kMaxNumberOfBuffersInFifo = 2; |
| + |
| +bool GetPropertyFromConstraints(const MediaConstraintsInterface* constraints, |
| + const std::string& key) { |
| + bool value = false; |
| + return webrtc::FindConstraint(constraints, key, &value, NULL) && value; |
| +} |
| + |
| +void EnableEchoCancellation(AudioProcessing* audio_processing) { |
| + DCHECK(audio_processing); |
| +#if defined(IOS) || defined(ANDROID) |
| +  // Mobile devices use AECM. |
| + if (audio_processing->echo_control_mobile()->Enable(true)) |
| + NOTREACHED(); |
| + |
| + if (audio_processing->echo_control_mobile()->set_routing_mode( |
| + webrtc::EchoControlMobile::kSpeakerphone)) |
| + NOTREACHED(); |
| + |
| + return; |
| +#endif |
| + if (audio_processing->echo_cancellation()->Enable(true)) |
| + NOTREACHED(); |
| + if (audio_processing->echo_cancellation()->set_suppression_level( |
| + webrtc::EchoCancellation::kHighSuppression)) |
| + NOTREACHED(); |
| + |
| + // Enable the metrics for AEC. |
| + if (audio_processing->echo_cancellation()->enable_metrics(true)) |
| + NOTREACHED(); |
| + if (audio_processing->echo_cancellation()->enable_delay_logging(true)) |
| + NOTREACHED(); |
| +} |
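|
A note on the `if (call) NOTREACHED();` pattern used throughout these
helpers: the webrtc::AudioProcessing component setters return an int error
code where zero means success, so any non-zero return trips the check.
|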
| + |
| +void EnableNoiseSuppression(AudioProcessing* audio_processing) { |
| + DCHECK(audio_processing); |
| + if (audio_processing->noise_suppression()->set_level( |
| + webrtc::NoiseSuppression::kHigh)) |
| + NOTREACHED(); |
| + |
| + if (audio_processing->noise_suppression()->Enable(true)) |
| + NOTREACHED(); |
| +} |
| + |
| +void EnableHighPassFilter(AudioProcessing* audio_processing) { |
| + DCHECK(audio_processing); |
| + if (audio_processing->high_pass_filter()->Enable(true)) |
| + NOTREACHED(); |
| +} |
| + |
| +// TODO(xians): stereo swapping |
| +void EnableTypingDetection(AudioProcessing* audio_processing) { |
| + DCHECK(audio_processing); |
| + if (audio_processing->voice_detection()->Enable(true)) |
| + NOTREACHED(); |
| + |
| + if (audio_processing->voice_detection()->set_likelihood( |
| + webrtc::VoiceDetection::kVeryLowLikelihood)) |
| + NOTREACHED(); |
| +} |
| + |
| +void EnableExperimentalEchoCancellation(AudioProcessing* audio_processing) { |
| + DCHECK(audio_processing); |
| + webrtc::Config config; |
| + config.Set<webrtc::DelayCorrection>(new webrtc::DelayCorrection(true)); |
| + audio_processing->SetExtraOptions(config); |
| +} |
| + |
| +void StartAecDump(AudioProcessing* audio_processing) { |
| +  static const char kAecDumpFilename[] = "/tmp/audio.aecdump"; |
| +  if (audio_processing->StartDebugRecording(kAecDumpFilename)) |
| +    LOG(ERROR) << "Failed to start AEC debug recording"; |
| +} |
| + |
| +void StopAecDump(AudioProcessing* audio_processing) { |
| +  if (audio_processing->StopDebugRecording()) |
| +    LOG(ERROR) << "Failed to stop AEC debug recording"; |
| +} |
| + |
| +} // namespace |
| + |
| +class WebRtcAudioProcessingWrapper::WebRtcAudioConverter |
| + : public media::AudioConverter::InputCallback { |
| + public: |
| +  WebRtcAudioConverter(const media::AudioParameters& source_params, |
| +                       const media::AudioParameters& sink_params) |
| +      : source_params_(source_params), |
| +        sink_params_(sink_params) { |
| + |
| + // Create the audio converter which is responsible for down-mixing and |
| + // resampling. |
| + audio_converter_.reset( |
| + new media::AudioConverter(source_params, sink_params_, false)); |
| + audio_converter_->AddInput(this); |
| + |
| +    // Create and initialize the audio FIFO and audio bus wrapper. |
| +    // The FIFO must hold at least twice the source buffer size or twice |
| +    // the sink buffer size, whichever is larger. |
| + int buffer_size = std::max( |
| + kMaxNumberOfBuffersInFifo * source_params.frames_per_buffer(), |
| + kMaxNumberOfBuffersInFifo * sink_params_.frames_per_buffer()); |
| + fifo_.reset(new media::AudioFifo(source_params.channels(), buffer_size)); |
| + audio_wrapper_ = media::AudioBus::Create(sink_params_.channels(), |
| + sink_params_.frames_per_buffer()); |
| + } |
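|
As a worked example of the FIFO sizing above (the numbers are illustrative,
not from this patch): with a 48 kHz source delivering 480-frame (10 ms)
buffers and the 32 kHz processing sink consuming 320-frame buffers,
buffer_size = max(2 * 480, 2 * 320) = 960 frames, i.e. the FIFO can absorb
two full source buffers before Convert() must drain it.
|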
| + |
| + ~WebRtcAudioConverter() { |
| + audio_converter_->RemoveInput(this); |
| + } |
| + |
| + void Push(media::AudioBus* audio_source) { |
| +    DCHECK_LE(fifo_->frames() + audio_source->frames(), fifo_->max_frames()); |
| + fifo_->Push(audio_source); |
| + } |
| + |
| + bool Convert() { |
| +    // Return false if the FIFO does not yet contain 10 ms of data. |
| + if (fifo_->frames() < (source_params_.sample_rate() / 100)) |
| + return false; |
| + |
| +    // Convert 10 ms of data to the output format; this will trigger |
| +    // ProvideInput(). |
| + audio_converter_->Convert(audio_wrapper_.get()); |
| + |
| +    // TODO(xians): Avoid deinterleave here if APM takes deinterleave format. |
| +    // Interleave into the 16-bit (2 bytes per sample) buffer expected by |
| +    // webrtc::AudioFrame. |
| +    audio_wrapper_->ToInterleaved(audio_wrapper_->frames(), 2, |
| +                                  audio_frame_.data_); |
| + |
| + audio_frame_.samples_per_channel_ = sink_params_.frames_per_buffer(); |
| + audio_frame_.sample_rate_hz_ = sink_params_.sample_rate(); |
| + audio_frame_.speech_type_ = webrtc::AudioFrame::kNormalSpeech; |
| + audio_frame_.vad_activity_ = webrtc::AudioFrame::kVadUnknown; |
| + audio_frame_.num_channels_ = sink_params_.channels(); |
| + |
| + return true; |
| + } |
| + |
| + webrtc::AudioFrame* audio_frame() { return &audio_frame_; } |
| + const media::AudioParameters& source_parameters() const { |
| + return source_params_; |
| + } |
| + const media::AudioParameters& sink_parameters() const { |
| + return sink_params_; |
| + } |
| + |
| + private: |
| + // AudioConverter::InputCallback implementation. |
| + virtual double ProvideInput(media::AudioBus* audio_bus, |
| + base::TimeDelta buffer_delay) { |
| +    // The first Convert() call can trigger ProvideInput() twice; using |
| +    // SincResampler directly would avoid this underflow case. Returning |
| +    // a volume of 0 makes the converter treat this input as silence. |
| +    if (fifo_->frames() < audio_bus->frames()) |
| +      return 0; |
| + |
| + fifo_->Consume(audio_bus, 0, audio_bus->frames()); |
| + return 1.0; |
| + } |
| + |
| + webrtc::AudioFrame audio_frame_; |
| + |
| + // TODO(xians): consider using SincResampler to save some memcpy. |
| + // Handles mixing and resampling between input and output parameters. |
| + scoped_ptr<media::AudioConverter> audio_converter_; |
| + scoped_ptr<media::AudioBus> audio_wrapper_; |
| + scoped_ptr<media::AudioFifo> fifo_; |
| + |
| + media::AudioParameters source_params_; |
| + media::AudioParameters sink_params_; |
| +}; |
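|
For reference, a minimal sketch of how this converter is driven; it mirrors
the Push()/Convert() pattern the wrapper uses below, with |converter| and
|audio_source| assumed to already exist:

    converter->Push(audio_source);   // Buffer the captured audio.
    while (converter->Convert()) {   // Drain one 10 ms chunk per pass.
      webrtc::AudioFrame* frame = converter->audio_frame();
      // |frame| now holds 10 ms of interleaved audio in the sink format.
    }
|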
| + |
| +WebRtcAudioProcessingWrapper::WebRtcAudioProcessingWrapper() { |
| +} |
| + |
| +WebRtcAudioProcessingWrapper::~WebRtcAudioProcessingWrapper() { |
| + StopAudioProcessing(); |
| +} |
| + |
| +// TODO(xians): Should we support changing the settings on the fly without |
| +// constructing a new audio processing module? |
| +void WebRtcAudioProcessingWrapper::Configure( |
| + const media::AudioParameters& source_params, |
| + const MediaConstraintsInterface* constraints) { |
|
perkj_chrome 2013/10/24 12:31:30:
You might want to consider mandatory and optional
|
| + if (constraints) { |
| + bool enable_aec = GetPropertyFromConstraints( |
| + constraints, MediaConstraintsInterface::kEchoCancellation); |
| + bool enable_experimental_aec = GetPropertyFromConstraints( |
| + constraints, MediaConstraintsInterface::kExperimentalEchoCancellation); |
| + bool enable_ns = GetPropertyFromConstraints( |
| + constraints, MediaConstraintsInterface::kNoiseSuppression); |
| + bool enable_high_pass_filter = GetPropertyFromConstraints( |
| + constraints, MediaConstraintsInterface::kHighpassFilter); |
| + bool enable_typing_detection = GetPropertyFromConstraints( |
| + constraints, MediaConstraintsInterface::kTypingNoiseDetection); |
| + // TODO(xians): How to start and stop AEC dump? |
| + bool start_aec_dump = GetPropertyFromConstraints( |
| + constraints, MediaConstraintsInterface::kInternalAecDump); |
| +#if defined(IOS) || defined(ANDROID) |
| + enable_typing_detection = false; |
| + enable_experimental_aec = false; |
| +#endif |
| + |
| + // Reset the audio processing to NULL if no audio processing component is |
| + // enabled. |
| + if (!enable_aec && !enable_experimental_aec && !enable_ns && |
| + !enable_high_pass_filter && !enable_typing_detection) { |
| + StopAudioProcessing(); |
| + } else { |
| + // Create and configure the audio processing if it does not exist. |
| + if (!audio_processing_.get()) |
| + audio_processing_.reset(webrtc::AudioProcessing::Create(0)); |
| + |
| + // Enable the audio processing components. |
| + if (enable_aec) |
| + EnableEchoCancellation(audio_processing_.get()); |
| + |
| + if (enable_ns) |
| + EnableNoiseSuppression(audio_processing_.get()); |
| + |
| + if (enable_high_pass_filter) |
| + EnableHighPassFilter(audio_processing_.get()); |
| + |
| + if (enable_typing_detection) |
| + EnableTypingDetection(audio_processing_.get()); |
| + |
| + if (enable_experimental_aec) |
| + EnableExperimentalEchoCancellation(audio_processing_.get()); |
| + |
| + if (enable_aec && start_aec_dump) |
| + StartAecDump(audio_processing_.get()); |
| + else |
| + StopAecDump(audio_processing_.get()); |
| + |
| + // Configure the audio format the audio processing is running on. This |
| + // has to be done after all the needed components are enabled. |
| + if (audio_processing_->set_sample_rate_hz(kAudioProcessingSampleRate)) |
| + NOTREACHED(); |
| + if (audio_processing_->set_num_channels(kAudioProcessingNumberOfChannel, |
| + kAudioProcessingNumberOfChannel)) |
| + NOTREACHED(); |
| + } |
| + } |
| + |
| + InitializeCaptureConverter(source_params); |
| +} |
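|
A sketch of a call site for Configure() (the parameter values are
illustrative; |constraints| may be NULL, in which case only the capture
converter is initialized):

    media::AudioParameters source_params(
        media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
        media::CHANNEL_LAYOUT_STEREO,
        48000, 16, 480);  // 10 ms buffers at 48 kHz.
    wrapper.Configure(source_params, constraints);
|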
| + |
| +void WebRtcAudioProcessingWrapper::Push(media::AudioBus* audio_source) { |
| + DCHECK(capture_converter_.get()); |
| + capture_converter_->Push(audio_source); |
|
perkj_chrome 2013/10/24 12:31:30:
What if all features of APM is disabled- does this
|
| +} |
| + |
| +bool WebRtcAudioProcessingWrapper::ProcessAndConsume10MsData( |
| + int capture_audio_delay_ms, int volume, bool key_pressed) { |
| + TRACE_EVENT0("audio", |
| + "WebRtcAudioProcessingWrapper::ProcessAndConsume10MsData"); |
| + |
| + if (!capture_converter_->Convert()) |
| + return false; |
| + |
| + Process10MsData(capture_audio_delay_ms, volume, key_pressed); |
| + |
| + return true; |
| +} |
| + |
| +const int16* WebRtcAudioProcessingWrapper::OutputBuffer() const { |
| + return &capture_converter_->audio_frame()->data_[0]; |
| +} |
| + |
| +const media::AudioParameters& |
| +WebRtcAudioProcessingWrapper::OutputFormat() const { |
| + return capture_converter_->sink_parameters(); |
| +} |
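|
Taken together, Push(), ProcessAndConsume10MsData() and OutputBuffer() form
the capture path. A sketch of the expected calling pattern, where |wrapper|
is a configured instance and the delay, volume and key state are
placeholders supplied by the capturer:

    wrapper.Push(capture_bus);
    while (wrapper.ProcessAndConsume10MsData(delay_ms, volume, key_pressed)) {
      const int16* processed = wrapper.OutputBuffer();
      const media::AudioParameters& format = wrapper.OutputFormat();
      // Deliver |processed| (format.frames_per_buffer() interleaved frames)
      // to the WebRTC capture sink.
    }
|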
| + |
| +void WebRtcAudioProcessingWrapper::Process10MsData(int capture_audio_delay_ms, |
| + int volume, |
| + bool key_pressed) { |
| + if (!audio_processing_.get()) |
| + return; |
| + |
| +  // TODO(xians): Add a DCHECK that this is a 10 ms data chunk. |
| + |
| + TRACE_EVENT0("audio", "WebRtcAPM::Process10MsData"); |
| + DCHECK_EQ(audio_processing_->sample_rate_hz(), |
| + capture_converter_->sink_parameters().sample_rate()); |
| + DCHECK_EQ(audio_processing_->num_input_channels(), |
| + capture_converter_->sink_parameters().channels()); |
| + DCHECK_EQ(audio_processing_->num_output_channels(), |
| + capture_converter_->sink_parameters().channels()); |
| + |
| + // TODO(xians): Sum the capture delay and render delay. |
| + int total_delay_ms = capture_audio_delay_ms; |
| + audio_processing_->set_stream_delay_ms(total_delay_ms); |
| + webrtc::GainControl* agc = audio_processing_->gain_control(); |
| + if (agc->set_stream_analog_level(volume)) |
| + NOTREACHED(); |
| + int err = audio_processing_->ProcessStream( |
| + capture_converter_->audio_frame()); |
| + if (err) { |
| + NOTREACHED() << "ProcessStream() error: " << err; |
| + } |
| + |
| + // TODO(xians): Get the new volume and pass it to the capturer. |
| +// new_volume_ = agc->stream_analog_level(); |
| + |
| + // TODO(xians): Handle the typing detection event here. |
| + // TypingDetection(key_pressed); |
| +} |
| + |
| +void WebRtcAudioProcessingWrapper::FeedRenderDataToAudioProcessing( |
| + const int16* render_audio, int sample_rate, int number_of_channels, |
| + int number_of_frames, int render_delay_ms) { |
| + if (!audio_processing_.get()) |
| + return; |
| + |
| + TRACE_EVENT0("audio", "WebRtcAPM::FeedRender10MSDataToAudioProcessing"); |
| + |
| + InitializeRenderConverterIfNeeded(sample_rate, number_of_channels, |
| + number_of_frames); |
| + DCHECK(render_converter_.get()); |
| + |
| +  // FIXME: This is wasteful; it adds an extra copy plus an |
| +  // interleave/deinterleave round trip. |
| + scoped_ptr<media::AudioBus> data_bus = media::AudioBus::Create( |
| + number_of_channels, number_of_frames); |
| + data_bus->FromInterleaved(render_audio, |
| + data_bus->frames(), |
| + sizeof(render_audio[0])); |
| + render_converter_->Push(data_bus.get()); |
| + while (render_converter_->Convert()) { |
| + audio_processing_->AnalyzeReverseStream(render_converter_->audio_frame()); |
| + } |
| +} |
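|
On the render side the wrapper only needs the playout audio so the AEC can
correlate it with the capture stream. A sketch of feeding one render
callback's worth of data (values illustrative):

    wrapper.FeedRenderDataToAudioProcessing(
        render_audio,      // Interleaved int16 playout samples.
        48000,             // Render sample rate.
        2,                 // Number of channels.
        480,               // Frames in this buffer.
        render_delay_ms);  // Delay reported by the output device.
|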
| + |
| +void WebRtcAudioProcessingWrapper::InitializeCaptureConverter( |
| + const media::AudioParameters& source_params) { |
| + // Create and initialize audio converter. |
| + int sink_sample_rate = audio_processing_.get() ? |
| + kAudioProcessingSampleRate : source_params.sample_rate(); |
| + media::ChannelLayout sink_channel_layout = audio_processing_.get() ? |
| + media::CHANNEL_LAYOUT_MONO : source_params.channel_layout(); |
| + |
| +  // WebRTC uses 10 ms packets as its native packet size. |
| + media::AudioParameters sink_params( |
| + media::AudioParameters::AUDIO_PCM_LOW_LATENCY, sink_channel_layout, |
| + sink_sample_rate, 16, sink_sample_rate / 100); |
| + capture_converter_.reset( |
| + new WebRtcAudioConverter(source_params, sink_params)); |
| +} |
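|
For example, with audio processing enabled a 48 kHz stereo source is
converted to a mono sink at kAudioProcessingSampleRate (32 kHz here, 16 kHz
on Android) with 320 frames per buffer (32000 / 100), i.e. 10 ms packets;
with audio processing disabled the sink simply mirrors the source format.
|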
| + |
| +void WebRtcAudioProcessingWrapper::InitializeRenderConverterIfNeeded( |
| + int sample_rate, int number_of_channels, int frames_per_buffer) { |
| +  // TODO(xians): Figure out whether we need to handle buffer size changes. |
| + if (render_converter_.get() && |
| + render_converter_->source_parameters().sample_rate() == sample_rate && |
| + render_converter_->source_parameters().channels() == number_of_channels) { |
| +    // Do nothing if |render_converter_| is already set up properly. |
| + return; |
| + } |
| + |
| + media::AudioParameters source_params( |
| + media::AudioParameters::AUDIO_PCM_LOW_LATENCY, |
| + media::GuessChannelLayout(number_of_channels), sample_rate, 16, |
| + frames_per_buffer); |
| + media::AudioParameters sink_params( |
| + media::AudioParameters::AUDIO_PCM_LOW_LATENCY, |
| + media::CHANNEL_LAYOUT_MONO, kAudioProcessingSampleRate, 16, |
| + kAudioProcessingSampleRate / 100); |
| +  render_converter_.reset( |
| +      new WebRtcAudioConverter(source_params, sink_params)); |
| +} |
| + |
| +void WebRtcAudioProcessingWrapper::StopAudioProcessing() { |
| + if (!audio_processing_.get()) |
| + return; |
| + |
| +  // It is safe to stop the AEC dump even if it was not started. |
| + StopAecDump(audio_processing_.get()); |
| + |
| + audio_processing_.reset(); |
| +} |
| + |
| +} // namespace content |