Chromium Code Reviews

Side by Side Diff: content/renderer/media/webrtc_audio_processor.cc

Issue 54383003: Added an "enable-audio-processor" flag and WebRtcAudioProcessor class (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: addressed Tommi's and Henrik's comments. Created 7 years ago
1 // Copyright 2013 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "content/renderer/media/webrtc_audio_processor.h"
6
7 #include "base/command_line.h"
tommi (sloooow) - chröme 2013/11/22 14:32:42 you probably don't need this
no longer working on chromium 2013/11/25 16:36:26 This will stay
8 #include "base/debug/trace_event.h"
9 #include "content/public/common/content_switches.h"
tommi (sloooow) - chröme 2013/11/22 14:32:42 or this
no longer working on chromium 2013/11/25 16:36:26 ditto
10 #include "content/renderer/media/webrtc_audio_processor_options.h"
11 #include "media/audio/audio_parameters.h"
12 #include "media/base/audio_converter.h"
13 #include "media/base/audio_fifo.h"
14 #include "media/base/channel_layout.h"
15
16 namespace content {
17
18 namespace {
19
20 using webrtc::AudioProcessing;
21 using webrtc::MediaConstraintsInterface;
22
23 #if defined(ANDROID)
24 const int kAudioProcessingSampleRate = 16000;
25 #else
26 const int kAudioProcessingSampleRate = 32000;
27 #endif
28 const int kAudioProcessingNumberOfChannel = 1;
29
30 const int kMaxNumberOfBuffersInFifo = 2;
31
32 } // namespace
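WebRTC consumes audio in 10 ms packets, so the per-packet frame count implied by the constants above is sample_rate / 100. A minimal sketch of that arithmetic (the helper name is illustrative, not part of the patch):

    // Illustrative only: frames per 10 ms WebRTC packet.
    // 32000 / 100 = 320 frames on desktop; 16000 / 100 = 160 on Android.
    int FramesPer10Ms(int sample_rate) {
      return sample_rate / 100;
    }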
33
34 class WebRtcAudioProcessor::WebRtcAudioConverter
35 : public media::AudioConverter::InputCallback {
36 public:
37 WebRtcAudioConverter(const media::AudioParameters& source_params,
38 const media::AudioParameters& sink_params)
39 : source_params_(source_params),
40 sink_params_(sink_params),
41 audio_converter_(source_params, sink_params_, false) {
42 audio_converter_.AddInput(this);
43 // Create and initialize the audio FIFO and the audio bus wrapper.
44 // The FIFO must hold at least twice the source buffer size or twice the
45 // sink buffer size, whichever is larger.
46 int buffer_size = std::max(
47 kMaxNumberOfBuffersInFifo * source_params_.frames_per_buffer(),
48 kMaxNumberOfBuffersInFifo * sink_params_.frames_per_buffer());
49 fifo_.reset(new media::AudioFifo(source_params_.channels(), buffer_size));
50 // TODO(xians): Use CreateWrapper to save one memcpy.
51 audio_wrapper_ = media::AudioBus::Create(sink_params_.channels(),
52 sink_params_.frames_per_buffer());
53 }
54
55 virtual ~WebRtcAudioConverter() {
56 DCHECK(thread_checker_.CalledOnValidThread());
57 audio_converter_.RemoveInput(this);
58 }
59
60 void Push(media::AudioBus* audio_source) {
61 // Called on the audio thread: the capture audio thread for
62 // |WebRtcAudioProcessor::capture_converter_| and the render audio thread
63 // for |WebRtcAudioProcessor::render_converter_|. It must be the same
64 // thread that calls Convert().
65 DCHECK(thread_checker_.CalledOnValidThread());
66 fifo_->Push(audio_source);
67 }
68
69 bool Convert(webrtc::AudioFrame* out) {
70 // Called on the audio thread: the capture audio thread for
71 // |WebRtcAudioProcessor::capture_converter_| and the render audio thread
72 // for |WebRtcAudioProcessor::render_converter_|.
73 // Returns false if the FIFO does not yet contain 10 ms of data.
74 DCHECK(thread_checker_.CalledOnValidThread());
75 if (fifo_->frames() < (source_params_.sample_rate() / 100))
76 return false;
77
78 // Convert 10 ms of data to the output format; this triggers ProvideInput().
79 audio_converter_.Convert(audio_wrapper_.get());
80
81 // TODO(xians): Figure out a better way to handle the interleaved and
82 // deinterleaved format switching.
83 audio_wrapper_->ToInterleaved(audio_wrapper_->frames(),
84 sink_params_.bits_per_sample() / 8,
85 out->data_);
86
87 out->samples_per_channel_ = sink_params_.frames_per_buffer();
88 out->sample_rate_hz_ = sink_params_.sample_rate();
89 out->speech_type_ = webrtc::AudioFrame::kNormalSpeech;
90 out->vad_activity_ = webrtc::AudioFrame::kVadUnknown;
91 out->num_channels_ = sink_params_.channels();
92
93 return true;
94 }
95
96 const media::AudioParameters& source_parameters() const {
97 return source_params_;
98 }
99 const media::AudioParameters& sink_parameters() const {
100 return sink_params_;
101 }
102
103 private:
104 // AudioConverter::InputCallback implementation.
105 virtual double ProvideInput(media::AudioBus* audio_bus,
106 base::TimeDelta buffer_delay) OVERRIDE {
107 // Called on the real-time audio thread.
108 // TODO(xians): Figure out why the first Convert() triggers ProvideInput()
109 // twice.
110 if (fifo_->frames() < audio_bus->frames())
111 return 0;
112
113 fifo_->Consume(audio_bus, 0, audio_bus->frames());
114
115 // Return 1.0 to indicate no volume scaling on the data.
116 return 1.0;
117 }
118
119 base::ThreadChecker thread_checker_;
120 const media::AudioParameters source_params_;
121 const media::AudioParameters sink_params_;
122
123 // TODO(xians): consider using SincResampler to save some memcpy.
124 // Handles mixing and resampling between input and output parameters.
125 media::AudioConverter audio_converter_;
126 scoped_ptr<media::AudioBus> audio_wrapper_;
127 scoped_ptr<media::AudioFifo> fifo_;
128 };
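The intended call pattern for this converter, mirrored by PushRenderData() and ProcessAndConsumeData() below, is: push source-sized buffers, then drain every complete 10 ms packet. A hypothetical sketch (WebRtcAudioConverter is private to WebRtcAudioProcessor, so a helper like this would only compile inside the class; |converter|, |source_bus|, and |frame| are assumed):

    // Hypothetical usage sketch, not part of the patch.
    void DrainAllPackets(WebRtcAudioConverter* converter,
                         media::AudioBus* source_bus,
                         webrtc::AudioFrame* frame) {
      converter->Push(source_bus);  // Buffer the source data in the FIFO.
      // Convert() returns true once per complete 10 ms packet in the FIFO.
      while (converter->Convert(frame)) {
        // |frame| now holds 10 ms of audio in the sink format.
      }
    }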
129
130 WebRtcAudioProcessor::WebRtcAudioProcessor(
131 const webrtc::MediaConstraintsInterface* constraints)
132 : render_delay_ms_(0) {
133 capture_thread_checker_.DetachFromThread();
134 render_thread_checker_.DetachFromThread();
135 InitializeAudioProcessingModule(constraints);
136 }
137
138 WebRtcAudioProcessor::~WebRtcAudioProcessor() {
139 DCHECK(main_thread_checker_.CalledOnValidThread());
140 StopAudioProcessing();
141 }
142
143 void WebRtcAudioProcessor::PushCaptureData(media::AudioBus* audio_source) {
144 DCHECK(capture_thread_checker_.CalledOnValidThread());
145 capture_converter_->Push(audio_source);
146 }
147
148 void WebRtcAudioProcessor::PushRenderData(
149 const int16* render_audio, int sample_rate, int number_of_channels,
150 int number_of_frames, base::TimeDelta render_delay) {
151 DCHECK(render_thread_checker_.CalledOnValidThread());
152
153 // Return immediately if echo cancellation is off.
154 if (!audio_processing_ ||
155 !audio_processing_->echo_cancellation()->is_enabled()) {
156 return;
157 }
158
159 TRACE_EVENT0("audio",
160 "WebRtcAudioProcessor::FeedRenderDataToAudioProcessing");
161 int64 new_render_delay_ms = render_delay.InMilliseconds();
162 DCHECK_LT(new_render_delay_ms,
163 std::numeric_limits<base::subtle::Atomic32>::max());
164 base::subtle::Release_Store(&render_delay_ms_, new_render_delay_ms);
165
166 InitializeRenderConverterIfNeeded(sample_rate, number_of_channels,
167 number_of_frames);
168
169 // TODO(xians): Avoid this extra interleave/deinterleave.
170 render_data_bus_->FromInterleaved(render_audio,
171 render_data_bus_->frames(),
172 sizeof(render_audio[0]));
173 render_converter_->Push(render_data_bus_.get());
174 while (render_converter_->Convert(&render_frame_))
175 audio_processing_->AnalyzeReverseStream(&render_frame_);
176 }
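The render delay is published with Release_Store() here and read with Acquire_Load() in ProcessData() below, the standard single-word release/acquire handoff between two threads. A minimal standalone sketch of the pattern (names are illustrative; requires base/atomicops.h):

    // Illustrative release/acquire handoff using base atomics.
    base::subtle::Atomic32 g_shared_delay_ms = 0;

    void PublishDelay(int delay_ms) {  // Render thread.
      base::subtle::Release_Store(&g_shared_delay_ms, delay_ms);
    }

    int ReadDelay() {  // Capture thread.
      return base::subtle::Acquire_Load(&g_shared_delay_ms);
    }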
177
178 bool WebRtcAudioProcessor::ProcessAndConsumeData(
179 base::TimeDelta capture_delay, int volume, bool key_pressed,
180 int16** out) {
181 DCHECK(capture_thread_checker_.CalledOnValidThread());
182 TRACE_EVENT0("audio",
183 "WebRtcAudioProcessor::ProcessAndConsumeData");
184
185 if (!capture_converter_->Convert(&capture_frame_))
186 return false;
187
188 ProcessData(&capture_frame_, capture_delay, volume, key_pressed);
189 *out = capture_frame_.data_;
190
191 return true;
192 }
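Together with PushCaptureData() above, this is the whole capture path: push one source buffer, then consume as many processed 10 ms packets as the FIFO yields. A hypothetical caller sketch (|processor| and the delay/volume/key-press values are assumed to come from the audio capturer):

    // Hypothetical capture-thread caller, not part of the patch.
    void OnCapturedAudio(WebRtcAudioProcessor* processor,
                         media::AudioBus* audio_bus,
                         base::TimeDelta capture_delay,
                         int volume,
                         bool key_pressed) {
      processor->PushCaptureData(audio_bus);
      int16* processed_data = NULL;
      // Each successful call yields one processed 10 ms packet.
      while (processor->ProcessAndConsumeData(capture_delay, volume,
                                              key_pressed, &processed_data)) {
        // Deliver |processed_data| to the WebRTC audio track.
      }
    }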
193
194 void WebRtcAudioProcessor::SetCaptureFormat(
195 const media::AudioParameters& source_params) {
196 DCHECK(capture_thread_checker_.CalledOnValidThread());
197 DCHECK(source_params.IsValid());
198
199 // Create and initialize the audio converter for the source data.
200 // When webrtc::AudioProcessing is enabled, the sink format of the
201 // converter matches the post-processed data format: 32 kHz mono on
202 // desktop and 16 kHz mono on Android. When the AudioProcessing is
203 // disabled, the sink format is the same as the source format.
204 const int sink_sample_rate = audio_processing_ ?
205 kAudioProcessingSampleRate : source_params.sample_rate();
206 const media::ChannelLayout sink_channel_layout = audio_processing_ ?
207 media::CHANNEL_LAYOUT_MONO : source_params.channel_layout();
208
209 // WebRTC uses 10 ms of data as its native packet size.
210 media::AudioParameters sink_params(
211 media::AudioParameters::AUDIO_PCM_LOW_LATENCY, sink_channel_layout,
212 sink_sample_rate, 16, sink_sample_rate / 100);
213 capture_converter_.reset(
214 new WebRtcAudioConverter(source_params, sink_params));
215 }
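As a concrete example of the sink format chosen here: with processing enabled on desktop, the sink is 32 kHz mono with 16-bit samples, so each 10 ms buffer holds 320 frames (640 bytes). The arithmetic, with assumed desktop values:

    // Illustrative numbers only: sink format with processing enabled (desktop).
    const int sink_sample_rate = 32000;                       // kAudioProcessingSampleRate.
    const int frames_per_buffer = sink_sample_rate / 100;     // 320 frames per 10 ms.
    const int bytes_per_buffer = frames_per_buffer * (16 / 8);  // 640 bytes, mono.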
216
217 const media::AudioParameters& WebRtcAudioProcessor::OutputFormat() const {
218 return capture_converter_->sink_parameters();
219 }
220
221 void WebRtcAudioProcessor::InitializeAudioProcessingModule(
222 const webrtc::MediaConstraintsInterface* constraints) {
223 DCHECK(!audio_processing_);
224 if (!CommandLine::ForCurrentProcess()->HasSwitch(
tommi (sloooow) - chröme 2013/11/22 14:32:42 I don't think this is the right place to do this check
Henrik Grunell 2013/11/25 10:52:02 +1
no longer working on chromium 2013/11/25 16:36:26 The comment was addressed offline.
225 switches::kEnableAudioTrackProcessing)) {
226 return;
227 }
228
229 // Some unit tests do not define the constraints.
230 if (!constraints)
231 return;
232
233 const bool enable_aec = GetPropertyFromConstraints(
234 constraints, MediaConstraintsInterface::kEchoCancellation);
235 const bool enable_ns = GetPropertyFromConstraints(
236 constraints, MediaConstraintsInterface::kNoiseSuppression);
237 const bool enable_high_pass_filter = GetPropertyFromConstraints(
238 constraints, MediaConstraintsInterface::kHighpassFilter);
239 const bool start_aec_dump = GetPropertyFromConstraints(
240 constraints, MediaConstraintsInterface::kInternalAecDump);
241 #if defined(IOS) || defined(ANDROID)
242 const bool enable_experimental_aec = false;
243 const bool enable_typing_detection = false;
244 #else
245 const bool enable_experimental_aec = GetPropertyFromConstraints(
246 constraints, MediaConstraintsInterface::kExperimentalEchoCancellation);
247 const bool enable_typing_detection = GetPropertyFromConstraints(
248 constraints, MediaConstraintsInterface::kTypingNoiseDetection);
249 #endif
250
251 // Return immediately if no audio processing component is enabled.
252 if (!enable_aec && !enable_experimental_aec && !enable_ns &&
253 !enable_high_pass_filter && !enable_typing_detection) {
254 return;
255 }
256
257 // Create and configure the webrtc::AudioProcessing.
258 audio_processing_.reset(webrtc::AudioProcessing::Create(0));
259
260 // Enable the audio processing components.
261 if (enable_aec) {
262 EnableEchoCancellation(audio_processing_.get());
263 if (enable_experimental_aec)
264 EnableExperimentalEchoCancellation(audio_processing_.get());
265 }
266
267 if (enable_ns)
268 EnableNoiseSuppression(audio_processing_.get());
269
270 if (enable_high_pass_filter)
271 EnableHighPassFilter(audio_processing_.get());
272
273 if (enable_typing_detection)
274 EnableTypingDetection(audio_processing_.get());
275
276 if (enable_aec && start_aec_dump)
277 StartAecDump(audio_processing_.get());
278
279 // Configure the audio format that the audio processing runs on. This
280 // must be done after all the required components have been enabled.
281 CHECK_EQ(audio_processing_->set_sample_rate_hz(kAudioProcessingSampleRate),
282 0);
283 CHECK_EQ(audio_processing_->set_num_channels(kAudioProcessingNumberOfChannel,
284 kAudioProcessingNumberOfChannel),
285 0);
286 }
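A consequence of the early returns above: constructing the processor without constraints, as some unit tests do, leaves |audio_processing_| null, so the capture converter keeps the source rate and channel layout. A hypothetical sketch (|source_params| is assumed):

    // Hypothetical unit-test-style usage: with NULL constraints the module is
    // never created, so no processing is applied.
    WebRtcAudioProcessor processor(NULL);
    processor.SetCaptureFormat(source_params);
    // processor.OutputFormat() now has |source_params|'s sample rate and
    // channel layout, repackaged into 10 ms buffers.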
287
288 void WebRtcAudioProcessor::InitializeRenderConverterIfNeeded(
289 int sample_rate, int number_of_channels, int frames_per_buffer) {
290 DCHECK(render_thread_checker_.CalledOnValidThread());
291 // TODO(xians): Figure out if we need to handle the buffer size change.
292 if (render_converter_.get() &&
293 render_converter_->source_parameters().sample_rate() == sample_rate &&
294 render_converter_->source_parameters().channels() == number_of_channels) {
295 // Do nothing if |render_converter_| has already been set up properly.
296 return;
297 }
298
299 // Create and initialize the audio converter for the render data.
300 // webrtc::AudioProcessing expects the same format that it uses for the
301 // capture data: 32 kHz mono on desktop and 16 kHz mono on Android.
302 media::AudioParameters source_params(
303 media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
304 media::GuessChannelLayout(number_of_channels), sample_rate, 16,
305 frames_per_buffer);
306 media::AudioParameters sink_params(
307 media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
308 media::CHANNEL_LAYOUT_MONO, kAudioProcessingSampleRate, 16,
309 kAudioProcessingSampleRate / 100);
310 render_converter_.reset(new WebRtcAudioConverter(source_params, sink_params));
311 render_data_bus_ = media::AudioBus::Create(number_of_channels,
312 frames_per_buffer);
313 }
314
315 void WebRtcAudioProcessor::ProcessData(webrtc::AudioFrame* audio_frame,
316 base::TimeDelta capture_delay,
317 int volume,
318 bool key_pressed) {
319 DCHECK(capture_thread_checker_.CalledOnValidThread());
320 if (!audio_processing_)
321 return;
322
323 TRACE_EVENT0("audio", "WebRtcAudioProcessor::Process10MsData");
324 DCHECK_EQ(audio_processing_->sample_rate_hz(),
325 capture_converter_->sink_parameters().sample_rate());
326 DCHECK_EQ(audio_processing_->num_input_channels(),
327 capture_converter_->sink_parameters().channels());
328 DCHECK_EQ(audio_processing_->num_output_channels(),
329 capture_converter_->sink_parameters().channels());
330
331 base::subtle::Atomic32 render_delay_ms =
332 base::subtle::Acquire_Load(&render_delay_ms_);
333 int64 capture_delay_ms = capture_delay.InMilliseconds();
334 DCHECK_LT(capture_delay_ms,
335 std::numeric_limits<base::subtle::Atomic32>::max());
336 int total_delay_ms = capture_delay_ms + render_delay_ms;
337 if (total_delay_ms > 1000) {
338 LOG(WARNING) << "Large audio delay, capture delay: " << capture_delay_ms
339 << "ms; render delay: " << render_delay_ms << "ms";
340 }
341
342 audio_processing_->set_stream_delay_ms(total_delay_ms);
343 webrtc::GainControl* agc = audio_processing_->gain_control();
344 int err = agc->set_stream_analog_level(volume);
345 DCHECK_EQ(err, 0) << "set_stream_analog_level() error: " << err;
346 err = audio_processing_->ProcessStream(audio_frame);
347 DCHECK_EQ(err, 0) << "ProcessStream() error: " << err;
348
349 // TODO(xians): Add support for AGC, typing detection, audio level
350 // calculation, stereo swapping.
351 }
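To make the delay bookkeeping concrete: set_stream_delay_ms() receives the sum of the capture-side and render-side delays, which the AEC uses to align the far-end (render) signal with the near-end (capture) signal. A worked example with assumed values:

    // Assumed example values: 20 ms capture delay, 30 ms render delay.
    const int capture_delay_ms = 20;
    const int render_delay_ms = 30;
    const int total_delay_ms = capture_delay_ms + render_delay_ms;  // 50 ms.
    // audio_processing_->set_stream_delay_ms(50): the AEC searches for the
    // echo around this offset between the render and capture streams.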
352
353 void WebRtcAudioProcessor::StopAudioProcessing() {
354 if (!audio_processing_.get())
355 return;
356
357 // It is safe to stop the AEC dump even if it has not been started.
358 StopAecDump(audio_processing_.get());
359
360 audio_processing_.reset();
361 }
362
363 } // namespace content