Chromium Code Reviews

Side by Side Diff: content/renderer/media/webrtc_audio_processor.cc

Issue 54383003: Added an "enable-audio-processor" flag and WebRtcAudioProcessor class (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: addressed the comments. Created 7 years, 1 month ago
1 // Copyright 2013 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "content/renderer/media/webrtc_audio_processor.h"
6
7 #include "base/command_line.h"
8 #include "base/debug/trace_event.h"
9 #include "content/public/common/content_switches.h"
10 #include "content/renderer/media/webrtc_audio_processor_options.h"
11 #include "media/audio/audio_parameters.h"
12 #include "media/base/audio_converter.h"
13 #include "media/base/audio_fifo.h"
14 #include "media/base/channel_layout.h"
15
16 namespace content {
17
18 namespace {
19
20 using webrtc::AudioProcessing;
21 using webrtc::MediaConstraintsInterface;
22
23 #if defined(ANDROID)
24 const int kAudioProcessingSampleRate = 16000;
25 #else
26 const int kAudioProcessingSampleRate = 32000;
27 #endif
28 const int kAudioProcessingNumberOfChannel = 1;
29
30 const int kMaxNumberOfBuffersInFifo = 2;
31
32 } // namespace
33
34 class WebRtcAudioProcessor::WebRtcAudioConverter
35 : public media::AudioConverter::InputCallback {
36 public:
37 WebRtcAudioConverter(const media::AudioParameters& source_params,
38 const media::AudioParameters& sink_params)
39 : worker_thread_detach_(false),
40 source_params_(source_params),
41 sink_params_(sink_params),
42 audio_converter_(source_params, sink_params_, false) {
43 audio_converter_.AddInput(this);
44 // Create and initialize audio fifo and audio bus wrapper.
45 // The size of the FIFO should be at least twice the source buffer size
46 // or twice the sink buffer size.
47 int buffer_size = std::max(
48 kMaxNumberOfBuffersInFifo * source_params_.frames_per_buffer(),
49 kMaxNumberOfBuffersInFifo * sink_params_.frames_per_buffer());
50 fifo_.reset(new media::AudioFifo(source_params_.channels(), buffer_size));
51 // TODO(xians): Use CreateWrapper to save one memcpy.
52 audio_wrapper_ = media::AudioBus::Create(sink_params_.channels(),
53 sink_params_.frames_per_buffer());
54 }
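(As a worked example of the FIFO sizing rule above, with illustrative numbers that are not taken from this patch: a 44.1 kHz capture source pushing 441-frame buffers into a 32 kHz / 320-frame sink gives buffer_size = max(2 * 441, 2 * 320) = 882 frames, i.e. the FIFO can absorb a full source push while a 10 ms sink-sized chunk is still waiting to be consumed.)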
55
56 virtual ~WebRtcAudioConverter() {
57 DCHECK(destructor_thread_checker_.CalledOnValidThread());
58 audio_converter_.RemoveInput(this);
59 }
60
61 void Push(media::AudioBus* audio_source) {
62 // Called on the audio thread, which is the capture audio thread for
63 // |WebRtcAudioProcessor::capture_converter_|, and render audio thread for
64 // |WebRtcAudioProcessor::render_converter_|.
65 // It must also be the same thread that calls Convert().
66 if (!worker_thread_detach_) {
67 worker_thread_checker_.DetachFromThread();
Jói 2013/11/08 13:36:40 You can simply do this from the constructor I think.
no longer working on chromium 2013/11/08 15:39:30 Done, and changed the name of destructor_thread_checker_.
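For context, a minimal sketch of the constructor-detach pattern suggested in this thread, using the member names from this patch set (the actual follow-up change is not shown in this diff):

  WebRtcAudioConverter(const media::AudioParameters& source_params,
                       const media::AudioParameters& sink_params)
      : source_params_(source_params),
        sink_params_(sink_params),
        audio_converter_(source_params, sink_params_, false) {
    // The converter is created on the main render thread but used on an audio
    // thread, so detach the worker checker here; the first
    // CalledOnValidThread() call then binds it to that audio thread.
    worker_thread_checker_.DetachFromThread();
    audio_converter_.AddInput(this);
    // ... FIFO and audio bus wrapper setup as above ...
  }

  void Push(media::AudioBus* audio_source) {
    // No lazy-detach flag is needed with this approach.
    DCHECK(worker_thread_checker_.CalledOnValidThread());
    fifo_->Push(audio_source);
  }

With the checker detached up front, the worker_thread_detach_ bookkeeping in Push() and Convert() goes away.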
68 worker_thread_detach_ = true;
69 }
70
71 worker_thread_checker_.CalledOnValidThread();
72 fifo_->Push(audio_source);
73 }
74
75 bool Convert(webrtc::AudioFrame* out) {
76 // Called on the audio thread, which is the capture audio thread for
77 // |WebRtcAudioProcessor::capture_converter_|, and render audio thread for
78 // |WebRtcAudioProcessor::render_converter_|.
79 // Return false if there is less than 10 ms of data in the FIFO.
80 worker_thread_checker_.CalledOnValidThread();
81 if (fifo_->frames() < (source_params_.sample_rate() / 100))
82 return false;
83
84 // Convert 10 ms of data to the output format; this will trigger ProvideInput().
85 audio_converter_.Convert(audio_wrapper_.get());
86
87 // TODO(xians): Figure out a better way to handle the interleaved and
88 // deinterleaved format switching.
89 audio_wrapper_->ToInterleaved(audio_wrapper_->frames(),
90 sink_params_.bits_per_sample() / 8,
91 out->data_);
92
93 out->samples_per_channel_ = sink_params_.frames_per_buffer();
94 out->sample_rate_hz_ = sink_params_.sample_rate();
95 out->speech_type_ = webrtc::AudioFrame::kNormalSpeech;
96 out->vad_activity_ = webrtc::AudioFrame::kVadUnknown;
97 out->num_channels_ = sink_params_.channels();
98
99 return true;
100 }
101
102 const media::AudioParameters& source_parameters() const {
103 return source_params_;
104 }
105 const media::AudioParameters& sink_parameters() const {
106 return sink_params_;
107 }
108
109 private:
110 // AudioConverter::InputCallback implementation.
111 virtual double ProvideInput(media::AudioBus* audio_bus,
112 base::TimeDelta buffer_delay) {
113 // Called on realtime audio thread.
114 // TODO(xians): Figure out why the first Convert() triggers ProvideInput
115 // two times.
116 if (fifo_->frames() < audio_bus->frames())
117 return 0;
118
119 fifo_->Consume(audio_bus, 0, audio_bus->frames());
120 return 1.0;
121 }
122
123 base::ThreadChecker destructor_thread_checker_;
124 base::ThreadChecker worker_thread_checker_;
125 bool worker_thread_detach_;
126 media::AudioParameters source_params_;
127 media::AudioParameters sink_params_;
128
129 // TODO(xians): consider using SincResampler to save some memcpy.
130 // Handles mixing and resampling between input and output parameters.
131 media::AudioConverter audio_converter_;
132 scoped_ptr<media::AudioBus> audio_wrapper_;
133 scoped_ptr<media::AudioFifo> fifo_;
134 };
135
136 WebRtcAudioProcessor::WebRtcAudioProcessor(
137 const webrtc::MediaConstraintsInterface* constraints)
138 : render_delay_ms_(0),
139 capture_thread_detach_(false),
140 render_thread_detach_(false) {
141 InitializeAudioProcessingModule(constraints);
142 }
143
144 WebRtcAudioProcessor::~WebRtcAudioProcessor() {
145 DCHECK(thread_checker_.CalledOnValidThread());
146 StopAudioProcessing();
147 }
148
149 void WebRtcAudioProcessor::SetCaptureFormat(
150 const media::AudioParameters& source_params) {
151 DCHECK(thread_checker_.CalledOnValidThread());
152 DCHECK(source_params.IsValid());
153
154 // Create and initialize audio converter for the source data.
155 // When the webrtc AudioProcessing is enabled, the sink format of the
156 // converter will be the same as the post-processed data format, which is
157 // 32k mono for desktops and 16k mono for Android. When the AudioProcessing
158 // is disabled, the sink format will be the same as the source format.
159 const int sink_sample_rate = audio_processing_ ?
160 kAudioProcessingSampleRate : source_params.sample_rate();
161 const media::ChannelLayout sink_channel_layout = audio_processing_ ?
162 media::CHANNEL_LAYOUT_MONO : source_params.channel_layout();
163
164 // WebRTC uses 10 ms as its native packet size.
165 media::AudioParameters sink_params(
166 media::AudioParameters::AUDIO_PCM_LOW_LATENCY, sink_channel_layout,
167 sink_sample_rate, 16, sink_sample_rate / 100);
168 capture_converter_.reset(
169 new WebRtcAudioConverter(source_params, sink_params));
170 }
171
172 void WebRtcAudioProcessor::PushCaptureData(media::AudioBus* audio_source) {
173 if (!capture_thread_detach_) {
174 render_thread_checker_.DetachFromThread();
Jói 2013/11/08 13:36:40 I think you meant capture_thread_checker_.DetachFromThread().
no longer working on chromium 2013/11/08 15:39:30 Right, it should be capture_thread_checker_. Sorry
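For reference, a hedged sketch of PushCaptureData() with both of the fixes discussed above applied (checker detached in the constructor, and the capture checker used here); this is illustrative, not the final landed code:

  void WebRtcAudioProcessor::PushCaptureData(media::AudioBus* audio_source) {
    DCHECK(capture_thread_checker_.CalledOnValidThread());
    capture_converter_->Push(audio_source);
  }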
175 capture_thread_detach_ = true;
176 }
177 capture_thread_checker_.CalledOnValidThread();
178 capture_converter_->Push(audio_source);
179 }
180
181 bool WebRtcAudioProcessor::ProcessAndConsumeData(
182 int capture_audio_delay_ms, int volume, bool key_pressed,
183 int16** out) {
184 capture_thread_checker_.CalledOnValidThread();
185 TRACE_EVENT0("audio",
186 "WebRtcAudioProcessor::ProcessAndConsumeData");
187
188 if (!capture_converter_->Convert(&capture_frame_))
189 return false;
190
191 ProcessData(&capture_frame_, capture_audio_delay_ms, volume, key_pressed);
192 *out = capture_frame_.data_;
193
194 return true;
195 }
196
197 const media::AudioParameters& WebRtcAudioProcessor::OutputFormat() const {
198 return capture_converter_->sink_parameters();
199 }
200
201 void WebRtcAudioProcessor::ProcessData(webrtc::AudioFrame* audio_frame,
202 int capture_audio_delay_ms,
203 int volume,
204 bool key_pressed) {
205 capture_thread_checker_.CalledOnValidThread();
206 if (!audio_processing_)
207 return;
208
209 TRACE_EVENT0("audio", "WebRtcAudioProcessor::Process10MsData");
210 DCHECK_EQ(audio_processing_->sample_rate_hz(),
211 capture_converter_->sink_parameters().sample_rate());
212 DCHECK_EQ(audio_processing_->num_input_channels(),
213 capture_converter_->sink_parameters().channels());
214 DCHECK_EQ(audio_processing_->num_output_channels(),
215 capture_converter_->sink_parameters().channels());
216
217 int total_delay_ms = 0;
218 {
219 base::AutoLock auto_lock(lock_);
220 total_delay_ms = capture_audio_delay_ms + render_delay_ms_;
221 }
222
223 audio_processing_->set_stream_delay_ms(total_delay_ms);
224 webrtc::GainControl* agc = audio_processing_->gain_control();
225 if (agc->set_stream_analog_level(volume))
226 NOTREACHED();
227 int err = audio_processing_->ProcessStream(audio_frame);
228 DCHECK(!err) << "ProcessStream() error: " << err;
229
230 // TODO(xians): Add support for AGC, typing detection, audio level calculation,
231 // and stereo swapping.
232 }
233
234 void WebRtcAudioProcessor::PushRenderData(
235 const int16* render_audio, int sample_rate, int number_of_channels,
236 int number_of_frames, int render_delay_ms) {
237 if (!render_thread_detach_) {
238 render_thread_checker_.DetachFromThread();
Jói 2013/11/08 13:36:40 I think you can simply call this in the constructor.
no longer working on chromium 2013/11/08 15:39:30 Done.
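A similar sketch for the processor constructor, assuming both audio-thread checkers are detached up front (member names from this patch set; illustrative only, not the landed change):

  WebRtcAudioProcessor::WebRtcAudioProcessor(
      const webrtc::MediaConstraintsInterface* constraints)
      : render_delay_ms_(0) {
    // Each checker binds to the capture or render audio thread on the first
    // CalledOnValidThread() call made from that thread.
    capture_thread_checker_.DetachFromThread();
    render_thread_checker_.DetachFromThread();
    InitializeAudioProcessingModule(constraints);
  }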
239 render_thread_detach_ = true;
240 }
241 render_thread_checker_.CalledOnValidThread();
242
243 // Return immediately if the echo cancellation is off.
244 if (!audio_processing_ ||
245 !audio_processing_->echo_cancellation()->is_enabled())
246 return;
247
248 TRACE_EVENT0("audio",
249 "WebRtcAudioProcessor::FeedRenderDataToAudioProcessing");
250 {
251 base::AutoLock auto_lock(lock_);
252 render_delay_ms_ = render_delay_ms;
253 }
254
255 InitializeRenderConverterIfNeeded(sample_rate, number_of_channels,
256 number_of_frames);
257
258 // TODO(xians): Avoid this extra interleave/deinterleave.
259 render_data_bus_->FromInterleaved(render_audio,
260 render_data_bus_->frames(),
261 sizeof(render_audio[0]));
262 render_converter_->Push(render_data_bus_.get());
263 while (render_converter_->Convert(&render_frame_)) {
264 audio_processing_->AnalyzeReverseStream(&render_frame_);
265 }
266 }
267
268 void WebRtcAudioProcessor::InitializeAudioProcessingModule(
269 const webrtc::MediaConstraintsInterface* constraints) {
270 if (!CommandLine::ForCurrentProcess()->HasSwitch(
271 switches::kEnableAudioTrackProcessing)) {
272 return;
273 }
274
275 if (!constraints)
276 return;
277
278 const bool enable_aec = GetPropertyFromConstraints(
279 constraints, MediaConstraintsInterface::kEchoCancellation);
280 const bool enable_ns = GetPropertyFromConstraints(
281 constraints, MediaConstraintsInterface::kNoiseSuppression);
282 const bool enable_high_pass_filter = GetPropertyFromConstraints(
283 constraints, MediaConstraintsInterface::kHighpassFilter);
284 const bool start_aec_dump = GetPropertyFromConstraints(
285 constraints, MediaConstraintsInterface::kInternalAecDump);
286 #if defined(IOS) || defined(ANDROID)
287 const bool enable_experimental_aec = false;
288 const bool enable_typing_detection = false;
289 #else
290 const bool enable_experimental_aec = GetPropertyFromConstraints(
291 constraints, MediaConstraintsInterface::kExperimentalEchoCancellation);
292 const bool enable_typing_detection = GetPropertyFromConstraints(
293 constraints, MediaConstraintsInterface::kTypingNoiseDetection);
294 #endif
295
296 // Reset the audio processing to NULL if no audio processing component is
297 // enabled.
298 if (!enable_aec && !enable_experimental_aec && !enable_ns &&
299 !enable_high_pass_filter && !enable_typing_detection) {
300 return;
301 }
302
303 // Create and configure the audio processing if it does not exist.
304 if (!audio_processing_)
305 audio_processing_.reset(webrtc::AudioProcessing::Create(0));
306
307 // Enable the audio processing components.
308 if (enable_aec) {
309 EnableEchoCancellation(audio_processing_.get());
310
311 if (enable_experimental_aec)
312 EnableExperimentalEchoCancellation(audio_processing_.get());
313 }
314
315 if (enable_ns)
316 EnableNoiseSuppression(audio_processing_.get());
317
318 if (enable_high_pass_filter)
319 EnableHighPassFilter(audio_processing_.get());
320
321 if (enable_typing_detection)
322 EnableTypingDetection(audio_processing_.get());
323
324 if (enable_aec && start_aec_dump)
325 StartAecDump(audio_processing_.get());
326
327 // Configure the audio format the audio processing is running on. This
328 // has to be done after all the needed components are enabled.
329 if (audio_processing_->set_sample_rate_hz(kAudioProcessingSampleRate))
330 NOTREACHED();
331 if (audio_processing_->set_num_channels(kAudioProcessingNumberOfChannel,
332 kAudioProcessingNumberOfChannel))
333 NOTREACHED();
334 }
335
336 void WebRtcAudioProcessor::InitializeRenderConverterIfNeeded(
337 int sample_rate, int number_of_channels, int frames_per_buffer) {
338 // TODO, figure out if we need to handle the buffer size change.
Jói 2013/11/08 13:36:40 TODO needs an owner.
no longer working on chromium 2013/11/08 15:39:30 Done.
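For reference, the Chromium convention is to name an owner in the TODO, e.g. (owner taken from the other TODOs in this file):

  // TODO(xians): Figure out if we need to handle the buffer size change.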
339 if (render_converter_.get() &&
340 render_converter_->source_parameters().sample_rate() == sample_rate &&
341 render_converter_->source_parameters().channels() == number_of_channels) {
342 // Do nothing if the |render_converter_| has been set up properly.
343 return;
344 }
345
346 media::AudioParameters source_params(
347 media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
348 media::GuessChannelLayout(number_of_channels), sample_rate, 16,
349 frames_per_buffer);
350 media::AudioParameters sink_params(
351 media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
352 media::CHANNEL_LAYOUT_MONO, kAudioProcessingSampleRate, 16,
353 kAudioProcessingSampleRate / 100);
354 render_converter_.reset(new WebRtcAudioConverter(source_params, sink_params));
355 render_data_bus_ = media::AudioBus::Create(number_of_channels,
356 frames_per_buffer);
357 }
358
359 void WebRtcAudioProcessor::StopAudioProcessing() {
360 if (!audio_processing_.get())
361 return;
362
363 // It is safe to stop the AEC dump even if it has not been started.
364 StopAecDump(audio_processing_.get());
365
366 audio_processing_.reset();
367 }
368
369 } // namespace content