// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "content/renderer/media/webrtc_audio_processor.h"

#include <algorithm>
#include <string>

#include "base/command_line.h"
#include "base/debug/trace_event.h"
#include "content/public/common/content_switches.h"
#include "media/audio/audio_parameters.h"
#include "media/base/audio_converter.h"
#include "media/base/audio_fifo.h"
#include "media/base/channel_layout.h"

namespace content {

namespace {

using webrtc::AudioProcessing;
using webrtc::MediaConstraintsInterface;

#if defined(ANDROID)
const int kAudioProcessingSampleRate = 16000;
#else
const int kAudioProcessingSampleRate = 32000;
#endif
const int kAudioProcessingNumberOfChannel = 1;

const int kMaxNumberOfBuffersInFifo = 2;

bool GetPropertyFromConstraints(const MediaConstraintsInterface* constraints,
                                const std::string& key) {
  bool value = false;
  return webrtc::FindConstraint(constraints, key, &value, NULL) && value;
}
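
// Illustrative usage (a sketch, not part of this file's API surface):
// assuming |constraints| comes from the PeerConnection layer, a caller
// queries an individual boolean property like so:
//
//   bool aec_enabled = GetPropertyFromConstraints(
//       constraints, MediaConstraintsInterface::kEchoCancellation);
//
// An absent or malformed constraint simply yields false.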

// TODO: Extract all these methods into a helper class.
void EnableEchoCancellation(AudioProcessing* audio_processing) {
  DCHECK(audio_processing);
#if defined(IOS) || defined(ANDROID)
  // Mobile devices use AECM.
  if (audio_processing->echo_control_mobile()->Enable(true))
    NOTREACHED();

  if (audio_processing->echo_control_mobile()->set_routing_mode(
          webrtc::EchoControlMobile::kSpeakerphone))
    NOTREACHED();

  return;
#endif
  if (audio_processing->echo_cancellation()->Enable(true))
    NOTREACHED();
  if (audio_processing->echo_cancellation()->set_suppression_level(
          webrtc::EchoCancellation::kHighSuppression))
    NOTREACHED();

  // Enable the metrics for AEC.
  if (audio_processing->echo_cancellation()->enable_metrics(true))
    NOTREACHED();
  if (audio_processing->echo_cancellation()->enable_delay_logging(true))
    NOTREACHED();
}
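
// Note: AECM, selected on mobile above, is a lower-complexity echo
// controller than the full AEC used on desktop; it trades some echo
// suppression quality for CPU, which is why the mobile branch returns
// early instead of falling through to the desktop configuration.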

void EnableNoiseSuppression(AudioProcessing* audio_processing) {
  DCHECK(audio_processing);
  if (audio_processing->noise_suppression()->set_level(
          webrtc::NoiseSuppression::kHigh))
    NOTREACHED();

  if (audio_processing->noise_suppression()->Enable(true))
    NOTREACHED();
}

void EnableHighPassFilter(AudioProcessing* audio_processing) {
  DCHECK(audio_processing);
  if (audio_processing->high_pass_filter()->Enable(true))
    NOTREACHED();
}

// TODO(xians): Handle stereo swapping.
void EnableTypingDetection(AudioProcessing* audio_processing) {
  DCHECK(audio_processing);
  if (audio_processing->voice_detection()->Enable(true))
    NOTREACHED();

  if (audio_processing->voice_detection()->set_likelihood(
          webrtc::VoiceDetection::kVeryLowLikelihood))
    NOTREACHED();
}

void EnableExperimentalEchoCancellation(AudioProcessing* audio_processing) {
  DCHECK(audio_processing);
  webrtc::Config config;
  config.Set<webrtc::DelayCorrection>(new webrtc::DelayCorrection(true));
  audio_processing->SetExtraOptions(config);
}
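
// Note: DelayCorrection turns on WebRTC's extended-filter AEC mode, which
// tolerates larger and less accurately reported device delays at the cost
// of some extra CPU. It is an experimental toggle, which is why it goes
// through SetExtraOptions() rather than a dedicated component API.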

void StartAecDump(AudioProcessing* audio_processing) {
  static const char kAecDumpFilename[] = "/tmp/audio.aecdump";
  // Henrik Grunell 2013/10/31 11:56:12: This should be different for
  // different platforms.
  if (audio_processing->StartDebugRecording(kAecDumpFilename))
    LOG(ERROR) << "Failed to start AEC debug recording";
}

void StopAecDump(AudioProcessing* audio_processing) {
  if (audio_processing->StopDebugRecording())
    LOG(ERROR) << "Failed to stop AEC debug recording";
}

}  // namespace

class WebRtcAudioProcessor::WebRtcAudioConverter
    : public media::AudioConverter::InputCallback {
 public:
  WebRtcAudioConverter(const media::AudioParameters& source_params,
                       const media::AudioParameters& sink_params) {
    source_params_ = source_params;
    sink_params_ = sink_params;

    // Create the audio converter, which is responsible for down-mixing and
    // resampling.
    audio_converter_.reset(
        new media::AudioConverter(source_params, sink_params_, false));
    audio_converter_->AddInput(this);

    // Create and initialize the audio FIFO and audio bus wrapper. The FIFO
    // must hold at least twice the source buffer size or twice the sink
    // buffer size, whichever is larger.
    int buffer_size = std::max(
        kMaxNumberOfBuffersInFifo * source_params.frames_per_buffer(),
        kMaxNumberOfBuffersInFifo * sink_params_.frames_per_buffer());
    fifo_.reset(new media::AudioFifo(source_params.channels(), buffer_size));
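    // Example (illustrative): with a 44.1 kHz source delivering 441-frame
    // buffers into a 16 kHz sink consuming 160-frame buffers, the FIFO is
    // sized to max(2 * 441, 2 * 160) = 882 frames.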
    // TODO(xians): Use CreateWrapper to save one memcpy.
    audio_wrapper_ = media::AudioBus::Create(sink_params_.channels(),
                                             sink_params_.frames_per_buffer());
  }

  ~WebRtcAudioConverter() {
    audio_converter_->RemoveInput(this);
  }

  void Push(media::AudioBus* audio_source) {
    // Verify that the FIFO will not overflow.
    DCHECK_LE(fifo_->frames() + audio_source->frames(), fifo_->max_frames());
    fifo_->Push(audio_source);
  }

  bool Convert() {
    // Return false if there is less than 10 ms of data in the FIFO.
    if (fifo_->frames() < (source_params_.sample_rate() / 100))
      return false;

    // Convert 10 ms of data to the output format; this will trigger
    // ProvideInput().
    audio_converter_->Convert(audio_wrapper_.get());

    // TODO(xians): Find a better way to handle the interleaved and
    // deinterleaved format switching.
    audio_wrapper_->ToInterleaved(audio_wrapper_->frames(),
                                  sink_params_.bits_per_sample() / 8,
                                  audio_frame_.data_);

    audio_frame_.samples_per_channel_ = sink_params_.frames_per_buffer();
    audio_frame_.sample_rate_hz_ = sink_params_.sample_rate();
    audio_frame_.speech_type_ = webrtc::AudioFrame::kNormalSpeech;
    audio_frame_.vad_activity_ = webrtc::AudioFrame::kVadUnknown;
    audio_frame_.num_channels_ = sink_params_.channels();
    // audio_frame_.interleaved_ = false;

    return true;
  }

  webrtc::AudioFrame* audio_frame() { return &audio_frame_; }
  const media::AudioParameters& source_parameters() const {
    return source_params_;
  }
  const media::AudioParameters& sink_parameters() const {
    return sink_params_;
  }

 private:
  // media::AudioConverter::InputCallback implementation.
  virtual double ProvideInput(media::AudioBus* audio_bus,
                              base::TimeDelta buffer_delay) {
    // The first Convert() call can trigger ProvideInput() twice.
    // TODO(xians): Use SincResampler directly to avoid this.
    if (fifo_->frames() < audio_bus->frames())
      return 0;

    fifo_->Consume(audio_bus, 0, audio_bus->frames());
    return 1.0;
  }

  webrtc::AudioFrame audio_frame_;

  // TODO(xians): Consider using SincResampler to save some memcpy.
  // Handles mixing and resampling between input and output parameters.
  scoped_ptr<media::AudioConverter> audio_converter_;
  scoped_ptr<media::AudioBus> audio_wrapper_;
  scoped_ptr<media::AudioFifo> fifo_;

  media::AudioParameters source_params_;
  media::AudioParameters sink_params_;
};
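
// Illustrative use of WebRtcAudioConverter (a sketch; the capturer
// integration is outside this file). Assuming |source| is a
// media::AudioBus filled by the capture callback:
//
//   converter->Push(source);
//   while (converter->Convert())
//     audio_processing->ProcessStream(converter->audio_frame());
//
// Push() only buffers the data; Convert() returns false until at least
// 10 ms of source data has accumulated in the FIFO.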

WebRtcAudioProcessor::WebRtcAudioProcessor(
    const webrtc::MediaConstraintsInterface* constraints)
    : render_delay_ms_(0) {
  InitializeAudioProcessingModule(constraints);
}

WebRtcAudioProcessor::~WebRtcAudioProcessor() {
  StopAudioProcessing();
}

void WebRtcAudioProcessor::SetFormat(
    const media::AudioParameters& source_params) {
  DCHECK(source_params.IsValid());

  // Create and initialize the audio converter.
  int sink_sample_rate = audio_processing_.get() ?
      kAudioProcessingSampleRate : source_params.sample_rate();
  media::ChannelLayout sink_channel_layout = audio_processing_.get() ?
      media::CHANNEL_LAYOUT_MONO : source_params.channel_layout();

  // WebRTC uses 10 ms chunks as its native packet size.
  media::AudioParameters sink_params(
      media::AudioParameters::AUDIO_PCM_LOW_LATENCY, sink_channel_layout,
      sink_sample_rate, 16, sink_sample_rate / 100);
  capture_converter_.reset(
      new WebRtcAudioConverter(source_params, sink_params));
}

void WebRtcAudioProcessor::Push(media::AudioBus* audio_source) {
  DCHECK(capture_converter_.get());
  capture_converter_->Push(audio_source);
}

bool WebRtcAudioProcessor::ProcessAndConsume10MsData(
    int capture_audio_delay_ms, int volume, bool key_pressed) {
  TRACE_EVENT0("audio",
               "WebRtcAudioProcessor::ProcessAndConsume10MsData");

  if (!capture_converter_->Convert())
    return false;

  Process10MsData(capture_audio_delay_ms, volume, key_pressed);

  return true;
}
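
// Illustrative call pattern from the capture thread (a sketch; the real
// capturer wiring is outside this file):
//
//   processor->Push(audio_bus);
//   while (processor->ProcessAndConsume10MsData(delay_ms, volume,
//                                               key_pressed)) {
//     Deliver(processor->OutputBuffer(), processor->OutputFormat());
//   }
//
// Deliver() is a hypothetical consumer of each processed 10 ms chunk.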

const int16* WebRtcAudioProcessor::OutputBuffer() const {
  return &capture_converter_->audio_frame()->data_[0];
}

const media::AudioParameters& WebRtcAudioProcessor::OutputFormat() const {
  return capture_converter_->sink_parameters();
}

void WebRtcAudioProcessor::Process10MsData(int capture_audio_delay_ms,
                                           int volume,
                                           bool key_pressed) {
  if (!audio_processing_.get())
    return;

  // TODO(xians): Add a DCHECK that this is a 10 ms data chunk.

  TRACE_EVENT0("audio", "WebRtcAudioProcessor::Process10MsData");
  DCHECK_EQ(audio_processing_->sample_rate_hz(),
            capture_converter_->sink_parameters().sample_rate());
  DCHECK_EQ(audio_processing_->num_input_channels(),
            capture_converter_->sink_parameters().channels());
  DCHECK_EQ(audio_processing_->num_output_channels(),
            capture_converter_->sink_parameters().channels());

  // Sum the capture delay and render delay.
  int total_delay_ms = 0;
  {
    base::AutoLock auto_lock(lock_);
    total_delay_ms = capture_audio_delay_ms + render_delay_ms_;
  }

  audio_processing_->set_stream_delay_ms(total_delay_ms);
  webrtc::GainControl* agc = audio_processing_->gain_control();
  if (agc->set_stream_analog_level(volume))
    NOTREACHED();
  int err = audio_processing_->ProcessStream(
      capture_converter_->audio_frame());
  if (err) {
    NOTREACHED() << "ProcessStream() error: " << err;
  }

  // TODO(xians): Get the new volume and pass it to the capturer.
  // new_volume_ = agc->stream_analog_level();

  // TODO(xians): Handle the typing detection event here.
  // TypingDetection(key_pressed);
}
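
// Note: the delay handed to set_stream_delay_ms() above is the AEC's
// estimate of the time between a far-end frame entering
// AnalyzeReverseStream() and its echo showing up in ProcessStream(),
// which is why the render and capture delays are summed.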

void WebRtcAudioProcessor::FeedRenderDataToAudioProcessing(
    const int16* render_audio, int sample_rate, int number_of_channels,
    int number_of_frames, int render_delay_ms) {
  if (!audio_processing_.get())
    return;

  TRACE_EVENT0("audio",
               "WebRtcAudioProcessor::FeedRenderDataToAudioProcessing");
  {
    base::AutoLock auto_lock(lock_);
    render_delay_ms_ = render_delay_ms;
  }

  InitializeRenderConverterIfNeeded(sample_rate, number_of_channels,
                                    number_of_frames);
  DCHECK(render_converter_.get());

  // FIXME: This incurs extra copies and interleave/deinterleave round
  // trips; it should be optimized away.
  scoped_ptr<media::AudioBus> data_bus = media::AudioBus::Create(
      number_of_channels, number_of_frames);
  data_bus->FromInterleaved(render_audio,
                            data_bus->frames(),
                            sizeof(render_audio[0]));
  render_converter_->Push(data_bus.get());
  while (render_converter_->Convert()) {
    audio_processing_->AnalyzeReverseStream(render_converter_->audio_frame());
  }
}
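
// Note: AnalyzeReverseStream() only inspects the far-end (render) signal
// so the AEC can model the echo it must subtract; the render data itself
// is never modified. Only the capture path goes through ProcessStream().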

void WebRtcAudioProcessor::InitializeAudioProcessingModule(
    const webrtc::MediaConstraintsInterface* constraints) {
  const CommandLine& command_line = *CommandLine::ForCurrentProcess();
  if (!command_line.HasSwitch(switches::kEnableWebRtcAudioProcessor))
    return;

  if (!constraints)
    return;

  bool enable_aec = GetPropertyFromConstraints(
      constraints, MediaConstraintsInterface::kEchoCancellation);
  bool enable_experimental_aec = GetPropertyFromConstraints(
      constraints, MediaConstraintsInterface::kExperimentalEchoCancellation);
  bool enable_ns = GetPropertyFromConstraints(
      constraints, MediaConstraintsInterface::kNoiseSuppression);
  bool enable_high_pass_filter = GetPropertyFromConstraints(
      constraints, MediaConstraintsInterface::kHighpassFilter);
  bool enable_typing_detection = GetPropertyFromConstraints(
      constraints, MediaConstraintsInterface::kTypingNoiseDetection);
  // TODO(xians): How should the AEC dump be started and stopped?
  bool start_aec_dump = GetPropertyFromConstraints(
      constraints, MediaConstraintsInterface::kInternalAecDump);
#if defined(IOS) || defined(ANDROID)
  enable_typing_detection = false;
  enable_experimental_aec = false;
#endif

  // Return early if no audio processing component is enabled.
  if (!enable_aec && !enable_experimental_aec && !enable_ns &&
      !enable_high_pass_filter && !enable_typing_detection) {
    return;
  }

  // Create the audio processing module if it does not exist.
  if (!audio_processing_.get())
    audio_processing_.reset(webrtc::AudioProcessing::Create(0));

  // Enable the requested audio processing components.
  if (enable_aec)
    EnableEchoCancellation(audio_processing_.get());

  if (enable_ns)
    EnableNoiseSuppression(audio_processing_.get());

  if (enable_high_pass_filter)
    EnableHighPassFilter(audio_processing_.get());

  if (enable_typing_detection)
    EnableTypingDetection(audio_processing_.get());

  if (enable_experimental_aec)
    EnableExperimentalEchoCancellation(audio_processing_.get());

  if (enable_aec && start_aec_dump)
    StartAecDump(audio_processing_.get());

  // Configure the audio format the audio processing runs at. This has to
  // be done after all the needed components are enabled.
  if (audio_processing_->set_sample_rate_hz(kAudioProcessingSampleRate))
    NOTREACHED();
  if (audio_processing_->set_num_channels(kAudioProcessingNumberOfChannel,
                                          kAudioProcessingNumberOfChannel))
    NOTREACHED();
}
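
// Illustrative activation (a sketch): processing stays disabled unless the
// browser is launched with the kEnableWebRtcAudioProcessor switch and the
// getUserMedia constraints request at least one component, e.g. a
// constraint set where MediaConstraintsInterface::kEchoCancellation is
// true. Otherwise |audio_processing_| stays NULL and the processor acts
// as a pass-through.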

void WebRtcAudioProcessor::InitializeRenderConverterIfNeeded(
    int sample_rate, int number_of_channels, int frames_per_buffer) {
  // TODO: Figure out whether we need to handle buffer size changes.
  if (render_converter_.get() &&
      render_converter_->source_parameters().sample_rate() == sample_rate &&
      render_converter_->source_parameters().channels() ==
          number_of_channels) {
    // Do nothing if |render_converter_| is already set up properly.
    return;
  }

  media::AudioParameters source_params(
      media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
      media::GuessChannelLayout(number_of_channels), sample_rate, 16,
      frames_per_buffer);
  media::AudioParameters sink_params(
      media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
      media::CHANNEL_LAYOUT_MONO, kAudioProcessingSampleRate, 16,
      kAudioProcessingSampleRate / 100);
  render_converter_.reset(
      new WebRtcAudioConverter(source_params, sink_params));
}

void WebRtcAudioProcessor::StopAudioProcessing() {
  if (!audio_processing_.get())
    return;

  // It is safe to stop the AEC dump even if it has not been started.
  StopAecDump(audio_processing_.get());

  audio_processing_.reset();
}

}  // namespace content