// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

5 #include "content/renderer/media/webrtc_audio_processing_wrapper.h" | |
6 | |
7 #include "base/debug/trace_event.h" | |
8 #include "media/audio/audio_parameters.h" | |
9 #include "media/base/audio_converter.h" | |
10 #include "media/base/audio_fifo.h" | |
11 #include "media/base/channel_layout.h" | |
12 | |
namespace content {

namespace {

using webrtc::AudioProcessing;
using webrtc::MediaConstraintsInterface;

#if defined(ANDROID)
const int kAudioProcessingSampleRate = 16000;
#else
const int kAudioProcessingSampleRate = 32000;
#endif
const int kAudioProcessingNumberOfChannel = 1;

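// Max number of buffers (of the larger of the source and sink buffer sizes)
// that the FIFO in WebRtcAudioConverter is sized to hold.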
const int kMaxNumberOfBuffersInFifo = 2;

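// Returns true if the boolean constraint |key| is present in |constraints|
// and set to true.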
bool GetPropertyFromConstraints(const MediaConstraintsInterface* constraints,
                                const std::string& key) {
  bool value = false;
  return webrtc::FindConstraint(constraints, key, &value, NULL) && value;
}

void EnableEchoCancellation(AudioProcessing* audio_processing) {
  DCHECK(audio_processing);
#if defined(IOS) || defined(ANDROID)
  // Mobile devices use AECM, the mobile-optimized echo control module.
  if (audio_processing->echo_control_mobile()->Enable(true))
    NOTREACHED();

  if (audio_processing->echo_control_mobile()->set_routing_mode(
          webrtc::EchoControlMobile::kSpeakerphone))
    NOTREACHED();

  return;
#endif
  if (audio_processing->echo_cancellation()->Enable(true))
    NOTREACHED();
  if (audio_processing->echo_cancellation()->set_suppression_level(
          webrtc::EchoCancellation::kHighSuppression))
    NOTREACHED();

  // Enable the metrics and delay logging for the AEC.
  if (audio_processing->echo_cancellation()->enable_metrics(true))
    NOTREACHED();
  if (audio_processing->echo_cancellation()->enable_delay_logging(true))
    NOTREACHED();
}

void EnableNoiseSuppression(AudioProcessing* audio_processing) {
  DCHECK(audio_processing);
  if (audio_processing->noise_suppression()->set_level(
          webrtc::NoiseSuppression::kHigh))
    NOTREACHED();

  if (audio_processing->noise_suppression()->Enable(true))
    NOTREACHED();
}

void EnableHighPassFilter(AudioProcessing* audio_processing) {
  DCHECK(audio_processing);
  if (audio_processing->high_pass_filter()->Enable(true))
    NOTREACHED();
}

// TODO(xians): Add support for stereo swapping.
void EnableTypingDetection(AudioProcessing* audio_processing) {
  DCHECK(audio_processing);
  if (audio_processing->voice_detection()->Enable(true))
    NOTREACHED();

  if (audio_processing->voice_detection()->set_likelihood(
          webrtc::VoiceDetection::kVeryLowLikelihood))
    NOTREACHED();
}

void EnableExperimentalEchoCancellation(AudioProcessing* audio_processing) {
  DCHECK(audio_processing);
  webrtc::Config config;
  config.Set<webrtc::DelayCorrection>(new webrtc::DelayCorrection(true));
  audio_processing->SetExtraOptions(config);
}

void StartAecDump(AudioProcessing* audio_processing) {
  static const char kAecDumpFilename[] = "/tmp/audio.aecdump";
  if (audio_processing->StartDebugRecording(kAecDumpFilename))
    LOG(ERROR) << "Failed to start AEC debug recording.";
}

void StopAecDump(AudioProcessing* audio_processing) {
  if (audio_processing->StopDebugRecording())
    LOG(ERROR) << "Failed to stop AEC debug recording.";
}

}  // namespace

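// Helper class that buffers incoming audio in a FIFO and converts it into
// the 10 ms chunks, in the sink sample rate and channel layout, that the
// WebRTC audio processing module expects. Typical usage is to Push() a
// source buffer and then drain the converted chunks by calling Convert()
// in a loop, reading each chunk from audio_frame().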
class WebRtcAudioProcessingWrapper::WebRtcAudioConverter
    : public media::AudioConverter::InputCallback {
 public:
  WebRtcAudioConverter(const media::AudioParameters& source_params,
                       const media::AudioParameters& sink_params)
      : source_params_(source_params),
        sink_params_(sink_params) {
    // Create the audio converter, which is responsible for down-mixing and
    // resampling.
    audio_converter_.reset(
        new media::AudioConverter(source_params, sink_params_, false));
    audio_converter_->AddInput(this);

    // Create and initialize the audio FIFO and the audio bus wrapper.
    // The FIFO should hold at least twice the source buffer size or twice
    // the sink buffer size, whichever is larger.
    int buffer_size = std::max(
        kMaxNumberOfBuffersInFifo * source_params.frames_per_buffer(),
        kMaxNumberOfBuffersInFifo * sink_params_.frames_per_buffer());
    fifo_.reset(new media::AudioFifo(source_params.channels(), buffer_size));
    audio_wrapper_ = media::AudioBus::Create(sink_params_.channels(),
                                             sink_params_.frames_per_buffer());
  }

  ~WebRtcAudioConverter() {
    audio_converter_->RemoveInput(this);
  }

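  // Buffers |audio_source| in the FIFO. The caller is responsible for not
  // pushing more data than the FIFO can hold (see the DCHECK below).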
  void Push(media::AudioBus* audio_source) {
    DCHECK_LE(fifo_->frames() + audio_source->frames(), fifo_->max_frames());
    fifo_->Push(audio_source);
  }

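  // Pulls 10 ms of buffered data through the converter and fills
  // |audio_frame_|. Returns false if the FIFO holds less than 10 ms of data.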
  bool Convert() {
    // Return false if the FIFO does not yet contain 10 ms of data.
    if (fifo_->frames() < (source_params_.sample_rate() / 100))
      return false;

    // Convert 10 ms of data to the output format; this will trigger
    // ProvideInput().
    audio_converter_->Convert(audio_wrapper_.get());

    // TODO(xians): Avoid this deinterleaving if APM can take deinterleaved
    // data.
    audio_wrapper_->ToInterleaved(audio_wrapper_->frames(),
                                  sizeof(audio_frame_.data_[0]),
                                  audio_frame_.data_);

    audio_frame_.samples_per_channel_ = sink_params_.frames_per_buffer();
    audio_frame_.sample_rate_hz_ = sink_params_.sample_rate();
    audio_frame_.speech_type_ = webrtc::AudioFrame::kNormalSpeech;
    audio_frame_.vad_activity_ = webrtc::AudioFrame::kVadUnknown;
    audio_frame_.num_channels_ = sink_params_.channels();

    return true;
  }

  webrtc::AudioFrame* audio_frame() { return &audio_frame_; }
  const media::AudioParameters& source_parameters() const {
    return source_params_;
  }
  const media::AudioParameters& sink_parameters() const {
    return sink_params_;
  }

 private:
  // media::AudioConverter::InputCallback implementation.
  virtual double ProvideInput(media::AudioBus* audio_bus,
                              base::TimeDelta buffer_delay) {
    // The first Convert() call can trigger ProvideInput() twice; return zero
    // volume when the FIFO cannot satisfy the request. Switching to
    // SincResampler would avoid this (see the TODO below).
    if (fifo_->frames() < audio_bus->frames())
      return 0;

    fifo_->Consume(audio_bus, 0, audio_bus->frames());
    return 1.0;
  }

  webrtc::AudioFrame audio_frame_;

  // TODO(xians): Consider using SincResampler to save a memcpy.
  // Handles mixing and resampling between input and output parameters.
  scoped_ptr<media::AudioConverter> audio_converter_;
  scoped_ptr<media::AudioBus> audio_wrapper_;
  scoped_ptr<media::AudioFifo> fifo_;

  media::AudioParameters source_params_;
  media::AudioParameters sink_params_;
};

WebRtcAudioProcessingWrapper::WebRtcAudioProcessingWrapper() {
}

WebRtcAudioProcessingWrapper::~WebRtcAudioProcessingWrapper() {
  StopAudioProcessing();
}

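// Creates and configures the audio processing module according to the given
// constraints, or tears it down when no processing component is enabled, and
// then (re)initializes the capture converter for |source_params|.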
// TODO(xians): Should we support changing the settings on the fly without
// constructing a new audio processing module?
void WebRtcAudioProcessingWrapper::Configure(
    const media::AudioParameters& source_params,
    const MediaConstraintsInterface* constraints) {

perkj_chrome
2013/10/24 12:31:30
You might want to consider mandatory and optional

  if (constraints) {
    bool enable_aec = GetPropertyFromConstraints(
        constraints, MediaConstraintsInterface::kEchoCancellation);
    bool enable_experimental_aec = GetPropertyFromConstraints(
        constraints, MediaConstraintsInterface::kExperimentalEchoCancellation);
    bool enable_ns = GetPropertyFromConstraints(
        constraints, MediaConstraintsInterface::kNoiseSuppression);
    bool enable_high_pass_filter = GetPropertyFromConstraints(
        constraints, MediaConstraintsInterface::kHighpassFilter);
    bool enable_typing_detection = GetPropertyFromConstraints(
        constraints, MediaConstraintsInterface::kTypingNoiseDetection);
    // TODO(xians): How to start and stop AEC dump?
    bool start_aec_dump = GetPropertyFromConstraints(
        constraints, MediaConstraintsInterface::kInternalAecDump);
#if defined(IOS) || defined(ANDROID)
    enable_typing_detection = false;
    enable_experimental_aec = false;
#endif

    // Reset the audio processing to NULL if no audio processing component is
    // enabled.
    if (!enable_aec && !enable_experimental_aec && !enable_ns &&
        !enable_high_pass_filter && !enable_typing_detection) {
      StopAudioProcessing();
    } else {
      // Create and configure the audio processing if it does not exist.
      if (!audio_processing_.get())
        audio_processing_.reset(webrtc::AudioProcessing::Create(0));

      // Enable the audio processing components.
      if (enable_aec)
        EnableEchoCancellation(audio_processing_.get());

      if (enable_ns)
        EnableNoiseSuppression(audio_processing_.get());

      if (enable_high_pass_filter)
        EnableHighPassFilter(audio_processing_.get());

      if (enable_typing_detection)
        EnableTypingDetection(audio_processing_.get());

      if (enable_experimental_aec)
        EnableExperimentalEchoCancellation(audio_processing_.get());

      if (enable_aec && start_aec_dump)
        StartAecDump(audio_processing_.get());
      else
        StopAecDump(audio_processing_.get());

      // Configure the audio format the audio processing runs at. This has to
      // be done after all the needed components are enabled.
      if (audio_processing_->set_sample_rate_hz(kAudioProcessingSampleRate))
        NOTREACHED();
      if (audio_processing_->set_num_channels(kAudioProcessingNumberOfChannel,
                                              kAudioProcessingNumberOfChannel))
        NOTREACHED();
    }
  }

  InitializeCaptureConverter(source_params);
}

void WebRtcAudioProcessingWrapper::Push(media::AudioBus* audio_source) {
  DCHECK(capture_converter_.get());
  capture_converter_->Push(audio_source);

perkj_chrome
2013/10/24 12:31:30
What if all features of APM is disabled- does this

}

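// Converts one 10 ms chunk of the buffered capture data and runs it through
// the audio processing module. Returns false when less than 10 ms of data is
// available.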
bool WebRtcAudioProcessingWrapper::ProcessAndConsume10MsData(
    int capture_audio_delay_ms, int volume, bool key_pressed) {
  TRACE_EVENT0("audio",
               "WebRtcAudioProcessingWrapper::ProcessAndConsume10MsData");

  if (!capture_converter_->Convert())
    return false;

  Process10MsData(capture_audio_delay_ms, volume, key_pressed);

  return true;
}

const int16* WebRtcAudioProcessingWrapper::OutputBuffer() const {
  return &capture_converter_->audio_frame()->data_[0];
}

const media::AudioParameters&
WebRtcAudioProcessingWrapper::OutputFormat() const {
  return capture_converter_->sink_parameters();
}

void WebRtcAudioProcessingWrapper::Process10MsData(int capture_audio_delay_ms,
                                                   int volume,
                                                   bool key_pressed) {
  if (!audio_processing_.get())
    return;

  // TODO(xians): Add a DCHECK that this is a 10 ms data chunk.

  TRACE_EVENT0("audio", "WebRtcAPM::Process10MsData");
  DCHECK_EQ(audio_processing_->sample_rate_hz(),
            capture_converter_->sink_parameters().sample_rate());
  DCHECK_EQ(audio_processing_->num_input_channels(),
            capture_converter_->sink_parameters().channels());
  DCHECK_EQ(audio_processing_->num_output_channels(),
            capture_converter_->sink_parameters().channels());

  // TODO(xians): Sum the capture delay and render delay.
  int total_delay_ms = capture_audio_delay_ms;
  audio_processing_->set_stream_delay_ms(total_delay_ms);
  webrtc::GainControl* agc = audio_processing_->gain_control();
  if (agc->set_stream_analog_level(volume))
    NOTREACHED();
  int err = audio_processing_->ProcessStream(
      capture_converter_->audio_frame());
  if (err)
    NOTREACHED() << "ProcessStream() error: " << err;

  // TODO(xians): Get the new volume and pass it to the capturer.
  // new_volume_ = agc->stream_analog_level();

  // TODO(xians): Handle the typing detection event here.
  // TypingDetection(key_pressed);
}

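// Feeds the render (far-end) audio to the audio processing module in 10 ms
// chunks, so that e.g. the echo canceller can correlate it with the capture
// stream.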
void WebRtcAudioProcessingWrapper::FeedRenderDataToAudioProcessing(
    const int16* render_audio, int sample_rate, int number_of_channels,
    int number_of_frames, int render_delay_ms) {
  if (!audio_processing_.get())
    return;

  TRACE_EVENT0("audio", "WebRtcAPM::FeedRenderDataToAudioProcessing");

  InitializeRenderConverterIfNeeded(sample_rate, number_of_channels,
                                    number_of_frames);
  DCHECK(render_converter_.get());

  // FIXME: This adds an extra copy plus an interleave/deinterleave round trip
  // on every render buffer.
  scoped_ptr<media::AudioBus> data_bus = media::AudioBus::Create(
      number_of_channels, number_of_frames);
  data_bus->FromInterleaved(render_audio,
                            data_bus->frames(),
                            sizeof(render_audio[0]));
  render_converter_->Push(data_bus.get());
  while (render_converter_->Convert())
    audio_processing_->AnalyzeReverseStream(render_converter_->audio_frame());
}

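// Sets up the capture converter. When no audio processing module exists, the
// sink format mirrors the source format and the conversion is effectively a
// pass-through.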
void WebRtcAudioProcessingWrapper::InitializeCaptureConverter(
    const media::AudioParameters& source_params) {
  // Create and initialize the audio converter.
  int sink_sample_rate = audio_processing_.get() ?
      kAudioProcessingSampleRate : source_params.sample_rate();
  media::ChannelLayout sink_channel_layout = audio_processing_.get() ?
      media::CHANNEL_LAYOUT_MONO : source_params.channel_layout();

  // WebRtc uses 10 ms of data as its native packet size.
  media::AudioParameters sink_params(
      media::AudioParameters::AUDIO_PCM_LOW_LATENCY, sink_channel_layout,
      sink_sample_rate, 16, sink_sample_rate / 100);
  capture_converter_.reset(
      new WebRtcAudioConverter(source_params, sink_params));
}

void WebRtcAudioProcessingWrapper::InitializeRenderConverterIfNeeded(
    int sample_rate, int number_of_channels, int frames_per_buffer) {
  // TODO: Figure out whether we need to handle buffer size changes.
  if (render_converter_.get() &&
      render_converter_->source_parameters().sample_rate() == sample_rate &&
      render_converter_->source_parameters().channels() ==
          number_of_channels) {
    // Do nothing if |render_converter_| is set up properly.
    return;
  }

  media::AudioParameters source_params(
      media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
      media::GuessChannelLayout(number_of_channels), sample_rate, 16,
      frames_per_buffer);
  media::AudioParameters sink_params(
      media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
      media::CHANNEL_LAYOUT_MONO, kAudioProcessingSampleRate, 16,
      kAudioProcessingSampleRate / 100);
  render_converter_.reset(
      new WebRtcAudioConverter(source_params, sink_params));
}

void WebRtcAudioProcessingWrapper::StopAudioProcessing() {
  if (!audio_processing_.get())
    return;

  // It is safe to stop the AEC dump even if it has not been started.
  StopAecDump(audio_processing_.get());

  audio_processing_.reset();
}

}  // namespace content