OLD | NEW |
1 // Copyright 2013 The Chromium Authors. All rights reserved. | 1 // Copyright 2013 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "content/renderer/media/media_stream_audio_processor.h" | 5 #include "content/renderer/media/media_stream_audio_processor.h" |
6 | 6 |
7 #include "base/command_line.h" | 7 #include "base/command_line.h" |
8 #include "base/debug/trace_event.h" | 8 #include "base/debug/trace_event.h" |
9 #include "content/public/common/content_switches.h" | 9 #include "content/public/common/content_switches.h" |
10 #include "content/renderer/media/media_stream_audio_processor_options.h" | 10 #include "content/renderer/media/media_stream_audio_processor_options.h" |
(...skipping 123 matching lines...) |
134 // TODO(xians): consider using SincResampler to save some memcpy. | 134 // TODO(xians): consider using SincResampler to save some memcpy. |
135 // Handles mixing and resampling between input and output parameters. | 135 // Handles mixing and resampling between input and output parameters. |
136 media::AudioConverter audio_converter_; | 136 media::AudioConverter audio_converter_; |
137 scoped_ptr<media::AudioBus> audio_wrapper_; | 137 scoped_ptr<media::AudioBus> audio_wrapper_; |
138 scoped_ptr<media::AudioFifo> fifo_; | 138 scoped_ptr<media::AudioFifo> fifo_; |
139 }; | 139 }; |
140 | 140 |
141 MediaStreamAudioProcessor::MediaStreamAudioProcessor( | 141 MediaStreamAudioProcessor::MediaStreamAudioProcessor( |
142 const media::AudioParameters& source_params, | 142 const media::AudioParameters& source_params, |
143 const blink::WebMediaConstraints& constraints, | 143 const blink::WebMediaConstraints& constraints, |
144 int effects) | 144 int effects, |
145 : render_delay_ms_(0) { | 145 WebRtcAudioDeviceImpl* audio_device) |
| 146 : render_delay_ms_(0), |
| 147 audio_device_(audio_device) { |
146 capture_thread_checker_.DetachFromThread(); | 148 capture_thread_checker_.DetachFromThread(); |
147 render_thread_checker_.DetachFromThread(); | 149 render_thread_checker_.DetachFromThread(); |
148 InitializeAudioProcessingModule(constraints, effects); | 150 InitializeAudioProcessingModule(constraints, effects); |
149 InitializeCaptureConverter(source_params); | 151 InitializeCaptureConverter(source_params); |
150 } | 152 } |
151 | 153 |
152 MediaStreamAudioProcessor::~MediaStreamAudioProcessor() { | 154 MediaStreamAudioProcessor::~MediaStreamAudioProcessor() { |
153 DCHECK(main_thread_checker_.CalledOnValidThread()); | |
154 StopAudioProcessing(); | 155 StopAudioProcessing(); |
155 } | 156 } |
156 | 157 |
157 void MediaStreamAudioProcessor::PushCaptureData(media::AudioBus* audio_source) { | 158 void MediaStreamAudioProcessor::PushCaptureData(media::AudioBus* audio_source) { |
158 DCHECK(capture_thread_checker_.CalledOnValidThread()); | 159 DCHECK(capture_thread_checker_.CalledOnValidThread()); |
159 capture_converter_->Push(audio_source); | 160 capture_converter_->Push(audio_source); |
160 } | 161 } |
161 | 162 |
162 void MediaStreamAudioProcessor::PushRenderData( | |
163 const int16* render_audio, int sample_rate, int number_of_channels, | |
164 int number_of_frames, base::TimeDelta render_delay) { | |
165 DCHECK(render_thread_checker_.CalledOnValidThread()); | |
166 | |
167 // Return immediately if the echo cancellation is off. | |
168 if (!audio_processing_ || | |
169 !audio_processing_->echo_cancellation()->is_enabled()) { | |
170 return; | |
171 } | |
172 | |
173 TRACE_EVENT0("audio", | |
174 "MediaStreamAudioProcessor::FeedRenderDataToAudioProcessing"); | |
175 int64 new_render_delay_ms = render_delay.InMilliseconds(); | |
176 DCHECK_LT(new_render_delay_ms, | |
177 std::numeric_limits<base::subtle::Atomic32>::max()); | |
178 base::subtle::Release_Store(&render_delay_ms_, new_render_delay_ms); | |
179 | |
180 InitializeRenderConverterIfNeeded(sample_rate, number_of_channels, | |
181 number_of_frames); | |
182 | |
183 // TODO(xians): Avoid this extra interleave/deinterleave. | |
184 render_data_bus_->FromInterleaved(render_audio, | |
185 render_data_bus_->frames(), | |
186 sizeof(render_audio[0])); | |
187 render_converter_->Push(render_data_bus_.get()); | |
188 while (render_converter_->Convert(&render_frame_)) | |
189 audio_processing_->AnalyzeReverseStream(&render_frame_); | |
190 } | |
191 | |
192 bool MediaStreamAudioProcessor::ProcessAndConsumeData( | 163 bool MediaStreamAudioProcessor::ProcessAndConsumeData( |
193 base::TimeDelta capture_delay, int volume, bool key_pressed, | 164 base::TimeDelta capture_delay, int volume, bool key_pressed, |
194 int16** out) { | 165 int16** out) { |
195 DCHECK(capture_thread_checker_.CalledOnValidThread()); | 166 DCHECK(capture_thread_checker_.CalledOnValidThread()); |
196 TRACE_EVENT0("audio", | 167 TRACE_EVENT0("audio", "MediaStreamAudioProcessor::ProcessAndConsumeData"); |
197 "MediaStreamAudioProcessor::ProcessAndConsumeData"); | |
198 | 168 |
199 if (!capture_converter_->Convert(&capture_frame_)) | 169 if (!capture_converter_->Convert(&capture_frame_)) |
200 return false; | 170 return false; |
201 | 171 |
202 ProcessData(&capture_frame_, capture_delay, volume, key_pressed); | 172 ProcessData(&capture_frame_, capture_delay, volume, key_pressed); |
203 *out = capture_frame_.data_; | 173 *out = capture_frame_.data_; |
204 | 174 |
205 return true; | 175 return true; |
206 } | 176 } |
207 | 177 |
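
ProcessAndConsumeData() returns false as soon as Convert() cannot produce another complete chunk, so the capture side is expected to push one hardware buffer and then drain chunks in a loop. Below is a small standalone sketch of that push/drain contract; the FakeAudioProcessor type, the int16_t block representation, and the 10 ms / 160-sample chunk size are illustrative assumptions, not the real media:: classes or parameters.

#include <cstddef>
#include <cstdint>
#include <deque>
#include <vector>

class FakeAudioProcessor {
 public:
  // Mirrors PushCaptureData(): buffer one captured block.
  void PushCaptureData(const std::vector<int16_t>& block) {
    fifo_.insert(fifo_.end(), block.begin(), block.end());
  }

  // Mirrors ProcessAndConsumeData(): hand out one 10 ms chunk if enough
  // samples are buffered, otherwise tell the caller to stop draining.
  bool ProcessAndConsumeData(const int16_t** out) {
    if (fifo_.size() < kChunkSize)
      return false;  // Not enough buffered data for a full chunk yet.
    chunk_.assign(fifo_.begin(), fifo_.begin() + kChunkSize);
    fifo_.erase(fifo_.begin(), fifo_.begin() + kChunkSize);
    *out = chunk_.data();
    return true;
  }

 private:
  static const size_t kChunkSize = 160;  // Assumed: 10 ms of mono 16 kHz audio.
  std::deque<int16_t> fifo_;
  std::vector<int16_t> chunk_;
};

// Capture-thread callback: push the new buffer, then drain every full chunk.
void OnCapturedAudio(FakeAudioProcessor* processor,
                     const std::vector<int16_t>& captured_block) {
  processor->PushCaptureData(captured_block);
  const int16_t* out = nullptr;
  while (processor->ProcessAndConsumeData(&out)) {
    // Deliver the kChunkSize samples pointed to by |out| to the sinks here.
  }
}
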
208 const media::AudioParameters& MediaStreamAudioProcessor::InputFormat() const { | 178 const media::AudioParameters& MediaStreamAudioProcessor::InputFormat() const { |
209 return capture_converter_->source_parameters(); | 179 return capture_converter_->source_parameters(); |
210 } | 180 } |
211 | 181 |
212 const media::AudioParameters& MediaStreamAudioProcessor::OutputFormat() const { | 182 const media::AudioParameters& MediaStreamAudioProcessor::OutputFormat() const { |
213 return capture_converter_->sink_parameters(); | 183 return capture_converter_->sink_parameters(); |
214 } | 184 } |
215 | 185 |
| 186 void MediaStreamAudioProcessor::RenderData(media::AudioBus* audio_bus, |
| 187 int sample_rate, |
| 188 int audio_delay_milliseconds) { |
| 189 DCHECK(render_thread_checker_.CalledOnValidThread()); |
| 190 DCHECK(audio_processing_->echo_cancellation()->is_enabled()); |
| 191 |
| 192 TRACE_EVENT0("audio", "MediaStreamAudioProcessor::RenderData"); |
| 193 DCHECK_LT(audio_delay_milliseconds, |
| 194 std::numeric_limits<base::subtle::Atomic32>::max()); |
| 195 base::subtle::Release_Store(&render_delay_ms_, audio_delay_milliseconds); |
| 196 |
| 197 InitializeRenderConverterIfNeeded(sample_rate, audio_bus->channels(), |
| 198 audio_bus->frames()); |
| 199 |
| 200 render_converter_->Push(audio_bus); |
| 201 while (render_converter_->Convert(&render_frame_)) |
| 202 audio_processing_->AnalyzeReverseStream(&render_frame_); |
| 203 } |
| 204 |
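
render_delay_ms_ is a single 32-bit word handed from the render thread to the capture thread: Release_Store() publishes the latest delay here, and the capture path (elided above) presumably reads it back with a matching Acquire_Load() before adding it to the capture delay passed to set_stream_delay_ms(). A minimal standalone illustration of that one-word release/acquire handoff, written with std::atomic instead of base::subtle; the function names are illustrative only.

#include <atomic>
#include <cstdint>

// One 32-bit word shared between the render thread (writer) and the capture
// thread (reader).
std::atomic<int32_t> render_delay_ms(0);

// Render thread: publish the most recent render-side delay.
void OnRenderDelay(int audio_delay_milliseconds) {
  render_delay_ms.store(audio_delay_milliseconds, std::memory_order_release);
}

// Capture thread: read the last published value and combine it with the
// capture-side delay before handing the total to the echo canceller.
int TotalDelayMs(int capture_delay_ms) {
  return capture_delay_ms + render_delay_ms.load(std::memory_order_acquire);
}
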
| 205 void MediaStreamAudioProcessor::RemoveAudioRenderer( |
| 206 WebRtcAudioRenderer* renderer) { |
| 207 NOTREACHED(); |
| 208 } |
| 209 |
216 void MediaStreamAudioProcessor::InitializeAudioProcessingModule( | 210 void MediaStreamAudioProcessor::InitializeAudioProcessingModule( |
217 const blink::WebMediaConstraints& constraints, int effects) { | 211 const blink::WebMediaConstraints& constraints, int effects) { |
218 DCHECK(!audio_processing_); | 212 DCHECK(!audio_processing_); |
219 if (!CommandLine::ForCurrentProcess()->HasSwitch( | 213 if (!CommandLine::ForCurrentProcess()->HasSwitch( |
220 switches::kEnableAudioTrackProcessing)) { | 214 switches::kEnableAudioTrackProcessing)) { |
221 return; | 215 return; |
222 } | 216 } |
223 | 217 |
224 RTCMediaConstraints native_constraints(constraints); | 218 RTCMediaConstraints native_constraints(constraints); |
225 ApplyFixedAudioConstraints(&native_constraints); | 219 ApplyFixedAudioConstraints(&native_constraints); |
(...skipping 28 matching lines...) |
254 } | 248 } |
255 | 249 |
256 // Create and configure the webrtc::AudioProcessing. | 250 // Create and configure the webrtc::AudioProcessing. |
257 audio_processing_.reset(webrtc::AudioProcessing::Create(0)); | 251 audio_processing_.reset(webrtc::AudioProcessing::Create(0)); |
258 | 252 |
259 // Enable the audio processing components. | 253 // Enable the audio processing components. |
260 if (enable_aec) { | 254 if (enable_aec) { |
261 EnableEchoCancellation(audio_processing_.get()); | 255 EnableEchoCancellation(audio_processing_.get()); |
262 if (enable_experimental_aec) | 256 if (enable_experimental_aec) |
263 EnableExperimentalEchoCancellation(audio_processing_.get()); | 257 EnableExperimentalEchoCancellation(audio_processing_.get()); |
| 258 |
| 259 if (audio_device_) |
| 260 audio_device_->AddRenderDataObserver(this); |
264 } | 261 } |
265 | 262 |
266 if (enable_ns) | 263 if (enable_ns) |
267 EnableNoiseSuppression(audio_processing_.get()); | 264 EnableNoiseSuppression(audio_processing_.get()); |
268 | 265 |
269 if (enable_high_pass_filter) | 266 if (enable_high_pass_filter) |
270 EnableHighPassFilter(audio_processing_.get()); | 267 EnableHighPassFilter(audio_processing_.get()); |
271 | 268 |
272 if (enable_typing_detection) | 269 if (enable_typing_detection) |
273 EnableTypingDetection(audio_processing_.get()); | 270 EnableTypingDetection(audio_processing_.get()); |
(...skipping 102 matching lines...) |
376 DCHECK_EQ(err, 0) << "ProcessStream() error: " << err; | 373 DCHECK_EQ(err, 0) << "ProcessStream() error: " << err; |
377 | 374 |
378 // TODO(xians): Add support for AGC, typing detection, audio level | 375 // TODO(xians): Add support for AGC, typing detection, audio level |
379 // calculation, stereo swapping. | 376 // calculation, stereo swapping. |
380 } | 377 } |
381 | 378 |
382 void MediaStreamAudioProcessor::StopAudioProcessing() { | 379 void MediaStreamAudioProcessor::StopAudioProcessing() { |
383 if (!audio_processing_.get()) | 380 if (!audio_processing_.get()) |
384 return; | 381 return; |
385 | 382 |
| 383 if (audio_device_) |
| 384 audio_device_->RemoveRenderDataObserver(this); |
| 385 |
386 audio_processing_.reset(); | 386 audio_processing_.reset(); |
387 } | 387 } |
388 | 388 |
389 } // namespace content | 389 } // namespace content |
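
With this change the render data is no longer pushed into the processor from outside; instead the processor registers itself with the audio device as a render-data observer when echo cancellation is enabled (InitializeAudioProcessingModule) and unregisters in StopAudioProcessing(). The sketch below mirrors that add/remove-observer lifecycle with simplified standalone types; the observer interface shape and the FakeAudioDevice/FakeProcessor names are assumptions, not the real WebRtcAudioDeviceImpl declarations.

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <vector>

// Assumed shape of the render-data observer interface.
class RenderDataObserver {
 public:
  virtual ~RenderDataObserver() {}
  virtual void RenderData(const int16_t* audio, int frames, int delay_ms) = 0;
};

// Stand-in for the audio device: fans each render buffer out to observers.
class FakeAudioDevice {
 public:
  void AddRenderDataObserver(RenderDataObserver* observer) {
    observers_.push_back(observer);
  }
  void RemoveRenderDataObserver(RenderDataObserver* observer) {
    observers_.erase(
        std::remove(observers_.begin(), observers_.end(), observer),
        observers_.end());
  }
  // Called on the render thread with each output buffer.
  void NotifyRenderData(const int16_t* audio, int frames, int delay_ms) {
    for (size_t i = 0; i < observers_.size(); ++i)
      observers_[i]->RenderData(audio, frames, delay_ms);
  }

 private:
  std::vector<RenderDataObserver*> observers_;
};

// Stand-in for the processor: observes only while AEC needs the far-end audio.
class FakeProcessor : public RenderDataObserver {
 public:
  FakeProcessor(FakeAudioDevice* device, bool aec_enabled)
      : device_(device), registered_(false) {
    if (aec_enabled && device_) {
      device_->AddRenderDataObserver(this);
      registered_ = true;
    }
  }
  virtual ~FakeProcessor() {
    if (registered_)
      device_->RemoveRenderDataObserver(this);
  }
  virtual void RenderData(const int16_t* audio, int frames, int delay_ms) {
    // Feed the far-end audio to the echo canceller's reverse stream here.
  }

 private:
  FakeAudioDevice* device_;
  bool registered_;
};
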