| OLD | NEW |
| 1 // Copyright 2013 The Chromium Authors. All rights reserved. | 1 // Copyright 2013 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "content/renderer/media/media_stream_audio_processor.h" | 5 #include "content/renderer/media/media_stream_audio_processor.h" |
| 6 | 6 |
| 7 #include "base/command_line.h" | 7 #include "base/command_line.h" |
| 8 #include "base/debug/trace_event.h" | 8 #include "base/debug/trace_event.h" |
| 9 #include "content/public/common/content_switches.h" | 9 #include "content/public/common/content_switches.h" |
| 10 #include "content/renderer/media/media_stream_audio_processor_options.h" | 10 #include "content/renderer/media/media_stream_audio_processor_options.h" |
| 11 #include "content/renderer/media/rtc_media_constraints.h" | 11 #include "content/renderer/media/rtc_media_constraints.h" |
| 12 #include "media/audio/audio_parameters.h" | 12 #include "media/audio/audio_parameters.h" |
| 13 #include "media/base/audio_converter.h" | 13 #include "media/base/audio_converter.h" |
| 14 #include "media/base/audio_fifo.h" | 14 #include "media/base/audio_fifo.h" |
| 15 #include "media/base/channel_layout.h" | 15 #include "media/base/channel_layout.h" |
| 16 #include "third_party/WebKit/public/platform/WebMediaConstraints.h" | 16 #include "third_party/WebKit/public/platform/WebMediaConstraints.h" |
| 17 #include "third_party/libjingle/source/talk/app/webrtc/mediaconstraintsinterface
.h" | 17 #include "third_party/libjingle/source/talk/app/webrtc/mediaconstraintsinterface
.h" |
| 18 | 18 |
| 19 namespace content { | 19 namespace content { |
| 20 | 20 |
| 21 namespace { | 21 namespace { |
| 22 | 22 |
| 23 using webrtc::AudioProcessing; | 23 using webrtc::AudioProcessing; |
| 24 using webrtc::MediaConstraintsInterface; | 24 using webrtc::MediaConstraintsInterface; |
| 25 | 25 |
| 26 #if defined(OS_ANDROID) | 26 #if defined(ANDROID) |
| 27 const int kAudioProcessingSampleRate = 16000; | 27 const int kAudioProcessingSampleRate = 16000; |
| 28 #else | 28 #else |
| 29 const int kAudioProcessingSampleRate = 32000; | 29 const int kAudioProcessingSampleRate = 32000; |
| 30 #endif | 30 #endif |
| 31 const int kAudioProcessingNumberOfChannel = 1; | 31 const int kAudioProcessingNumberOfChannel = 1; |
| 32 | 32 |
| 33 const int kMaxNumberOfBuffersInFifo = 2; | 33 const int kMaxNumberOfBuffersInFifo = 2; |
| 34 | 34 |
| 35 } // namespace | 35 } // namespace |
| 36 | 36 |
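Note on the constants hunk: the only delta here is the platform guard (OS_ANDROID on the left versus ANDROID on the right); the processing rate and mono channel count are unchanged. Later in the patch, buffers are sized as kAudioProcessingSampleRate / 100, so these constants pin the APM to 10 ms frames. A minimal illustration (the kFramesPer10Ms name is ours, not from the CL):

    // 16000 / 100 = 160 frames per 10 ms buffer on Android,
    // 32000 / 100 = 320 frames per 10 ms buffer elsewhere.
    const int kFramesPer10Ms = kAudioProcessingSampleRate / 100;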
| (...skipping 98 matching lines...) |
| 135 // Handles mixing and resampling between input and output parameters. | 135 // Handles mixing and resampling between input and output parameters. |
| 136 media::AudioConverter audio_converter_; | 136 media::AudioConverter audio_converter_; |
| 137 scoped_ptr<media::AudioBus> audio_wrapper_; | 137 scoped_ptr<media::AudioBus> audio_wrapper_; |
| 138 scoped_ptr<media::AudioFifo> fifo_; | 138 scoped_ptr<media::AudioFifo> fifo_; |
| 139 }; | 139 }; |
| 140 | 140 |
| 141 MediaStreamAudioProcessor::MediaStreamAudioProcessor( | 141 MediaStreamAudioProcessor::MediaStreamAudioProcessor( |
| 142 const media::AudioParameters& source_params, | 142 const media::AudioParameters& source_params, |
| 143 const blink::WebMediaConstraints& constraints, | 143 const blink::WebMediaConstraints& constraints, |
| 144 int effects) | 144 int effects) |
| 145 : render_delay_ms_(0), | 145 : render_delay_ms_(0) { |
| 146 audio_mirroring_(false) { | |
| 147 capture_thread_checker_.DetachFromThread(); | 146 capture_thread_checker_.DetachFromThread(); |
| 148 render_thread_checker_.DetachFromThread(); | 147 render_thread_checker_.DetachFromThread(); |
| 149 InitializeAudioProcessingModule(constraints, effects); | 148 InitializeAudioProcessingModule(constraints, effects); |
| 150 InitializeCaptureConverter(source_params); | 149 InitializeCaptureConverter(source_params); |
| 151 } | 150 } |
| 152 | 151 |
| 153 MediaStreamAudioProcessor::~MediaStreamAudioProcessor() { | 152 MediaStreamAudioProcessor::~MediaStreamAudioProcessor() { |
| 154 DCHECK(main_thread_checker_.CalledOnValidThread()); | 153 DCHECK(main_thread_checker_.CalledOnValidThread()); |
| 155 StopAudioProcessing(); | 154 StopAudioProcessing(); |
| 156 } | 155 } |
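The only behavioral delta in this hunk is the extra audio_mirroring_(false) initializer on the left; the converter plumbing is identical on both sides. The MediaStreamAudioConverter declared above couples a media::AudioConverter with a media::AudioFifo so that source-rate buffers can be queued until a full 10 ms frame at the processing rate is available. Its Push()/Convert() bodies fall inside the skipped lines, so the following is only a sketch of that buffering pattern, with illustrative names:

    // Hedged sketch, not the skipped implementation. The converter pulls
    // buffered audio from the FIFO through its registered input callback.
    bool ConvertWhenReady(media::AudioFifo* fifo,
                          media::AudioConverter* converter,
                          int frames_needed_per_convert,
                          media::AudioBus* sink) {
      if (fifo->frames() < frames_needed_per_convert)
        return false;  // Keep buffering until a full 10 ms block is queued.
      converter->Convert(sink);  // Resample/remix into the sink format.
      return true;
    }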
| (...skipping 28 matching lines...) |
| 185 render_data_bus_->FromInterleaved(render_audio, | 184 render_data_bus_->FromInterleaved(render_audio, |
| 186 render_data_bus_->frames(), | 185 render_data_bus_->frames(), |
| 187 sizeof(render_audio[0])); | 186 sizeof(render_audio[0])); |
| 188 render_converter_->Push(render_data_bus_.get()); | 187 render_converter_->Push(render_data_bus_.get()); |
| 189 while (render_converter_->Convert(&render_frame_)) | 188 while (render_converter_->Convert(&render_frame_)) |
| 190 audio_processing_->AnalyzeReverseStream(&render_frame_); | 189 audio_processing_->AnalyzeReverseStream(&render_frame_); |
| 191 } | 190 } |
| 192 | 191 |
| 193 bool MediaStreamAudioProcessor::ProcessAndConsumeData( | 192 bool MediaStreamAudioProcessor::ProcessAndConsumeData( |
| 194 base::TimeDelta capture_delay, int volume, bool key_pressed, | 193 base::TimeDelta capture_delay, int volume, bool key_pressed, |
| 195 int* new_volume, int16** out) { | 194 int16** out) { |
| 196 DCHECK(capture_thread_checker_.CalledOnValidThread()); | 195 DCHECK(capture_thread_checker_.CalledOnValidThread()); |
| 197 TRACE_EVENT0("audio", | 196 TRACE_EVENT0("audio", |
| 198 "MediaStreamAudioProcessor::ProcessAndConsumeData"); | 197 "MediaStreamAudioProcessor::ProcessAndConsumeData"); |
| 199 | 198 |
| 200 if (!capture_converter_->Convert(&capture_frame_)) | 199 if (!capture_converter_->Convert(&capture_frame_)) |
| 201 return false; | 200 return false; |
| 202 | 201 |
| 203 *new_volume = ProcessData(&capture_frame_, capture_delay, volume, | 202 ProcessData(&capture_frame_, capture_delay, volume, key_pressed); |
| 204 key_pressed); | |
| 205 *out = capture_frame_.data_; | 203 *out = capture_frame_.data_; |
| 206 | 204 |
| 207 return true; | 205 return true; |
| 208 } | 206 } |
| 209 | 207 |
| 210 const media::AudioParameters& MediaStreamAudioProcessor::InputFormat() const { | 208 const media::AudioParameters& MediaStreamAudioProcessor::InputFormat() const { |
| 211 return capture_converter_->source_parameters(); | 209 return capture_converter_->source_parameters(); |
| 212 } | 210 } |
| 213 | 211 |
| 214 const media::AudioParameters& MediaStreamAudioProcessor::OutputFormat() const { | 212 const media::AudioParameters& MediaStreamAudioProcessor::OutputFormat() const { |
| 215 return capture_converter_->sink_parameters(); | 213 return capture_converter_->sink_parameters(); |
| 216 } | 214 } |
| 217 | 215 |
| 218 void MediaStreamAudioProcessor::InitializeAudioProcessingModule( | 216 void MediaStreamAudioProcessor::InitializeAudioProcessingModule( |
| 219 const blink::WebMediaConstraints& constraints, int effects) { | 217 const blink::WebMediaConstraints& constraints, int effects) { |
| 220 DCHECK(!audio_processing_); | 218 DCHECK(!audio_processing_); |
| 221 if (!CommandLine::ForCurrentProcess()->HasSwitch( | 219 if (!CommandLine::ForCurrentProcess()->HasSwitch( |
| 222 switches::kEnableAudioTrackProcessing)) { | 220 switches::kEnableAudioTrackProcessing)) { |
| 223 return; | 221 return; |
| 224 } | 222 } |
| 225 | 223 |
| 226 RTCMediaConstraints native_constraints(constraints); | 224 RTCMediaConstraints native_constraints(constraints); |
| 227 ApplyFixedAudioConstraints(&native_constraints); | 225 ApplyFixedAudioConstraints(&native_constraints); |
| 228 if (effects & media::AudioParameters::ECHO_CANCELLER) { | 226 if (effects & media::AudioParameters::ECHO_CANCELLER) { |
| 229 // If platform echo canceller is enabled, disable the software AEC. | 227 // If platform echo cancellator is enabled, disable the software AEC. |
| 230 native_constraints.AddMandatory( | 228 native_constraints.AddMandatory( |
| 231 MediaConstraintsInterface::kEchoCancellation, | 229 MediaConstraintsInterface::kEchoCancellation, |
| 232 MediaConstraintsInterface::kValueFalse, true); | 230 MediaConstraintsInterface::kValueFalse, true); |
| 233 } | 231 } |
| 234 | 232 |
| 235 #if defined(OS_IOS) | |
| 236 // On iOS, VPIO provides built-in AEC and AGC. | |
| 237 const bool enable_aec = false; | |
| 238 const bool enable_agc = false; | |
| 239 #else | |
| 240 const bool enable_aec = GetPropertyFromConstraints( | 233 const bool enable_aec = GetPropertyFromConstraints( |
| 241 &native_constraints, MediaConstraintsInterface::kEchoCancellation); | 234 &native_constraints, MediaConstraintsInterface::kEchoCancellation); |
| 242 const bool enable_agc = GetPropertyFromConstraints( | 235 const bool enable_ns = GetPropertyFromConstraints( |
| 243 &native_constraints, webrtc::MediaConstraintsInterface::kAutoGainControl); | 236 &native_constraints, MediaConstraintsInterface::kNoiseSuppression); |
| 244 #endif | 237 const bool enable_high_pass_filter = GetPropertyFromConstraints( |
| 245 | 238 &native_constraints, MediaConstraintsInterface::kHighpassFilter); |
| 246 #if defined(OS_IOS) || defined(OS_ANDROID) | 239 #if defined(IOS) || defined(ANDROID) |
| 247 const bool enable_experimental_aec = false; | 240 const bool enable_experimental_aec = false; |
| 248 const bool enable_typing_detection = false; | 241 const bool enable_typing_detection = false; |
| 249 #else | 242 #else |
| 250 const bool enable_experimental_aec = GetPropertyFromConstraints( | 243 const bool enable_experimental_aec = GetPropertyFromConstraints( |
| 251 &native_constraints, | 244 &native_constraints, |
| 252 MediaConstraintsInterface::kExperimentalEchoCancellation); | 245 MediaConstraintsInterface::kExperimentalEchoCancellation); |
| 253 const bool enable_typing_detection = GetPropertyFromConstraints( | 246 const bool enable_typing_detection = GetPropertyFromConstraints( |
| 254 &native_constraints, MediaConstraintsInterface::kTypingNoiseDetection); | 247 &native_constraints, MediaConstraintsInterface::kTypingNoiseDetection); |
| 255 #endif | 248 #endif |
| 256 | 249 |
| 257 const bool enable_ns = GetPropertyFromConstraints( | |
| 258 &native_constraints, MediaConstraintsInterface::kNoiseSuppression); | |
| 259 const bool enable_high_pass_filter = GetPropertyFromConstraints( | |
| 260 &native_constraints, MediaConstraintsInterface::kHighpassFilter); | |
| 261 | |
| 262 audio_mirroring_ = GetPropertyFromConstraints( | |
| 263 &native_constraints, webrtc::MediaConstraintsInterface::kAudioMirroring); | |
| 264 | |
| 265 // Return immediately if no audio processing component is enabled. | 250 // Return immediately if no audio processing component is enabled. |
| 266 if (!enable_aec && !enable_experimental_aec && !enable_ns && | 251 if (!enable_aec && !enable_experimental_aec && !enable_ns && |
| 267 !enable_high_pass_filter && !enable_typing_detection && !enable_agc) { | 252 !enable_high_pass_filter && !enable_typing_detection) { |
| 268 return; | 253 return; |
| 269 } | 254 } |
| 270 | 255 |
| 271 // Create and configure the webrtc::AudioProcessing. | 256 // Create and configure the webrtc::AudioProcessing. |
| 272 audio_processing_.reset(webrtc::AudioProcessing::Create(0)); | 257 audio_processing_.reset(webrtc::AudioProcessing::Create(0)); |
| 273 | 258 |
| 274 // Enable the audio processing components. | 259 // Enable the audio processing components. |
| 275 if (enable_aec) { | 260 if (enable_aec) { |
| 276 EnableEchoCancellation(audio_processing_.get()); | 261 EnableEchoCancellation(audio_processing_.get()); |
| 277 if (enable_experimental_aec) | 262 if (enable_experimental_aec) |
| 278 EnableExperimentalEchoCancellation(audio_processing_.get()); | 263 EnableExperimentalEchoCancellation(audio_processing_.get()); |
| 279 } | 264 } |
| 280 | 265 |
| 281 if (enable_ns) | 266 if (enable_ns) |
| 282 EnableNoiseSuppression(audio_processing_.get()); | 267 EnableNoiseSuppression(audio_processing_.get()); |
| 283 | 268 |
| 284 if (enable_high_pass_filter) | 269 if (enable_high_pass_filter) |
| 285 EnableHighPassFilter(audio_processing_.get()); | 270 EnableHighPassFilter(audio_processing_.get()); |
| 286 | 271 |
| 287 if (enable_typing_detection) | 272 if (enable_typing_detection) |
| 288 EnableTypingDetection(audio_processing_.get()); | 273 EnableTypingDetection(audio_processing_.get()); |
| 289 | 274 |
| 290 if (enable_agc) | |
| 291 EnableAutomaticGainControl(audio_processing_.get()); | |
| 292 | 275 |
| 293 // Configure the audio format the audio processing is running on. This | 276 // Configure the audio format the audio processing is running on. This |
| 294 // has to be done after all the needed components are enabled. | 277 // has to be done after all the needed components are enabled. |
| 295 CHECK_EQ(audio_processing_->set_sample_rate_hz(kAudioProcessingSampleRate), | 278 CHECK_EQ(audio_processing_->set_sample_rate_hz(kAudioProcessingSampleRate), |
| 296 0); | 279 0); |
| 297 CHECK_EQ(audio_processing_->set_num_channels(kAudioProcessingNumberOfChannel, | 280 CHECK_EQ(audio_processing_->set_num_channels(kAudioProcessingNumberOfChannel, |
| 298 kAudioProcessingNumberOfChannel), | 281 kAudioProcessingNumberOfChannel), |
| 299 0); | 282 0); |
| 300 } | 283 } |
| 301 | 284 |
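Both sides of InitializeAudioProcessingModule() follow the same shape: translate the blink constraints into webrtc ones (disabling the software AEC when the platform ECHO_CANCELLER effect is present), return early when no component is requested, then enable individual components through the Enable* helpers from media_stream_audio_processor_options.h. The left column additionally reads the kAutoGainControl and kAudioMirroring constraints, skips AEC/AGC on iOS where VPIO provides them, and uses the OS_IOS/OS_ANDROID guards. The helper bodies are outside this diff; as a rough sketch of what one such helper might do against the 2013-era webrtc::AudioProcessing component API (an assumption, not code from the options file):

    // Hedged sketch of an Enable* helper; the real ones live in
    // media_stream_audio_processor_options.cc, which this diff does not show.
    void EnableEchoCancellationSketch(webrtc::AudioProcessing* apm) {
      webrtc::EchoCancellation* aec = apm->echo_cancellation();
      aec->set_suppression_level(webrtc::EchoCancellation::kHighSuppression);
      aec->Enable(true);
    }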
| (...skipping 49 matching lines...) |
| 351 media::AudioParameters sink_params( | 334 media::AudioParameters sink_params( |
| 352 media::AudioParameters::AUDIO_PCM_LOW_LATENCY, | 335 media::AudioParameters::AUDIO_PCM_LOW_LATENCY, |
| 353 media::CHANNEL_LAYOUT_MONO, kAudioProcessingSampleRate, 16, | 336 media::CHANNEL_LAYOUT_MONO, kAudioProcessingSampleRate, 16, |
| 354 kAudioProcessingSampleRate / 100); | 337 kAudioProcessingSampleRate / 100); |
| 355 render_converter_.reset( | 338 render_converter_.reset( |
| 356 new MediaStreamAudioConverter(source_params, sink_params)); | 339 new MediaStreamAudioConverter(source_params, sink_params)); |
| 357 render_data_bus_ = media::AudioBus::Create(number_of_channels, | 340 render_data_bus_ = media::AudioBus::Create(number_of_channels, |
| 358 frames_per_buffer); | 341 frames_per_buffer); |
| 359 } | 342 } |
| 360 | 343 |
| 361 int MediaStreamAudioProcessor::ProcessData(webrtc::AudioFrame* audio_frame, | 344 void MediaStreamAudioProcessor::ProcessData(webrtc::AudioFrame* audio_frame, |
| 362 base::TimeDelta capture_delay, | 345 base::TimeDelta capture_delay, |
| 363 int volume, | 346 int volume, |
| 364 bool key_pressed) { | 347 bool key_pressed) { |
| 365 DCHECK(capture_thread_checker_.CalledOnValidThread()); | 348 DCHECK(capture_thread_checker_.CalledOnValidThread()); |
| 366 if (!audio_processing_) | 349 if (!audio_processing_) |
| 367 return 0; | 350 return; |
| 368 | 351 |
| 369 TRACE_EVENT0("audio", "MediaStreamAudioProcessor::ProcessData"); | 352 TRACE_EVENT0("audio", "MediaStreamAudioProcessor::Process10MsData"); |
| 370 DCHECK_EQ(audio_processing_->sample_rate_hz(), | 353 DCHECK_EQ(audio_processing_->sample_rate_hz(), |
| 371 capture_converter_->sink_parameters().sample_rate()); | 354 capture_converter_->sink_parameters().sample_rate()); |
| 372 DCHECK_EQ(audio_processing_->num_input_channels(), | 355 DCHECK_EQ(audio_processing_->num_input_channels(), |
| 373 capture_converter_->sink_parameters().channels()); | 356 capture_converter_->sink_parameters().channels()); |
| 374 DCHECK_EQ(audio_processing_->num_output_channels(), | 357 DCHECK_EQ(audio_processing_->num_output_channels(), |
| 375 capture_converter_->sink_parameters().channels()); | 358 capture_converter_->sink_parameters().channels()); |
| 376 | 359 |
| 377 base::subtle::Atomic32 render_delay_ms = | 360 base::subtle::Atomic32 render_delay_ms = |
| 378 base::subtle::Acquire_Load(&render_delay_ms_); | 361 base::subtle::Acquire_Load(&render_delay_ms_); |
| 379 int64 capture_delay_ms = capture_delay.InMilliseconds(); | 362 int64 capture_delay_ms = capture_delay.InMilliseconds(); |
| 380 DCHECK_LT(capture_delay_ms, | 363 DCHECK_LT(capture_delay_ms, |
| 381 std::numeric_limits<base::subtle::Atomic32>::max()); | 364 std::numeric_limits<base::subtle::Atomic32>::max()); |
| 382 int total_delay_ms = capture_delay_ms + render_delay_ms; | 365 int total_delay_ms = capture_delay_ms + render_delay_ms; |
| 383 if (total_delay_ms > 300) { | 366 if (total_delay_ms > 1000) { |
| 384 LOG(WARNING) << "Large audio delay, capture delay: " << capture_delay_ms | 367 LOG(WARNING) << "Large audio delay, capture delay: " << capture_delay_ms |
| 385 << "ms; render delay: " << render_delay_ms << "ms"; | 368 << "ms; render delay: " << render_delay_ms << "ms"; |
| 386 } | 369 } |
| 387 | 370 |
| 388 audio_processing_->set_stream_delay_ms(total_delay_ms); | 371 audio_processing_->set_stream_delay_ms(total_delay_ms); |
| 389 webrtc::GainControl* agc = audio_processing_->gain_control(); | 372 webrtc::GainControl* agc = audio_processing_->gain_control(); |
| 390 int err = agc->set_stream_analog_level(volume); | 373 int err = agc->set_stream_analog_level(volume); |
| 391 DCHECK_EQ(err, 0) << "set_stream_analog_level() error: " << err; | 374 DCHECK_EQ(err, 0) << "set_stream_analog_level() error: " << err; |
| 392 err = audio_processing_->ProcessStream(audio_frame); | 375 err = audio_processing_->ProcessStream(audio_frame); |
| 393 DCHECK_EQ(err, 0) << "ProcessStream() error: " << err; | 376 DCHECK_EQ(err, 0) << "ProcessStream() error: " << err; |
| 394 | 377 |
| 395 // TODO(xians): Add support for typing detection, audio level calculation. | 378 // TODO(xians): Add support for AGC, typing detection, audio level |
| 396 | 379 // calculation, stereo swapping. |
| 397 if (audio_mirroring_ && audio_frame->num_channels_ == 2) { | |
| 398 // TODO(xians): Swap the stereo channels after switching to media::AudioBus. | |
| 399 } | |
| 400 | |
| 401 // Return 0 if the volume has not been changed, otherwise return the new | |
| 402 // volume. | |
| 403 return (agc->stream_analog_level() == volume) ? | |
| 404 0 : agc->stream_analog_level(); | |
| 405 } | 380 } |
| 406 | 381 |
| 407 void MediaStreamAudioProcessor::StopAudioProcessing() { | 382 void MediaStreamAudioProcessor::StopAudioProcessing() { |
| 408 if (!audio_processing_.get()) | 383 if (!audio_processing_.get()) |
| 409 return; | 384 return; |
| 410 | 385 |
| 411 audio_processing_.reset(); | 386 audio_processing_.reset(); |
| 412 } | 387 } |
| 413 | 388 |
| 414 } // namespace content | 389 } // namespace content |
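The ProcessData() hunk is where the two columns diverge most. Both stamp the stream delay and push the current analog level into the AGC before ProcessStream(); the left column additionally reads stream_analog_level() back and returns it (0 when unchanged) through ProcessAndConsumeData()'s new_volume out-parameter, lowers the large-delay warning threshold from 1000 ms to 300 ms, and notes a TODO to swap stereo channels when audio_mirroring_ is set, while the right column keeps AGC, typing detection, level calculation and stereo swapping as TODOs. A hedged caller-side sketch of the left column's ProcessAndConsumeData() contract (the sink and volume-setting calls are hypothetical, not from this CL):

    // Illustrative caller, e.g. inside a capture callback.
    int16* output = NULL;
    int new_volume = 0;
    while (processor->ProcessAndConsumeData(capture_delay, current_volume,
                                            key_pressed, &new_volume, &output)) {
      DeliverProcessedAudio(output);        // Hypothetical downstream sink.
      if (new_volume != 0)
        SetMicrophoneVolume(new_volume);    // Apply the AGC-recommended level.
    }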