Chromium Code Reviews
| Index: content/renderer/media/media_stream_audio_processor.cc |
| diff --git a/content/renderer/media/media_stream_audio_processor.cc b/content/renderer/media/media_stream_audio_processor.cc |
| index 35b8d9fa24ebc69814515ac97a65f10dfd01821e..b91955528e3c34fc798b3ee0ef1bb54f92c78030 100644 |
| --- a/content/renderer/media/media_stream_audio_processor.cc |
| +++ b/content/renderer/media/media_stream_audio_processor.cc |
| @@ -23,7 +23,7 @@ namespace { |
| using webrtc::AudioProcessing; |
| using webrtc::MediaConstraintsInterface; |
| -#if defined(ANDROID) |
| +#if defined(OS_ANDROID) |
| const int kAudioProcessingSampleRate = 16000; |
| #else |
| const int kAudioProcessingSampleRate = 32000; |
| @@ -142,7 +142,8 @@ MediaStreamAudioProcessor::MediaStreamAudioProcessor( |
| const media::AudioParameters& source_params, |
| const blink::WebMediaConstraints& constraints, |
| int effects) |
| - : render_delay_ms_(0) { |
| + : render_delay_ms_(0), |
| + audio_mirroring_(false) { |
| capture_thread_checker_.DetachFromThread(); |
| render_thread_checker_.DetachFromThread(); |
| InitializeAudioProcessingModule(constraints, effects); |
| @@ -191,15 +192,17 @@ void MediaStreamAudioProcessor::PushRenderData( |
| bool MediaStreamAudioProcessor::ProcessAndConsumeData( |
| base::TimeDelta capture_delay, int volume, bool key_pressed, |
| - int16** out) { |
| + int* new_volume, int16** out) { |
| DCHECK(capture_thread_checker_.CalledOnValidThread()); |
| TRACE_EVENT0("audio", |
| "MediaStreamAudioProcessor::ProcessAndConsumeData"); |
| + *new_volume = 0; |
| if (!capture_converter_->Convert(&capture_frame_)) |
| return false; |
| - ProcessData(&capture_frame_, capture_delay, volume, key_pressed); |
| + *new_volume = ProcessData(&capture_frame_, capture_delay, volume, |
| + key_pressed); |
| *out = capture_frame_.data_; |
| return true; |
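The new int* new_volume out parameter reports the analog microphone level suggested by the AGC (0 means unchanged). As a hedged sketch of how a capture-side caller might consume it: the callback name, the audio_device object, and its SetVolume() call are assumptions for illustration; only the ProcessAndConsumeData() signature comes from this patch.

    // Hypothetical capture callback, not the actual Chromium call site.
    void OnCapturedAudioReady(MediaStreamAudioProcessor* processor,
                              base::TimeDelta capture_delay,
                              int current_volume,
                              bool key_pressed) {
      int16* output = NULL;
      int new_volume = 0;
      while (processor->ProcessAndConsumeData(capture_delay, current_volume,
                                              key_pressed, &new_volume,
                                              &output)) {
        // Deliver |output| to the track sinks here.
        if (new_volume != 0) {
          // The AGC requested a different analog mic level; apply it through
          // the (assumed) device wrapper and remember it for the next call.
          // audio_device->SetVolume(new_volume);
          current_volume = new_volume;
        }
      }
    }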
| @@ -224,32 +227,45 @@ void MediaStreamAudioProcessor::InitializeAudioProcessingModule( |
| RTCMediaConstraints native_constraints(constraints); |
| ApplyFixedAudioConstraints(&native_constraints); |
| if (effects & media::AudioParameters::ECHO_CANCELLER) { |
| - // If platform echo cancellator is enabled, disable the software AEC. |
| + // If platform echo canceller is enabled, disable the software AEC. |
| native_constraints.AddMandatory( |
| MediaConstraintsInterface::kEchoCancellation, |
| MediaConstraintsInterface::kValueFalse, true); |
| } |
| +#if defined(OS_IOS) |
| + // On IOS, VPIO provides built-in AEC. |
|
ajm (2014/01/23 17:30:07): nit: iOS :)
no longer working on chromium (2014/01/24 09:10:50): Done.
|
| + const bool enable_aec = false; |
| +#else |
| const bool enable_aec = GetPropertyFromConstraints( |
| &native_constraints, MediaConstraintsInterface::kEchoCancellation); |
| +#endif |
| + |
| const bool enable_ns = GetPropertyFromConstraints( |
| &native_constraints, MediaConstraintsInterface::kNoiseSuppression); |
| const bool enable_high_pass_filter = GetPropertyFromConstraints( |
| &native_constraints, MediaConstraintsInterface::kHighpassFilter); |
| -#if defined(IOS) || defined(ANDROID) |
| + |
| +#if defined(OS_IOS) || defined(OS_ANDROID) |
|
ajm (2014/01/23 17:30:07): nit: perhaps move this block up, so all platform s
no longer working on chromium (2014/01/24 09:10:50): Done.
|
| const bool enable_experimental_aec = false; |
| const bool enable_typing_detection = false; |
| + const bool enable_agc = false; |
|
ajm (2014/01/24 06:48:59): As in off review comments, move this just up to th
no longer working on chromium (2014/01/24 09:10:50): Done.
|
| #else |
| const bool enable_experimental_aec = GetPropertyFromConstraints( |
| &native_constraints, |
| MediaConstraintsInterface::kExperimentalEchoCancellation); |
| const bool enable_typing_detection = GetPropertyFromConstraints( |
| &native_constraints, MediaConstraintsInterface::kTypingNoiseDetection); |
| + const bool enable_agc = GetPropertyFromConstraints( |
| + &native_constraints, webrtc::MediaConstraintsInterface::kAutoGainControl); |
| #endif |
| + audio_mirroring_ = GetPropertyFromConstraints( |
| + &native_constraints, webrtc::MediaConstraintsInterface::kAudioMirroring); |
| + |
| // Return immediately if no audio processing component is enabled. |
| if (!enable_aec && !enable_experimental_aec && !enable_ns && |
| - !enable_high_pass_filter && !enable_typing_detection) { |
| + !enable_high_pass_filter && !enable_typing_detection && !enable_agc) { |
| return; |
| } |
| @@ -272,6 +288,8 @@ void MediaStreamAudioProcessor::InitializeAudioProcessingModule( |
| if (enable_typing_detection) |
| EnableTypingDetection(audio_processing_.get()); |
| + if (enable_agc) |
| + EnableAutomaticGainControl(audio_processing_.get()); |
| // Configure the audio format the audio processing is running on. This |
| // has to be done after all the needed components are enabled. |
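EnableAutomaticGainControl() is a helper defined elsewhere in this file and is not shown in this hunk. Below is a rough, non-authoritative sketch of what such a helper could look like against the webrtc::AudioProcessing API of that era; the adaptive-analog mode choice is an assumption, not necessarily what the patch does.

    // Sketch only; not the helper from the patch.
    void EnableAutomaticGainControlSketch(
        webrtc::AudioProcessing* audio_processing) {
      webrtc::GainControl* agc = audio_processing->gain_control();
      // Adaptive analog mode lets the AGC propose analog mic volume changes,
      // which ProcessData() later reads back via stream_analog_level().
      int err = agc->set_mode(webrtc::GainControl::kAdaptiveAnalog);
      DCHECK_EQ(err, 0);
      err = agc->Enable(true);
      DCHECK_EQ(err, 0);
    }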
| @@ -341,15 +359,15 @@ void MediaStreamAudioProcessor::InitializeRenderConverterIfNeeded( |
| frames_per_buffer); |
| } |
| -void MediaStreamAudioProcessor::ProcessData(webrtc::AudioFrame* audio_frame, |
| - base::TimeDelta capture_delay, |
| - int volume, |
| - bool key_pressed) { |
| +int MediaStreamAudioProcessor::ProcessData(webrtc::AudioFrame* audio_frame, |
| + base::TimeDelta capture_delay, |
| + int volume, |
| + bool key_pressed) { |
| DCHECK(capture_thread_checker_.CalledOnValidThread()); |
| if (!audio_processing_) |
| - return; |
| + return 0; |
| - TRACE_EVENT0("audio", "MediaStreamAudioProcessor::Process10MsData"); |
| + TRACE_EVENT0("audio", "MediaStreamAudioProcessor::ProcessData"); |
| DCHECK_EQ(audio_processing_->sample_rate_hz(), |
| capture_converter_->sink_parameters().sample_rate()); |
| DCHECK_EQ(audio_processing_->num_input_channels(), |
| @@ -363,20 +381,35 @@ void MediaStreamAudioProcessor::ProcessData(webrtc::AudioFrame* audio_frame, |
| DCHECK_LT(capture_delay_ms, |
| std::numeric_limits<base::subtle::Atomic32>::max()); |
| int total_delay_ms = capture_delay_ms + render_delay_ms; |
| - if (total_delay_ms > 1000) { |
| + if (total_delay_ms > 300) { |
| LOG(WARNING) << "Large audio delay, capture delay: " << capture_delay_ms |
| << "ms; render delay: " << render_delay_ms << "ms"; |
| } |
| audio_processing_->set_stream_delay_ms(total_delay_ms); |
| webrtc::GainControl* agc = audio_processing_->gain_control(); |
| + // TODO(xians): We used to have a problem with truncation of the volume. |
| + // For example, if the OS has 25 volume steps and the current volume is 7, |
| + // it is scaled to roughly 70 in [0, 255]. When the AGC tries to adjust the |
| + // volume to 76, SetVolume() fails to change the volume because 76 is |
| + // truncated back to step 7. WebRTC works around this problem by keeping |
| + // track of the requested values and forcing the AGC to continue its trend. |
| + // Check with ajm@ on whether we still need the workaround. |
| int err = agc->set_stream_analog_level(volume); |
| DCHECK_EQ(err, 0) << "set_stream_analog_level() error: " << err; |
| err = audio_processing_->ProcessStream(audio_frame); |
| DCHECK_EQ(err, 0) << "ProcessStream() error: " << err; |
| - // TODO(xians): Add support for AGC, typing detection, audio level |
| - // calculation, stereo swapping. |
| + // TODO(xians): Add support for typing detection and audio level calculation. |
| + |
| + if (audio_mirroring_ && audio_frame->num_channels_ == 2) { |
| + // TODO(xians): Swap the stereo channels after switching to media::AudioBus. |
| + } |
| + |
| + // Return 0 if the volume has not been changed, otherwise return the new |
| + // volume. |
| + return (agc->stream_analog_level() == volume) ? |
| + 0 : agc->stream_analog_level(); |
| } |
| void MediaStreamAudioProcessor::StopAudioProcessing() { |
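To make the volume truncation issue described in the TODO above concrete, here is a minimal, self-contained sketch. It assumes a simple truncating linear mapping between the OS volume steps and the [0, 255] range the AGC works in; the real driver mapping may differ.

    #include <iostream>

    // Map an OS volume step (0..steps) into the AGC's [0, 255] range.
    int ToAgcRange(int os_step, int steps) { return os_step * 255 / steps; }

    // Map an AGC level back to an OS step, truncating like SetVolume would.
    int ToOsStep(int agc_level, int steps) { return agc_level * steps / 255; }

    int main() {
      const int kSteps = 25;    // The OS exposes 25 volume steps.
      const int os_volume = 7;  // Current OS volume step.
      const int scaled = ToAgcRange(os_volume, kSteps);   // 71
      const int agc_request = scaled + 5;                 // AGC nudges up to 76.
      const int applied = ToOsStep(agc_request, kSteps);  // Truncates back to 7.
      std::cout << "scaled=" << scaled << " agc_request=" << agc_request
                << " applied_step=" << applied << std::endl;
      // applied == os_volume, so the small AGC increase is silently lost unless
      // the caller keeps track of the requested level, as the TODO describes.
      return 0;
    }

Returning the new level from ProcessData(), as this patch does, gives the caller the information it needs to implement such tracking.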