| OLD | NEW |
| 1 // Copyright 2013 The Chromium Authors. All rights reserved. | 1 // Copyright 2013 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "content/renderer/media/media_stream_audio_processor.h" | 5 #include "content/renderer/media/media_stream_audio_processor.h" |
| 6 | 6 |
| 7 #include "base/command_line.h" | 7 #include "base/command_line.h" |
| 8 #include "base/debug/trace_event.h" | 8 #include "base/debug/trace_event.h" |
| 9 #include "content/public/common/content_switches.h" | 9 #include "content/public/common/content_switches.h" |
| 10 #include "content/renderer/media/media_stream_audio_processor_options.h" | 10 #include "content/renderer/media/media_stream_audio_processor_options.h" |
| (...skipping 124 matching lines...) |
| 135 // TODO(xians): consider using SincResampler to save some memcpy. | 135 // TODO(xians): consider using SincResampler to save some memcpy. |
| 136 // Handles mixing and resampling between input and output parameters. | 136 // Handles mixing and resampling between input and output parameters. |
| 137 media::AudioConverter audio_converter_; | 137 media::AudioConverter audio_converter_; |
| 138 scoped_ptr<media::AudioBus> audio_wrapper_; | 138 scoped_ptr<media::AudioBus> audio_wrapper_; |
| 139 scoped_ptr<media::AudioFifo> fifo_; | 139 scoped_ptr<media::AudioFifo> fifo_; |
| 140 }; | 140 }; |
| 141 | 141 |
| 142 MediaStreamAudioProcessor::MediaStreamAudioProcessor( | 142 MediaStreamAudioProcessor::MediaStreamAudioProcessor( |
| 143 const media::AudioParameters& source_params, | 143 const media::AudioParameters& source_params, |
| 144 const blink::WebMediaConstraints& constraints, | 144 const blink::WebMediaConstraints& constraints, |
| 145 int effects) | 145 int effects, |
| 146 WebRtcPlayoutDataSource* playout_data_source) |
| 146 : render_delay_ms_(0), | 147 : render_delay_ms_(0), |
| 148 playout_data_source_(playout_data_source), |
| 147 audio_mirroring_(false), | 149 audio_mirroring_(false), |
| 148 typing_detected_(false) { | 150 typing_detected_(false) { |
| 149 capture_thread_checker_.DetachFromThread(); | 151 capture_thread_checker_.DetachFromThread(); |
| 150 render_thread_checker_.DetachFromThread(); | 152 render_thread_checker_.DetachFromThread(); |
| 151 InitializeAudioProcessingModule(constraints, effects); | 153 InitializeAudioProcessingModule(constraints, effects); |
| 152 InitializeCaptureConverter(source_params); | 154 InitializeCaptureConverter(source_params); |
| 153 } | 155 } |
| 154 | 156 |
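
Note: the constructor now takes a WebRtcPlayoutDataSource* in addition to the source parameters, constraints, and effects. A minimal caller-side sketch of the new signature follows; the function name, the |device| argument, and the scoped_ptr ownership are placeholders for illustration, not code from this change:

```cpp
// Sketch only: illustrates the new four-argument constructor. The creation
// site, ownership model, and |device| are assumptions for illustration.
#include "base/memory/scoped_ptr.h"
#include "content/renderer/media/media_stream_audio_processor.h"

namespace content {

scoped_ptr<MediaStreamAudioProcessor> CreateProcessor(
    const media::AudioParameters& source_params,
    const blink::WebMediaConstraints& constraints,
    int effects,
    WebRtcPlayoutDataSource* device) {
  // |device| may be NULL; the processor only registers itself as a playout
  // sink when echo cancellation is enabled and the pointer is non-NULL.
  scoped_ptr<MediaStreamAudioProcessor> processor(
      new MediaStreamAudioProcessor(source_params, constraints, effects,
                                    device));
  return processor.Pass();
}

}  // namespace content
```
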
| 155 MediaStreamAudioProcessor::~MediaStreamAudioProcessor() { | 157 MediaStreamAudioProcessor::~MediaStreamAudioProcessor() { |
| 156 DCHECK(main_thread_checker_.CalledOnValidThread()); | 158 DCHECK(main_thread_checker_.CalledOnValidThread()); |
| 157 StopAudioProcessing(); | 159 StopAudioProcessing(); |
| 158 } | 160 } |
| 159 | 161 |
| 160 void MediaStreamAudioProcessor::PushCaptureData(media::AudioBus* audio_source) { | 162 void MediaStreamAudioProcessor::PushCaptureData(media::AudioBus* audio_source) { |
| 161 DCHECK(capture_thread_checker_.CalledOnValidThread()); | 163 DCHECK(capture_thread_checker_.CalledOnValidThread()); |
| 162 capture_converter_->Push(audio_source); | 164 capture_converter_->Push(audio_source); |
| 163 } | 165 } |
| 164 | 166 |
| 165 void MediaStreamAudioProcessor::PushRenderData( | |
| 166 const int16* render_audio, int sample_rate, int number_of_channels, | |
| 167 int number_of_frames, base::TimeDelta render_delay) { | |
| 168 DCHECK(render_thread_checker_.CalledOnValidThread()); | |
| 169 | |
| 170 // Return immediately if the echo cancellation is off. | |
| 171 if (!audio_processing_ || | |
| 172 !audio_processing_->echo_cancellation()->is_enabled()) { | |
| 173 return; | |
| 174 } | |
| 175 | |
| 176 TRACE_EVENT0("audio", | |
| 177 "MediaStreamAudioProcessor::FeedRenderDataToAudioProcessing"); | |
| 178 int64 new_render_delay_ms = render_delay.InMilliseconds(); | |
| 179 DCHECK_LT(new_render_delay_ms, | |
| 180 std::numeric_limits<base::subtle::Atomic32>::max()); | |
| 181 base::subtle::Release_Store(&render_delay_ms_, new_render_delay_ms); | |
| 182 | |
| 183 InitializeRenderConverterIfNeeded(sample_rate, number_of_channels, | |
| 184 number_of_frames); | |
| 185 | |
| 186 // TODO(xians): Avoid this extra interleave/deinterleave. | |
| 187 render_data_bus_->FromInterleaved(render_audio, | |
| 188 render_data_bus_->frames(), | |
| 189 sizeof(render_audio[0])); | |
| 190 render_converter_->Push(render_data_bus_.get()); | |
| 191 while (render_converter_->Convert(&render_frame_)) | |
| 192 audio_processing_->AnalyzeReverseStream(&render_frame_); | |
| 193 } | |
| 194 | |
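
With PushRenderData() removed, render-side audio is no longer pushed into the processor; it arrives through the OnPlayoutData() callback added below, delivered by the WebRtcPlayoutDataSource the processor registers with. A rough sketch of the sink interface this relies on; the authoritative declaration lives with WebRtcPlayoutDataSource and may differ in detail:

```cpp
// Assumed shape of the playout-sink interface used by this change; consult
// the actual WebRtcPlayoutDataSource declaration for the real form.
#include "media/base/audio_bus.h"

namespace content {

class WebRtcPlayoutDataSource {
 public:
  class Sink {
   public:
    // Called on the render audio thread with each block of playout data.
    virtual void OnPlayoutData(media::AudioBus* audio_bus,
                               int sample_rate,
                               int audio_delay_milliseconds) = 0;

   protected:
    virtual ~Sink() {}
  };

  // Registers/unregisters a sink that wants a copy of the playout audio.
  virtual void AddPlayoutSink(Sink* sink) = 0;
  virtual void RemovePlayoutSink(Sink* sink) = 0;

 protected:
  virtual ~WebRtcPlayoutDataSource() {}
};

}  // namespace content
```
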
| 195 bool MediaStreamAudioProcessor::ProcessAndConsumeData( | 167 bool MediaStreamAudioProcessor::ProcessAndConsumeData( |
| 196 base::TimeDelta capture_delay, int volume, bool key_pressed, | 168 base::TimeDelta capture_delay, int volume, bool key_pressed, |
| 197 int* new_volume, int16** out) { | 169 int* new_volume, int16** out) { |
| 198 DCHECK(capture_thread_checker_.CalledOnValidThread()); | 170 DCHECK(capture_thread_checker_.CalledOnValidThread()); |
| 199 TRACE_EVENT0("audio", | 171 TRACE_EVENT0("audio", "MediaStreamAudioProcessor::ProcessAndConsumeData"); |
| 200 "MediaStreamAudioProcessor::ProcessAndConsumeData"); | |
| 201 | 172 |
| 202 if (!capture_converter_->Convert(&capture_frame_)) | 173 if (!capture_converter_->Convert(&capture_frame_)) |
| 203 return false; | 174 return false; |
| 204 | 175 |
| 205 *new_volume = ProcessData(&capture_frame_, capture_delay, volume, | 176 *new_volume = ProcessData(&capture_frame_, capture_delay, volume, |
| 206 key_pressed); | 177 key_pressed); |
| 207 *out = capture_frame_.data_; | 178 *out = capture_frame_.data_; |
| 208 | 179 |
| 209 return true; | 180 return true; |
| 210 } | 181 } |
| 211 | 182 |
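
ProcessAndConsumeData() drains the capture FIFO one processed chunk at a time, so callers are expected to keep calling it until it returns false. A sketch of that pattern on the capture thread; |processor| and DeliverProcessedData() are placeholders rather than code from this CL:

```cpp
// Sketch of the expected calling pattern. DeliverProcessedData() stands in
// for whatever the capturer does with each processed chunk.
#include "base/basictypes.h"
#include "base/time/time.h"
#include "content/renderer/media/media_stream_audio_processor.h"
#include "media/base/audio_bus.h"

namespace content {

void DeliverProcessedData(const int16* data, int new_volume);  // Placeholder.

void OnCapturedAudio(MediaStreamAudioProcessor* processor,
                     media::AudioBus* audio_source,
                     base::TimeDelta capture_delay,
                     int volume,
                     bool key_pressed) {
  processor->PushCaptureData(audio_source);

  int new_volume = 0;
  int16* output = NULL;
  while (processor->ProcessAndConsumeData(capture_delay, volume, key_pressed,
                                          &new_volume, &output)) {
    // |output| points at processor-owned memory; consume or copy it before
    // the next iteration overwrites the frame.
    DeliverProcessedData(output, new_volume);
  }
}

}  // namespace content
```
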
| 212 const media::AudioParameters& MediaStreamAudioProcessor::InputFormat() const { | 183 const media::AudioParameters& MediaStreamAudioProcessor::InputFormat() const { |
| 213 return capture_converter_->source_parameters(); | 184 return capture_converter_->source_parameters(); |
| 214 } | 185 } |
| 215 | 186 |
| 216 const media::AudioParameters& MediaStreamAudioProcessor::OutputFormat() const { | 187 const media::AudioParameters& MediaStreamAudioProcessor::OutputFormat() const { |
| 217 return capture_converter_->sink_parameters(); | 188 return capture_converter_->sink_parameters(); |
| 218 } | 189 } |
| 219 | 190 |
| 191 void MediaStreamAudioProcessor::OnPlayoutData(media::AudioBus* audio_bus, |
| 192 int sample_rate, |
| 193 int audio_delay_milliseconds) { |
| 194 DCHECK(render_thread_checker_.CalledOnValidThread()); |
| 195 #if defined(OS_ANDROID) || defined(OS_IOS) |
| 196 DCHECK(audio_processing_->echo_control_mobile()->is_enabled()); |
| 197 #else |
| 198 DCHECK(audio_processing_->echo_cancellation()->is_enabled()); |
| 199 #endif |
| 200 |
| 201 TRACE_EVENT0("audio", "MediaStreamAudioProcessor::OnPlayoutData"); |
| 202 DCHECK_LT(audio_delay_milliseconds, |
| 203 std::numeric_limits<base::subtle::Atomic32>::max()); |
| 204 base::subtle::Release_Store(&render_delay_ms_, audio_delay_milliseconds); |
| 205 |
| 206 InitializeRenderConverterIfNeeded(sample_rate, audio_bus->channels(), |
| 207 audio_bus->frames()); |
| 208 |
| 209 render_converter_->Push(audio_bus); |
| 210 while (render_converter_->Convert(&render_frame_)) |
| 211 audio_processing_->AnalyzeReverseStream(&render_frame_); |
| 212 } |
| 213 |
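
OnPlayoutData() publishes the playout delay with a Release_Store so the capture thread can read it lock-free; the reader pairs it with an Acquire_Load when the total stream delay is handed to the APM. The real read happens in ProcessData(), outside the lines shown here; the helper below is only an illustrative sketch:

```cpp
// Illustrative sketch of the reader that pairs with the Release_Store above.
#include "base/atomicops.h"
#include "base/time/time.h"

namespace content {

int ComputeTotalDelayMs(const volatile base::subtle::Atomic32* render_delay_ms,
                        base::TimeDelta capture_delay) {
  // Acquire_Load pairs with the Release_Store in OnPlayoutData(), giving the
  // capture thread a coherent value without taking a lock.
  base::subtle::Atomic32 render_delay =
      base::subtle::Acquire_Load(render_delay_ms);
  return static_cast<int>(capture_delay.InMilliseconds()) + render_delay;
}

// The sum is then passed to webrtc::AudioProcessing::set_stream_delay_ms()
// before ProcessStream() is called on the capture frame.

}  // namespace content
```
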
| 220 void MediaStreamAudioProcessor::InitializeAudioProcessingModule( | 214 void MediaStreamAudioProcessor::InitializeAudioProcessingModule( |
| 221 const blink::WebMediaConstraints& constraints, int effects) { | 215 const blink::WebMediaConstraints& constraints, int effects) { |
| 222 DCHECK(!audio_processing_); | 216 DCHECK(!audio_processing_); |
| 223 if (!CommandLine::ForCurrentProcess()->HasSwitch( | 217 if (!CommandLine::ForCurrentProcess()->HasSwitch( |
| 224 switches::kEnableAudioTrackProcessing)) { | 218 switches::kEnableAudioTrackProcessing)) { |
| 225 return; | 219 return; |
| 226 } | 220 } |
| 227 | 221 |
| 228 RTCMediaConstraints native_constraints(constraints); | 222 RTCMediaConstraints native_constraints(constraints); |
| 229 ApplyFixedAudioConstraints(&native_constraints); | 223 ApplyFixedAudioConstraints(&native_constraints); |
| (...skipping 42 matching lines...) |
| 272 } | 266 } |
| 273 | 267 |
| 274 // Create and configure the webrtc::AudioProcessing. | 268 // Create and configure the webrtc::AudioProcessing. |
| 275 audio_processing_.reset(webrtc::AudioProcessing::Create(0)); | 269 audio_processing_.reset(webrtc::AudioProcessing::Create(0)); |
| 276 | 270 |
| 277 // Enable the audio processing components. | 271 // Enable the audio processing components. |
| 278 if (enable_aec) { | 272 if (enable_aec) { |
| 279 EnableEchoCancellation(audio_processing_.get()); | 273 EnableEchoCancellation(audio_processing_.get()); |
| 280 if (enable_experimental_aec) | 274 if (enable_experimental_aec) |
| 281 EnableExperimentalEchoCancellation(audio_processing_.get()); | 275 EnableExperimentalEchoCancellation(audio_processing_.get()); |
| 276 |
| 277 if (playout_data_source_) |
| 278 playout_data_source_->AddPlayoutSink(this); |
| 282 } | 279 } |
| 283 | 280 |
| 284 if (enable_ns) | 281 if (enable_ns) |
| 285 EnableNoiseSuppression(audio_processing_.get()); | 282 EnableNoiseSuppression(audio_processing_.get()); |
| 286 | 283 |
| 287 if (enable_high_pass_filter) | 284 if (enable_high_pass_filter) |
| 288 EnableHighPassFilter(audio_processing_.get()); | 285 EnableHighPassFilter(audio_processing_.get()); |
| 289 | 286 |
| 290 if (enable_typing_detection) { | 287 if (enable_typing_detection) { |
| 291 // TODO(xians): Remove this |typing_detector_| after the typing suppression | 288 // TODO(xians): Remove this |typing_detector_| after the typing suppression |
| (...skipping 124 matching lines...) |
| 416 // Return 0 if the volume has not been changed, otherwise return the new | 413 // Return 0 if the volume has not been changed, otherwise return the new |
| 417 // volume. | 414 // volume. |
| 418 return (agc->stream_analog_level() == volume) ? | 415 return (agc->stream_analog_level() == volume) ? |
| 419 0 : agc->stream_analog_level(); | 416 0 : agc->stream_analog_level(); |
| 420 } | 417 } |
| 421 | 418 |
| 422 void MediaStreamAudioProcessor::StopAudioProcessing() { | 419 void MediaStreamAudioProcessor::StopAudioProcessing() { |
| 423 if (!audio_processing_.get()) | 420 if (!audio_processing_.get()) |
| 424 return; | 421 return; |
| 425 | 422 |
| 423 if (playout_data_source_) |
| 424 playout_data_source_->RemovePlayoutSink(this); |
| 425 |
| 426 audio_processing_.reset(); | 426 audio_processing_.reset(); |
| 427 } | 427 } |
| 428 | 428 |
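
The AddPlayoutSink() call added in InitializeAudioProcessingModule() is mirrored here: RemovePlayoutSink() runs before audio_processing_.reset(), so no further OnPlayoutData() callbacks can reach a destroyed APM. A hypothetical RAII wrapper that captures the same register/unregister contract; it does not exist in the tree and is a sketch only:

```cpp
// Hypothetical helper, sketching the symmetry this change relies on.
#include "base/basictypes.h"

namespace content {

class ScopedPlayoutSink {
 public:
  ScopedPlayoutSink(WebRtcPlayoutDataSource* source,
                    WebRtcPlayoutDataSource::Sink* sink)
      : source_(source), sink_(sink) {
    if (source_)
      source_->AddPlayoutSink(sink_);
  }

  ~ScopedPlayoutSink() {
    // Unregister before the owner tears down |audio_processing_| so no more
    // playout callbacks arrive.
    if (source_)
      source_->RemovePlayoutSink(sink_);
  }

 private:
  WebRtcPlayoutDataSource* const source_;
  WebRtcPlayoutDataSource::Sink* const sink_;

  DISALLOW_COPY_AND_ASSIGN(ScopedPlayoutSink);
};

}  // namespace content
```
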
| 429 } // namespace content | 429 } // namespace content |