OLD | NEW |
(Empty) | |
| 1 // Copyright 2013 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. |
| 4 |
| 5 #include "content/renderer/media/webrtc_audio_processor.h" |
| 6 |
| 7 #include "base/command_line.h" |
| 8 #include "base/debug/trace_event.h" |
| 9 #include "content/public/common/content_switches.h" |
| 10 #include "content/renderer/media/webrtc_audio_processor_options.h" |
| 11 #include "media/audio/audio_parameters.h" |
| 12 #include "media/base/audio_converter.h" |
| 13 #include "media/base/audio_fifo.h" |
| 14 #include "media/base/channel_layout.h" |
| 15 |
| 16 namespace content { |
| 17 |
| 18 namespace { |
| 19 |
| 20 using webrtc::AudioProcessing; |
| 21 using webrtc::MediaConstraintsInterface; |
| 22 |
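| // Format that webrtc::AudioProcessing runs on: mono, at 16 kHz on Android |
| // and 32 kHz on all other platforms. |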
| 23 #if defined(ANDROID) |
| 24 const int kAudioProcessingSampleRate = 16000; |
| 25 #else |
| 26 const int kAudioProcessingSampleRate = 32000; |
| 27 #endif |
| 28 const int kAudioProcessingNumberOfChannel = 1; |
| 29 |
| 30 const int kMaxNumberOfBuffersInFifo = 2; |
| 31 |
| 32 } // namespace |
| 33 |
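| // Helper class owned by WebRtcAudioProcessor. It buffers incoming audio in |
| // a FIFO and uses media::AudioConverter to remix and resample it into 10 ms |
| // webrtc::AudioFrame chunks in the sink format. |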
| 34 class WebRtcAudioProcessor::WebRtcAudioConverter |
| 35 : public media::AudioConverter::InputCallback { |
| 36 public: |
| 37 WebRtcAudioConverter(const media::AudioParameters& source_params, |
| 38 const media::AudioParameters& sink_params) |
| 39 : source_params_(source_params), |
| 40 sink_params_(sink_params), |
| 41 audio_converter_(source_params, sink_params, false) { |
| 42 worker_thread_checker_.DetachFromThread(); |
| 43 |
| 44 audio_converter_.AddInput(this); |
| 45 // Create and initialize the audio FIFO and the audio bus wrapper. |
| 46 // The FIFO should hold at least twice the source buffer size or twice |
| 47 // the sink buffer size, whichever is larger. |
| 48 int buffer_size = std::max( |
| 49 kMaxNumberOfBuffersInFifo * source_params_.frames_per_buffer(), |
| 50 kMaxNumberOfBuffersInFifo * sink_params_.frames_per_buffer()); |
| 51 fifo_.reset(new media::AudioFifo(source_params_.channels(), buffer_size)); |
| 52 // TODO(xians): Use CreateWrapper to save one memcpy. |
| 53 audio_wrapper_ = media::AudioBus::Create(sink_params_.channels(), |
| 54 sink_params_.frames_per_buffer()); |
| 55 } |
| 56 |
| 57 virtual ~WebRtcAudioConverter() { |
| 58 DCHECK(create_thread_checker_.CalledOnValidThread()); |
| 59 audio_converter_.RemoveInput(this); |
| 60 } |
| 61 |
| 62 void Push(media::AudioBus* audio_source) { |
| 63 // Called on the audio thread: the capture audio thread for |
| 64 // |WebRtcAudioProcessor::capture_converter_| and the render audio thread |
| 65 // for |WebRtcAudioProcessor::render_converter_|. It must be the same |
| 66 // thread that calls Convert(). |
| 67 DCHECK(worker_thread_checker_.CalledOnValidThread()); |
| 68 fifo_->Push(audio_source); |
| 69 } |
| 70 |
| 71 bool Convert(webrtc::AudioFrame* out) { |
| 72 // Called on the audio thread: the capture audio thread for |
| 73 // |WebRtcAudioProcessor::capture_converter_| and the render audio thread |
| 74 // for |WebRtcAudioProcessor::render_converter_|. |
| 75 // Returns false if the FIFO does not yet contain 10 ms of data. |
| 76 DCHECK(worker_thread_checker_.CalledOnValidThread()); |
| 77 if (fifo_->frames() < (source_params_.sample_rate() / 100)) |
| 78 return false; |
| 79 |
| 80 // Convert 10 ms of data to the output format; this triggers ProvideInput(). |
| 81 audio_converter_.Convert(audio_wrapper_.get()); |
| 82 |
| 83 // TODO(xians): Figure out a better way to handle the interleaved and |
| 84 // deinterleaved format switching. |
| 85 audio_wrapper_->ToInterleaved(audio_wrapper_->frames(), |
| 86 sink_params_.bits_per_sample() / 8, |
| 87 out->data_); |
| 88 |
| 89 out->samples_per_channel_ = sink_params_.frames_per_buffer(); |
| 90 out->sample_rate_hz_ = sink_params_.sample_rate(); |
| 91 out->speech_type_ = webrtc::AudioFrame::kNormalSpeech; |
| 92 out->vad_activity_ = webrtc::AudioFrame::kVadUnknown; |
| 93 out->num_channels_ = sink_params_.channels(); |
| 94 |
| 95 return true; |
| 96 } |
| 97 |
| 98 const media::AudioParameters& source_parameters() const { |
| 99 return source_params_; |
| 100 } |
| 101 const media::AudioParameters& sink_parameters() const { |
| 102 return sink_params_; |
| 103 } |
| 104 |
| 105 private: |
| 106 // AudioConverter::InputCallback implementation. |
| 107 virtual double ProvideInput(media::AudioBus* audio_bus, |
| 108 base::TimeDelta buffer_delay) { |
| 109 // Called on the real-time audio thread. |
| 110 // TODO(xians): Figure out why the first Convert() triggers ProvideInput |
| 111 // two times. |
| 112 if (fifo_->frames() < audio_bus->frames()) |
| 113 return 0; |
| 114 |
| 115 fifo_->Consume(audio_bus, 0, audio_bus->frames()); |
| 116 return 1.0; |
| 117 } |
| 118 |
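| // |create_thread_checker_| verifies that the converter is destroyed on the |
| // thread that created it; |worker_thread_checker_| verifies that Push() and |
| // Convert() are called on the same audio thread. |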
| 119 base::ThreadChecker create_thread_checker_; |
| 120 base::ThreadChecker worker_thread_checker_; |
| 121 media::AudioParameters source_params_; |
| 122 media::AudioParameters sink_params_; |
| 123 |
| 124 // TODO(xians): consider using SincResampler to save some memcpy. |
| 125 // Handles mixing and resampling between input and output parameters. |
| 126 media::AudioConverter audio_converter_; |
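| // Holds the output of |audio_converter_| before it is interleaved into the |
| // webrtc::AudioFrame in Convert(). |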
| 127 scoped_ptr<media::AudioBus> audio_wrapper_; |
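| // Buffers the pushed source data until at least 10 ms is available for |
| // conversion. |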
| 128 scoped_ptr<media::AudioFifo> fifo_; |
| 129 }; |
| 130 |
| 131 WebRtcAudioProcessor::WebRtcAudioProcessor( |
| 132 const webrtc::MediaConstraintsInterface* constraints) { |
| 133 capture_thread_checker_.DetachFromThread(); |
| 134 render_thread_checker_.DetachFromThread(); |
| 135 InitializeAudioProcessingModule(constraints); |
| 136 } |
| 137 |
| 138 WebRtcAudioProcessor::~WebRtcAudioProcessor() { |
| 139 DCHECK(main_thread_checker_.CalledOnValidThread()); |
| 140 StopAudioProcessing(); |
| 141 } |
| 142 |
| 143 void WebRtcAudioProcessor::SetCaptureFormat( |
| 144 const media::AudioParameters& source_params) { |
| 145 DCHECK(main_thread_checker_.CalledOnValidThread()); |
| 146 DCHECK(source_params.IsValid()); |
| 147 |
| 148 // Create and initialize the audio converter for the source data. |
| 149 // When webrtc::AudioProcessing is enabled, the sink format of the |
| 150 // converter is the post-processed data format: 32 kHz mono on desktop |
| 151 // and 16 kHz mono on Android. When AudioProcessing is disabled, the |
| 152 // sink format is the same as the source format. |
| 153 const int sink_sample_rate = audio_processing_ ? |
| 154 kAudioProcessingSampleRate : source_params.sample_rate(); |
| 155 const media::ChannelLayout sink_channel_layout = audio_processing_ ? |
| 156 media::CHANNEL_LAYOUT_MONO : source_params.channel_layout(); |
| 157 |
| 158 // WebRTC uses 10 ms of data as its native packet size. |
| 159 media::AudioParameters sink_params( |
| 160 media::AudioParameters::AUDIO_PCM_LOW_LATENCY, sink_channel_layout, |
| 161 sink_sample_rate, 16, sink_sample_rate / 100); |
| 162 capture_converter_.reset( |
| 163 new WebRtcAudioConverter(source_params, sink_params)); |
| 164 } |
| 165 |
| 166 void WebRtcAudioProcessor::PushCaptureData(media::AudioBus* audio_source) { |
| 167 DCHECK(capture_thread_checker_.CalledOnValidThread()); |
| 168 capture_converter_->Push(audio_source); |
| 169 } |
| 170 |
| 171 bool WebRtcAudioProcessor::ProcessAndConsumeData( |
| 172 const base::TimeDelta& capture_delay, int volume, bool key_pressed, |
| 173 int16** out) { |
| 174 DCHECK(capture_thread_checker_.CalledOnValidThread()); |
| 175 TRACE_EVENT0("audio", |
| 176 "WebRtcAudioProcessor::ProcessAndConsumeData"); |
| 177 |
| 178 if (!capture_converter_->Convert(&capture_frame_)) |
| 179 return false; |
| 180 |
| 181 ProcessData(&capture_frame_, capture_delay, volume, key_pressed); |
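| // The returned pointer refers to |capture_frame_|'s internal buffer; it |
| // remains valid until the next call to ProcessAndConsumeData(). |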
| 182 *out = capture_frame_.data_; |
| 183 |
| 184 return true; |
| 185 } |
| 186 |
| 187 const media::AudioParameters& WebRtcAudioProcessor::OutputFormat() const { |
| 188 return capture_converter_->sink_parameters(); |
| 189 } |
| 190 |
| 191 void WebRtcAudioProcessor::ProcessData(webrtc::AudioFrame* audio_frame, |
| 192 const base::TimeDelta& capture_delay, |
| 193 int volume, |
| 194 bool key_pressed) { |
| 195 DCHECK(capture_thread_checker_.CalledOnValidThread()); |
| 196 if (!audio_processing_) |
| 197 return; |
| 198 |
| 199 TRACE_EVENT0("audio", "WebRtcAudioProcessor::Process10MsData"); |
| 200 DCHECK_EQ(audio_processing_->sample_rate_hz(), |
| 201 capture_converter_->sink_parameters().sample_rate()); |
| 202 DCHECK_EQ(audio_processing_->num_input_channels(), |
| 203 capture_converter_->sink_parameters().channels()); |
| 204 DCHECK_EQ(audio_processing_->num_output_channels(), |
| 205 capture_converter_->sink_parameters().channels()); |
| 206 |
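| // The total delay is the capture delay plus the most recently reported |
| // render delay; it is handed to the audio processing so the AEC can align |
| // the near-end and far-end streams. |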
| 207 int total_delay_ms = 0; |
| 208 { |
| 209 base::AutoLock auto_lock(lock_); |
| 210 total_delay_ms = |
| 211 capture_delay.InMilliseconds() + render_delay_.InMilliseconds(); |
| 212 } |
| 213 |
| 214 audio_processing_->set_stream_delay_ms(total_delay_ms); |
| 215 webrtc::GainControl* agc = audio_processing_->gain_control(); |
| 216 if (agc->set_stream_analog_level(volume)) |
| 217 NOTREACHED(); |
| 218 int err = audio_processing_->ProcessStream(audio_frame); |
| 219 DCHECK(!err) << "ProcessStream() error: " << err; |
| 220 |
| 221 // TODO(xians): Add support for AGC, typing detection, audio level |
| 222 // calculation, and stereo swapping. |
| 223 } |
| 224 |
| 225 void WebRtcAudioProcessor::PushRenderData( |
| 226 const int16* render_audio, int sample_rate, int number_of_channels, |
| 227 int number_of_frames, const base::TimeDelta& render_delay) { |
| 228 DCHECK(render_thread_checker_.CalledOnValidThread()); |
| 229 |
| 230 // Return immediately if echo cancellation is off. |
| 231 if (!audio_processing_ || |
| 232 !audio_processing_->echo_cancellation()->is_enabled()) |
| 233 return; |
| 234 |
| 235 TRACE_EVENT0("audio", |
| 236 "WebRtcAudioProcessor::FeedRenderDataToAudioProcessing"); |
| 237 { |
| 238 base::AutoLock auto_lock(lock_); |
| 239 render_delay_ = render_delay; |
| 240 } |
| 241 |
| 242 InitializeRenderConverterIfNeeded(sample_rate, number_of_channels, |
| 243 number_of_frames); |
| 244 |
| 245 // TODO(xians): Avoid this extra interleave/deinterleave. |
| 246 render_data_bus_->FromInterleaved(render_audio, |
| 247 render_data_bus_->frames(), |
| 248 sizeof(render_audio[0])); |
| 249 render_converter_->Push(render_data_bus_.get()); |
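| // Drain the converter in 10 ms chunks and feed each chunk to the audio |
| // processing as the far-end (render) reference for echo cancellation. |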
| 250 while (render_converter_->Convert(&render_frame_)) { |
| 251 audio_processing_->AnalyzeReverseStream(&render_frame_); |
| 252 } |
| 253 } |
| 254 |
| 255 void WebRtcAudioProcessor::InitializeAudioProcessingModule( |
| 256 const webrtc::MediaConstraintsInterface* constraints) { |
| 257 if (!CommandLine::ForCurrentProcess()->HasSwitch( |
| 258 switches::kEnableAudioTrackProcessing)) { |
| 259 return; |
| 260 } |
| 261 |
| 262 if (!constraints) |
| 263 return; |
| 264 |
| 265 const bool enable_aec = GetPropertyFromConstraints( |
| 266 constraints, MediaConstraintsInterface::kEchoCancellation); |
| 267 const bool enable_ns = GetPropertyFromConstraints( |
| 268 constraints, MediaConstraintsInterface::kNoiseSuppression); |
| 269 const bool enable_high_pass_filter = GetPropertyFromConstraints( |
| 270 constraints, MediaConstraintsInterface::kHighpassFilter); |
| 271 const bool start_aec_dump = GetPropertyFromConstraints( |
| 272 constraints, MediaConstraintsInterface::kInternalAecDump); |
| 273 #if defined(IOS) || defined(ANDROID) |
| 274 const bool enable_experimental_aec = false; |
| 275 const bool enable_typing_detection = false; |
| 276 #else |
| 277 const bool enable_experimental_aec = GetPropertyFromConstraints( |
| 278 constraints, MediaConstraintsInterface::kExperimentalEchoCancellation); |
| 279 const bool enable_typing_detection = GetPropertyFromConstraints( |
| 280 constraints, MediaConstraintsInterface::kTypingNoiseDetection); |
| 281 #endif |
| 282 |
| 283 // Return without creating the audio processing if no processing |
| 284 // component is enabled. |
| 285 if (!enable_aec && !enable_experimental_aec && !enable_ns && |
| 286 !enable_high_pass_filter && !enable_typing_detection) { |
| 287 return; |
| 288 } |
| 289 |
| 290 // Create and configure the audio processing if it does not exist. |
| 291 if (!audio_processing_) |
| 292 audio_processing_.reset(webrtc::AudioProcessing::Create(0)); |
| 293 |
| 294 // Enable the audio processing components. |
| 295 if (enable_aec) { |
| 296 EnableEchoCancellation(audio_processing_.get()); |
| 297 |
| 298 if (enable_experimental_aec) |
| 299 EnableExperimentalEchoCancellation(audio_processing_.get()); |
| 300 } |
| 301 |
| 302 if (enable_ns) |
| 303 EnableNoiseSuppression(audio_processing_.get()); |
| 304 |
| 305 if (enable_high_pass_filter) |
| 306 EnableHighPassFilter(audio_processing_.get()); |
| 307 |
| 308 if (enable_typing_detection) |
| 309 EnableTypingDetection(audio_processing_.get()); |
| 310 |
| 311 if (enable_aec && start_aec_dump) |
| 312 StartAecDump(audio_processing_.get()); |
| 313 |
| 314 // Configure the audio format that the audio processing runs on. This has |
| 315 // to be done after all the needed components have been enabled. |
| 316 if (audio_processing_->set_sample_rate_hz(kAudioProcessingSampleRate)) |
| 317 NOTREACHED(); |
| 318 if (audio_processing_->set_num_channels(kAudioProcessingNumberOfChannel, |
| 319 kAudioProcessingNumberOfChannel)) |
| 320 NOTREACHED(); |
| 321 } |
| 322 |
| 323 void WebRtcAudioProcessor::InitializeRenderConverterIfNeeded( |
| 324 int sample_rate, int number_of_channels, int frames_per_buffer) { |
| 325 // TODO(xians): Figure out if we need to handle the buffer size change. |
| 326 if (render_converter_.get() && |
| 327 render_converter_->source_parameters().sample_rate() == sample_rate && |
| 328 render_converter_->source_parameters().channels() == number_of_channels) { |
| 329 // Do nothing if |render_converter_| has already been set up properly. |
| 330 return; |
| 331 } |
| 332 |
| 333 media::AudioParameters source_params( |
| 334 media::AudioParameters::AUDIO_PCM_LOW_LATENCY, |
| 335 media::GuessChannelLayout(number_of_channels), sample_rate, 16, |
| 336 frames_per_buffer); |
| 337 media::AudioParameters sink_params( |
| 338 media::AudioParameters::AUDIO_PCM_LOW_LATENCY, |
| 339 media::CHANNEL_LAYOUT_MONO, kAudioProcessingSampleRate, 16, |
| 340 kAudioProcessingSampleRate / 100); |
| 341 render_converter_.reset(new WebRtcAudioConverter(source_params, sink_params)); |
| 342 render_data_bus_ = media::AudioBus::Create(number_of_channels, |
| 343 frames_per_buffer); |
| 344 } |
| 345 |
| 346 void WebRtcAudioProcessor::StopAudioProcessing() { |
| 347 if (!audio_processing_.get()) |
| 348 return; |
| 349 |
| 350 // It is safe to stop the AEC dump even if it has not been started. |
| 351 StopAecDump(audio_processing_.get()); |
| 352 |
| 353 audio_processing_.reset(); |
| 354 } |
| 355 |
| 356 } // namespace content |