| OLD | NEW |
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "content/renderer/media/webrtc_audio_capturer.h" | 5 #include "content/renderer/media/webrtc_audio_capturer.h" |
| 6 | 6 |
| 7 #include "base/bind.h" | 7 #include "base/bind.h" |
| 8 #include "base/logging.h" | 8 #include "base/logging.h" |
| 9 #include "base/metrics/histogram.h" | 9 #include "base/metrics/histogram.h" |
| 10 #include "base/strings/string_util.h" | 10 #include "base/strings/string_util.h" |
| (...skipping 169 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 180 device_info_.device.input.sample_rate) == | 180 device_info_.device.input.sample_rate) == |
| 181 &kValidInputRates[arraysize(kValidInputRates)]) { | 181 &kValidInputRates[arraysize(kValidInputRates)]) { |
| 182 DLOG(ERROR) << device_info_.device.input.sample_rate | 182 DLOG(ERROR) << device_info_.device.input.sample_rate |
| 183 << " is not a supported input rate."; | 183 << " is not a supported input rate."; |
| 184 return false; | 184 return false; |
| 185 } | 185 } |
| 186 | 186 |
| 187 // Create and configure the default audio capturing source. | 187 // Create and configure the default audio capturing source. |
| 188 SetCapturerSource(AudioDeviceFactory::NewInputDevice(render_view_id_), | 188 SetCapturerSource(AudioDeviceFactory::NewInputDevice(render_view_id_), |
| 189 channel_layout, | 189 channel_layout, |
| 190 static_cast<float>(device_info_.device.input.sample_rate), | 190 static_cast<float>(device_info_.device.input.sample_rate)); |
| 191 device_info_.device.input.effects, | |
| 192 constraints_); | |
| 193 | 191 |
| 194 // Add the capturer to the WebRtcAudioDeviceImpl since it needs some hardware | 192 // Add the capturer to the WebRtcAudioDeviceImpl since it needs some hardware |
| 195 // information from the capturer. | 193 // information from the capturer. |
| 196 if (audio_device_) | 194 if (audio_device_) |
| 197 audio_device_->AddAudioCapturer(this); | 195 audio_device_->AddAudioCapturer(this); |
| 198 | 196 |
| 199 return true; | 197 return true; |
| 200 } | 198 } |
| 201 | 199 |
// Creates the capturer in a stopped state (|running_| is false until
// Start() is called). The MediaStreamAudioProcessor is constructed eagerly
// here from |constraints|, the selected input device's effects, and
// |audio_device|, so it exists for the capturer's whole lifetime; later
// format changes are pushed to it via OnCaptureFormatChanged() in
// SetCapturerSource() instead of re-creating it.
WebRtcAudioCapturer::WebRtcAudioCapturer(
    int render_view_id,
    const StreamDeviceInfo& device_info,
    const blink::WebMediaConstraints& constraints,
    WebRtcAudioDeviceImpl* audio_device)
    : constraints_(constraints),
      audio_processor_(new MediaStreamAudioProcessor(
          constraints, device_info.device.input.effects, audio_device)),
      running_(false),
      render_view_id_(render_view_id),
      device_info_(device_info),
      volume_(0),
      peer_connection_mode_(false),
      key_pressed_(false),
      need_audio_processing_(false),
      audio_device_(audio_device) {
  DVLOG(1) << "WebRtcAudioCapturer::WebRtcAudioCapturer()";
}
| (...skipping 36 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 254 // Clear the delegate to ensure that no more capture callbacks will | 254 // Clear the delegate to ensure that no more capture callbacks will |
| 255 // be sent to this sink. Also avoids a possible crash which can happen | 255 // be sent to this sink. Also avoids a possible crash which can happen |
| 256 // if this method is called while capturing is active. | 256 // if this method is called while capturing is active. |
| 257 if (removed_item.get()) | 257 if (removed_item.get()) |
| 258 removed_item->Reset(); | 258 removed_item->Reset(); |
| 259 } | 259 } |
| 260 | 260 |
| 261 void WebRtcAudioCapturer::SetCapturerSource( | 261 void WebRtcAudioCapturer::SetCapturerSource( |
| 262 const scoped_refptr<media::AudioCapturerSource>& source, | 262 const scoped_refptr<media::AudioCapturerSource>& source, |
| 263 media::ChannelLayout channel_layout, | 263 media::ChannelLayout channel_layout, |
| 264 float sample_rate, | 264 float sample_rate) { |
| 265 int effects, | |
| 266 const blink::WebMediaConstraints& constraints) { | |
| 267 DCHECK(thread_checker_.CalledOnValidThread()); | 265 DCHECK(thread_checker_.CalledOnValidThread()); |
| 268 DVLOG(1) << "SetCapturerSource(channel_layout=" << channel_layout << "," | 266 DVLOG(1) << "SetCapturerSource(channel_layout=" << channel_layout << "," |
| 269 << "sample_rate=" << sample_rate << ")"; | 267 << "sample_rate=" << sample_rate << ")"; |
| 270 scoped_refptr<media::AudioCapturerSource> old_source; | 268 scoped_refptr<media::AudioCapturerSource> old_source; |
| 271 bool restart_source = false; | 269 bool restart_source = false; |
| 272 { | 270 { |
| 273 base::AutoLock auto_lock(lock_); | 271 base::AutoLock auto_lock(lock_); |
| 274 if (source_.get() == source.get()) | 272 if (source_.get() == source.get()) |
| 275 return; | 273 return; |
| 276 | 274 |
| (...skipping 10 matching lines...) Expand all Loading... |
| 287 old_source->Stop(); | 285 old_source->Stop(); |
| 288 | 286 |
| 289 // Dispatch the new parameters both to the sink(s) and to the new source, | 287 // Dispatch the new parameters both to the sink(s) and to the new source, |
| 290 // also apply the new |constraints|. | 288 // also apply the new |constraints|. |
| 291 // The idea is to get rid of any dependency of the microphone parameters | 289 // The idea is to get rid of any dependency of the microphone parameters |
| 292 // which would normally be used by default. | 290 // which would normally be used by default. |
| 293 // bits_per_sample is always 16 for now. | 291 // bits_per_sample is always 16 for now. |
| 294 int buffer_size = GetBufferSize(sample_rate); | 292 int buffer_size = GetBufferSize(sample_rate); |
| 295 media::AudioParameters params(media::AudioParameters::AUDIO_PCM_LOW_LATENCY, | 293 media::AudioParameters params(media::AudioParameters::AUDIO_PCM_LOW_LATENCY, |
| 296 channel_layout, 0, sample_rate, | 294 channel_layout, 0, sample_rate, |
| 297 16, buffer_size, effects); | 295 16, buffer_size, |
| 298 scoped_refptr<MediaStreamAudioProcessor> new_audio_processor( | 296 device_info_.device.input.effects); |
| 299 new MediaStreamAudioProcessor(params, constraints, effects, | 297 |
| 300 audio_device_)); | |
| 301 { | 298 { |
| 302 base::AutoLock auto_lock(lock_); | 299 base::AutoLock auto_lock(lock_); |
| 303 audio_processor_ = new_audio_processor; | 300 // Notify the |audio_processor_| of the new format. |
| 304 need_audio_processing_ = NeedsAudioProcessing(constraints, effects); | 301 audio_processor_->OnCaptureFormatChanged(params); |
| 305 | 302 |
| 303 need_audio_processing_ = NeedsAudioProcessing( |
| 304 constraints_, device_info_.device.input.effects); |
| 306 // Notify all tracks about the new format. | 305 // Notify all tracks about the new format. |
| 307 tracks_.TagAll(); | 306 tracks_.TagAll(); |
| 308 } | 307 } |
| 309 | 308 |
| 310 if (source.get()) | 309 if (source.get()) |
| 311 source->Initialize(params, this, session_id()); | 310 source->Initialize(params, this, session_id()); |
| 312 | 311 |
| 313 if (restart_source) | 312 if (restart_source) |
| 314 Start(); | 313 Start(); |
| 315 } | 314 } |
| (...skipping 22 matching lines...) Expand all Loading... |
| 338 // Do nothing if the current buffer size is the WebRtc native buffer size. | 337 // Do nothing if the current buffer size is the WebRtc native buffer size. |
| 339 if (GetBufferSize(input_params.sample_rate()) == | 338 if (GetBufferSize(input_params.sample_rate()) == |
| 340 input_params.frames_per_buffer()) { | 339 input_params.frames_per_buffer()) { |
| 341 return; | 340 return; |
| 342 } | 341 } |
| 343 | 342 |
| 344 // Create a new audio stream as source which will open the hardware using | 343 // Create a new audio stream as source which will open the hardware using |
| 345 // WebRtc native buffer size. | 344 // WebRtc native buffer size. |
| 346 SetCapturerSource(AudioDeviceFactory::NewInputDevice(render_view_id), | 345 SetCapturerSource(AudioDeviceFactory::NewInputDevice(render_view_id), |
| 347 input_params.channel_layout(), | 346 input_params.channel_layout(), |
| 348 static_cast<float>(input_params.sample_rate()), | 347 static_cast<float>(input_params.sample_rate())); |
| 349 input_params.effects(), | |
| 350 constraints_); | |
| 351 } | 348 } |
| 352 | 349 |
| 353 void WebRtcAudioCapturer::Start() { | 350 void WebRtcAudioCapturer::Start() { |
| 351 DCHECK(thread_checker_.CalledOnValidThread()); |
| 354 DVLOG(1) << "WebRtcAudioCapturer::Start()"; | 352 DVLOG(1) << "WebRtcAudioCapturer::Start()"; |
| 355 base::AutoLock auto_lock(lock_); | 353 base::AutoLock auto_lock(lock_); |
| 356 if (running_ || !source_) | 354 if (running_ || !source_) |
| 357 return; | 355 return; |
| 358 | 356 |
| 359 // Start the data source, i.e., start capturing data from the current source. | 357 // Start the data source, i.e., start capturing data from the current source. |
| 360 // We need to set the AGC control before starting the stream. | 358 // We need to set the AGC control before starting the stream. |
| 361 source_->SetAutomaticGainControl(true); | 359 source_->SetAutomaticGainControl(true); |
| 362 source_->Start(); | 360 source_->Start(); |
| 363 running_ = true; | 361 running_ = true; |
| 364 } | 362 } |
| 365 | 363 |
// Stops capturing and detaches every registered track. Must be called on the
// thread that created the capturer. Safe to call when not running.
void WebRtcAudioCapturer::Stop() {
  DCHECK(thread_checker_.CalledOnValidThread());
  DVLOG(1) << "WebRtcAudioCapturer::Stop()";
  scoped_refptr<media::AudioCapturerSource> source;
  TrackList::ItemList tracks;
  {
    // Snapshot the source and the track list under |lock_|, then perform the
    // actual teardown below without holding the lock, so we never call out
    // to the source or the tracks while it is held.
    base::AutoLock auto_lock(lock_);
    if (!running_)
      return;

    source = source_;
    tracks = tracks_.Items();
    tracks_.Clear();
    running_ = false;
  }

  // Remove the capturer object from the WebRtcAudioDeviceImpl.
  if (audio_device_)
    audio_device_->RemoveAudioCapturer(this);

  // Stop the Aec dump.
  StopAecDump();

  // Stop the sink tracks first, then the underlying audio source.
  for (TrackList::ItemList::const_iterator it = tracks.begin();
       it != tracks.end();
       ++it) {
    (*it)->Stop();
  }

  if (source.get())
    source->Stop();
}
| 394 | 396 |
| (...skipping 30 matching lines...) Expand all Loading... |
| 425 // allows the user to set a scaling that is higher than 100%. It means that | 427 // allows the user to set a scaling that is higher than 100%. It means that |
| 426 // even if the reported maximum levels is N, the actual microphone level can | 428 // even if the reported maximum levels is N, the actual microphone level can |
| 427 // go up to 1.5x*N and that corresponds to a normalized |volume| of 1.5x. | 429 // go up to 1.5x*N and that corresponds to a normalized |volume| of 1.5x. |
| 428 DCHECK_LE(volume, 1.6); | 430 DCHECK_LE(volume, 1.6); |
| 429 #endif | 431 #endif |
| 430 | 432 |
| 431 TrackList::ItemList tracks; | 433 TrackList::ItemList tracks; |
| 432 TrackList::ItemList tracks_to_notify_format; | 434 TrackList::ItemList tracks_to_notify_format; |
| 433 int current_volume = 0; | 435 int current_volume = 0; |
| 434 base::TimeDelta audio_delay; | 436 base::TimeDelta audio_delay; |
| 435 scoped_refptr<MediaStreamAudioProcessor> audio_processor; | |
| 436 bool need_audio_processing = true; | 437 bool need_audio_processing = true; |
| 437 { | 438 { |
| 438 base::AutoLock auto_lock(lock_); | 439 base::AutoLock auto_lock(lock_); |
| 439 if (!running_) | 440 if (!running_) |
| 440 return; | 441 return; |
| 441 | 442 |
| 442 // Map internal volume range of [0.0, 1.0] into [0, 255] used by the | 443 // Map internal volume range of [0.0, 1.0] into [0, 255] used by the |
| 443 // webrtc::VoiceEngine. webrtc::VoiceEngine will handle the case when the | 444 // webrtc::VoiceEngine. webrtc::VoiceEngine will handle the case when the |
| 444 // volume is higher than 255. | 445 // volume is higher than 255. |
| 445 volume_ = static_cast<int>((volume * MaxVolume()) + 0.5); | 446 volume_ = static_cast<int>((volume * MaxVolume()) + 0.5); |
| 446 current_volume = volume_; | 447 current_volume = volume_; |
| 447 audio_delay = base::TimeDelta::FromMilliseconds(audio_delay_milliseconds); | 448 audio_delay = base::TimeDelta::FromMilliseconds(audio_delay_milliseconds); |
| 448 audio_delay_ = audio_delay; | 449 audio_delay_ = audio_delay; |
| 449 key_pressed_ = key_pressed; | 450 key_pressed_ = key_pressed; |
| 450 tracks = tracks_.Items(); | 451 tracks = tracks_.Items(); |
| 451 tracks_.RetrieveAndClearTags(&tracks_to_notify_format); | 452 tracks_.RetrieveAndClearTags(&tracks_to_notify_format); |
| 452 audio_processor = audio_processor_; | |
| 453 | 453 |
| 454 // Set the flag to turn on the audio processing in PeerConnection level. | 454 // Set the flag to turn on the audio processing in PeerConnection level. |
| 455 // Note that, we turn off the audio processing in PeerConnection if the | 455 // Note that, we turn off the audio processing in PeerConnection if the |
| 456 // processor has already processed the data. | 456 // processor has already processed the data. |
| 457 need_audio_processing = need_audio_processing_ ? | 457 need_audio_processing = need_audio_processing_ ? |
| 458 !audio_processor->has_audio_processing() : false; | 458 !audio_processor_->has_audio_processing() : false; |
| 459 } | 459 } |
| 460 | 460 |
| 461 DCHECK(audio_processor->InputFormat().IsValid()); | 461 DCHECK(audio_processor_->InputFormat().IsValid()); |
| 462 DCHECK_EQ(audio_source->channels(), | 462 DCHECK_EQ(audio_source->channels(), |
| 463 audio_processor->InputFormat().channels()); | 463 audio_processor_->InputFormat().channels()); |
| 464 DCHECK_EQ(audio_source->frames(), | 464 DCHECK_EQ(audio_source->frames(), |
| 465 audio_processor->InputFormat().frames_per_buffer()); | 465 audio_processor_->InputFormat().frames_per_buffer()); |
| 466 | 466 |
| 467 // Notify the tracks on when the format changes. This will do nothing if | 467 // Notify the tracks on when the format changes. This will do nothing if |
| 468 // |tracks_to_notify_format| is empty. | 468 // |tracks_to_notify_format| is empty. |
| 469 media::AudioParameters output_params = audio_processor_->OutputFormat(); | 469 media::AudioParameters output_params = audio_processor_->OutputFormat(); |
| 470 for (TrackList::ItemList::const_iterator it = tracks_to_notify_format.begin(); | 470 for (TrackList::ItemList::const_iterator it = tracks_to_notify_format.begin(); |
| 471 it != tracks_to_notify_format.end(); ++it) { | 471 it != tracks_to_notify_format.end(); ++it) { |
| 472 (*it)->OnSetFormat(output_params); | 472 (*it)->OnSetFormat(output_params); |
| 473 } | 473 } |
| 474 | 474 |
| 475 // Push the data to the processor for processing. | 475 // Push the data to the processor for processing. |
| 476 audio_processor->PushCaptureData(audio_source); | 476 audio_processor_->PushCaptureData(audio_source); |
| 477 | 477 |
| 478 // Process and consume the data in the processor until there is not enough | 478 // Process and consume the data in the processor until there is not enough |
| 479 // data in the processor. | 479 // data in the processor. |
| 480 int16* output = NULL; | 480 int16* output = NULL; |
| 481 int new_volume = 0; | 481 int new_volume = 0; |
| 482 while (audio_processor->ProcessAndConsumeData( | 482 while (audio_processor_->ProcessAndConsumeData( |
| 483 audio_delay, current_volume, key_pressed, &new_volume, &output)) { | 483 audio_delay, current_volume, key_pressed, &new_volume, &output)) { |
| 484 // Feed the post-processed data to the tracks. | 484 // Feed the post-processed data to the tracks. |
| 485 for (TrackList::ItemList::const_iterator it = tracks.begin(); | 485 for (TrackList::ItemList::const_iterator it = tracks.begin(); |
| 486 it != tracks.end(); ++it) { | 486 it != tracks.end(); ++it) { |
| 487 (*it)->Capture(output, audio_delay, current_volume, key_pressed, | 487 (*it)->Capture(output, audio_delay, current_volume, key_pressed, |
| 488 need_audio_processing); | 488 need_audio_processing); |
| 489 } | 489 } |
| 490 | 490 |
| 491 if (new_volume) { | 491 if (new_volume) { |
| 492 SetVolume(new_volume); | 492 SetVolume(new_volume); |
| (...skipping 61 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 554 *delay = audio_delay_; | 554 *delay = audio_delay_; |
| 555 *volume = volume_; | 555 *volume = volume_; |
| 556 *key_pressed = key_pressed_; | 556 *key_pressed = key_pressed_; |
| 557 } | 557 } |
| 558 | 558 |
| 559 void WebRtcAudioCapturer::SetCapturerSourceForTesting( | 559 void WebRtcAudioCapturer::SetCapturerSourceForTesting( |
| 560 const scoped_refptr<media::AudioCapturerSource>& source, | 560 const scoped_refptr<media::AudioCapturerSource>& source, |
| 561 media::AudioParameters params) { | 561 media::AudioParameters params) { |
| 562 // Create a new audio stream as source which uses the new source. | 562 // Create a new audio stream as source which uses the new source. |
| 563 SetCapturerSource(source, params.channel_layout(), | 563 SetCapturerSource(source, params.channel_layout(), |
| 564 static_cast<float>(params.sample_rate()), | 564 static_cast<float>(params.sample_rate())); |
| 565 params.effects(), | 565 } |
| 566 constraints_); | 566 |
// Starts dumping AEC (acoustic echo canceller) diagnostic data by forwarding
// the request to |audio_processor_|. Must be called on the thread that
// created the capturer. |aec_dump_file| must be a valid platform file
// (enforced by the DCHECK); the processor takes it from here — ownership
// semantics live in MediaStreamAudioProcessor::StartAecDump.
void WebRtcAudioCapturer::StartAecDump(
    const base::PlatformFile& aec_dump_file) {
  DCHECK(thread_checker_.CalledOnValidThread());
  DCHECK_NE(aec_dump_file, base::kInvalidPlatformFileValue);
  audio_processor_->StartAecDump(aec_dump_file);
}
| 573 |
// Stops an in-progress AEC dump by forwarding to |audio_processor_|.
// Also invoked unconditionally from Stop(), so presumably safe to call when
// no dump is active — verify against MediaStreamAudioProcessor::StopAecDump.
void WebRtcAudioCapturer::StopAecDump() {
  DCHECK(thread_checker_.CalledOnValidThread());
  audio_processor_->StopAecDump();
}
| 568 | 578 |
| 569 } // namespace content | 579 } // namespace content |
| OLD | NEW |