Index: content/renderer/media/webrtc_audio_capturer.cc |
diff --git a/content/renderer/media/webrtc_audio_capturer.cc b/content/renderer/media/webrtc_audio_capturer.cc |
index aa3ecf8627f84ca4165d481afd14b50388faf291..db9ef7d980f3f01c5d51d47fd7002c6f3170d5c4 100644 |
--- a/content/renderer/media/webrtc_audio_capturer.cc |
+++ b/content/renderer/media/webrtc_audio_capturer.cc |
@@ -11,6 +11,7 @@ |
#include "content/child/child_process.h" |
#include "content/renderer/media/audio_device_factory.h" |
#include "content/renderer/media/webrtc_audio_device_impl.h" |
+#include "content/renderer/media/webrtc_audio_processing_wrapper.h" |
#include "content/renderer/media/webrtc_local_audio_track.h" |
#include "media/audio/sample_rates.h" |
@@ -42,16 +43,16 @@ class WebRtcAudioCapturer::TrackOwner |
explicit TrackOwner(WebRtcLocalAudioTrack* track) |
: delegate_(track) {} |
- void Capture(media::AudioBus* audio_source, |
- int audio_delay_milliseconds, |
- double volume, |
- bool key_pressed) { |
+ void Capture(const int16* data, |
+ int sample_rate, |
+ int number_of_channels, |
+ int number_of_frames) { |
base::AutoLock lock(lock_); |
if (delegate_) { |
- delegate_->Capture(audio_source, |
- audio_delay_milliseconds, |
- volume, |
- key_pressed); |
+ delegate_->Capture(data, |
+ sample_rate, |
+ number_of_channels, |
+ number_of_frames); |
} |
} |
@@ -101,8 +102,10 @@ scoped_refptr<WebRtcAudioCapturer> WebRtcAudioCapturer::CreateCapturer() { |
return capturer; |
} |
-void WebRtcAudioCapturer::Reconfigure(int sample_rate, |
- media::ChannelLayout channel_layout) { |
+void WebRtcAudioCapturer::Reconfigure( |
+ int sample_rate, |
+ media::ChannelLayout channel_layout, |
+ const webrtc::MediaConstraintsInterface* constraints) { |
DCHECK(thread_checker_.CalledOnValidThread()); |
int buffer_size = GetBufferSize(sample_rate); |
DVLOG(1) << "Using WebRTC input buffer size: " << buffer_size; |
@@ -115,27 +118,33 @@ void WebRtcAudioCapturer::Reconfigure(int sample_rate, |
media::AudioParameters params(format, channel_layout, sample_rate, |
bits_per_sample, buffer_size); |
+ audio_processing_.reset(new WebRtcAudioProcessingWrapper()); |
+ audio_processing_->Configure(params, constraints); |
+ sink_params_ = audio_processing_->OutputFormat(); |
+ |
TrackList tracks; |
{ |
base::AutoLock auto_lock(lock_); |
tracks = tracks_; |
- params_ = params; |
+ source_params_ = params; |
} |
// Tell all audio_tracks which format we use. |
for (TrackList::const_iterator it = tracks.begin(); |
it != tracks.end(); ++it) |
- (*it)->SetCaptureFormat(params); |
+ (*it)->SetCaptureFormat(sink_params_); |
} |
-bool WebRtcAudioCapturer::Initialize(int render_view_id, |
- media::ChannelLayout channel_layout, |
- int sample_rate, |
- int buffer_size, |
- int session_id, |
- const std::string& device_id, |
- int paired_output_sample_rate, |
- int paired_output_frames_per_buffer) { |
+bool WebRtcAudioCapturer::Initialize( |
+ int render_view_id, |
+ media::ChannelLayout channel_layout, |
+ int sample_rate, |
+ int buffer_size, |
+ int session_id, |
+ const std::string& device_id, |
+ int paired_output_sample_rate, |
+ int paired_output_frames_per_buffer, |
+ const webrtc::MediaConstraintsInterface* constraints) { |
DCHECK(thread_checker_.CalledOnValidThread()); |
DCHECK_GE(render_view_id, 0); |
DVLOG(1) << "WebRtcAudioCapturer::Initialize()"; |
@@ -187,9 +196,10 @@ bool WebRtcAudioCapturer::Initialize(int render_view_id, |
// Create and configure the default audio capturing source. The |source_| |
// will be overwritten if an external client later calls SetCapturerSource() |
// providing an alternative media::AudioCapturerSource. |
- SetCapturerSource(AudioDeviceFactory::NewInputDevice(render_view_id), |
- channel_layout, |
- static_cast<float>(sample_rate)); |
+ InitializeCapturerSource(AudioDeviceFactory::NewInputDevice(render_view_id), |
+ channel_layout, |
+ static_cast<float>(sample_rate), |
+ constraints); |
return true; |
} |
@@ -230,7 +240,7 @@ void WebRtcAudioCapturer::AddTrack(WebRtcLocalAudioTrack* track) { |
DCHECK(std::find_if(tracks_.begin(), tracks_.end(), |
TrackOwner::TrackWrapper(track)) == tracks_.end()); |
- track->SetCaptureFormat(params_); |
+ track->SetCaptureFormat(sink_params_); |
tracks_.push_back(new WebRtcAudioCapturer::TrackOwner(track)); |
} |
@@ -260,13 +270,14 @@ void WebRtcAudioCapturer::RemoveTrack(WebRtcLocalAudioTrack* track) { |
Stop(); |
} |
-void WebRtcAudioCapturer::SetCapturerSource( |
+void WebRtcAudioCapturer::InitializeCapturerSource( |
const scoped_refptr<media::AudioCapturerSource>& source, |
media::ChannelLayout channel_layout, |
- float sample_rate) { |
+ float sample_rate, |
+ const webrtc::MediaConstraintsInterface* constraints) { |
DCHECK(thread_checker_.CalledOnValidThread()); |
- DVLOG(1) << "SetCapturerSource(channel_layout=" << channel_layout << "," |
- << "sample_rate=" << sample_rate << ")"; |
+ DVLOG(1) << "InitializeCapturerSource(channel_layout=" << channel_layout |
+ << "," << "sample_rate=" << sample_rate << ")"; |
scoped_refptr<media::AudioCapturerSource> old_source; |
bool restart_source = false; |
{ |
@@ -289,7 +300,7 @@ void WebRtcAudioCapturer::SetCapturerSource( |
// Dispatch the new parameters both to the sink(s) and to the new source. |
// The idea is to get rid of any dependency of the microphone parameters |
// which would normally be used by default. |
- Reconfigure(sample_rate, channel_layout); |
+ Reconfigure(sample_rate, channel_layout, constraints); |
// Make sure to grab the new parameters in case they were reconfigured. |
media::AudioParameters params = audio_parameters(); |
@@ -327,11 +338,25 @@ void WebRtcAudioCapturer::EnablePeerConnectionMode() { |
// Create a new audio stream as source which will open the hardware using |
// WebRtc native buffer size. |
- SetCapturerSource(AudioDeviceFactory::NewInputDevice(render_view_id), |
- params.channel_layout(), |
- static_cast<float>(params.sample_rate())); |
+ InitializeCapturerSource(AudioDeviceFactory::NewInputDevice(render_view_id), |
+ params.channel_layout(), |
+ static_cast<float>(params.sample_rate()), |
+ NULL); |
} |
+void WebRtcAudioCapturer::FeedRenderDataToAudioProcessing( |
+ const int16* render_audio, |
+ int sample_rate, |
+ int number_of_channels, |
+ int number_of_frames, |
+ int render_delay_ms) { |
+ audio_processing_->FeedRenderDataToAudioProcessing(render_audio, |
+ sample_rate, |
+ number_of_channels, |
+ number_of_frames, |
+ render_delay_ms); |
+ |
+} |
void WebRtcAudioCapturer::Start() { |
DVLOG(1) << "WebRtcAudioCapturer::Start()"; |
base::AutoLock auto_lock(lock_); |
@@ -412,7 +437,6 @@ void WebRtcAudioCapturer::Capture(media::AudioBus* audio_source, |
#endif |
TrackList tracks; |
- int current_volume = 0; |
{ |
base::AutoLock auto_lock(lock_); |
if (!running_) |
@@ -422,21 +446,31 @@ void WebRtcAudioCapturer::Capture(media::AudioBus* audio_source, |
// webrtc::VoiceEngine. webrtc::VoiceEngine will handle the case when the |
// volume is higher than 255. |
volume_ = static_cast<int>((volume * MaxVolume()) + 0.5); |
- current_volume = volume_; |
tracks = tracks_; |
} |
- // Deliver captured data to source provider, which stores the data into FIFO |
- // for WebAudio to fetch. |
- source_provider_->DeliverData(audio_source, audio_delay_milliseconds, |
- current_volume, key_pressed); |
+ audio_processing_->Push(audio_source); |
+  int current_volume = static_cast<int>((volume * MaxVolume()) + 0.5); |
+ while (audio_processing_->ProcessAndConsume10MsData(audio_delay_milliseconds, |
+ current_volume, |
+ key_pressed)) { |
+ // TODO(xians): Get the new volume and set it to |current_volume|. |
+ |
+ for (TrackList::const_iterator it = tracks.begin(); |
+ it != tracks.end(); ++it) { |
+ (*it)->Capture(audio_processing_->OutputBuffer(), |
+ sink_params_.sample_rate(), |
+ sink_params_.channels(), |
+ sink_params_.frames_per_buffer()); |
+ } |
- // Feed the data to the tracks. |
- for (TrackList::const_iterator it = tracks.begin(); |
- it != tracks.end(); |
- ++it) { |
- (*it)->Capture(audio_source, audio_delay_milliseconds, |
- current_volume, key_pressed); |
+ // TODO(xians): Make the source provider as one of the sinks. |
+ // Deliver captured data to source provider, which stores the data into FIFO |
+ // for WebAudio to fetch. |
+ source_provider_->DeliverData(audio_processing_->OutputBuffer(), |
+ sink_params_.sample_rate(), |
+ sink_params_.channels(), |
+ sink_params_.frames_per_buffer()); |
} |
} |
@@ -446,7 +480,7 @@ void WebRtcAudioCapturer::OnCaptureError() { |
media::AudioParameters WebRtcAudioCapturer::audio_parameters() const { |
base::AutoLock auto_lock(lock_); |
- return params_; |
+ return source_params_; |
} |
bool WebRtcAudioCapturer::GetPairedOutputParameters( |