Index: content/renderer/media/webrtc_audio_capturer.cc
diff --git a/content/renderer/media/webrtc_audio_capturer.cc b/content/renderer/media/webrtc_audio_capturer.cc
index a3c6ebf42b9b4661dac774b3a2c1d9a56868cd5f..ecc43aaf69b81316a105cd6a199b23641150aec0 100644
--- a/content/renderer/media/webrtc_audio_capturer.cc
+++ b/content/renderer/media/webrtc_audio_capturer.cc
@@ -63,6 +63,43 @@ scoped_refptr<WebRtcAudioCapturer> WebRtcAudioCapturer::CreateCapturer() {
   return capturer;
 }
phoglund_chromium
2013/02/07 14:03:18
I found two places in the code which was doing rou

+bool WebRtcAudioCapturer::Reconfigure(int sample_rate,
+                                      media::AudioParameters::Format format,
+                                      media::ChannelLayout channel_layout) {
+  int buffer_size = GetBufferSizeForSampleRate(sample_rate);
+  if (!buffer_size) {
+    DLOG(ERROR) << "Unsupported sample-rate: " << sample_rate;
+    return false;
+  }
+
+  // Ignored since the audio stack uses float32.
tommi (sloooow) - chröme
2013/02/07 14:57:47
s/Ignored/bits_per_sample is always 16

+  int bits_per_sample = 16;
+
+  // These snapshots will allow us to run callbacks outside the lock.
+  SinkList sinks;
+  media::AudioParameters params;
+  {
+    base::AutoLock auto_lock(lock_);
+
phoglund_chromium
2013/02/07 14:03:18
So along with copying parameters, this is the addi

+    while (buffer_in_use_)
+      buffer_in_use_cv_.Wait();
+
phoglund_chromium
2013/02/07 14:03:18
Once the buffer is not used, we can move ahead wit

+    params_.Reset(format, channel_layout, sample_rate, bits_per_sample,
+                  buffer_size);
+    buffer_.reset(new int16[params_.frames_per_buffer() * params_.channels()]);
+
+    sinks = sinks_;
+    params = params_;
+  }
+
+  // Tell all sinks which format we use.
+  for (SinkList::const_iterator it = sinks.begin(); it != sinks.end(); ++it) {
+    (*it)->SetCaptureFormat(params);
+  }
+
+  return true;
+}
+
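The two review comments above describe the synchronization this CL adds around buffer_: a buffer_in_use_ flag paired with buffer_in_use_cv_, so Reconfigure() waits under lock_ until the capture side is done before reallocating. A minimal stand-alone sketch of that handshake follows; it assumes only the members visible in this diff (lock_, buffer_in_use_, buffer_in_use_cv_, buffer_), and the class, Resize()/Produce() names, and int16_t buffer type are illustrative, not part of the CL.

// Sketch of the buffer_in_use_ handshake; not a literal excerpt from the CL.
// Resize() plays the role of Reconfigure(), Produce() the role of Capture().
#include <stdint.h>

#include "base/synchronization/condition_variable.h"
#include "base/synchronization/lock.h"

class BufferHandshakeSketch {
 public:
  BufferHandshakeSketch()
      : buffer_in_use_cv_(&lock_), buffer_in_use_(false), buffer_(NULL) {}
  ~BufferHandshakeSketch() { delete[] buffer_; }

  // Reconfigure() side: wait until the capture side is done with buffer_,
  // then it is safe to reallocate it.
  void Resize(int new_length) {
    base::AutoLock auto_lock(lock_);
    while (buffer_in_use_)       // Wait() releases lock_ while blocked and
      buffer_in_use_cv_.Wait();  // re-acquires it before returning.
    delete[] buffer_;
    buffer_ = new int16_t[new_length];
  }

  // Capture() side: mark the buffer busy, use it outside the lock, then
  // clear the flag and wake any waiting Resize().
  void Produce() {
    int16_t* buffer;
    {
      base::AutoLock auto_lock(lock_);
      buffer_in_use_ = true;
      buffer = buffer_;
    }
    // ... fill |buffer| and hand it to sinks without holding lock_ ...
    {
      base::AutoLock auto_lock(lock_);
      buffer_in_use_ = false;
      buffer_in_use_cv_.Broadcast();
    }
  }

 private:
  base::Lock lock_;
  base::ConditionVariable buffer_in_use_cv_;
  bool buffer_in_use_;
  int16_t* buffer_;
};

Note that Wait() can block the thread calling Reconfigure() for up to one capture callback, which is why the flag is only held across a single Capture() invocation.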
 bool WebRtcAudioCapturer::Initialize(media::ChannelLayout channel_layout,
                                      int sample_rate) {
   DCHECK(thread_checker_.CalledOnValidThread());
@@ -98,18 +135,8 @@ bool WebRtcAudioCapturer::Initialize(media::ChannelLayout channel_layout,
     return false;
   }
phoglund_chromium
2013/02/07 14:03:18
This code didn't check if GetBufferSize... returne

-  int buffer_size = GetBufferSizeForSampleRate(sample_rate);
-
-  // Configure audio parameters for the default source.
-  params_.Reset(format, channel_layout, sample_rate, 16, buffer_size);
-
-  // Tell all sinks which format we use.
-  for (SinkList::const_iterator it = sinks_.begin();
-       it != sinks_.end(); ++it) {
-    (*it)->SetCaptureFormat(params_);
-  }
-
-  buffer_.reset(new int16[params_.frames_per_buffer() * params_.channels()]);
+  if (!Reconfigure(sample_rate, format, channel_layout))
+    return false;
   // Create and configure the default audio capturing source. The |source_|
   // will be overwritten if an external client later calls SetCapturerSource()
@@ -122,7 +149,9 @@ bool WebRtcAudioCapturer::Initialize(media::ChannelLayout channel_layout,
 }
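The comment above points out that the old Initialize() path never checked the value returned by GetBufferSizeForSampleRate(); routing both call sites through Reconfigure() adds that check. A hedged caller-side sketch of what that changes (the 12345 Hz rate is deliberately bogus and purely for illustration):

// Illustrative only: with the new check, an unsupported rate now makes
// Initialize() fail instead of configuring params_ with a zero buffer size.
scoped_refptr<WebRtcAudioCapturer> capturer =
    WebRtcAudioCapturer::CreateCapturer();
if (!capturer->Initialize(media::CHANNEL_LAYOUT_STEREO, 12345)) {
  // Reconfigure() has already logged "Unsupported sample-rate: 12345".
}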
 WebRtcAudioCapturer::WebRtcAudioCapturer()
-    : source_(NULL),
+    : buffer_in_use_cv_(&lock_),
+      buffer_in_use_(false),
+      source_(NULL),
       running_(false),
       buffering_(false) {
   DVLOG(1) << "WebRtcAudioCapturer::WebRtcAudioCapturer()";
@@ -162,6 +191,7 @@ void WebRtcAudioCapturer::SetCapturerSource(
   DVLOG(1) << "SetCapturerSource(channel_layout=" << channel_layout << ","
            << "sample_rate=" << sample_rate << ")";
   scoped_refptr<media::AudioCapturerSource> old_source;
+  media::AudioParameters::Format current_format;
   {
     base::AutoLock auto_lock(lock_);
     if (source_ == source)
@@ -169,6 +199,7 @@ void WebRtcAudioCapturer::SetCapturerSource(
     source_.swap(old_source);
     source_ = source;
phoglund_chromium
2013/02/07 14:03:18
We copy this here to avoid grabbing the lock again

+    current_format = params_.format();
   }
   const bool no_default_audio_source_exists = !buffer_.get();
@@ -185,25 +216,8 @@ void WebRtcAudioCapturer::SetCapturerSource(
     // Dispatch the new parameters both to the sink(s) and to the new source.
     // The idea is to get rid of any dependency of the microphone parameters
     // which would normally be used by default.
-
-    int buffer_size = GetBufferSizeForSampleRate(sample_rate);
-    if (!buffer_size) {
-      DLOG(ERROR) << "Unsupported sample-rate: " << sample_rate;
+    if (!Reconfigure(sample_rate, current_format, channel_layout))
       return;
-    }
-
-    params_.Reset(params_.format(),
-                  channel_layout,
-                  sample_rate,
-                  16,  // ignored since the audio stack uses float32.
-                  buffer_size);
-
-    buffer_.reset(new int16[params_.frames_per_buffer() * params_.channels()]);
-
-    for (SinkList::const_iterator it = sinks_.begin();
-         it != sinks_.end(); ++it) {
-      (*it)->SetCaptureFormat(params_);
-    }
   }
   if (source)
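The comment about copying params_.format() while the lock is already held is worth spelling out: Reconfigure() acquires lock_ internally and base::Lock is not reentrant, so SetCapturerSource() snapshots the current format inside its own critical section and calls Reconfigure() only after that scope ends. A condensed sketch of the ordering (illustrative, not a literal excerpt from the hunk above):

// Condensed from the hunks above; names match the CL, structure simplified.
media::AudioParameters::Format current_format;
{
  base::AutoLock auto_lock(lock_);
  // ... swap |source_| with the old source ...
  current_format = params_.format();  // Snapshot while lock_ is held anyway.
}
// lock_ is released here; Reconfigure() takes it again internally, so calling
// it inside the scope above would self-deadlock.
if (!Reconfigure(sample_rate, current_format, channel_layout))
  return;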
@@ -358,13 +372,21 @@ void WebRtcAudioCapturer::Capture(media::AudioBus* audio_source,
   // |source_| is AudioInputDevice, otherwise it is driven by client's
   // CaptureCallback.
   SinkList sinks;
+  int16* buffer;
+  int bytes_per_sample;
   {
     base::AutoLock auto_lock(lock_);
     if (!running_)
       return;
phoglund_chromium
2013/02/07 14:03:18
Here we use buffer_in_use variable to protect agai
tommi (sloooow) - chröme
2013/02/07 14:57:47
There's no thread check that ensures that the meth

-    // Copy the sink list to a local variable.
+    // We're assuming here that this method is called only once at a time.
+    buffer_in_use_ = true;
+    buffer_in_use_cv_.Broadcast();
+
+    // Copy the stuff we will need to local variables.
     sinks = sinks_;
+    buffer = buffer_.get();
+    bytes_per_sample = params_.bits_per_sample() / 8;
     // Push captured audio to FIFO so it can be read by a local sink.
     // Buffering is only enabled if we are rendering a local media stream.
@@ -380,18 +402,24 @@ void WebRtcAudioCapturer::Capture(media::AudioBus* audio_source,
   // Interleave, scale, and clip input to int and store result in
   // a local byte buffer.
-  audio_source->ToInterleaved(audio_source->frames(),
-                              params_.bits_per_sample() / 8,
-                              buffer_.get());
+  audio_source->ToInterleaved(audio_source->frames(), bytes_per_sample, buffer);
   // Feed the data to the sinks.
   for (SinkList::const_iterator it = sinks.begin();
        it != sinks.end();
        ++it) {
-    (*it)->CaptureData(reinterpret_cast<const int16*>(buffer_.get()),
+    (*it)->CaptureData(reinterpret_cast<const int16*>(buffer),
                        audio_source->channels(), audio_source->frames(),
                        audio_delay_milliseconds, volume);
   }
+
phoglund_chromium
2013/02/07 14:03:18
If this note isn't true, we need WebRTC to somehow

+  // Note: This assumes that the callbacks are guaranteed to be done with the
+  // data after returning!
+  {
+    base::AutoLock auto_lock(lock_);
+    buffer_in_use_ = false;
+    buffer_in_use_cv_.Broadcast();
+  }
 }
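The note about callbacks being done with the data matters because buffer_in_use_ is cleared as soon as the sink loop returns, after which Reconfigure() may free and reallocate buffer_. A sink that honors that contract copies synchronously inside CaptureData(). The class below is hypothetical and only mirrors the SetCaptureFormat()/CaptureData() calls made in the loop above; it is not the real sink interface.

// Hypothetical sink, for illustration only; the real sink interface lives
// elsewhere in content/renderer/media. Requires <vector>.
class CopyingSinkSketch {
 public:
  void SetCaptureFormat(const media::AudioParameters& params) {
    params_ = params;
  }

  void CaptureData(const int16* audio_data,
                   int number_of_channels,
                   int number_of_frames,
                   int audio_delay_milliseconds,
                   double volume) {
    // Copy before returning; stashing |audio_data| for later use would race
    // with Reconfigure() reallocating the capturer's buffer_.
    last_chunk_.assign(audio_data,
                       audio_data + number_of_channels * number_of_frames);
  }

 private:
  media::AudioParameters params_;
  std::vector<int16> last_chunk_;
};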
 void WebRtcAudioCapturer::OnCaptureError() {