Index: content/renderer/media/webrtc_audio_device_impl.cc |
diff --git a/content/renderer/media/webrtc_audio_device_impl.cc b/content/renderer/media/webrtc_audio_device_impl.cc |
index aadcda9b67232434a7177501215c4b78215d0416..acf5a919c2b46b594f16420689018311423b1a65 100644 |
--- a/content/renderer/media/webrtc_audio_device_impl.cc |
+++ b/content/renderer/media/webrtc_audio_device_impl.cc |
@@ -51,6 +51,7 @@ int32_t WebRtcAudioDeviceImpl::Release() { |
} |
return ret; |
} |
+ |
int WebRtcAudioDeviceImpl::CaptureData(const std::vector<int>& channels, |
const int16* audio_data, |
int sample_rate, |
@@ -74,36 +75,11 @@ int WebRtcAudioDeviceImpl::CaptureData(const std::vector<int>& channels, |
DVLOG(2) << "total delay: " << input_delay_ms_ + output_delay_ms_; |
} |
- // Write audio samples in blocks of 10 milliseconds to the registered |
- // webrtc::AudioTransport sink. Keep writing until our internal byte |
- // buffer is empty. |
- const int16* audio_buffer = audio_data; |
- const int samples_per_10_msec = (sample_rate / 100); |
- int accumulated_audio_samples = 0; |
- uint32_t new_volume = 0; |
- while (accumulated_audio_samples < number_of_frames) { |
- // Deliver 10ms of recorded 16-bit linear PCM audio. |
- int new_mic_level = audio_transport_callback_->OnDataAvailable( |
- &channels[0], |
- channels.size(), |
- audio_buffer, |
- sample_rate, |
- number_of_channels, |
- samples_per_10_msec, |
- total_delay_ms, |
- current_volume, |
- key_pressed, |
- need_audio_processing); |
- |
- accumulated_audio_samples += samples_per_10_msec; |
- audio_buffer += samples_per_10_msec * number_of_channels; |
- |
- // The latest non-zero new microphone level will be returned. |
- if (new_mic_level) |
- new_volume = new_mic_level; |
- } |
- |
- return new_volume; |
+ // Deliver 10ms of recorded 16-bit linear PCM audio. |
+ return audio_transport_callback_->OnDataAvailable( |
+ &channels[0], channels.size(), audio_data, sample_rate, |
+ number_of_channels, number_of_frames, total_delay_ms, |
+ current_volume, key_pressed, need_audio_processing); |
} |
void WebRtcAudioDeviceImpl::SetCaptureFormat( |
@@ -150,6 +126,18 @@ void WebRtcAudioDeviceImpl::RenderData(uint8* audio_data, |
accumulated_audio_samples += num_audio_samples; |
audio_data += bytes_per_10_msec; |
} |
+ |
+ base::AutoLock auto_lock(lock_); |
+ for (RenderDataObservers::const_iterator iter = |
+ render_data_observers_.begin(); |
+ iter != render_data_observers_.end(); ++iter) { |
+ (*iter)->OnRenderData( |
+ reinterpret_cast<const int16*>(audio_data), |
+ samples_per_sec, |
+ number_of_channels, |
+ number_of_frames, |
+ audio_delay_milliseconds); |
+ } |
} |
void WebRtcAudioDeviceImpl::SetRenderFormat(const AudioParameters& params) { |
@@ -202,6 +190,14 @@ int32_t WebRtcAudioDeviceImpl::Terminate() { |
capturers_.clear(); |
+ // Notify all the remaining observers that the render object is going away. |
+ for (RenderDataObservers::const_iterator iter = |
+ render_data_observers_.begin(); |
+ iter != render_data_observers_.end(); ++iter) { |
+ (*iter)->OnRenderClosing(); |
+ } |
+ render_data_observers_.clear(); |
+ |
initialized_ = false; |
return 0; |
} |
@@ -481,4 +477,27 @@ WebRtcAudioDeviceImpl::GetDefaultCapturer() const { |
return NULL; |
} |
+void WebRtcAudioDeviceImpl::AddRenderDataObserver( |
+ WebRtcAudioRenderDataObserver* observer) { |
+ DCHECK(observer); |
+ base::AutoLock auto_lock(lock_); |
+ DCHECK(std::find(render_data_observers_.begin(), |
+ render_data_observers_.end(), |
+ observer) == render_data_observers_.end()); |
+ render_data_observers_.push_back(observer); |
+} |
+ |
+void WebRtcAudioDeviceImpl::RemoveRenderDataObserver( |
+ WebRtcAudioRenderDataObserver* observer) { |
+ DCHECK(observer); |
+ base::AutoLock auto_lock(lock_); |
+ RenderDataObservers::iterator iter = std::find( |
+ render_data_observers_.begin(), render_data_observers_.end(), observer); |
+  // TODO(xians): Decide whether removing an unregistered observer should be a |
+  // DCHECK (caller bug) rather than being silently ignored. |
+ if (iter == render_data_observers_.end()) |
+ return; |
+ |
+ render_data_observers_.erase(iter); |
+} |
+ |
} // namespace content |