Index: content/renderer/media/webrtc_audio_renderer.cc
diff --git a/content/renderer/media/webrtc_audio_renderer.cc b/content/renderer/media/webrtc_audio_renderer.cc
index 39d71b48815704b90242767e20f4aca5d8e8f1fe..cf6472bfe38de38fd62860beff731a8f1ca9a870 100644
--- a/content/renderer/media/webrtc_audio_renderer.cc
+++ b/content/renderer/media/webrtc_audio_renderer.cc
@@ -123,6 +123,14 @@ class SharedAudioRenderer : public MediaStreamAudioRenderer {
     return delegate_->GetOutputDevice();
   }
 
+  void SwitchOutputDevice(
+      const std::string& device_id,
+      const url::Origin& security_origin,
+      const media::SwitchOutputDeviceCB& callback) override {
+    DCHECK(thread_checker_.CalledOnValidThread());
+    return delegate_->SwitchOutputDevice(device_id, security_origin, callback);
+  }
+
   base::TimeDelta GetCurrentRenderTime() const override {
     DCHECK(thread_checker_.CalledOnValidThread());
     return delegate_->GetCurrentRenderTime();
@@ -224,8 +232,9 @@ bool WebRtcAudioRenderer::Initialize(WebRtcAudioRendererSource* source) {
       AudioDeviceFactory::kSourceWebRtc, source_render_frame_id_, session_id_,
       output_device_id_, security_origin_);
 
-  if (sink_->GetOutputDevice()->GetDeviceStatus() !=
-      media::OUTPUT_DEVICE_STATUS_OK) {
+  media::OutputDevice* device = sink_->GetOutputDevice();
+  if (!(device &&
+        (device->GetDeviceStatus() == media::OUTPUT_DEVICE_STATUS_OK))) {
     return false;
   }
 
@@ -357,7 +366,7 @@ void WebRtcAudioRenderer::SetVolume(float volume) {
 
 media::OutputDevice* WebRtcAudioRenderer::GetOutputDevice() {
   DCHECK(thread_checker_.CalledOnValidThread());
-  return this;
+  return sink_ ? sink_->GetOutputDevice() : nullptr;
 }
 
 base::TimeDelta WebRtcAudioRenderer::GetCurrentRenderTime() const {
@@ -387,9 +396,12 @@ void WebRtcAudioRenderer::SwitchOutputDevice(
       AudioDeviceFactory::NewAudioRendererSink(
           AudioDeviceFactory::kSourceWebRtc, source_render_frame_id_,
           session_id_, device_id, security_origin);
-  if (new_sink->GetOutputDevice()->GetDeviceStatus() !=
-      media::OUTPUT_DEVICE_STATUS_OK) {
-    callback.Run(new_sink->GetOutputDevice()->GetDeviceStatus());
+  media::OutputDevice* device = new_sink->GetOutputDevice();
+  media::OutputDeviceStatus status =
+      device ? device->GetDeviceStatus()
+             : media::OUTPUT_DEVICE_STATUS_ERROR_INTERNAL;
+  if (status != media::OUTPUT_DEVICE_STATUS_OK) {
+    callback.Run(status);
     return;
   }
 
@@ -411,22 +423,6 @@ void WebRtcAudioRenderer::SwitchOutputDevice(
   callback.Run(media::OUTPUT_DEVICE_STATUS_OK);
 }
 
-media::AudioParameters WebRtcAudioRenderer::GetOutputParameters() {
-  DCHECK(thread_checker_.CalledOnValidThread());
-  if (!sink_.get())
-    return media::AudioParameters();
-
-  return sink_->GetOutputDevice()->GetOutputParameters();
-}
-
-media::OutputDeviceStatus WebRtcAudioRenderer::GetDeviceStatus() {
-  DCHECK(thread_checker_.CalledOnValidThread());
-  if (!sink_.get())
-    return media::OUTPUT_DEVICE_STATUS_ERROR_INTERNAL;
-
-  return sink_->GetOutputDevice()->GetDeviceStatus();
-}
-
 int WebRtcAudioRenderer::Render(media::AudioBus* audio_bus,
                                 uint32_t frames_delayed,
                                 uint32_t frames_skipped) {
@@ -627,14 +623,18 @@ void WebRtcAudioRenderer::PrepareSink() {
     base::AutoLock lock(lock_);
     new_sink_params = sink_params_;
   }
+
+  media::OutputDevice* device = sink_->GetOutputDevice();
+  DCHECK(device);
+  const media::AudioParameters output_params = device->GetOutputParameters();
+
   // WebRTC does not yet support higher rates than 96000 on the client side
   // and 48000 is the preferred sample rate. Therefore, if 192000 is detected,
   // we change the rate to 48000 instead. The consequence is that the native
   // layer will be opened up at 192kHz but WebRTC will provide data at 48kHz
   // which will then be resampled by the audio converter on the browser side
   // to match the native audio layer.
-  int sample_rate =
-      sink_->GetOutputDevice()->GetOutputParameters().sample_rate();
+  int sample_rate = output_params.sample_rate();
   DVLOG(1) << "Audio output hardware sample rate: " << sample_rate;
   if (sample_rate >= 192000) {
     DVLOG(1) << "Resampling from 48000 to " << sample_rate << " is required";
@@ -655,9 +655,8 @@ void WebRtcAudioRenderer::PrepareSink() {
   DVLOG(1) << "Using WebRTC output buffer size: " << source_frames_per_buffer;
 
   // Setup sink parameters.
-  const int sink_frames_per_buffer = GetOptimalBufferSize(
-      sample_rate,
-      sink_->GetOutputDevice()->GetOutputParameters().frames_per_buffer());
+  const int sink_frames_per_buffer =
+      GetOptimalBufferSize(sample_rate, output_params.frames_per_buffer());
   new_sink_params.set_sample_rate(sample_rate);
   new_sink_params.set_frames_per_buffer(sink_frames_per_buffer);
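
Note: the following is an illustrative sketch only, not part of the patch. It mimics the null-safe device-status pattern the patch applies in Initialize() and SwitchOutputDevice(): when a sink hands back a null media::OutputDevice*, report OUTPUT_DEVICE_STATUS_ERROR_INTERNAL instead of dereferencing the pointer. FakeOutputDevice, the simplified status enum, and GetStatusOrError() are hypothetical stand-ins, not Chromium APIs.

#include <iostream>

// Hypothetical stand-in for media::OutputDeviceStatus.
enum class OutputDeviceStatus { kOk, kErrorInternal };

// Hypothetical stand-in for media::OutputDevice.
class FakeOutputDevice {
 public:
  explicit FakeOutputDevice(OutputDeviceStatus status) : status_(status) {}
  OutputDeviceStatus GetDeviceStatus() const { return status_; }

 private:
  OutputDeviceStatus status_;
};

// Mirrors the patch's pattern: a sink may hand back a null device pointer,
// so map "no device" to an internal error instead of dereferencing it.
OutputDeviceStatus GetStatusOrError(const FakeOutputDevice* device) {
  return device ? device->GetDeviceStatus()
                : OutputDeviceStatus::kErrorInternal;
}

int main() {
  FakeOutputDevice ok_device(OutputDeviceStatus::kOk);
  // A valid device reports its own status.
  std::cout << (GetStatusOrError(&ok_device) == OutputDeviceStatus::kOk)
            << "\n";
  // A missing device is reported as an internal error, never dereferenced.
  std::cout << (GetStatusOrError(nullptr) ==
                OutputDeviceStatus::kErrorInternal)
            << "\n";
  return 0;
}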