Index: media/audio/win/audio_low_latency_output_win.cc
diff --git a/media/audio/win/audio_low_latency_output_win.cc b/media/audio/win/audio_low_latency_output_win.cc
index 3dbb7d6b0b559e8c9fc61ea602857818c6b0929c..67edec54ccb1fcb304ed6cb587e40581ba2c44bf 100644
--- a/media/audio/win/audio_low_latency_output_win.cc
+++ b/media/audio/win/audio_low_latency_output_win.cc
@@ -131,8 +131,8 @@ bool WASAPIAudioOutputStream::Open() {
   if (opened_)
     return true;
 
-  DCHECK(!audio_client_.get());
-  DCHECK(!audio_render_client_.get());
+  DCHECK(!audio_client_.Get());
+  DCHECK(!audio_render_client_.Get());
 
   // Will be set to true if we ended up opening the default communications
   // device.
@@ -145,16 +145,16 @@ bool WASAPIAudioOutputStream::Open() {
     communications_device = (device_role_ == eCommunications);
   } else {
     ScopedComPtr<IMMDevice> device(CoreAudioUtil::CreateDevice(device_id_));
-    DLOG_IF(ERROR, !device.get()) << "Failed to open device: " << device_id_;
-    if (device.get())
-      audio_client = CoreAudioUtil::CreateClient(device.get());
+    DLOG_IF(ERROR, !device.Get()) << "Failed to open device: " << device_id_;
+    if (device.Get())
+      audio_client = CoreAudioUtil::CreateClient(device.Get());
   }
 
-  if (!audio_client.get())
+  if (!audio_client.Get())
     return false;
 
   // Extra sanity to ensure that the provided device format is still valid.
-  if (!CoreAudioUtil::IsFormatSupported(audio_client.get(), share_mode_,
+  if (!CoreAudioUtil::IsFormatSupported(audio_client.Get(), share_mode_,
                                         &format_)) {
     LOG(ERROR) << "Audio parameters are not supported.";
     return false;
@@ -165,7 +165,7 @@ bool WASAPIAudioOutputStream::Open() {
     // Initialize the audio stream between the client and the device in shared
     // mode and using event-driven buffer handling.
     hr = CoreAudioUtil::SharedModeInitialize(
-        audio_client.get(), &format_, audio_samples_render_event_.Get(),
+        audio_client.Get(), &format_, audio_samples_render_event_.Get(),
         &endpoint_buffer_size_frames_,
         communications_device ? &kCommunicationsSessionId : NULL);
     if (FAILED(hr))
@@ -173,7 +173,7 @@ bool WASAPIAudioOutputStream::Open() {
 
     REFERENCE_TIME device_period = 0;
     if (FAILED(CoreAudioUtil::GetDevicePeriod(
-            audio_client.get(), AUDCLNT_SHAREMODE_SHARED, &device_period))) {
+            audio_client.Get(), AUDCLNT_SHAREMODE_SHARED, &device_period))) {
       return false;
     }
 
@@ -213,7 +213,7 @@ bool WASAPIAudioOutputStream::Open() {
   } else {
     // TODO(henrika): break out to CoreAudioUtil::ExclusiveModeInitialize()
     // when removing the enable-exclusive-audio flag.
-    hr = ExclusiveModeInitialization(audio_client.get(),
+    hr = ExclusiveModeInitialization(audio_client.Get(),
                                      audio_samples_render_event_.Get(),
                                      &endpoint_buffer_size_frames_);
     if (FAILED(hr))
@@ -232,8 +232,8 @@ bool WASAPIAudioOutputStream::Open() {
   // The IAudioRenderClient interface enables us to write output data to
   // a rendering endpoint buffer.
   ScopedComPtr<IAudioRenderClient> audio_render_client =
-      CoreAudioUtil::CreateRenderClient(audio_client.get());
-  if (!audio_render_client.get())
+      CoreAudioUtil::CreateRenderClient(audio_client.Get());
+  if (!audio_render_client.Get())
     return false;
 
   // Store valid COM interfaces.
@@ -267,7 +267,7 @@ void WASAPIAudioOutputStream::Start(AudioSourceCallback* callback) {
   // Ensure that the endpoint buffer is prepared with silence.
   if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) {
     if (!CoreAudioUtil::FillRenderEndpointBufferWithSilence(
-            audio_client_.get(), audio_render_client_.get())) {
+            audio_client_.Get(), audio_render_client_.Get())) {
       LOG(ERROR) << "Failed to prepare endpoint buffers with silence.";
       callback->OnError(this);
       return;