Chromium Code Reviews

Index: media/audio/win/audio_unified_win.cc
diff --git a/media/audio/win/audio_unified_win.cc b/media/audio/win/audio_unified_win.cc
index 677f9e0260aea59c10879be3e4daef4e10a206e7..bd0426b9d14bcac6a8f186af140fa30edd3ddf2b 100644
--- a/media/audio/win/audio_unified_win.cc
+++ b/media/audio/win/audio_unified_win.cc
@@ -246,12 +246,16 @@ void WASAPIUnifiedStream::Start(AudioSourceCallback* callback) {
     return;
   }

-  // Reset the counter for number of rendered frames taking into account the
-  // fact that we always initialize the render side with silence.
-  UINT32 num_queued_frames = 0;
-  audio_output_client_->GetCurrentPadding(&num_queued_frames);
-  DCHECK_EQ(num_queued_frames, endpoint_render_buffer_size_frames_);
-  num_written_frames_ = num_queued_frames;
+  // Ensure that the endpoint buffer is prepared with silence.
+  UINT32 num_filled_frames = 0;
+  if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) {
+    if (!FillRenderEndpointBufferWithSilence(&num_filled_frames)) {
+      DLOG(WARNING) << "Failed to prepare endpoint buffers with silence.";
+      return;
+    }
+    DCHECK_EQ(num_filled_frames, endpoint_render_buffer_size_frames_);
+  }
+  num_written_frames_ = num_filled_frames;

   // Start output streaming data between the endpoint buffer and the audio
   // engine.
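
For readers less familiar with WASAPI, the members used in the hunk above map onto standard Core Audio objects: audio_output_client_ is an IAudioClient, audio_render_client_ an IAudioRenderClient, and endpoint_render_buffer_size_frames_ is the value reported by IAudioClient::GetBufferSize(). A minimal background sketch follows (not part of this CL; it uses plain WRL/WASAPI rather than Chromium's own device layer, and most error handling is omitted for brevity):

// Background sketch only: shared-mode render setup with the device mix
// format. Chromium's real code goes through its own audio device layer.
#include <audioclient.h>
#include <mmdeviceapi.h>
#include <objbase.h>
#include <wrl/client.h>

using Microsoft::WRL::ComPtr;

int main() {
  if (FAILED(CoInitializeEx(nullptr, COINIT_APARTMENTTHREADED)))
    return 1;
  {
    ComPtr<IMMDeviceEnumerator> enumerator;
    ComPtr<IMMDevice> device;
    ComPtr<IAudioClient> audio_client;         // ~ audio_output_client_
    ComPtr<IAudioRenderClient> render_client;  // ~ audio_render_client_
    UINT32 endpoint_buffer_frames = 0;         // ~ endpoint_render_buffer_size_frames_

    // Default render endpoint -> IAudioClient (error checks omitted).
    CoCreateInstance(__uuidof(MMDeviceEnumerator), nullptr, CLSCTX_ALL,
                     IID_PPV_ARGS(&enumerator));
    enumerator->GetDefaultAudioEndpoint(eRender, eConsole, &device);
    device->Activate(__uuidof(IAudioClient), CLSCTX_ALL, nullptr, &audio_client);

    // Shared-mode initialization using the endpoint's mix format.
    WAVEFORMATEX* format = nullptr;
    audio_client->GetMixFormat(&format);
    audio_client->Initialize(AUDCLNT_SHAREMODE_SHARED, 0, 0, 0, format, nullptr);
    CoTaskMemFree(format);

    // Endpoint buffer size (in audio frames) and the render client that
    // backs the GetBuffer()/ReleaseBuffer() calls in the patch.
    audio_client->GetBufferSize(&endpoint_buffer_frames);
    audio_client->GetService(IID_PPV_ARGS(&render_client));
  }
  CoUninitialize();
  return 0;
}
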
@@ -566,4 +570,37 @@ void WASAPIUnifiedStream::StopAndJoinThread(HRESULT err) {
   HandleError(err);
 }

+bool WASAPIUnifiedStream::FillRenderEndpointBufferWithSilence(

DaleCurtis, 2013/01/31 02:34:33:
Should this just be in CoreAudioUtil?

henrika (OOO until Aug 14), 2013/01/31 14:29:38:
Good point. I will fix that.

+    UINT32* num_written_frames) {

tommi (sloooow) - chröme, 2013/01/31 13:42:08:
indent

henrika (OOO until Aug 14), 2013/01/31 14:29:38:
Done.

+  UINT32 num_queued_frames = 0;
+  HRESULT hr = audio_output_client_->GetCurrentPadding(&num_queued_frames);
+  if (FAILED(hr))
+    return false;
+
+  BYTE* data = NULL;
+  int num_frames_to_fill =
+      endpoint_render_buffer_size_frames_ - num_queued_frames;
+  hr = audio_render_client_->GetBuffer(num_frames_to_fill, &data);
+  if (FAILED(hr))
+    return false;
+
+  // Using the AUDCLNT_BUFFERFLAGS_SILENT flag eliminates the need to
+  // explicitly write silence data to the rendering buffer.
+  DVLOG(2) << "filling up " << num_frames_to_fill << " frames with silence";
+  hr = audio_render_client_->ReleaseBuffer(num_frames_to_fill,
+                                           AUDCLNT_BUFFERFLAGS_SILENT);

tommi (sloooow) - chröme, 2013/01/31 13:42:08:
indent

henrika (OOO until Aug 14), 2013/01/31 14:29:38:
Done.

+  if (FAILED(hr))
+    return false;
+
+  // Get the amount of valid, unread data that the endpoint buffer
+  // currently contains. This amount corresponds to the number of written
+  // audio frames.
+  hr = audio_output_client_->GetCurrentPadding(&num_queued_frames);
+  if (FAILED(hr))
+    return false;
+  *num_written_frames = num_queued_frames;
+
+  return SUCCEEDED(hr);

tommi (sloooow) - chröme, 2013/01/31 13:42:08:
nit: you've already checked hr, so you can return true here.

henrika (OOO until Aug 14), 2013/01/31 14:29:38:
I use the "belt and braces" ("hängslen och livrem") principle here ;-)

+}
+
 }  // namespace media
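
Regarding DaleCurtis's question above about moving this helper into CoreAudioUtil: a minimal sketch of what such a shared utility could look like, assuming it takes the IAudioClient and IAudioRenderClient as explicit parameters. The name, placement, and signature here are illustrative only, not the actual CoreAudioUtil API:

// Sketch only: a CoreAudioUtil-style free function equivalent to the
// member function added in this CL. Name and signature are assumptions.
#include <audioclient.h>

namespace media {

// Fills up the unused portion of the render endpoint buffer with silence
// and reports how many frames are now queued. Returns false on any COM
// failure.
bool FillRenderEndpointBufferWithSilence(IAudioClient* client,
                                         IAudioRenderClient* render_client,
                                         UINT32* num_written_frames) {
  UINT32 endpoint_buffer_size = 0;
  if (FAILED(client->GetBufferSize(&endpoint_buffer_size)))
    return false;

  UINT32 num_queued_frames = 0;
  if (FAILED(client->GetCurrentPadding(&num_queued_frames)))
    return false;

  BYTE* data = NULL;
  int num_frames_to_fill = endpoint_buffer_size - num_queued_frames;
  if (FAILED(render_client->GetBuffer(num_frames_to_fill, &data)))
    return false;

  // AUDCLNT_BUFFERFLAGS_SILENT asks the audio engine to treat the buffer
  // as silence, so there is no need to zero |data| explicitly.
  if (FAILED(render_client->ReleaseBuffer(num_frames_to_fill,
                                          AUDCLNT_BUFFERFLAGS_SILENT)))
    return false;

  // Every frame in the endpoint buffer is now either previously queued
  // audio or freshly written silence.
  *num_written_frames = num_queued_frames + num_frames_to_fill;
  return true;
}

}  // namespace media

Returning plain true at the end would also address tommi's nit above about re-checking an hr value that has already been verified.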