OLD | NEW |
---|---|
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "media/audio/win/audio_unified_win.h" | 5 #include "media/audio/win/audio_unified_win.h" |
6 | 6 |
7 #include <Functiondiscoverykeys_devpkey.h> | 7 #include <Functiondiscoverykeys_devpkey.h> |
8 | 8 |
9 #include "base/debug/trace_event.h" | 9 #include "base/debug/trace_event.h" |
10 #include "base/time.h" | 10 #include "base/time.h" |
(...skipping 228 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
239 } | 239 } |
240 | 240 |
241 // Start input streaming data between the endpoint buffer and the audio | 241 // Start input streaming data between the endpoint buffer and the audio |
242 // engine. | 242 // engine. |
243 HRESULT hr = audio_input_client_->Start(); | 243 HRESULT hr = audio_input_client_->Start(); |
244 if (FAILED(hr)) { | 244 if (FAILED(hr)) { |
245 StopAndJoinThread(hr); | 245 StopAndJoinThread(hr); |
246 return; | 246 return; |
247 } | 247 } |
248 | 248 |
249 // Reset the counter for number of rendered frames taking into account the | 249 // Ensure that the endpoint buffer is prepared with silence. |
250 // fact that we always initialize the render side with silence. | 250 UINT32 num_filled_frames = 0; |
251 UINT32 num_queued_frames = 0; | 251 if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) { |
252 audio_output_client_->GetCurrentPadding(&num_queued_frames); | 252 if (!FillRenderEndpointBufferWithSilence(&num_filled_frames)) { |
253 DCHECK_EQ(num_queued_frames, endpoint_render_buffer_size_frames_); | 253 DLOG(WARNING) << "Failed to prepare endpoint buffers with silence."; |
254 num_written_frames_ = num_queued_frames; | 254 return; |
255 } | |
256 DCHECK_EQ(num_filled_frames, endpoint_render_buffer_size_frames_); | |
257 } | |
258 num_written_frames_ = num_filled_frames; | |
255 | 259 |
256 // Start output streaming data between the endpoint buffer and the audio | 260 // Start output streaming data between the endpoint buffer and the audio |
257 // engine. | 261 // engine. |
258 hr = audio_output_client_->Start(); | 262 hr = audio_output_client_->Start(); |
259 if (FAILED(hr)) { | 263 if (FAILED(hr)) { |
260 StopAndJoinThread(hr); | 264 StopAndJoinThread(hr); |
261 return; | 265 return; |
262 } | 266 } |
263 } | 267 } |
264 | 268 |
(...skipping 294 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
559 | 563 |
void WASAPIUnifiedStream::StopAndJoinThread(HRESULT err) {
  // Shuts down the audio I/O thread and reports |err| through the stream's
  // error path.  Must run on the thread that created the stream — the audio
  // I/O thread object is owned by that thread.
  CHECK(GetCurrentThreadId() == creating_thread_id_);
  DCHECK(audio_io_thread_.get());
  // Signal the streaming loop to exit, then wait for the thread to finish
  // before releasing the thread object.
  SetEvent(stop_streaming_event_.Get());
  audio_io_thread_->Join();
  audio_io_thread_.reset();
  // Only after the thread is fully torn down is the failure propagated.
  HandleError(err);
}
568 | 572 |
573 bool WASAPIUnifiedStream::FillRenderEndpointBufferWithSilence( | |
DaleCurtis
2013/01/31 02:34:33
Should this just be in CoreAudioUtil?
henrika (OOO until Aug 14)
2013/01/31 14:29:38
Good point. I will fix that.
| |
574 UINT32* num_written_frames) { | |
tommi (sloooow) - chröme
2013/01/31 13:42:08
indent
henrika (OOO until Aug 14)
2013/01/31 14:29:38
Done.
| |
575 UINT32 num_queued_frames = 0; | |
576 HRESULT hr = audio_output_client_->GetCurrentPadding(&num_queued_frames); | |
577 if (FAILED(hr)) | |
578 return false; | |
579 | |
580 BYTE* data = NULL; | |
581 int num_frames_to_fill = | |
582 endpoint_render_buffer_size_frames_ - num_queued_frames; | |
583 hr = audio_render_client_->GetBuffer(num_frames_to_fill, &data); | |
584 if (FAILED(hr)) | |
585 return false; | |
586 | |
587 // Using the AUDCLNT_BUFFERFLAGS_SILENT flag eliminates the need to | |
588 // explicitly write silence data to the rendering buffer. | |
589 DVLOG(2) << "filling up " << num_frames_to_fill << " frames with silence"; | |
590 hr = audio_render_client_->ReleaseBuffer(num_frames_to_fill, | |
591 AUDCLNT_BUFFERFLAGS_SILENT); | |
tommi (sloooow) - chröme
2013/01/31 13:42:08
indent
henrika (OOO until Aug 14)
2013/01/31 14:29:38
Done.
| |
592 if (FAILED(hr)) | |
593 return false; | |
594 | |
595 // Get the amount of valid, unread data that the endpoint buffer | |
596 // currently contains. This amount corresponds to the number of written | |
597 // audio frames. | |
598 hr = audio_output_client_->GetCurrentPadding(&num_queued_frames); | |
599 if (FAILED(hr)) | |
600 return false; | |
601 *num_written_frames = num_queued_frames; | |
602 | |
603 return SUCCEEDED(hr); | |
tommi (sloooow) - chröme
2013/01/31 13:42:08
nit: you've already checked hr, so you can return
henrika (OOO until Aug 14)
2013/01/31 14:29:38
I use the "hängslen och livrem" principle here ;-)
| |
604 } | |
605 | |
569 } // namespace media | 606 } // namespace media |
OLD | NEW |