Chromium Code Reviews

Side by Side Diff: media/audio/win/audio_low_latency_output_win.cc

Issue 8440002: Low-latency AudioOutputStream implementation based on WASAPI for Windows. (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src/
Patch Set: Improved comments about thread handling Created 9 years, 1 month ago
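
For orientation, a hypothetical sketch of how a client could drive the stream implemented in this patch. The callback signatures are inferred from the OnMoreData()/OnError() call sites below; |manager| and |params| are assumed to exist, and nothing here is part of the patch itself.

  #include <string.h>  // memset

  // Minimal source that renders silence; a real source writes PCM data.
  class SilenceSource : public AudioOutputStream::AudioSourceCallback {
   public:
    virtual uint32 OnMoreData(AudioOutputStream* stream, uint8* dest,
                              uint32 max_size, AudioBuffersState state) {
      memset(dest, 0, max_size);
      return max_size;  // Number of bytes actually filled.
    }
    virtual void OnError(AudioOutputStream* stream, int code) {}
  };

  // All calls below must come from the thread that created the stream
  // (enforced by the DCHECKs in the patch).
  WASAPIAudioOutputStream* stream =
      new WASAPIAudioOutputStream(manager, params, eConsole);
  SilenceSource source;
  if (stream->Open()) {
    stream->Start(&source);
    // ... audio renders until ...
    stream->Stop();
  }
  stream->Close();  // Calls ReleaseOutputStream(), which destroys the stream.
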
Property Changes:
Added: svn:eol-style
+ LF
1 // Copyright (c) 2011 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "media/audio/win/audio_low_latency_output_win.h"
6
7 #include "base/logging.h"
8 #include "base/memory/scoped_ptr.h"
9 #include "base/utf_string_conversions.h"
10 #include "media/audio/audio_util.h"
11 #include "media/audio/win/audio_manager_win.h"
12 #include "media/audio/win/avrt_wrapper_win.h"
13
14 using base::win::ScopedComPtr;
15 using base::win::ScopedCOMInitializer;
16
17 WASAPIAudioOutputStream::WASAPIAudioOutputStream(AudioManagerWin* manager,
18 const AudioParameters& params,
19 ERole device_role)
20 : com_init_(ScopedCOMInitializer::kMTA),
21 creating_thread_id_(base::PlatformThread::CurrentId()),
22 manager_(manager),
23 render_thread_(NULL),
24 opened_(false),
25 started_(false),
26 volume_(1.0),
27 endpoint_buffer_size_frames_(0),
28 device_role_(device_role),
29 num_written_frames_(0),
30 source_(NULL) {
31 CHECK(com_init_.succeeded());
32 DCHECK(manager_);
33
34 // Load the Avrt DLL if not already loaded. Required to support MMCSS.
35 bool avrt_init = avrt::Initialize();
36 DCHECK(avrt_init) << "Failed to load avrt.dll.";
37
38 // Set up the desired render format specified by the client.
39 format_.nSamplesPerSec = params.sample_rate;
40 format_.wFormatTag = WAVE_FORMAT_PCM;
41 format_.wBitsPerSample = params.bits_per_sample;
42 format_.nChannels = params.channels;
43 format_.nBlockAlign = (format_.wBitsPerSample / 8) * format_.nChannels;
44 format_.nAvgBytesPerSec = format_.nSamplesPerSec * format_.nBlockAlign;
45 format_.cbSize = 0;
46
47 // Size in bytes of each audio frame.
48 frame_size_ = format_.nBlockAlign;
49
50 // Store the size (in different units) of the audio packets that we expect
51 // to deliver to the audio endpoint device in each render event.
52 packet_size_frames_ = params.GetPacketSize() / format_.nBlockAlign;
53 packet_size_bytes_ = params.GetPacketSize();
54 packet_size_ms_ = (1000.0 * packet_size_frames_) / params.sample_rate;
55 DVLOG(1) << "Number of bytes per audio frame : " << frame_size_;
56 DVLOG(1) << "Number of audio frames per packet: " << packet_size_frames_;
57 DVLOG(1) << "Number of milliseconds per packet: " << packet_size_ms_;
58
59 // All events are auto-reset events and non-signaled initially.
60
61 // Create the event which the audio engine will signal each time
62 // a buffer becomes ready to be processed by the client.
63 audio_samples_render_event_.Set(CreateEvent(NULL, FALSE, FALSE, NULL));
64 DCHECK(audio_samples_render_event_.IsValid());
65
66 // Create the event which will be set in Stop() when rendering shall stop.
67 stop_render_event_.Set(CreateEvent(NULL, FALSE, FALSE, NULL));
68 DCHECK(stop_render_event_.IsValid());
69 }
70
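The unit bookkeeping in the constructor is easiest to verify with concrete numbers. A worked example; the input values (48 kHz, 16-bit stereo, a 1920-byte packet from params.GetPacketSize()) are illustrative assumptions, not values from the patch.

  const int sample_rate = 48000;       // params.sample_rate
  const int bits_per_sample = 16;      // params.bits_per_sample
  const int channels = 2;              // params.channels
  const int packet_size_bytes = 1920;  // params.GetPacketSize()

  const int block_align = (bits_per_sample / 8) * channels;        // 4 bytes
  const int frame_size = block_align;                              // 4 bytes
  const int packet_size_frames = packet_size_bytes / block_align;  // 480 frames
  const double packet_size_ms =
      (1000.0 * packet_size_frames) / sample_rate;                 // 10 ms
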
71 WASAPIAudioOutputStream::~WASAPIAudioOutputStream() {}
72
73 bool WASAPIAudioOutputStream::Open() {
74 DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_);
75 if (opened_)
76 return true;
77
78 // Obtain a reference to the IMMDevice interface of the default rendering
79 // device with the specified role.
80 HRESULT hr = SetRenderDevice(device_role_);
81 if (FAILED(hr)) {
82 HandleError(hr);
83 return false;
84 }
85
86 // Obtain an IAudioClient interface which enables us to create and initialize
87 // an audio stream between an audio application and the audio engine.
88 hr = ActivateRenderDevice();
89 if (FAILED(hr)) {
90 HandleError(hr);
91 return false;
92 }
93
94 // Retrieve the stream format which the audio engine uses for its internal
95 // processing/mixing of shared-mode streams.
96 hr = GetAudioEngineStreamFormat();
97 if (FAILED(hr)) {
98 HandleError(hr);
99 return false;
100 }
101
102 // Verify that the selected audio endpoint supports the specified format
103 // set during construction.
104 if (!DesiredFormatIsSupported()) {
105 hr = E_INVALIDARG;
106 HandleError(hr);
107 return false;
108 }
109
110 // Initialize the audio stream between the client and the device using
111 // shared mode and the lowest possible glitch-free latency.
112 hr = InitializeAudioEngine();
113 if (FAILED(hr)) {
114 HandleError(hr);
115 return false;
116 }
117
118 opened_ = true;
119
120 return true;
121 }
122
123 void WASAPIAudioOutputStream::Start(AudioSourceCallback* callback) {
124 DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_);
125 DCHECK(callback);
126 DCHECK(opened_);
127
128 if (!opened_)
129 return;
130
131 if (started_)
132 return;
133
134 source_ = callback;
135
136 // Avoid start-up glitches by filling up the endpoint buffer with "silence"
137 // before starting the stream.
138 BYTE* data_ptr = NULL;
139 HRESULT hr = audio_render_client_->GetBuffer(endpoint_buffer_size_frames_,
140 &data_ptr);
141 if (FAILED(hr)) {
142 DLOG(ERROR) << "Failed to use rendering audio buffer: " << std::hex << hr;
143 return;
144 }
145
146 // Using the AUDCLNT_BUFFERFLAGS_SILENT flag eliminates the need to
147 // explicitly write silence data to the rendering buffer.
148 audio_render_client_->ReleaseBuffer(endpoint_buffer_size_frames_,
149 AUDCLNT_BUFFERFLAGS_SILENT);
150 num_written_frames_ = endpoint_buffer_size_frames_;
151
152 // Sanity check: verify that the endpoint buffer is filled with silence.
153 UINT32 num_queued_frames = 0;
154 audio_client_->GetCurrentPadding(&num_queued_frames);
155 DCHECK(num_queued_frames == num_written_frames_);
156
157 // Create and start the thread that will drive the rendering by waiting for
158 // render events.
159 render_thread_ = new base::DelegateSimpleThread(this, "wasapi_render_thread");
160 render_thread_->Start();
161 if (!render_thread_->HasBeenStarted()) {
162 DLOG(ERROR) << "Failed to start WASAPI render thread.";
163 return;
164 }
165
166 // Start streaming data between the endpoint buffer and the audio engine.
167 hr = audio_client_->Start();
168 if (FAILED(hr)) {
169 SetEvent(stop_render_event_.Get());
170 render_thread_->Join();
171 render_thread_ = NULL;
172 HandleError(hr);
173 return;
174 }
175
176 started_ = true;
177 }
178
179 void WASAPIAudioOutputStream::Stop() {
180 DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_);
181 if (!started_)
182 return;
183
184 // Shut down the render thread.
185 if (stop_render_event_.IsValid()) {
186 SetEvent(stop_render_event_.Get());
187 }
188
189 // Stop output audio streaming.
190 HRESULT hr = audio_client_->Stop();
191 DLOG_IF(ERROR, FAILED(hr)) << "Failed to stop output streaming: "
192 << std::hex << hr;
193
194 // Wait until the thread completes and perform cleanup.
195 if (render_thread_) {
196 SetEvent(stop_render_event_.Get());
197 render_thread_->Join();
198 render_thread_ = NULL;
199 }
200
201 started_ = false;
202 }
203
204 void WASAPIAudioOutputStream::Close() {
205 DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_);
206
207 // It is valid to call Close() before calling Open() or Start().
208 // It is also valid to call Close() after Start() has been called.
209 Stop();
210
211 // Inform the audio manager that we have been closed. This will cause our
212 // destruction.
213 manager_->ReleaseOutputStream(this);
214 }
215
216 void WASAPIAudioOutputStream::SetVolume(double volume) {
217 float volume_float = static_cast<float>(volume);
218 if (volume_float < 0.0f || volume_float > 1.0f) {
219 DLOG(WARNING) << "Invalid volume setting. Valid range is [0.0, 1.0]";
220 return;
221 }
222 volume_ = volume_float;
223 }
224
225 void WASAPIAudioOutputStream::GetVolume(double* volume) {
226 *volume = static_cast<double>(volume_);
227 }
228
229 // static
230 double WASAPIAudioOutputStream::HardwareSampleRate(ERole device_role) {
231 // This static method is assumed to be called from a COM-initialized thread;
232 // CoInitializeEx() is not called here again to avoid STA/MTA conflicts.
233 ScopedComPtr<IMMDeviceEnumerator> enumerator;
234 HRESULT hr = CoCreateInstance(__uuidof(MMDeviceEnumerator),
235 NULL,
236 CLSCTX_INPROC_SERVER,
237 __uuidof(IMMDeviceEnumerator),
238 enumerator.ReceiveVoid());
239 if (FAILED(hr)) {
240 NOTREACHED() << "error code: " << std::hex << hr;
241 return 0.0;
242 }
243
244 ScopedComPtr<IMMDevice> endpoint_device;
245 hr = enumerator->GetDefaultAudioEndpoint(eRender,
246 device_role,
247 endpoint_device.Receive());
248 if (FAILED(hr)) {
249 // This will happen if there's no audio output device found or available
250 // (e.g. some audio cards that have outputs will still report them as
251 // "not found" when no speaker is plugged into the output jack).
252 LOG(WARNING) << "No audio end point: " << std::hex << hr;
253 return 0.0;
254 }
255
256 ScopedComPtr<IAudioClient> audio_client;
257 hr = endpoint_device->Activate(__uuidof(IAudioClient),
258 CLSCTX_INPROC_SERVER,
259 NULL,
260 audio_client.ReceiveVoid());
261 if (FAILED(hr)) {
262 NOTREACHED() << "error code: " << std::hex << hr;
263 return 0.0;
264 }
265
266 base::win::ScopedCoMem<WAVEFORMATEX> audio_engine_mix_format;
267 hr = audio_client->GetMixFormat(&audio_engine_mix_format);
268 if (FAILED(hr)) {
269 NOTREACHED() << "error code: " << std::hex << hr;
270 return 0.0;
271 }
272
273 return static_cast<double>(audio_engine_mix_format->nSamplesPerSec);
274 }
275
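Because HardwareSampleRate() deliberately skips CoInitializeEx(), a caller must already be on a COM-initialized thread. A minimal sketch of such a caller (the surrounding thread setup is assumed):

  base::win::ScopedCOMInitializer com_init(
      base::win::ScopedCOMInitializer::kMTA);
  double sample_rate = WASAPIAudioOutputStream::HardwareSampleRate(eConsole);
  if (sample_rate == 0.0) {
    // No active render device, e.g. nothing plugged into the output jack.
  }
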
276 void WASAPIAudioOutputStream::Run() {
277 ScopedCOMInitializer com_init(ScopedCOMInitializer::kMTA);
278
279 // Increase the thread priority.
280 render_thread_->SetThreadPriority(base::kThreadPriority_RealtimeAudio);
281
282 // Enable MMCSS to ensure that this thread receives prioritized access to
283 // CPU resources.
284 DWORD task_index = 0;
285 HANDLE mm_task = avrt::AvSetMmThreadCharacteristics(L"Pro Audio",
286 &task_index);
287 bool mmcss_is_ok =
288 (mm_task && avrt::AvSetMmThreadPriority(mm_task, AVRT_PRIORITY_CRITICAL));
289 if (!mmcss_is_ok) {
290 // Failed to enable MMCSS on this thread. It is not fatal but can lead
291 // to reduced QoS at high load.
292 DWORD err = GetLastError();
293 LOG(WARNING) << "Failed to enable MMCSS (error code=" << err << ").";
294 }
295
296 HRESULT hr = S_FALSE;
297
298 bool playing = true;
299 bool error = false;
300 HANDLE wait_array[2] = {stop_render_event_, audio_samples_render_event_};
301 UINT64 device_frequency = 0;
302
303 // The IAudioClock interface enables us to monitor a stream's data
304 // rate and the current position in the stream. Allocate it before we
305 // start spinning.
306 ScopedComPtr<IAudioClock> audio_clock;
307 hr = audio_client_->GetService(__uuidof(IAudioClock),
308 audio_clock.ReceiveVoid());
309 if (SUCCEEDED(hr)) {
310 // The device frequency is the frequency generated by the hardware clock in
311 // the audio device. The GetFrequency() method reports a constant frequency.
312 hr = audio_clock->GetFrequency(&device_frequency);
313 }
314 error = FAILED(hr);
315 DLOG_IF(ERROR, error) << "Failed to acquire IAudioClock interface: "
316 << std::hex << hr;
317
318 // Render audio until stop event or error.
319 while (playing && !error) {
320 // Wait for a close-down event or a new render event.
321 DWORD wait_result = WaitForMultipleObjects(2, wait_array, FALSE, INFINITE);
322
323 switch (wait_result) {
324 case WAIT_OBJECT_0 + 0:
325 // |stop_render_event_| has been set.
326 playing = false;
327 break;
328 case WAIT_OBJECT_0 + 1:
329 {
330 // |audio_samples_render_event_| has been set.
331 UINT32 num_queued_frames = 0;
332 uint8* audio_data = NULL;
333
334 // Get the padding value which represents the amount of rendering
335 // data that is queued up to play in the endpoint buffer.
336 hr = audio_client_->GetCurrentPadding(&num_queued_frames);
337
338 // Determine how much new data we can write to the buffer without
339 // the risk of overwriting previously written data that the audio
340 // engine has not yet read from the buffer.
341 size_t num_available_frames =
342 endpoint_buffer_size_frames_ - num_queued_frames;
343
344 // Check if there is enough available space to fit the packet size
345 // specified by the client.
346 if (FAILED(hr) || (num_available_frames < packet_size_frames_))
347 continue;
348
349 // Derive the number of packets we need to get from the client to
350 // fill up the available area in the endpoint buffer.
351 size_t num_packets = (num_available_frames / packet_size_frames_);
352
353 // Get data from the client/source.
354 for (size_t n = 0; n < num_packets; ++n) {
355 // Grab all available space in the rendering endpoint buffer
356 // into which the client can write a data packet.
357 hr = audio_render_client_->GetBuffer(packet_size_frames_,
358 &audio_data);
359 if (FAILED(hr)) {
360 DLOG(ERROR) << "Failed to use rendering audio buffer: "
361 << std::hex << hr;
362 continue;
363 }
364
365 // Derive the audio delay which corresponds to the delay between
366 // a render event and the time when the first audio sample in a
367 // packet is played out through the speaker. This delay value
368 // can typically be utilized by an acoustic echo-control (AEC)
369 // unit at the render side.
370 UINT64 position = 0;
371 int audio_delay_bytes = 0;
372 hr = audio_clock->GetPosition(&position, NULL);
373 if (SUCCEEDED(hr)) {
374 // Stream position of the sample that is currently playing
375 // through the speaker.
376 double pos_sample_playing_frames = format_.nSamplesPerSec *
377 (static_cast<double>(position) / device_frequency);
378
379 // Stream position of the last sample written to the endpoint
380 // buffer. Note that the packet we are about to receive in
381 // the upcoming callback is also included.
382 size_t pos_last_sample_written_frames =
383 num_written_frames_ + packet_size_frames_;
384
385 // Derive the actual delay value which will be fed to the
386 // render client using the OnMoreData() callback.
387 audio_delay_bytes = (pos_last_sample_written_frames -
388 pos_sample_playing_frames) * frame_size_;
389 }
390
391 // Read a data packet from the registered client source and
392 // deliver a delay estimate in the same callback to the client.
393 // A time stamp is also stored in the AudioBuffersState. This
394 // time stamp can be used at the client side to compensate for
395 // the delay between the usage of the delay value and the time
396 // of generation.
397 uint32 num_filled_bytes = source_->OnMoreData(
398 this, audio_data, packet_size_bytes_,
399 AudioBuffersState(0, audio_delay_bytes));
400
401 // Perform in-place, software-volume adjustments.
402 media::AdjustVolume(audio_data,
403 num_filled_bytes,
404 format_.nChannels,
405 format_.wBitsPerSample >> 3,
406 volume_);
407
408 // Zero out the part of the packet which has not been filled by
409 // the client. Using silence is the least bad option in this
410 // situation.
411 if (num_filled_bytes < packet_size_bytes_) {
412 memset(&audio_data[num_filled_bytes], 0,
413 (packet_size_bytes_ - num_filled_bytes));
414 }
415
416 // Release the buffer space acquired in the GetBuffer() call.
417 DWORD flags = 0;
418 audio_render_client_->ReleaseBuffer(packet_size_frames_,
419 flags);
420
421 num_written_frames_ += packet_size_frames_;
422 }
423 }
424 break;
425 default:
426 error = true;
427 break;
428 }
429 }
430
431 if (playing && error) {
432 // Stop audio rendering since something has gone wrong in our main thread
433 // loop. Note that we are still in a "started" state, hence a Stop() call
434 // is required to join the thread properly.
435 audio_client_->Stop();
436 DLOG(ERROR) << "WASAPI rendering failed.";
437 }
438
439 // Disable MMCSS.
440 if (mm_task && !avrt::AvRevertMmThreadCharacteristics(mm_task)) {
441 PLOG(WARNING) << "Failed to disable MMCSS";
442 }
443 }
444
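The delay estimate computed inside the render loop above reduces to a small pure function. A sketch that mirrors the math in Run(); the function name and sample values are illustrative, not part of the patch.

  int EstimateDelayBytes(UINT64 position,          // From IAudioClock.
                         UINT64 device_frequency,  // Clock ticks per second.
                         int sample_rate,          // format_.nSamplesPerSec
                         size_t written_frames,    // num_written_frames_
                         size_t packet_frames,     // packet_size_frames_
                         int frame_size) {         // frame_size_
    // Frames played out so far: clock position in seconds, times the rate.
    double playing_frames =
        sample_rate * (static_cast<double>(position) / device_frequency);
    // Frames written, including the packet delivered in this callback.
    double written_frames_total =
        static_cast<double>(written_frames + packet_frames);
    // The gap between written and playing frames is the buffered audio.
    return static_cast<int>((written_frames_total - playing_frames) *
                            frame_size);
  }

At 48 kHz with 4-byte frames, a gap of 480 frames yields 1920 bytes, i.e. 10 ms of buffered audio.
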
445 void WASAPIAudioOutputStream::HandleError(HRESULT err) {
446 NOTREACHED() << "Error code: " << std::hex << err;
447 if (source_)
448 source_->OnError(this, static_cast<int>(err));
449 }
450
451 HRESULT WASAPIAudioOutputStream::SetRenderDevice(ERole device_role) {
452 ScopedComPtr<IMMDeviceEnumerator> enumerator;
453 HRESULT hr = CoCreateInstance(__uuidof(MMDeviceEnumerator),
454 NULL,
455 CLSCTX_INPROC_SERVER,
456 __uuidof(IMMDeviceEnumerator),
457 enumerator.ReceiveVoid());
458 if (SUCCEEDED(hr)) {
459 // Retrieve the default render audio endpoint for the specified role.
460 // Note that in Windows Vista, the MMDevice API supports device roles
461 // but the system-supplied user interface programs do not.
462 hr = enumerator->GetDefaultAudioEndpoint(eRender,
463 device_role,
464 endpoint_device_.Receive());
465
466 // Verify that the audio endpoint device is active. That is, the audio
467 // adapter that connects to the endpoint device is present and enabled.
468 if (SUCCEEDED(hr)) {
469 DWORD state = DEVICE_STATE_DISABLED;
470 hr = endpoint_device_->GetState(&state);
471 if (SUCCEEDED(hr) && !(state & DEVICE_STATE_ACTIVE)) {
472 DLOG(ERROR) << "Selected render device is not active.";
473 hr = E_ACCESSDENIED;
474 }
475 }
476 }
477
478 return hr;
479 }
480
481 HRESULT WASAPIAudioOutputStream::ActivateRenderDevice() {
482 // Creates and activates an IAudioClient COM object given the selected
483 // render endpoint device.
484 HRESULT hr = endpoint_device_->Activate(__uuidof(IAudioClient),
485 CLSCTX_INPROC_SERVER,
486 NULL,
487 audio_client_.ReceiveVoid());
488 return hr;
489 }
490
491 HRESULT WASAPIAudioOutputStream::GetAudioEngineStreamFormat() {
492 // Retrieve the stream format that the audio engine uses for its internal
493 // processing/mixing of shared-mode streams.
494 return audio_client_->GetMixFormat(&audio_engine_mix_format_);
495 }
496
497 bool WASAPIAudioOutputStream::DesiredFormatIsSupported() {
498 // In shared mode, the audio engine always supports the mix format,
499 // which is stored in the |audio_engine_mix_format_| member. In addition,
500 // the audio engine *might* support similar formats that have the same
501 // sample rate and number of channels as the mix format but differ in
502 // the representation of audio sample values.
503 base::win::ScopedCoMem<WAVEFORMATEX> closest_match;
504 HRESULT hr = audio_client_->IsFormatSupported(AUDCLNT_SHAREMODE_SHARED,
505 &format_,
506 &closest_match);
507 DLOG_IF(ERROR, hr == S_FALSE) << "Format is not supported "
508 << "but a closest match exists.";
509 return (hr == S_OK);
510 }
511
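For reference, IsFormatSupported() has three shared-mode outcomes; a sketch of how a caller could distinguish them, per the WASAPI documentation rather than this patch:

  base::win::ScopedCoMem<WAVEFORMATEX> closest_match;
  HRESULT hr = audio_client->IsFormatSupported(AUDCLNT_SHAREMODE_SHARED,
                                               &format, &closest_match);
  if (hr == S_OK) {
    // The format is supported as-is; |closest_match| stays NULL.
  } else if (hr == S_FALSE) {
    // Not supported as-is, but |closest_match| points at a similar format.
  } else {
    // E.g. AUDCLNT_E_UNSUPPORTED_FORMAT: no usable match at all.
  }
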
512 HRESULT WASAPIAudioOutputStream::InitializeAudioEngine() {
513 // TODO(henrika): this buffer scheme is still under development.
514 // The exact details are yet to be determined based on tests with different
515 // audio clients.
516 int glitch_free_buffer_size_ms = static_cast<int>(packet_size_ms_ + 0.5);
517 if (audio_engine_mix_format_->nSamplesPerSec == 48000) {
518 // Initial tests have shown that we have to add 10 ms extra to
519 // ensure that we don't run empty for any packet size.
520 glitch_free_buffer_size_ms += 10;
521 } else if (audio_engine_mix_format_->nSamplesPerSec == 44100) {
522 // Initial tests have shown that we have to add 20 ms extra to
523 // ensure that we don't run empty for any packet size.
524 glitch_free_buffer_size_ms += 20;
525 } else {
526 glitch_free_buffer_size_ms += 20;
527 }
528 DVLOG(1) << "glitch_free_buffer_size_ms: " << glitch_free_buffer_size_ms;
529 REFERENCE_TIME requested_buffer_duration_hns =
530 static_cast<REFERENCE_TIME>(glitch_free_buffer_size_ms * 10000);
531
532 // Initialize the audio stream between the client and the device.
533 // We connect indirectly through the audio engine by using shared mode,
534 // and WASAPI is initialized in an event-driven mode.
535 // Note that this API ensures that the buffer is never smaller than the
536 // minimum buffer size needed to ensure glitch-free rendering.
537 // If we request a buffer size that is smaller than the audio engine's
538 // minimum required buffer size, the method uses this minimum size
539 // rather than the requested size.
540 HRESULT hr = audio_client_->Initialize(AUDCLNT_SHAREMODE_SHARED,
541 AUDCLNT_STREAMFLAGS_EVENTCALLBACK |
542 AUDCLNT_STREAMFLAGS_NOPERSIST,
543 requested_buffer_duration_hns,
544 0,
545 &format_,
546 NULL);
547 if (FAILED(hr))
548 return hr;
549
550 // Retrieve the length of the endpoint buffer shared between the client
551 // and the audio engine. The buffer length determines
552 // the maximum amount of rendering data that the client can write to
553 // the endpoint buffer during a single processing pass.
554 // A typical value is 960 audio frames <=> 20ms @ 48kHz sample rate.
555 hr = audio_client_->GetBufferSize(&endpoint_buffer_size_frames_);
556 if (FAILED(hr))
557 return hr;
558 DVLOG(1) << "endpoint buffer size: " << endpoint_buffer_size_frames_
559 << " [frames]";
560 #ifndef NDEBUG
561 // The period between processing passes by the audio engine is fixed for a
562 // particular audio endpoint device and represents the smallest processing
563 // quantum for the audio engine. This period plus the stream latency between
564 // the buffer and endpoint device represents the minimum possible latency
565 // that an audio application can achieve in shared mode.
566 REFERENCE_TIME default_device_period = 0;
567 REFERENCE_TIME minimum_device_period = 0;
568 HRESULT hr_dbg = audio_client_->GetDevicePeriod(&default_device_period,
569 &minimum_device_period);
570 if (SUCCEEDED(hr_dbg)) {
571 // Shared mode device period.
572 DVLOG(1) << "default device period: "
573 << static_cast<double>(default_device_period / 10000.0)
574 << " [ms]";
575 // Exclusive mode device period.
576 DVLOG(1) << "minimum device period: "
577 << static_cast<double>(minimum_device_period / 10000.0)
578 << " [ms]";
579 }
580
581 REFERENCE_TIME latency = 0;
582 hr_dbg = audio_client_->GetStreamLatency(&latency);
583 if (SUCCEEDED(hr_dbg)) {
584 DVLOG(1) << "stream latency: " << static_cast<double>(latency / 10000.0)
585 << " [ms]";
586 }
587 #endif
588
589 // Set the event handle that the audio engine will signal each time
590 // a buffer becomes ready to be processed by the client.
591 hr = audio_client_->SetEventHandle(audio_samples_render_event_.Get());
592 if (FAILED(hr))
593 return hr;
594
595 // Get access to the IAudioRenderClient interface. This interface
596 // enables us to write output data to a rendering endpoint buffer.
597 // The methods in this interface manage the movement of data packets
598 // that contain audio-rendering data.
599 hr = audio_client_->GetService(__uuidof(IAudioRenderClient),
600 audio_render_client_.ReceiveVoid());
601 return hr;
602 }
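
The conversions scattered through InitializeAudioEngine() rely on REFERENCE_TIME being expressed in 100-nanosecond units, so 1 ms corresponds to 10000 units. Stated as hypothetical helpers (not part of the patch):

  REFERENCE_TIME MillisecondsToRefTime(int ms) {
    return static_cast<REFERENCE_TIME>(ms) * 10000;
  }

  double RefTimeToMilliseconds(REFERENCE_TIME t) {
    return static_cast<double>(t) / 10000.0;
  }

  // Example: a 30 ms buffer request becomes 300000 units, and a reported
  // device period of 100000 units is 10 ms.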