Chromium Code Reviews

Side by Side Diff: media/audio/win/audio_low_latency_output_win.cc

Issue 8440002: Low-latency AudioOutputStream implementation based on WASAPI for Windows. (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src/
Patch Set: rebased Created 9 years, 1 month ago
Property Changes:
Added: svn:eol-style
+ LF
1 // Copyright (c) 2011 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "media/audio/win/audio_low_latency_output_win.h"
6
7 #include "base/logging.h"
8 #include "base/memory/scoped_ptr.h"
9 #include "base/utf_string_conversions.h"
10 #include "media/audio/audio_util.h"
11 #include "media/audio/win/audio_manager_win.h"
12 #include "media/audio/win/avrt_wrapper_win.h"
13
14 using base::win::ScopedComPtr;
15 using base::win::ScopedCOMInitializer;
16
17 WASAPIAudioOutputStream::WASAPIAudioOutputStream(AudioManagerWin* manager,
18 const AudioParameters& params,
19 ERole device_role)
20 : com_init_(ScopedCOMInitializer::kMTA),
21 creating_thread_id_(base::PlatformThread::CurrentId()),
22 manager_(manager),
23 render_thread_(NULL),
24 opened_(false),
25 started_(false),
26 volume_(1.0),
27 endpoint_buffer_size_frames_(0),
28 device_role_(device_role),
29 num_written_frames_(0),
30 source_(NULL) {
31 CHECK(com_init_.succeeded());
32 DCHECK(manager_);
33
34 // Load the Avrt DLL if not already loaded. Required to support MMCSS.
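// MMCSS (Multimedia Class Scheduler Service) lets the render thread in
// Run() request prioritized CPU scheduling for its audio work.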
35 bool avrt_init = avrt::Initialize();
36 DCHECK(avrt_init) << "Failed to load the avrt.dll";
37
38 // Set up the desired render format specified by the client.
39 format_.nSamplesPerSec = params.sample_rate;
40 format_.wFormatTag = WAVE_FORMAT_PCM;
41 format_.wBitsPerSample = params.bits_per_sample;
42 format_.nChannels = params.channels;
43 format_.nBlockAlign = (format_.wBitsPerSample / 8) * format_.nChannels;
44 format_.nAvgBytesPerSec = format_.nSamplesPerSec * format_.nBlockAlign;
45 format_.cbSize = 0;
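// For example, 48 kHz stereo 16-bit PCM gives nBlockAlign = 2 * 2 = 4
// bytes per frame and nAvgBytesPerSec = 48000 * 4 = 192000.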
46
47 // Size in bytes of each audio frame.
48 frame_size_ = format_.nBlockAlign;
49
50 // Store the size (in different units) of the audio packets which we
51 // expect to receive from the audio endpoint device in each render event.
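// For example, a 10 ms packet at 48 kHz stereo 16-bit PCM corresponds
// to 480 frames, i.e. 480 * 4 = 1920 bytes.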
52 packet_size_frames_ = params.GetPacketSize() / format_.nBlockAlign;
53 packet_size_bytes_ = params.GetPacketSize();
54 packet_size_ms_ = (1000.0 * packet_size_frames_) / params.sample_rate;
55 DVLOG(1) << "Number of bytes per audio frame : " << frame_size_;
56 DVLOG(1) << "Number of audio frames per packet: " << packet_size_frames_;
57 DVLOG(1) << "Number of milliseconds per packet: " << packet_size_ms_;
58
59 // All events are auto-reset events and non-signaled initially.
60
61 // Create the event which the audio engine will signal each time
62 // a buffer becomes ready to be processed by the client.
63 audio_samples_render_event_.Set(CreateEvent(NULL, FALSE, FALSE, NULL));
64 DCHECK(audio_samples_render_event_.IsValid());
65
66 // Create the event which will be set in Stop() when rendering shall stop.
67 stop_render_event_.Set(CreateEvent(NULL, FALSE, FALSE, NULL));
68 DCHECK(stop_render_event_.IsValid());
69 }
70
71 WASAPIAudioOutputStream::~WASAPIAudioOutputStream() {}
72
73 bool WASAPIAudioOutputStream::Open() {
74 DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_);
75 if (opened_)
76 return true;
77
78 // Obtain a reference to the IMMDevice interface of the default rendering
79 // device with the specified role.
80 HRESULT hr = SetRenderDevice(device_role_);
81 if (FAILED(hr)) {
82 HandleError(hr);
83 return false;
84 }
85
86 // Obtain an IAudioClient interface which enables us to create and initialize
87 // an audio stream between an audio application and the audio engine.
88 hr = ActivateRenderDevice();
89 if (FAILED(hr)) {
90 HandleError(hr);
91 return false;
92 }
93
94 // Retrieve the stream format which the audio engine uses for its internal
95 // processing/mixing of shared-mode streams.
96 hr = GetAudioEngineStreamFormat();
97 if (FAILED(hr)) {
98 HandleError(hr);
99 return false;
100 }
101
102 // Verify that the selected audio endpoint supports the specified format
103 // set during construction.
104 if (!DesiredFormatIsSupported()) {
105 hr = E_INVALIDARG;
106 HandleError(hr);
107 return false;
108 }
109
110 // Initialize the audio stream between the client and the device using
111 // shared mode and the lowest possible glitch-free latency.
112 hr = InitializeAudioEngine();
113 if (FAILED(hr)) {
114 HandleError(hr);
115 return false;
116 }
117
118 opened_ = true;
119
120 return true;
121 }
122
123 void WASAPIAudioOutputStream::Start(AudioSourceCallback* callback) {
124 DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_);
125 DCHECK(callback);
126 DCHECK(opened_);
127
128 if (!opened_)
129 return;
130
131 if (started_)
132 return;
133
134 source_ = callback;
135
136 // Avoid start-up glitches by filling up the endpoint buffer with "silence"
137 // before starting the stream.
138 BYTE* data_ptr = NULL;
139 HRESULT hr = audio_render_client_->GetBuffer(endpoint_buffer_size_frames_,
140 &data_ptr);
141 if (FAILED(hr)) {
142 DLOG(ERROR) << "Failed to use rendering audio buffer: " << std::hex << hr;
143 return;
144 }
145
146 // Using the AUDCLNT_BUFFERFLAGS_SILENT flag eliminates the need to
147 // explicitly write silence data to the rendering buffer.
148 audio_render_client_->ReleaseBuffer(endpoint_buffer_size_frames_,
149 AUDCLNT_BUFFERFLAGS_SILENT);
150 num_written_frames_ = endpoint_buffer_size_frames_;
151
152 // Sanity check: verify that the endpoint buffer is filled with silence.
153 UINT32 num_queued_frames = 0;
154 audio_client_->GetCurrentPadding(&num_queued_frames);
155 DCHECK_EQ(num_queued_frames, num_written_frames_);
156
157 // Create and start the thread that will drive the rendering by waiting for
158 // render events.
159 render_thread_ = new base::DelegateSimpleThread(this, "wasapi_render_thread");
160 render_thread_->Start();
161 if (!render_thread_->HasBeenStarted()) {
162 DLOG(ERROR) << "Failed to start WASAPI render thread.";
163 return;
164 }
165
166 // Start streaming data between the endpoint buffer and the audio engine.
167 hr = audio_client_->Start();
168 if (FAILED(hr)) {
169 SetEvent(stop_render_event_.Get());
170 render_thread_->Join();
171 render_thread_ = NULL;
172 HandleError(hr);
173 return;
174 }
175
176 started_ = true;
177 }
178
179 void WASAPIAudioOutputStream::Stop() {
180 DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_);
181 if (!started_)
182 return;
183
184 // Shut down the render thread.
185 if (stop_render_event_.IsValid()) {
186 SetEvent(stop_render_event_.Get());
187 }
188
189 // Stop output audio streaming.
190 HRESULT hr = audio_client_->Stop();
191 DLOG_IF(ERROR, FAILED(hr)) << "Failed to stop output streaming: "
192 << std::hex << hr;
193
194 // Wait until the thread completes and perform cleanup.
195 if (render_thread_) {
196 SetEvent(stop_render_event_.Get());
197 render_thread_->Join();
198 render_thread_ = NULL;
199 }
200
201 started_ = false;
202 }
203
204 void WASAPIAudioOutputStream::Close() {
205 DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_);
206
207 // It is valid to call Close() before calling Open() or Start().
208 // It is also valid to call Close() after Start() has been called.
209 Stop();
210
211 // Inform the audio manager that we have been closed. This will cause our
212 // destruction.
213 manager_->ReleaseOutputStream(this);
214 }
215
216 void WASAPIAudioOutputStream::SetVolume(double volume) {
217 float volume_float = static_cast<float>(volume);
218 if (volume_float < 0.0f || volume_float > 1.0f) {
219 return;
220 }
221 volume_ = volume_float;
222 }
223
224 void WASAPIAudioOutputStream::GetVolume(double* volume) {
225 *volume = static_cast<double>(volume_);
226 }
227
228 // static
229 double WASAPIAudioOutputStream::HardwareSampleRate(ERole device_role) {
230 // It is assumed that this static method is called from a thread which has
231 // already initialized COM; skipping CoInitializeEx() here avoids STA/MTA conflicts.
232 ScopedComPtr<IMMDeviceEnumerator> enumerator;
233 HRESULT hr = CoCreateInstance(__uuidof(MMDeviceEnumerator),
234 NULL,
235 CLSCTX_INPROC_SERVER,
236 __uuidof(IMMDeviceEnumerator),
237 enumerator.ReceiveVoid());
238 if (FAILED(hr)) {
239 NOTREACHED() << "error code: " << std::hex << hr;
240 return 0.0;
241 }
242
243 ScopedComPtr<IMMDevice> endpoint_device;
244 hr = enumerator->GetDefaultAudioEndpoint(eRender,
245 device_role,
246 endpoint_device.Receive());
247 if (FAILED(hr)) {
248 // This will happen if there's no audio output device found or available
249 // (e.g. some audio cards that have outputs will still report them as
250 // "not found" when no speaker is plugged into the output jack).
251 LOG(WARNING) << "No audio end point: " << std::hex << hr;
252 return 0.0;
253 }
254
255 ScopedComPtr<IAudioClient> audio_client;
256 hr = endpoint_device->Activate(__uuidof(IAudioClient),
257 CLSCTX_INPROC_SERVER,
258 NULL,
259 audio_client.ReceiveVoid());
260 if (FAILED(hr)) {
261 NOTREACHED() << "error code: " << std::hex << hr;
262 return 0.0;
263 }
264
265 base::win::ScopedCoMem<WAVEFORMATEX> audio_engine_mix_format;
266 hr = audio_client->GetMixFormat(&audio_engine_mix_format);
267 if (FAILED(hr)) {
268 NOTREACHED() << "error code: " << std::hex << hr;
269 return 0.0;
270 }
271
272 return static_cast<double>(audio_engine_mix_format->nSamplesPerSec);
273 }
274
275 void WASAPIAudioOutputStream::Run() {
276 ScopedCOMInitializer com_init(ScopedCOMInitializer::kMTA);
277
278 // Increase the thread priority.
279 render_thread_->SetThreadPriority(base::kThreadPriority_RealtimeAudio);
280
281 // Enable MMCSS to ensure that this thread receives prioritized access to
282 // CPU resources.
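// The "Pro Audio" task runs in the highest MMCSS scheduling category,
// and AVRT_PRIORITY_CRITICAL requests the highest relative thread
// priority within that task.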
283 DWORD task_index = 0;
284 HANDLE mm_task = avrt::AvSetMmThreadCharacteristics(L"Pro Audio",
285 &task_index);
286 bool mmcss_is_ok =
287 (mm_task && avrt::AvSetMmThreadPriority(mm_task, AVRT_PRIORITY_CRITICAL));
288 if (!mmcss_is_ok) {
289 // Failed to enable MMCSS on this thread. It is not fatal but can lead
290 // to reduced QoS at high load.
291 DWORD err = GetLastError();
292 LOG(WARNING) << "Failed to enable MMCSS (error code=" << err << ").";
293 }
294
295 HRESULT hr = S_FALSE;
296
297 bool playing = true;
298 bool error = false;
299 HANDLE wait_array[2] = {stop_render_event_, audio_samples_render_event_};
300 UINT64 device_frequency = 0;
301
302 // The IAudioClock interface enables us to monitor a stream's data
303 // rate and the current position in the stream. Allocate it before we
304 // start spinning.
305 ScopedComPtr<IAudioClock> audio_clock;
306 hr = audio_client_->GetService(__uuidof(IAudioClock),
307 audio_clock.ReceiveVoid());
308 if (SUCCEEDED(hr)) {
309 // The device frequency is the frequency generated by the hardware clock in
310 // the audio device. The GetFrequency() method reports a constant frequency.
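// GetPosition() below reports the stream position in units of this
// frequency, so (position / device_frequency) yields seconds played.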
311 hr = audio_clock->GetFrequency(&device_frequency);
312 }
313 error = FAILED(hr);
314 DLOG_IF(ERROR, error) << "Failed to acquire IAudioClock interface: "
315 << std::hex << hr;
316
317 // Render audio until stop event or error.
318 while (playing && !error) {
319 // Wait for a close-down event or a new render event.
320 DWORD wait_result = WaitForMultipleObjects(2, wait_array, FALSE, INFINITE);
321
322 switch (wait_result) {
323 case WAIT_OBJECT_0 + 0:
324 // |stop_render_event_| has been set.
325 playing = false;
326 break;
327 case WAIT_OBJECT_0 + 1:
328 {
329 // |audio_samples_render_event_| has been set.
330 UINT32 num_queued_frames = 0;
331 uint8* audio_data = NULL;
332
333 // Get the padding value which represents the amount of rendering
334 // data that is queued up to play in the endpoint buffer.
335 hr = audio_client_->GetCurrentPadding(&num_queued_frames);
336
337 // Determine how much new data we can write to the buffer without
338 // the risk of overwriting previously written data that the audio
339 // engine has not yet read from the buffer.
340 size_t num_available_frames =
341 endpoint_buffer_size_frames_ - num_queued_frames;
342
343 // Check if there is enough available space to fit the packet size
344 // specified by the client.
345 if (FAILED(hr) || (num_available_frames < packet_size_frames_))
346 continue;
347
348 // Derive the number of packets we need to get from the client to
349 // fill up the available area in the endpoint buffer.
350 size_t num_packets = (num_available_frames / packet_size_frames_);
351
352 // Get data from the client/source.
353 for (size_t n = 0; n < num_packets; ++n) {
354 // Grab all available space in the rendering endpoint buffer
355 // into which the client can write a data packet.
356 hr = audio_render_client_->GetBuffer(packet_size_frames_,
357 &audio_data);
358 if (FAILED(hr)) {
359 DLOG(ERROR) << "Failed to use rendering audio buffer: "
360 << std::hex << hr;
361 continue;
362 }
363
364 // Derive the audio delay which corresponds to the delay between
365 // a render event and the time when the first audio sample in a
366 // packet is played out through the speaker. This delay value
367 // can typically be utilized by an acoustic echo-control (AEC)
368 // unit at the render side.
369 UINT64 position = 0;
370 int audio_delay_bytes = 0;
371 hr = audio_clock->GetPosition(&position, NULL);
372 if (SUCCEEDED(hr)) {
373 // Stream position of the sample that is currently playing
374 // through the speaker.
375 double pos_sample_playing_frames = format_.nSamplesPerSec *
376 (static_cast<double>(position) / device_frequency);
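// For example, at 48 kHz with position / device_frequency = 0.5 s,
// the currently playing sample is at frame 24000.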
377
378 // Stream position of the last sample written to the endpoint
379 // buffer. Note that the packet we are about to receive in
380 // the upcoming callback is also included.
381 size_t pos_last_sample_written_frames =
382 num_written_frames_ + packet_size_frames_;
383
384 // Derive the actual delay value which will be fed to the
385 // render client using the OnMoreData() callback.
386 audio_delay_bytes = (pos_last_sample_written_frames -
387 pos_sample_playing_frames) * frame_size_;
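// That is, the number of frames queued ahead of the upcoming packet,
// converted to bytes using the frame size.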
388 }
389
390 // Read a data packet from the registered client source and
391 // deliver a delay estimate in the same callback to the client.
392 // A time stamp is also stored in the AudioBuffersState. This
393 // time stamp can be used at the client side to compensate for
394 // the delay between the usage of the delay value and the time
395 // of generation.
396 uint32 num_filled_bytes = source_->OnMoreData(
397 this, audio_data, packet_size_bytes_,
398 AudioBuffersState(0, audio_delay_bytes));
399
400 // Perform in-place, software-volume adjustments.
401 media::AdjustVolume(audio_data,
402 num_filled_bytes,
403 format_.nChannels,
404 format_.wBitsPerSample >> 3,
405 volume_);
406
407 // Zero out the part of the packet which has not been filled by
408 // the client. Using silence is the least bad option in this
409 // situation.
410 if (num_filled_bytes < packet_size_bytes_) {
411 memset(&audio_data[num_filled_bytes], 0,
412 (packet_size_bytes_ - num_filled_bytes));
413 }
414
415 // Release the buffer space acquired in the GetBuffer() call.
416 DWORD flags = 0;
417 audio_render_client_->ReleaseBuffer(packet_size_frames_,
418 flags);
419
420 num_written_frames_ += packet_size_frames_;
421 }
422 }
423 break;
424 default:
425 error = true;
426 break;
427 }
428 }
429
430 if (playing && error) {
431 // Stop audio rendering since something has gone wrong in our main thread
432 // loop. Note that we are still in a "started" state, hence a Stop() call
433 // is required to join the thread properly.
434 audio_client_->Stop();
435 DLOG(ERROR) << "WASAPI rendering failed.";
436 }
437
438 // Disable MMCSS.
439 if (mm_task && !avrt::AvRevertMmThreadCharacteristics(mm_task)) {
440 PLOG(WARNING) << "Failed to disable MMCSS";
441 }
442 }
443
444 void WASAPIAudioOutputStream::HandleError(HRESULT err) {
445 NOTREACHED() << "Error code: " << std::hex << err;
446 if (source_)
447 source_->OnError(this, static_cast<int>(err));
448 }
449
450 HRESULT WASAPIAudioOutputStream::SetRenderDevice(ERole device_role) {
451 ScopedComPtr<IMMDeviceEnumerator> enumerator;
452 HRESULT hr = CoCreateInstance(__uuidof(MMDeviceEnumerator),
453 NULL,
454 CLSCTX_INPROC_SERVER,
455 __uuidof(IMMDeviceEnumerator),
456 enumerator.ReceiveVoid());
457 if (SUCCEEDED(hr)) {
458 // Retrieve the default render audio endpoint for the specified role.
459 // Note that, in Windows Vista, the MMDevice API supports device roles
460 // but the system-supplied user interface programs do not.
461 hr = enumerator->GetDefaultAudioEndpoint(eRender,
462 device_role,
463 endpoint_device_.Receive());
464
465 // Verify that the audio endpoint device is active. That is, the audio
466 // adapter that connects to the endpoint device is present and enabled.
467 // Only dereference |endpoint_device_| if the endpoint was acquired.
468 DWORD state = DEVICE_STATE_DISABLED;
469 if (SUCCEEDED(hr))
470 hr = endpoint_device_->GetState(&state);
471 if (SUCCEEDED(hr) && !(state & DEVICE_STATE_ACTIVE)) {
472 DLOG(ERROR) << "Selected render device is not active.";
473 hr = E_ACCESSDENIED;
474 }
475 }
476
477 return hr;
478 }
479
480 HRESULT WASAPIAudioOutputStream::ActivateRenderDevice() {
481 // Creates and activates an IAudioClient COM object given the selected
482 // render endpoint device.
483 HRESULT hr = endpoint_device_->Activate(__uuidof(IAudioClient),
484 CLSCTX_INPROC_SERVER,
485 NULL,
486 audio_client_.ReceiveVoid());
487 return hr;
488 }
489
490 HRESULT WASAPIAudioOutputStream::GetAudioEngineStreamFormat() {
491 // Retrieve the stream format that the audio engine uses for its internal
492 // processing/mixing of shared-mode streams.
493 return audio_client_->GetMixFormat(&audio_engine_mix_format_);
494 }
495
496 bool WASAPIAudioOutputStream::DesiredFormatIsSupported() {
497 // In shared mode, the audio engine always supports the mix format,
498 // which is stored in the |audio_engine_mix_format_| member. In addition,
499 // the audio engine *might* support similar formats that have the same
500 // sample rate and number of channels as the mix format but differ in
501 // the representation of audio sample values.
502 base::win::ScopedCoMem<WAVEFORMATEX> closest_match;
503 HRESULT hr = audio_client_->IsFormatSupported(AUDCLNT_SHAREMODE_SHARED,
504 &format_,
505 &closest_match);
506 DLOG_IF(ERROR, hr == S_FALSE) << "Format is not supported "
507 << "but a closest match exists.";
508 return (hr == S_OK);
509 }
510
511 HRESULT WASAPIAudioOutputStream::InitializeAudioEngine() {
512 // TODO(henrika): this buffer scheme is still under development.
513 // The exact details are yet to be determined based on tests with different
514 // audio clients.
515 int glitch_free_buffer_size_ms = static_cast<int>(packet_size_ms_ + 0.5);
516 if (audio_engine_mix_format_->nSamplesPerSec == 48000) {
517 // Initial tests have shown that we have to add 10 ms extra to
518 // ensure that we don't run empty for any packet size.
519 glitch_free_buffer_size_ms += 10;
520 } else if (audio_engine_mix_format_->nSamplesPerSec == 44100) {
521 // Initial tests have shown that we have to add 20 ms extra to
522 // ensure that we don't run empty for any packet size.
523 glitch_free_buffer_size_ms += 20;
524 } else {
525 glitch_free_buffer_size_ms += 20;
526 }
527 DVLOG(1) << "glitch_free_buffer_size_ms: " << glitch_free_buffer_size_ms;
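// REFERENCE_TIME is expressed in 100-nanosecond units, so one
// millisecond corresponds to 10000 units.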
528 REFERENCE_TIME requested_buffer_duration_hns =
529 static_cast<REFERENCE_TIME>(glitch_free_buffer_size_ms * 10000);
530
531 // Initialize the audio stream between the client and the device.
532 // We connect indirectly through the audio engine by using shared mode
533 // and WASAPI is initialized in an event driven mode.
534 // Note that this API ensures that the buffer is never smaller than the
535 // minimum buffer size needed for glitch-free rendering.
536 // If we request a buffer size that is smaller than the audio engine's
537 // minimum required buffer size, the method uses this minimum buffer
538 // size rather than the size we requested.
539 HRESULT hr = audio_client_->Initialize(AUDCLNT_SHAREMODE_SHARED,
540 AUDCLNT_STREAMFLAGS_EVENTCALLBACK |
541 AUDCLNT_STREAMFLAGS_NOPERSIST,
542 requested_buffer_duration_hns,
543 0,
544 &format_,
545 NULL);
546 if (FAILED(hr))
547 return hr;
548
549 // Retrieve the length of the endpoint buffer shared between the client
550 // and the audio engine. The buffer length determines the maximum amount
551 // of rendering data that the client can write to the endpoint buffer
552 // during a single processing pass.
553 // A typical value is 960 audio frames <=> 20ms @ 48kHz sample rate.
554 hr = audio_client_->GetBufferSize(&endpoint_buffer_size_frames_);
555 if (FAILED(hr))
556 return hr;
557 DVLOG(1) << "endpoint buffer size: " << endpoint_buffer_size_frames_
558 << " [frames]";
559 #ifndef NDEBUG
560 // The period between processing passes by the audio engine is fixed for a
561 // particular audio endpoint device and represents the smallest processing
562 // quantum for the audio engine. This period plus the stream latency between
563 // the buffer and endpoint device represents the minimum possible latency
564 // that an audio application can achieve in shared mode.
565 REFERENCE_TIME default_device_period = 0;
566 REFERENCE_TIME minimum_device_period = 0;
567 HRESULT hr_dbg = audio_client_->GetDevicePeriod(&default_device_period,
568 &minimum_device_period);
569 if (SUCCEEDED(hr_dbg)) {
570 // Shared mode device period.
571 DVLOG(1) << "default device period: "
572 << static_cast<double>(default_device_period / 10000.0)
573 << " [ms]";
574 // Exclusive mode device period.
575 DVLOG(1) << "minimum device period: "
576 << static_cast<double>(minimum_device_period / 10000.0)
577 << " [ms]";
578 }
579
580 REFERENCE_TIME latency = 0;
581 hr_dbg = audio_client_->GetStreamLatency(&latency);
582 if (SUCCEEDED(hr_dbg)) {
583 DVLOG(1) << "stream latency: " << static_cast<double>(latency / 10000.0)
584 << " [ms]";
585 }
586 #endif
587
588 // Set the event handle that the audio engine will signal each time
589 // a buffer becomes ready to be processed by the client.
590 hr = audio_client_->SetEventHandle(audio_samples_render_event_.Get());
591 if (FAILED(hr))
592 return hr;
593
594 // Get access to the IAudioRenderClient interface. This interface
595 // enables us to write output data to a rendering endpoint buffer.
596 // The methods in this interface manage the movement of data packets
597 // that contain audio-rendering data.
598 hr = audio_client_->GetService(__uuidof(IAudioRenderClient),
599 audio_render_client_.ReceiveVoid());
600 return hr;
601 }
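
A minimal usage sketch for reference (not part of this change). The
AudioSourceCallback signatures below are inferred from the OnMoreData()
and OnError() call sites above; the authoritative interface is the nested
AudioOutputStream::AudioSourceCallback declared in media/audio, which may
differ in detail.

// Hypothetical source which plays silence by zero-filling each packet.
class SilenceSource : public AudioOutputStream::AudioSourceCallback {
 public:
  virtual uint32 OnMoreData(AudioOutputStream* stream, uint8* dest,
                            uint32 max_size, AudioBuffersState state) {
    memset(dest, 0, max_size);  // Zeroed PCM renders as silence.
    return max_size;
  }
  virtual void OnError(AudioOutputStream* stream, int code) {
    DLOG(ERROR) << "Audio rendering error: " << code;
  }
};

// Typical call sequence on the creating (COM-initialized) thread:
//   WASAPIAudioOutputStream* stream =
//       new WASAPIAudioOutputStream(manager, params, eConsole);
//   if (stream->Open()) {
//     SilenceSource source;
//     stream->Start(&source);
//     // ... render until done ...
//     stream->Stop();
//   }
//   stream->Close();  // Triggers ReleaseOutputStream() and destruction.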