Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(474)

Side by Side Diff: media/audio/win/audio_low_latency_output_win.cc

Issue 1097553003: Switch to STA mode for audio thread and WASAPI I/O streams. (Closed) Base URL: http://chromium.googlesource.com/chromium/src.git@master
Patch Set: Comments. Created 5 years, 8 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "media/audio/win/audio_low_latency_output_win.h" 5 #include "media/audio/win/audio_low_latency_output_win.h"
6 6
7 #include <Functiondiscoverykeys_devpkey.h> 7 #include <Functiondiscoverykeys_devpkey.h>
8 8
9 #include "base/command_line.h" 9 #include "base/command_line.h"
10 #include "base/logging.h" 10 #include "base/logging.h"
(...skipping 52 matching lines...) Expand 10 before | Expand all | Expand 10 after
63 opened_(false), 63 opened_(false),
64 volume_(1.0), 64 volume_(1.0),
65 packet_size_frames_(0), 65 packet_size_frames_(0),
66 packet_size_bytes_(0), 66 packet_size_bytes_(0),
67 endpoint_buffer_size_frames_(0), 67 endpoint_buffer_size_frames_(0),
68 device_id_(device_id), 68 device_id_(device_id),
69 device_role_(device_role), 69 device_role_(device_role),
70 share_mode_(GetShareMode()), 70 share_mode_(GetShareMode()),
71 num_written_frames_(0), 71 num_written_frames_(0),
72 source_(NULL), 72 source_(NULL),
73 com_stream_(NULL),
tommi (sloooow) - chröme 2015/04/20 18:23:38 is this a COM pointer? If so, use ScopedComPtr
DaleCurtis 2015/04/20 18:54:07 I think so, but it's seemingly manually released via CoReleaseMarshalData().
73 audio_bus_(AudioBus::Create(params)) { 74 audio_bus_(AudioBus::Create(params)) {
74 DCHECK(manager_); 75 DCHECK(manager_);
75 76
76 DVLOG(1) << "WASAPIAudioOutputStream::WASAPIAudioOutputStream()"; 77 DVLOG(1) << "WASAPIAudioOutputStream::WASAPIAudioOutputStream()";
77 DVLOG_IF(1, share_mode_ == AUDCLNT_SHAREMODE_EXCLUSIVE) 78 DVLOG_IF(1, share_mode_ == AUDCLNT_SHAREMODE_EXCLUSIVE)
78 << "Core Audio (WASAPI) EXCLUSIVE MODE is enabled."; 79 << "Core Audio (WASAPI) EXCLUSIVE MODE is enabled.";
79 80
80 // Load the Avrt DLL if not already loaded. Required to support MMCSS. 81 // Load the Avrt DLL if not already loaded. Required to support MMCSS.
81 bool avrt_init = avrt::Initialize(); 82 bool avrt_init = avrt::Initialize();
82 DCHECK(avrt_init) << "Failed to load the avrt.dll"; 83 DCHECK(avrt_init) << "Failed to load the avrt.dll";
(...skipping 159 matching lines...) Expand 10 before | Expand all | Expand 10 after
242 if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) { 243 if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) {
243 if (!CoreAudioUtil::FillRenderEndpointBufferWithSilence( 244 if (!CoreAudioUtil::FillRenderEndpointBufferWithSilence(
244 audio_client_.get(), audio_render_client_.get())) { 245 audio_client_.get(), audio_render_client_.get())) {
245 LOG(ERROR) << "Failed to prepare endpoint buffers with silence."; 246 LOG(ERROR) << "Failed to prepare endpoint buffers with silence.";
246 callback->OnError(this); 247 callback->OnError(this);
247 return; 248 return;
248 } 249 }
249 } 250 }
250 num_written_frames_ = endpoint_buffer_size_frames_; 251 num_written_frames_ = endpoint_buffer_size_frames_;
251 252
253 if (!MarshalComPointers()) {
254 callback->OnError(this);
255 return;
256 }
257
252 // Create and start the thread that will drive the rendering by waiting for 258 // Create and start the thread that will drive the rendering by waiting for
253 // render events. 259 // render events.
254 render_thread_.reset( 260 render_thread_.reset(
255 new base::DelegateSimpleThread(this, "wasapi_render_thread")); 261 new base::DelegateSimpleThread(this, "wasapi_render_thread"));
256 render_thread_->Start(); 262 render_thread_->Start();
257 if (!render_thread_->HasBeenStarted()) { 263 if (!render_thread_->HasBeenStarted()) {
258 LOG(ERROR) << "Failed to start WASAPI render thread."; 264 LOG(ERROR) << "Failed to start WASAPI render thread.";
259 StopThread(); 265 StopThread();
260 callback->OnError(this); 266 callback->OnError(this);
261 return; 267 return;
262 } 268 }
263 269
264 // Start streaming data between the endpoint buffer and the audio engine. 270 // Start streaming data between the endpoint buffer and the audio engine.
271 // TODO(dalecurtis): Do we need a lock on this with STA mode?
tommi (sloooow) - chröme 2015/04/20 18:23:38 One thing to be aware of with STA is reentrancy.
265 HRESULT hr = audio_client_->Start(); 272 HRESULT hr = audio_client_->Start();
266 if (FAILED(hr)) { 273 if (FAILED(hr)) {
267 PLOG(ERROR) << "Failed to start output streaming: " << std::hex << hr; 274 PLOG(ERROR) << "Failed to start output streaming: " << std::hex << hr;
268 StopThread(); 275 StopThread();
269 callback->OnError(this); 276 callback->OnError(this);
270 } 277 }
271 } 278 }
272 279
273 void WASAPIAudioOutputStream::Stop() { 280 void WASAPIAudioOutputStream::Stop() {
274 DVLOG(1) << "WASAPIAudioOutputStream::Stop()"; 281 DVLOG(1) << "WASAPIAudioOutputStream::Stop()";
275 DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_); 282 DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_);
276 if (!render_thread_) 283 if (!render_thread_)
277 return; 284 return;
278 285
279 // Stop output audio streaming. 286 // Stop output audio streaming.
287 // TODO(dalecurtis): Do we need a lock on this with STA mode?
tommi (sloooow) - chröme 2015/04/20 18:23:38 if a lock wasn't needed before, I don't think it's needed now.
DaleCurtis 2015/04/20 18:54:07 Hmm, previously we were talking to the same instance.
DaleCurtis 2015/04/22 16:08:23 Can you add some more details here?
280 HRESULT hr = audio_client_->Stop(); 288 HRESULT hr = audio_client_->Stop();
281 if (FAILED(hr)) { 289 if (FAILED(hr)) {
282 PLOG(ERROR) << "Failed to stop output streaming: " << std::hex << hr; 290 PLOG(ERROR) << "Failed to stop output streaming: " << std::hex << hr;
283 source_->OnError(this); 291 source_->OnError(this);
284 } 292 }
285 293
286 // Make a local copy of |source_| since StopThread() will clear it. 294 // Make a local copy of |source_| since StopThread() will clear it.
287 AudioSourceCallback* callback = source_; 295 AudioSourceCallback* callback = source_;
288 StopThread(); 296 StopThread();
289 297
(...skipping 36 matching lines...) Expand 10 before | Expand all | Expand 10 after
326 } 334 }
327 volume_ = volume_float; 335 volume_ = volume_float;
328 } 336 }
329 337
330 void WASAPIAudioOutputStream::GetVolume(double* volume) { 338 void WASAPIAudioOutputStream::GetVolume(double* volume) {
331 DVLOG(1) << "GetVolume()"; 339 DVLOG(1) << "GetVolume()";
332 *volume = static_cast<double>(volume_); 340 *volume = static_cast<double>(volume_);
333 } 341 }
334 342
335 void WASAPIAudioOutputStream::Run() { 343 void WASAPIAudioOutputStream::Run() {
336 ScopedCOMInitializer com_init(ScopedCOMInitializer::kMTA); 344 ScopedCOMInitializer com_init;
337 345
338 // Increase the thread priority. 346 // Increase the thread priority.
339 render_thread_->SetThreadPriority(base::ThreadPriority::REALTIME_AUDIO); 347 render_thread_->SetThreadPriority(base::ThreadPriority::REALTIME_AUDIO);
340 348
341 // Enable MMCSS to ensure that this thread receives prioritized access to 349 // Enable MMCSS to ensure that this thread receives prioritized access to
342 // CPU resources. 350 // CPU resources.
343 DWORD task_index = 0; 351 DWORD task_index = 0;
344 HANDLE mm_task = avrt::AvSetMmThreadCharacteristics(L"Pro Audio", 352 HANDLE mm_task = avrt::AvSetMmThreadCharacteristics(L"Pro Audio",
345 &task_index); 353 &task_index);
346 bool mmcss_is_ok = 354 bool mmcss_is_ok =
347 (mm_task && avrt::AvSetMmThreadPriority(mm_task, AVRT_PRIORITY_CRITICAL)); 355 (mm_task && avrt::AvSetMmThreadPriority(mm_task, AVRT_PRIORITY_CRITICAL));
348 if (!mmcss_is_ok) { 356 if (!mmcss_is_ok) {
349 // Failed to enable MMCSS on this thread. It is not fatal but can lead 357 // Failed to enable MMCSS on this thread. It is not fatal but can lead
350 // to reduced QoS at high load. 358 // to reduced QoS at high load.
351 DWORD err = GetLastError(); 359 DWORD err = GetLastError();
352 LOG(WARNING) << "Failed to enable MMCSS (error code=" << err << ")."; 360 LOG(WARNING) << "Failed to enable MMCSS (error code=" << err << ").";
353 } 361 }
354 362
363 // Retrieve COM pointers from the main thread.
364 IAudioClient* thread_audio_client = NULL;
tommi (sloooow) - chröme 2015/04/20 18:23:38 use ScopedComPtr instead of raw pointers? actuall
DaleCurtis 2015/04/20 18:54:07 I have no idea what I'm doing here :) Marshal tuto
365 IAudioRenderClient* thread_audio_render_client = NULL;
366 IAudioClock* thread_audio_clock = NULL;
367
355 HRESULT hr = S_FALSE; 368 HRESULT hr = S_FALSE;
356 369
357 bool playing = true; 370 bool playing = true;
358 bool error = false; 371 bool error =
372 !UnmarshalComPointers(&thread_audio_client, &thread_audio_render_client,
373 &thread_audio_clock);
374
359 HANDLE wait_array[] = { stop_render_event_.Get(), 375 HANDLE wait_array[] = { stop_render_event_.Get(),
360 audio_samples_render_event_.Get() }; 376 audio_samples_render_event_.Get() };
361 UINT64 device_frequency = 0; 377 UINT64 device_frequency = 0;
362 378
363 // The device frequency is the frequency generated by the hardware clock in 379 if (!error) {
364 // the audio device. The GetFrequency() method reports a constant frequency. 380 // The device frequency is the frequency generated by the hardware clock in
365 hr = audio_clock_->GetFrequency(&device_frequency); 381 // the audio device. The GetFrequency() method reports a constant frequency.
366 error = FAILED(hr); 382 hr = audio_clock_->GetFrequency(&device_frequency);
367 PLOG_IF(ERROR, error) << "Failed to acquire IAudioClock interface: " 383 error = FAILED(hr);
368 << std::hex << hr; 384 PLOG_IF(ERROR, error) << "Failed to acquire IAudioClock interface: "
385 << std::hex << hr;
386 }
369 387
370 // Keep rendering audio until the stop event or the stream-switch event 388 // Keep rendering audio until the stop event or the stream-switch event
371 // is signaled. An error event can also break the main thread loop. 389 // is signaled. An error event can also break the main thread loop.
372 while (playing && !error) { 390 while (playing && !error) {
373 // Wait for a close-down event, stream-switch event or a new render event. 391 // Wait for a close-down event, stream-switch event or a new render event.
374 DWORD wait_result = WaitForMultipleObjects(arraysize(wait_array), 392 DWORD wait_result = WaitForMultipleObjects(arraysize(wait_array),
375 wait_array, 393 wait_array,
376 FALSE, 394 FALSE,
377 INFINITE); 395 INFINITE);
378 396
379 switch (wait_result) { 397 switch (wait_result) {
380 case WAIT_OBJECT_0 + 0: 398 case WAIT_OBJECT_0 + 0:
381 // |stop_render_event_| has been set. 399 // |stop_render_event_| has been set.
382 playing = false; 400 playing = false;
383 break; 401 break;
384 case WAIT_OBJECT_0 + 1: 402 case WAIT_OBJECT_0 + 1:
385 // |audio_samples_render_event_| has been set. 403 // |audio_samples_render_event_| has been set.
386 error = !RenderAudioFromSource(device_frequency); 404 error = !RenderAudioFromSource(device_frequency, thread_audio_client,
405 thread_audio_render_client,
406 thread_audio_clock);
387 break; 407 break;
388 default: 408 default:
389 error = true; 409 error = true;
390 break; 410 break;
391 } 411 }
392 } 412 }
393 413
394 if (playing && error) { 414 if (playing && error && thread_audio_client) {
395 // Stop audio rendering since something has gone wrong in our main thread 415 // Stop audio rendering since something has gone wrong in our main thread
396 // loop. Note that, we are still in a "started" state, hence a Stop() call 416 // loop. Note that, we are still in a "started" state, hence a Stop() call
397 // is required to join the thread properly. 417 // is required to join the thread properly.
398 audio_client_->Stop(); 418 thread_audio_client->Stop();
399 PLOG(ERROR) << "WASAPI rendering failed."; 419 PLOG(ERROR) << "WASAPI rendering failed.";
400 } 420 }
401 421
402 // Disable MMCSS. 422 // Disable MMCSS.
403 if (mm_task && !avrt::AvRevertMmThreadCharacteristics(mm_task)) { 423 if (mm_task && !avrt::AvRevertMmThreadCharacteristics(mm_task)) {
404 PLOG(WARNING) << "Failed to disable MMCSS"; 424 PLOG(WARNING) << "Failed to disable MMCSS";
405 } 425 }
406 } 426 }
407 427
408 bool WASAPIAudioOutputStream::RenderAudioFromSource(UINT64 device_frequency) { 428 bool WASAPIAudioOutputStream::RenderAudioFromSource(
429 UINT64 device_frequency,
430 IAudioClient* thread_audio_client,
431 IAudioRenderClient* thread_audio_render_client,
432 IAudioClock* thread_audio_clock) {
409 TRACE_EVENT0("audio", "RenderAudioFromSource"); 433 TRACE_EVENT0("audio", "RenderAudioFromSource");
410 434
411 HRESULT hr = S_FALSE; 435 HRESULT hr = S_FALSE;
412 UINT32 num_queued_frames = 0; 436 UINT32 num_queued_frames = 0;
413 uint8* audio_data = NULL; 437 uint8* audio_data = NULL;
414 438
415 // Contains how much new data we can write to the buffer without 439 // Contains how much new data we can write to the buffer without
416 // the risk of overwriting previously written data that the audio 440 // the risk of overwriting previously written data that the audio
417 // engine has not yet read from the buffer. 441 // engine has not yet read from the buffer.
418 size_t num_available_frames = 0; 442 size_t num_available_frames = 0;
419 443
420 if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) { 444 if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) {
421 // Get the padding value which represents the amount of rendering 445 // Get the padding value which represents the amount of rendering
422 // data that is queued up to play in the endpoint buffer. 446 // data that is queued up to play in the endpoint buffer.
423 hr = audio_client_->GetCurrentPadding(&num_queued_frames); 447 hr = thread_audio_client->GetCurrentPadding(&num_queued_frames);
424 num_available_frames = 448 num_available_frames =
425 endpoint_buffer_size_frames_ - num_queued_frames; 449 endpoint_buffer_size_frames_ - num_queued_frames;
426 if (FAILED(hr)) { 450 if (FAILED(hr)) {
427 DLOG(ERROR) << "Failed to retrieve amount of available space: " 451 DLOG(ERROR) << "Failed to retrieve amount of available space: "
428 << std::hex << hr; 452 << std::hex << hr;
429 return false; 453 return false;
430 } 454 }
431 } else { 455 } else {
432 // While the stream is running, the system alternately sends one 456 // While the stream is running, the system alternately sends one
433 // buffer or the other to the client. This form of double buffering 457 // buffer or the other to the client. This form of double buffering
(...skipping 21 matching lines...) Expand all
455 // fill up the available area in the endpoint buffer. 479 // fill up the available area in the endpoint buffer.
456 // |num_packets| will always be one for exclusive-mode streams and 480 // |num_packets| will always be one for exclusive-mode streams and
457 // will be one in most cases for shared mode streams as well. 481 // will be one in most cases for shared mode streams as well.
458 // However, we have found that two packets can sometimes be 482 // However, we have found that two packets can sometimes be
459 // required. 483 // required.
460 size_t num_packets = (num_available_frames / packet_size_frames_); 484 size_t num_packets = (num_available_frames / packet_size_frames_);
461 485
462 for (size_t n = 0; n < num_packets; ++n) { 486 for (size_t n = 0; n < num_packets; ++n) {
463 // Grab all available space in the rendering endpoint buffer 487 // Grab all available space in the rendering endpoint buffer
464 // into which the client can write a data packet. 488 // into which the client can write a data packet.
465 hr = audio_render_client_->GetBuffer(packet_size_frames_, 489 hr =
466 &audio_data); 490 thread_audio_render_client->GetBuffer(packet_size_frames_, &audio_data);
467 if (FAILED(hr)) { 491 if (FAILED(hr)) {
468 DLOG(ERROR) << "Failed to use rendering audio buffer: " 492 DLOG(ERROR) << "Failed to use rendering audio buffer: "
469 << std::hex << hr; 493 << std::hex << hr;
470 return false; 494 return false;
471 } 495 }
472 496
473 // Derive the audio delay which corresponds to the delay between 497 // Derive the audio delay which corresponds to the delay between
474 // a render event and the time when the first audio sample in a 498 // a render event and the time when the first audio sample in a
475 // packet is played out through the speaker. This delay value 499 // packet is played out through the speaker. This delay value
476 // can typically be utilized by an acoustic echo-control (AEC) 500 // can typically be utilized by an acoustic echo-control (AEC)
477 // unit at the render side. 501 // unit at the render side.
478 UINT64 position = 0; 502 UINT64 position = 0;
479 uint32 audio_delay_bytes = 0; 503 uint32 audio_delay_bytes = 0;
480 hr = audio_clock_->GetPosition(&position, NULL); 504 hr = thread_audio_clock->GetPosition(&position, NULL);
481 if (SUCCEEDED(hr)) { 505 if (SUCCEEDED(hr)) {
482 // Stream position of the sample that is currently playing 506 // Stream position of the sample that is currently playing
483 // through the speaker. 507 // through the speaker.
484 double pos_sample_playing_frames = format_.Format.nSamplesPerSec * 508 double pos_sample_playing_frames = format_.Format.nSamplesPerSec *
485 (static_cast<double>(position) / device_frequency); 509 (static_cast<double>(position) / device_frequency);
486 510
487 // Stream position of the last sample written to the endpoint 511 // Stream position of the last sample written to the endpoint
488 // buffer. Note that, the packet we are about to receive in 512 // buffer. Note that, the packet we are about to receive in
489 // the upcoming callback is also included. 513 // the upcoming callback is also included.
490 size_t pos_last_sample_written_frames = 514 size_t pos_last_sample_written_frames =
(...skipping 19 matching lines...) Expand all
510 const int bytes_per_sample = format_.Format.wBitsPerSample >> 3; 534 const int bytes_per_sample = format_.Format.wBitsPerSample >> 3;
511 audio_bus_->Scale(volume_); 535 audio_bus_->Scale(volume_);
512 audio_bus_->ToInterleaved( 536 audio_bus_->ToInterleaved(
513 frames_filled, bytes_per_sample, audio_data); 537 frames_filled, bytes_per_sample, audio_data);
514 538
515 539
516 // Release the buffer space acquired in the GetBuffer() call. 540 // Release the buffer space acquired in the GetBuffer() call.
517 // Render silence if we were not able to fill up the buffer totally. 541 // Render silence if we were not able to fill up the buffer totally.
518 DWORD flags = (num_filled_bytes < packet_size_bytes_) ? 542 DWORD flags = (num_filled_bytes < packet_size_bytes_) ?
519 AUDCLNT_BUFFERFLAGS_SILENT : 0; 543 AUDCLNT_BUFFERFLAGS_SILENT : 0;
520 audio_render_client_->ReleaseBuffer(packet_size_frames_, flags); 544 thread_audio_render_client->ReleaseBuffer(packet_size_frames_, flags);
521 545
522 num_written_frames_ += packet_size_frames_; 546 num_written_frames_ += packet_size_frames_;
523 } 547 }
524 548
525 return true; 549 return true;
526 } 550 }
527 551
528 HRESULT WASAPIAudioOutputStream::ExclusiveModeInitialization( 552 HRESULT WASAPIAudioOutputStream::ExclusiveModeInitialization(
529 IAudioClient* client, HANDLE event_handle, uint32* endpoint_buffer_size) { 553 IAudioClient* client, HANDLE event_handle, uint32* endpoint_buffer_size) {
530 DCHECK_EQ(share_mode_, AUDCLNT_SHAREMODE_EXCLUSIVE); 554 DCHECK_EQ(share_mode_, AUDCLNT_SHAREMODE_EXCLUSIVE);
(...skipping 84 matching lines...) Expand 10 before | Expand all | Expand 10 after
615 render_thread_.reset(); 639 render_thread_.reset();
616 640
617 // Ensure that we don't quit the main thread loop immediately next 641 // Ensure that we don't quit the main thread loop immediately next
618 // time Start() is called. 642 // time Start() is called.
619 ResetEvent(stop_render_event_.Get()); 643 ResetEvent(stop_render_event_.Get());
620 } 644 }
621 645
622 source_ = NULL; 646 source_ = NULL;
623 } 647 }
624 648
649 bool WASAPIAudioOutputStream::MarshalComPointers() {
650 HRESULT hr = CreateStreamOnHGlobal(0, TRUE, &com_stream_);
651 if (FAILED(hr)) {
652 DLOG(ERROR) << "Failed to create stream for marshaling COM pointers.";
653 com_stream_ = NULL;
654 return false;
655 }
656
657 hr = CoMarshalInterface(com_stream_, __uuidof(IAudioClient),
658 audio_client_.get(), MSHCTX_INPROC, NULL,
659 MSHLFLAGS_NORMAL);
660 if (FAILED(hr)) {
661 DLOG(ERROR) << "Marshal failed for IAudioClient: " << std::hex << hr;
662 if (com_stream_) {
663 CoReleaseMarshalData(com_stream_);
664 com_stream_ = NULL;
665 }
666 return false;
667 }
668
669 hr = CoMarshalInterface(com_stream_, __uuidof(IAudioRenderClient),
670 audio_render_client_.get(), MSHCTX_INPROC, NULL,
671 MSHLFLAGS_NORMAL);
672 if (FAILED(hr)) {
673 DLOG(ERROR) << "Marshal failed for IAudioRenderClient: " << std::hex << hr;
674 CoReleaseMarshalData(com_stream_);
675 com_stream_ = NULL;
676 return false;
677 }
678
679 hr =
680 CoMarshalInterface(com_stream_, __uuidof(IAudioClock), audio_clock_.get(),
681 MSHCTX_INPROC, NULL, MSHLFLAGS_NORMAL);
682 if (FAILED(hr)) {
683 DLOG(ERROR) << "Marshal failed for IAudioClock: " << std::hex << hr;
684 CoReleaseMarshalData(com_stream_);
685 com_stream_ = NULL;
686 return false;
687 }
688
689 LARGE_INTEGER pos = {0};
690 hr = com_stream_->Seek(pos, STREAM_SEEK_SET, NULL);
691 if (FAILED(hr)) {
692 DLOG(ERROR) << "Failed to seek IStream for marshaling: " << std::hex << hr;
693 CoReleaseMarshalData(com_stream_);
694 com_stream_ = NULL;
695 return false;
696 }
697
698 return true;
699 }
700
701 bool WASAPIAudioOutputStream::UnmarshalComPointers(
702 IAudioClient** audio_client,
703 IAudioRenderClient** audio_render_client,
704 IAudioClock** audio_clock) {
705 HRESULT hr = CoUnmarshalInterface(com_stream_, __uuidof(IAudioClient),
706 reinterpret_cast<LPVOID*>(audio_client));
707 if (FAILED(hr)) {
708 DLOG(ERROR) << "Unmarshal failed IAudioClient: " << std::hex << hr;
709 CoReleaseMarshalData(com_stream_);
710 com_stream_ = NULL;
711 return false;
712 }
713
714 hr = CoUnmarshalInterface(com_stream_, __uuidof(IAudioRenderClient),
715 reinterpret_cast<LPVOID*>(audio_render_client));
716 if (FAILED(hr)) {
717 DLOG(ERROR) << "Unmarshal failed IAudioRenderClient: " << std::hex << hr;
718 CoReleaseMarshalData(com_stream_);
719 com_stream_ = NULL;
720 return false;
721 }
722
723 hr = CoUnmarshalInterface(com_stream_, __uuidof(IAudioClock),
724 reinterpret_cast<LPVOID*>(audio_clock));
725 if (FAILED(hr))
726 DLOG(ERROR) << "Unmarshal failed IAudioClock: " << std::hex << hr;
727 CoReleaseMarshalData(com_stream_);
728 com_stream_ = NULL;
729 return SUCCEEDED(hr);
730 }
731
625 } // namespace media 732 } // namespace media
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698