OLD | NEW |
---|---|
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "media/audio/win/audio_low_latency_output_win.h" | 5 #include "media/audio/win/audio_low_latency_output_win.h" |
6 | 6 |
7 #include <Functiondiscoverykeys_devpkey.h> | 7 #include <Functiondiscoverykeys_devpkey.h> |
8 | 8 |
9 #include "base/command_line.h" | 9 #include "base/command_line.h" |
10 #include "base/logging.h" | 10 #include "base/logging.h" |
(...skipping 231 matching lines...) |
242 if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) { | 242 if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) { |
243 if (!CoreAudioUtil::FillRenderEndpointBufferWithSilence( | 243 if (!CoreAudioUtil::FillRenderEndpointBufferWithSilence( |
244 audio_client_.get(), audio_render_client_.get())) { | 244 audio_client_.get(), audio_render_client_.get())) { |
245 LOG(ERROR) << "Failed to prepare endpoint buffers with silence."; | 245 LOG(ERROR) << "Failed to prepare endpoint buffers with silence."; |
246 callback->OnError(this); | 246 callback->OnError(this); |
247 return; | 247 return; |
248 } | 248 } |
249 } | 249 } |
250 num_written_frames_ = endpoint_buffer_size_frames_; | 250 num_written_frames_ = endpoint_buffer_size_frames_; |
251 | 251 |
252 if (!MarshalComPointers()) { | |
253 callback->OnError(this); | |
254 return; | |
255 } | |
256 | |
252 // Create and start the thread that will drive the rendering by waiting for | 257 // Create and start the thread that will drive the rendering by waiting for |
253 // render events. | 258 // render events. |
254 render_thread_.reset( | 259 render_thread_.reset( |
255 new base::DelegateSimpleThread(this, "wasapi_render_thread")); | 260 new base::DelegateSimpleThread(this, "wasapi_render_thread")); |
256 render_thread_->Start(); | 261 render_thread_->Start(); |
257 if (!render_thread_->HasBeenStarted()) { | 262 if (!render_thread_->HasBeenStarted()) { |
258 LOG(ERROR) << "Failed to start WASAPI render thread."; | 263 LOG(ERROR) << "Failed to start WASAPI render thread."; |
259 StopThread(); | 264 StopThread(); |
260 callback->OnError(this); | 265 callback->OnError(this); |
261 return; | 266 return; |
(...skipping 64 matching lines...) |
326 } | 331 } |
327 volume_ = volume_float; | 332 volume_ = volume_float; |
328 } | 333 } |
329 | 334 |
330 void WASAPIAudioOutputStream::GetVolume(double* volume) { | 335 void WASAPIAudioOutputStream::GetVolume(double* volume) { |
331 DVLOG(1) << "GetVolume()"; | 336 DVLOG(1) << "GetVolume()"; |
332 *volume = static_cast<double>(volume_); | 337 *volume = static_cast<double>(volume_); |
333 } | 338 } |
334 | 339 |
335 void WASAPIAudioOutputStream::Run() { | 340 void WASAPIAudioOutputStream::Run() { |
336 ScopedCOMInitializer com_init(ScopedCOMInitializer::kMTA); | 341 ScopedCOMInitializer com_init; |
337 | 342 |
338 // Increase the thread priority. | 343 // Increase the thread priority. |
339 render_thread_->SetThreadPriority(base::ThreadPriority::REALTIME_AUDIO); | 344 render_thread_->SetThreadPriority(base::ThreadPriority::REALTIME_AUDIO); |
340 | 345 |
341 // Enable MMCSS to ensure that this thread receives prioritized access to | 346 // Enable MMCSS to ensure that this thread receives prioritized access to |
342 // CPU resources. | 347 // CPU resources. |
343 DWORD task_index = 0; | 348 DWORD task_index = 0; |
344 HANDLE mm_task = avrt::AvSetMmThreadCharacteristics(L"Pro Audio", | 349 HANDLE mm_task = avrt::AvSetMmThreadCharacteristics(L"Pro Audio", |
345 &task_index); | 350 &task_index); |
346 bool mmcss_is_ok = | 351 bool mmcss_is_ok = |
347 (mm_task && avrt::AvSetMmThreadPriority(mm_task, AVRT_PRIORITY_CRITICAL)); | 352 (mm_task && avrt::AvSetMmThreadPriority(mm_task, AVRT_PRIORITY_CRITICAL)); |
348 if (!mmcss_is_ok) { | 353 if (!mmcss_is_ok) { |
349 // Failed to enable MMCSS on this thread. It is not fatal but can lead | 354 // Failed to enable MMCSS on this thread. It is not fatal but can lead |
350 // to reduced QoS at high load. | 355 // to reduced QoS at high load. |
351 DWORD err = GetLastError(); | 356 DWORD err = GetLastError(); |
352 LOG(WARNING) << "Failed to enable MMCSS (error code=" << err << ")."; | 357 LOG(WARNING) << "Failed to enable MMCSS (error code=" << err << ")."; |
353 } | 358 } |
354 | 359 |
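Note: the enable/revert pair in this function (AvSetMmThreadCharacteristics above, AvRevertMmThreadCharacteristics at the end of Run()) is a natural fit for a scoped helper. A minimal sketch using the avrt wrappers already used in this file; the class name and the succeeded() accessor are hypothetical, not part of the CL:

  // Hypothetical RAII helper for the MMCSS registration performed in Run():
  // joins the "Pro Audio" task on construction, reverts on destruction.
  class ScopedMmcssRegistration {
   public:
    ScopedMmcssRegistration() : mm_task_(NULL) {
      DWORD task_index = 0;
      mm_task_ = avrt::AvSetMmThreadCharacteristics(L"Pro Audio", &task_index);
      if (mm_task_)
        avrt::AvSetMmThreadPriority(mm_task_, AVRT_PRIORITY_CRITICAL);
    }
    ~ScopedMmcssRegistration() {
      if (mm_task_)
        avrt::AvRevertMmThreadCharacteristics(mm_task_);
    }
    bool succeeded() const { return mm_task_ != NULL; }
   private:
    HANDLE mm_task_;
  };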
360 // Retrieve COM pointers from the main thread. | |
361 ScopedComPtr<IAudioClient> audio_client; | |
362 ScopedComPtr<IAudioRenderClient> audio_render_client; | |
363 ScopedComPtr<IAudioClock> audio_clock; | |
364 | |
355 HRESULT hr = S_FALSE; | 365 HRESULT hr = S_FALSE; |
356 | 366 |
357 bool playing = true; | 367 bool playing = true; |
358 bool error = false; | 368 bool error = |
369 !UnmarshalComPointers(&audio_client, &audio_render_client, &audio_clock); | |
370 | |
359 HANDLE wait_array[] = { stop_render_event_.Get(), | 371 HANDLE wait_array[] = { stop_render_event_.Get(), |
360 audio_samples_render_event_.Get() }; | 372 audio_samples_render_event_.Get() }; |
361 UINT64 device_frequency = 0; | 373 UINT64 device_frequency = 0; |
362 | 374 |
363 // The device frequency is the frequency generated by the hardware clock in | 375 if (!error) { |
364 // the audio device. The GetFrequency() method reports a constant frequency. | 376 // The device frequency is the frequency generated by the hardware clock in |
365 hr = audio_clock_->GetFrequency(&device_frequency); | 377 // the audio device. The GetFrequency() method reports a constant frequency. |
366 error = FAILED(hr); | 378 hr = audio_clock->GetFrequency(&device_frequency); |
367 PLOG_IF(ERROR, error) << "Failed to acquire IAudioClock interface: " | 379 error = FAILED(hr); |
368 << std::hex << hr; | 380 PLOG_IF(ERROR, error) << "Failed to acquire IAudioClock interface: " |
381 << std::hex << hr; | |
382 } | |
369 | 383 |
370 // Keep rendering audio until the stop event or the stream-switch event | 384 // Keep rendering audio until the stop event or the stream-switch event |
371 // is signaled. An error event can also break the main thread loop. | 385 // is signaled. An error event can also break the main thread loop. |
372 while (playing && !error) { | 386 while (playing && !error) { |
373 // Wait for a close-down event, stream-switch event or a new render event. | 387 // Wait for a close-down event, stream-switch event or a new render event. |
374 DWORD wait_result = WaitForMultipleObjects(arraysize(wait_array), | 388 DWORD wait_result = WaitForMultipleObjects(arraysize(wait_array), |
375 wait_array, | 389 wait_array, |
376 FALSE, | 390 FALSE, |
377 INFINITE); | 391 INFINITE); |
378 | 392 |
379 switch (wait_result) { | 393 switch (wait_result) { |
380 case WAIT_OBJECT_0 + 0: | 394 case WAIT_OBJECT_0 + 0: |
381 // |stop_render_event_| has been set. | 395 // |stop_render_event_| has been set. |
382 playing = false; | 396 playing = false; |
383 break; | 397 break; |
384 case WAIT_OBJECT_0 + 1: | 398 case WAIT_OBJECT_0 + 1: |
385 // |audio_samples_render_event_| has been set. | 399 // |audio_samples_render_event_| has been set. |
386 error = !RenderAudioFromSource(device_frequency); | 400 error = !RenderAudioFromSource(device_frequency, audio_client.get(), |
401 audio_render_client.get(), | |
402 audio_clock.get()); | |
387 break; | 403 break; |
388 default: | 404 default: |
389 error = true; | 405 error = true; |
390 break; | 406 break; |
391 } | 407 } |
392 } | 408 } |
393 | 409 |
394 if (playing && error) { | 410 if (playing && error && audio_client) { |
395 // Stop audio rendering since something has gone wrong in our main thread | 411 // Stop audio rendering since something has gone wrong in our main thread |
396 // loop. Note that, we are still in a "started" state, hence a Stop() call | 412 // loop. Note that, we are still in a "started" state, hence a Stop() call |
397 // is required to join the thread properly. | 413 // is required to join the thread properly. |
398 audio_client_->Stop(); | 414 audio_client->Stop(); |
399 PLOG(ERROR) << "WASAPI rendering failed."; | 415 PLOG(ERROR) << "WASAPI rendering failed."; |
400 } | 416 } |
401 | 417 |
402 // Disable MMCSS. | 418 // Disable MMCSS. |
403 if (mm_task && !avrt::AvRevertMmThreadCharacteristics(mm_task)) { | 419 if (mm_task && !avrt::AvRevertMmThreadCharacteristics(mm_task)) { |
404 PLOG(WARNING) << "Failed to disable MMCSS"; | 420 PLOG(WARNING) << "Failed to disable MMCSS"; |
405 } | 421 } |
406 } | 422 } |
407 | 423 |
408 bool WASAPIAudioOutputStream::RenderAudioFromSource(UINT64 device_frequency) { | 424 bool WASAPIAudioOutputStream::RenderAudioFromSource( |
425 UINT64 device_frequency, | |
426 IAudioClient* audio_client, | |
427 IAudioRenderClient* audio_render_client, | |
428 IAudioClock* audio_clock) { | |
409 TRACE_EVENT0("audio", "RenderAudioFromSource"); | 429 TRACE_EVENT0("audio", "RenderAudioFromSource"); |
410 | 430 |
411 HRESULT hr = S_FALSE; | 431 HRESULT hr = S_FALSE; |
412 UINT32 num_queued_frames = 0; | 432 UINT32 num_queued_frames = 0; |
413 uint8* audio_data = NULL; | 433 uint8* audio_data = NULL; |
414 | 434 |
415 // Contains how much new data we can write to the buffer without | 435 // Contains how much new data we can write to the buffer without |
416 // the risk of overwriting previously written data that the audio | 436 // the risk of overwriting previously written data that the audio |
417 // engine has not yet read from the buffer. | 437 // engine has not yet read from the buffer. |
418 size_t num_available_frames = 0; | 438 size_t num_available_frames = 0; |
419 | 439 |
420 if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) { | 440 if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) { |
421 // Get the padding value which represents the amount of rendering | 441 // Get the padding value which represents the amount of rendering |
422 // data that is queued up to play in the endpoint buffer. | 442 // data that is queued up to play in the endpoint buffer. |
423 hr = audio_client_->GetCurrentPadding(&num_queued_frames); | 443 hr = audio_client->GetCurrentPadding(&num_queued_frames); |
424 num_available_frames = | 444 num_available_frames = |
425 endpoint_buffer_size_frames_ - num_queued_frames; | 445 endpoint_buffer_size_frames_ - num_queued_frames; |
426 if (FAILED(hr)) { | 446 if (FAILED(hr)) { |
427 DLOG(ERROR) << "Failed to retrieve amount of available space: " | 447 DLOG(ERROR) << "Failed to retrieve amount of available space: " |
428 << std::hex << hr; | 448 << std::hex << hr; |
429 return false; | 449 return false; |
430 } | 450 } |
431 } else { | 451 } else { |
432 // While the stream is running, the system alternately sends one | 452 // While the stream is running, the system alternately sends one |
433 // buffer or the other to the client. This form of double buffering | 453 // buffer or the other to the client. This form of double buffering |
(...skipping 21 matching lines...) |
455 // fill up the available area in the endpoint buffer. | 475 // fill up the available area in the endpoint buffer. |
456 // |num_packets| will always be one for exclusive-mode streams and | 476 // |num_packets| will always be one for exclusive-mode streams and |
457 // will be one in most cases for shared mode streams as well. | 477 // will be one in most cases for shared mode streams as well. |
458 // However, we have found that two packets can sometimes be | 478 // However, we have found that two packets can sometimes be |
459 // required. | 479 // required. |
460 size_t num_packets = (num_available_frames / packet_size_frames_); | 480 size_t num_packets = (num_available_frames / packet_size_frames_); |
461 | 481 |
462 for (size_t n = 0; n < num_packets; ++n) { | 482 for (size_t n = 0; n < num_packets; ++n) { |
463 // Grab all available space in the rendering endpoint buffer | 483 // Grab all available space in the rendering endpoint buffer |
464 // into which the client can write a data packet. | 484 // into which the client can write a data packet. |
465 hr = audio_render_client_->GetBuffer(packet_size_frames_, | 485 hr = audio_render_client->GetBuffer(packet_size_frames_, &audio_data); |
466 &audio_data); | |
467 if (FAILED(hr)) { | 486 if (FAILED(hr)) { |
468 DLOG(ERROR) << "Failed to use rendering audio buffer: " | 487 DLOG(ERROR) << "Failed to use rendering audio buffer: " |
469 << std::hex << hr; | 488 << std::hex << hr; |
470 return false; | 489 return false; |
471 } | 490 } |
472 | 491 |
473 // Derive the audio delay which corresponds to the delay between | 492 // Derive the audio delay which corresponds to the delay between |
474 // a render event and the time when the first audio sample in a | 493 // a render event and the time when the first audio sample in a |
475 // packet is played out through the speaker. This delay value | 494 // packet is played out through the speaker. This delay value |
476 // can typically be utilized by an acoustic echo-control (AEC) | 495 // can typically be utilized by an acoustic echo-control (AEC) |
477 // unit at the render side. | 496 // unit at the render side. |
478 UINT64 position = 0; | 497 UINT64 position = 0; |
479 uint32 audio_delay_bytes = 0; | 498 uint32 audio_delay_bytes = 0; |
480 hr = audio_clock_->GetPosition(&position, NULL); | 499 hr = audio_clock->GetPosition(&position, NULL); |
481 if (SUCCEEDED(hr)) { | 500 if (SUCCEEDED(hr)) { |
482 // Stream position of the sample that is currently playing | 501 // Stream position of the sample that is currently playing |
483 // through the speaker. | 502 // through the speaker. |
484 double pos_sample_playing_frames = format_.Format.nSamplesPerSec * | 503 double pos_sample_playing_frames = format_.Format.nSamplesPerSec * |
485 (static_cast<double>(position) / device_frequency); | 504 (static_cast<double>(position) / device_frequency); |
486 | 505 |
487 // Stream position of the last sample written to the endpoint | 506 // Stream position of the last sample written to the endpoint |
488 // buffer. Note that, the packet we are about to receive in | 507 // buffer. Note that, the packet we are about to receive in |
489 // the upcoming callback is also included. | 508 // the upcoming callback is also included. |
490 size_t pos_last_sample_written_frames = | 509 size_t pos_last_sample_written_frames = |
(...skipping 19 matching lines...) |
510 const int bytes_per_sample = format_.Format.wBitsPerSample >> 3; | 529 const int bytes_per_sample = format_.Format.wBitsPerSample >> 3; |
511 audio_bus_->Scale(volume_); | 530 audio_bus_->Scale(volume_); |
512 audio_bus_->ToInterleaved( | 531 audio_bus_->ToInterleaved( |
513 frames_filled, bytes_per_sample, audio_data); | 532 frames_filled, bytes_per_sample, audio_data); |
514 | 533 |
515 | 534 |
516 // Release the buffer space acquired in the GetBuffer() call. | 535 // Release the buffer space acquired in the GetBuffer() call. |
517 // Render silence if we were not able to fill up the buffer totally. | 536 // Render silence if we were not able to fill up the buffer totally. |
518 DWORD flags = (num_filled_bytes < packet_size_bytes_) ? | 537 DWORD flags = (num_filled_bytes < packet_size_bytes_) ? |
519 AUDCLNT_BUFFERFLAGS_SILENT : 0; | 538 AUDCLNT_BUFFERFLAGS_SILENT : 0; |
520 audio_render_client_->ReleaseBuffer(packet_size_frames_, flags); | 539 audio_render_client->ReleaseBuffer(packet_size_frames_, flags); |
521 | 540 |
522 num_written_frames_ += packet_size_frames_; | 541 num_written_frames_ += packet_size_frames_; |
523 } | 542 } |
524 | 543 |
525 return true; | 544 return true; |
526 } | 545 } |
527 | 546 |
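Note: the clock arithmetic above relies on IAudioClock reporting positions in units of the device frequency: position / device_frequency gives seconds of played audio, and multiplying by nSamplesPerSec gives the frame currently leaving the speaker. A standalone sketch of that conversion; the function names and the rounding are illustrative, and the CL's exact expression for the written-ahead position is in the elided lines:

  // Frame position of the sample currently playing through the speaker.
  // |position| comes from IAudioClock::GetPosition(), |device_frequency|
  // from IAudioClock::GetFrequency(), |sample_rate| is nSamplesPerSec.
  double PlayingPositionFrames(UINT64 position, UINT64 device_frequency,
                               int sample_rate) {
    return sample_rate * (static_cast<double>(position) / device_frequency);
  }

  // Render-side delay: the gap between what has been written to the endpoint
  // buffer and what is currently playing, expressed in bytes.
  uint32 AudioDelayBytes(double written_frames, double playing_frames,
                         int frame_size_bytes) {
    return static_cast<uint32>(
        (written_frames - playing_frames) * frame_size_bytes + 0.5);
  }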
528 HRESULT WASAPIAudioOutputStream::ExclusiveModeInitialization( | 547 HRESULT WASAPIAudioOutputStream::ExclusiveModeInitialization( |
529 IAudioClient* client, HANDLE event_handle, uint32* endpoint_buffer_size) { | 548 IAudioClient* client, HANDLE event_handle, uint32* endpoint_buffer_size) { |
530 DCHECK_EQ(share_mode_, AUDCLNT_SHAREMODE_EXCLUSIVE); | 549 DCHECK_EQ(share_mode_, AUDCLNT_SHAREMODE_EXCLUSIVE); |
(...skipping 84 matching lines...) |
615 render_thread_.reset(); | 634 render_thread_.reset(); |
616 | 635 |
617 // Ensure that we don't quit the main thread loop immediately next | 636 // Ensure that we don't quit the main thread loop immediately next |
618 // time Start() is called. | 637 // time Start() is called. |
619 ResetEvent(stop_render_event_.Get()); | 638 ResetEvent(stop_render_event_.Get()); |
620 } | 639 } |
621 | 640 |
622 source_ = NULL; | 641 source_ = NULL; |
623 } | 642 } |
624 | 643 |
644 bool WASAPIAudioOutputStream::MarshalComPointers() { | |
645 DCHECK_EQ(creating_thread_id_, base::PlatformThread::CurrentId()); | |
646 DCHECK(!com_stream_); | |
647 | |
648 ScopedComPtr<IStream> com_stream; | |
649 HRESULT hr = CreateStreamOnHGlobal(NULL, TRUE, com_stream.Receive()); | |
650 if (FAILED(hr)) { | |
651 DLOG(ERROR) << "Failed to create stream for marshaling COM pointers."; | |
652 return false; | |
653 } | |
654 | |
655 hr = CoMarshalInterface(com_stream.get(), __uuidof(IAudioClient), | |
656 audio_client_.get(), MSHCTX_INPROC, NULL, | |
657 MSHLFLAGS_NORMAL); | |
658 if (FAILED(hr)) { | |
659 DLOG(ERROR) << "Marshal failed for IAudioClient: " << std::hex << hr; | |
660 return false; | |
661 } | |
662 | |
663 hr = CoMarshalInterface(com_stream.get(), __uuidof(IAudioRenderClient), | |
664 audio_render_client_.get(), MSHCTX_INPROC, NULL, | |
665 MSHLFLAGS_NORMAL); | |
666 if (FAILED(hr)) { | |
667 DLOG(ERROR) << "Marshal failed for IAudioRenderClient: " << std::hex << hr; | |
668 return false; | |
669 } | |
670 | |
671 hr = CoMarshalInterface(com_stream.get(), __uuidof(IAudioClock), | |
672 audio_clock_.get(), MSHCTX_INPROC, NULL, | |
673 MSHLFLAGS_NORMAL); | |
674 if (FAILED(hr)) { | |
675 DLOG(ERROR) << "Marshal failed for IAudioClock: " << std::hex << hr; | |
676 return false; | |
677 } | |
678 | |
679 LARGE_INTEGER pos = {0}; | |
680 hr = com_stream->Seek(pos, STREAM_SEEK_SET, NULL); | |
681 if (FAILED(hr)) { | |
682 DLOG(ERROR) << "Failed to seek IStream for marshaling: " << std::hex << hr; | |
683 return false; | |
684 } | |
685 | |
686 com_stream_ = com_stream.Pass(); | |
687 return true; | |
688 } | |
689 | |
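Note: MarshalComPointers() packs all three interfaces into one HGLOBAL-backed IStream via CoMarshalInterface. For a single interface, COM offers helpers that fold the stream creation, marshaling, and cleanup into one call on each side; a sketch of that per-interface variant for IAudioClient only (the CL does not use these helpers, presumably because it hands off three interfaces through a single stream):

  // Creating thread: marshal the pointer into a freshly created stream.
  HRESULT MarshalClient(IAudioClient* client, IStream** stream) {
    return CoMarshalInterThreadInterfaceInStream(__uuidof(IAudioClient),
                                                 client, stream);
  }

  // Render thread: unmarshal the pointer and release the stream in one call.
  HRESULT UnmarshalClient(IStream* stream,
                          ScopedComPtr<IAudioClient>* client) {
    return CoGetInterfaceAndReleaseStream(stream, __uuidof(IAudioClient),
                                          client->ReceiveVoid());
  }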
690 bool WASAPIAudioOutputStream::UnmarshalComPointers( | |
tommi (sloooow) - chröme
2015/04/22 18:10:08
just throwing this out there... we could implement
691 ScopedComPtr<IAudioClient>* audio_client, | |
692 ScopedComPtr<IAudioRenderClient>* audio_render_client, | |
693 ScopedComPtr<IAudioClock>* audio_clock) { | |
694 DCHECK_EQ(render_thread_->tid(), base::PlatformThread::CurrentId()); | |
695 | |
696 DCHECK(com_stream_); | |
697 ScopedComPtr<IStream> com_stream; | |
698 com_stream = com_stream_.Pass(); | |
699 | |
700 HRESULT hr = CoUnmarshalInterface(com_stream.get(), __uuidof(IAudioClient), | |
701 audio_client->ReceiveVoid()); | |
702 if (FAILED(hr)) { | |
703 DLOG(ERROR) << "Unmarshal failed IAudioClient: " << std::hex << hr; | |
704 return false; | |
705 } | |
706 | |
707 hr = CoUnmarshalInterface(com_stream.get(), __uuidof(IAudioRenderClient), | |
708 audio_render_client->ReceiveVoid()); | |
709 if (FAILED(hr)) { | |
710 DLOG(ERROR) << "Unmarshal failed IAudioRenderClient: " << std::hex << hr; | |
711 return false; | |
712 } | |
713 | |
714 hr = CoUnmarshalInterface(com_stream.get(), __uuidof(IAudioClock), | |
715 audio_clock->ReceiveVoid()); | |
716 if (FAILED(hr)) | |
717 DLOG(ERROR) << "Unmarshal failed IAudioClock: " << std::hex << hr; | |
718 return SUCCEEDED(hr); | |
719 } | |
720 | |
625 } // namespace media | 721 } // namespace media |