OLD | NEW |
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "media/audio/win/audio_low_latency_output_win.h" | 5 #include "media/audio/win/audio_low_latency_output_win.h" |
6 | 6 |
7 #include <Functiondiscoverykeys_devpkey.h> | 7 #include <Functiondiscoverykeys_devpkey.h> |
8 | 8 |
9 #include "base/command_line.h" | 9 #include "base/command_line.h" |
10 #include "base/logging.h" | 10 #include "base/logging.h" |
(...skipping 231 matching lines...) |
242 if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) { | 242 if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) { |
243 if (!CoreAudioUtil::FillRenderEndpointBufferWithSilence( | 243 if (!CoreAudioUtil::FillRenderEndpointBufferWithSilence( |
244 audio_client_.get(), audio_render_client_.get())) { | 244 audio_client_.get(), audio_render_client_.get())) { |
245 LOG(ERROR) << "Failed to prepare endpoint buffers with silence."; | 245 LOG(ERROR) << "Failed to prepare endpoint buffers with silence."; |
246 callback->OnError(this); | 246 callback->OnError(this); |
247 return; | 247 return; |
248 } | 248 } |
249 } | 249 } |
250 num_written_frames_ = endpoint_buffer_size_frames_; | 250 num_written_frames_ = endpoint_buffer_size_frames_; |
251 | 251 |
| 252 if (!MarshalComPointers()) { |
| 253 callback->OnError(this); |
| 254 return; |
| 255 } |
| 256 |
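Because the render thread now enters a single-threaded apartment (note the ScopedCOMInitializer change further down), interface pointers obtained on the creating thread may no longer be used directly on the render thread; they have to be marshaled across the apartment boundary. For a single interface the standard shortcut is the CoMarshalInterThreadInterfaceInStream()/CoGetInterfaceAndReleaseStream() pair; a minimal sketch, assuming only |audio_clock_| had to cross:

    // Creating thread: marshal one interface pointer into a fresh stream.
    IStream* stream = NULL;
    HRESULT hr = CoMarshalInterThreadInterfaceInStream(
        __uuidof(IAudioClock), audio_clock_.get(), &stream);

    // Render thread: unmarshal the pointer and release the stream.
    IAudioClock* clock = NULL;
    if (SUCCEEDED(hr)) {
      hr = CoGetInterfaceAndReleaseStream(stream, __uuidof(IAudioClock),
                                          reinterpret_cast<void**>(&clock));
    }

MarshalComPointers() below uses the manual CoMarshalInterface() form instead, so that all three interfaces can travel through a single IStream.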
252 // Create and start the thread that will drive the rendering by waiting for | 257 // Create and start the thread that will drive the rendering by waiting for |
253 // render events. | 258 // render events. |
254 render_thread_.reset( | 259 render_thread_.reset( |
255 new base::DelegateSimpleThread(this, "wasapi_render_thread")); | 260 new base::DelegateSimpleThread(this, "wasapi_render_thread")); |
256 render_thread_->Start(); | 261 render_thread_->Start(); |
257 if (!render_thread_->HasBeenStarted()) { | 262 if (!render_thread_->HasBeenStarted()) { |
258 LOG(ERROR) << "Failed to start WASAPI render thread."; | 263 LOG(ERROR) << "Failed to start WASAPI render thread."; |
259 StopThread(); | 264 StopThread(); |
260 callback->OnError(this); | 265 callback->OnError(this); |
261 return; | 266 return; |
(...skipping 64 matching lines...) |
326 } | 331 } |
327 volume_ = volume_float; | 332 volume_ = volume_float; |
328 } | 333 } |
329 | 334 |
330 void WASAPIAudioOutputStream::GetVolume(double* volume) { | 335 void WASAPIAudioOutputStream::GetVolume(double* volume) { |
331 DVLOG(1) << "GetVolume()"; | 336 DVLOG(1) << "GetVolume()"; |
332 *volume = static_cast<double>(volume_); | 337 *volume = static_cast<double>(volume_); |
333 } | 338 } |
334 | 339 |
335 void WASAPIAudioOutputStream::Run() { | 340 void WASAPIAudioOutputStream::Run() { |
336 ScopedCOMInitializer com_init(ScopedCOMInitializer::kMTA); | 341 ScopedCOMInitializer com_init; |
337 | 342 |
338 // Increase the thread priority. | 343 // Increase the thread priority. |
339 render_thread_->SetThreadPriority(base::ThreadPriority::REALTIME_AUDIO); | 344 render_thread_->SetThreadPriority(base::ThreadPriority::REALTIME_AUDIO); |
340 | 345 |
341 // Enable MMCSS to ensure that this thread receives prioritized access to | 346 // Enable MMCSS to ensure that this thread receives prioritized access to |
342 // CPU resources. | 347 // CPU resources. |
343 DWORD task_index = 0; | 348 DWORD task_index = 0; |
344 HANDLE mm_task = avrt::AvSetMmThreadCharacteristics(L"Pro Audio", | 349 HANDLE mm_task = avrt::AvSetMmThreadCharacteristics(L"Pro Audio", |
345 &task_index); | 350 &task_index); |
346 bool mmcss_is_ok = | 351 bool mmcss_is_ok = |
347 (mm_task && avrt::AvSetMmThreadPriority(mm_task, AVRT_PRIORITY_CRITICAL)); | 352 (mm_task && avrt::AvSetMmThreadPriority(mm_task, AVRT_PRIORITY_CRITICAL)); |
348 if (!mmcss_is_ok) { | 353 if (!mmcss_is_ok) { |
349 // Failed to enable MMCSS on this thread. It is not fatal but can lead | 354 // Failed to enable MMCSS on this thread. It is not fatal but can lead |
350 // to reduced QoS at high load. | 355 // to reduced QoS at high load. |
351 DWORD err = GetLastError(); | 356 DWORD err = GetLastError(); |
352 LOG(WARNING) << "Failed to enable MMCSS (error code=" << err << ")."; | 357 LOG(WARNING) << "Failed to enable MMCSS (error code=" << err << ")."; |
353 } | 358 } |
354 | 359 |
| 360 // Unmarshal the COM pointers that were marshaled on the creating thread. |
| 361 ScopedComPtr<IAudioClient> audio_client; |
| 362 ScopedComPtr<IAudioRenderClient> audio_render_client; |
| 363 ScopedComPtr<IAudioClock> audio_clock; |
| 364 UnmarshalComPointers(&audio_client, &audio_render_client, &audio_clock); |
| 365 |
355 HRESULT hr = S_FALSE; | 366 HRESULT hr = S_FALSE; |
356 | 367 |
357 bool playing = true; | 368 bool playing = true; |
358 bool error = false; | 369 bool error = false; |
359 HANDLE wait_array[] = { stop_render_event_.Get(), | 370 HANDLE wait_array[] = { stop_render_event_.Get(), |
360 audio_samples_render_event_.Get() }; | 371 audio_samples_render_event_.Get() }; |
361 UINT64 device_frequency = 0; | 372 UINT64 device_frequency = 0; |
362 | 373 |
363 // The device frequency is the frequency generated by the hardware clock in | 374 // The device frequency is the frequency generated by the hardware clock in |
364 // the audio device. The GetFrequency() method reports a constant frequency. | 375 // the audio device. The GetFrequency() method reports a constant frequency. |
365 hr = audio_clock_->GetFrequency(&device_frequency); | 376 hr = audio_clock->GetFrequency(&device_frequency); |
366 error = FAILED(hr); | 377 error = FAILED(hr); |
367 PLOG_IF(ERROR, error) << "Failed to get the device frequency: " | 378 PLOG_IF(ERROR, error) << "Failed to get the device frequency: " |
368 << std::hex << hr; | 379 << std::hex << hr; |
369 | 380 |
370 // Keep rendering audio until the stop event or the stream-switch event | 381 // Keep rendering audio until the stop event or the stream-switch event |
371 // is signaled. An error event can also break the main thread loop. | 382 // is signaled. An error event can also break the main thread loop. |
372 while (playing && !error) { | 383 while (playing && !error) { |
373 // Wait for a close-down event, stream-switch event or a new render event. | 384 // Wait for a close-down event, stream-switch event or a new render event. |
374 DWORD wait_result = WaitForMultipleObjects(arraysize(wait_array), | 385 DWORD wait_result = WaitForMultipleObjects(arraysize(wait_array), |
375 wait_array, | 386 wait_array, |
376 FALSE, | 387 FALSE, |
377 INFINITE); | 388 INFINITE); |
378 | 389 |
379 switch (wait_result) { | 390 switch (wait_result) { |
380 case WAIT_OBJECT_0 + 0: | 391 case WAIT_OBJECT_0 + 0: |
381 // |stop_render_event_| has been set. | 392 // |stop_render_event_| has been set. |
382 playing = false; | 393 playing = false; |
383 break; | 394 break; |
384 case WAIT_OBJECT_0 + 1: | 395 case WAIT_OBJECT_0 + 1: |
385 // |audio_samples_render_event_| has been set. | 396 // |audio_samples_render_event_| has been set. |
386 error = !RenderAudioFromSource(device_frequency); | 397 error = !RenderAudioFromSource(device_frequency, audio_client.get(), |
| 398 audio_render_client.get(), |
| 399 audio_clock.get()); |
387 break; | 400 break; |
388 default: | 401 default: |
389 error = true; | 402 error = true; |
390 break; | 403 break; |
391 } | 404 } |
392 } | 405 } |
393 | 406 |
394 if (playing && error) { | 407 if (playing && error && audio_client) { |
395 // Stop audio rendering since something has gone wrong in our main thread | 408 // Stop audio rendering since something has gone wrong in our main thread |
396 // loop. Note that we are still in a "started" state, hence a Stop() call | 409 // loop. Note that we are still in a "started" state, hence a Stop() call |
397 // is required to join the thread properly. | 410 // is required to join the thread properly. |
398 audio_client_->Stop(); | 411 audio_client->Stop(); |
399 PLOG(ERROR) << "WASAPI rendering failed."; | 412 PLOG(ERROR) << "WASAPI rendering failed."; |
400 } | 413 } |
401 | 414 |
402 // Disable MMCSS. | 415 // Disable MMCSS. |
403 if (mm_task && !avrt::AvRevertMmThreadCharacteristics(mm_task)) { | 416 if (mm_task && !avrt::AvRevertMmThreadCharacteristics(mm_task)) { |
404 PLOG(WARNING) << "Failed to disable MMCSS"; | 417 PLOG(WARNING) << "Failed to disable MMCSS"; |
405 } | 418 } |
406 } | 419 } |
407 | 420 |
408 bool WASAPIAudioOutputStream::RenderAudioFromSource(UINT64 device_frequency) { | 421 bool WASAPIAudioOutputStream::RenderAudioFromSource( |
| 422 UINT64 device_frequency, |
| 423 IAudioClient* audio_client, |
| 424 IAudioRenderClient* audio_render_client, |
| 425 IAudioClock* audio_clock) { |
409 TRACE_EVENT0("audio", "RenderAudioFromSource"); | 426 TRACE_EVENT0("audio", "RenderAudioFromSource"); |
410 | 427 |
411 HRESULT hr = S_FALSE; | 428 HRESULT hr = S_FALSE; |
412 UINT32 num_queued_frames = 0; | 429 UINT32 num_queued_frames = 0; |
413 uint8* audio_data = NULL; | 430 uint8* audio_data = NULL; |
414 | 431 |
415 // Contains how much new data we can write to the buffer without | 432 // Contains how much new data we can write to the buffer without |
416 // the risk of overwriting previously written data that the audio | 433 // the risk of overwriting previously written data that the audio |
417 // engine has not yet read from the buffer. | 434 // engine has not yet read from the buffer. |
418 size_t num_available_frames = 0; | 435 size_t num_available_frames = 0; |
419 | 436 |
420 if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) { | 437 if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) { |
421 // Get the padding value which represents the amount of rendering | 438 // Get the padding value which represents the amount of rendering |
422 // data that is queued up to play in the endpoint buffer. | 439 // data that is queued up to play in the endpoint buffer. |
423 hr = audio_client_->GetCurrentPadding(&num_queued_frames); | 440 hr = audio_client->GetCurrentPadding(&num_queued_frames); |
424 if (FAILED(hr)) { | 441 if (FAILED(hr)) { |
425 DLOG(ERROR) << "Failed to retrieve amount of available space: " | 442 DLOG(ERROR) << "Failed to retrieve amount of available space: " |
426 << std::hex << hr; | 443 << std::hex << hr; |
427 return false; | 444 return false; |
428 } | 445 } |
429 num_available_frames = | 446 num_available_frames = |
430 endpoint_buffer_size_frames_ - num_queued_frames; | 447 endpoint_buffer_size_frames_ - num_queued_frames; |
431 } else { | 448 } else { |
432 // While the stream is running, the system alternately sends one | 449 // While the stream is running, the system alternately sends one |
433 // buffer or the other to the client. This form of double buffering | 450 // buffer or the other to the client. This form of double buffering |
(...skipping 21 matching lines...) |
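The elided lines handle the exclusive-mode case. Given the double-buffering description above, a sketch of what the branch presumably reduces to (an assumption; the skipped code is not shown here):

    // Exclusive mode, event driven: the client owns a full endpoint
    // buffer on every render event, so all of it is available.
    num_available_frames = endpoint_buffer_size_frames_;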
455 // fill up the available area in the endpoint buffer. | 472 // fill up the available area in the endpoint buffer. |
456 // |num_packets| will always be one for exclusive-mode streams and | 473 // |num_packets| will always be one for exclusive-mode streams and |
457 // will be one in most cases for shared mode streams as well. | 474 // will be one in most cases for shared mode streams as well. |
458 // However, we have found that two packets can sometimes be | 475 // However, we have found that two packets can sometimes be |
459 // required. | 476 // required. |
460 size_t num_packets = (num_available_frames / packet_size_frames_); | 477 size_t num_packets = (num_available_frames / packet_size_frames_); |
461 | 478 |
462 for (size_t n = 0; n < num_packets; ++n) { | 479 for (size_t n = 0; n < num_packets; ++n) { |
463 // Grab all available space in the rendering endpoint buffer | 480 // Grab all available space in the rendering endpoint buffer |
464 // into which the client can write a data packet. | 481 // into which the client can write a data packet. |
465 hr = audio_render_client_->GetBuffer(packet_size_frames_, | 482 hr = audio_render_client->GetBuffer(packet_size_frames_, &audio_data); |
466 &audio_data); | |
467 if (FAILED(hr)) { | 483 if (FAILED(hr)) { |
468 DLOG(ERROR) << "Failed to use rendering audio buffer: " | 484 DLOG(ERROR) << "Failed to use rendering audio buffer: " |
469 << std::hex << hr; | 485 << std::hex << hr; |
470 return false; | 486 return false; |
471 } | 487 } |
472 | 488 |
473 // Derive the audio delay which corresponds to the delay between | 489 // Derive the audio delay which corresponds to the delay between |
474 // a render event and the time when the first audio sample in a | 490 // a render event and the time when the first audio sample in a |
475 // packet is played out through the speaker. This delay value | 491 // packet is played out through the speaker. This delay value |
476 // can typically be utilized by an acoustic echo-control (AEC) | 492 // can typically be utilized by an acoustic echo-control (AEC) |
477 // unit at the render side. | 493 // unit at the render side. |
478 UINT64 position = 0; | 494 UINT64 position = 0; |
479 uint32 audio_delay_bytes = 0; | 495 uint32 audio_delay_bytes = 0; |
480 hr = audio_clock_->GetPosition(&position, NULL); | 496 hr = audio_clock->GetPosition(&position, NULL); |
481 if (SUCCEEDED(hr)) { | 497 if (SUCCEEDED(hr)) { |
482 // Stream position of the sample that is currently playing | 498 // Stream position of the sample that is currently playing |
483 // through the speaker. | 499 // through the speaker. |
484 double pos_sample_playing_frames = format_.Format.nSamplesPerSec * | 500 double pos_sample_playing_frames = format_.Format.nSamplesPerSec * |
485 (static_cast<double>(position) / device_frequency); | 501 (static_cast<double>(position) / device_frequency); |
486 | 502 |
487 // Stream position of the last sample written to the endpoint | 503 // Stream position of the last sample written to the endpoint |
488 // buffer. Note that the packet we are about to receive in | 504 // buffer. Note that the packet we are about to receive in |
489 // the upcoming callback is also included. | 505 // the upcoming callback is also included. |
490 size_t pos_last_sample_written_frames = | 506 size_t pos_last_sample_written_frames = |
(...skipping 19 matching lines...) |
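The skipped lines finish the delay derivation. A sketch of the arithmetic, assuming the write position counts every frame written so far plus the packet about to be filled:

    // Hypothetical reconstruction, not the verbatim skipped code.
    size_t pos_last_sample_written_frames =
        num_written_frames_ + packet_size_frames_;
    // Frames between the write and play positions, converted to bytes
    // via the format's block alignment.
    audio_delay_bytes = (pos_last_sample_written_frames -
        pos_sample_playing_frames) * format_.Format.nBlockAlign;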
510 const int bytes_per_sample = format_.Format.wBitsPerSample >> 3; | 526 const int bytes_per_sample = format_.Format.wBitsPerSample >> 3; |
511 audio_bus_->Scale(volume_); | 527 audio_bus_->Scale(volume_); |
512 audio_bus_->ToInterleaved( | 528 audio_bus_->ToInterleaved( |
513 frames_filled, bytes_per_sample, audio_data); | 529 frames_filled, bytes_per_sample, audio_data); |
514 | 530 |
515 | 531 |
516 // Release the buffer space acquired in the GetBuffer() call. | 532 // Release the buffer space acquired in the GetBuffer() call. |
517 // Render silence if we were not able to fill the buffer completely. | 533 // Render silence if we were not able to fill the buffer completely. |
518 DWORD flags = (num_filled_bytes < packet_size_bytes_) ? | 534 DWORD flags = (num_filled_bytes < packet_size_bytes_) ? |
519 AUDCLNT_BUFFERFLAGS_SILENT : 0; | 535 AUDCLNT_BUFFERFLAGS_SILENT : 0; |
520 audio_render_client_->ReleaseBuffer(packet_size_frames_, flags); | 536 audio_render_client->ReleaseBuffer(packet_size_frames_, flags); |
521 | 537 |
522 num_written_frames_ += packet_size_frames_; | 538 num_written_frames_ += packet_size_frames_; |
523 } | 539 } |
524 | 540 |
525 return true; | 541 return true; |
526 } | 542 } |
527 | 543 |
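The AUDCLNT_BUFFERFLAGS_SILENT test above compares byte counts; frames convert to bytes through the format's block alignment (channels times bytes per sample). A worked example with an assumed format that is not taken from this CL, stereo 16-bit at 48 kHz with 10 ms packets:

    const int channels = 2;                       // Assumed stereo.
    const int bytes_per_sample = 2;               // wBitsPerSample >> 3 for S16.
    const int block_align = channels * bytes_per_sample;             // 4 bytes.
    const size_t packet_size_frames = 480;        // 10 ms at 48000 Hz.
    const size_t packet_size_bytes = packet_size_frames * block_align;  // 1920.
    // If the source fills only 240 frames, num_filled_bytes is 960 < 1920,
    // so the packet is released with AUDCLNT_BUFFERFLAGS_SILENT set and the
    // audio engine treats the whole packet as silence.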
528 HRESULT WASAPIAudioOutputStream::ExclusiveModeInitialization( | 544 HRESULT WASAPIAudioOutputStream::ExclusiveModeInitialization( |
529 IAudioClient* client, HANDLE event_handle, uint32* endpoint_buffer_size) { | 545 IAudioClient* client, HANDLE event_handle, uint32* endpoint_buffer_size) { |
530 DCHECK_EQ(share_mode_, AUDCLNT_SHAREMODE_EXCLUSIVE); | 546 DCHECK_EQ(share_mode_, AUDCLNT_SHAREMODE_EXCLUSIVE); |
(...skipping 84 matching lines...) |
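The skipped body negotiates the exclusive-mode buffer. A sketch of the core call under the usual WASAPI pattern (the elided details may differ): in exclusive, event-driven mode the periodicity argument must equal the buffer duration, and a misaligned size is retried after AUDCLNT_E_BUFFER_SIZE_NOT_ALIGNED.

    // Convert frames to 100-ns REFERENCE_TIME units.
    REFERENCE_TIME requested_duration = static_cast<REFERENCE_TIME>(
        10000000.0 * packet_size_frames_ / format_.Format.nSamplesPerSec + 0.5);
    HRESULT hr = client->Initialize(AUDCLNT_SHAREMODE_EXCLUSIVE,
                                    AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
                                    requested_duration,
                                    requested_duration,  // Must equal duration.
                                    reinterpret_cast<WAVEFORMATEX*>(&format_),
                                    NULL);
    // On AUDCLNT_E_BUFFER_SIZE_NOT_ALIGNED: read the aligned frame count
    // with GetBufferSize(), recompute the duration, and call Initialize()
    // again on a fresh IAudioClient instance.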
615 render_thread_.reset(); | 631 render_thread_.reset(); |
616 | 632 |
617 // Ensure that we don't quit the main thread loop immediately next | 633 // Ensure that we don't quit the main thread loop immediately next |
618 // time Start() is called. | 634 // time Start() is called. |
619 ResetEvent(stop_render_event_.Get()); | 635 ResetEvent(stop_render_event_.Get()); |
620 } | 636 } |
621 | 637 |
622 source_ = NULL; | 638 source_ = NULL; |
623 } | 639 } |
624 | 640 |
| 641 bool WASAPIAudioOutputStream::MarshalComPointers() { |
| 642 DCHECK_EQ(creating_thread_id_, base::PlatformThread::CurrentId()); |
| 643 DCHECK(!com_stream_); |
| 644 |
| 645 ScopedComPtr<IStream> com_stream; |
| 646 HRESULT hr = CreateStreamOnHGlobal(NULL, TRUE, com_stream.Receive()); |
| 647 if (FAILED(hr)) { |
| 648 DLOG(ERROR) << "Failed to create stream for marshaling COM pointers."; |
| 649 return false; |
| 650 } |
| 651 |
| 652 hr = CoMarshalInterface(com_stream.get(), __uuidof(IAudioClient), |
| 653 audio_client_.get(), MSHCTX_INPROC, NULL, |
| 654 MSHLFLAGS_NORMAL); |
| 655 if (FAILED(hr)) { |
| 656 DLOG(ERROR) << "Marshal failed for IAudioClient: " << std::hex << hr; |
| 657 return false; |
| 658 } |
| 659 |
| 660 hr = CoMarshalInterface(com_stream.get(), __uuidof(IAudioRenderClient), |
| 661 audio_render_client_.get(), MSHCTX_INPROC, NULL, |
| 662 MSHLFLAGS_NORMAL); |
| 663 if (FAILED(hr)) { |
| 664 DLOG(ERROR) << "Marshal failed for IAudioRenderClient: " << std::hex << hr; |
| 665 return false; |
| 666 } |
| 667 |
| 668 hr = CoMarshalInterface(com_stream.get(), __uuidof(IAudioClock), |
| 669 audio_clock_.get(), MSHCTX_INPROC, NULL, |
| 670 MSHLFLAGS_NORMAL); |
| 671 if (FAILED(hr)) { |
| 672 DLOG(ERROR) << "Marshal failed for IAudioClock: " << std::hex << hr; |
| 673 return false; |
| 674 } |
| 675 |
| 676 LARGE_INTEGER pos = {0}; |
| 677 hr = com_stream->Seek(pos, STREAM_SEEK_SET, NULL); |
| 678 if (FAILED(hr)) { |
| 679 DLOG(ERROR) << "Failed to seek IStream for marshaling: " << std::hex << hr; |
| 680 return false; |
| 681 } |
| 682 |
| 683 com_stream_ = com_stream.Pass(); |
| 684 return true; |
| 685 } |
| 686 |
| 687 void WASAPIAudioOutputStream::UnmarshalComPointers( |
| 688 ScopedComPtr<IAudioClient>* audio_client, |
| 689 ScopedComPtr<IAudioRenderClient>* audio_render_client, |
| 690 ScopedComPtr<IAudioClock>* audio_clock) { |
| 691 DCHECK_EQ(render_thread_->tid(), base::PlatformThread::CurrentId()); |
| 692 |
| 693 DCHECK(com_stream_); |
| 694 ScopedComPtr<IStream> com_stream; |
| 695 com_stream = com_stream_.Pass(); |
| 696 |
| 697 HRESULT hr = CoUnmarshalInterface(com_stream.get(), __uuidof(IAudioClient), |
| 698 audio_client->ReceiveVoid()); |
| 699 CHECK(SUCCEEDED(hr)); |
| 700 |
| 701 hr = CoUnmarshalInterface(com_stream.get(), __uuidof(IAudioRenderClient), |
| 702 audio_render_client->ReceiveVoid()); |
| 703 CHECK(SUCCEEDED(hr)); |
| 704 |
| 705 hr = CoUnmarshalInterface(com_stream.get(), __uuidof(IAudioClock), |
| 706 audio_clock->ReceiveVoid()); |
| 707 CHECK(SUCCEEDED(hr)); |
| 708 } |
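One caveat worth noting: a stream marshaled with MSHLFLAGS_NORMAL keeps references on the marshaled interfaces until each CoMarshalInterface() is balanced by exactly one CoUnmarshalInterface() or CoReleaseMarshalData(). If the render thread never consumes |com_stream_| (for example, Start() fails right after MarshalComPointers()), a cleanup along these lines would be needed; a hypothetical helper, not part of this CL:

    // Hypothetical: drop the marshaled references when the render thread
    // never ran UnmarshalComPointers().
    void ReleaseMarshaledComPointers(IStream* stream) {
      LARGE_INTEGER pos = {0};
      stream->Seek(pos, STREAM_SEEK_SET, NULL);
      for (int i = 0; i < 3; ++i)  // One per CoMarshalInterface() call.
        CoReleaseMarshalData(stream);
    }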
| 709 |
625 } // namespace media | 710 } // namespace media |