Chromium Code Reviews

Side by Side Diff: media/audio/win/audio_low_latency_output_win.cc

Issue 1097553003: Switch to STA mode for audio thread and WASAPI I/O streams. (Closed) Base URL: http://chromium.googlesource.com/chromium/src.git@master
Patch Set: Simplify. Created 5 years, 8 months ago
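This change moves the audio thread and the WASAPI render thread to single-threaded apartments, so the IAudioClient, IAudioRenderClient, and IAudioClock pointers created on the audio thread can no longer be handed to the render thread as raw member pointers; the patch serializes them into an IStream with CoMarshalInterface and rebuilds apartment-safe proxies on the render thread with CoUnmarshalInterface. A minimal sketch of that round trip, assuming the Windows COM headers; the function names are illustrative and not the patch's API:

#include <objbase.h>
#include <audioclient.h>

// Creating (STA) thread: serialize the interface into a memory stream.
HRESULT MarshalClientToStream(IAudioClient* client, IStream** out_stream) {
  HRESULT hr = CreateStreamOnHGlobal(NULL, TRUE, out_stream);
  if (FAILED(hr))
    return hr;
  hr = CoMarshalInterface(*out_stream, __uuidof(IAudioClient), client,
                          MSHCTX_INPROC, NULL, MSHLFLAGS_NORMAL);
  if (FAILED(hr))
    return hr;
  // Rewind so the render thread reads the marshaled data from the start.
  LARGE_INTEGER zero = {};
  return (*out_stream)->Seek(zero, STREAM_SEEK_SET, NULL);
}

// Render thread (after initializing COM on that thread): rebuild a proxy
// that is safe to call from this apartment.
HRESULT UnmarshalClientFromStream(IStream* stream, IAudioClient** client) {
  return CoUnmarshalInterface(stream, __uuidof(IAudioClient),
                              reinterpret_cast<void**>(client));
}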
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "media/audio/win/audio_low_latency_output_win.h" 5 #include "media/audio/win/audio_low_latency_output_win.h"
6 6
7 #include <Functiondiscoverykeys_devpkey.h> 7 #include <Functiondiscoverykeys_devpkey.h>
8 8
9 #include "base/command_line.h" 9 #include "base/command_line.h"
10 #include "base/logging.h" 10 #include "base/logging.h"
(...skipping 231 matching lines...)
242 if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) { 242 if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) {
243 if (!CoreAudioUtil::FillRenderEndpointBufferWithSilence( 243 if (!CoreAudioUtil::FillRenderEndpointBufferWithSilence(
244 audio_client_.get(), audio_render_client_.get())) { 244 audio_client_.get(), audio_render_client_.get())) {
245 LOG(ERROR) << "Failed to prepare endpoint buffers with silence."; 245 LOG(ERROR) << "Failed to prepare endpoint buffers with silence.";
246 callback->OnError(this); 246 callback->OnError(this);
247 return; 247 return;
248 } 248 }
249 } 249 }
250 num_written_frames_ = endpoint_buffer_size_frames_; 250 num_written_frames_ = endpoint_buffer_size_frames_;
251 251
252 if (!MarshalComPointers()) {
253 callback->OnError(this);
254 return;
255 }
256
252 // Create and start the thread that will drive the rendering by waiting for 257 // Create and start the thread that will drive the rendering by waiting for
253 // render events. 258 // render events.
254 render_thread_.reset( 259 render_thread_.reset(
255 new base::DelegateSimpleThread(this, "wasapi_render_thread")); 260 new base::DelegateSimpleThread(this, "wasapi_render_thread"));
256 render_thread_->Start(); 261 render_thread_->Start();
257 if (!render_thread_->HasBeenStarted()) { 262 if (!render_thread_->HasBeenStarted()) {
258 LOG(ERROR) << "Failed to start WASAPI render thread."; 263 LOG(ERROR) << "Failed to start WASAPI render thread.";
259 StopThread(); 264 StopThread();
260 callback->OnError(this); 265 callback->OnError(this);
261 return; 266 return;
262 } 267 }
263 268
264 // Start streaming data between the endpoint buffer and the audio engine. 269 // Start streaming data between the endpoint buffer and the audio engine.
270 // TODO(dalecurtis): Do we need a lock on this with STA mode?
DaleCurtis 2015/04/22 17:48:54 Tentatively removed this assuming your POV is correct
265 HRESULT hr = audio_client_->Start(); 271 HRESULT hr = audio_client_->Start();
266 if (FAILED(hr)) { 272 if (FAILED(hr)) {
267 PLOG(ERROR) << "Failed to start output streaming: " << std::hex << hr; 273 PLOG(ERROR) << "Failed to start output streaming: " << std::hex << hr;
268 StopThread(); 274 StopThread();
269 callback->OnError(this); 275 callback->OnError(this);
270 } 276 }
271 } 277 }
272 278
273 void WASAPIAudioOutputStream::Stop() { 279 void WASAPIAudioOutputStream::Stop() {
274 DVLOG(1) << "WASAPIAudioOutputStream::Stop()"; 280 DVLOG(1) << "WASAPIAudioOutputStream::Stop()";
275 DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_); 281 DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_);
276 if (!render_thread_) 282 if (!render_thread_)
277 return; 283 return;
278 284
279 // Stop output audio streaming. 285 // Stop output audio streaming.
286 // TODO(dalecurtis): Do we need a lock on this with STA mode?
280 HRESULT hr = audio_client_->Stop(); 287 HRESULT hr = audio_client_->Stop();
281 if (FAILED(hr)) { 288 if (FAILED(hr)) {
282 PLOG(ERROR) << "Failed to stop output streaming: " << std::hex << hr; 289 PLOG(ERROR) << "Failed to stop output streaming: " << std::hex << hr;
283 source_->OnError(this); 290 source_->OnError(this);
284 } 291 }
285 292
286 // Make a local copy of |source_| since StopThread() will clear it. 293 // Make a local copy of |source_| since StopThread() will clear it.
287 AudioSourceCallback* callback = source_; 294 AudioSourceCallback* callback = source_;
288 StopThread(); 295 StopThread();
289 296
(...skipping 36 matching lines...)
326 } 333 }
327 volume_ = volume_float; 334 volume_ = volume_float;
328 } 335 }
329 336
330 void WASAPIAudioOutputStream::GetVolume(double* volume) { 337 void WASAPIAudioOutputStream::GetVolume(double* volume) {
331 DVLOG(1) << "GetVolume()"; 338 DVLOG(1) << "GetVolume()";
332 *volume = static_cast<double>(volume_); 339 *volume = static_cast<double>(volume_);
333 } 340 }
334 341
335 void WASAPIAudioOutputStream::Run() { 342 void WASAPIAudioOutputStream::Run() {
336 ScopedCOMInitializer com_init(ScopedCOMInitializer::kMTA); 343 ScopedCOMInitializer com_init;
337 344
338 // Increase the thread priority. 345 // Increase the thread priority.
339 render_thread_->SetThreadPriority(base::ThreadPriority::REALTIME_AUDIO); 346 render_thread_->SetThreadPriority(base::ThreadPriority::REALTIME_AUDIO);
340 347
341 // Enable MMCSS to ensure that this thread receives prioritized access to 348 // Enable MMCSS to ensure that this thread receives prioritized access to
342 // CPU resources. 349 // CPU resources.
343 DWORD task_index = 0; 350 DWORD task_index = 0;
344 HANDLE mm_task = avrt::AvSetMmThreadCharacteristics(L"Pro Audio", 351 HANDLE mm_task = avrt::AvSetMmThreadCharacteristics(L"Pro Audio",
345 &task_index); 352 &task_index);
346 bool mmcss_is_ok = 353 bool mmcss_is_ok =
347 (mm_task && avrt::AvSetMmThreadPriority(mm_task, AVRT_PRIORITY_CRITICAL)); 354 (mm_task && avrt::AvSetMmThreadPriority(mm_task, AVRT_PRIORITY_CRITICAL));
348 if (!mmcss_is_ok) { 355 if (!mmcss_is_ok) {
349 // Failed to enable MMCSS on this thread. It is not fatal but can lead 356 // Failed to enable MMCSS on this thread. It is not fatal but can lead
350 // to reduced QoS at high load. 357 // to reduced QoS at high load.
351 DWORD err = GetLastError(); 358 DWORD err = GetLastError();
352 LOG(WARNING) << "Failed to enable MMCSS (error code=" << err << ")."; 359 LOG(WARNING) << "Failed to enable MMCSS (error code=" << err << ").";
353 } 360 }
354 361
362 // Retrieve COM pointers from the main thread.
363 ScopedComPtr<IAudioClient> thread_audio_client;
364 ScopedComPtr<IAudioRenderClient> thread_audio_render_client;
365 ScopedComPtr<IAudioClock> thread_audio_clock;
366
355 HRESULT hr = S_FALSE; 367 HRESULT hr = S_FALSE;
356 368
357 bool playing = true; 369 bool playing = true;
358 bool error = false; 370 bool error =
371 !UnmarshalComPointers(&thread_audio_client, &thread_audio_render_client,
372 &thread_audio_clock);
373
359 HANDLE wait_array[] = { stop_render_event_.Get(), 374 HANDLE wait_array[] = { stop_render_event_.Get(),
360 audio_samples_render_event_.Get() }; 375 audio_samples_render_event_.Get() };
361 UINT64 device_frequency = 0; 376 UINT64 device_frequency = 0;
362 377
363 // The device frequency is the frequency generated by the hardware clock in 378 if (!error) {
364 // the audio device. The GetFrequency() method reports a constant frequency. 379 // The device frequency is the frequency generated by the hardware clock in
365 hr = audio_clock_->GetFrequency(&device_frequency); 380 // the audio device. The GetFrequency() method reports a constant frequency.
366 error = FAILED(hr); 381 hr = thread_audio_clock->GetFrequency(&device_frequency);
367 PLOG_IF(ERROR, error) << "Failed to acquire IAudioClock interface: " 382 error = FAILED(hr);
368 << std::hex << hr; 383 PLOG_IF(ERROR, error) << "Failed to acquire IAudioClock interface: "
384 << std::hex << hr;
385 }
369 386
370 // Keep rendering audio until the stop event or the stream-switch event 387 // Keep rendering audio until the stop event or the stream-switch event
371 // is signaled. An error event can also break the main thread loop. 388 // is signaled. An error event can also break the main thread loop.
372 while (playing && !error) { 389 while (playing && !error) {
373 // Wait for a close-down event, stream-switch event or a new render event. 390 // Wait for a close-down event, stream-switch event or a new render event.
374 DWORD wait_result = WaitForMultipleObjects(arraysize(wait_array), 391 DWORD wait_result = WaitForMultipleObjects(arraysize(wait_array),
375 wait_array, 392 wait_array,
376 FALSE, 393 FALSE,
377 INFINITE); 394 INFINITE);
378 395
379 switch (wait_result) { 396 switch (wait_result) {
380 case WAIT_OBJECT_0 + 0: 397 case WAIT_OBJECT_0 + 0:
381 // |stop_render_event_| has been set. 398 // |stop_render_event_| has been set.
382 playing = false; 399 playing = false;
383 break; 400 break;
384 case WAIT_OBJECT_0 + 1: 401 case WAIT_OBJECT_0 + 1:
385 // |audio_samples_render_event_| has been set. 402 // |audio_samples_render_event_| has been set.
386 error = !RenderAudioFromSource(device_frequency); 403 error = !RenderAudioFromSource(
404 device_frequency, thread_audio_client.get(),
405 thread_audio_render_client.get(), thread_audio_clock.get());
387 break; 406 break;
388 default: 407 default:
389 error = true; 408 error = true;
390 break; 409 break;
391 } 410 }
392 } 411 }
393 412
394 if (playing && error) { 413 if (playing && error && thread_audio_client) {
395 // Stop audio rendering since something has gone wrong in our main thread 414 // Stop audio rendering since something has gone wrong in our main thread
396 // loop. Note that, we are still in a "started" state, hence a Stop() call 415 // loop. Note that, we are still in a "started" state, hence a Stop() call
397 // is required to join the thread properly. 416 // is required to join the thread properly.
398 audio_client_->Stop(); 417 thread_audio_client->Stop();
399 PLOG(ERROR) << "WASAPI rendering failed."; 418 PLOG(ERROR) << "WASAPI rendering failed.";
400 } 419 }
401 420
402 // Disable MMCSS. 421 // Disable MMCSS.
403 if (mm_task && !avrt::AvRevertMmThreadCharacteristics(mm_task)) { 422 if (mm_task && !avrt::AvRevertMmThreadCharacteristics(mm_task)) {
404 PLOG(WARNING) << "Failed to disable MMCSS"; 423 PLOG(WARNING) << "Failed to disable MMCSS";
405 } 424 }
406 } 425 }
407 426
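The only other change inside Run()'s setup is the ScopedCOMInitializer constructor: the render thread now joins the default single-threaded apartment instead of the MTA, which is presumably why the endpoint interfaces must arrive via marshaling rather than as raw member pointers. A minimal sketch of the two initialization modes, assuming base/win/scoped_com_initializer.h; the function name is illustrative:

#include "base/win/scoped_com_initializer.h"

void RenderThreadEntrySketch() {
  // Default constructor joins a single-threaded apartment (STA), matching
  // the post-patch Run().
  base::win::ScopedCOMInitializer sta_com;

  // The pre-patch code instead opted into the multithreaded apartment:
  //   base::win::ScopedCOMInitializer mta_com(
  //       base::win::ScopedCOMInitializer::kMTA);
}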
408 bool WASAPIAudioOutputStream::RenderAudioFromSource(UINT64 device_frequency) { 427 bool WASAPIAudioOutputStream::RenderAudioFromSource(
428 UINT64 device_frequency,
429 IAudioClient* thread_audio_client,
tommi (sloooow) - chröme 2015/04/22 10:31:52 nit: Does the 'thread_' prefix add context? I thi
DaleCurtis 2015/04/22 17:48:54 Done.
430 IAudioRenderClient* thread_audio_render_client,
431 IAudioClock* thread_audio_clock) {
409 TRACE_EVENT0("audio", "RenderAudioFromSource"); 432 TRACE_EVENT0("audio", "RenderAudioFromSource");
410 433
411 HRESULT hr = S_FALSE; 434 HRESULT hr = S_FALSE;
412 UINT32 num_queued_frames = 0; 435 UINT32 num_queued_frames = 0;
413 uint8* audio_data = NULL; 436 uint8* audio_data = NULL;
414 437
415 // Contains how much new data we can write to the buffer without 438 // Contains how much new data we can write to the buffer without
416 // the risk of overwriting previously written data that the audio 439 // the risk of overwriting previously written data that the audio
417 // engine has not yet read from the buffer. 440 // engine has not yet read from the buffer.
418 size_t num_available_frames = 0; 441 size_t num_available_frames = 0;
419 442
420 if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) { 443 if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) {
421 // Get the padding value which represents the amount of rendering 444 // Get the padding value which represents the amount of rendering
422 // data that is queued up to play in the endpoint buffer. 445 // data that is queued up to play in the endpoint buffer.
423 hr = audio_client_->GetCurrentPadding(&num_queued_frames); 446 hr = thread_audio_client->GetCurrentPadding(&num_queued_frames);
424 num_available_frames = 447 num_available_frames =
425 endpoint_buffer_size_frames_ - num_queued_frames; 448 endpoint_buffer_size_frames_ - num_queued_frames;
426 if (FAILED(hr)) { 449 if (FAILED(hr)) {
427 DLOG(ERROR) << "Failed to retrieve amount of available space: " 450 DLOG(ERROR) << "Failed to retrieve amount of available space: "
428 << std::hex << hr; 451 << std::hex << hr;
429 return false; 452 return false;
430 } 453 }
431 } else { 454 } else {
432 // While the stream is running, the system alternately sends one 455 // While the stream is running, the system alternately sends one
433 // buffer or the other to the client. This form of double buffering 456 // buffer or the other to the client. This form of double buffering
(...skipping 21 matching lines...)
455 // fill up the available area in the endpoint buffer. 478 // fill up the available area in the endpoint buffer.
456 // |num_packets| will always be one for exclusive-mode streams and 479 // |num_packets| will always be one for exclusive-mode streams and
457 // will be one in most cases for shared mode streams as well. 480 // will be one in most cases for shared mode streams as well.
458 // However, we have found that two packets can sometimes be 481 // However, we have found that two packets can sometimes be
459 // required. 482 // required.
460 size_t num_packets = (num_available_frames / packet_size_frames_); 483 size_t num_packets = (num_available_frames / packet_size_frames_);
461 484
462 for (size_t n = 0; n < num_packets; ++n) { 485 for (size_t n = 0; n < num_packets; ++n) {
463 // Grab all available space in the rendering endpoint buffer 486 // Grab all available space in the rendering endpoint buffer
464 // into which the client can write a data packet. 487 // into which the client can write a data packet.
465 hr = audio_render_client_->GetBuffer(packet_size_frames_, 488 hr =
466 &audio_data); 489 thread_audio_render_client->GetBuffer(packet_size_frames_, &audio_data);
467 if (FAILED(hr)) { 490 if (FAILED(hr)) {
468 DLOG(ERROR) << "Failed to use rendering audio buffer: " 491 DLOG(ERROR) << "Failed to use rendering audio buffer: "
469 << std::hex << hr; 492 << std::hex << hr;
470 return false; 493 return false;
471 } 494 }
472 495
473 // Derive the audio delay which corresponds to the delay between 496 // Derive the audio delay which corresponds to the delay between
474 // a render event and the time when the first audio sample in a 497 // a render event and the time when the first audio sample in a
475 // packet is played out through the speaker. This delay value 498 // packet is played out through the speaker. This delay value
476 // can typically be utilized by an acoustic echo-control (AEC) 499 // can typically be utilized by an acoustic echo-control (AEC)
477 // unit at the render side. 500 // unit at the render side.
478 UINT64 position = 0; 501 UINT64 position = 0;
479 uint32 audio_delay_bytes = 0; 502 uint32 audio_delay_bytes = 0;
480 hr = audio_clock_->GetPosition(&position, NULL); 503 hr = thread_audio_clock->GetPosition(&position, NULL);
481 if (SUCCEEDED(hr)) { 504 if (SUCCEEDED(hr)) {
482 // Stream position of the sample that is currently playing 505 // Stream position of the sample that is currently playing
483 // through the speaker. 506 // through the speaker.
484 double pos_sample_playing_frames = format_.Format.nSamplesPerSec * 507 double pos_sample_playing_frames = format_.Format.nSamplesPerSec *
485 (static_cast<double>(position) / device_frequency); 508 (static_cast<double>(position) / device_frequency);
486 509
487 // Stream position of the last sample written to the endpoint 510 // Stream position of the last sample written to the endpoint
488 // buffer. Note that, the packet we are about to receive in 511 // buffer. Note that, the packet we are about to receive in
489 // the upcoming callback is also included. 512 // the upcoming callback is also included.
490 size_t pos_last_sample_written_frames = 513 size_t pos_last_sample_written_frames =
(...skipping 19 matching lines...)
510 const int bytes_per_sample = format_.Format.wBitsPerSample >> 3; 533 const int bytes_per_sample = format_.Format.wBitsPerSample >> 3;
511 audio_bus_->Scale(volume_); 534 audio_bus_->Scale(volume_);
512 audio_bus_->ToInterleaved( 535 audio_bus_->ToInterleaved(
513 frames_filled, bytes_per_sample, audio_data); 536 frames_filled, bytes_per_sample, audio_data);
514 537
515 538
516 // Release the buffer space acquired in the GetBuffer() call. 539 // Release the buffer space acquired in the GetBuffer() call.
517 // Render silence if we were not able to fill up the buffer totally. 540 // Render silence if we were not able to fill up the buffer totally.
518 DWORD flags = (num_filled_bytes < packet_size_bytes_) ? 541 DWORD flags = (num_filled_bytes < packet_size_bytes_) ?
519 AUDCLNT_BUFFERFLAGS_SILENT : 0; 542 AUDCLNT_BUFFERFLAGS_SILENT : 0;
520 audio_render_client_->ReleaseBuffer(packet_size_frames_, flags); 543 thread_audio_render_client->ReleaseBuffer(packet_size_frames_, flags);
521 544
522 num_written_frames_ += packet_size_frames_; 545 num_written_frames_ += packet_size_frames_;
523 } 546 }
524 547
525 return true; 548 return true;
526 } 549 }
527 550
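The GetPosition()/GetFrequency() conversion above is the only part of the delay derivation visible in this diff (the rest sits in the skipped lines). A hedged worked example of just that conversion, with hypothetical numbers:

#include <cstdint>

// Worked example of the visible conversion only; the remainder of the delay
// computation is elided in this diff. The numbers in the comment are
// hypothetical.
double PositionToFrames(uint64_t position, uint64_t device_frequency,
                        double samples_per_sec) {
  // e.g. position = 2,400,000 device ticks at device_frequency = 1,200,000
  // ticks/s -> 2.0 s of audio played; at 48,000 samples/s that is 96,000
  // frames.
  return samples_per_sec *
         (static_cast<double>(position) / device_frequency);
}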
528 HRESULT WASAPIAudioOutputStream::ExclusiveModeInitialization( 551 HRESULT WASAPIAudioOutputStream::ExclusiveModeInitialization(
529 IAudioClient* client, HANDLE event_handle, uint32* endpoint_buffer_size) { 552 IAudioClient* client, HANDLE event_handle, uint32* endpoint_buffer_size) {
530 DCHECK_EQ(share_mode_, AUDCLNT_SHAREMODE_EXCLUSIVE); 553 DCHECK_EQ(share_mode_, AUDCLNT_SHAREMODE_EXCLUSIVE);
(...skipping 84 matching lines...)
615 render_thread_.reset(); 638 render_thread_.reset();
616 639
617 // Ensure that we don't quit the main thread loop immediately next 640 // Ensure that we don't quit the main thread loop immediately next
618 // time Start() is called. 641 // time Start() is called.
619 ResetEvent(stop_render_event_.Get()); 642 ResetEvent(stop_render_event_.Get());
620 } 643 }
621 644
622 source_ = NULL; 645 source_ = NULL;
623 } 646 }
624 647
648 bool WASAPIAudioOutputStream::MarshalComPointers() {
tommi (sloooow) - chröme 2015/04/22 10:31:52 Can we add a thread checker for these methods? Th
DaleCurtis 2015/04/22 16:08:23 I'll see if I can. I forget if the unit tests try
DaleCurtis 2015/04/22 17:48:54 Done.
649 HRESULT hr = CreateStreamOnHGlobal(NULL, TRUE, com_stream_.Receive());
tommi (sloooow) - chröme 2015/04/22 10:31:52 what about using a local variable for the stream h
DaleCurtis 2015/04/22 16:08:23 Good idea, I'll do this.
DaleCurtis 2015/04/22 17:48:54 Done.
650 if (FAILED(hr)) {
651 DLOG(ERROR) << "Failed to create stream for marshaling COM pointers.";
652 return false;
653 }
654
655 hr = CoMarshalInterface(com_stream_.get(), __uuidof(IAudioClient),
tommi (sloooow) - chröme 2015/04/22 10:31:52 was there a particular reason you decided to go wi
DaleCurtis 2015/04/22 16:08:23 Yes, but maybe not good ones, as I basically just
656 audio_client_.get(), MSHCTX_INPROC, NULL,
657 MSHLFLAGS_NORMAL);
658 if (FAILED(hr)) {
659 DLOG(ERROR) << "Marshal failed for IAudioClient: " << std::hex << hr;
660 com_stream_.Release();
661 return false;
662 }
663
664 hr = CoMarshalInterface(com_stream_.get(), __uuidof(IAudioRenderClient),
665 audio_render_client_.get(), MSHCTX_INPROC, NULL,
666 MSHLFLAGS_NORMAL);
667 if (FAILED(hr)) {
668 DLOG(ERROR) << "Marshal failed for IAudioRenderClient: " << std::hex << hr;
669 com_stream_.Release();
670 return false;
671 }
672
673 hr = CoMarshalInterface(com_stream_.get(), __uuidof(IAudioClock),
674 audio_clock_.get(), MSHCTX_INPROC, NULL,
675 MSHLFLAGS_NORMAL);
676 if (FAILED(hr)) {
677 DLOG(ERROR) << "Marshal failed for IAudioClock: " << std::hex << hr;
678 com_stream_.Release();
679 return false;
680 }
681
682 LARGE_INTEGER pos = {0};
683 hr = com_stream_->Seek(pos, STREAM_SEEK_SET, NULL);
684 if (FAILED(hr)) {
685 DLOG(ERROR) << "Failed to seek IStream for marshaling: " << std::hex << hr;
686 com_stream_.Release();
687 return false;
688 }
689
690 return true;
691 }
692
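tommi's (truncated) question above about the choice of marshaling API may be pointing at COM's single-call helpers, which wrap the same stream-based round trip that MarshalComPointers() spells out explicitly. For reference, a hedged sketch of that alternative; it is not what this patch uses, and the function names are illustrative:

#include <objbase.h>
#include <audioclient.h>

// Producing thread: one call creates the stream, marshals the interface,
// and leaves the stream ready for the consumer.
HRESULT PassClientBetweenThreads(IAudioClient* client,
                                 IStream** stream_for_other_thread) {
  return CoMarshalInterThreadInterfaceInStream(
      __uuidof(IAudioClient), client, stream_for_other_thread);
}

// Consuming thread: one call unmarshals the proxy and releases the stream,
// even when it fails.
HRESULT ReceiveClientOnRenderThread(IStream* stream, IAudioClient** client) {
  return CoGetInterfaceAndReleaseStream(
      stream, __uuidof(IAudioClient), reinterpret_cast<void**>(client));
}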
693 bool WASAPIAudioOutputStream::UnmarshalComPointers(
694 ScopedComPtr<IAudioClient>* audio_client,
695 ScopedComPtr<IAudioRenderClient>* audio_render_client,
696 ScopedComPtr<IAudioClock>* audio_clock) {
697 HRESULT hr = CoUnmarshalInterface(com_stream_.get(), __uuidof(IAudioClient),
tommi (sloooow) - chröme 2015/04/22 10:31:52 nit: would be nice to detach com_stream_ here to a
DaleCurtis 2015/04/22 17:48:54 Done.
698 audio_client->ReceiveVoid());
699 if (FAILED(hr)) {
700 DLOG(ERROR) << "Unmarshal failed IAudioClient: " << std::hex << hr;
701 com_stream_.Release();
702 return false;
703 }
704
705 hr = CoUnmarshalInterface(com_stream_.get(), __uuidof(IAudioRenderClient),
706 audio_render_client->ReceiveVoid());
707 if (FAILED(hr)) {
708 DLOG(ERROR) << "Unmarshal failed IAudioRenderClient: " << std::hex << hr;
709 com_stream_.Release();
710 return false;
711 }
712
713 hr = CoUnmarshalInterface(com_stream_.get(), __uuidof(IAudioClock),
714 audio_clock->ReceiveVoid());
715 if (FAILED(hr))
716 DLOG(ERROR) << "Unmarshal failed IAudioClock: " << std::hex << hr;
717 com_stream_.Release();
718 return SUCCEEDED(hr);
719 }
720
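tommi's earlier request for a thread checker on these marshaling methods is marked Done above; in Chromium style that guard would look roughly like the sketch below, with the checker constructed on the creating thread and asserted at the top of MarshalComPointers(). The class and member names here are illustrative, not taken from the final patch set:

#include "base/logging.h"
#include "base/threading/thread_checker.h"

// Hedged sketch: a checker bound to the thread that constructs it, used to
// DCHECK that marshaling only ever happens on the creating thread.
class MarshalingThreadGuard {
 public:
  void AssertCalledOnCreatingThread() const {
    DCHECK(thread_checker_.CalledOnValidThread());
  }

 private:
  base::ThreadChecker thread_checker_;
};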
625 } // namespace media 721 } // namespace media