OLD | NEW |
---|---|
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "media/audio/win/audio_low_latency_output_win.h" | 5 #include "media/audio/win/audio_low_latency_output_win.h" |
6 | 6 |
7 #include <Functiondiscoverykeys_devpkey.h> | 7 #include <Functiondiscoverykeys_devpkey.h> |
8 | 8 |
9 #include "base/command_line.h" | |
9 #include "base/logging.h" | 10 #include "base/logging.h" |
10 #include "base/memory/scoped_ptr.h" | 11 #include "base/memory/scoped_ptr.h" |
11 #include "base/utf_string_conversions.h" | 12 #include "base/utf_string_conversions.h" |
12 #include "media/audio/audio_util.h" | 13 #include "media/audio/audio_util.h" |
13 #include "media/audio/win/audio_manager_win.h" | 14 #include "media/audio/win/audio_manager_win.h" |
14 #include "media/audio/win/avrt_wrapper_win.h" | 15 #include "media/audio/win/avrt_wrapper_win.h" |
16 #include "media/base/media_switches.h" | |
15 | 17 |
16 using base::win::ScopedComPtr; | 18 using base::win::ScopedComPtr; |
17 using base::win::ScopedCOMInitializer; | 19 using base::win::ScopedCOMInitializer; |
20 using base::win::ScopedCoMem; | |
18 | 21 |
19 namespace media { | 22 namespace media { |
20 | 23 |
24 AUDCLNT_SHAREMODE GetShareModeImpl() { | |
25 const CommandLine* cmd_line = CommandLine::ForCurrentProcess(); | |
26 if (cmd_line->HasSwitch(switches::kEnableExclusiveAudio)) | |
27 return AUDCLNT_SHAREMODE_EXCLUSIVE; | |
28 return AUDCLNT_SHAREMODE_SHARED; | |
29 } | |
30 | |
31 // static | |
32 AUDCLNT_SHAREMODE WASAPIAudioOutputStream::GetShareMode() { | |
33 static const AUDCLNT_SHAREMODE kShareMode = GetShareModeImpl(); | |
scherkus (not reviewing) 2012/07/26 17:06:41: don't bother w/ the static and simply inline the I
henrika (OOO until Aug 14) 2012/07/27 08:27:39: Done.
| |
34 return kShareMode; | |
35 } | |
36 | |
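Per the review exchange above, the separate Impl helper was folded away. A minimal sketch of what the merged GetShareMode() plausibly looks like after the change; the final version is not visible in this diff, so treat the placement as an assumption:

```cpp
// Sketch only: the GetShareModeImpl() body inlined into the static member,
// as the review comment requests. Requires base/command_line.h and
// media/base/media_switches.h.
// static
AUDCLNT_SHAREMODE WASAPIAudioOutputStream::GetShareMode() {
  const CommandLine* cmd_line = CommandLine::ForCurrentProcess();
  if (cmd_line->HasSwitch(switches::kEnableExclusiveAudio))
    return AUDCLNT_SHAREMODE_EXCLUSIVE;
  return AUDCLNT_SHAREMODE_SHARED;
}
```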
21 WASAPIAudioOutputStream::WASAPIAudioOutputStream(AudioManagerWin* manager, | 37 WASAPIAudioOutputStream::WASAPIAudioOutputStream(AudioManagerWin* manager, |
22 const AudioParameters& params, | 38 const AudioParameters& params, |
23 ERole device_role) | 39 ERole device_role) |
24 : com_init_(ScopedCOMInitializer::kMTA), | 40 : com_init_(ScopedCOMInitializer::kMTA), |
25 creating_thread_id_(base::PlatformThread::CurrentId()), | 41 creating_thread_id_(base::PlatformThread::CurrentId()), |
26 manager_(manager), | 42 manager_(manager), |
27 render_thread_(NULL), | 43 render_thread_(NULL), |
28 opened_(false), | 44 opened_(false), |
29 started_(false), | 45 started_(false), |
30 restart_rendering_mode_(false), | 46 restart_rendering_mode_(false), |
31 volume_(1.0), | 47 volume_(1.0), |
32 endpoint_buffer_size_frames_(0), | 48 endpoint_buffer_size_frames_(0), |
33 device_role_(device_role), | 49 device_role_(device_role), |
50 share_mode_(GetShareMode()), | |
34 num_written_frames_(0), | 51 num_written_frames_(0), |
35 source_(NULL) { | 52 source_(NULL) { |
36 CHECK(com_init_.succeeded()); | 53 CHECK(com_init_.succeeded()); |
37 DCHECK(manager_); | 54 DCHECK(manager_); |
38 | 55 |
39 // Load the Avrt DLL if not already loaded. Required to support MMCSS. | 56 // Load the Avrt DLL if not already loaded. Required to support MMCSS. |
40 bool avrt_init = avrt::Initialize(); | 57 bool avrt_init = avrt::Initialize(); |
41 DCHECK(avrt_init) << "Failed to load the avrt.dll"; | 58 DCHECK(avrt_init) << "Failed to load the avrt.dll"; |
42 | 59 |
60 if (share_mode() == AUDCLNT_SHAREMODE_EXCLUSIVE) { | |
61 VLOG(1) << ">> Note that EXCLUSIVE MODE is enabled <<"; | |
62 } | |
63 | |
43 // Set up the desired render format specified by the client. | 64 // Set up the desired render format specified by the client. |
44 format_.nSamplesPerSec = params.sample_rate(); | 65 format_.nSamplesPerSec = params.sample_rate(); |
45 format_.wFormatTag = WAVE_FORMAT_PCM; | 66 format_.wFormatTag = WAVE_FORMAT_PCM; |
46 format_.wBitsPerSample = params.bits_per_sample(); | 67 format_.wBitsPerSample = params.bits_per_sample(); |
47 format_.nChannels = params.channels(); | 68 format_.nChannels = params.channels(); |
48 format_.nBlockAlign = (format_.wBitsPerSample / 8) * format_.nChannels; | 69 format_.nBlockAlign = (format_.wBitsPerSample / 8) * format_.nChannels; |
49 format_.nAvgBytesPerSec = format_.nSamplesPerSec * format_.nBlockAlign; | 70 format_.nAvgBytesPerSec = format_.nSamplesPerSec * format_.nBlockAlign; |
50 format_.cbSize = 0; | 71 format_.cbSize = 0; |
51 | 72 |
52 // Size in bytes of each audio frame. | 73 // Size in bytes of each audio frame. |
(...skipping 27 matching lines...) | |
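As a worked example of the WAVEFORMATEX arithmetic in the constructor above, assuming a hypothetical stereo, 16-bit, 48 kHz client format (values chosen for illustration, not taken from the patch):

```cpp
#include <windows.h>  // WAVEFORMATEX, WAVE_FORMAT_PCM

WAVEFORMATEX format = {};
format.wFormatTag = WAVE_FORMAT_PCM;
format.nSamplesPerSec = 48000;  // assumed sample rate
format.wBitsPerSample = 16;     // assumed sample size
format.nChannels = 2;           // assumed channel count
// Bytes per audio frame: (16 / 8) * 2 = 4.
format.nBlockAlign = (format.wBitsPerSample / 8) * format.nChannels;
// Byte rate: 48000 * 4 = 192000 bytes per second.
format.nAvgBytesPerSec = format.nSamplesPerSec * format.nBlockAlign;
format.cbSize = 0;  // no extra format information for plain PCM
```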
80 WASAPIAudioOutputStream::~WASAPIAudioOutputStream() {} | 101 WASAPIAudioOutputStream::~WASAPIAudioOutputStream() {} |
81 | 102 |
82 bool WASAPIAudioOutputStream::Open() { | 103 bool WASAPIAudioOutputStream::Open() { |
83 DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_); | 104 DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_); |
84 if (opened_) | 105 if (opened_) |
85 return true; | 106 return true; |
86 | 107 |
87 // Create an IMMDeviceEnumerator interface and obtain a reference to | 108 // Create an IMMDeviceEnumerator interface and obtain a reference to |
88 // the IMMDevice interface of the default rendering device with the | 109 // the IMMDevice interface of the default rendering device with the |
89 // specified role. | 110 // specified role. |
90 HRESULT hr = SetRenderDevice(device_role_); | 111 HRESULT hr = SetRenderDevice(); |
91 if (FAILED(hr)) { | 112 if (FAILED(hr)) { |
92 return false; | 113 return false; |
93 } | 114 } |
94 | 115 |
95 // Obtain an IAudioClient interface which enables us to create and initialize | 116 // Obtain an IAudioClient interface which enables us to create and initialize |
96 // an audio stream between an audio application and the audio engine. | 117 // an audio stream between an audio application and the audio engine. |
97 hr = ActivateRenderDevice(); | 118 hr = ActivateRenderDevice(); |
98 if (FAILED(hr)) { | 119 if (FAILED(hr)) { |
99 return false; | 120 return false; |
100 } | 121 } |
101 | 122 |
102 // Retrieve the stream format which the audio engine uses for its internal | |
103 // processing/mixing of shared-mode streams. | |
104 hr = GetAudioEngineStreamFormat(); | |
105 if (FAILED(hr)) { | |
106 return false; | |
107 } | |
108 | |
109 // Verify that the selected audio endpoint supports the specified format | 123 // Verify that the selected audio endpoint supports the specified format |
110 // set during construction. | 124 // set during construction. |
125 // In exclusive mode, the client can choose to open the stream in any audio | |
126 // format that the endpoint device supports. In shared mode, the client must | |
127 // open the stream in the mix format that is currently in use by the audio | |
128 // engine (or a format that is similar to the mix format). The audio engine's | |
129 // input streams and the output mix from the engine are all in this format. | |
111 if (!DesiredFormatIsSupported()) { | 130 if (!DesiredFormatIsSupported()) { |
112 return false; | 131 return false; |
113 } | 132 } |
114 | 133 |
115 // Initialize the audio stream between the client and the device using | 134 // Initialize the audio stream between the client and the device using |
116 // shared mode and the lowest possible glitch-free latency. | 135 // shared or exclusive mode and the lowest possible glitch-free latency. |
136 // We will enter different code paths depending on the specified share mode. | |
117 hr = InitializeAudioEngine(); | 137 hr = InitializeAudioEngine(); |
118 if (FAILED(hr)) { | 138 if (FAILED(hr)) { |
119 return false; | 139 return false; |
120 } | 140 } |
121 | 141 |
122 // Register this client as an IMMNotificationClient implementation. | 142 // Register this client as an IMMNotificationClient implementation. |
123 // Only OnDefaultDeviceChanged() and OnDeviceStateChanged() are | 143 // Only OnDefaultDeviceChanged() and OnDeviceStateChanged() are |
124 // non-trivial. | 144 // non-trivial. |
125 hr = device_enumerator_->RegisterEndpointNotificationCallback(this); | 145 hr = device_enumerator_->RegisterEndpointNotificationCallback(this); |
126 | 146 |
(...skipping 95 matching lines...) | |
222 // Flush all pending data and reset the audio clock stream position to 0. | 242 // Flush all pending data and reset the audio clock stream position to 0. |
223 hr = audio_client_->Reset(); | 243 hr = audio_client_->Reset(); |
224 if (FAILED(hr)) { | 244 if (FAILED(hr)) { |
225 DLOG_IF(ERROR, hr != AUDCLNT_E_NOT_INITIALIZED) | 245 DLOG_IF(ERROR, hr != AUDCLNT_E_NOT_INITIALIZED) |
226 << "Failed to reset streaming: " << std::hex << hr; | 246 << "Failed to reset streaming: " << std::hex << hr; |
227 } | 247 } |
228 | 248 |
229 // Extra safety check to ensure that the buffers are cleared. | 249 // Extra safety check to ensure that the buffers are cleared. |
230 // If the buffers are not cleared correctly, the next call to Start() | 250 // If the buffers are not cleared correctly, the next call to Start() |
231 // would fail with AUDCLNT_E_BUFFER_ERROR at IAudioRenderClient::GetBuffer(). | 251 // would fail with AUDCLNT_E_BUFFER_ERROR at IAudioRenderClient::GetBuffer(). |
232 UINT32 num_queued_frames = 0; | 252 // This check is only needed for shared-mode streams. |
233 audio_client_->GetCurrentPadding(&num_queued_frames); | 253 if (share_mode() == AUDCLNT_SHAREMODE_SHARED) { |
234 DCHECK_EQ(0u, num_queued_frames); | 254 UINT32 num_queued_frames = 0; |
255 audio_client_->GetCurrentPadding(&num_queued_frames); | |
256 DCHECK_EQ(0u, num_queued_frames); | |
257 } | |
235 | 258 |
236 // Ensure that we don't quit the main thread loop immediately next | 259 // Ensure that we don't quit the main thread loop immediately next |
237 // time Start() is called. | 260 // time Start() is called. |
238 ResetEvent(stop_render_event_.Get()); | 261 ResetEvent(stop_render_event_.Get()); |
239 | 262 |
240 started_ = false; | 263 started_ = false; |
241 } | 264 } |
242 | 265 |
243 void WASAPIAudioOutputStream::Close() { | 266 void WASAPIAudioOutputStream::Close() { |
244 DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_); | 267 DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_); |
(...skipping 24 matching lines...) | |
269 volume_ = volume_float; | 292 volume_ = volume_float; |
270 } | 293 } |
271 | 294 |
272 void WASAPIAudioOutputStream::GetVolume(double* volume) { | 295 void WASAPIAudioOutputStream::GetVolume(double* volume) { |
273 DVLOG(1) << "GetVolume()"; | 296 DVLOG(1) << "GetVolume()"; |
274 *volume = static_cast<double>(volume_); | 297 *volume = static_cast<double>(volume_); |
275 } | 298 } |
276 | 299 |
277 // static | 300 // static |
278 int WASAPIAudioOutputStream::HardwareSampleRate(ERole device_role) { | 301 int WASAPIAudioOutputStream::HardwareSampleRate(ERole device_role) { |
302 // Calling this function only makes sense for shared-mode streams, since |
303 // if the device is opened in exclusive mode, the application-specified |
304 // format is used instead. However, the result of this method can |
305 // be useful for testing purposes, so we don't DCHECK here. |
306 DLOG_IF(WARNING, GetShareMode() == AUDCLNT_SHAREMODE_EXCLUSIVE) << | |
307 "The mixing sample rate will be ignored for exclusive-mode streams."; | |
308 | |
279 // It is assumed that this static method is called from a COM thread, i.e., | 309 // It is assumed that this static method is called from a COM thread, i.e., |
280 // CoInitializeEx() is not called here again to avoid STA/MTA conflicts. | 310 // CoInitializeEx() is not called here again to avoid STA/MTA conflicts. |
281 ScopedComPtr<IMMDeviceEnumerator> enumerator; | 311 ScopedComPtr<IMMDeviceEnumerator> enumerator; |
282 HRESULT hr = CoCreateInstance(__uuidof(MMDeviceEnumerator), | 312 HRESULT hr = CoCreateInstance(__uuidof(MMDeviceEnumerator), |
283 NULL, | 313 NULL, |
284 CLSCTX_INPROC_SERVER, | 314 CLSCTX_INPROC_SERVER, |
285 __uuidof(IMMDeviceEnumerator), | 315 __uuidof(IMMDeviceEnumerator), |
286 enumerator.ReceiveVoid()); | 316 enumerator.ReceiveVoid()); |
287 if (FAILED(hr)) { | 317 if (FAILED(hr)) { |
288 NOTREACHED() << "error code: " << std::hex << hr; | 318 NOTREACHED() << "error code: " << std::hex << hr; |
(...skipping 15 matching lines...) | |
304 ScopedComPtr<IAudioClient> audio_client; | 334 ScopedComPtr<IAudioClient> audio_client; |
305 hr = endpoint_device->Activate(__uuidof(IAudioClient), | 335 hr = endpoint_device->Activate(__uuidof(IAudioClient), |
306 CLSCTX_INPROC_SERVER, | 336 CLSCTX_INPROC_SERVER, |
307 NULL, | 337 NULL, |
308 audio_client.ReceiveVoid()); | 338 audio_client.ReceiveVoid()); |
309 if (FAILED(hr)) { | 339 if (FAILED(hr)) { |
310 NOTREACHED() << "error code: " << std::hex << hr; | 340 NOTREACHED() << "error code: " << std::hex << hr; |
311 return 0.0; | 341 return 0.0; |
312 } | 342 } |
313 | 343 |
344 // Retrieve the stream format that the audio engine uses for its internal | |
345 // processing of shared-mode streams. | |
314 base::win::ScopedCoMem<WAVEFORMATEX> audio_engine_mix_format; | 346 base::win::ScopedCoMem<WAVEFORMATEX> audio_engine_mix_format; |
315 hr = audio_client->GetMixFormat(&audio_engine_mix_format); | 347 hr = audio_client->GetMixFormat(&audio_engine_mix_format); |
316 if (FAILED(hr)) { | 348 if (FAILED(hr)) { |
317 NOTREACHED() << "error code: " << std::hex << hr; | 349 NOTREACHED() << "error code: " << std::hex << hr; |
318 return 0.0; | 350 return 0.0; |
319 } | 351 } |
320 | 352 |
321 return static_cast<int>(audio_engine_mix_format->nSamplesPerSec); | 353 return static_cast<int>(audio_engine_mix_format->nSamplesPerSec); |
322 } | 354 } |
323 | 355 |
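A minimal, hypothetical call site for the static helper above; the eConsole role is an arbitrary choice for illustration, and the caller is responsible for COM initialization, as the comment in the function notes:

```cpp
// Sketch only: query the shared-mode mixing rate from a COM-initialized
// (MTA) thread. ScopedCOMInitializer matches the pattern already used
// elsewhere in this file.
base::win::ScopedCOMInitializer com_init(base::win::ScopedCOMInitializer::kMTA);
int mix_rate = media::WASAPIAudioOutputStream::HardwareSampleRate(eConsole);
DVLOG(1) << "shared-mode mixing sample rate: " << mix_rate << " [Hz]";
```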
(...skipping 63 matching lines...) | |
387 playing = false; | 419 playing = false; |
388 error = true; | 420 error = true; |
389 } | 421 } |
390 break; | 422 break; |
391 case WAIT_OBJECT_0 + 2: | 423 case WAIT_OBJECT_0 + 2: |
392 { | 424 { |
393 // |audio_samples_render_event_| has been set. | 425 // |audio_samples_render_event_| has been set. |
394 UINT32 num_queued_frames = 0; | 426 UINT32 num_queued_frames = 0; |
395 uint8* audio_data = NULL; | 427 uint8* audio_data = NULL; |
396 | 428 |
397 // Get the padding value which represents the amount of rendering | 429 // Contains how much new data we can write to the buffer without |
398 // data that is queued up to play in the endpoint buffer. | |
399 hr = audio_client_->GetCurrentPadding(&num_queued_frames); | |
400 | |
401 // Determine how much new data we can write to the buffer without | |
402 // the risk of overwriting previously written data that the audio | 430 // the risk of overwriting previously written data that the audio |
403 // engine has not yet read from the buffer. | 431 // engine has not yet read from the buffer. |
404 size_t num_available_frames = | 432 size_t num_available_frames = 0; |
405 endpoint_buffer_size_frames_ - num_queued_frames; | 433 |
434 if (share_mode() == AUDCLNT_SHAREMODE_SHARED) { | |
435 // Get the padding value which represents the amount of rendering | |
436 // data that is queued up to play in the endpoint buffer. | |
437 hr = audio_client_->GetCurrentPadding(&num_queued_frames); | |
438 num_available_frames = | |
439 endpoint_buffer_size_frames_ - num_queued_frames; | |
440 } else { | |
441 // While the stream is running, the system alternately sends one | |
442 // buffer or the other to the client. This form of double buffering | |
443 // is referred to as "ping-ponging". Each time the client receives | |
444 // a buffer from the system (triggers this event) the client must | |
445 // process the entire buffer. Calls to the GetCurrentPadding method | |
446 // are unnecessary because the packet size must always equal the | |
447 // buffer size. In contrast to the shared mode buffering scheme, | |
448 // the latency for an event-driven, exclusive-mode stream depends | |
449 // directly on the buffer size. | |
450 num_available_frames = endpoint_buffer_size_frames_; | |
451 } | |
406 | 452 |
407 // Check if there is enough available space to fit the packet size | 453 // Check if there is enough available space to fit the packet size |
408 // specified by the client. | 454 // specified by the client. |
409 if (FAILED(hr) || (num_available_frames < packet_size_frames_)) | 455 if (FAILED(hr) || (num_available_frames < packet_size_frames_)) |
410 continue; | 456 continue; |
411 | 457 |
412 // Derive the number of packets we need to get from the client to | 458 // Derive the number of packets we need to get from the client to |
413 // fill up the available area in the endpoint buffer. | 459 // fill up the available area in the endpoint buffer. |
460 // |num_packets| will always be one for exclusive-mode streams. | |
414 size_t num_packets = (num_available_frames / packet_size_frames_); | 461 size_t num_packets = (num_available_frames / packet_size_frames_); |
415 | 462 |
416 // Get data from the client/source. | 463 // Get data from the client/source. |
417 for (size_t n = 0; n < num_packets; ++n) { | 464 for (size_t n = 0; n < num_packets; ++n) { |
418 // Grab all available space in the rendering endpoint buffer | 465 // Grab all available space in the rendering endpoint buffer |
419 // into which the client can write a data packet. | 466 // into which the client can write a data packet. |
420 hr = audio_render_client_->GetBuffer(packet_size_frames_, | 467 hr = audio_render_client_->GetBuffer(packet_size_frames_, |
421 &audio_data); | 468 &audio_data); |
422 if (FAILED(hr)) { | 469 if (FAILED(hr)) { |
423 DLOG(ERROR) << "Failed to use rendering audio buffer: " | 470 DLOG(ERROR) << "Failed to use rendering audio buffer: " |
(...skipping 80 matching lines...) | |
504 PLOG(WARNING) << "Failed to disable MMCSS"; | 551 PLOG(WARNING) << "Failed to disable MMCSS"; |
505 } | 552 } |
506 } | 553 } |
507 | 554 |
508 void WASAPIAudioOutputStream::HandleError(HRESULT err) { | 555 void WASAPIAudioOutputStream::HandleError(HRESULT err) { |
509 NOTREACHED() << "Error code: " << std::hex << err; | 556 NOTREACHED() << "Error code: " << std::hex << err; |
510 if (source_) | 557 if (source_) |
511 source_->OnError(this, static_cast<int>(err)); | 558 source_->OnError(this, static_cast<int>(err)); |
512 } | 559 } |
513 | 560 |
514 HRESULT WASAPIAudioOutputStream::SetRenderDevice(ERole device_role) { | 561 HRESULT WASAPIAudioOutputStream::SetRenderDevice() { |
562 ScopedComPtr<IMMDeviceEnumerator> device_enumerator; | |
563 ScopedComPtr<IMMDevice> endpoint_device; | |
564 | |
515 // Create the IMMDeviceEnumerator interface. | 565 // Create the IMMDeviceEnumerator interface. |
516 HRESULT hr = CoCreateInstance(__uuidof(MMDeviceEnumerator), | 566 HRESULT hr = CoCreateInstance(__uuidof(MMDeviceEnumerator), |
517 NULL, | 567 NULL, |
518 CLSCTX_INPROC_SERVER, | 568 CLSCTX_INPROC_SERVER, |
519 __uuidof(IMMDeviceEnumerator), | 569 __uuidof(IMMDeviceEnumerator), |
520 device_enumerator_.ReceiveVoid()); | 570 device_enumerator.ReceiveVoid()); |
521 if (SUCCEEDED(hr)) { | 571 if (SUCCEEDED(hr)) { |
522 // Retrieve the default render audio endpoint for the specified role. | 572 // Retrieve the default render audio endpoint for the specified role. |
523 // Note that, in Windows Vista, the MMDevice API supports device roles | 573 // Note that, in Windows Vista, the MMDevice API supports device roles |
524 // but the system-supplied user interface programs do not. | 574 // but the system-supplied user interface programs do not. |
525 hr = device_enumerator_->GetDefaultAudioEndpoint( | 575 hr = device_enumerator->GetDefaultAudioEndpoint( |
526 eRender, device_role, endpoint_device_.Receive()); | 576 eRender, device_role_, endpoint_device.Receive()); |
527 if (FAILED(hr)) | 577 if (FAILED(hr)) |
528 return hr; | 578 return hr; |
529 | 579 |
530 // Verify that the audio endpoint device is active. That is, the audio | 580 // Verify that the audio endpoint device is active. That is, the audio |
531 // adapter that connects to the endpoint device is present and enabled. | 581 // adapter that connects to the endpoint device is present and enabled. |
532 DWORD state = DEVICE_STATE_DISABLED; | 582 DWORD state = DEVICE_STATE_DISABLED; |
533 hr = endpoint_device_->GetState(&state); | 583 hr = endpoint_device->GetState(&state); |
534 if (SUCCEEDED(hr)) { | 584 if (SUCCEEDED(hr)) { |
535 if (!(state & DEVICE_STATE_ACTIVE)) { | 585 if (!(state & DEVICE_STATE_ACTIVE)) { |
536 DLOG(ERROR) << "Selected render device is not active."; | 586 DLOG(ERROR) << "Selected render device is not active."; |
537 hr = E_ACCESSDENIED; | 587 hr = E_ACCESSDENIED; |
538 } | 588 } |
539 } | 589 } |
540 } | 590 } |
541 | 591 |
592 if (SUCCEEDED(hr)) { | |
593 device_enumerator_ = device_enumerator; | |
594 endpoint_device_ = endpoint_device; | |
595 } | |
596 | |
597 return hr; | |
598 } | |
599 | |
600 HRESULT WASAPIAudioOutputStream::ActivateRenderDevice() { | |
601 ScopedComPtr<IAudioClient> audio_client; | |
602 | |
603 // Creates and activates an IAudioClient COM object given the selected | |
604 // render endpoint device. | |
605 HRESULT hr = endpoint_device_->Activate(__uuidof(IAudioClient), | |
606 CLSCTX_INPROC_SERVER, | |
607 NULL, | |
608 audio_client.ReceiveVoid()); | |
609 if (SUCCEEDED(hr)) { | |
610 // Retrieve the stream format that the audio engine uses for its internal | |
611 // processing/mixing of shared-mode streams. | |
612 audio_engine_mix_format_.Reset(NULL); | |
613 hr = audio_client->GetMixFormat(&audio_engine_mix_format_); | |
614 | |
615 if (SUCCEEDED(hr)) { | |
616 audio_client_ = audio_client; | |
617 } | |
618 } | |
619 | |
542 return hr; | 620 return hr; |
543 } | 621 } |
544 | 622 |
545 HRESULT WASAPIAudioOutputStream::ActivateRenderDevice() { | |
546 // Creates and activates an IAudioClient COM object given the selected | |
547 // render endpoint device. | |
548 HRESULT hr = endpoint_device_->Activate(__uuidof(IAudioClient), | |
549 CLSCTX_INPROC_SERVER, | |
550 NULL, | |
551 audio_client_.ReceiveVoid()); | |
552 return hr; | |
553 } | |
554 | |
555 HRESULT WASAPIAudioOutputStream::GetAudioEngineStreamFormat() { | |
556 // Retrieve the stream format that the audio engine uses for its internal | |
557 // processing/mixing of shared-mode streams. | |
558 return audio_client_->GetMixFormat(&audio_engine_mix_format_); | |
559 } | |
560 | |
561 bool WASAPIAudioOutputStream::DesiredFormatIsSupported() { | 623 bool WASAPIAudioOutputStream::DesiredFormatIsSupported() { |
624 // Determine, before calling IAudioClient::Initialize(), whether the audio | |
625 // engine supports a particular stream format. | |
562 // In shared mode, the audio engine always supports the mix format, | 626 // In shared mode, the audio engine always supports the mix format, |
563 // which is stored in the |audio_engine_mix_format_| member. In addition, | 627 // which is stored in the |audio_engine_mix_format_| member, and it is also |
564 // the audio engine *might* support similar formats that have the same | 628 // possible to receive a proposed (closest) format if the current format is |
565 // sample rate and number of channels as the mix format but differ in | 629 // not supported. |
566 // the representation of audio sample values. | |
567 base::win::ScopedCoMem<WAVEFORMATEX> closest_match; | 630 base::win::ScopedCoMem<WAVEFORMATEX> closest_match; |
568 HRESULT hr = audio_client_->IsFormatSupported(AUDCLNT_SHAREMODE_SHARED, | 631 HRESULT hr = audio_client_->IsFormatSupported(share_mode(), |
569 &format_, | 632 &format_, |
570 &closest_match); | 633 &closest_match); |
634 | |
635 // This log can only be triggered for shared mode. | |
571 DLOG_IF(ERROR, hr == S_FALSE) << "Format is not supported " | 636 DLOG_IF(ERROR, hr == S_FALSE) << "Format is not supported " |
572 << "but a closest match exists."; | 637 << "but a closest match exists."; |
638 // This log can be triggered both for shared and exclusive modes. | |
639 DLOG_IF(ERROR, hr == AUDCLNT_E_UNSUPPORTED_FORMAT) << "Unsupported format."; | |
640 if (hr == S_FALSE) { | |
641 DVLOG(1) << "wFormatTag : " << closest_match->wFormatTag; | |
642 DVLOG(1) << "nChannels : " << closest_match->nChannels; | |
643 DVLOG(1) << "nSamplesPerSec: " << closest_match->nSamplesPerSec; | |
644 DVLOG(1) << "wBitsPerSample: " << closest_match->wBitsPerSample; | |
645 } | |
646 | |
573 return (hr == S_OK); | 647 return (hr == S_OK); |
574 } | 648 } |
575 | 649 |
576 HRESULT WASAPIAudioOutputStream::InitializeAudioEngine() { | 650 HRESULT WASAPIAudioOutputStream::InitializeAudioEngine() { |
651 #if !defined(NDEBUG) | |
652 // The period between processing passes by the audio engine is fixed for a | |
653 // particular audio endpoint device and represents the smallest processing | |
654 // quantum for the audio engine. This period plus the stream latency between | |
655 // the buffer and endpoint device represents the minimum possible latency | |
656 // that an audio application can achieve in shared mode. | |
657 { | |
658 REFERENCE_TIME default_device_period = 0; | |
659 REFERENCE_TIME minimum_device_period = 0; | |
660 HRESULT hr_dbg = audio_client_->GetDevicePeriod(&default_device_period, | |
661 &minimum_device_period); | |
662 if (SUCCEEDED(hr_dbg)) { | |
663 // Shared mode device period. | |
664 DVLOG(1) << "shared mode (default) device period: " | |
665 << static_cast<double>(default_device_period / 10000.0) | |
666 << " [ms]"; | |
667 // Exclusive mode device period. | |
668 DVLOG(1) << "exclusive mode (minimum) device period: " | |
669 << static_cast<double>(minimum_device_period / 10000.0) | |
670 << " [ms]"; | |
671 } | |
672 | |
673 REFERENCE_TIME latency = 0; | |
674 hr_dbg = audio_client_->GetStreamLatency(&latency); | |
675 if (SUCCEEDED(hr_dbg)) { | |
676 DVLOG(1) << "stream latency: " << static_cast<double>(latency / 10000.0) | |
677 << " [ms]"; | |
678 } | |
679 } | |
680 #endif | |
681 | |
682 HRESULT hr = S_FALSE; | |
683 | |
684 // Perform different initialization depending on if the device shall be | |
685 // opened in shared mode or in exclusive mode. | |
686 hr = (share_mode() == AUDCLNT_SHAREMODE_SHARED) ? | |
687 SharedModeInitialization() : ExclusiveModeInitialization(); | |
688 if (FAILED(hr)) { | |
689 LOG(WARNING) << "IAudioClient::Initialize() failed: " << std::hex << hr; | |
690 return hr; | |
691 } | |
692 | |
693 // Retrieve the length of the endpoint buffer. The buffer length represents | |
694 // the maximum amount of rendering data that the client can write to | |
695 // the endpoint buffer during a single processing pass. | |
696 // A typical value is 960 audio frames <=> 20ms @ 48kHz sample rate. | |
697 hr = audio_client_->GetBufferSize(&endpoint_buffer_size_frames_); | |
698 if (FAILED(hr)) | |
699 return hr; | |
700 DVLOG(1) << "endpoint buffer size: " << endpoint_buffer_size_frames_ | |
701 << " [frames]"; | |
702 | |
703 // The buffer scheme for exclusive mode streams is not designed for max | |
704 // flexibility. We only allow a "perfect match" between the packet size set | |
705 // by the user and the actual endpoint buffer size. | |
706 if (share_mode() == AUDCLNT_SHAREMODE_EXCLUSIVE && | |
707 endpoint_buffer_size_frames_ != packet_size_frames_) { | |
708 hr = AUDCLNT_E_INVALID_SIZE; | |
709 DLOG(ERROR) << "AUDCLNT_E_INVALID_SIZE"; | |
710 return hr; | |
711 } | |
712 | |
713 // Set the event handle that the audio engine will signal each time | |
714 // a buffer becomes ready to be processed by the client. | |
715 hr = audio_client_->SetEventHandle(audio_samples_render_event_.Get()); | |
716 if (FAILED(hr)) | |
717 return hr; | |
718 | |
719 // Get access to the IAudioRenderClient interface. This interface | |
720 // enables us to write output data to a rendering endpoint buffer. | |
721 // The methods in this interface manage the movement of data packets | |
722 // that contain audio-rendering data. | |
723 hr = audio_client_->GetService(__uuidof(IAudioRenderClient), | |
724 audio_render_client_.ReceiveVoid()); | |
725 return hr; | |
726 } | |
727 | |
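To make the exclusive-mode "perfect match" requirement above concrete, here is the frames-to-duration arithmetic with assumed numbers (10 ms packets at 48 kHz); none of these values come from the patch itself:

```cpp
// Sketch only: an exclusive-mode request of 480 frames (10 ms @ 48 kHz).
const UINT32 kPacketSizeFrames = 480;  // assumed packet size
const UINT32 kSampleRate = 48000;      // assumed device rate
// REFERENCE_TIME counts 100 ns units, so 10 ms == 100000 units.
REFERENCE_TIME buffer_duration = static_cast<REFERENCE_TIME>(
    (10000000.0 * kPacketSizeFrames / kSampleRate) + 0.5);
// After Initialize(), GetBufferSize() must report exactly 480 frames;
// otherwise InitializeAudioEngine() above bails out with
// AUDCLNT_E_INVALID_SIZE.
```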
728 HRESULT WASAPIAudioOutputStream::SharedModeInitialization() { | |
729 DCHECK_EQ(share_mode(), AUDCLNT_SHAREMODE_SHARED); | |
730 | |
577 // TODO(henrika): this buffer scheme is still under development. | 731 // TODO(henrika): this buffer scheme is still under development. |
578 // The exact details are yet to be determined based on tests with different | 732 // The exact details are yet to be determined based on tests with different |
579 // audio clients. | 733 // audio clients. |
580 int glitch_free_buffer_size_ms = static_cast<int>(packet_size_ms_ + 0.5); | 734 int glitch_free_buffer_size_ms = static_cast<int>(packet_size_ms_ + 0.5); |
581 if (audio_engine_mix_format_->nSamplesPerSec == 48000) { | 735 if (audio_engine_mix_format_->nSamplesPerSec == 48000) { |
582 // Initial tests have shown that we have to add 10 ms extra to | 736 // Initial tests have shown that we have to add 10 ms extra to |
583 // ensure that we don't run empty for any packet size. | 737 // ensure that we don't run empty for any packet size. |
584 glitch_free_buffer_size_ms += 10; | 738 glitch_free_buffer_size_ms += 10; |
585 } else if (audio_engine_mix_format_->nSamplesPerSec == 44100) { | 739 } else if (audio_engine_mix_format_->nSamplesPerSec == 44100) { |
586 // Initial tests have shown that we have to add 20 ms extra to | 740 // Initial tests have shown that we have to add 20 ms extra to |
587 // ensure that we don't run empty for any packet size. | 741 // ensure that we don't run empty for any packet size. |
588 glitch_free_buffer_size_ms += 20; | 742 glitch_free_buffer_size_ms += 20; |
589 } else { | 743 } else { |
590 glitch_free_buffer_size_ms += 20; | 744 glitch_free_buffer_size_ms += 20; |
591 } | 745 } |
592 DVLOG(1) << "glitch_free_buffer_size_ms: " << glitch_free_buffer_size_ms; | 746 DVLOG(1) << "glitch_free_buffer_size_ms: " << glitch_free_buffer_size_ms; |
593 REFERENCE_TIME requested_buffer_duration_hns = | 747 REFERENCE_TIME requested_buffer_duration = |
594 static_cast<REFERENCE_TIME>(glitch_free_buffer_size_ms * 10000); | 748 static_cast<REFERENCE_TIME>(glitch_free_buffer_size_ms * 10000); |
595 | 749 |
596 // Initialize the audio stream between the client and the device. | 750 // Initialize the audio stream between the client and the device. |
597 // We connect indirectly through the audio engine by using shared mode | 751 // We connect indirectly through the audio engine by using shared mode |
598 // and WASAPI is initialized in an event driven mode. | 752 // and WASAPI is initialized in an event driven mode. |
599 // Note that this API ensures that the buffer is never smaller than the | 753 // Note that this API ensures that the buffer is never smaller than the |
600 // minimum buffer size needed to ensure glitch-free rendering. | 754 // minimum buffer size needed to ensure glitch-free rendering. |
601 // If we request a buffer size that is smaller than the audio engine's | 755 // If we request a buffer size that is smaller than the audio engine's |
602 // minimum required buffer size, the method sets the buffer size to this | 756 // minimum required buffer size, the method sets the buffer size to this |
603 // minimum buffer size rather than to the buffer size requested. | 757 // minimum buffer size rather than to the buffer size requested. |
604 HRESULT hr = audio_client_->Initialize(AUDCLNT_SHAREMODE_SHARED, | 758 HRESULT hr = audio_client_->Initialize(AUDCLNT_SHAREMODE_SHARED, |
605 AUDCLNT_STREAMFLAGS_EVENTCALLBACK | | 759 AUDCLNT_STREAMFLAGS_EVENTCALLBACK | |
606 AUDCLNT_STREAMFLAGS_NOPERSIST, | 760 AUDCLNT_STREAMFLAGS_NOPERSIST, |
607 requested_buffer_duration_hns, | 761 requested_buffer_duration, |
608 0, | 762 0, |
609 &format_, | 763 &format_, |
610 NULL); | 764 NULL); |
611 if (FAILED(hr)) | |
612 return hr; | |
613 | |
614 // Retrieve the length of the endpoint buffer shared between the client | |
615 // and the audio engine. The buffer length determines | |
616 // the maximum amount of rendering data that the client can write to | |
617 // the endpoint buffer during a single processing pass. | |
618 // A typical value is 960 audio frames <=> 20ms @ 48kHz sample rate. | |
619 hr = audio_client_->GetBufferSize(&endpoint_buffer_size_frames_); | |
620 if (FAILED(hr)) | |
621 return hr; | |
622 DVLOG(1) << "endpoint buffer size: " << endpoint_buffer_size_frames_ | |
623 << " [frames]"; | |
624 #ifndef NDEBUG | |
625 // The period between processing passes by the audio engine is fixed for a | |
626 // particular audio endpoint device and represents the smallest processing | |
627 // quantum for the audio engine. This period plus the stream latency between | |
628 // the buffer and endpoint device represents the minimum possible latency | |
629 // that an audio application can achieve in shared mode. | |
630 REFERENCE_TIME default_device_period = 0; | |
631 REFERENCE_TIME minimum_device_period = 0; | |
632 HRESULT hr_dbg = audio_client_->GetDevicePeriod(&default_device_period, | |
633 &minimum_device_period); | |
634 if (SUCCEEDED(hr_dbg)) { | |
635 // Shared mode device period. | |
636 DVLOG(1) << "default device period: " | |
637 << static_cast<double>(default_device_period / 10000.0) | |
638 << " [ms]"; | |
639 // Exclusive mode device period. | |
640 DVLOG(1) << "minimum device period: " | |
641 << static_cast<double>(minimum_device_period / 10000.0) | |
642 << " [ms]"; | |
643 } | |
644 | |
645 REFERENCE_TIME latency = 0; | |
646 hr_dbg = audio_client_->GetStreamLatency(&latency); | |
647 if (SUCCEEDED(hr_dbg)) { | |
648 DVLOG(1) << "stream latency: " << static_cast<double>(latency / 10000.0) | |
649 << " [ms]"; | |
650 } | |
651 #endif | |
652 | |
653 // Set the event handle that the audio engine will signal each time | |
654 // a buffer becomes ready to be processed by the client. | |
655 hr = audio_client_->SetEventHandle(audio_samples_render_event_.Get()); | |
656 if (FAILED(hr)) | |
657 return hr; | |
658 | |
659 // Get access to the IAudioRenderClient interface. This interface | |
660 // enables us to write output data to a rendering endpoint buffer. | |
661 // The methods in this interface manage the movement of data packets | |
662 // that contain audio-rendering data. | |
663 hr = audio_client_->GetService(__uuidof(IAudioRenderClient), | |
664 audio_render_client_.ReceiveVoid()); | |
665 return hr; | 765 return hr; |
666 } | 766 } |
667 | 767 |
768 HRESULT WASAPIAudioOutputStream::ExclusiveModeInitialization() { | |
769 DCHECK_EQ(share_mode(), AUDCLNT_SHAREMODE_EXCLUSIVE); | |
770 | |
771 float f = (1000.0 * packet_size_frames_) / format_.nSamplesPerSec; | |
772 REFERENCE_TIME requested_buffer_duration = | |
773 static_cast<REFERENCE_TIME>(f * 10000.0 + 0.5); | |
774 | |
775 // Initialize the audio stream between the client and the device. | |
776 // For an exclusive-mode stream that uses event-driven buffering, the | |
777 // caller must specify nonzero values for hnsPeriodicity and | |
778 // hnsBufferDuration, and the values of these two parameters must be equal. | |
779 // The Initialize method allocates two buffers for the stream. Each buffer | |
780 // is equal in duration to the value of the hnsBufferDuration parameter. | |
781 // Following the Initialize call for a rendering stream, the caller should | |
782 // fill the first of the two buffers before starting the stream. | |
783 HRESULT hr = audio_client_->Initialize(AUDCLNT_SHAREMODE_EXCLUSIVE, | |
784 AUDCLNT_STREAMFLAGS_EVENTCALLBACK | | |
785 AUDCLNT_STREAMFLAGS_NOPERSIST, | |
786 requested_buffer_duration, | |
787 requested_buffer_duration, | |
788 &format_, | |
789 NULL); | |
790 if (FAILED(hr)) { | |
791 if (hr == AUDCLNT_E_BUFFER_SIZE_NOT_ALIGNED) { | |
792 LOG(ERROR) << "AUDCLNT_E_BUFFER_SIZE_NOT_ALIGNED"; | |
793 | |
794 UINT32 aligned_buffer_size = 0; | |
795 audio_client_->GetBufferSize(&aligned_buffer_size); | |
796 DVLOG(1) << "Use aligned buffer size instead: " << aligned_buffer_size; | |
797 audio_client_.Release(); | |
798 | |
799 // Calculate new aligned periodicity. Each unit of reference time | |
800 // is 100 nanoseconds. | |
801 REFERENCE_TIME aligned_buffer_duration = static_cast<REFERENCE_TIME>( | |
802 (10000000.0 * aligned_buffer_size / format_.nSamplesPerSec) + 0.5); | |
803 | |
804 // It is possible to re-activate and re-initialize the audio client | |
805 // at this stage but we bail out with an error code instead and | |
806 // combine it with a log message which informs about the suggested | |
807 // aligned buffer size which should be used instead. | |
808 DVLOG(1) << "aligned_buffer_duration: " | |
809 << static_cast<double>(aligned_buffer_duration / 10000.0) | |
810 << " [ms]"; | |
811 } else if (hr == AUDCLNT_E_INVALID_DEVICE_PERIOD) { | |
812 // We will get this error if we try to use a smaller buffer size than | |
813 // the minimum supported size (usually ~3ms on Windows 7). | |
814 LOG(ERROR) << "AUDCLNT_E_INVALID_DEVICE_PERIOD"; | |
815 } | |
816 } | |
817 | |
818 return hr; | |
819 } | |
820 | |
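A worked example of the realignment math in the AUDCLNT_E_BUFFER_SIZE_NOT_ALIGNED path above, with assumed 44.1 kHz numbers: requesting 441 frames (10 ms) on a device that requires 448-frame alignment makes GetBufferSize() return 448, and the suggested periodicity becomes roughly 10.16 ms:

```cpp
// Sketch only: hypothetical values for the misaligned-buffer error path.
UINT32 aligned_buffer_size = 448;  // as if returned by GetBufferSize()
REFERENCE_TIME aligned_buffer_duration = static_cast<REFERENCE_TIME>(
    (10000000.0 * aligned_buffer_size / 44100) + 0.5);
// 10000000 * 448 / 44100 + 0.5 ~= 101587 units of 100 ns ~= 10.16 ms.
```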
668 ULONG WASAPIAudioOutputStream::AddRef() { | 821 ULONG WASAPIAudioOutputStream::AddRef() { |
669 NOTREACHED() << "IMMNotificationClient should not use this method."; | 822 NOTREACHED() << "IMMNotificationClient should not use this method."; |
670 return 1; | 823 return 1; |
671 } | 824 } |
672 | 825 |
673 ULONG WASAPIAudioOutputStream::Release() { | 826 ULONG WASAPIAudioOutputStream::Release() { |
674 NOTREACHED() << "IMMNotificationClient should not use this method."; | 827 NOTREACHED() << "IMMNotificationClient should not use this method."; |
675 return 1; | 828 return 1; |
676 } | 829 } |
677 | 830 |
(...skipping 145 matching lines...) | |
823 // are now re-initiated and it is now possible to re-start audio rendering. | 976 // are now re-initiated and it is now possible to re-start audio rendering. |
824 | 977 |
825 // Start rendering again using the new default audio endpoint. | 978 // Start rendering again using the new default audio endpoint. |
826 hr = audio_client_->Start(); | 979 hr = audio_client_->Start(); |
827 | 980 |
828 restart_rendering_mode_ = false; | 981 restart_rendering_mode_ = false; |
829 return SUCCEEDED(hr); | 982 return SUCCEEDED(hr); |
830 } | 983 } |
831 | 984 |
832 } // namespace media | 985 } // namespace media |