OLD | NEW |
---|---|
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "media/audio/win/audio_low_latency_output_win.h" | 5 #include "media/audio/win/audio_low_latency_output_win.h" |
6 | 6 |
7 #include <Functiondiscoverykeys_devpkey.h> | 7 #include <Functiondiscoverykeys_devpkey.h> |
8 | 8 |
9 #include "base/logging.h" | 9 #include "base/logging.h" |
10 #include "base/memory/scoped_ptr.h" | 10 #include "base/memory/scoped_ptr.h" |
11 #include "base/utf_string_conversions.h" | 11 #include "base/utf_string_conversions.h" |
12 #include "media/audio/audio_util.h" | 12 #include "media/audio/audio_util.h" |
13 #include "media/audio/win/audio_manager_win.h" | 13 #include "media/audio/win/audio_manager_win.h" |
14 #include "media/audio/win/avrt_wrapper_win.h" | 14 #include "media/audio/win/avrt_wrapper_win.h" |
15 | 15 |
16 using base::win::ScopedComPtr; | 16 using base::win::ScopedComPtr; |
17 using base::win::ScopedCOMInitializer; | 17 using base::win::ScopedCOMInitializer; |
18 | 18 |
19 namespace media { | 19 namespace media { |
20 | 20 |
21 WASAPIAudioOutputStream::WASAPIAudioOutputStream(AudioManagerWin* manager, | 21 WASAPIAudioOutputStream::WASAPIAudioOutputStream(AudioManagerWin* manager, |
22 const AudioParameters& params, | 22 const AudioParameters& params, |
23 ERole device_role) | 23 ERole device_role, |
24 AUDCLNT_SHAREMODE share_mode) | |
24 : com_init_(ScopedCOMInitializer::kMTA), | 25 : com_init_(ScopedCOMInitializer::kMTA), |
25 creating_thread_id_(base::PlatformThread::CurrentId()), | 26 creating_thread_id_(base::PlatformThread::CurrentId()), |
26 manager_(manager), | 27 manager_(manager), |
27 render_thread_(NULL), | 28 render_thread_(NULL), |
28 opened_(false), | 29 opened_(false), |
29 started_(false), | 30 started_(false), |
30 restart_rendering_mode_(false), | 31 restart_rendering_mode_(false), |
31 volume_(1.0), | 32 volume_(1.0), |
32 endpoint_buffer_size_frames_(0), | 33 endpoint_buffer_size_frames_(0), |
33 device_role_(device_role), | 34 device_role_(device_role), |
35 share_mode_(share_mode), | |
34 num_written_frames_(0), | 36 num_written_frames_(0), |
35 source_(NULL) { | 37 source_(NULL) { |
36 CHECK(com_init_.succeeded()); | 38 CHECK(com_init_.succeeded()); |
37 DCHECK(manager_); | 39 DCHECK(manager_); |
38 | 40 |
39 // Load the Avrt DLL if not already loaded. Required to support MMCSS. | 41 // Load the Avrt DLL if not already loaded. Required to support MMCSS. |
40 bool avrt_init = avrt::Initialize(); | 42 bool avrt_init = avrt::Initialize(); |
41 DCHECK(avrt_init) << "Failed to load the avrt.dll"; | 43 DCHECK(avrt_init) << "Failed to load the avrt.dll"; |
42 | 44 |
45 if (share_mode == AUDCLNT_SHAREMODE_EXCLUSIVE) { | |
46 DVLOG(1) << ">> Note that EXCLUSIVE MODE is enabled <<"; | |
Chris Rogers (2012/06/20 00:02:42): I'd make the comment a little more specific, maybe
henrika (OOO until Aug 14) (2012/06/21 06:39:23): "WASAPI is configured for exclusive mode streaming
| |
47 } | |
48 | |
43 // Set up the desired render format specified by the client. | 49 // Set up the desired render format specified by the client. |
44 format_.nSamplesPerSec = params.sample_rate(); | 50 format_.nSamplesPerSec = params.sample_rate(); |
45 format_.wFormatTag = WAVE_FORMAT_PCM; | 51 format_.wFormatTag = WAVE_FORMAT_PCM; |
46 format_.wBitsPerSample = params.bits_per_sample(); | 52 format_.wBitsPerSample = params.bits_per_sample(); |
47 format_.nChannels = params.channels(); | 53 format_.nChannels = params.channels(); |
48 format_.nBlockAlign = (format_.wBitsPerSample / 8) * format_.nChannels; | 54 format_.nBlockAlign = (format_.wBitsPerSample / 8) * format_.nChannels; |
49 format_.nAvgBytesPerSec = format_.nSamplesPerSec * format_.nBlockAlign; | 55 format_.nAvgBytesPerSec = format_.nSamplesPerSec * format_.nBlockAlign; |
50 format_.cbSize = 0; | 56 format_.cbSize = 0; |
51 | 57 |
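As a quick worked example of the derived fields above (editorial, not part of the patch): for a typical request of 48000 Hz, 16-bit, 2-channel PCM, nBlockAlign = (16 / 8) * 2 = 4 bytes per frame and nAvgBytesPerSec = 48000 * 4 = 192000 bytes per second.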
52 // Size in bytes of each audio frame. | 58 // Size in bytes of each audio frame. |
(...skipping 17 matching lines...) | |
70 | 76 |
71 // Create the event which will be set in Stop() when rendering shall stop. | 77 // Create the event which will be set in Stop() when rendering shall stop. |
72 stop_render_event_.Set(CreateEvent(NULL, FALSE, FALSE, NULL)); | 78 stop_render_event_.Set(CreateEvent(NULL, FALSE, FALSE, NULL)); |
73 DCHECK(stop_render_event_.IsValid()); | 79 DCHECK(stop_render_event_.IsValid()); |
74 | 80 |
75 // Create the event which will be set when a stream switch shall take place. | 81 // Create the event which will be set when a stream switch shall take place. |
76 stream_switch_event_.Set(CreateEvent(NULL, FALSE, FALSE, NULL)); | 82 stream_switch_event_.Set(CreateEvent(NULL, FALSE, FALSE, NULL)); |
77 DCHECK(stream_switch_event_.IsValid()); | 83 DCHECK(stream_switch_event_.IsValid()); |
78 } | 84 } |
79 | 85 |
80 WASAPIAudioOutputStream::~WASAPIAudioOutputStream() {} | 86 WASAPIAudioOutputStream::~WASAPIAudioOutputStream() { |
87 DVLOG(1) << "WASAPIAudioOutputStream::~WASAPIAudioOutputStream()"; | |
Chris Rogers (2012/06/20 00:02:42): Do we want this DVLOG?
henrika (OOO until Aug 14) (2012/06/21 06:39:23): Removed.
| |
88 } | |
81 | 89 |
82 bool WASAPIAudioOutputStream::Open() { | 90 bool WASAPIAudioOutputStream::Open() { |
83 DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_); | 91 DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_); |
84 if (opened_) | 92 if (opened_) |
85 return true; | 93 return true; |
86 | 94 |
87 // Create an IMMDeviceEnumerator interface and obtain a reference to | 95 // Create an IMMDeviceEnumerator interface and obtain a reference to |
88 // the IMMDevice interface of the default rendering device with the | 96 // the IMMDevice interface of the default rendering device with the |
89 // specified role. | 97 // specified role. |
90 HRESULT hr = SetRenderDevice(device_role_); | 98 HRESULT hr = SetRenderDevice(); |
91 if (FAILED(hr)) { | 99 if (FAILED(hr)) { |
92 return false; | 100 return false; |
93 } | 101 } |
94 | 102 |
95 // Obtain an IAudioClient interface which enables us to create and initialize | 103 // Obtain an IAudioClient interface which enables us to create and initialize |
96 // an audio stream between an audio application and the audio engine. | 104 // an audio stream between an audio application and the audio engine. |
97 hr = ActivateRenderDevice(); | 105 hr = ActivateRenderDevice(); |
98 if (FAILED(hr)) { | 106 if (FAILED(hr)) { |
99 return false; | 107 return false; |
100 } | 108 } |
101 | 109 |
102 // Retrieve the stream format which the audio engine uses for its internal | 110 // Retrieve the stream format which the audio engine uses for its internal |
103 // processing/mixing of shared-mode streams. | 111 // processing/mixing of shared-mode streams. The result of this method is |
112 // ignored for exclusive-mode streams. |
104 hr = GetAudioEngineStreamFormat(); | 113 hr = GetAudioEngineStreamFormat(); |
105 if (FAILED(hr)) { | 114 if (FAILED(hr)) { |
106 return false; | 115 return false; |
107 } | 116 } |
108 | 117 |
109 // Verify that the selected audio endpoint supports the specified format | 118 // Verify that the selected audio endpoint supports the specified format |
110 // set during construction. | 119 // set during construction. |
120 // In exclusive mode, the client can choose to open the stream in any audio | |
121 // format that the endpoint device supports. In shared mode, the client must | |
122 // open the stream in the mix format that is currently in use by the audio | |
123 // engine (or a format that is similar to the mix format). The audio engine's | |
124 // input streams and the output mix from the engine are all in this format. | |
111 if (!DesiredFormatIsSupported()) { | 125 if (!DesiredFormatIsSupported()) { |
112 return false; | 126 return false; |
113 } | 127 } |
114 | 128 |
115 // Initialize the audio stream between the client and the device using | 129 // Initialize the audio stream between the client and the device using |
116 // shared mode and a lowest possible glitch-free latency. | 130 // shared or exclusive mode and the lowest possible glitch-free latency. |
131 // We will enter different code paths depending on the specified share mode. | |
117 hr = InitializeAudioEngine(); | 132 hr = InitializeAudioEngine(); |
118 if (FAILED(hr)) { | 133 if (FAILED(hr)) { |
119 return false; | 134 return false; |
120 } | 135 } |
121 | 136 |
122 // Register this client as an IMMNotificationClient implementation. | 137 // Register this client as an IMMNotificationClient implementation. |
123 // Only OnDefaultDeviceChanged() and OnDeviceStateChanged() are | 138 // Only OnDefaultDeviceChanged() and OnDeviceStateChanged() are |
124 // non-trivial. | 139 // non-trivial. |
125 hr = device_enumerator_->RegisterEndpointNotificationCallback(this); | 140 hr = device_enumerator_->RegisterEndpointNotificationCallback(this); |
126 | 141 |
(...skipping 95 matching lines...) | |
222 // Flush all pending data and reset the audio clock stream position to 0. | 237 // Flush all pending data and reset the audio clock stream position to 0. |
223 hr = audio_client_->Reset(); | 238 hr = audio_client_->Reset(); |
224 if (FAILED(hr)) { | 239 if (FAILED(hr)) { |
225 DLOG_IF(ERROR, hr != AUDCLNT_E_NOT_INITIALIZED) | 240 DLOG_IF(ERROR, hr != AUDCLNT_E_NOT_INITIALIZED) |
226 << "Failed to reset streaming: " << std::hex << hr; | 241 << "Failed to reset streaming: " << std::hex << hr; |
227 } | 242 } |
228 | 243 |
229 // Extra safety check to ensure that the buffers are cleared. | 244 // Extra safety check to ensure that the buffers are cleared. |
230 // If the buffers are not cleared correctly, the next call to Start() | 245 // If the buffers are not cleared correctly, the next call to Start() |
231 // would fail with AUDCLNT_E_BUFFER_ERROR at IAudioRenderClient::GetBuffer(). | 246 // would fail with AUDCLNT_E_BUFFER_ERROR at IAudioRenderClient::GetBuffer(). |
232 UINT32 num_queued_frames = 0; | 247 // This check is only needed for shared-mode streams. |
233 audio_client_->GetCurrentPadding(&num_queued_frames); | 248 if (share_mode() == AUDCLNT_SHAREMODE_SHARED) { |
234 DCHECK_EQ(0u, num_queued_frames); | 249 UINT32 num_queued_frames = 0; |
250 audio_client_->GetCurrentPadding(&num_queued_frames); | |
251 DCHECK_EQ(0u, num_queued_frames); | |
252 } | |
235 | 253 |
236 // Ensure that we don't quit the main thread loop immediately next | 254 // Ensure that we don't quit the main thread loop immediately next |
237 // time Start() is called. | 255 // time Start() is called. |
238 ResetEvent(stop_render_event_.Get()); | 256 ResetEvent(stop_render_event_.Get()); |
239 | 257 |
240 started_ = false; | 258 started_ = false; |
241 } | 259 } |
242 | 260 |
243 void WASAPIAudioOutputStream::Close() { | 261 void WASAPIAudioOutputStream::Close() { |
244 DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_); | 262 DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_); |
(...skipping 26 matching lines...) | |
271 | 289 |
272 void WASAPIAudioOutputStream::GetVolume(double* volume) { | 290 void WASAPIAudioOutputStream::GetVolume(double* volume) { |
273 DVLOG(1) << "GetVolume()"; | 291 DVLOG(1) << "GetVolume()"; |
274 *volume = static_cast<double>(volume_); | 292 *volume = static_cast<double>(volume_); |
275 } | 293 } |
276 | 294 |
277 // static | 295 // static |
278 int WASAPIAudioOutputStream::HardwareSampleRate(ERole device_role) { | 296 int WASAPIAudioOutputStream::HardwareSampleRate(ERole device_role) { |
279 // It is assumed that this static method is called from a COM thread, i.e., | 297 // It is assumed that this static method is called from a COM thread, i.e., |
280 // CoInitializeEx() is not called here again to avoid STA/MTA conflicts. | 298 // CoInitializeEx() is not called here again to avoid STA/MTA conflicts. |
299 // Note that calling this function only makes sense for shared-mode streams, |
300 // since if the device is opened in exclusive mode, the application-specified |
301 // format is used instead. |
281 ScopedComPtr<IMMDeviceEnumerator> enumerator; | 302 ScopedComPtr<IMMDeviceEnumerator> enumerator; |
282 HRESULT hr = CoCreateInstance(__uuidof(MMDeviceEnumerator), | 303 HRESULT hr = CoCreateInstance(__uuidof(MMDeviceEnumerator), |
283 NULL, | 304 NULL, |
284 CLSCTX_INPROC_SERVER, | 305 CLSCTX_INPROC_SERVER, |
285 __uuidof(IMMDeviceEnumerator), | 306 __uuidof(IMMDeviceEnumerator), |
286 enumerator.ReceiveVoid()); | 307 enumerator.ReceiveVoid()); |
287 if (FAILED(hr)) { | 308 if (FAILED(hr)) { |
288 NOTREACHED() << "error code: " << std::hex << hr; | 309 NOTREACHED() << "error code: " << std::hex << hr; |
289 return 0.0; | 310 return 0.0; |
290 } | 311 } |
(...skipping 13 matching lines...) | |
304 ScopedComPtr<IAudioClient> audio_client; | 325 ScopedComPtr<IAudioClient> audio_client; |
305 hr = endpoint_device->Activate(__uuidof(IAudioClient), | 326 hr = endpoint_device->Activate(__uuidof(IAudioClient), |
306 CLSCTX_INPROC_SERVER, | 327 CLSCTX_INPROC_SERVER, |
307 NULL, | 328 NULL, |
308 audio_client.ReceiveVoid()); | 329 audio_client.ReceiveVoid()); |
309 if (FAILED(hr)) { | 330 if (FAILED(hr)) { |
310 NOTREACHED() << "error code: " << std::hex << hr; | 331 NOTREACHED() << "error code: " << std::hex << hr; |
311 return 0.0; | 332 return 0.0; |
312 } | 333 } |
313 | 334 |
335 // Retrieve the stream format that the audio engine uses for its internal | |
336 // processing of shared-mode streams. | |
314 base::win::ScopedCoMem<WAVEFORMATEX> audio_engine_mix_format; | 337 base::win::ScopedCoMem<WAVEFORMATEX> audio_engine_mix_format; |
315 hr = audio_client->GetMixFormat(&audio_engine_mix_format); | 338 hr = audio_client->GetMixFormat(&audio_engine_mix_format); |
316 if (FAILED(hr)) { | 339 if (FAILED(hr)) { |
317 NOTREACHED() << "error code: " << std::hex << hr; | 340 NOTREACHED() << "error code: " << std::hex << hr; |
318 return 0.0; | 341 return 0.0; |
319 } | 342 } |
320 | 343 |
321 return static_cast<int>(audio_engine_mix_format->nSamplesPerSec); | 344 return static_cast<int>(audio_engine_mix_format->nSamplesPerSec); |
322 } | 345 } |
323 | 346 |
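A minimal usage sketch of HardwareSampleRate() (editorial; the call site below is hypothetical, but it illustrates the COM-thread assumption stated at the top of the function):

  // Hypothetical caller: COM must already be initialized on this thread.
  // MTA is used here to match what WASAPIAudioOutputStream itself requests.
  base::win::ScopedCOMInitializer com_init(base::win::ScopedCOMInitializer::kMTA);
  int mix_rate = media::WASAPIAudioOutputStream::HardwareSampleRate(eConsole);
  DVLOG(1) << "Shared-mode mix sample rate: " << mix_rate << " Hz";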
(...skipping 14 matching lines...) | |
338 // Failed to enable MMCSS on this thread. It is not fatal but can lead | 361 // Failed to enable MMCSS on this thread. It is not fatal but can lead |
339 // to reduced QoS at high load. | 362 // to reduced QoS at high load. |
340 DWORD err = GetLastError(); | 363 DWORD err = GetLastError(); |
341 LOG(WARNING) << "Failed to enable MMCSS (error code=" << err << ")."; | 364 LOG(WARNING) << "Failed to enable MMCSS (error code=" << err << ")."; |
342 } | 365 } |
343 | 366 |
344 HRESULT hr = S_FALSE; | 367 HRESULT hr = S_FALSE; |
345 | 368 |
346 bool playing = true; | 369 bool playing = true; |
347 bool error = false; | 370 bool error = false; |
348 HANDLE wait_array[] = { stop_render_event_, | 371 HANDLE wait_array[] = {stop_render_event_, |
349 stream_switch_event_, | 372 stream_switch_event_, |
350 audio_samples_render_event_ }; | 373 audio_samples_render_event_ }; |
351 UINT64 device_frequency = 0; | 374 UINT64 device_frequency = 0; |
352 | 375 |
353 // The IAudioClock interface enables us to monitor a stream's data | 376 // The IAudioClock interface enables us to monitor a stream's data |
354 // rate and the current position in the stream. Allocate it before we | 377 // rate and the current position in the stream. Allocate it before we |
355 // start spinning. | 378 // start spinning. |
356 ScopedComPtr<IAudioClock> audio_clock; | 379 ScopedComPtr<IAudioClock> audio_clock; |
357 hr = audio_client_->GetService(__uuidof(IAudioClock), | 380 hr = audio_client_->GetService(__uuidof(IAudioClock), |
358 audio_clock.ReceiveVoid()); | 381 audio_clock.ReceiveVoid()); |
359 if (SUCCEEDED(hr)) { | 382 if (SUCCEEDED(hr)) { |
360 // The device frequency is the frequency generated by the hardware clock in | 383 // The device frequency is the frequency generated by the hardware clock in |
(...skipping 26 matching lines...) | |
387 playing = false; | 410 playing = false; |
388 error = true; | 411 error = true; |
389 } | 412 } |
390 break; | 413 break; |
391 case WAIT_OBJECT_0 + 2: | 414 case WAIT_OBJECT_0 + 2: |
392 { | 415 { |
393 // |audio_samples_render_event_| has been set. | 416 // |audio_samples_render_event_| has been set. |
394 UINT32 num_queued_frames = 0; | 417 UINT32 num_queued_frames = 0; |
395 uint8* audio_data = NULL; | 418 uint8* audio_data = NULL; |
396 | 419 |
397 // Get the padding value which represents the amount of rendering | 420 // Contains how much new data we can write to the buffer without |
398 // data that is queued up to play in the endpoint buffer. | |
399 hr = audio_client_->GetCurrentPadding(&num_queued_frames); | |
400 | |
401 // Determine how much new data we can write to the buffer without | |
402 // the risk of overwriting previously written data that the audio | 421 // the risk of overwriting previously written data that the audio |
403 // engine has not yet read from the buffer. | 422 // engine has not yet read from the buffer. |
404 size_t num_available_frames = | 423 size_t num_available_frames = 0; |
405 endpoint_buffer_size_frames_ - num_queued_frames; | 424 |
425 if (share_mode() == AUDCLNT_SHAREMODE_SHARED) { | |
426 // Get the padding value which represents the amount of rendering | |
427 // data that is queued up to play in the endpoint buffer. | |
428 hr = audio_client_->GetCurrentPadding(&num_queued_frames); | |
429 num_available_frames = | |
430 endpoint_buffer_size_frames_ - num_queued_frames; | |
431 } else { | |
432 // While the stream is running, the system alternately sends one | |
433 // buffer or the other to the client. This form of double buffering | |
434 // is referred to as "ping-ponging". Each time the client receives | |
435 // a buffer from the system (triggers this event) the client must | |
436 // process the entire buffer. Calls to the GetCurrentPadding method | |
437 // are unnecessary because the packet size must always equal the | |
438 // buffer size. In contrast to the shared mode buffering scheme, | |
439 // the latency for an event-driven, exclusive-mode stream depends | |
440 // directly on the buffer size. | |
441 num_available_frames = endpoint_buffer_size_frames_; | |
442 } | |
406 | 443 |
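Editorial note on the exclusive-mode branch above: because each event covers exactly one endpoint buffer, the render deadline follows directly from the buffer size. For example, a 480-frame endpoint buffer at 48000 Hz must be refilled within 480 / 48000 = 10 ms, and the two ping-ponged buffers hold at most about 2 * 10 ms = 20 ms of queued audio.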
407 // Check if there is enough available space to fit the packet size | 444 // Check if there is enough available space to fit the packet size |
408 // specified by the client. | 445 // specified by the client. |
409 if (FAILED(hr) || (num_available_frames < packet_size_frames_)) | 446 if (FAILED(hr) || (num_available_frames < packet_size_frames_)) |
410 continue; | 447 continue; |
411 | 448 |
412 // Derive the number of packets we need to get from the client to | 449 // Derive the number of packets we need to get from the client to |
413 // fill up the available area in the endpoint buffer. | 450 // fill up the available area in the endpoint buffer. |
451 // |num_packets| will always be one for exclusive-mode streams. | |
414 size_t num_packets = (num_available_frames / packet_size_frames_); | 452 size_t num_packets = (num_available_frames / packet_size_frames_); |
415 | 453 |
416 // Get data from the client/source. | 454 // Get data from the client/source. |
417 for (size_t n = 0; n < num_packets; ++n) { | 455 for (size_t n = 0; n < num_packets; ++n) { |
418 // Grab all available space in the rendering endpoint buffer | 456 // Grab all available space in the rendering endpoint buffer |
419 // into which the client can write a data packet. | 457 // into which the client can write a data packet. |
420 hr = audio_render_client_->GetBuffer(packet_size_frames_, | 458 hr = audio_render_client_->GetBuffer(packet_size_frames_, |
421 &audio_data); | 459 &audio_data); |
422 if (FAILED(hr)) { | 460 if (FAILED(hr)) { |
423 DLOG(ERROR) << "Failed to use rendering audio buffer: " | 461 DLOG(ERROR) << "Failed to use rendering audio buffer: " |
(...skipping 80 matching lines...) | |
504 PLOG(WARNING) << "Failed to disable MMCSS"; | 542 PLOG(WARNING) << "Failed to disable MMCSS"; |
505 } | 543 } |
506 } | 544 } |
507 | 545 |
508 void WASAPIAudioOutputStream::HandleError(HRESULT err) { | 546 void WASAPIAudioOutputStream::HandleError(HRESULT err) { |
509 NOTREACHED() << "Error code: " << std::hex << err; | 547 NOTREACHED() << "Error code: " << std::hex << err; |
510 if (source_) | 548 if (source_) |
511 source_->OnError(this, static_cast<int>(err)); | 549 source_->OnError(this, static_cast<int>(err)); |
512 } | 550 } |
513 | 551 |
514 HRESULT WASAPIAudioOutputStream::SetRenderDevice(ERole device_role) { | 552 HRESULT WASAPIAudioOutputStream::SetRenderDevice() { |
515 // Create the IMMDeviceEnumerator interface. | 553 // Create the IMMDeviceEnumerator interface. |
516 HRESULT hr = CoCreateInstance(__uuidof(MMDeviceEnumerator), | 554 HRESULT hr = CoCreateInstance(__uuidof(MMDeviceEnumerator), |
517 NULL, | 555 NULL, |
518 CLSCTX_INPROC_SERVER, | 556 CLSCTX_INPROC_SERVER, |
519 __uuidof(IMMDeviceEnumerator), | 557 __uuidof(IMMDeviceEnumerator), |
520 device_enumerator_.ReceiveVoid()); | 558 device_enumerator_.ReceiveVoid()); |
521 if (SUCCEEDED(hr)) { | 559 if (SUCCEEDED(hr)) { |
522 // Retrieve the default render audio endpoint for the specified role. | 560 // Retrieve the default render audio endpoint for the specified role. |
523 // Note that, in Windows Vista, the MMDevice API supports device roles | 561 // Note that, in Windows Vista, the MMDevice API supports device roles |
524 // but the system-supplied user interface programs do not. | 562 // but the system-supplied user interface programs do not. |
525 hr = device_enumerator_->GetDefaultAudioEndpoint( | 563 hr = device_enumerator_->GetDefaultAudioEndpoint( |
526 eRender, device_role, endpoint_device_.Receive()); | 564 eRender, device_role_, endpoint_device_.Receive()); |
527 if (FAILED(hr)) | 565 if (FAILED(hr)) |
528 return hr; | 566 return hr; |
529 | 567 |
530 // Verify that the audio endpoint device is active. That is, the audio | 568 // Verify that the audio endpoint device is active. That is, the audio |
531 // adapter that connects to the endpoint device is present and enabled. | 569 // adapter that connects to the endpoint device is present and enabled. |
532 DWORD state = DEVICE_STATE_DISABLED; | 570 DWORD state = DEVICE_STATE_DISABLED; |
533 hr = endpoint_device_->GetState(&state); | 571 hr = endpoint_device_->GetState(&state); |
534 if (SUCCEEDED(hr)) { | 572 if (SUCCEEDED(hr)) { |
535 if (!(state & DEVICE_STATE_ACTIVE)) { | 573 if (!(state & DEVICE_STATE_ACTIVE)) { |
536 DLOG(ERROR) << "Selected render device is not active."; | 574 DLOG(ERROR) << "Selected render device is not active."; |
(...skipping 15 matching lines...) | |
552 return hr; | 590 return hr; |
553 } | 591 } |
554 | 592 |
555 HRESULT WASAPIAudioOutputStream::GetAudioEngineStreamFormat() { | 593 HRESULT WASAPIAudioOutputStream::GetAudioEngineStreamFormat() { |
556 // Retrieve the stream format that the audio engine uses for its internal | 594 // Retrieve the stream format that the audio engine uses for its internal |
557 // processing/mixing of shared-mode streams. | 595 // processing/mixing of shared-mode streams. |
558 return audio_client_->GetMixFormat(&audio_engine_mix_format_); | 596 return audio_client_->GetMixFormat(&audio_engine_mix_format_); |
559 } | 597 } |
560 | 598 |
561 bool WASAPIAudioOutputStream::DesiredFormatIsSupported() { | 599 bool WASAPIAudioOutputStream::DesiredFormatIsSupported() { |
600 // Determine, before calling IAudioClient::Initialize, whether the audio | |
601 // engine supports a particular stream format. | |
562 // In shared mode, the audio engine always supports the mix format, | 602 // In shared mode, the audio engine always supports the mix format, |
563 // which is stored in the |audio_engine_mix_format_| member. In addition, | 603 // which is stored in the |audio_engine_mix_format_| member. |
564 // the audio engine *might* support similar formats that have the same | |
565 // sample rate and number of channels as the mix format but differ in | |
566 // the representation of audio sample values. | |
567 base::win::ScopedCoMem<WAVEFORMATEX> closest_match; | 604 base::win::ScopedCoMem<WAVEFORMATEX> closest_match; |
568 HRESULT hr = audio_client_->IsFormatSupported(AUDCLNT_SHAREMODE_SHARED, | 605 HRESULT hr = audio_client_->IsFormatSupported(share_mode_, |
569 &format_, | 606 &format_, |
570 &closest_match); | 607 &closest_match); |
571 DLOG_IF(ERROR, hr == S_FALSE) << "Format is not supported " | 608 DLOG_IF(ERROR, hr == S_FALSE) << "Format is not supported " |
572 << "but a closest match exists."; | 609 << "but a closest match exists."; |
573 return (hr == S_OK); | 610 return (hr == S_OK); |
574 } | 611 } |
575 | 612 |
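A small debugging sketch for the S_FALSE case above (editorial, not in the patch): when IsFormatSupported() succeeds with a closest match, |closest_match| is populated for shared-mode queries (exclusive-mode rejections do not provide one), so something like the following could be added inside DesiredFormatIsSupported():

  if (hr == S_FALSE && closest_match) {
    DVLOG(1) << "Closest supported format: "
             << closest_match->nSamplesPerSec << " Hz, "
             << closest_match->nChannels << " channel(s), "
             << closest_match->wBitsPerSample << " bits per sample";
  }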
576 HRESULT WASAPIAudioOutputStream::InitializeAudioEngine() { | 613 HRESULT WASAPIAudioOutputStream::InitializeAudioEngine() { |
577 // TODO(henrika): this buffer scheme is still under development. | |
578 // The exact details are yet to be determined based on tests with different | |
579 // audio clients. | |
580 int glitch_free_buffer_size_ms = static_cast<int>(packet_size_ms_ + 0.5); | |
581 if (audio_engine_mix_format_->nSamplesPerSec == 48000) { | |
582 // Initial tests have shown that we have to add 10 ms extra to | |
583 // ensure that we don't run empty for any packet size. | |
584 glitch_free_buffer_size_ms += 10; | |
585 } else if (audio_engine_mix_format_->nSamplesPerSec == 44100) { | |
586 // Initial tests have shown that we have to add 20 ms extra to | |
587 // ensure that we don't run empty for any packet size. | |
588 glitch_free_buffer_size_ms += 20; | |
589 } else { | |
590 glitch_free_buffer_size_ms += 20; | |
591 } | |
592 DVLOG(1) << "glitch_free_buffer_size_ms: " << glitch_free_buffer_size_ms; | |
593 REFERENCE_TIME requested_buffer_duration_hns = | |
594 static_cast<REFERENCE_TIME>(glitch_free_buffer_size_ms * 10000); | |
595 | |
596 // Initialize the audio stream between the client and the device. | |
597 // We connect indirectly through the audio engine by using shared mode | |
598 // and WASAPI is initialized in an event driven mode. | |
599 // Note that this API ensures that the buffer is never smaller than the | |
600 // minimum buffer size needed to ensure glitch-free rendering. | |
601 // If we requests a buffer size that is smaller than the audio engine's | |
602 // minimum required buffer size, the method sets the buffer size to this | |
603 // minimum buffer size rather than to the buffer size requested. | |
604 HRESULT hr = audio_client_->Initialize(AUDCLNT_SHAREMODE_SHARED, | |
605 AUDCLNT_STREAMFLAGS_EVENTCALLBACK | | |
606 AUDCLNT_STREAMFLAGS_NOPERSIST, | |
607 requested_buffer_duration_hns, | |
608 0, | |
609 &format_, | |
610 NULL); | |
611 if (FAILED(hr)) | |
612 return hr; | |
613 | |
614 // Retrieve the length of the endpoint buffer shared between the client | |
615 // and the audio engine. The buffer length the buffer length determines | |
616 // the maximum amount of rendering data that the client can write to | |
617 // the endpoint buffer during a single processing pass. | |
618 // A typical value is 960 audio frames <=> 20ms @ 48kHz sample rate. | |
619 hr = audio_client_->GetBufferSize(&endpoint_buffer_size_frames_); | |
620 if (FAILED(hr)) | |
621 return hr; | |
622 DVLOG(1) << "endpoint buffer size: " << endpoint_buffer_size_frames_ | |
623 << " [frames]"; | |
624 #ifndef NDEBUG | 614 #ifndef NDEBUG |
625 // The period between processing passes by the audio engine is fixed for a | 615 // The period between processing passes by the audio engine is fixed for a |
626 // particular audio endpoint device and represents the smallest processing | 616 // particular audio endpoint device and represents the smallest processing |
627 // quantum for the audio engine. This period plus the stream latency between | 617 // quantum for the audio engine. This period plus the stream latency between |
628 // the buffer and endpoint device represents the minimum possible latency | 618 // the buffer and endpoint device represents the minimum possible latency |
629 // that an audio application can achieve in shared mode. | 619 // that an audio application can achieve in shared mode. |
630 REFERENCE_TIME default_device_period = 0; | 620 REFERENCE_TIME default_device_period = 0; |
631 REFERENCE_TIME minimum_device_period = 0; | 621 REFERENCE_TIME minimum_device_period = 0; |
632 HRESULT hr_dbg = audio_client_->GetDevicePeriod(&default_device_period, | 622 HRESULT hr_dbg = audio_client_->GetDevicePeriod(&default_device_period, |
633 &minimum_device_period); | 623 &minimum_device_period); |
634 if (SUCCEEDED(hr_dbg)) { | 624 if (SUCCEEDED(hr_dbg)) { |
635 // Shared mode device period. | 625 // Shared mode device period. |
636 DVLOG(1) << "default device period: " | 626 DVLOG(1) << "shared mode (default) device period: " |
637 << static_cast<double>(default_device_period / 10000.0) | 627 << static_cast<double>(default_device_period / 10000.0) |
638 << " [ms]"; | 628 << " [ms]"; |
639 // Exclusive mode device period. | 629 // Exclusive mode device period. |
640 DVLOG(1) << "minimum device period: " | 630 DVLOG(1) << "exclusive mode (minimum) device period: " |
641 << static_cast<double>(minimum_device_period / 10000.0) | 631 << static_cast<double>(minimum_device_period / 10000.0) |
642 << " [ms]"; | 632 << " [ms]"; |
643 } | 633 } |
644 | 634 |
645 REFERENCE_TIME latency = 0; | 635 REFERENCE_TIME latency = 0; |
646 hr_dbg = audio_client_->GetStreamLatency(&latency); | 636 hr_dbg = audio_client_->GetStreamLatency(&latency); |
647 if (SUCCEEDED(hr_dbg)) { | 637 if (SUCCEEDED(hr_dbg)) { |
648 DVLOG(1) << "stream latency: " << static_cast<double>(latency / 10000.0) | 638 DVLOG(1) << "stream latency: " << static_cast<double>(latency / 10000.0) |
649 << " [ms]"; | 639 << " [ms]"; |
650 } | 640 } |
651 #endif | 641 #endif |
652 | 642 |
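For reference (editorial), the values printed above are REFERENCE_TIME divided by 10000.0: a typical shared-mode default period of 100000 units shows up as 10 ms, while the roughly 3 ms minimum period mentioned later for Windows 7 corresponds to about 30000 units.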
643 HRESULT hr = S_FALSE; | |
644 REFERENCE_TIME requested_buffer_duration = 0; | |
645 | |
646 // Perform different initialization depending on if the device shall be | |
647 // opened in shared mode or in exclusive mode. | |
648 if (share_mode() == AUDCLNT_SHAREMODE_SHARED) { | |
649 // The device will be opened in shared mode and use the WAS format. | |
650 | |
651 // TODO(henrika): this buffer scheme is still under development. | |
652 // The exact details are yet to be determined based on tests with different | |
653 // audio clients. | |
654 int glitch_free_buffer_size_ms = static_cast<int>(packet_size_ms_ + 0.5); | |
655 if (audio_engine_mix_format_->nSamplesPerSec == 48000) { | |
656 // Initial tests have shown that we have to add 10 ms extra to | |
657 // ensure that we don't run empty for any packet size. | |
658 glitch_free_buffer_size_ms += 10; | |
659 } else if (audio_engine_mix_format_->nSamplesPerSec == 44100) { | |
660 // Initial tests have shown that we have to add 20 ms extra to | |
661 // ensure that we don't run empty for any packet size. | |
662 glitch_free_buffer_size_ms += 20; | |
663 } else { | |
664 glitch_free_buffer_size_ms += 20; | |
665 } | |
666 DVLOG(1) << "glitch_free_buffer_size_ms: " << glitch_free_buffer_size_ms; | |
667 requested_buffer_duration = | |
668 static_cast<REFERENCE_TIME>(glitch_free_buffer_size_ms * 10000); | |
669 | |
670 // Initialize the audio stream between the client and the device. | |
671 // We connect indirectly through the audio engine by using shared mode | |
672 // and WASAPI is initialized in an event driven mode. | |
673 // Note that this API ensures that the buffer is never smaller than the | |
674 // minimum buffer size needed to ensure glitch-free rendering. | |
675 // If we request a buffer size that is smaller than the audio engine's |
676 // minimum required buffer size, the method sets the buffer size to this | |
677 // minimum buffer size rather than to the buffer size requested. | |
678 hr = audio_client_->Initialize(AUDCLNT_SHAREMODE_SHARED, | |
679 AUDCLNT_STREAMFLAGS_EVENTCALLBACK | | |
680 AUDCLNT_STREAMFLAGS_NOPERSIST, | |
681 requested_buffer_duration, | |
682 0, | |
683 &format_, | |
684 NULL); | |
685 } else { | |
686 // The device will be opened in exclusive mode and use the application | |
687 // specified format. | |
688 | |
689 float f = (1000.0 * packet_size_frames_) / format_.nSamplesPerSec; | |
690 requested_buffer_duration = static_cast<REFERENCE_TIME>(f * 10000.0 + 0.5); |
691 | |
692 // Initialize the audio stream between the client and the device. | |
693 // For an exclusive-mode stream that uses event-driven buffering, the | |
694 // caller must specify nonzero values for hnsPeriodicity and | |
695 // hnsBufferDuration, and the values of these two parameters must be equal. | |
696 // The Initialize method allocates two buffers for the stream. Each buffer | |
697 // is equal in duration to the value of the hnsBufferDuration parameter. | |
698 // Following the Initialize call for a rendering stream, the caller should | |
699 // fill the first of the two buffers before starting the stream. | |
700 hr = audio_client_->Initialize(AUDCLNT_SHAREMODE_EXCLUSIVE, | |
701 AUDCLNT_STREAMFLAGS_EVENTCALLBACK | | |
702 AUDCLNT_STREAMFLAGS_NOPERSIST, | |
703 requested_buffer_duration, | |
704 requested_buffer_duration, | |
705 &format_, | |
706 NULL); | |
707 if (FAILED(hr)) { | |
708 if (hr == AUDCLNT_E_BUFFER_SIZE_NOT_ALIGNED) { | |
709 DLOG(ERROR) << "AUDCLNT_E_BUFFER_SIZE_NOT_ALIGNED"; | |
710 | |
711 UINT32 aligned_buffer_size = 0; | |
712 audio_client_->GetBufferSize(&aligned_buffer_size); | |
713 DVLOG(1) << "Use aligned buffer size instead: " << aligned_buffer_size; | |
714 audio_client_.Release(); | |
715 | |
716 // Calculate new aligned periodicity. Each unit of reference time | |
717 // is 100 nanoseconds. | |
718 REFERENCE_TIME aligned_buffer_duration = static_cast<REFERENCE_TIME>( | |
719 10000000.0 * aligned_buffer_size / format_.nSamplesPerSec + 0.5); | |
720 | |
721 // It is possible to re-activate and re-initialize the audio client | |
722 // at this stage but we bail out with an error code instead and | |
723 // combine it with a log message which informs about the suggested | |
724 // aligned buffer size which should be used instead. | |
725 DVLOG(1) << "aligned_buffer_duration: " | |
726 << static_cast<double>(aligned_buffer_duration / 10000.0) | |
727 << " [ms]"; | |
728 } else if (hr == AUDCLNT_E_INVALID_DEVICE_PERIOD) { | |
729 // We will get this error if we try to use a smaller buffer size than | |
730 // the minimum supported size (usually ~3ms on Windows 7). | |
731 DLOG(ERROR) << "AUDCLNT_E_INVALID_DEVICE_PERIOD"; | |
732 } | |
733 } | |
734 } | |
735 | |
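Editorial worked numbers for the exclusive-mode duration above: REFERENCE_TIME is in 100-nanosecond units, so a hypothetical 480-frame packet at 48000 Hz gives f = 1000.0 * 480 / 48000 = 10 ms and requested_buffer_duration = 10 * 10000 = 100000 units. If the device requires aligned buffers and GetBufferSize() reports, say, 448 frames at 44100 Hz, the recomputed aligned duration is 10000000.0 * 448 / 44100 + 0.5, truncated to 101587 units, i.e. about 10.16 ms.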
736 if (FAILED(hr)) { | |
737 DVLOG(1) << "IAudioClient::Initialize() failed: " << std::hex << hr; | |
738 return hr; | |
739 } | |
740 | |
741 // Retrieve the length of the endpoint buffer. The buffer length represents | |
742 // the maximum amount of rendering data that the client can write to | |
743 // the endpoint buffer during a single processing pass. | |
744 // A typical value is 960 audio frames <=> 20ms @ 48kHz sample rate. | |
745 hr = audio_client_->GetBufferSize(&endpoint_buffer_size_frames_); | |
746 if (FAILED(hr)) | |
747 return hr; | |
748 DVLOG(1) << "endpoint buffer size: " << endpoint_buffer_size_frames_ | |
749 << " [frames]"; | |
750 | |
751 // The buffer scheme for exclusive mode streams is not designed for max | |
752 // flexibility. We only allow a "perfect match" between the packet size set | |
753 // by the user and the actual endpoint buffer size. | |
754 if (share_mode() == AUDCLNT_SHAREMODE_EXCLUSIVE) { | |
755 if (endpoint_buffer_size_frames_ != packet_size_frames_) { | |
756 hr = AUDCLNT_E_INVALID_SIZE; | |
757 DLOG(ERROR) << "AUDCLNT_E_INVALID_SIZE"; | |
758 return hr; | |
759 } | |
760 } | |
761 | |
653 // Set the event handle that the audio engine will signal each time | 762 // Set the event handle that the audio engine will signal each time |
654 // a buffer becomes ready to be processed by the client. | 763 // a buffer becomes ready to be processed by the client. |
655 hr = audio_client_->SetEventHandle(audio_samples_render_event_.Get()); | 764 hr = audio_client_->SetEventHandle(audio_samples_render_event_.Get()); |
656 if (FAILED(hr)) | 765 if (FAILED(hr)) |
657 return hr; | 766 return hr; |
658 | 767 |
659 // Get access to the IAudioRenderClient interface. This interface | 768 // Get access to the IAudioRenderClient interface. This interface |
660 // enables us to write output data to a rendering endpoint buffer. | 769 // enables us to write output data to a rendering endpoint buffer. |
661 // The methods in this interface manage the movement of data packets | 770 // The methods in this interface manage the movement of data packets |
662 // that contain audio-rendering data. | 771 // that contain audio-rendering data. |
(...skipping 160 matching lines...) | |
823 // are now re-initiated and it is now possible to re-start audio rendering. | 932 // are now re-initiated and it is now possible to re-start audio rendering. |
824 | 933 |
825 // Start rendering again using the new default audio endpoint. | 934 // Start rendering again using the new default audio endpoint. |
826 hr = audio_client_->Start(); | 935 hr = audio_client_->Start(); |
827 | 936 |
828 restart_rendering_mode_ = false; | 937 restart_rendering_mode_ = false; |
829 return SUCCEEDED(hr); | 938 return SUCCEEDED(hr); |
830 } | 939 } |
831 | 940 |
832 } // namespace media | 941 } // namespace media |