Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "media/audio/win/audio_low_latency_output_win.h" | 5 #include "media/audio/win/audio_low_latency_output_win.h" |
| 6 | 6 |
| 7 #include <Functiondiscoverykeys_devpkey.h> | 7 #include <Functiondiscoverykeys_devpkey.h> |
| 8 | 8 |
| 9 #include "base/command_line.h" | |
| 9 #include "base/logging.h" | 10 #include "base/logging.h" |
| 10 #include "base/memory/scoped_ptr.h" | 11 #include "base/memory/scoped_ptr.h" |
| 11 #include "base/utf_string_conversions.h" | 12 #include "base/utf_string_conversions.h" |
| 12 #include "media/audio/audio_util.h" | 13 #include "media/audio/audio_util.h" |
| 13 #include "media/audio/win/audio_manager_win.h" | 14 #include "media/audio/win/audio_manager_win.h" |
| 14 #include "media/audio/win/avrt_wrapper_win.h" | 15 #include "media/audio/win/avrt_wrapper_win.h" |
| 16 #include "media/base/media_switches.h" | |
| 15 | 17 |
| 16 using base::win::ScopedComPtr; | 18 using base::win::ScopedComPtr; |
| 17 using base::win::ScopedCOMInitializer; | 19 using base::win::ScopedCOMInitializer; |
| 18 | 20 |
| 19 namespace media { | 21 namespace media { |
| 20 | 22 |
| 23 AUDCLNT_SHAREMODE GetShareModeImpl() { | |
| 24 const CommandLine* cmd_line = CommandLine::ForCurrentProcess(); | |
| 25 if (cmd_line->HasSwitch(switches::kEnableExclusiveMode)) | |
| 26 return AUDCLNT_SHAREMODE_EXCLUSIVE; | |
| 27 else | |
|
tommi (sloooow) - chröme
2012/07/25 11:49:40
nit: can remove the else
henrika (OOO until Aug 14)
2012/07/25 15:26:30
Done.
| |
| 28 return AUDCLNT_SHAREMODE_SHARED; | |
| 29 } | |
| 30 | |
| 31 // static | |
| 32 AUDCLNT_SHAREMODE WASAPIAudioOutputStream::GetShareMode() { | |
| 33 static const AUDCLNT_SHAREMODE kShareMode = GetShareModeImpl(); | |
|
no longer working on chromium
2012/07/25 12:32:57
do we need const for an enum?
henrika (OOO until Aug 14)
2012/07/25 15:26:30
Why not ;-)
| |
| 34 return kShareMode; | |
| 35 } | |
| 36 | |
| 21 WASAPIAudioOutputStream::WASAPIAudioOutputStream(AudioManagerWin* manager, | 37 WASAPIAudioOutputStream::WASAPIAudioOutputStream(AudioManagerWin* manager, |
| 22 const AudioParameters& params, | 38 const AudioParameters& params, |
| 23 ERole device_role) | 39 ERole device_role) |
| 24 : com_init_(ScopedCOMInitializer::kMTA), | 40 : com_init_(ScopedCOMInitializer::kMTA), |
| 25 creating_thread_id_(base::PlatformThread::CurrentId()), | 41 creating_thread_id_(base::PlatformThread::CurrentId()), |
| 26 manager_(manager), | 42 manager_(manager), |
| 27 render_thread_(NULL), | 43 render_thread_(NULL), |
| 28 opened_(false), | 44 opened_(false), |
| 29 started_(false), | 45 started_(false), |
| 30 restart_rendering_mode_(false), | 46 restart_rendering_mode_(false), |
| 31 volume_(1.0), | 47 volume_(1.0), |
| 32 endpoint_buffer_size_frames_(0), | 48 endpoint_buffer_size_frames_(0), |
| 33 device_role_(device_role), | 49 device_role_(device_role), |
| 50 share_mode_(GetShareMode()), | |
| 34 num_written_frames_(0), | 51 num_written_frames_(0), |
| 35 source_(NULL) { | 52 source_(NULL) { |
| 36 CHECK(com_init_.succeeded()); | 53 CHECK(com_init_.succeeded()); |
| 37 DCHECK(manager_); | 54 DCHECK(manager_); |
| 38 | 55 |
| 39 // Load the Avrt DLL if not already loaded. Required to support MMCSS. | 56 // Load the Avrt DLL if not already loaded. Required to support MMCSS. |
| 40 bool avrt_init = avrt::Initialize(); | 57 bool avrt_init = avrt::Initialize(); |
| 41 DCHECK(avrt_init) << "Failed to load the avrt.dll"; | 58 DCHECK(avrt_init) << "Failed to load the avrt.dll"; |
| 42 | 59 |
| 60 if (AUDCLNT_SHAREMODE_EXCLUSIVE == share_mode()) { | |
| 61 DVLOG(1) << ">> Note that EXCLUSIVE MODE is enabled <<"; | |
|
tommi (sloooow) - chröme
2012/07/25 11:49:40
maybe VLOG?
henrika (OOO until Aug 14)
2012/07/25 15:26:30
Done.
| |
| 62 } | |
| 63 | |
| 43 // Set up the desired render format specified by the client. | 64 // Set up the desired render format specified by the client. |
| 44 format_.nSamplesPerSec = params.sample_rate(); | 65 format_.nSamplesPerSec = params.sample_rate(); |
| 45 format_.wFormatTag = WAVE_FORMAT_PCM; | 66 format_.wFormatTag = WAVE_FORMAT_PCM; |
| 46 format_.wBitsPerSample = params.bits_per_sample(); | 67 format_.wBitsPerSample = params.bits_per_sample(); |
| 47 format_.nChannels = params.channels(); | 68 format_.nChannels = params.channels(); |
| 48 format_.nBlockAlign = (format_.wBitsPerSample / 8) * format_.nChannels; | 69 format_.nBlockAlign = (format_.wBitsPerSample / 8) * format_.nChannels; |
| 49 format_.nAvgBytesPerSec = format_.nSamplesPerSec * format_.nBlockAlign; | 70 format_.nAvgBytesPerSec = format_.nSamplesPerSec * format_.nBlockAlign; |
| 50 format_.cbSize = 0; | 71 format_.cbSize = 0; |
| 51 | 72 |
| 52 // Size in bytes of each audio frame. | 73 // Size in bytes of each audio frame. |
| (...skipping 27 matching lines...) Expand all Loading... | |
| 80 WASAPIAudioOutputStream::~WASAPIAudioOutputStream() {} | 101 WASAPIAudioOutputStream::~WASAPIAudioOutputStream() {} |
| 81 | 102 |
| 82 bool WASAPIAudioOutputStream::Open() { | 103 bool WASAPIAudioOutputStream::Open() { |
| 83 DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_); | 104 DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_); |
| 84 if (opened_) | 105 if (opened_) |
| 85 return true; | 106 return true; |
| 86 | 107 |
| 87 // Create an IMMDeviceEnumerator interface and obtain a reference to | 108 // Create an IMMDeviceEnumerator interface and obtain a reference to |
| 88 // the IMMDevice interface of the default rendering device with the | 109 // the IMMDevice interface of the default rendering device with the |
| 89 // specified role. | 110 // specified role. |
| 90 HRESULT hr = SetRenderDevice(device_role_); | 111 HRESULT hr = SetRenderDevice(); |
| 91 if (FAILED(hr)) { | 112 if (FAILED(hr)) { |
| 92 return false; | 113 return false; |
| 93 } | 114 } |
| 94 | 115 |
| 95 // Obtain an IAudioClient interface which enables us to create and initialize | 116 // Obtain an IAudioClient interface which enables us to create and initialize |
| 96 // an audio stream between an audio application and the audio engine. | 117 // an audio stream between an audio application and the audio engine. |
| 97 hr = ActivateRenderDevice(); | 118 hr = ActivateRenderDevice(); |
| 98 if (FAILED(hr)) { | 119 if (FAILED(hr)) { |
| 99 return false; | 120 return false; |
| 100 } | 121 } |
| 101 | 122 |
| 102 // Retrieve the stream format which the audio engine uses for its internal | 123 // Retrieve the stream format which the audio engine uses for its internal |
| 103 // processing/mixing of shared-mode streams. | 124 // processing/mixing of shared-mode streams. The result of this method is |
| 125 // ignored for shared mode streams. | |
| 104 hr = GetAudioEngineStreamFormat(); | 126 hr = GetAudioEngineStreamFormat(); |
| 105 if (FAILED(hr)) { | 127 if (FAILED(hr)) { |
| 106 return false; | 128 return false; |
| 107 } | 129 } |
| 108 | 130 |
| 109 // Verify that the selected audio endpoint supports the specified format | 131 // Verify that the selected audio endpoint supports the specified format |
| 110 // set during construction. | 132 // set during construction. |
| 133 // In exclusive mode, the client can choose to open the stream in any audio | |
| 134 // format that the endpoint device supports. In shared mode, the client must | |
| 135 // open the stream in the mix format that is currently in use by the audio | |
| 136 // engine (or a format that is similar to the mix format). The audio engine's | |
| 137 // input streams and the output mix from the engine are all in this format. | |
| 111 if (!DesiredFormatIsSupported()) { | 138 if (!DesiredFormatIsSupported()) { |
| 112 return false; | 139 return false; |
| 113 } | 140 } |
| 114 | 141 |
| 115 // Initialize the audio stream between the client and the device using | 142 // Initialize the audio stream between the client and the device using |
| 116 // shared mode and a lowest possible glitch-free latency. | 143 // shared or exclusive mode and a lowest possible glitch-free latency. |
| 144 // We will enter different code paths depending on the specified share mode. | |
| 117 hr = InitializeAudioEngine(); | 145 hr = InitializeAudioEngine(); |
| 118 if (FAILED(hr)) { | 146 if (FAILED(hr)) { |
| 119 return false; | 147 return false; |
| 120 } | 148 } |
| 121 | 149 |
| 122 // Register this client as an IMMNotificationClient implementation. | 150 // Register this client as an IMMNotificationClient implementation. |
| 123 // Only OnDefaultDeviceChanged() and OnDeviceStateChanged() and are | 151 // Only OnDefaultDeviceChanged() and OnDeviceStateChanged() and are |
| 124 // non-trivial. | 152 // non-trivial. |
| 125 hr = device_enumerator_->RegisterEndpointNotificationCallback(this); | 153 hr = device_enumerator_->RegisterEndpointNotificationCallback(this); |
| 126 | 154 |
| (...skipping 95 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 222 // Flush all pending data and reset the audio clock stream position to 0. | 250 // Flush all pending data and reset the audio clock stream position to 0. |
| 223 hr = audio_client_->Reset(); | 251 hr = audio_client_->Reset(); |
| 224 if (FAILED(hr)) { | 252 if (FAILED(hr)) { |
| 225 DLOG_IF(ERROR, hr != AUDCLNT_E_NOT_INITIALIZED) | 253 DLOG_IF(ERROR, hr != AUDCLNT_E_NOT_INITIALIZED) |
| 226 << "Failed to reset streaming: " << std::hex << hr; | 254 << "Failed to reset streaming: " << std::hex << hr; |
| 227 } | 255 } |
| 228 | 256 |
| 229 // Extra safety check to ensure that the buffers are cleared. | 257 // Extra safety check to ensure that the buffers are cleared. |
| 230 // If the buffers are not cleared correctly, the next call to Start() | 258 // If the buffers are not cleared correctly, the next call to Start() |
| 231 // would fail with AUDCLNT_E_BUFFER_ERROR at IAudioRenderClient::GetBuffer(). | 259 // would fail with AUDCLNT_E_BUFFER_ERROR at IAudioRenderClient::GetBuffer(). |
| 232 UINT32 num_queued_frames = 0; | 260 // This check is is only needed for shared-mode streams. |
| 233 audio_client_->GetCurrentPadding(&num_queued_frames); | 261 if (share_mode() == AUDCLNT_SHAREMODE_SHARED) { |
| 234 DCHECK_EQ(0u, num_queued_frames); | 262 UINT32 num_queued_frames = 0; |
| 263 audio_client_->GetCurrentPadding(&num_queued_frames); | |
| 264 DCHECK_EQ(0u, num_queued_frames); | |
| 265 } | |
| 235 | 266 |
| 236 // Ensure that we don't quit the main thread loop immediately next | 267 // Ensure that we don't quit the main thread loop immediately next |
| 237 // time Start() is called. | 268 // time Start() is called. |
| 238 ResetEvent(stop_render_event_.Get()); | 269 ResetEvent(stop_render_event_.Get()); |
| 239 | 270 |
| 240 started_ = false; | 271 started_ = false; |
| 241 } | 272 } |
| 242 | 273 |
| 243 void WASAPIAudioOutputStream::Close() { | 274 void WASAPIAudioOutputStream::Close() { |
| 244 DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_); | 275 DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_); |
| (...skipping 26 matching lines...) Expand all Loading... | |
| 271 | 302 |
| 272 void WASAPIAudioOutputStream::GetVolume(double* volume) { | 303 void WASAPIAudioOutputStream::GetVolume(double* volume) { |
| 273 DVLOG(1) << "GetVolume()"; | 304 DVLOG(1) << "GetVolume()"; |
| 274 *volume = static_cast<double>(volume_); | 305 *volume = static_cast<double>(volume_); |
| 275 } | 306 } |
| 276 | 307 |
| 277 // static | 308 // static |
| 278 int WASAPIAudioOutputStream::HardwareSampleRate(ERole device_role) { | 309 int WASAPIAudioOutputStream::HardwareSampleRate(ERole device_role) { |
| 279 // It is assumed that this static method is called from a COM thread, i.e., | 310 // It is assumed that this static method is called from a COM thread, i.e., |
| 280 // CoInitializeEx() is not called here again to avoid STA/MTA conflicts. | 311 // CoInitializeEx() is not called here again to avoid STA/MTA conflicts. |
| 312 // Note that, calling this function only makes sense for shared mode streams, | |
| 313 // since if the device will be opened in exclusive mode, then the application | |
| 314 // specified format is used instead. | |
| 281 ScopedComPtr<IMMDeviceEnumerator> enumerator; | 315 ScopedComPtr<IMMDeviceEnumerator> enumerator; |
| 282 HRESULT hr = CoCreateInstance(__uuidof(MMDeviceEnumerator), | 316 HRESULT hr = CoCreateInstance(__uuidof(MMDeviceEnumerator), |
| 283 NULL, | 317 NULL, |
| 284 CLSCTX_INPROC_SERVER, | 318 CLSCTX_INPROC_SERVER, |
| 285 __uuidof(IMMDeviceEnumerator), | 319 __uuidof(IMMDeviceEnumerator), |
| 286 enumerator.ReceiveVoid()); | 320 enumerator.ReceiveVoid()); |
| 287 if (FAILED(hr)) { | 321 if (FAILED(hr)) { |
| 288 NOTREACHED() << "error code: " << std::hex << hr; | 322 NOTREACHED() << "error code: " << std::hex << hr; |
| 289 return 0.0; | 323 return 0.0; |
| 290 } | 324 } |
| (...skipping 13 matching lines...) Expand all Loading... | |
| 304 ScopedComPtr<IAudioClient> audio_client; | 338 ScopedComPtr<IAudioClient> audio_client; |
| 305 hr = endpoint_device->Activate(__uuidof(IAudioClient), | 339 hr = endpoint_device->Activate(__uuidof(IAudioClient), |
| 306 CLSCTX_INPROC_SERVER, | 340 CLSCTX_INPROC_SERVER, |
| 307 NULL, | 341 NULL, |
| 308 audio_client.ReceiveVoid()); | 342 audio_client.ReceiveVoid()); |
| 309 if (FAILED(hr)) { | 343 if (FAILED(hr)) { |
| 310 NOTREACHED() << "error code: " << std::hex << hr; | 344 NOTREACHED() << "error code: " << std::hex << hr; |
| 311 return 0.0; | 345 return 0.0; |
| 312 } | 346 } |
| 313 | 347 |
| 348 // Retrieve the stream format that the audio engine uses for its internal | |
| 349 // processing of shared-mode streams. | |
| 314 base::win::ScopedCoMem<WAVEFORMATEX> audio_engine_mix_format; | 350 base::win::ScopedCoMem<WAVEFORMATEX> audio_engine_mix_format; |
| 315 hr = audio_client->GetMixFormat(&audio_engine_mix_format); | 351 hr = audio_client->GetMixFormat(&audio_engine_mix_format); |
| 316 if (FAILED(hr)) { | 352 if (FAILED(hr)) { |
| 317 NOTREACHED() << "error code: " << std::hex << hr; | 353 NOTREACHED() << "error code: " << std::hex << hr; |
| 318 return 0.0; | 354 return 0.0; |
| 319 } | 355 } |
| 320 | 356 |
| 321 return static_cast<int>(audio_engine_mix_format->nSamplesPerSec); | 357 return static_cast<int>(audio_engine_mix_format->nSamplesPerSec); |
| 322 } | 358 } |
| 323 | 359 |
| (...skipping 14 matching lines...) Expand all Loading... | |
| 338 // Failed to enable MMCSS on this thread. It is not fatal but can lead | 374 // Failed to enable MMCSS on this thread. It is not fatal but can lead |
| 339 // to reduced QoS at high load. | 375 // to reduced QoS at high load. |
| 340 DWORD err = GetLastError(); | 376 DWORD err = GetLastError(); |
| 341 LOG(WARNING) << "Failed to enable MMCSS (error code=" << err << ")."; | 377 LOG(WARNING) << "Failed to enable MMCSS (error code=" << err << ")."; |
| 342 } | 378 } |
| 343 | 379 |
| 344 HRESULT hr = S_FALSE; | 380 HRESULT hr = S_FALSE; |
| 345 | 381 |
| 346 bool playing = true; | 382 bool playing = true; |
| 347 bool error = false; | 383 bool error = false; |
| 348 HANDLE wait_array[] = { stop_render_event_, | 384 HANDLE wait_array[] = {stop_render_event_, |
|
tommi (sloooow) - chröme
2012/07/25 11:49:40
nit: revert this change? (there's still space for
henrika (OOO until Aug 14)
2012/07/25 15:26:30
Fixed.
| |
| 349 stream_switch_event_, | 385 stream_switch_event_, |
| 350 audio_samples_render_event_ }; | 386 audio_samples_render_event_ }; |
| 351 UINT64 device_frequency = 0; | 387 UINT64 device_frequency = 0; |
| 352 | 388 |
| 353 // The IAudioClock interface enables us to monitor a stream's data | 389 // The IAudioClock interface enables us to monitor a stream's data |
| 354 // rate and the current position in the stream. Allocate it before we | 390 // rate and the current position in the stream. Allocate it before we |
| 355 // start spinning. | 391 // start spinning. |
| 356 ScopedComPtr<IAudioClock> audio_clock; | 392 ScopedComPtr<IAudioClock> audio_clock; |
| 357 hr = audio_client_->GetService(__uuidof(IAudioClock), | 393 hr = audio_client_->GetService(__uuidof(IAudioClock), |
| 358 audio_clock.ReceiveVoid()); | 394 audio_clock.ReceiveVoid()); |
| 359 if (SUCCEEDED(hr)) { | 395 if (SUCCEEDED(hr)) { |
| 360 // The device frequency is the frequency generated by the hardware clock in | 396 // The device frequency is the frequency generated by the hardware clock in |
| (...skipping 26 matching lines...) Expand all Loading... | |
| 387 playing = false; | 423 playing = false; |
| 388 error = true; | 424 error = true; |
| 389 } | 425 } |
| 390 break; | 426 break; |
| 391 case WAIT_OBJECT_0 + 2: | 427 case WAIT_OBJECT_0 + 2: |
| 392 { | 428 { |
| 393 // |audio_samples_render_event_| has been set. | 429 // |audio_samples_render_event_| has been set. |
| 394 UINT32 num_queued_frames = 0; | 430 UINT32 num_queued_frames = 0; |
| 395 uint8* audio_data = NULL; | 431 uint8* audio_data = NULL; |
| 396 | 432 |
| 397 // Get the padding value which represents the amount of rendering | 433 // Contains how much new data we can write to the buffer without |
| 398 // data that is queued up to play in the endpoint buffer. | |
| 399 hr = audio_client_->GetCurrentPadding(&num_queued_frames); | |
| 400 | |
| 401 // Determine how much new data we can write to the buffer without | |
| 402 // the risk of overwriting previously written data that the audio | 434 // the risk of overwriting previously written data that the audio |
| 403 // engine has not yet read from the buffer. | 435 // engine has not yet read from the buffer. |
| 404 size_t num_available_frames = | 436 size_t num_available_frames = 0; |
| 405 endpoint_buffer_size_frames_ - num_queued_frames; | 437 |
| 438 if (share_mode() == AUDCLNT_SHAREMODE_SHARED) { | |
| 439 // Get the padding value which represents the amount of rendering | |
| 440 // data that is queued up to play in the endpoint buffer. | |
| 441 hr = audio_client_->GetCurrentPadding(&num_queued_frames); | |
| 442 num_available_frames = | |
| 443 endpoint_buffer_size_frames_ - num_queued_frames; | |
| 444 } else { | |
| 445 // While the stream is running, the system alternately sends one | |
| 446 // buffer or the other to the client. This form of double buffering | |
| 447 // is referred to as "ping-ponging". Each time the client receives | |
| 448 // a buffer from the system (triggers this event) the client must | |
| 449 // process the entire buffer. Calls to the GetCurrentPadding method | |
| 450 // are unnecessary because the packet size must always equal the | |
| 451 // buffer size. In contrast to the shared mode buffering scheme, | |
| 452 // the latency for an event-driven, exclusive-mode stream depends | |
| 453 // directly on the buffer size. | |
| 454 num_available_frames = endpoint_buffer_size_frames_; | |
| 455 } | |
| 406 | 456 |
| 407 // Check if there is enough available space to fit the packet size | 457 // Check if there is enough available space to fit the packet size |
| 408 // specified by the client. | 458 // specified by the client. |
| 409 if (FAILED(hr) || (num_available_frames < packet_size_frames_)) | 459 if (FAILED(hr) || (num_available_frames < packet_size_frames_)) |
| 410 continue; | 460 continue; |
| 411 | 461 |
| 412 // Derive the number of packets we need get from the client to | 462 // Derive the number of packets we need get from the client to |
| 413 // fill up the available area in the endpoint buffer. | 463 // fill up the available area in the endpoint buffer. |
| 464 // |num_packets| will always be one for exclusive-mode streams. | |
| 414 size_t num_packets = (num_available_frames / packet_size_frames_); | 465 size_t num_packets = (num_available_frames / packet_size_frames_); |
| 415 | 466 |
| 416 // Get data from the client/source. | 467 // Get data from the client/source. |
| 417 for (size_t n = 0; n < num_packets; ++n) { | 468 for (size_t n = 0; n < num_packets; ++n) { |
| 418 // Grab all available space in the rendering endpoint buffer | 469 // Grab all available space in the rendering endpoint buffer |
| 419 // into which the client can write a data packet. | 470 // into which the client can write a data packet. |
| 420 hr = audio_render_client_->GetBuffer(packet_size_frames_, | 471 hr = audio_render_client_->GetBuffer(packet_size_frames_, |
| 421 &audio_data); | 472 &audio_data); |
| 422 if (FAILED(hr)) { | 473 if (FAILED(hr)) { |
| 423 DLOG(ERROR) << "Failed to use rendering audio buffer: " | 474 DLOG(ERROR) << "Failed to use rendering audio buffer: " |
| (...skipping 80 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 504 PLOG(WARNING) << "Failed to disable MMCSS"; | 555 PLOG(WARNING) << "Failed to disable MMCSS"; |
| 505 } | 556 } |
| 506 } | 557 } |
| 507 | 558 |
| 508 void WASAPIAudioOutputStream::HandleError(HRESULT err) { | 559 void WASAPIAudioOutputStream::HandleError(HRESULT err) { |
| 509 NOTREACHED() << "Error code: " << std::hex << err; | 560 NOTREACHED() << "Error code: " << std::hex << err; |
| 510 if (source_) | 561 if (source_) |
| 511 source_->OnError(this, static_cast<int>(err)); | 562 source_->OnError(this, static_cast<int>(err)); |
| 512 } | 563 } |
| 513 | 564 |
| 514 HRESULT WASAPIAudioOutputStream::SetRenderDevice(ERole device_role) { | 565 HRESULT WASAPIAudioOutputStream::SetRenderDevice() { |
| 515 // Create the IMMDeviceEnumerator interface. | 566 // Create the IMMDeviceEnumerator interface. |
| 516 HRESULT hr = CoCreateInstance(__uuidof(MMDeviceEnumerator), | 567 HRESULT hr = CoCreateInstance(__uuidof(MMDeviceEnumerator), |
| 517 NULL, | 568 NULL, |
| 518 CLSCTX_INPROC_SERVER, | 569 CLSCTX_INPROC_SERVER, |
| 519 __uuidof(IMMDeviceEnumerator), | 570 __uuidof(IMMDeviceEnumerator), |
| 520 device_enumerator_.ReceiveVoid()); | 571 device_enumerator_.ReceiveVoid()); |
| 521 if (SUCCEEDED(hr)) { | 572 if (SUCCEEDED(hr)) { |
| 522 // Retrieve the default render audio endpoint for the specified role. | 573 // Retrieve the default render audio endpoint for the specified role. |
| 523 // Note that, in Windows Vista, the MMDevice API supports device roles | 574 // Note that, in Windows Vista, the MMDevice API supports device roles |
| 524 // but the system-supplied user interface programs do not. | 575 // but the system-supplied user interface programs do not. |
| 525 hr = device_enumerator_->GetDefaultAudioEndpoint( | 576 hr = device_enumerator_->GetDefaultAudioEndpoint( |
| 526 eRender, device_role, endpoint_device_.Receive()); | 577 eRender, device_role_, endpoint_device_.Receive()); |
| 527 if (FAILED(hr)) | 578 if (FAILED(hr)) |
| 528 return hr; | 579 return hr; |
| 529 | 580 |
| 530 // Verify that the audio endpoint device is active. That is, the audio | 581 // Verify that the audio endpoint device is active. That is, the audio |
| 531 // adapter that connects to the endpoint device is present and enabled. | 582 // adapter that connects to the endpoint device is present and enabled. |
| 532 DWORD state = DEVICE_STATE_DISABLED; | 583 DWORD state = DEVICE_STATE_DISABLED; |
| 533 hr = endpoint_device_->GetState(&state); | 584 hr = endpoint_device_->GetState(&state); |
| 534 if (SUCCEEDED(hr)) { | 585 if (SUCCEEDED(hr)) { |
| 535 if (!(state & DEVICE_STATE_ACTIVE)) { | 586 if (!(state & DEVICE_STATE_ACTIVE)) { |
| 536 DLOG(ERROR) << "Selected render device is not active."; | 587 DLOG(ERROR) << "Selected render device is not active."; |
| (...skipping 15 matching lines...) Expand all Loading... | |
| 552 return hr; | 603 return hr; |
| 553 } | 604 } |
| 554 | 605 |
| 555 HRESULT WASAPIAudioOutputStream::GetAudioEngineStreamFormat() { | 606 HRESULT WASAPIAudioOutputStream::GetAudioEngineStreamFormat() { |
| 556 // Retrieve the stream format that the audio engine uses for its internal | 607 // Retrieve the stream format that the audio engine uses for its internal |
| 557 // processing/mixing of shared-mode streams. | 608 // processing/mixing of shared-mode streams. |
| 558 return audio_client_->GetMixFormat(&audio_engine_mix_format_); | 609 return audio_client_->GetMixFormat(&audio_engine_mix_format_); |
| 559 } | 610 } |
| 560 | 611 |
| 561 bool WASAPIAudioOutputStream::DesiredFormatIsSupported() { | 612 bool WASAPIAudioOutputStream::DesiredFormatIsSupported() { |
| 613 // Determine, before calling IAudioClient::Initialize, whether the audio | |
|
tommi (sloooow) - chröme
2012/07/25 11:49:40
nit: ()
henrika (OOO until Aug 14)
2012/07/25 15:26:30
Done.
| |
| 614 // engine supports a particular stream format. | |
| 562 // In shared mode, the audio engine always supports the mix format, | 615 // In shared mode, the audio engine always supports the mix format, |
| 563 // which is stored in the |audio_engine_mix_format_| member. In addition, | 616 // which is stored in the |audio_engine_mix_format_| member. |
| 564 // the audio engine *might* support similar formats that have the same | |
| 565 // sample rate and number of channels as the mix format but differ in | |
| 566 // the representation of audio sample values. | |
| 567 base::win::ScopedCoMem<WAVEFORMATEX> closest_match; | 617 base::win::ScopedCoMem<WAVEFORMATEX> closest_match; |
| 568 HRESULT hr = audio_client_->IsFormatSupported(AUDCLNT_SHAREMODE_SHARED, | 618 HRESULT hr = audio_client_->IsFormatSupported(share_mode(), |
| 569 &format_, | 619 &format_, |
| 570 &closest_match); | 620 &closest_match); |
| 571 DLOG_IF(ERROR, hr == S_FALSE) << "Format is not supported " | 621 DLOG_IF(ERROR, hr == S_FALSE) << "Format is not supported " |
| 572 << "but a closest match exists."; | 622 << "but a closest match exists."; |
| 573 return (hr == S_OK); | 623 return (hr == S_OK); |
| 574 } | 624 } |
| 575 | 625 |
| 576 HRESULT WASAPIAudioOutputStream::InitializeAudioEngine() { | 626 HRESULT WASAPIAudioOutputStream::InitializeAudioEngine() { |
| 577 // TODO(henrika): this buffer scheme is still under development. | |
| 578 // The exact details are yet to be determined based on tests with different | |
| 579 // audio clients. | |
| 580 int glitch_free_buffer_size_ms = static_cast<int>(packet_size_ms_ + 0.5); | |
| 581 if (audio_engine_mix_format_->nSamplesPerSec == 48000) { | |
| 582 // Initial tests have shown that we have to add 10 ms extra to | |
| 583 // ensure that we don't run empty for any packet size. | |
| 584 glitch_free_buffer_size_ms += 10; | |
| 585 } else if (audio_engine_mix_format_->nSamplesPerSec == 44100) { | |
| 586 // Initial tests have shown that we have to add 20 ms extra to | |
| 587 // ensure that we don't run empty for any packet size. | |
| 588 glitch_free_buffer_size_ms += 20; | |
| 589 } else { | |
| 590 glitch_free_buffer_size_ms += 20; | |
| 591 } | |
| 592 DVLOG(1) << "glitch_free_buffer_size_ms: " << glitch_free_buffer_size_ms; | |
| 593 REFERENCE_TIME requested_buffer_duration_hns = | |
| 594 static_cast<REFERENCE_TIME>(glitch_free_buffer_size_ms * 10000); | |
| 595 | |
| 596 // Initialize the audio stream between the client and the device. | |
| 597 // We connect indirectly through the audio engine by using shared mode | |
| 598 // and WASAPI is initialized in an event driven mode. | |
| 599 // Note that this API ensures that the buffer is never smaller than the | |
| 600 // minimum buffer size needed to ensure glitch-free rendering. | |
| 601 // If we requests a buffer size that is smaller than the audio engine's | |
| 602 // minimum required buffer size, the method sets the buffer size to this | |
| 603 // minimum buffer size rather than to the buffer size requested. | |
| 604 HRESULT hr = audio_client_->Initialize(AUDCLNT_SHAREMODE_SHARED, | |
| 605 AUDCLNT_STREAMFLAGS_EVENTCALLBACK | | |
| 606 AUDCLNT_STREAMFLAGS_NOPERSIST, | |
| 607 requested_buffer_duration_hns, | |
| 608 0, | |
| 609 &format_, | |
| 610 NULL); | |
| 611 if (FAILED(hr)) | |
| 612 return hr; | |
| 613 | |
| 614 // Retrieve the length of the endpoint buffer shared between the client | |
| 615 // and the audio engine. The buffer length the buffer length determines | |
| 616 // the maximum amount of rendering data that the client can write to | |
| 617 // the endpoint buffer during a single processing pass. | |
| 618 // A typical value is 960 audio frames <=> 20ms @ 48kHz sample rate. | |
| 619 hr = audio_client_->GetBufferSize(&endpoint_buffer_size_frames_); | |
| 620 if (FAILED(hr)) | |
| 621 return hr; | |
| 622 DVLOG(1) << "endpoint buffer size: " << endpoint_buffer_size_frames_ | |
| 623 << " [frames]"; | |
| 624 #ifndef NDEBUG | 627 #ifndef NDEBUG |
| 625 // The period between processing passes by the audio engine is fixed for a | 628 // The period between processing passes by the audio engine is fixed for a |
| 626 // particular audio endpoint device and represents the smallest processing | 629 // particular audio endpoint device and represents the smallest processing |
| 627 // quantum for the audio engine. This period plus the stream latency between | 630 // quantum for the audio engine. This period plus the stream latency between |
| 628 // the buffer and endpoint device represents the minimum possible latency | 631 // the buffer and endpoint device represents the minimum possible latency |
| 629 // that an audio application can achieve in shared mode. | 632 // that an audio application can achieve in shared mode. |
| 630 REFERENCE_TIME default_device_period = 0; | 633 REFERENCE_TIME default_device_period = 0; |
| 631 REFERENCE_TIME minimum_device_period = 0; | 634 REFERENCE_TIME minimum_device_period = 0; |
| 632 HRESULT hr_dbg = audio_client_->GetDevicePeriod(&default_device_period, | 635 HRESULT hr_dbg = audio_client_->GetDevicePeriod(&default_device_period, |
| 633 &minimum_device_period); | 636 &minimum_device_period); |
| 634 if (SUCCEEDED(hr_dbg)) { | 637 if (SUCCEEDED(hr_dbg)) { |
| 635 // Shared mode device period. | 638 // Shared mode device period. |
| 636 DVLOG(1) << "default device period: " | 639 DVLOG(1) << "shared mode (default) device period: " |
| 637 << static_cast<double>(default_device_period / 10000.0) | 640 << static_cast<double>(default_device_period / 10000.0) |
| 638 << " [ms]"; | 641 << " [ms]"; |
| 639 // Exclusive mode device period. | 642 // Exclusive mode device period. |
| 640 DVLOG(1) << "minimum device period: " | 643 DVLOG(1) << "exclusive mode (minimum) device period: " |
| 641 << static_cast<double>(minimum_device_period / 10000.0) | 644 << static_cast<double>(minimum_device_period / 10000.0) |
| 642 << " [ms]"; | 645 << " [ms]"; |
| 643 } | 646 } |
| 644 | 647 |
| 645 REFERENCE_TIME latency = 0; | 648 REFERENCE_TIME latency = 0; |
| 646 hr_dbg = audio_client_->GetStreamLatency(&latency); | 649 hr_dbg = audio_client_->GetStreamLatency(&latency); |
| 647 if (SUCCEEDED(hr_dbg)) { | 650 if (SUCCEEDED(hr_dbg)) { |
| 648 DVLOG(1) << "stream latency: " << static_cast<double>(latency / 10000.0) | 651 DVLOG(1) << "stream latency: " << static_cast<double>(latency / 10000.0) |
| 649 << " [ms]"; | 652 << " [ms]"; |
| 650 } | 653 } |
| 651 #endif | 654 #endif |
| 652 | 655 |
| 656 HRESULT hr = S_FALSE; | |
| 657 REFERENCE_TIME requested_buffer_duration = 0; | |
| 658 | |
| 659 // Perform different initialization depending on if the device shall be | |
| 660 // opened in shared mode or in exclusive mode. | |
| 661 if (share_mode() == AUDCLNT_SHAREMODE_SHARED) { | |
| 662 // The device will be opened in shared mode and use the WAS format. | |
|
tommi (sloooow) - chröme
2012/07/25 11:49:40
what about pulling these two initialization paths
| |
| 663 | |
| 664 // TODO(henrika): this buffer scheme is still under development. | |
| 665 // The exact details are yet to be determined based on tests with different | |
| 666 // audio clients. | |
| 667 int glitch_free_buffer_size_ms = static_cast<int>(packet_size_ms_ + 0.5); | |
| 668 if (audio_engine_mix_format_->nSamplesPerSec == 48000) { | |
| 669 // Initial tests have shown that we have to add 10 ms extra to | |
| 670 // ensure that we don't run empty for any packet size. | |
| 671 glitch_free_buffer_size_ms += 10; | |
| 672 } else if (audio_engine_mix_format_->nSamplesPerSec == 44100) { | |
| 673 // Initial tests have shown that we have to add 20 ms extra to | |
| 674 // ensure that we don't run empty for any packet size. | |
| 675 glitch_free_buffer_size_ms += 20; | |
| 676 } else { | |
| 677 glitch_free_buffer_size_ms += 20; | |
| 678 } | |
| 679 DVLOG(1) << "glitch_free_buffer_size_ms: " << glitch_free_buffer_size_ms; | |
| 680 requested_buffer_duration = | |
| 681 static_cast<REFERENCE_TIME>(glitch_free_buffer_size_ms * 10000); | |
| 682 | |
| 683 // Initialize the audio stream between the client and the device. | |
| 684 // We connect indirectly through the audio engine by using shared mode | |
| 685 // and WASAPI is initialized in an event driven mode. | |
| 686 // Note that this API ensures that the buffer is never smaller than the | |
| 687 // minimum buffer size needed to ensure glitch-free rendering. | |
| 688 // If we request a buffer size that is smaller than the audio engine's | |
| 689 // minimum required buffer size, the method sets the buffer size to this | |
| 690 // minimum buffer size rather than to the buffer size requested. | |
| 691 hr = audio_client_->Initialize(AUDCLNT_SHAREMODE_SHARED, | |
| 692 AUDCLNT_STREAMFLAGS_EVENTCALLBACK | | |
| 693 AUDCLNT_STREAMFLAGS_NOPERSIST, | |
| 694 requested_buffer_duration, | |
| 695 0, | |
| 696 &format_, | |
| 697 NULL); | |
| 698 } else { | |
| 699 // The device will be opened in exclusive mode and use the application | |
| 700 // specified format. | |
| 701 | |
| 702 float f = (1000.0 * packet_size_frames_) / format_.nSamplesPerSec; | |
| 703 requested_buffer_duration = static_cast<REFERENCE_TIME>(f*10000.0 + 0.5); | |
| 704 | |
| 705 // Initialize the audio stream between the client and the device. | |
| 706 // For an exclusive-mode stream that uses event-driven buffering, the | |
| 707 // caller must specify nonzero values for hnsPeriodicity and | |
| 708 // hnsBufferDuration, and the values of these two parameters must be equal. | |
| 709 // The Initialize method allocates two buffers for the stream. Each buffer | |
| 710 // is equal in duration to the value of the hnsBufferDuration parameter. | |
| 711 // Following the Initialize call for a rendering stream, the caller should | |
| 712 // fill the first of the two buffers before starting the stream. | |
| 713 hr = audio_client_->Initialize(AUDCLNT_SHAREMODE_EXCLUSIVE, | |
| 714 AUDCLNT_STREAMFLAGS_EVENTCALLBACK | | |
| 715 AUDCLNT_STREAMFLAGS_NOPERSIST, | |
| 716 requested_buffer_duration, | |
| 717 requested_buffer_duration, | |
| 718 &format_, | |
| 719 NULL); | |
| 720 if (FAILED(hr)) { | |
| 721 if (hr == AUDCLNT_E_BUFFER_SIZE_NOT_ALIGNED) { | |
| 722 DLOG(ERROR) << "AUDCLNT_E_BUFFER_SIZE_NOT_ALIGNED"; | |
| 723 | |
| 724 UINT32 aligned_buffer_size = 0; | |
| 725 audio_client_->GetBufferSize(&aligned_buffer_size); | |
| 726 DVLOG(1) << "Use aligned buffer size instead: " << aligned_buffer_size; | |
| 727 audio_client_.Release(); | |
| 728 | |
| 729 // Calculate new aligned periodicity. Each unit of reference time | |
| 730 // is 100 nanoseconds. | |
| 731 REFERENCE_TIME aligned_buffer_duration = static_cast<REFERENCE_TIME>( | |
| 732 10000000.0 * aligned_buffer_size / format_.nSamplesPerSec + 0.5); | |
| 733 | |
| 734 // It is possible to re-activate and re-initialize the audio client | |
| 735 // at this stage but we bail out with an error code instead and | |
| 736 // combine it with a log message which informs about the suggested | |
| 737 // aligned buffer size which should be used instead. | |
| 738 DVLOG(1) << "aligned_buffer_duration: " | |
| 739 << static_cast<double>(aligned_buffer_duration / 10000.0) | |
| 740 << " [ms]"; | |
| 741 } else if (hr == AUDCLNT_E_INVALID_DEVICE_PERIOD) { | |
| 742 // We will get this error if we try to use a smaller buffer size than | |
| 743 // the minimum supported size (usually ~3ms on Windows 7). | |
| 744 DLOG(ERROR) << "AUDCLNT_E_INVALID_DEVICE_PERIOD"; | |
| 745 } | |
| 746 } | |
| 747 } | |
| 748 | |
| 749 if (FAILED(hr)) { | |
| 750 DVLOG(1) << "IAudioClient::Initialize() failed: " << std::hex << hr; | |
|
tommi (sloooow) - chröme
2012/07/25 11:49:40
should this perhaps be LOG(WARNING)?
henrika (OOO until Aug 14)
2012/07/25 15:26:30
Changed to PLOG(WARNING)
| |
| 751 return hr; | |
| 752 } | |
| 753 | |
| 754 // Retrieve the length of the endpoint buffer. The buffer length represents | |
| 755 // the maximum amount of rendering data that the client can write to | |
| 756 // the endpoint buffer during a single processing pass. | |
| 757 // A typical value is 960 audio frames <=> 20ms @ 48kHz sample rate. | |
| 758 hr = audio_client_->GetBufferSize(&endpoint_buffer_size_frames_); | |
| 759 if (FAILED(hr)) | |
| 760 return hr; | |
| 761 DVLOG(1) << "endpoint buffer size: " << endpoint_buffer_size_frames_ | |
| 762 << " [frames]"; | |
| 763 | |
| 764 // The buffer scheme for exclusive mode streams is not designed for max | |
| 765 // flexibility. We only allow a "perfect match" between the packet size set | |
| 766 // by the user and the actual endpoint buffer size. | |
| 767 if (share_mode() == AUDCLNT_SHAREMODE_EXCLUSIVE) { | |
|
no longer working on chromium
2012/07/25 12:32:57
combine two if:
if (share_mode() == AUDCLNT_SHAREMODE_EXCLUSIVE &&
henrika (OOO until Aug 14)
2012/07/25 15:26:30
Done.
| |
| 768 if (endpoint_buffer_size_frames_ != packet_size_frames_) { | |
| 769 hr = AUDCLNT_E_INVALID_SIZE; | |
| 770 DLOG(ERROR) << "AUDCLNT_E_INVALID_SIZE"; | |
| 771 return hr; | |
| 772 } | |
| 773 } | |
| 774 | |
| 653 // Set the event handle that the audio engine will signal each time | 775 // Set the event handle that the audio engine will signal each time |
| 654 // a buffer becomes ready to be processed by the client. | 776 // a buffer becomes ready to be processed by the client. |
| 655 hr = audio_client_->SetEventHandle(audio_samples_render_event_.Get()); | 777 hr = audio_client_->SetEventHandle(audio_samples_render_event_.Get()); |
| 656 if (FAILED(hr)) | 778 if (FAILED(hr)) |
| 657 return hr; | 779 return hr; |
| 658 | 780 |
| 659 // Get access to the IAudioRenderClient interface. This interface | 781 // Get access to the IAudioRenderClient interface. This interface |
| 660 // enables us to write output data to a rendering endpoint buffer. | 782 // enables us to write output data to a rendering endpoint buffer. |
| 661 // The methods in this interface manage the movement of data packets | 783 // The methods in this interface manage the movement of data packets |
| 662 // that contain audio-rendering data. | 784 // that contain audio-rendering data. |
| (...skipping 160 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 823 // are now re-initiated and it is now possible to re-start audio rendering. | 945 // are now re-initiated and it is now possible to re-start audio rendering. |
| 824 | 946 |
| 825 // Start rendering again using the new default audio endpoint. | 947 // Start rendering again using the new default audio endpoint. |
| 826 hr = audio_client_->Start(); | 948 hr = audio_client_->Start(); |
| 827 | 949 |
| 828 restart_rendering_mode_ = false; | 950 restart_rendering_mode_ = false; |
| 829 return SUCCEEDED(hr); | 951 return SUCCEEDED(hr); |
| 830 } | 952 } |
| 831 | 953 |
| 832 } // namespace media | 954 } // namespace media |
| OLD | NEW |