// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "media/audio/win/audio_low_latency_output_win.h"

#include "base/logging.h"
#include "base/memory/scoped_ptr.h"
#include "base/utf_string_conversions.h"
#include "media/audio/audio_util.h"
#include "media/audio/win/audio_manager_win.h"
#include "media/audio/win/avrt_wrapper_win.h"

using base::win::ScopedComPtr;
using base::win::ScopedCOMInitializer;

WASAPIAudioOutputStream::WASAPIAudioOutputStream(AudioManagerWin* manager,
                                                 const AudioParameters& params,
                                                 ERole device_role)
    : com_init_(ScopedCOMInitializer::kMTA),
      manager_(manager),
      render_thread_(NULL),
      opened_(false),
      started_(false),
      volume_(1.0),
      endpoint_buffer_size_frames_(0),
      device_role_(device_role),
      num_written_frames_(0),
      source_(NULL) {
  DCHECK(manager_);

  // Load the Avrt DLL if not already loaded. Required to support MMCSS.
  bool avrt_init = avrt::Initialize();
  DCHECK(avrt_init) << "Failed to load the Avrt.dll";

  // Set up the desired render format specified by the client.
  format_.nSamplesPerSec = params.sample_rate;
  format_.wFormatTag = WAVE_FORMAT_PCM;
  format_.wBitsPerSample = params.bits_per_sample;
  format_.nChannels = params.channels;
  format_.nBlockAlign = (format_.wBitsPerSample / 8) * format_.nChannels;
  format_.nAvgBytesPerSec = format_.nSamplesPerSec * format_.nBlockAlign;
  format_.cbSize = 0;
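  // Illustrative values (not asserted by the code above): for 16-bit stereo
  // PCM at 48 kHz, nBlockAlign = (16 / 8) * 2 = 4 bytes per frame and
  // nAvgBytesPerSec = 48000 * 4 = 192000 bytes per second.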

  // Size in bytes of each audio frame.
  frame_size_ = format_.nBlockAlign;

  // Store size (in different units) of audio packets which we expect to
  // get from the audio endpoint device in each render event.
  packet_size_frames_ = params.GetPacketSize() / format_.nBlockAlign;
  packet_size_bytes_ = params.GetPacketSize();
  packet_size_ms_ = (1000.0 * packet_size_frames_) / params.sample_rate;
  DVLOG(1) << "Number of bytes per audio frame : " << frame_size_;
  DVLOG(1) << "Number of audio frames per packet: " << packet_size_frames_;
  DVLOG(1) << "Number of milliseconds per packet: " << packet_size_ms_;
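  // Worked example under the same illustrative format: if GetPacketSize()
  // returns 3840 bytes and nBlockAlign is 4, then packet_size_frames_ is
  // 3840 / 4 = 960 frames and packet_size_ms_ is (1000.0 * 960) / 48000
  // = 20 ms per render packet.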

  // All events are auto-reset events and non-signaled initially.

  // Create the event which the audio engine will signal each time
  // a buffer becomes ready to be processed by the client.
  audio_samples_render_event_.Set(CreateEvent(NULL, FALSE, FALSE, NULL));
  DCHECK(audio_samples_render_event_.IsValid());
  // Create the event which will be set in Stop() when rendering shall stop.
  stop_render_event_.Set(CreateEvent(NULL, FALSE, FALSE, NULL));
  DCHECK(stop_render_event_.IsValid());
}

WASAPIAudioOutputStream::~WASAPIAudioOutputStream() {}

bool WASAPIAudioOutputStream::Open() {
  // Verify that we are not already opened.
  if (opened_)
    return false;

  // Obtain a reference to the IMMDevice interface of the default rendering
  // device with the specified role.
  HRESULT hr = SetRenderDevice(device_role_);
  if (FAILED(hr)) {
    HandleError(hr);
    return false;
  }

  // Obtain an IAudioClient interface which enables us to create and initialize
  // an audio stream between an audio application and the audio engine.
  hr = ActivateRenderDevice();
  if (FAILED(hr)) {
    HandleError(hr);
    return false;
  }

  // Retrieve the stream format which the audio engine uses for its internal
  // processing/mixing of shared-mode streams.
  hr = GetAudioEngineStreamFormat();
  if (FAILED(hr)) {
    HandleError(hr);
    return false;
  }

  // Verify that the selected audio endpoint supports the specified format
  // set during construction.
  if (!DesiredFormatIsSupported()) {
    hr = E_INVALIDARG;
    HandleError(hr);
    return false;
  }
  // Initialize the audio stream between the client and the device using
  // shared mode and the lowest possible glitch-free latency.
  hr = InitializeAudioEngine();
  if (FAILED(hr)) {
    HandleError(hr);
    return false;
  }

  opened_ = true;

  return true;
}

void WASAPIAudioOutputStream::Start(AudioSourceCallback* callback) {
  DCHECK(callback);
  DCHECK(opened_);

  if (!opened_)
    return;

  if (started_)
    return;

  source_ = callback;

  // Create and start the thread that will drive the rendering by waiting for
  // render events.
  render_thread_ = new base::DelegateSimpleThread(this, "wasapi_render_thread");
  render_thread_->Start();

  // Avoid start-up glitches by filling up the endpoint buffer with "silence"
  // before starting the stream.
  BYTE* data_ptr = NULL;
  HRESULT hr = audio_render_client_->GetBuffer(endpoint_buffer_size_frames_,
                                               &data_ptr);
  if (SUCCEEDED(hr)) {
    // Using the AUDCLNT_BUFFERFLAGS_SILENT flag eliminates the need to
    // explicitly write silence data to the rendering buffer.
    audio_render_client_->ReleaseBuffer(endpoint_buffer_size_frames_,
                                        AUDCLNT_BUFFERFLAGS_SILENT);
    num_written_frames_ = endpoint_buffer_size_frames_;

    // Sanity check: verify that the endpoint buffer is filled with silence.
    UINT32 num_queued_frames = 0;
    audio_client_->GetCurrentPadding(&num_queued_frames);
    DCHECK(num_queued_frames == num_written_frames_);
  }

  // Start streaming data between the endpoint buffer and the audio engine.
  hr = audio_client_->Start();
  DLOG_IF(ERROR, FAILED(hr)) << "Failed to start output streaming: "
                             << std::hex << hr;

  started_ = SUCCEEDED(hr);
}

void WASAPIAudioOutputStream::Stop() {
  if (!started_)
    return;

  // Shut down the render thread.
  if (stop_render_event_.IsValid()) {
    SetEvent(stop_render_event_.Get());
  }

  // Stop output audio streaming.
  HRESULT hr = audio_client_->Stop();
  DLOG_IF(ERROR, FAILED(hr)) << "Failed to stop output streaming: "
                             << std::hex << hr;

  // Wait until the thread completes and perform cleanup. The stop event was
  // already signaled above and stays set until the render thread consumes
  // it, so there is no need to signal it a second time here.
  if (render_thread_) {
    render_thread_->Join();
    render_thread_ = NULL;
  }

  started_ = false;
}

void WASAPIAudioOutputStream::Close() {
  // It is valid to call Close() before calling Open() or Start().
  // It is also valid to call Close() after Start() has been called.
  Stop();

  // Inform the audio manager that we have been closed. This will cause our
  // destruction.
  manager_->ReleaseOutputStream(this);
}

void WASAPIAudioOutputStream::SetVolume(double volume) {
  if (volume < 0.0 || volume > 1.0)
    return;
  volume_ = static_cast<float>(volume);
}

void WASAPIAudioOutputStream::GetVolume(double* volume) {
  *volume = static_cast<double>(volume_);
}

// static
double WASAPIAudioOutputStream::HardwareSampleRate(ERole device_role) {
  // It is assumed that this static method is called from a COM thread, i.e.,
  // CoInitializeEx() is not called here again to avoid STA/MTA conflicts.
  ScopedComPtr<IMMDeviceEnumerator> enumerator;
  HRESULT hr = CoCreateInstance(__uuidof(MMDeviceEnumerator),
                                NULL,
                                CLSCTX_INPROC_SERVER,
                                __uuidof(IMMDeviceEnumerator),
                                enumerator.ReceiveVoid());
  if (FAILED(hr)) {
    NOTREACHED() << "error code: " << std::hex << hr;
    return 0.0;
  }

  ScopedComPtr<IMMDevice> endpoint_device;
  hr = enumerator->GetDefaultAudioEndpoint(eRender,
                                           device_role,
                                           endpoint_device.Receive());
  if (FAILED(hr)) {
    // This will happen if there's no audio output device found or available
    // (e.g. some audio cards that have outputs will still report them as
    // "not found" when no speaker is plugged into the output jack).
    LOG(WARNING) << "No audio end point: " << std::hex << hr;
    return 0.0;
  }

  ScopedComPtr<IAudioClient> audio_client;
  hr = endpoint_device->Activate(__uuidof(IAudioClient),
                                 CLSCTX_INPROC_SERVER,
                                 NULL,
                                 audio_client.ReceiveVoid());
  if (FAILED(hr)) {
    NOTREACHED() << "error code: " << std::hex << hr;
    return 0.0;
  }

  base::win::ScopedCoMem<WAVEFORMATEX> audio_engine_mix_format;
  hr = audio_client->GetMixFormat(&audio_engine_mix_format);
  if (FAILED(hr)) {
    NOTREACHED() << "error code: " << std::hex << hr;
    return 0.0;
  }

  return static_cast<double>(audio_engine_mix_format->nSamplesPerSec);
}

void WASAPIAudioOutputStream::Run() {
  ScopedCOMInitializer com_init(ScopedCOMInitializer::kMTA);

  // Increase the thread priority.
  render_thread_->SetThreadPriority(base::kThreadPriority_RealtimeAudio);

  // Enable MMCSS to ensure that this thread receives prioritized access to
  // CPU resources.
  DWORD task_index = 0;
  HANDLE mm_task = avrt::AvSetMmThreadCharacteristics(L"Pro Audio",
                                                      &task_index);
  bool mmcss_is_ok =
      (mm_task && avrt::AvSetMmThreadPriority(mm_task, AVRT_PRIORITY_CRITICAL));
  if (!mmcss_is_ok) {
    // Failed to enable MMCSS on this thread. It is not fatal but can lead
    // to reduced QoS at high load.
    DWORD err = GetLastError();
    LOG(WARNING) << "Failed to enable MMCSS (error code=" << err << ").";
  }

  HRESULT hr = S_FALSE;

  bool playing = true;
  bool error = false;
  HANDLE wait_array[2] = {stop_render_event_, audio_samples_render_event_};
  UINT64 device_frequency = 0;

  // The IAudioClock interface enables us to monitor a stream's data
  // rate and the current position in the stream. Allocate it before we
  // start spinning.
  ScopedComPtr<IAudioClock> audio_clock;
  hr = audio_client_->GetService(__uuidof(IAudioClock),
                                 audio_clock.ReceiveVoid());
  if (SUCCEEDED(hr)) {
    // The device frequency is the frequency generated by the hardware clock in
    // the audio device. The GetFrequency() method reports a constant frequency.
    hr = audio_clock->GetFrequency(&device_frequency);
  }
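  // Illustrative interpretation: GetFrequency() reports device-clock ticks
  // per second, so a reported frequency of 48000 together with a later
  // GetPosition() value of 96000 means two seconds of audio have played.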
  error = FAILED(hr);
  DLOG_IF(ERROR, error) << "Failed to acquire IAudioClock interface: "
                        << std::hex << hr;

  while (playing && !error) {
    // Wait for a close-down event or a new render event.
    DWORD wait_result = WaitForMultipleObjects(2, wait_array, FALSE, INFINITE);

    switch (wait_result) {
      case WAIT_OBJECT_0 + 0:
        // |stop_render_event_| has been set.
        playing = false;
        break;
      case WAIT_OBJECT_0 + 1:
        {
          // |audio_samples_render_event_| has been set.
          UINT32 num_queued_frames = 0;
          uint8* audio_data = NULL;

          // Get the padding value which represents the amount of rendering
          // data that is queued up to play in the endpoint buffer.
          hr = audio_client_->GetCurrentPadding(&num_queued_frames);

          // Determine how much new data we can write to the buffer without
          // the risk of overwriting previously written data that the audio
          // engine has not yet read from the buffer.
          size_t num_available_frames =
              endpoint_buffer_size_frames_ - num_queued_frames;

          // Check if there is enough available space to fit the packet size
          // specified by the client.
          if (num_available_frames < packet_size_frames_) {
            continue;
          }

          // Derive the number of packets we need to get from the client to
          // fill up the available area in the endpoint buffer.
          size_t num_packets = (num_available_frames / packet_size_frames_);
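          // Worked example with illustrative numbers: if 1920 frames are
          // available and packet_size_frames_ is 960, the loop below asks
          // the client for 1920 / 960 = 2 packets in this render pass.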

          // Get data from the client/source.
          for (size_t n = 0; n < num_packets; ++n) {
            // Grab all available space in the rendering endpoint buffer
            // into which the client can write a data packet.
            hr = audio_render_client_->GetBuffer(packet_size_frames_,
                                                 &audio_data);

            // Derive the audio delay which corresponds to the delay between
            // a render event and the time when the first audio sample in a
            // packet is played out through the speaker. This delay value
            // can typically be utilized by an acoustic echo-control (AEC)
            // unit at the render side.
            if (SUCCEEDED(hr) && audio_data) {
              UINT64 position = 0;
              int audio_delay_bytes = 0;
              hr = audio_clock->GetPosition(&position, NULL);
              if (SUCCEEDED(hr)) {
                // Stream position of the sample that is currently playing
                // through the speaker.
                double pos_sample_playing_frames = format_.nSamplesPerSec *
                    (static_cast<double>(position) / device_frequency);

                // Stream position of the last sample written to the endpoint
                // buffer. Note that the packet we are about to receive in
                // the upcoming callback is also included.
                size_t pos_last_sample_written_frames =
                    num_written_frames_ + packet_size_frames_;

                // Derive the actual delay value which will be fed to the
                // render client using the OnMoreData() callback.
                audio_delay_bytes = (pos_last_sample_written_frames -
                    pos_sample_playing_frames) * frame_size_;
              }
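              // Worked example with illustrative numbers: at 48 kHz with
              // 4-byte frames, if the last written position is frame 10560
              // and the clock says frame 9600 is playing, the delay fed to
              // OnMoreData() is (10560 - 9600) * 4 = 3840 bytes, i.e. 20 ms.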

              // Read a data packet from the registered client source and
              // deliver a delay estimate in the same callback to the client.
              // A time stamp is also stored in the AudioBuffersState. This
              // time stamp can be used at the client side to compensate for
              // the delay between the usage of the delay value and the time
              // of generation.
              uint32 num_filled_bytes = source_->OnMoreData(
                  this, audio_data, packet_size_bytes_,
                  AudioBuffersState(0, audio_delay_bytes));

              // Perform in-place, software-volume adjustments.
              media::AdjustVolume(audio_data,
                                  num_filled_bytes,
                                  format_.nChannels,
                                  format_.wBitsPerSample >> 3,
                                  volume_);

              // Zero out the part of the packet which has not been filled by
              // the client.
              if (num_filled_bytes < packet_size_bytes_) {
                memset(&audio_data[num_filled_bytes],
                       0,
                       (packet_size_bytes_ - num_filled_bytes));
              }
            }

            // Release the buffer space acquired in the GetBuffer() call.
            DWORD flags(0);
            hr = audio_render_client_->ReleaseBuffer(packet_size_frames_,
                                                     flags);

            num_written_frames_ += packet_size_frames_;
          }
        }
        break;
      default:
        error = true;
        break;
    }
  }

  if (playing && error) {
    // TODO(henrika): perhaps it is worth improving the cleanup here by e.g.
    // stopping the audio client, joining the thread etc.?
    NOTREACHED() << "WASAPI rendering failed with error code "
                 << GetLastError();
  }

  // Disable MMCSS.
  if (mm_task && !avrt::AvRevertMmThreadCharacteristics(mm_task)) {
    PLOG(WARNING) << "Failed to disable MMCSS";
  }
}

void WASAPIAudioOutputStream::HandleError(HRESULT err) {
  NOTREACHED() << "Error code: " << std::hex << err;
  if (source_)
    source_->OnError(this, static_cast<int>(err));
}

HRESULT WASAPIAudioOutputStream::SetRenderDevice(ERole device_role) {
  ScopedComPtr<IMMDeviceEnumerator> enumerator;
  HRESULT hr = CoCreateInstance(__uuidof(MMDeviceEnumerator),
                                NULL,
                                CLSCTX_INPROC_SERVER,
                                __uuidof(IMMDeviceEnumerator),
                                enumerator.ReceiveVoid());
  if (SUCCEEDED(hr)) {
    // Retrieve the default render audio endpoint for the specified role.
    // Note that, in Windows Vista, the MMDevice API supports device roles
    // but the system-supplied user interface programs do not.
    hr = enumerator->GetDefaultAudioEndpoint(eRender,
                                             device_role,
                                             endpoint_device_.Receive());
    if (FAILED(hr))
      return hr;

    // Verify that the audio endpoint device is active. That is, the audio
    // adapter that connects to the endpoint device is present and enabled.
    DWORD state = DEVICE_STATE_DISABLED;
    hr = endpoint_device_->GetState(&state);
    if (SUCCEEDED(hr)) {
      if (!(state & DEVICE_STATE_ACTIVE)) {
        DLOG(ERROR) << "Selected render device is not active.";
        hr = E_ACCESSDENIED;
      }
    }
  }

  return hr;
}

HRESULT WASAPIAudioOutputStream::ActivateRenderDevice() {
  // Creates and activates an IAudioClient COM object given the selected
  // render endpoint device.
  HRESULT hr = endpoint_device_->Activate(__uuidof(IAudioClient),
                                          CLSCTX_INPROC_SERVER,
                                          NULL,
                                          audio_client_.ReceiveVoid());
  return hr;
}

HRESULT WASAPIAudioOutputStream::GetAudioEngineStreamFormat() {
  // Retrieve the stream format that the audio engine uses for its internal
  // processing/mixing of shared-mode streams.
  return audio_client_->GetMixFormat(&audio_engine_mix_format_);
}

bool WASAPIAudioOutputStream::DesiredFormatIsSupported() {
  // In shared mode, the audio engine always supports the mix format,
  // which is stored in the |audio_engine_mix_format_| member. In addition,
  // the audio engine *might* support similar formats that have the same
  // sample rate and number of channels as the mix format but differ in
  // the representation of audio sample values.
  base::win::ScopedCoMem<WAVEFORMATEX> closest_match;
  HRESULT hr = audio_client_->IsFormatSupported(AUDCLNT_SHAREMODE_SHARED,
                                                &format_,
                                                &closest_match);
  DLOG_IF(ERROR, hr == S_FALSE) << "Format is not supported "
                                << "but a closest match exists.";
  return (hr == S_OK);
}

HRESULT WASAPIAudioOutputStream::InitializeAudioEngine() {
  // Initialize the audio stream between the client and the device.
  // We connect indirectly through the audio engine by using shared mode,
  // and WASAPI is initialized in an event-driven mode.
  // Note that this API ensures that the buffer is never smaller than the
  // minimum buffer size needed to guarantee glitch-free rendering.
  // If we request a buffer size that is smaller than the audio engine's
  // minimum required buffer size, the method sets the buffer size to this
  // minimum buffer size rather than to the buffer size requested.
  // Typical examples:
  // - User requests 5 ms buffer size => actual buffer is set to 20 ms (=min).
  // - User requests 10 ms buffer size => actual buffer is set to 20 ms (=min).
  // - User requests 30 ms buffer size => actual buffer is set to 30 ms (>min).
  REFERENCE_TIME requested_buffer_duration_hns =
      static_cast<REFERENCE_TIME>((packet_size_ms_ * 10000.0) + 0.5);
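  // REFERENCE_TIME is expressed in 100-nanosecond units, so milliseconds are
  // converted by multiplying by 10000; e.g. an (illustrative) 20 ms packet
  // maps to 200000 units, and the +0.5 rounds to the nearest unit.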
  HRESULT hr = audio_client_->Initialize(AUDCLNT_SHAREMODE_SHARED,
                                         AUDCLNT_STREAMFLAGS_EVENTCALLBACK |
                                         AUDCLNT_STREAMFLAGS_NOPERSIST,
                                         requested_buffer_duration_hns,
                                         0,
                                         &format_,
                                         NULL);
  if (FAILED(hr))
    return hr;

  // Retrieve the length of the endpoint buffer shared between the client
  // and the audio engine. The buffer length determines the maximum amount
  // of rendering data that the client can write to the endpoint buffer
  // during a single processing pass.
  // A typical value is 960 audio frames <=> 20 ms @ 48 kHz sample rate.
  hr = audio_client_->GetBufferSize(&endpoint_buffer_size_frames_);
  if (FAILED(hr))
    return hr;
  DVLOG(1) << "endpoint buffer size: " << endpoint_buffer_size_frames_
           << " [frames]";

#ifndef NDEBUG
  // The period between processing passes by the audio engine is fixed for a
  // particular audio endpoint device and represents the smallest processing
  // quantum for the audio engine. This period plus the stream latency between
  // the buffer and endpoint device represents the minimum possible latency
  // that an audio application can achieve.
  // TODO(henrika): possibly remove this section when all parts are ready.
  REFERENCE_TIME device_period_shared_mode = 0;
  REFERENCE_TIME device_period_exclusive_mode = 0;
  HRESULT hr_dbg = audio_client_->GetDevicePeriod(
      &device_period_shared_mode, &device_period_exclusive_mode);
  if (SUCCEEDED(hr_dbg)) {
    DVLOG(1) << "device period: "
             << static_cast<double>(device_period_shared_mode / 10000.0)
             << " [ms]";
  }

  REFERENCE_TIME latency = 0;
  hr_dbg = audio_client_->GetStreamLatency(&latency);
  if (SUCCEEDED(hr_dbg)) {
    DVLOG(1) << "stream latency: " << static_cast<double>(latency / 10000.0)
             << " [ms]";
  }
#endif

  // Set the event handle that the audio engine will signal each time
  // a buffer becomes ready to be processed by the client.
  hr = audio_client_->SetEventHandle(audio_samples_render_event_.Get());
  if (FAILED(hr))
    return hr;

  // Get access to the IAudioRenderClient interface. This interface
  // enables us to write output data to a rendering endpoint buffer.
  // The methods in this interface manage the movement of data packets
  // that contain audio-rendering data.
  hr = audio_client_->GetService(__uuidof(IAudioRenderClient),
                                 audio_render_client_.ReceiveVoid());
  return hr;
}