// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "media/audio/win/audio_low_latency_input_win.h"

#include <comdef.h>

#include "base/logging.h"
#include "base/memory/scoped_ptr.h"
#include "base/utf_string_conversions.h"
#include "media/audio/audio_util.h"
#include "media/audio/win/audio_manager_win.h"

using base::win::ScopedComPtr;

#ifndef NDEBUG
static void DLogFormat(const char* str, const WAVEFORMATEX* format) {
  DLOG(INFO) << str << std::endl
             << "  wFormatTag     : " << format->wFormatTag << std::endl
             << "  nChannels      : " << format->nChannels << std::endl
             << "  nSamplesPerSec : " << format->nSamplesPerSec << std::endl
             << "  nAvgBytesPerSec: " << format->nAvgBytesPerSec << std::endl
             << "  wBitsPerSample : " << format->wBitsPerSample << std::endl
             << "  nBlockAlign    : " << format->nBlockAlign << std::endl
             << "  cbSize         : " << format->cbSize << std::endl;
}
#endif

WASAPIAudioInputStream::WASAPIAudioInputStream(
    AudioManagerWin* manager, const AudioParameters& params, ERole device_role)
    : manager_(manager),
      capture_thread_(NULL),
      opened_(false),
      started_(false),
      endpoint_buffer_size_frames_(0),
      device_role_(device_role),
      sink_(NULL) {
  DCHECK(manager_);

  // Load the Avrt DLL if not already loaded. Required to support MMCSS.
  // The result is evaluated outside of DCHECK() so that the initialization
  // is not compiled away in builds where DCHECK() is a no-op.
  bool avrt_init = avrt_.Initialize();
  DCHECK(avrt_init) << "Failed to load the Avrt.dll.";

  // Set up the desired capture format specified by the client.
  format_.nSamplesPerSec = params.sample_rate;
  format_.wFormatTag = WAVE_FORMAT_PCM;
  format_.wBitsPerSample = params.bits_per_sample;
  format_.nChannels = params.channels;
  format_.nBlockAlign = (format_.wBitsPerSample / 8) * format_.nChannels;
  format_.nAvgBytesPerSec = format_.nSamplesPerSec * format_.nBlockAlign;
  format_.cbSize = 0;
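  // For example, a 16-bit stereo stream at 48 kHz gives
  // nBlockAlign = (16 / 8) * 2 = 4 bytes per frame and
  // nAvgBytesPerSec = 48000 * 4 = 192000 bytes per second.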
#ifndef NDEBUG
  DLogFormat("Desired capture format:", &format_);
#endif

  // Size in bytes of each audio frame.
  frame_size_ = format_.nBlockAlign;
  // Store size of audio packets which we expect to get from the audio
  // endpoint device in each capture event.
  packet_size_frames_ = params.GetPacketSize() / format_.nBlockAlign;
  packet_size_bytes_ = params.GetPacketSize();
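  // With the example format above, a 10 ms packet would correspond to
  // packet_size_frames_ = 480 and packet_size_bytes_ = 1920.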
  DLOG(INFO) << "Number of bytes per audio frame  : " << frame_size_;
  DLOG(INFO) << "Number of audio frames per packet: " << packet_size_frames_;

  // All events are auto-reset events and non-signaled initially.

  // Create the event which the audio engine will signal each time
  // a buffer becomes ready to be processed by the client.
  audio_samples_ready_event_.Set(CreateEvent(NULL, FALSE, FALSE, NULL));
  DCHECK(audio_samples_ready_event_.IsValid());

  // Create the event which will be set in Stop() when capturing shall stop.
  stop_capture_event_.Set(CreateEvent(NULL, FALSE, FALSE, NULL));
  DCHECK(stop_capture_event_.IsValid());

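  // Conversion factor from milliseconds to audio frames, e.g. 48 frames
  // per millisecond at a 48 kHz sample rate. Run() uses it to translate
  // the measured capture delay into a frame count.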
  ms_to_frame_count_ = static_cast<double>(params.sample_rate) / 1000.0;

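  // Multiplying a performance-counter (QPC) tick count by this factor
  // converts it to 100-ns units, the same time base as the
  // |first_audio_frame_timestamp| that IAudioCaptureClient::GetBuffer()
  // reports in Run().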
  LARGE_INTEGER performance_frequency;
  if (QueryPerformanceFrequency(&performance_frequency)) {
    perf_count_to_100ns_units_ =
        (10000000.0 / static_cast<double>(performance_frequency.QuadPart));
  } else {
    LOG(ERROR) << "High-resolution performance counters are not supported.";
    perf_count_to_100ns_units_ = 0.0;
  }
}

WASAPIAudioInputStream::~WASAPIAudioInputStream() {
}

bool WASAPIAudioInputStream::Open() {
  // Verify that we are not already opened.
  if (opened_)
    return false;

  // Obtain a reference to the IMMDevice interface of the default capturing
  // device with the specified role.
  HRESULT hr = SetCaptureDevice(device_role_);
  if (FAILED(hr)) {
    HandleError(hr);
    return false;
  }

  // Obtain an IAudioClient interface which enables us to create and initialize
  // an audio stream between an audio application and the audio engine.
  hr = ActivateCaptureDevice();
  if (FAILED(hr)) {
    HandleError(hr);
    return false;
  }

  // Retrieve the stream format which the audio engine uses for its internal
  // processing/mixing of shared-mode streams.
  hr = GetAudioEngineStreamFormat();
  if (FAILED(hr)) {
    HandleError(hr);
    return false;
  }

  // Verify that the selected audio endpoint supports the specified format
  // set during construction.
  if (!DesiredFormatIsSupported()) {
    hr = E_INVALIDARG;
    HandleError(hr);
    return false;
  }

  // Initialize the audio stream between the client and the device using
  // shared mode and the lowest possible glitch-free latency.
  hr = InitializeAudioEngine();
  if (FAILED(hr)) {
    HandleError(hr);
    return false;
  }

  opened_ = true;

  return true;
}

void WASAPIAudioInputStream::Start(AudioInputCallback* callback) {
  DCHECK(callback);
  DCHECK(opened_);

  if (!opened_)
    return;

  if (started_)
    return;

  sink_ = callback;

  // Create and start the thread that will drive the capturing by waiting for
  // capture events.
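  // DelegateSimpleThread invokes WASAPIAudioInputStream::Run() on the new
  // thread.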
  capture_thread_ =
      new base::DelegateSimpleThread(this, "wasapi_capture_thread");
  capture_thread_->Start();

  // Start streaming data between the endpoint buffer and the audio engine.
  HRESULT hr = audio_client_->Start();
  DLOG_IF(ERROR, FAILED(hr)) << "Failed to start input streaming.";

  started_ = SUCCEEDED(hr);
}

void WASAPIAudioInputStream::Stop() {
  if (!started_)
    return;

  // Shut down the capture thread.
  if (stop_capture_event_.IsValid()) {
    SetEvent(stop_capture_event_.Get());
  }

  // Stop the input audio streaming.
  HRESULT hr = audio_client_->Stop();
  if (FAILED(hr)) {
    LOG(ERROR) << "Failed to stop input streaming.";
  }

  // Wait until the thread completes and perform cleanup.
  if (capture_thread_) {
    SetEvent(stop_capture_event_.Get());
    capture_thread_->Join();
    capture_thread_ = NULL;
  }

  started_ = false;
}

void WASAPIAudioInputStream::Close() {
  // It is valid to call Close() before calling Open() or Start().
  // It is also valid to call Close() after Start() has been called.
  Stop();
  if (sink_) {
    sink_->OnClose(this);
    sink_ = NULL;
  }

  // Inform the audio manager that we have been closed. This will cause our
  // destruction.
  manager_->ReleaseInputStream(this);
}

double WASAPIAudioInputStream::HardwareSampleRate(ERole device_role) {
  // It is assumed that this static method is called from a COM thread, i.e.,
  // CoInitializeEx() is not called here to avoid STA/MTA conflicts.
  ScopedComPtr<IMMDeviceEnumerator> enumerator;
  HRESULT hr = CoCreateInstance(__uuidof(MMDeviceEnumerator),
                                NULL,
                                CLSCTX_INPROC_SERVER,
                                __uuidof(IMMDeviceEnumerator),
                                enumerator.ReceiveVoid());
  if (FAILED(hr)) {
    NOTREACHED() << "error code: " << hr;
    return 0.0;
  }

  ScopedComPtr<IMMDevice> endpoint_device;
  hr = enumerator->GetDefaultAudioEndpoint(eCapture,
                                           device_role,
                                           endpoint_device.Receive());
  if (FAILED(hr)) {
    NOTREACHED() << "error code: " << hr;
    return 0.0;
  }

  ScopedComPtr<IAudioClient> audio_client;
  hr = endpoint_device->Activate(__uuidof(IAudioClient),
                                 CLSCTX_INPROC_SERVER,
                                 NULL,
                                 audio_client.ReceiveVoid());
  if (FAILED(hr)) {
    NOTREACHED() << "error code: " << hr;
    return 0.0;
  }

  ScopedComMem<WAVEFORMATEX> audio_engine_mix_format;
  hr = audio_client->GetMixFormat(audio_engine_mix_format.Receive());
  if (FAILED(hr)) {
    NOTREACHED() << "error code: " << hr;
    return 0.0;
  }

  return static_cast<double>(audio_engine_mix_format->nSamplesPerSec);
}

void WASAPIAudioInputStream::Run() {
  ScopedCOMInitializerMTA com_init;

  // Increase the thread priority.
  capture_thread_->SetThreadPriority(base::kThreadPriority_RealtimeAudio);

  // Enable MMCSS to ensure that this thread receives prioritized access to
  // CPU resources.
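  // "Pro Audio" is one of the task profiles that MMCSS defines in the
  // registry; it identifies this thread as time-critical audio work.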
  DWORD task_index = 0;
  HANDLE mm_task = avrt_.AvSetMmThreadCharacteristics("Pro Audio", &task_index);
  bool mmcss_is_ok = (mm_task &&
      avrt_.AvSetMmThreadPriority(mm_task, AVRT_PRIORITY_CRITICAL));
  if (!mmcss_is_ok) {
    // Failed to enable MMCSS on this thread. It is not fatal but can lead
    // to reduced QoS at high load.
    DWORD err = GetLastError();
    LOG(WARNING) << "Failed to enable MMCSS (error code=" << err << ").";
  }

  // Allocate a buffer with a size that enables us to take care of cases like:
  // 1) The recorded buffer size is smaller than, or does not exactly match,
  //    the selected packet size used in each callback.
  // 2) The selected packet size is larger than the recorded buffer size in
  //    each event.
  size_t buffer_frame_index = 0;
  size_t capture_buffer_size = std::max(
      2 * endpoint_buffer_size_frames_ * frame_size_,
      2 * packet_size_frames_ * frame_size_);
  scoped_array<uint8> capture_buffer(new uint8[capture_buffer_size]);

  LARGE_INTEGER now_count;
  bool recording = true;
  HANDLE wait_array[2] = {stop_capture_event_, audio_samples_ready_event_};

  while (recording) {
    HRESULT hr = S_FALSE;

    // Wait for a close-down event or a new capture event.
    DWORD wait_result = WaitForMultipleObjects(2, wait_array, FALSE, INFINITE);
    switch (wait_result) {
      case WAIT_FAILED:
        recording = false;
        LOG(ERROR) << "WASAPI capturing failed with error code "
                   << GetLastError();
        break;
      case WAIT_OBJECT_0 + 0:
        // |stop_capture_event_| has been set.
        recording = false;
        break;
      case WAIT_OBJECT_0 + 1:
        // |audio_samples_ready_event_| has been set.
        BYTE* data_ptr = NULL;
        UINT32 num_frames_to_read = 0;
        DWORD flags = 0;
        UINT64 device_position = 0;
        UINT64 first_audio_frame_timestamp = 0;

        // Retrieve the amount of data in the capture endpoint buffer,
        // replace it with silence if required, create callbacks for each
        // packet and store non-delivered data for the next event.
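        // |flags| may contain AUDCLNT_BUFFERFLAGS_SILENT, in which case the
        // packet shall be treated as silence. |first_audio_frame_timestamp|
        // is the performance-counter time, in 100-ns units, of the first
        // frame in the packet.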
        hr = audio_capture_client_->GetBuffer(&data_ptr,
                                              &num_frames_to_read,
                                              &flags,
                                              &device_position,
                                              &first_audio_frame_timestamp);
        if (SUCCEEDED(hr)) {
          if (num_frames_to_read != 0) {
            size_t pos = buffer_frame_index * frame_size_;
            size_t num_bytes = num_frames_to_read * frame_size_;
            DCHECK_GE(capture_buffer_size, pos + num_bytes);

            if (flags & AUDCLNT_BUFFERFLAGS_SILENT) {
              // Clear out the local buffer since silence is reported.
              memset(&capture_buffer[pos], 0, num_bytes);
            } else {
              // Copy captured data from audio engine buffer to local buffer.
              memcpy(&capture_buffer[pos], data_ptr, num_bytes);
            }

            buffer_frame_index += num_frames_to_read;
          }

          hr = audio_capture_client_->ReleaseBuffer(num_frames_to_read);
          if (FAILED(hr))
            HandleError(hr);

          // Derive a delay estimate for the captured audio packet.
          // The value contains two parts (A+B), where A is the delay of the
          // first audio frame in the packet and B is the extra delay contained
          // in any stored data. Unit is in audio frames.
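          // Expressed as a formula (all timestamps in 100-ns units):
          //   delay_frames =
          //       (t_now - t_first_frame) / 10000 * ms_to_frame_count_   (A)
          //       + buffer_frame_index - num_frames_to_read              (B)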
          QueryPerformanceCounter(&now_count);
          double audio_delay_frames =
              ((perf_count_to_100ns_units_ * now_count.QuadPart -
                first_audio_frame_timestamp) / 10000.0) * ms_to_frame_count_ +
              buffer_frame_index - num_frames_to_read;

          // Deliver captured data to the registered consumer using a packet
          // size which was specified at construction.
          uint32 delay_frames = static_cast<uint32>(audio_delay_frames + 0.5);
          while (buffer_frame_index >= packet_size_frames_) {
            uint8* audio_data = reinterpret_cast<uint8*>(capture_buffer.get());

            // Deliver data packet and delay estimation to the user.
            sink_->OnData(this,
                          audio_data,
                          packet_size_bytes_,
                          delay_frames * frame_size_);

            // Store parts of the recorded data which can't be delivered
            // using the current packet size. The stored section will be used
            // either in the next while-loop iteration or in the next
            // capture event.
            memmove(&capture_buffer[0],
                    &capture_buffer[packet_size_bytes_],
                    (buffer_frame_index - packet_size_frames_) * frame_size_);

            buffer_frame_index -= packet_size_frames_;
            delay_frames -= packet_size_frames_;
          }
        }
        break;
    }
  }

  // Disable MMCSS.
  if (mm_task && !avrt_.AvRevertMmThreadCharacteristics(mm_task)) {
    DWORD err = GetLastError();
    LOG(WARNING) << "Failed to disable MMCSS (error code=" << err << ").";
  }
}

void WASAPIAudioInputStream::HandleError(HRESULT err) {
  _com_error com_error(err);
  std::string message(WideToUTF8(com_error.ErrorMessage()));
  DLOG(ERROR) << "Error code: " << err;
  NOTREACHED() << "Error details: " << message;

  if (sink_)
    sink_->OnError(this, static_cast<int>(err));
}

HRESULT WASAPIAudioInputStream::SetCaptureDevice(ERole device_role) {
  ScopedComPtr<IMMDeviceEnumerator> enumerator;
  HRESULT hr = CoCreateInstance(__uuidof(MMDeviceEnumerator),
                                NULL,
                                CLSCTX_INPROC_SERVER,
                                __uuidof(IMMDeviceEnumerator),
                                enumerator.ReceiveVoid());
  if (SUCCEEDED(hr)) {
    // Retrieve the default capture audio endpoint for the specified role.
    // Note that, in Windows Vista, the MMDevice API supports device roles
    // but the system-supplied user interface programs do not.
    hr = enumerator->GetDefaultAudioEndpoint(eCapture,
                                             device_role,
                                             endpoint_device_.Receive());

    // Verify that the audio endpoint device is active. That is, the audio
    // adapter that connects to the endpoint device is present and enabled.
    // Only query the state if the endpoint was actually retrieved above;
    // otherwise |endpoint_device_| is NULL.
    if (SUCCEEDED(hr)) {
      DWORD state = DEVICE_STATE_DISABLED;
      hr = endpoint_device_->GetState(&state);
      if (SUCCEEDED(hr)) {
        if (!(state & DEVICE_STATE_ACTIVE)) {
          DLOG(ERROR) << "Selected capture device is not active.";
          hr = E_ACCESSDENIED;
        }
      }
    }
  }

  return hr;
}

HRESULT WASAPIAudioInputStream::ActivateCaptureDevice() {
  // Creates and activates an IAudioClient COM object given the selected
  // capture endpoint device.
  HRESULT hr = endpoint_device_->Activate(__uuidof(IAudioClient),
                                          CLSCTX_INPROC_SERVER,
                                          NULL,
                                          audio_client_.ReceiveVoid());
  return hr;
}

HRESULT WASAPIAudioInputStream::GetAudioEngineStreamFormat() {
  // Retrieve the stream format that the audio engine uses for its internal
  // processing/mixing of shared-mode streams.
  HRESULT hr = audio_client_->GetMixFormat(audio_engine_mix_format_.Receive());
#ifndef NDEBUG
  if (SUCCEEDED(hr))
    DLogFormat("Audio Engine's format:", audio_engine_mix_format_.get());
#endif
  return hr;
}

bool WASAPIAudioInputStream::DesiredFormatIsSupported() {
  // In shared mode, the audio engine always supports the mix format,
  // which is stored in the |audio_engine_mix_format_| member. In addition,
  // the audio engine *might* support similar formats that have the same
  // sample rate and number of channels as the mix format but differ in
  // the representation of audio sample values.
  ScopedComMem<WAVEFORMATEX> closest_match;
  HRESULT hr = audio_client_->IsFormatSupported(AUDCLNT_SHAREMODE_SHARED,
                                                &format_,
                                                closest_match.Receive());
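  // For a shared-mode stream, IsFormatSupported() returns S_OK if the format
  // is supported, S_FALSE if it is not but a close match exists (returned in
  // |closest_match|), and AUDCLNT_E_UNSUPPORTED_FORMAT otherwise.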
  if (hr == S_FALSE) {
    DLOG(ERROR) << "Format is not supported but a closest match exists.";
#ifndef NDEBUG
    DLogFormat("Closest suggested capture format:", closest_match.get());
#endif
  }
  return (hr == S_OK);
}

HRESULT WASAPIAudioInputStream::InitializeAudioEngine() {
  // Initialize the audio stream between the client and the device.
  // We connect indirectly through the audio engine by using shared mode,
  // and WASAPI is initialized in an event-driven mode.
  // Note that |hnsBufferDuration| is set to 0, which ensures that the
  // buffer is never smaller than the minimum buffer size needed to ensure
  // that glitches do not occur between the periodic processing passes.
  // This setting should lead to the lowest possible latency.
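  // AUDCLNT_STREAMFLAGS_EVENTCALLBACK requires that an event handle is
  // registered with SetEventHandle() before the stream is started; this is
  // done further down in this method. AUDCLNT_STREAMFLAGS_NOPERSIST keeps
  // volume and mute settings for this session from persisting across
  // application sessions.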
  HRESULT hr = audio_client_->Initialize(AUDCLNT_SHAREMODE_SHARED,
                                         AUDCLNT_STREAMFLAGS_EVENTCALLBACK |
                                         AUDCLNT_STREAMFLAGS_NOPERSIST,
                                         0,  // hnsBufferDuration
                                         0,
                                         &format_,
                                         NULL);
  if (FAILED(hr))
    return hr;

  // Retrieve the length of the endpoint buffer shared between the client
  // and the audio engine. The buffer length determines the maximum amount
  // of capture data that the audio engine can read from the endpoint buffer
  // during a single processing pass.
  // A typical value is 960 audio frames <=> 20 ms @ 48 kHz sample rate.
  hr = audio_client_->GetBufferSize(&endpoint_buffer_size_frames_);
  if (FAILED(hr))
    return hr;
  DLOG(INFO) << "endpoint buffer size: " << endpoint_buffer_size_frames_
             << " [frames]";

#ifndef NDEBUG
  // The period between processing passes by the audio engine is fixed for a
  // particular audio endpoint device and represents the smallest processing
  // quantum for the audio engine. This period plus the stream latency between
  // the buffer and endpoint device represents the minimum possible latency
  // that an audio application can achieve.
  REFERENCE_TIME device_period_shared_mode = 0;
  REFERENCE_TIME device_period_exclusive_mode = 0;
  HRESULT hr_dbg = audio_client_->GetDevicePeriod(
      &device_period_shared_mode, &device_period_exclusive_mode);
  if (SUCCEEDED(hr_dbg)) {
    DLOG(INFO) << "device period: "
               << static_cast<double>(device_period_shared_mode) / 10000.0
               << " [ms]";
  }

  REFERENCE_TIME latency = 0;
  hr_dbg = audio_client_->GetStreamLatency(&latency);
  if (SUCCEEDED(hr_dbg)) {
    DLOG(INFO) << "stream latency: "
               << static_cast<double>(latency) / 10000.0 << " [ms]";
  }
#endif

  // Set the event handle that the audio engine will signal each time
  // a buffer becomes ready to be processed by the client.
  hr = audio_client_->SetEventHandle(audio_samples_ready_event_.Get());
  if (FAILED(hr))
    return hr;

  // Get access to the IAudioCaptureClient interface. This interface
  // enables us to read input data from the capture endpoint buffer.
  hr = audio_client_->GetService(__uuidof(IAudioCaptureClient),
                                 audio_capture_client_.ReceiveVoid());
  return hr;
}