Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "media/audio/win/audio_low_latency_input_win.h" | 5 #include "media/audio/win/audio_low_latency_input_win.h" |
| 6 | 6 |
| 7 #include <memory> | 7 #include <memory> |
| 8 | 8 |
| 9 #include "base/logging.h" | 9 #include "base/logging.h" |
| 10 #include "base/metrics/histogram_macros.h" | |
| 10 #include "base/strings/utf_string_conversions.h" | 11 #include "base/strings/utf_string_conversions.h" |
| 11 #include "base/trace_event/trace_event.h" | 12 #include "base/trace_event/trace_event.h" |
| 12 #include "media/audio/audio_device_description.h" | 13 #include "media/audio/audio_device_description.h" |
| 13 #include "media/audio/win/audio_manager_win.h" | 14 #include "media/audio/win/audio_manager_win.h" |
| 14 #include "media/audio/win/avrt_wrapper_win.h" | 15 #include "media/audio/win/avrt_wrapper_win.h" |
| 15 #include "media/audio/win/core_audio_util_win.h" | 16 #include "media/audio/win/core_audio_util_win.h" |
| 16 #include "media/base/audio_bus.h" | 17 #include "media/base/audio_bus.h" |
| 17 | 18 |
| 18 using base::win::ScopedComPtr; | 19 using base::win::ScopedComPtr; |
| 19 using base::win::ScopedCOMInitializer; | 20 using base::win::ScopedCOMInitializer; |
| 20 | 21 |
| 21 namespace media { | 22 namespace media { |
| 22 | 23 |
| 23 WASAPIAudioInputStream::WASAPIAudioInputStream(AudioManagerWin* manager, | 24 WASAPIAudioInputStream::WASAPIAudioInputStream(AudioManagerWin* manager, |
| 24 const AudioParameters& params, | 25 const AudioParameters& params, |
| 25 const std::string& device_id) | 26 const std::string& device_id) |
| 26 : manager_(manager), | 27 : manager_(manager), |
| 27 capture_thread_(NULL), | |
| 28 opened_(false), | |
| 29 started_(false), | |
| 30 frame_size_(0), | |
| 31 packet_size_frames_(0), | |
| 32 packet_size_bytes_(0), | |
| 33 endpoint_buffer_size_frames_(0), | |
| 34 device_id_(device_id), | 28 device_id_(device_id), |
| 35 perf_count_to_100ns_units_(0.0), | 29 audio_bus_(media::AudioBus::Create(params)) { |
| 36 ms_to_frame_count_(0.0), | |
| 37 sink_(NULL), | |
| 38 audio_bus_(media::AudioBus::Create(params)), | |
| 39 mute_done_(false) { | |
| 40 DCHECK(manager_); | 30 DCHECK(manager_); |
| 31 DCHECK(!device_id_.empty()); | |
| 41 | 32 |
| 42 // Load the Avrt DLL if not already loaded. Required to support MMCSS. | 33 // Load the Avrt DLL if not already loaded. Required to support MMCSS. |
| 43 bool avrt_init = avrt::Initialize(); | 34 bool avrt_init = avrt::Initialize(); |
| 44 DCHECK(avrt_init) << "Failed to load the Avrt.dll"; | 35 DCHECK(avrt_init) << "Failed to load the Avrt.dll"; |
| 45 | 36 |
| 46 // Set up the desired capture format specified by the client. | 37 // Set up the desired capture format specified by the client. |
| 47 format_.nSamplesPerSec = params.sample_rate(); | 38 format_.nSamplesPerSec = params.sample_rate(); |
| 48 format_.wFormatTag = WAVE_FORMAT_PCM; | 39 format_.wFormatTag = WAVE_FORMAT_PCM; |
| 49 format_.wBitsPerSample = params.bits_per_sample(); | 40 format_.wBitsPerSample = params.bits_per_sample(); |
| 50 format_.nChannels = params.channels(); | 41 format_.nChannels = params.channels(); |
| (...skipping 31 matching lines...) | |
| 82 DLOG(ERROR) << "High-resolution performance counters are not supported."; | 73 DLOG(ERROR) << "High-resolution performance counters are not supported."; |
| 83 } | 74 } |
| 84 } | 75 } |
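
The constructor lines skipped above presumably set up the two conversion factors that Run() later uses for its delay estimate, ending in the QueryPerformanceFrequency() check whose failure branch is visible at old line 82. A minimal sketch of that setup, assuming QPC is the time source; the member names match the CL, but since the lines sit inside the skipped hunk the exact arithmetic is an inference:

```cpp
// Sketch only: plausible derivation of the conversion factors used in Run().
#include <windows.h>

double perf_count_to_100ns_units = 0.0;  // QPC ticks -> 100-ns units.
double ms_to_frame_count = 0.0;          // Milliseconds -> audio frames.

void InitTimeConversions(int sample_rate) {
  LARGE_INTEGER freq;
  if (QueryPerformanceFrequency(&freq)) {
    // One second is 1e7 100-ns units, so dividing by the counter frequency
    // yields 100-ns units per QPC tick.
    perf_count_to_100ns_units =
        10000000.0 / static_cast<double>(freq.QuadPart);
  }
  // The sample rate is frames per second, i.e. sample_rate / 1000 per ms.
  ms_to_frame_count = sample_rate / 1000.0;
}
```
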
| 85 | 76 |
| 86 WASAPIAudioInputStream::~WASAPIAudioInputStream() { | 77 WASAPIAudioInputStream::~WASAPIAudioInputStream() { |
| 87 DCHECK(CalledOnValidThread()); | 78 DCHECK(CalledOnValidThread()); |
| 88 } | 79 } |
| 89 | 80 |
| 90 bool WASAPIAudioInputStream::Open() { | 81 bool WASAPIAudioInputStream::Open() { |
| 91 DCHECK(CalledOnValidThread()); | 82 DCHECK(CalledOnValidThread()); |
| 83 DCHECK_EQ(OPEN_RESULT_OK, open_result_); | |
| 84 | |
| 92 // Verify that we are not already opened. | 85 // Verify that we are not already opened. |
| 93 if (opened_) | 86 if (opened_) |
| 94 return false; | 87 return false; |
| 95 | 88 |
| 96 // Obtain a reference to the IMMDevice interface of the capturing | 89 // Obtain a reference to the IMMDevice interface of the capturing |
| 97 // device with the specified unique identifier or role which was | 90 // device with the specified unique identifier or role which was |
| 98 // set at construction. | 91 // set at construction. |
| 99 HRESULT hr = SetCaptureDevice(); | 92 HRESULT hr = SetCaptureDevice(); |
| 100 if (FAILED(hr)) | 93 if (FAILED(hr)) { |
| 94 ReportOpenResult(); | |
| 101 return false; | 95 return false; |
| 96 } | |
| 102 | 97 |
| 103 // Obtain an IAudioClient interface which enables us to create and initialize | 98 // Obtain an IAudioClient interface which enables us to create and initialize |
| 104 // an audio stream between an audio application and the audio engine. | 99 // an audio stream between an audio application and the audio engine. |
| 105 hr = ActivateCaptureDevice(); | 100 hr = endpoint_device_->Activate(__uuidof(IAudioClient), CLSCTX_INPROC_SERVER, |
| 106 if (FAILED(hr)) | 101 NULL, audio_client_.ReceiveVoid()); |
| 102 if (FAILED(hr)) { | |
| 103 open_result_ = OPEN_RESULT_ACTIVATION_FAILED; | |
| 104 ReportOpenResult(); | |
| 107 return false; | 105 return false; |
| 106 } | |
| 108 | 107 |
| 108 #ifndef NDEBUG | |
| 109 // Retrieve the stream format which the audio engine uses for its internal | 109 // Retrieve the stream format which the audio engine uses for its internal |
| 110 // processing/mixing of shared-mode streams. This function call is for | 110 // processing/mixing of shared-mode streams. This function call is for |
| 111 // diagnostic purposes only and only in debug mode. | 111 // diagnostic purposes only and only in debug mode. |
| 112 #ifndef NDEBUG | |
| 113 hr = GetAudioEngineStreamFormat(); | 112 hr = GetAudioEngineStreamFormat(); |
| 114 #endif | 113 #endif |
| 115 | 114 |
| 116 // Verify that the selected audio endpoint supports the specified format | 115 // Verify that the selected audio endpoint supports the specified format |
| 117 // set during construction. | 116 // set during construction. |
| 118 if (!DesiredFormatIsSupported()) | 117 if (!DesiredFormatIsSupported()) { |
| 118 open_result_ = OPEN_RESULT_FORMAT_NOT_SUPPORTED; | |
| 119 ReportOpenResult(); | |
| 119 return false; | 120 return false; |
| 121 } | |
| 120 | 122 |
| 121 // Initialize the audio stream between the client and the device using | 123 // Initialize the audio stream between the client and the device using |
| 122 // shared mode and the lowest possible glitch-free latency. | 124 // shared mode and the lowest possible glitch-free latency. |
| 123 hr = InitializeAudioEngine(); | 125 hr = InitializeAudioEngine(); |
| 126 ReportOpenResult(); // Report before we assign a value to |opened_|. | |
| 127 opened_ = SUCCEEDED(hr); | |
| 128 DCHECK(open_result_ == OPEN_RESULT_OK || !opened_); | |
| 124 | 129 |
| 125 opened_ = SUCCEEDED(hr); | |
| 126 return opened_; | 130 return opened_; |
| 127 } | 131 } |
| 128 | 132 |
| 129 void WASAPIAudioInputStream::Start(AudioInputCallback* callback) { | 133 void WASAPIAudioInputStream::Start(AudioInputCallback* callback) { |
| 130 DCHECK(CalledOnValidThread()); | 134 DCHECK(CalledOnValidThread()); |
| 131 DCHECK(callback); | 135 DCHECK(callback); |
| 132 DLOG_IF(ERROR, !opened_) << "Open() has not been called successfully"; | 136 DLOG_IF(ERROR, !opened_) << "Open() has not been called successfully"; |
| 133 if (!opened_) | 137 if (!opened_) |
| 134 return; | 138 return; |
| 135 | 139 |
| (...skipping 16 matching lines...) | |
| 152 | 156 |
| 153 DCHECK(!sink_); | 157 DCHECK(!sink_); |
| 154 sink_ = callback; | 158 sink_ = callback; |
| 155 | 159 |
| 156 // Starts periodic AGC microphone measurements if the AGC has been enabled | 160 // Starts periodic AGC microphone measurements if the AGC has been enabled |
| 157 // using SetAutomaticGainControl(). | 161 // using SetAutomaticGainControl(). |
| 158 StartAgc(); | 162 StartAgc(); |
| 159 | 163 |
| 160 // Create and start the thread that will drive the capturing by waiting for | 164 // Create and start the thread that will drive the capturing by waiting for |
| 161 // capture events. | 165 // capture events. |
| 162 capture_thread_ = new base::DelegateSimpleThread( | 166 DCHECK(!capture_thread_.get()); |
| 167 capture_thread_.reset(new base::DelegateSimpleThread( | |
| 163 this, "wasapi_capture_thread", | 168 this, "wasapi_capture_thread", |
| 164 base::SimpleThread::Options(base::ThreadPriority::REALTIME_AUDIO)); | 169 base::SimpleThread::Options(base::ThreadPriority::REALTIME_AUDIO))); |
| 165 capture_thread_->Start(); | 170 capture_thread_->Start(); |
| 166 | 171 |
| 167 // Start streaming data between the endpoint buffer and the audio engine. | 172 // Start streaming data between the endpoint buffer and the audio engine. |
| 168 HRESULT hr = audio_client_->Start(); | 173 HRESULT hr = audio_client_->Start(); |
| 169 DLOG_IF(ERROR, FAILED(hr)) << "Failed to start input streaming."; | 174 DLOG_IF(ERROR, FAILED(hr)) << "Failed to start input streaming."; |
| 170 | 175 |
| 171 if (SUCCEEDED(hr) && audio_render_client_for_loopback_.get()) | 176 if (SUCCEEDED(hr) && audio_render_client_for_loopback_.get()) |
| 172 hr = audio_render_client_for_loopback_->Start(); | 177 hr = audio_render_client_for_loopback_->Start(); |
| 173 | 178 |
| 174 started_ = SUCCEEDED(hr); | 179 started_ = SUCCEEDED(hr); |
| (...skipping 27 matching lines...) | |
| 202 // Stop the input audio streaming. | 207 // Stop the input audio streaming. |
| 203 HRESULT hr = audio_client_->Stop(); | 208 HRESULT hr = audio_client_->Stop(); |
| 204 if (FAILED(hr)) { | 209 if (FAILED(hr)) { |
| 205 LOG(ERROR) << "Failed to stop input streaming."; | 210 LOG(ERROR) << "Failed to stop input streaming."; |
| 206 } | 211 } |
| 207 | 212 |
| 208 // Wait until the thread completes and perform cleanup. | 213 // Wait until the thread completes and perform cleanup. |
| 209 if (capture_thread_) { | 214 if (capture_thread_) { |
| 210 SetEvent(stop_capture_event_.Get()); | 215 SetEvent(stop_capture_event_.Get()); |
| 211 capture_thread_->Join(); | 216 capture_thread_->Join(); |
| 212 capture_thread_ = NULL; | 217 capture_thread_.reset(); |
| 213 } | 218 } |
| 214 | 219 |
| 215 started_ = false; | 220 started_ = false; |
| 216 sink_ = NULL; | 221 sink_ = NULL; |
| 217 } | 222 } |
| 218 | 223 |
| 219 void WASAPIAudioInputStream::Close() { | 224 void WASAPIAudioInputStream::Close() { |
| 220 DVLOG(1) << "WASAPIAudioInputStream::Close()"; | 225 DVLOG(1) << "WASAPIAudioInputStream::Close()"; |
| 221 // It is valid to call Close() before calling Open() or Start(). | 226 // It is valid to call Close() before calling Open() or Start(). |
| 222 // It is also valid to call Close() after Start() has been called. | 227 // It is also valid to call Close() after Start() has been called. |
| (...skipping 69 matching lines...) | |
| 292 | 297 |
| 293 return is_muted != FALSE; | 298 return is_muted != FALSE; |
| 294 } | 299 } |
| 295 | 300 |
| 296 void WASAPIAudioInputStream::Run() { | 301 void WASAPIAudioInputStream::Run() { |
| 297 ScopedCOMInitializer com_init(ScopedCOMInitializer::kMTA); | 302 ScopedCOMInitializer com_init(ScopedCOMInitializer::kMTA); |
| 298 | 303 |
| 299 // Enable MMCSS to ensure that this thread receives prioritized access to | 304 // Enable MMCSS to ensure that this thread receives prioritized access to |
| 300 // CPU resources. | 305 // CPU resources. |
| 301 DWORD task_index = 0; | 306 DWORD task_index = 0; |
| 302 HANDLE mm_task = avrt::AvSetMmThreadCharacteristics(L"Pro Audio", | 307 HANDLE mm_task = |

tommi (sloooow) - chröme, 2017/02/07 19:34:46: The changes here and below (all of Run()), are the

| 303 &task_index); | 308 avrt::AvSetMmThreadCharacteristics(L"Pro Audio", &task_index); |
| 304 bool mmcss_is_ok = | 309 bool mmcss_is_ok = |
| 305 (mm_task && avrt::AvSetMmThreadPriority(mm_task, AVRT_PRIORITY_CRITICAL)); | 310 (mm_task && avrt::AvSetMmThreadPriority(mm_task, AVRT_PRIORITY_CRITICAL)); |
| 306 if (!mmcss_is_ok) { | 311 if (!mmcss_is_ok) { |
| 307 // Failed to enable MMCSS on this thread. It is not fatal but can lead | 312 // Failed to enable MMCSS on this thread. It is not fatal but can lead |
| 308 // to reduced QoS at high load. | 313 // to reduced QoS at high load. |
| 309 DWORD err = GetLastError(); | 314 DWORD err = GetLastError(); |
| 310 LOG(WARNING) << "Failed to enable MMCSS (error code=" << err << ")."; | 315 LOG(WARNING) << "Failed to enable MMCSS (error code=" << err << ")."; |
| 311 } | 316 } |
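
For readers unfamiliar with MMCSS, the avrt:: wrapper calls above map onto a small Win32 surface. A minimal sketch of that underlying pattern, statically linked against avrt.lib rather than loading Avrt.dll at runtime as the CL's wrapper does:

```cpp
// Minimal MMCSS usage: join the "Pro Audio" task, boost priority, do the
// real-time work, then revert. Link against avrt.lib.
#include <windows.h>
#include <avrt.h>

void RunWithMmcss() {
  DWORD task_index = 0;
  HANDLE mm_task = AvSetMmThreadCharacteristicsW(L"Pro Audio", &task_index);
  if (mm_task && !AvSetMmThreadPriority(mm_task, AVRT_PRIORITY_CRITICAL)) {
    // Non-fatal: the thread keeps running, just without the boost.
  }

  // ... event-driven capture loop would run here ...

  if (mm_task)
    AvRevertMmThreadCharacteristics(mm_task);
}
```
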
| 312 | 317 |
| 313 // Allocate a buffer with a size that enables us to take care of cases like: | 318 // Allocate a buffer with a size that enables us to take care of cases like: |
| 314 // 1) The recorded buffer size is smaller than, or does not match exactly with, | 319 // 1) The recorded buffer size is smaller than, or does not match exactly with, |
| 315 // the selected packet size used in each callback. | 320 // the selected packet size used in each callback. |
| 316 // 2) The selected buffer size is larger than the recorded buffer size in | 321 // 2) The selected buffer size is larger than the recorded buffer size in |
| 317 // each event. | 322 // each event. |
| 318 size_t buffer_frame_index = 0; | 323 size_t buffer_frame_index = 0; |
| 319 size_t capture_buffer_size = std::max( | 324 size_t capture_buffer_size = |
| 320 2 * endpoint_buffer_size_frames_ * frame_size_, | 325 std::max(2 * endpoint_buffer_size_frames_ * frame_size_, |
| 321 2 * packet_size_frames_ * frame_size_); | 326 2 * packet_size_frames_ * frame_size_); |
| 322 std::unique_ptr<uint8_t[]> capture_buffer(new uint8_t[capture_buffer_size]); | 327 std::unique_ptr<uint8_t[]> capture_buffer(new uint8_t[capture_buffer_size]); |
| 323 | 328 |
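
To make the sizing rule concrete, here is a worked example with illustrative parameters (a 48 kHz, 16-bit stereo stream with 10 ms packets and a 20 ms endpoint buffer; these numbers are not from the CL):

```cpp
// Worked example of the capture buffer sizing (illustrative numbers only).
#include <algorithm>
#include <cstddef>

int main() {
  const size_t frame_size = 2 /* channels */ * 2 /* bytes/sample */;  // 4
  const size_t packet_size_frames = 480;           // 10 ms @ 48 kHz
  const size_t endpoint_buffer_size_frames = 960;  // 20 ms @ 48 kHz

  // max(2 * 960 * 4, 2 * 480 * 4) = max(7680, 3840) = 7680 bytes,
  // i.e. room for two full endpoint buffers of slack.
  const size_t capture_buffer_size =
      std::max(2 * endpoint_buffer_size_frames * frame_size,
               2 * packet_size_frames * frame_size);
  return capture_buffer_size == 7680 ? 0 : 1;
}
```
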
| 324 LARGE_INTEGER now_count = {}; | 329 LARGE_INTEGER now_count = {}; |
| 325 bool recording = true; | 330 bool recording = true; |
| 326 bool error = false; | 331 bool error = false; |
| 327 double volume = GetVolume(); | 332 double volume = GetVolume(); |
| 328 HANDLE wait_array[2] = | 333 HANDLE wait_array[2] = {stop_capture_event_.Get(), |
| 329 { stop_capture_event_.Get(), audio_samples_ready_event_.Get() }; | 334 audio_samples_ready_event_.Get()}; |
| 330 | 335 |
| 331 base::win::ScopedComPtr<IAudioClock> audio_clock; | 336 base::win::ScopedComPtr<IAudioClock> audio_clock; |
| 332 audio_client_->GetService(__uuidof(IAudioClock), audio_clock.ReceiveVoid()); | 337 audio_client_->GetService(__uuidof(IAudioClock), audio_clock.ReceiveVoid()); |
| 333 | 338 |
| 334 while (recording && !error) { | 339 while (recording && !error) { |
| 335 HRESULT hr = S_FALSE; | 340 HRESULT hr = S_FALSE; |
| 336 | 341 |
| 337 // Wait for a close-down event or a new capture event. | 342 // Wait for a close-down event or a new capture event. |
| 338 DWORD wait_result = WaitForMultipleObjects(2, wait_array, FALSE, INFINITE); | 343 DWORD wait_result = WaitForMultipleObjects(2, wait_array, FALSE, INFINITE); |
| 339 switch (wait_result) { | 344 switch (wait_result) { |
| 340 case WAIT_FAILED: | 345 case WAIT_FAILED: |
| 341 error = true; | 346 error = true; |
| 342 break; | 347 break; |
| 343 case WAIT_OBJECT_0 + 0: | 348 case WAIT_OBJECT_0 + 0: |
| 344 // |stop_capture_event_| has been set. | 349 // |stop_capture_event_| has been set. |
| 345 recording = false; | 350 recording = false; |
| 346 break; | 351 break; |
| 347 case WAIT_OBJECT_0 + 1: | 352 case WAIT_OBJECT_0 + 1: { |
| 348 { | 353 TRACE_EVENT0("audio", "WASAPIAudioInputStream::Run_0"); |
| 349 TRACE_EVENT0("audio", "WASAPIAudioInputStream::Run_0"); | 354 // |audio_samples_ready_event_| has been set. |
| 350 // |audio_samples_ready_event_| has been set. | 355 BYTE* data_ptr = NULL; |
| 351 BYTE* data_ptr = NULL; | 356 UINT32 num_frames_to_read = 0; |
| 352 UINT32 num_frames_to_read = 0; | 357 DWORD flags = 0; |
| 353 DWORD flags = 0; | 358 UINT64 device_position = 0; |
| 354 UINT64 device_position = 0; | 359 UINT64 first_audio_frame_timestamp = 0; |
| 355 UINT64 first_audio_frame_timestamp = 0; | |
| 356 | 360 |
| 357 // Retrieve the amount of data in the capture endpoint buffer, | 361 // Retrieve the amount of data in the capture endpoint buffer, |
| 358 // replace it with silence if required, create callbacks for each | 362 // replace it with silence if required, create callbacks for each |
| 359 // packet and store non-delivered data for the next event. | 363 // packet and store non-delivered data for the next event. |
| 360 hr = audio_capture_client_->GetBuffer(&data_ptr, | 364 hr = audio_capture_client_->GetBuffer(&data_ptr, &num_frames_to_read, |
| 361 &num_frames_to_read, | 365 &flags, &device_position, |
| 362 &flags, | 366 &first_audio_frame_timestamp); |
| 363 &device_position, | 367 if (FAILED(hr)) { |
| 364 &first_audio_frame_timestamp); | 368 DLOG(ERROR) << "Failed to get data from the capture buffer"; |
| 365 if (FAILED(hr)) { | 369 continue; |
| 366 DLOG(ERROR) << "Failed to get data from the capture buffer"; | 370 } |
| 367 continue; | 371 |
| 372 if (audio_clock) { | |
| 373 // The reported timestamp from GetBuffer is not as reliable as the | |
| 374 // clock from the client. We've seen timestamps reported for | |
| 375 // USB audio devices be off by several days. Furthermore we've | |
| 376 // seen them jump back in time every 2 seconds or so. | |
| 377 audio_clock->GetPosition(&device_position, | |
| 378 &first_audio_frame_timestamp); | |
| 379 } | |
| 380 | |
| 381 if (num_frames_to_read != 0) { | |
| 382 size_t pos = buffer_frame_index * frame_size_; | |
| 383 size_t num_bytes = num_frames_to_read * frame_size_; | |
| 384 DCHECK_GE(capture_buffer_size, pos + num_bytes); | |
| 385 | |
| 386 if (flags & AUDCLNT_BUFFERFLAGS_SILENT) { | |
| 387 // Clear out the local buffer since silence is reported. | |
| 388 memset(&capture_buffer[pos], 0, num_bytes); | |
| 389 } else { | |
| 390 // Copy captured data from audio engine buffer to local buffer. | |
| 391 memcpy(&capture_buffer[pos], data_ptr, num_bytes); | |
| 368 } | 392 } |
| 369 | 393 |
| 370 if (audio_clock) { | 394 buffer_frame_index += num_frames_to_read; |
| 371 // The reported timestamp from GetBuffer is not as reliable as the | 395 } |
| 372 // clock from the client. We've seen timestamps reported for | |
| 373 // USB audio devices be off by several days. Furthermore we've | 411 : ((perf_count_to_100ns_units_ * now_count.QuadPart - |
| 374 // seen them jump back in time every 2 seconds or so. | |
| 375 audio_clock->GetPosition( | |
| 376 &device_position, &first_audio_frame_timestamp); | |
| 377 } | |
| 378 | 396 |
| 397 hr = audio_capture_client_->ReleaseBuffer(num_frames_to_read); | |
| 398 DLOG_IF(ERROR, FAILED(hr)) << "Failed to release capture buffer"; | |
| 379 | 399 |
| 380 if (num_frames_to_read != 0) { | 400 // Derive a delay estimate for the captured audio packet. |
| 381 size_t pos = buffer_frame_index * frame_size_; | 401 // The value contains two parts (A+B), where A is the delay of the |
| 382 size_t num_bytes = num_frames_to_read * frame_size_; | 402 // first audio frame in the packet and B is the extra delay |
| 383 DCHECK_GE(capture_buffer_size, pos + num_bytes); | 403 // contained in any stored data. Unit is in audio frames. |
| 404 QueryPerformanceCounter(&now_count); | |
| 405 // first_audio_frame_timestamp will be 0 if we didn't get a timestamp. | |
| 406 double audio_delay_frames = | |
| 407 first_audio_frame_timestamp == 0 | |
| 408 ? num_frames_to_read | |
| 409 : ((perf_count_to_100ns_units_ * now_count.QuadPart - | |
| 410 first_audio_frame_timestamp) / | |
| 411 10000.0) * | |
| 412 ms_to_frame_count_ + | |
| 413 buffer_frame_index - num_frames_to_read; | |
| 384 | 414 |
| 385 if (flags & AUDCLNT_BUFFERFLAGS_SILENT) { | 415 // Get a cached AGC volume level which is updated once every second |
| 386 // Clear out the local buffer since silence is reported. | 416 // on the audio manager thread. Note that, |volume| is also updated |
| 387 memset(&capture_buffer[pos], 0, num_bytes); | 417 // each time SetVolume() is called through IPC by the render-side AGC. |
| 388 } else { | 418 GetAgcVolume(&volume); |
| 389 // Copy captured data from audio engine buffer to local buffer. | |
| 390 memcpy(&capture_buffer[pos], data_ptr, num_bytes); | |
| 391 } | |
| 392 | 419 |
| 393 buffer_frame_index += num_frames_to_read; | 420 // Deliver captured data to the registered consumer using a packet |
| 394 } | 421 // size which was specified at construction. |
| 422 uint32_t delay_frames = static_cast<uint32_t>(audio_delay_frames + 0.5); | |
| 423 while (buffer_frame_index >= packet_size_frames_) { | |
| 424 // Copy data to audio bus to match the OnData interface. | |
| 425 uint8_t* audio_data = | |
| 426 reinterpret_cast<uint8_t*>(capture_buffer.get()); | |
| 427 audio_bus_->FromInterleaved(audio_data, audio_bus_->frames(), | |
| 428 format_.wBitsPerSample / 8); | |
| 395 | 429 |
| 396 hr = audio_capture_client_->ReleaseBuffer(num_frames_to_read); | 430 // Deliver data packet, delay estimation and volume level to |
| 397 DLOG_IF(ERROR, FAILED(hr)) << "Failed to release capture buffer"; | 431 // the user. |
| 432 sink_->OnData(this, audio_bus_.get(), delay_frames * frame_size_, | |
| 433 volume); | |
| 398 | 434 |
| 399 // Derive a delay estimate for the captured audio packet. | 435 // Store parts of the recorded data which can't be delivered |
| 400 // The value contains two parts (A+B), where A is the delay of the | 436 // using the current packet size. The stored section will be used |
| 401 // first audio frame in the packet and B is the extra delay | 437 // either in the next while-loop iteration or in the next |
| 402 // contained in any stored data. Unit is in audio frames. | 438 // capture event. |
| 403 QueryPerformanceCounter(&now_count); | 439 // TODO(tommi): If this data will be used in the next capture |
| 404 // first_audio_frame_timestamp will be 0 if we didn't get a timestamp. | 440 // event, we will report incorrect delay estimates because |
| 405 double audio_delay_frames = first_audio_frame_timestamp == 0 ? | 441 // we'll use the one for the captured data that time around |
| 406 num_frames_to_read : | 442 // (i.e. in the future). |
| 407 ((perf_count_to_100ns_units_ * now_count.QuadPart - | 443 memmove(&capture_buffer[0], &capture_buffer[packet_size_bytes_], |
| 408 first_audio_frame_timestamp) / 10000.0) * ms_to_frame_count_ + | 444 (buffer_frame_index - packet_size_frames_) * frame_size_); |
| 409 buffer_frame_index - num_frames_to_read; | |
| 410 | 445 |
| 411 // Get a cached AGC volume level which is updated once every second | 446 DCHECK_GE(buffer_frame_index, packet_size_frames_); |
| 412 // on the audio manager thread. Note that |volume| is also updated | 447 buffer_frame_index -= packet_size_frames_; |
| 413 // each time SetVolume() is called through IPC by the render-side AGC. | 448 if (delay_frames > packet_size_frames_) { |
| 414 GetAgcVolume(&volume); | 449 delay_frames -= packet_size_frames_; |
| 415 | 450 } else { |
| 416 // Deliver captured data to the registered consumer using a packet | 451 delay_frames = 0; |
| 417 // size which was specified at construction. | |
| 418 uint32_t delay_frames = | |
| 419 static_cast<uint32_t>(audio_delay_frames + 0.5); | |
| 420 while (buffer_frame_index >= packet_size_frames_) { | |
| 421 // Copy data to audio bus to match the OnData interface. | |
| 422 uint8_t* audio_data = | |
| 423 reinterpret_cast<uint8_t*>(capture_buffer.get()); | |
| 424 audio_bus_->FromInterleaved( | |
| 425 audio_data, audio_bus_->frames(), format_.wBitsPerSample / 8); | |
| 426 | |
| 427 // Deliver data packet, delay estimation and volume level to | |
| 428 // the user. | |
| 429 sink_->OnData( | |
| 430 this, audio_bus_.get(), delay_frames * frame_size_, volume); | |
| 431 | |
| 432 // Store parts of the recorded data which can't be delivered | |
| 433 // using the current packet size. The stored section will be used | |
| 434 // either in the next while-loop iteration or in the next | |
| 435 // capture event. | |
| 436 // TODO(tommi): If this data will be used in the next capture | |
| 437 // event, we will report incorrect delay estimates because | |
| 438 // we'll use the one for the captured data that time around | |
| 439 // (i.e. in the future). | |
| 440 memmove(&capture_buffer[0], | |
| 441 &capture_buffer[packet_size_bytes_], | |
| 442 (buffer_frame_index - packet_size_frames_) * frame_size_); | |
| 443 | |
| 444 DCHECK_GE(buffer_frame_index, packet_size_frames_); | |
| 445 buffer_frame_index -= packet_size_frames_; | |
| 446 if (delay_frames > packet_size_frames_) { | |
| 447 delay_frames -= packet_size_frames_; | |
| 448 } else { | |
| 449 delay_frames = 0; | |
| 450 } | |
| 451 } | 452 } |
| 452 } | 453 } |
| 453 break; | 454 } break; |
| 454 default: | 455 default: |
| 455 error = true; | 456 error = true; |
| 456 break; | 457 break; |
| 457 } | 458 } |
| 458 } | 459 } |
| 459 | 460 |
| 460 if (recording && error) { | 461 if (recording && error) { |
| 461 // TODO(henrika): perhaps it is worth improving the cleanup here by e.g. | 462 // TODO(henrika): perhaps it is worth improving the cleanup here by e.g. |
| 462 // stopping the audio client, joining the thread etc.? | 463 // stopping the audio client, joining the thread etc.? |
| 463 NOTREACHED() << "WASAPI capturing failed with error code " | 464 NOTREACHED() << "WASAPI capturing failed with error code " |
| 464 << GetLastError(); | 465 << GetLastError(); |
| 465 } | 466 } |
| 466 | 467 |
| 467 // Disable MMCSS. | 468 // Disable MMCSS. |
| 468 if (mm_task && !avrt::AvRevertMmThreadCharacteristics(mm_task)) { | 469 if (mm_task && !avrt::AvRevertMmThreadCharacteristics(mm_task)) { |
| 469 PLOG(WARNING) << "Failed to disable MMCSS"; | 470 PLOG(WARNING) << "Failed to disable MMCSS"; |
| 470 } | 471 } |
| 471 } | 472 } |
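
Stripped of COM and timing details, the capture case above is an accumulate-then-drain loop: append whatever the endpoint buffer delivers, then peel off fixed-size packets for OnData() and memmove the undelivered tail to the front. A simplified standalone model of just that logic (all names here are illustrative, not the CL's API):

```cpp
#include <cstdint>
#include <cstring>
#include <vector>

// Simplified model of the Run() delivery loop: frames arrive in arbitrary
// chunk sizes but are handed downstream in fixed-size packets.
class PacketDeliverer {
 public:
  PacketDeliverer(size_t packet_frames, size_t frame_size)
      : packet_frames_(packet_frames),
        frame_size_(frame_size),
        // Assumes incoming chunks never exceed the slack; the CL guarantees
        // this by sizing the buffer from the endpoint buffer size.
        buffer_(4 * packet_frames * frame_size) {}

  // Append |num_frames| captured frames, then drain all full packets.
  template <typename Sink>
  void OnCaptured(const uint8_t* data, size_t num_frames, Sink&& sink) {
    memcpy(&buffer_[frames_buffered_ * frame_size_], data,
           num_frames * frame_size_);
    frames_buffered_ += num_frames;

    while (frames_buffered_ >= packet_frames_) {
      sink(buffer_.data(), packet_frames_);  // One OnData()-style callback.
      // Keep the undelivered tail for the next event, as the CL does with
      // memmove (its TODO notes the delay estimate for that tail is stale).
      frames_buffered_ -= packet_frames_;
      memmove(buffer_.data(), buffer_.data() + packet_frames_ * frame_size_,
              frames_buffered_ * frame_size_);
    }
  }

 private:
  const size_t packet_frames_;
  const size_t frame_size_;
  std::vector<uint8_t> buffer_;
  size_t frames_buffered_ = 0;
};
```
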
| 472 | 473 |
| 473 void WASAPIAudioInputStream::HandleError(HRESULT err) { | 474 void WASAPIAudioInputStream::HandleError(HRESULT err) { |
| 474 NOTREACHED() << "Error code: " << err; | 475 NOTREACHED() << "Error code: " << err; |
| 475 if (sink_) | 476 if (sink_) |
| 476 sink_->OnError(this); | 477 sink_->OnError(this); |
| 477 } | 478 } |
| 478 | 479 |
| 479 HRESULT WASAPIAudioInputStream::SetCaptureDevice() { | 480 HRESULT WASAPIAudioInputStream::SetCaptureDevice() { |
| 481 DCHECK_EQ(OPEN_RESULT_OK, open_result_); | |
| 480 DCHECK(!endpoint_device_.get()); | 482 DCHECK(!endpoint_device_.get()); |
| 481 | 483 |
| 482 ScopedComPtr<IMMDeviceEnumerator> enumerator; | 484 ScopedComPtr<IMMDeviceEnumerator> enumerator; |
| 483 HRESULT hr = enumerator.CreateInstance(__uuidof(MMDeviceEnumerator), | 485 HRESULT hr = enumerator.CreateInstance(__uuidof(MMDeviceEnumerator), NULL, |
| 484 NULL, CLSCTX_INPROC_SERVER); | 486 CLSCTX_INPROC_SERVER); |
| 485 if (FAILED(hr)) | 487 if (FAILED(hr)) { |
| 488 open_result_ = OPEN_RESULT_CREATE_INSTANCE; | |
| 486 return hr; | 489 return hr; |
| 490 } | |
| 487 | 491 |
| 488 // Retrieve the IMMDevice by using the specified role or the specified | 492 // Retrieve the IMMDevice by using the specified role or the specified |
| 489 // unique endpoint device-identification string. | 493 // unique endpoint device-identification string. |
| 490 | 494 |
| 491 if (device_id_ == AudioDeviceDescription::kDefaultDeviceId) { | 495 if (device_id_ == AudioDeviceDescription::kDefaultDeviceId) { |
| 492 // Retrieve the default capture audio endpoint for the specified role. | 496 // Retrieve the default capture audio endpoint for the specified role. |
| 493 // Note that, in Windows Vista, the MMDevice API supports device roles | 497 // Note that, in Windows Vista, the MMDevice API supports device roles |
| 494 // but the system-supplied user interface programs do not. | 498 // but the system-supplied user interface programs do not. |
| 495 hr = enumerator->GetDefaultAudioEndpoint(eCapture, eConsole, | 499 hr = enumerator->GetDefaultAudioEndpoint(eCapture, eConsole, |
| 496 endpoint_device_.Receive()); | 500 endpoint_device_.Receive()); |
| 497 } else if (device_id_ == AudioDeviceDescription::kCommunicationsDeviceId) { | 501 } else if (device_id_ == AudioDeviceDescription::kCommunicationsDeviceId) { |
| 498 hr = enumerator->GetDefaultAudioEndpoint(eCapture, eCommunications, | 502 hr = enumerator->GetDefaultAudioEndpoint(eCapture, eCommunications, |
| 499 endpoint_device_.Receive()); | 503 endpoint_device_.Receive()); |
| 500 } else if (device_id_ == AudioDeviceDescription::kLoopbackWithMuteDeviceId) { | 504 } else if (device_id_ == AudioDeviceDescription::kLoopbackWithMuteDeviceId) { |
| 501 // Capture the default playback stream. | 505 // Capture the default playback stream. |
| 502 hr = enumerator->GetDefaultAudioEndpoint(eRender, eConsole, | 506 hr = enumerator->GetDefaultAudioEndpoint(eRender, eConsole, |
| 503 endpoint_device_.Receive()); | 507 endpoint_device_.Receive()); |
| 504 | 508 |
| 505 endpoint_device_->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL, NULL, | 509 endpoint_device_->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL, NULL, |
| 506 system_audio_volume_.ReceiveVoid()); | 510 system_audio_volume_.ReceiveVoid()); |
| 507 } else if (device_id_ == AudioDeviceDescription::kLoopbackInputDeviceId) { | 511 } else if (device_id_ == AudioDeviceDescription::kLoopbackInputDeviceId) { |
| 508 // Capture the default playback stream. | 512 // Capture the default playback stream. |
| 509 hr = enumerator->GetDefaultAudioEndpoint(eRender, eConsole, | 513 hr = enumerator->GetDefaultAudioEndpoint(eRender, eConsole, |
| 510 endpoint_device_.Receive()); | 514 endpoint_device_.Receive()); |
| 511 } else { | 515 } else { |
| 512 hr = enumerator->GetDevice(base::UTF8ToUTF16(device_id_).c_str(), | 516 hr = enumerator->GetDevice(base::UTF8ToUTF16(device_id_).c_str(), |
| 513 endpoint_device_.Receive()); | 517 endpoint_device_.Receive()); |
| 514 } | 518 } |
| 515 | 519 |
| 516 if (FAILED(hr)) | 520 if (FAILED(hr)) { |
| 521 open_result_ = OPEN_RESULT_NO_ENDPOINT; | |
| 517 return hr; | 522 return hr; |
| 523 } | |
| 518 | 524 |
| 519 // Verify that the audio endpoint device is active, i.e., the audio | 525 // Verify that the audio endpoint device is active, i.e., the audio |
| 520 // adapter that connects to the endpoint device is present and enabled. | 526 // adapter that connects to the endpoint device is present and enabled. |
| 521 DWORD state = DEVICE_STATE_DISABLED; | 527 DWORD state = DEVICE_STATE_DISABLED; |
| 522 hr = endpoint_device_->GetState(&state); | 528 hr = endpoint_device_->GetState(&state); |
| 523 if (FAILED(hr)) | 529 if (FAILED(hr)) { |
| 530 open_result_ = OPEN_RESULT_NO_STATE; | |
| 524 return hr; | 531 return hr; |
| 532 } | |
| 525 | 533 |
| 526 if (!(state & DEVICE_STATE_ACTIVE)) { | 534 if (!(state & DEVICE_STATE_ACTIVE)) { |
| 527 DLOG(ERROR) << "Selected capture device is not active."; | 535 DLOG(ERROR) << "Selected capture device is not active."; |
| 536 open_result_ = OPEN_RESULT_DEVICE_NOT_ACTIVE; | |
| 528 hr = E_ACCESSDENIED; | 537 hr = E_ACCESSDENIED; |
| 529 } | 538 } |
| 530 | 539 |
| 531 return hr; | 540 return hr; |
| 532 } | 541 } |
| 533 | 542 |
| 534 HRESULT WASAPIAudioInputStream::ActivateCaptureDevice() { | |
| 535 // Creates and activates an IAudioClient COM object given the selected | |
| 536 // capture endpoint device. | |
| 537 HRESULT hr = endpoint_device_->Activate(__uuidof(IAudioClient), | |
| 538 CLSCTX_INPROC_SERVER, | |
| 539 NULL, | |
| 540 audio_client_.ReceiveVoid()); | |
| 541 return hr; | |
| 542 } | |
| 543 | |
| 544 HRESULT WASAPIAudioInputStream::GetAudioEngineStreamFormat() { | 543 HRESULT WASAPIAudioInputStream::GetAudioEngineStreamFormat() { |
| 545 HRESULT hr = S_OK; | 544 HRESULT hr = S_OK; |
| 546 #ifndef NDEBUG | 545 #ifndef NDEBUG |
| 547 // The GetMixFormat() method retrieves the stream format that the | 546 // The GetMixFormat() method retrieves the stream format that the |
| 548 // audio engine uses for its internal processing of shared-mode streams. | 547 // audio engine uses for its internal processing of shared-mode streams. |
| 549 // The method always uses a WAVEFORMATEXTENSIBLE structure, instead | 548 // The method always uses a WAVEFORMATEXTENSIBLE structure, instead |
| 550 // of a stand-alone WAVEFORMATEX structure, to specify the format. | 549 // of a stand-alone WAVEFORMATEX structure, to specify the format. |
| 551 // A WAVEFORMATEXTENSIBLE structure can specify both the mapping of | 550 // A WAVEFORMATEXTENSIBLE structure can specify both the mapping of |
| 552 // channels to speakers and the number of bits of precision in each sample. | 551 // channels to speakers and the number of bits of precision in each sample. |
| 553 base::win::ScopedCoMem<WAVEFORMATEXTENSIBLE> format_ex; | 552 base::win::ScopedCoMem<WAVEFORMATEXTENSIBLE> format_ex; |
| 554 hr = audio_client_->GetMixFormat( | 553 hr = |
| 555 reinterpret_cast<WAVEFORMATEX**>(&format_ex)); | 554 audio_client_->GetMixFormat(reinterpret_cast<WAVEFORMATEX**>(&format_ex)); |
| 556 | 555 |
| 557 // See http://msdn.microsoft.com/en-us/windows/hardware/gg463006#EFH | 556 // See http://msdn.microsoft.com/en-us/windows/hardware/gg463006#EFH |
| 558 // for details on the WAVE file format. | 557 // for details on the WAVE file format. |
| 559 WAVEFORMATEX format = format_ex->Format; | 558 WAVEFORMATEX format = format_ex->Format; |
| 560 DVLOG(2) << "WAVEFORMATEX:"; | 559 DVLOG(2) << "WAVEFORMATEX:"; |
| 561 DVLOG(2) << " wFormatTags : 0x" << std::hex << format.wFormatTag; | 560 DVLOG(2) << " wFormatTags : 0x" << std::hex << format.wFormatTag; |
| 562 DVLOG(2) << " nChannels : " << format.nChannels; | 561 DVLOG(2) << " nChannels : " << format.nChannels; |
| 563 DVLOG(2) << " nSamplesPerSec : " << format.nSamplesPerSec; | 562 DVLOG(2) << " nSamplesPerSec : " << format.nSamplesPerSec; |
| 564 DVLOG(2) << " nAvgBytesPerSec: " << format.nAvgBytesPerSec; | 563 DVLOG(2) << " nAvgBytesPerSec: " << format.nAvgBytesPerSec; |
| 565 DVLOG(2) << " nBlockAlign : " << format.nBlockAlign; | 564 DVLOG(2) << " nBlockAlign : " << format.nBlockAlign; |
| 566 DVLOG(2) << " wBitsPerSample : " << format.wBitsPerSample; | 565 DVLOG(2) << " wBitsPerSample : " << format.wBitsPerSample; |
| 567 DVLOG(2) << " cbSize : " << format.cbSize; | 566 DVLOG(2) << " cbSize : " << format.cbSize; |
| 568 | 567 |
| 569 DVLOG(2) << "WAVEFORMATEXTENSIBLE:"; | 568 DVLOG(2) << "WAVEFORMATEXTENSIBLE:"; |
| 570 DVLOG(2) << " wValidBitsPerSample: " << | 569 DVLOG(2) << " wValidBitsPerSample: " |
| 571 format_ex->Samples.wValidBitsPerSample; | 570 << format_ex->Samples.wValidBitsPerSample; |
| 572 DVLOG(2) << " dwChannelMask : 0x" << std::hex << | 571 DVLOG(2) << " dwChannelMask : 0x" << std::hex |
| 573 format_ex->dwChannelMask; | 572 << format_ex->dwChannelMask; |
| 574 if (format_ex->SubFormat == KSDATAFORMAT_SUBTYPE_PCM) | 573 if (format_ex->SubFormat == KSDATAFORMAT_SUBTYPE_PCM) |
| 575 DVLOG(2) << " SubFormat : KSDATAFORMAT_SUBTYPE_PCM"; | 574 DVLOG(2) << " SubFormat : KSDATAFORMAT_SUBTYPE_PCM"; |
| 576 else if (format_ex->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT) | 575 else if (format_ex->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT) |
| 577 DVLOG(2) << " SubFormat : KSDATAFORMAT_SUBTYPE_IEEE_FLOAT"; | 576 DVLOG(2) << " SubFormat : KSDATAFORMAT_SUBTYPE_IEEE_FLOAT"; |
| 578 else if (format_ex->SubFormat == KSDATAFORMAT_SUBTYPE_WAVEFORMATEX) | 577 else if (format_ex->SubFormat == KSDATAFORMAT_SUBTYPE_WAVEFORMATEX) |
| 579 DVLOG(2) << " SubFormat : KSDATAFORMAT_SUBTYPE_WAVEFORMATEX"; | 578 DVLOG(2) << " SubFormat : KSDATAFORMAT_SUBTYPE_WAVEFORMATEX"; |
| 580 #endif | 579 #endif |
| 581 return hr; | 580 return hr; |
| 582 } | 581 } |
| 583 | 582 |
| 584 bool WASAPIAudioInputStream::DesiredFormatIsSupported() { | 583 bool WASAPIAudioInputStream::DesiredFormatIsSupported() { |
| 585 // An application that uses WASAPI to manage shared-mode streams can rely | 584 // An application that uses WASAPI to manage shared-mode streams can rely |
| 586 // on the audio engine to perform only limited format conversions. The audio | 585 // on the audio engine to perform only limited format conversions. The audio |
| 587 // engine can convert between a standard PCM sample size used by the | 586 // engine can convert between a standard PCM sample size used by the |
| 588 // application and the floating-point samples that the engine uses for its | 587 // application and the floating-point samples that the engine uses for its |
| 589 // internal processing. However, the format for an application stream | 588 // internal processing. However, the format for an application stream |
| 590 // typically must have the same number of channels and the same sample | 589 // typically must have the same number of channels and the same sample |
| 591 // rate as the stream format used by the device. | 590 // rate as the stream format used by the device. |
| 592 // Many audio devices support both PCM and non-PCM stream formats. However, | 591 // Many audio devices support both PCM and non-PCM stream formats. However, |
| 593 // the audio engine can mix only PCM streams. | 592 // the audio engine can mix only PCM streams. |
| 594 base::win::ScopedCoMem<WAVEFORMATEX> closest_match; | 593 base::win::ScopedCoMem<WAVEFORMATEX> closest_match; |
| 595 HRESULT hr = audio_client_->IsFormatSupported(AUDCLNT_SHAREMODE_SHARED, | 594 HRESULT hr = audio_client_->IsFormatSupported(AUDCLNT_SHAREMODE_SHARED, |
| 596 &format_, | 595 &format_, &closest_match); |
| 597 &closest_match); | |
| 598 DLOG_IF(ERROR, hr == S_FALSE) << "Format is not supported " | 596 DLOG_IF(ERROR, hr == S_FALSE) << "Format is not supported " |
| 599 << "but a closest match exists."; | 597 << "but a closest match exists."; |
| 600 return (hr == S_OK); | 598 return (hr == S_OK); |
| 601 } | 599 } |
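
Worth noting for reviewers: IsFormatSupported() is three-valued, and the helper above deliberately collapses it to a bool that accepts only an exact match. A hedged sketch of the alternative, inspecting the engine's suggested format on S_FALSE (not what this CL does):

```cpp
// Sketch: probe shared-mode support and inspect the engine's suggestion on
// S_FALSE. |client| is an already-activated IAudioClient.
#include <audioclient.h>
#include <objbase.h>

bool ProbeFormat(IAudioClient* client, const WAVEFORMATEX& desired) {
  WAVEFORMATEX* closest = nullptr;
  HRESULT hr =
      client->IsFormatSupported(AUDCLNT_SHAREMODE_SHARED, &desired, &closest);
  if (hr == S_FALSE && closest) {
    // The engine proposes an alternative, typically the mix format's sample
    // rate/channel count; a caller could resample to this instead of failing.
    CoTaskMemFree(closest);
    return false;
  }
  return hr == S_OK;
}
```
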
| 602 | 600 |
| 603 HRESULT WASAPIAudioInputStream::InitializeAudioEngine() { | 601 HRESULT WASAPIAudioInputStream::InitializeAudioEngine() { |
| 602 DCHECK_EQ(OPEN_RESULT_OK, open_result_); | |
| 604 DWORD flags; | 603 DWORD flags; |
| 605 // Use event-driven mode only for regular input devices. For loopback the | 604 // Use event-driven mode only for regular input devices. For loopback the |
| 606 // EVENTCALLBACK flag is specified when initializing | 605 // EVENTCALLBACK flag is specified when initializing |
| 607 // |audio_render_client_for_loopback_|. | 606 // |audio_render_client_for_loopback_|. |
| 608 if (device_id_ == AudioDeviceDescription::kLoopbackInputDeviceId || | 607 if (device_id_ == AudioDeviceDescription::kLoopbackInputDeviceId || |
| 609 device_id_ == AudioDeviceDescription::kLoopbackWithMuteDeviceId) { | 608 device_id_ == AudioDeviceDescription::kLoopbackWithMuteDeviceId) { |
| 610 flags = AUDCLNT_STREAMFLAGS_LOOPBACK | AUDCLNT_STREAMFLAGS_NOPERSIST; | 609 flags = AUDCLNT_STREAMFLAGS_LOOPBACK | AUDCLNT_STREAMFLAGS_NOPERSIST; |
| 611 } else { | 610 } else { |
| 612 flags = AUDCLNT_STREAMFLAGS_EVENTCALLBACK | AUDCLNT_STREAMFLAGS_NOPERSIST; | 611 flags = AUDCLNT_STREAMFLAGS_EVENTCALLBACK | AUDCLNT_STREAMFLAGS_NOPERSIST; |
| 613 } | 612 } |
| 614 | 613 |
| 615 // Initialize the audio stream between the client and the device. | 614 // Initialize the audio stream between the client and the device. |
| 616 // We connect indirectly through the audio engine by using shared mode. | 615 // We connect indirectly through the audio engine by using shared mode. |
| 617 // Note that |hnsBufferDuration| is set to 0, which ensures that the | 616 // Note that |hnsBufferDuration| is set to 0, which ensures that the |
| 618 // buffer is never smaller than the minimum buffer size needed to ensure | 617 // buffer is never smaller than the minimum buffer size needed to ensure |
| 619 // that glitches do not occur between the periodic processing passes. | 618 // that glitches do not occur between the periodic processing passes. |
| 620 // This setting should lead to the lowest possible latency. | 619 // This setting should lead to the lowest possible latency. |
| 621 HRESULT hr = audio_client_->Initialize( | 620 HRESULT hr = audio_client_->Initialize( |
| 622 AUDCLNT_SHAREMODE_SHARED, flags, | 621 AUDCLNT_SHAREMODE_SHARED, flags, |
| 623 0, // hnsBufferDuration | 622 0, // hnsBufferDuration |
| 624 0, &format_, device_id_ == AudioDeviceDescription::kCommunicationsDeviceId | 623 0, &format_, device_id_ == AudioDeviceDescription::kCommunicationsDeviceId |
| 625 ? &kCommunicationsSessionId | 624 ? &kCommunicationsSessionId |
| 626 : nullptr); | 625 : nullptr); |
| 627 | 626 |
| 628 if (FAILED(hr)) | 627 if (FAILED(hr)) { |
| 628 open_result_ = OPEN_RESULT_AUDIO_CLIENT_INIT_FAILED; | |
| 629 return hr; | 629 return hr; |
| 630 } | |
| 630 | 631 |
| 631 // Retrieve the length of the endpoint buffer shared between the client | 632 // Retrieve the length of the endpoint buffer shared between the client |
| 632 // and the audio engine. The buffer length determines the maximum amount | 633 // and the audio engine. The buffer length determines the maximum amount |
| 633 // of capture data that the audio engine can read from the endpoint buffer | 634 // of capture data that the audio engine can read from the endpoint buffer |
| 634 // during a single processing pass. | 635 // during a single processing pass. |
| 635 // A typical value is 960 audio frames <=> 20ms @ 48kHz sample rate. | 636 // A typical value is 960 audio frames <=> 20ms @ 48kHz sample rate. |
| 636 hr = audio_client_->GetBufferSize(&endpoint_buffer_size_frames_); | 637 hr = audio_client_->GetBufferSize(&endpoint_buffer_size_frames_); |
| 637 if (FAILED(hr)) | 638 if (FAILED(hr)) { |
| 639 open_result_ = OPEN_RESULT_GET_BUFFER_SIZE_FAILED; | |
| 638 return hr; | 640 return hr; |
| 641 } | |
| 639 | 642 |
| 640 DVLOG(1) << "endpoint buffer size: " << endpoint_buffer_size_frames_ | 643 DVLOG(1) << "endpoint buffer size: " << endpoint_buffer_size_frames_ |
| 641 << " [frames]"; | 644 << " [frames]"; |
| 642 | 645 |
| 643 #ifndef NDEBUG | 646 #ifndef NDEBUG |
| 644 // The period between processing passes by the audio engine is fixed for a | 647 // The period between processing passes by the audio engine is fixed for a |
| 645 // particular audio endpoint device and represents the smallest processing | 648 // particular audio endpoint device and represents the smallest processing |
| 646 // quantum for the audio engine. This period plus the stream latency between | 649 // quantum for the audio engine. This period plus the stream latency between |
| 647 // the buffer and endpoint device represents the minimum possible latency | 650 // the buffer and endpoint device represents the minimum possible latency |
| 648 // that an audio application can achieve. | 651 // that an audio application can achieve. |
| (...skipping 29 matching lines...) | |
| 678 // time the client receives an event for the render stream, it must signal | 681 // time the client receives an event for the render stream, it must signal |
| 679 // the capture client to run the capture thread that reads the next set of | 682 // the capture client to run the capture thread that reads the next set of |
| 680 // samples from the capture endpoint buffer. | 683 // samples from the capture endpoint buffer. |
| 681 // | 684 // |
| 682 // http://msdn.microsoft.com/en-us/library/windows/desktop/dd316551(v=vs.85).aspx | 685 // http://msdn.microsoft.com/en-us/library/windows/desktop/dd316551(v=vs.85).aspx |
| 683 if (device_id_ == AudioDeviceDescription::kLoopbackInputDeviceId || | 686 if (device_id_ == AudioDeviceDescription::kLoopbackInputDeviceId || |
| 684 device_id_ == AudioDeviceDescription::kLoopbackWithMuteDeviceId) { | 687 device_id_ == AudioDeviceDescription::kLoopbackWithMuteDeviceId) { |
| 685 hr = endpoint_device_->Activate( | 688 hr = endpoint_device_->Activate( |
| 686 __uuidof(IAudioClient), CLSCTX_INPROC_SERVER, NULL, | 689 __uuidof(IAudioClient), CLSCTX_INPROC_SERVER, NULL, |
| 687 audio_render_client_for_loopback_.ReceiveVoid()); | 690 audio_render_client_for_loopback_.ReceiveVoid()); |
| 688 if (FAILED(hr)) | 691 if (FAILED(hr)) { |
| 692 open_result_ = OPEN_RESULT_LOOPBACK_ACTIVATE_FAILED; | |
| 689 return hr; | 693 return hr; |
| 694 } | |
| 690 | 695 |
| 691 hr = audio_render_client_for_loopback_->Initialize( | 696 hr = audio_render_client_for_loopback_->Initialize( |
| 692 AUDCLNT_SHAREMODE_SHARED, | 697 AUDCLNT_SHAREMODE_SHARED, |
| 693 AUDCLNT_STREAMFLAGS_EVENTCALLBACK | AUDCLNT_STREAMFLAGS_NOPERSIST, | 698 AUDCLNT_STREAMFLAGS_EVENTCALLBACK | AUDCLNT_STREAMFLAGS_NOPERSIST, 0, 0, |
| 694 0, 0, &format_, NULL); | 699 &format_, NULL); |
| 695 if (FAILED(hr)) | 700 if (FAILED(hr)) { |
| 701 open_result_ = OPEN_RESULT_LOOPBACK_INIT_FAILED; | |
| 696 return hr; | 702 return hr; |
| 703 } | |
| 697 | 704 |
| 698 hr = audio_render_client_for_loopback_->SetEventHandle( | 705 hr = audio_render_client_for_loopback_->SetEventHandle( |
| 699 audio_samples_ready_event_.Get()); | 706 audio_samples_ready_event_.Get()); |
| 700 } else { | 707 } else { |
| 701 hr = audio_client_->SetEventHandle(audio_samples_ready_event_.Get()); | 708 hr = audio_client_->SetEventHandle(audio_samples_ready_event_.Get()); |
| 702 } | 709 } |
| 703 | 710 |
| 704 if (FAILED(hr)) | 711 if (FAILED(hr)) { |
| 712 open_result_ = OPEN_RESULT_SET_EVENT_HANDLE; | |
| 705 return hr; | 713 return hr; |
| 714 } | |
| 706 | 715 |
| 707 // Get access to the IAudioCaptureClient interface. This interface | 716 // Get access to the IAudioCaptureClient interface. This interface |
| 708 // enables us to read input data from the capture endpoint buffer. | 717 // enables us to read input data from the capture endpoint buffer. |
| 709 hr = audio_client_->GetService(__uuidof(IAudioCaptureClient), | 718 hr = audio_client_->GetService(__uuidof(IAudioCaptureClient), |
| 710 audio_capture_client_.ReceiveVoid()); | 719 audio_capture_client_.ReceiveVoid()); |
| 711 if (FAILED(hr)) | 720 if (FAILED(hr)) { |
| 721 open_result_ = OPEN_RESULT_NO_CAPTURE_CLIENT; | |
| 712 return hr; | 722 return hr; |
| 723 } | |
| 713 | 724 |
| 714 // Obtain a reference to the ISimpleAudioVolume interface which enables | 725 // Obtain a reference to the ISimpleAudioVolume interface which enables |
| 715 // us to control the master volume level of an audio session. | 726 // us to control the master volume level of an audio session. |
| 716 hr = audio_client_->GetService(__uuidof(ISimpleAudioVolume), | 727 hr = audio_client_->GetService(__uuidof(ISimpleAudioVolume), |
| 717 simple_audio_volume_.ReceiveVoid()); | 728 simple_audio_volume_.ReceiveVoid()); |
| 729 if (FAILED(hr)) | |
| 730 open_result_ = OPEN_RESULT_NO_AUDIO_VOLUME; | |
| 731 | |
| 718 return hr; | 732 return hr; |
| 719 } | 733 } |
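
Since the loopback workaround in this function is easy to misread in diff form, here is the same idea reduced to its essentials: the loopback capture client cannot be event-driven itself, so an otherwise-unused render client on the same endpoint is initialized with AUDCLNT_STREAMFLAGS_EVENTCALLBACK purely to pulse the capture thread. A minimal sketch, with error handling and cleanup elided:

```cpp
// Sketch of the event-driven loopback pairing (error handling elided).
// |device| is the default *render* endpoint being looped back.
#include <audioclient.h>
#include <mmdeviceapi.h>

void SetUpLoopback(IMMDevice* device, WAVEFORMATEX* format, HANDLE event) {
  IAudioClient* capture = nullptr;
  IAudioClient* render = nullptr;
  device->Activate(__uuidof(IAudioClient), CLSCTX_INPROC_SERVER, nullptr,
                   reinterpret_cast<void**>(&capture));
  // Loopback capture cannot signal events by itself...
  capture->Initialize(AUDCLNT_SHAREMODE_SHARED, AUDCLNT_STREAMFLAGS_LOOPBACK,
                      0, 0, format, nullptr);
  // ...so a dummy render client on the same device supplies them.
  device->Activate(__uuidof(IAudioClient), CLSCTX_INPROC_SERVER, nullptr,
                   reinterpret_cast<void**>(&render));
  render->Initialize(AUDCLNT_SHAREMODE_SHARED,
                     AUDCLNT_STREAMFLAGS_EVENTCALLBACK, 0, 0, format,
                     nullptr);
  render->SetEventHandle(event);
}
```
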
| 720 | 734 |
| 735 void WASAPIAudioInputStream::ReportOpenResult() const { | |
| 736 DCHECK(!opened_); // This method must be called before we set this flag. | |
| 737 UMA_HISTOGRAM_ENUMERATION("Media.Audio.Capture.Win.Open", open_result_, | |
| 738 OPEN_RESULT_MAX + 1); | |
| 739 } | |
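
The open_result_ values recorded in this histogram imply an enum declared in audio_low_latency_input_win.h, which this hunk does not show. A reconstruction from the enumerators the diff actually uses; the explicit values and the _MAX sentinel follow the usual UMA append-only convention and are an assumption:

```cpp
// Reconstructed from the enumerators used in this CL; the real declaration
// lives in the header. UMA enum buckets must be append-only, hence the
// explicit values and the _MAX sentinel passed as OPEN_RESULT_MAX + 1 above.
enum StreamOpenResult {
  OPEN_RESULT_OK = 0,
  OPEN_RESULT_CREATE_INSTANCE = 1,
  OPEN_RESULT_NO_ENDPOINT = 2,
  OPEN_RESULT_NO_STATE = 3,
  OPEN_RESULT_DEVICE_NOT_ACTIVE = 4,
  OPEN_RESULT_ACTIVATION_FAILED = 5,
  OPEN_RESULT_FORMAT_NOT_SUPPORTED = 6,
  OPEN_RESULT_AUDIO_CLIENT_INIT_FAILED = 7,
  OPEN_RESULT_GET_BUFFER_SIZE_FAILED = 8,
  OPEN_RESULT_LOOPBACK_ACTIVATE_FAILED = 9,
  OPEN_RESULT_LOOPBACK_INIT_FAILED = 10,
  OPEN_RESULT_SET_EVENT_HANDLE = 11,
  OPEN_RESULT_NO_CAPTURE_CLIENT = 12,
  OPEN_RESULT_NO_AUDIO_VOLUME = 13,
  OPEN_RESULT_MAX = OPEN_RESULT_NO_AUDIO_VOLUME
};
```
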
| 740 | |
| 721 } // namespace media | 741 } // namespace media |