OLD | NEW |
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "media/audio/win/audio_low_latency_input_win.h" | 5 #include "media/audio/win/audio_low_latency_input_win.h" |
6 | 6 |
7 #include "base/logging.h" | 7 #include "base/logging.h" |
8 #include "base/memory/scoped_ptr.h" | 8 #include "base/memory/scoped_ptr.h" |
9 #include "base/strings/utf_string_conversions.h" | 9 #include "base/strings/utf_string_conversions.h" |
10 #include "media/audio/win/audio_manager_win.h" | 10 #include "media/audio/win/audio_manager_win.h" |
(...skipping 140 matching lines...)
151 if (started_) | 151 if (started_) |
152 return; | 152 return; |
153 | 153 |
154 DCHECK(!sink_); | 154 DCHECK(!sink_); |
155 sink_ = callback; | 155 sink_ = callback; |
156 | 156 |
157 // Starts periodic AGC microphone measurements if the AGC has been enabled | 157 // Starts periodic AGC microphone measurements if the AGC has been enabled |
158 // using SetAutomaticGainControl(). | 158 // using SetAutomaticGainControl(). |
159 StartAgc(); | 159 StartAgc(); |
160 | 160 |
| 161 if (!MarshalComPointers()) { |
| 162 HandleError(S_FALSE); |
| 163 return; |
| 164 } |
| 165 |
161 // Create and start the thread that will drive the capturing by waiting for | 166 // Create and start the thread that will drive the capturing by waiting for |
162 // capture events. | 167 // capture events. |
163 capture_thread_ = | 168 capture_thread_ = |
164 new base::DelegateSimpleThread(this, "wasapi_capture_thread"); | 169 new base::DelegateSimpleThread(this, "wasapi_capture_thread"); |
165 capture_thread_->Start(); | 170 capture_thread_->Start(); |
166 | 171 |
167 // Start streaming data between the endpoint buffer and the audio engine. | 172 // Start streaming data between the endpoint buffer and the audio engine. |
168 HRESULT hr = audio_client_->Start(); | 173 HRESULT hr = audio_client_->Start(); |
169 DLOG_IF(ERROR, FAILED(hr)) << "Failed to start input streaming."; | 174 DLOG_IF(ERROR, FAILED(hr)) << "Failed to start input streaming."; |
170 | 175 |
171 if (SUCCEEDED(hr) && audio_render_client_for_loopback_.get()) | 176 if (SUCCEEDED(hr) && audio_render_client_for_loopback_.get()) |
172 hr = audio_render_client_for_loopback_->Start(); | 177 hr = audio_render_client_for_loopback_->Start(); |
173 | 178 |
174 started_ = SUCCEEDED(hr); | 179 started_ = SUCCEEDED(hr); |
| 180 if (!started_) |
| 181 HandleError(hr); |
175 } | 182 } |
176 | 183 |
177 void WASAPIAudioInputStream::Stop() { | 184 void WASAPIAudioInputStream::Stop() { |
178 DCHECK(CalledOnValidThread()); | 185 DCHECK(CalledOnValidThread()); |
179 DVLOG(1) << "WASAPIAudioInputStream::Stop()"; | 186 DVLOG(1) << "WASAPIAudioInputStream::Stop()"; |
180 if (!started_) | 187 if (!started_) |
181 return; | 188 return; |
182 | 189 |
183 // Stops periodic AGC microphone measurements. | 190 // Stops periodic AGC microphone measurements. |
184 StopAgc(); | 191 StopAgc(); |
(...skipping 158 matching lines...)
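In the Start() hunk above, the COM pointer needed by the capture thread is now marshaled before that thread is created, and both a failed marshal and a failed IAudioClient::Start() are routed through HandleError(). HandleError() itself is not part of this diff; a rough, hypothetical sketch of what such a handler would do, assuming it only logs and forwards the failure to the sink registered in Start(), might look like this:

    // Hypothetical sketch only -- the real HandleError() body is not shown in
    // this patch. Assumes the AudioInputCallback sink reports errors via
    // OnError(AudioInputStream*).
    void WASAPIAudioInputStream::HandleError(HRESULT err) {
      DLOG(ERROR) << "WASAPI capture error, hr=" << std::hex << err;
      if (sink_)
        sink_->OnError(this);
    }

Routing the marshal failure through the same path as a failed Start() keeps error reporting on the thread that owns |sink_|, before the capture thread ever runs.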
343 | 350 |
344 ScopedComPtr<IAudioClient> audio_client; | 351 ScopedComPtr<IAudioClient> audio_client; |
345 hr = endpoint_device->Activate(__uuidof(IAudioClient), | 352 hr = endpoint_device->Activate(__uuidof(IAudioClient), |
346 CLSCTX_INPROC_SERVER, | 353 CLSCTX_INPROC_SERVER, |
347 NULL, | 354 NULL, |
348 audio_client.ReceiveVoid()); | 355 audio_client.ReceiveVoid()); |
349 return SUCCEEDED(hr) ? audio_client->GetMixFormat(device_format) : hr; | 356 return SUCCEEDED(hr) ? audio_client->GetMixFormat(device_format) : hr; |
350 } | 357 } |
351 | 358 |
352 void WASAPIAudioInputStream::Run() { | 359 void WASAPIAudioInputStream::Run() { |
353 ScopedCOMInitializer com_init(ScopedCOMInitializer::kMTA); | 360 ScopedCOMInitializer com_init; |
354 | 361 |
355 // Increase the thread priority. | 362 // Increase the thread priority. |
356 capture_thread_->SetThreadPriority(base::ThreadPriority::REALTIME_AUDIO); | 363 capture_thread_->SetThreadPriority(base::ThreadPriority::REALTIME_AUDIO); |
357 | 364 |
358 // Enable MMCSS to ensure that this thread receives prioritized access to | 365 // Enable MMCSS to ensure that this thread receives prioritized access to |
359 // CPU resources. | 366 // CPU resources. |
360 DWORD task_index = 0; | 367 DWORD task_index = 0; |
361 HANDLE mm_task = avrt::AvSetMmThreadCharacteristics(L"Pro Audio", | 368 HANDLE mm_task = avrt::AvSetMmThreadCharacteristics(L"Pro Audio", |
362 &task_index); | 369 &task_index); |
363 bool mmcss_is_ok = | 370 bool mmcss_is_ok = |
364 (mm_task && avrt::AvSetMmThreadPriority(mm_task, AVRT_PRIORITY_CRITICAL)); | 371 (mm_task && avrt::AvSetMmThreadPriority(mm_task, AVRT_PRIORITY_CRITICAL)); |
365 if (!mmcss_is_ok) { | 372 if (!mmcss_is_ok) { |
366 // Failed to enable MMCSS on this thread. It is not fatal but can lead | 373 // Failed to enable MMCSS on this thread. It is not fatal but can lead |
367 // to reduced QoS at high load. | 374 // to reduced QoS at high load. |
368 DWORD err = GetLastError(); | 375 DWORD err = GetLastError(); |
369 LOG(WARNING) << "Failed to enable MMCSS (error code=" << err << ")."; | 376 LOG(WARNING) << "Failed to enable MMCSS (error code=" << err << ")."; |
370 } | 377 } |
371 | 378 |
| 379 // Retrieve COM pointers from the main thread. |
| 380 ScopedComPtr<IAudioCaptureClient> audio_capture_client; |
| 381 UnmarshalComPointers(&audio_capture_client); |
| 382 |
372 // Allocate a buffer with a size that enables us to take care of cases like: | 383 // Allocate a buffer with a size that enables us to take care of cases like: |
373 // 1) The recorded buffer size is smaller, or does not match exactly with, | 384 // 1) The recorded buffer size is smaller, or does not match exactly with, |
374 // the selected packet size used in each callback. | 385 // the selected packet size used in each callback. |
375 // 2) The selected buffer size is larger than the recorded buffer size in | 386 // 2) The selected buffer size is larger than the recorded buffer size in |
376 // each event. | 387 // each event. |
377 size_t buffer_frame_index = 0; | 388 size_t buffer_frame_index = 0; |
378 size_t capture_buffer_size = std::max( | 389 size_t capture_buffer_size = std::max( |
379 2 * endpoint_buffer_size_frames_ * frame_size_, | 390 2 * endpoint_buffer_size_frames_ * frame_size_, |
380 2 * packet_size_frames_ * frame_size_); | 391 2 * packet_size_frames_ * frame_size_); |
381 scoped_ptr<uint8[]> capture_buffer(new uint8[capture_buffer_size]); | 392 scoped_ptr<uint8[]> capture_buffer(new uint8[capture_buffer_size]); |
382 | 393 |
383 LARGE_INTEGER now_count; | 394 LARGE_INTEGER now_count; |
384 bool recording = true; | 395 bool recording = true; |
385 bool error = false; | 396 bool error = false; |
386 double volume = GetVolume(); | 397 double volume = 0; |
387 HANDLE wait_array[2] = | 398 HANDLE wait_array[2] = |
388 { stop_capture_event_.Get(), audio_samples_ready_event_.Get() }; | 399 { stop_capture_event_.Get(), audio_samples_ready_event_.Get() }; |
389 | 400 |
390 while (recording && !error) { | 401 while (recording && !error) { |
391 HRESULT hr = S_FALSE; | 402 HRESULT hr = S_FALSE; |
392 | 403 |
393 // Wait for a close-down event or a new capture event. | 404 // Wait for a close-down event or a new capture event. |
394 DWORD wait_result = WaitForMultipleObjects(2, wait_array, FALSE, INFINITE); | 405 DWORD wait_result = WaitForMultipleObjects(2, wait_array, FALSE, INFINITE); |
395 switch (wait_result) { | 406 switch (wait_result) { |
396 case WAIT_FAILED: | 407 case WAIT_FAILED: |
397 error = true; | 408 error = true; |
398 break; | 409 break; |
399 case WAIT_OBJECT_0 + 0: | 410 case WAIT_OBJECT_0 + 0: |
400 // |stop_capture_event_| has been set. | 411 // |stop_capture_event_| has been set. |
401 recording = false; | 412 recording = false; |
402 break; | 413 break; |
403 case WAIT_OBJECT_0 + 1: | 414 case WAIT_OBJECT_0 + 1: |
404 { | 415 { |
405 // |audio_samples_ready_event_| has been set. | 416 // |audio_samples_ready_event_| has been set. |
406 BYTE* data_ptr = NULL; | 417 BYTE* data_ptr = NULL; |
407 UINT32 num_frames_to_read = 0; | 418 UINT32 num_frames_to_read = 0; |
408 DWORD flags = 0; | 419 DWORD flags = 0; |
409 UINT64 device_position = 0; | 420 UINT64 device_position = 0; |
410 UINT64 first_audio_frame_timestamp = 0; | 421 UINT64 first_audio_frame_timestamp = 0; |
411 | 422 |
412 // Retrieve the amount of data in the capture endpoint buffer, | 423 // Retrieve the amount of data in the capture endpoint buffer, |
413 // replace it with silence if required, create callbacks for each | 424 // replace it with silence if required, create callbacks for each |
414 // packet and store non-delivered data for the next event. | 425 // packet and store non-delivered data for the next event. |
415 hr = audio_capture_client_->GetBuffer(&data_ptr, | 426 hr = audio_capture_client->GetBuffer( |
416 &num_frames_to_read, | 427 &data_ptr, &num_frames_to_read, &flags, &device_position, |
417 &flags, | 428 &first_audio_frame_timestamp); |
418 &device_position, | |
419 &first_audio_frame_timestamp); | |
420 if (FAILED(hr)) { | 429 if (FAILED(hr)) { |
421 DLOG(ERROR) << "Failed to get data from the capture buffer"; | 430 DLOG(ERROR) << "Failed to get data from the capture buffer"; |
422 continue; | 431 continue; |
423 } | 432 } |
424 | 433 |
425 if (num_frames_to_read != 0) { | 434 if (num_frames_to_read != 0) { |
426 size_t pos = buffer_frame_index * frame_size_; | 435 size_t pos = buffer_frame_index * frame_size_; |
427 size_t num_bytes = num_frames_to_read * frame_size_; | 436 size_t num_bytes = num_frames_to_read * frame_size_; |
428 DCHECK_GE(capture_buffer_size, pos + num_bytes); | 437 DCHECK_GE(capture_buffer_size, pos + num_bytes); |
429 | 438 |
430 if (flags & AUDCLNT_BUFFERFLAGS_SILENT) { | 439 if (flags & AUDCLNT_BUFFERFLAGS_SILENT) { |
431 // Clear out the local buffer since silence is reported. | 440 // Clear out the local buffer since silence is reported. |
432 memset(&capture_buffer[pos], 0, num_bytes); | 441 memset(&capture_buffer[pos], 0, num_bytes); |
433 } else { | 442 } else { |
434 // Copy captured data from audio engine buffer to local buffer. | 443 // Copy captured data from audio engine buffer to local buffer. |
435 memcpy(&capture_buffer[pos], data_ptr, num_bytes); | 444 memcpy(&capture_buffer[pos], data_ptr, num_bytes); |
436 } | 445 } |
437 | 446 |
438 buffer_frame_index += num_frames_to_read; | 447 buffer_frame_index += num_frames_to_read; |
439 } | 448 } |
440 | 449 |
441 hr = audio_capture_client_->ReleaseBuffer(num_frames_to_read); | 450 hr = audio_capture_client->ReleaseBuffer(num_frames_to_read); |
442 DLOG_IF(ERROR, FAILED(hr)) << "Failed to release capture buffer"; | 451 DLOG_IF(ERROR, FAILED(hr)) << "Failed to release capture buffer"; |
443 | 452 |
444 // Derive a delay estimate for the captured audio packet. | 453 // Derive a delay estimate for the captured audio packet. |
445 // The value contains two parts (A+B), where A is the delay of the | 454 // The value contains two parts (A+B), where A is the delay of the |
446 // first audio frame in the packet and B is the extra delay | 455 // first audio frame in the packet and B is the extra delay |
447 // contained in any stored data. Unit is in audio frames. | 456 // contained in any stored data. Unit is in audio frames. |
448 QueryPerformanceCounter(&now_count); | 457 QueryPerformanceCounter(&now_count); |
449 double audio_delay_frames = | 458 double audio_delay_frames = |
450 ((perf_count_to_100ns_units_ * now_count.QuadPart - | 459 ((perf_count_to_100ns_units_ * now_count.QuadPart - |
451 first_audio_frame_timestamp) / 10000.0) * ms_to_frame_count_ + | 460 first_audio_frame_timestamp) / 10000.0) * ms_to_frame_count_ + |
(...skipping 217 matching lines...)
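The delay comment in the Run() hunk above packs several unit conversions into one expression: QueryPerformanceCounter ticks are scaled by perf_count_to_100ns_units_ into 100-ns units, the device timestamp of the packet's first frame (also in 100-ns units) is subtracted, dividing by 10000 gives milliseconds, and ms_to_frame_count_ (frames per millisecond) turns that into frames; the "B" term for data already parked in |capture_buffer| is added on the lines elided above. A quick numeric check of the visible "A" term, under assumed values:

    // Assumed values for illustration only: a 48 kHz stream gives
    // ms_to_frame_count_ = 48 frames per millisecond, and the packet's first
    // frame is taken to be 10 ms old (a difference of 100000 in 100-ns units).
    const double scaled_qpc_minus_timestamp = 100000.0;            // 100-ns units
    const double ms_to_frame_count = 48.0;                         // frames per ms
    const double delay_ms = scaled_qpc_minus_timestamp / 10000.0;  // 10 ms
    const double delay_frames_a = delay_ms * ms_to_frame_count;    // 480 frames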
669 // that glitches do not occur between the periodic processing passes. | 678 // that glitches do not occur between the periodic processing passes. |
670 // This setting should lead to lowest possible latency. | 679 // This setting should lead to lowest possible latency. |
671 HRESULT hr = audio_client_->Initialize( | 680 HRESULT hr = audio_client_->Initialize( |
672 AUDCLNT_SHAREMODE_SHARED, | 681 AUDCLNT_SHAREMODE_SHARED, |
673 flags, | 682 flags, |
674 0, // hnsBufferDuration | 683 0, // hnsBufferDuration |
675 0, | 684 0, |
676 &format_, | 685 &format_, |
677 (effects_ & AudioParameters::DUCKING) ? &kCommunicationsSessionId : NULL); | 686 (effects_ & AudioParameters::DUCKING) ? &kCommunicationsSessionId : NULL); |
678 | 687 |
679 if (FAILED(hr)) | 688 if (FAILED(hr)) { |
| 689 PLOG(ERROR) << "Failed to initialize IAudioClient: " << std::hex << hr |
| 690 << " : "; |
680 return hr; | 691 return hr; |
| 692 } |
681 | 693 |
682 // Retrieve the length of the endpoint buffer shared between the client | 694 // Retrieve the length of the endpoint buffer shared between the client |
683 // and the audio engine. The buffer length determines the maximum amount | 695 // and the audio engine. The buffer length determines the maximum amount |
684 // of capture data that the audio engine can read from the endpoint buffer | 696 // of capture data that the audio engine can read from the endpoint buffer |
685 // during a single processing pass. | 697 // during a single processing pass. |
686 // A typical value is 960 audio frames <=> 20ms @ 48kHz sample rate. | 698 // A typical value is 960 audio frames <=> 20ms @ 48kHz sample rate. |
687 hr = audio_client_->GetBufferSize(&endpoint_buffer_size_frames_); | 699 hr = audio_client_->GetBufferSize(&endpoint_buffer_size_frames_); |
688 if (FAILED(hr)) | 700 if (FAILED(hr)) |
689 return hr; | 701 return hr; |
690 | 702 |
(...skipping 70 matching lines...)
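The GetBufferSize() comment in the hunk above quotes "960 audio frames <=> 20ms @ 48kHz"; the conversion is simply sample rate times duration, e.g. with values assumed from the comment:

    // Numeric check of the comment's example (values taken from the comment).
    const int sample_rate_hz = 48000;      // frames per second
    const int buffer_duration_ms = 20;
    const int endpoint_buffer_size_frames =
        sample_rate_hz * buffer_duration_ms / 1000;  // == 960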
761 if (FAILED(hr)) | 773 if (FAILED(hr)) |
762 return hr; | 774 return hr; |
763 | 775 |
764 // Obtain a reference to the ISimpleAudioVolume interface which enables | 776 // Obtain a reference to the ISimpleAudioVolume interface which enables |
765 // us to control the master volume level of an audio session. | 777 // us to control the master volume level of an audio session. |
766 hr = audio_client_->GetService(__uuidof(ISimpleAudioVolume), | 778 hr = audio_client_->GetService(__uuidof(ISimpleAudioVolume), |
767 simple_audio_volume_.ReceiveVoid()); | 779 simple_audio_volume_.ReceiveVoid()); |
768 return hr; | 780 return hr; |
769 } | 781 } |
770 | 782 |
| 783 bool WASAPIAudioInputStream::MarshalComPointers() { |
| 784 DCHECK(CalledOnValidThread()); |
| 785 DCHECK(!com_stream_); |
| 786 HRESULT hr = CoMarshalInterThreadInterfaceInStream( |
| 787 __uuidof(IAudioCaptureClient), audio_capture_client_.get(), |
| 788 com_stream_.Receive()); |
| 789 if (FAILED(hr)) |
| 790 DLOG(ERROR) << "Marshal failed for IAudioCaptureClient: " << std::hex << hr; |
| 791 DCHECK_EQ(SUCCEEDED(hr), !!com_stream_); |
| 792 return SUCCEEDED(hr); |
| 793 } |
| 794 |
| 795 void WASAPIAudioInputStream::UnmarshalComPointers( |
| 796 ScopedComPtr<IAudioCaptureClient>* audio_capture_client) { |
| 797 DCHECK_EQ(capture_thread_->tid(), base::PlatformThread::CurrentId()); |
| 798 DCHECK(com_stream_); |
| 799 HRESULT hr = CoGetInterfaceAndReleaseStream( |
| 800 com_stream_.Detach(), __uuidof(IAudioCaptureClient), |
| 801 audio_capture_client->ReceiveVoid()); |
| 802 CHECK(SUCCEEDED(hr)); |
| 803 } |
| 804 |
771 } // namespace media | 805 } // namespace media |
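Taken together, the new MarshalComPointers()/UnmarshalComPointers() helpers implement the standard COM inter-thread hand-off: Start() serializes the IAudioCaptureClient created on the device thread into |com_stream_|, and the capture thread, which now enters COM through ScopedCOMInitializer's default apartment rather than kMTA, deserializes it at the top of Run(). A stripped-down sketch of the same round trip against the raw Win32 calls (only the two Co* APIs come from the patch; the helper names and surrounding code are illustrative):

    #include <audioclient.h>
    #include <objbase.h>

    // Producing thread: must have initialized COM and own |capture_client|.
    HRESULT MarshalToStream(IAudioCaptureClient* capture_client,
                            IStream** out_stream) {
      return CoMarshalInterThreadInterfaceInStream(
          __uuidof(IAudioCaptureClient), capture_client, out_stream);
    }

    // Consuming thread, after its own COM initialization. Note that
    // CoGetInterfaceAndReleaseStream releases the stream even on failure, so
    // the IStream pointer must not be touched again afterwards -- which is why
    // the patch hands it over with com_stream_.Detach().
    HRESULT UnmarshalFromStream(IStream* stream,
                                IAudioCaptureClient** capture_client) {
      return CoGetInterfaceAndReleaseStream(
          stream, __uuidof(IAudioCaptureClient),
          reinterpret_cast<void**>(capture_client));
    }

Marshaling is also what makes dropping the explicit kMTA safe: the proxy obtained on the consuming thread respects that thread's apartment rather than the apartment the original pointer was created in.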