OLD | NEW |
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "media/audio/win/audio_low_latency_input_win.h" | 5 #include "media/audio/win/audio_low_latency_input_win.h" |
6 | 6 |
7 #include "base/logging.h" | 7 #include "base/logging.h" |
8 #include "base/memory/scoped_ptr.h" | 8 #include "base/memory/scoped_ptr.h" |
9 #include "base/strings/utf_string_conversions.h" | 9 #include "base/strings/utf_string_conversions.h" |
10 #include "media/audio/win/audio_manager_win.h" | 10 #include "media/audio/win/audio_manager_win.h" |
(...skipping 140 matching lines...) |
151 if (started_) | 151 if (started_) |
152 return; | 152 return; |
153 | 153 |
154 DCHECK(!sink_); | 154 DCHECK(!sink_); |
155 sink_ = callback; | 155 sink_ = callback; |
156 | 156 |
157 // Starts periodic AGC microphone measurements if the AGC has been enabled | 157 // Starts periodic AGC microphone measurements if the AGC has been enabled |
158 // using SetAutomaticGainControl(). | 158 // using SetAutomaticGainControl(). |
159 StartAgc(); | 159 StartAgc(); |
160 | 160 |
161 if (!MarshalComPointers()) { | |
162 HandleError(S_FALSE); | |
163 return; | |
164 } | |
165 | |
166 // Create and start the thread that will drive the capturing by waiting for | 161 // Create and start the thread that will drive the capturing by waiting for |
167 // capture events. | 162 // capture events. |
168 capture_thread_ = | 163 capture_thread_ = |
169 new base::DelegateSimpleThread(this, "wasapi_capture_thread"); | 164 new base::DelegateSimpleThread(this, "wasapi_capture_thread"); |
170 capture_thread_->Start(); | 165 capture_thread_->Start(); |
171 | 166 |
172 // Start streaming data between the endpoint buffer and the audio engine. | 167 // Start streaming data between the endpoint buffer and the audio engine. |
173 HRESULT hr = audio_client_->Start(); | 168 HRESULT hr = audio_client_->Start(); |
174 DLOG_IF(ERROR, FAILED(hr)) << "Failed to start input streaming."; | 169 DLOG_IF(ERROR, FAILED(hr)) << "Failed to start input streaming."; |
175 | 170 |
176 if (SUCCEEDED(hr) && audio_render_client_for_loopback_.get()) | 171 if (SUCCEEDED(hr) && audio_render_client_for_loopback_.get()) |
177 hr = audio_render_client_for_loopback_->Start(); | 172 hr = audio_render_client_for_loopback_->Start(); |
178 | 173 |
179 started_ = SUCCEEDED(hr); | 174 started_ = SUCCEEDED(hr); |
180 if (!started_) | |
181 HandleError(hr); | |
182 } | 175 } |
183 | 176 |
184 void WASAPIAudioInputStream::Stop() { | 177 void WASAPIAudioInputStream::Stop() { |
185 DCHECK(CalledOnValidThread()); | 178 DCHECK(CalledOnValidThread()); |
186 DVLOG(1) << "WASAPIAudioInputStream::Stop()"; | 179 DVLOG(1) << "WASAPIAudioInputStream::Stop()"; |
187 if (!started_) | 180 if (!started_) |
188 return; | 181 return; |
189 | 182 |
190 // Stops periodic AGC microphone measurements. | 183 // Stops periodic AGC microphone measurements. |
191 StopAgc(); | 184 StopAgc(); |
(...skipping 158 matching lines...) |
350 | 343 |
351 ScopedComPtr<IAudioClient> audio_client; | 344 ScopedComPtr<IAudioClient> audio_client; |
352 hr = endpoint_device->Activate(__uuidof(IAudioClient), | 345 hr = endpoint_device->Activate(__uuidof(IAudioClient), |
353 CLSCTX_INPROC_SERVER, | 346 CLSCTX_INPROC_SERVER, |
354 NULL, | 347 NULL, |
355 audio_client.ReceiveVoid()); | 348 audio_client.ReceiveVoid()); |
356 return SUCCEEDED(hr) ? audio_client->GetMixFormat(device_format) : hr; | 349 return SUCCEEDED(hr) ? audio_client->GetMixFormat(device_format) : hr; |
357 } | 350 } |
358 | 351 |
359 void WASAPIAudioInputStream::Run() { | 352 void WASAPIAudioInputStream::Run() { |
360 ScopedCOMInitializer com_init; | 353 ScopedCOMInitializer com_init(ScopedCOMInitializer::kMTA); |
361 | 354 |
362 // Increase the thread priority. | 355 // Increase the thread priority. |
363 capture_thread_->SetThreadPriority(base::ThreadPriority::REALTIME_AUDIO); | 356 capture_thread_->SetThreadPriority(base::ThreadPriority::REALTIME_AUDIO); |
364 | 357 |
365 // Enable MMCSS to ensure that this thread receives prioritized access to | 358 // Enable MMCSS to ensure that this thread receives prioritized access to |
366 // CPU resources. | 359 // CPU resources. |
367 DWORD task_index = 0; | 360 DWORD task_index = 0; |
368 HANDLE mm_task = avrt::AvSetMmThreadCharacteristics(L"Pro Audio", | 361 HANDLE mm_task = avrt::AvSetMmThreadCharacteristics(L"Pro Audio", |
369 &task_index); | 362 &task_index); |
370 bool mmcss_is_ok = | 363 bool mmcss_is_ok = |
371 (mm_task && avrt::AvSetMmThreadPriority(mm_task, AVRT_PRIORITY_CRITICAL)); | 364 (mm_task && avrt::AvSetMmThreadPriority(mm_task, AVRT_PRIORITY_CRITICAL)); |
372 if (!mmcss_is_ok) { | 365 if (!mmcss_is_ok) { |
373 // Failed to enable MMCSS on this thread. It is not fatal but can lead | 366 // Failed to enable MMCSS on this thread. It is not fatal but can lead |
374 // to reduced QoS at high load. | 367 // to reduced QoS at high load. |
375 DWORD err = GetLastError(); | 368 DWORD err = GetLastError(); |
376 LOG(WARNING) << "Failed to enable MMCSS (error code=" << err << ")."; | 369 LOG(WARNING) << "Failed to enable MMCSS (error code=" << err << ")."; |
377 } | 370 } |
378 | 371 |
379 // Retrieve COM pointers from the main thread. | |
380 ScopedComPtr<IAudioCaptureClient> audio_capture_client; | |
381 UnmarshalComPointers(&audio_capture_client); | |
382 | |
383 // Allocate a buffer with a size that enables us to take care of cases like: | 372 // Allocate a buffer with a size that enables us to take care of cases like: |
384 // 1) The recorded buffer size is smaller, or does not match exactly with, | 373 // 1) The recorded buffer size is smaller, or does not match exactly with, |
385 // the selected packet size used in each callback. | 374 // the selected packet size used in each callback. |
386 // 2) The selected buffer size is larger than the recorded buffer size in | 375 // 2) The selected buffer size is larger than the recorded buffer size in |
387 // each event. | 376 // each event. |
388 size_t buffer_frame_index = 0; | 377 size_t buffer_frame_index = 0; |
389 size_t capture_buffer_size = std::max( | 378 size_t capture_buffer_size = std::max( |
390 2 * endpoint_buffer_size_frames_ * frame_size_, | 379 2 * endpoint_buffer_size_frames_ * frame_size_, |
391 2 * packet_size_frames_ * frame_size_); | 380 2 * packet_size_frames_ * frame_size_); |
392 scoped_ptr<uint8[]> capture_buffer(new uint8[capture_buffer_size]); | 381 scoped_ptr<uint8[]> capture_buffer(new uint8[capture_buffer_size]); |
393 | 382 |
394 LARGE_INTEGER now_count; | 383 LARGE_INTEGER now_count; |
395 bool recording = true; | 384 bool recording = true; |
396 bool error = false; | 385 bool error = false; |
397 double volume = 0; | 386 double volume = GetVolume(); |
398 HANDLE wait_array[2] = | 387 HANDLE wait_array[2] = |
399 { stop_capture_event_.Get(), audio_samples_ready_event_.Get() }; | 388 { stop_capture_event_.Get(), audio_samples_ready_event_.Get() }; |
400 | 389 |
401 while (recording && !error) { | 390 while (recording && !error) { |
402 HRESULT hr = S_FALSE; | 391 HRESULT hr = S_FALSE; |
403 | 392 |
404 // Wait for a close-down event or a new capture event. | 393 // Wait for a close-down event or a new capture event. |
405 DWORD wait_result = WaitForMultipleObjects(2, wait_array, FALSE, INFINITE); | 394 DWORD wait_result = WaitForMultipleObjects(2, wait_array, FALSE, INFINITE); |
406 switch (wait_result) { | 395 switch (wait_result) { |
407 case WAIT_FAILED: | 396 case WAIT_FAILED: |
408 error = true; | 397 error = true; |
409 break; | 398 break; |
410 case WAIT_OBJECT_0 + 0: | 399 case WAIT_OBJECT_0 + 0: |
411 // |stop_capture_event_| has been set. | 400 // |stop_capture_event_| has been set. |
412 recording = false; | 401 recording = false; |
413 break; | 402 break; |
414 case WAIT_OBJECT_0 + 1: | 403 case WAIT_OBJECT_0 + 1: |
415 { | 404 { |
416 // |audio_samples_ready_event_| has been set. | 405 // |audio_samples_ready_event_| has been set. |
417 BYTE* data_ptr = NULL; | 406 BYTE* data_ptr = NULL; |
418 UINT32 num_frames_to_read = 0; | 407 UINT32 num_frames_to_read = 0; |
419 DWORD flags = 0; | 408 DWORD flags = 0; |
420 UINT64 device_position = 0; | 409 UINT64 device_position = 0; |
421 UINT64 first_audio_frame_timestamp = 0; | 410 UINT64 first_audio_frame_timestamp = 0; |
422 | 411 |
423 // Retrieve the amount of data in the capture endpoint buffer, | 412 // Retrieve the amount of data in the capture endpoint buffer, |
424 // replace it with silence if required, create callbacks for each | 413 // replace it with silence if required, create callbacks for each |
425 // packet and store non-delivered data for the next event. | 414 // packet and store non-delivered data for the next event. |
426 hr = audio_capture_client->GetBuffer( | 415 hr = audio_capture_client_->GetBuffer(&data_ptr, |
427 &data_ptr, &num_frames_to_read, &flags, &device_position, | 416 &num_frames_to_read, |
428 &first_audio_frame_timestamp); | 417 &flags, |
| 418 &device_position, |
| 419 &first_audio_frame_timestamp); |
429 if (FAILED(hr)) { | 420 if (FAILED(hr)) { |
430 DLOG(ERROR) << "Failed to get data from the capture buffer"; | 421 DLOG(ERROR) << "Failed to get data from the capture buffer"; |
431 continue; | 422 continue; |
432 } | 423 } |
433 | 424 |
434 if (num_frames_to_read != 0) { | 425 if (num_frames_to_read != 0) { |
435 size_t pos = buffer_frame_index * frame_size_; | 426 size_t pos = buffer_frame_index * frame_size_; |
436 size_t num_bytes = num_frames_to_read * frame_size_; | 427 size_t num_bytes = num_frames_to_read * frame_size_; |
437 DCHECK_GE(capture_buffer_size, pos + num_bytes); | 428 DCHECK_GE(capture_buffer_size, pos + num_bytes); |
438 | 429 |
439 if (flags & AUDCLNT_BUFFERFLAGS_SILENT) { | 430 if (flags & AUDCLNT_BUFFERFLAGS_SILENT) { |
440 // Clear out the local buffer since silence is reported. | 431 // Clear out the local buffer since silence is reported. |
441 memset(&capture_buffer[pos], 0, num_bytes); | 432 memset(&capture_buffer[pos], 0, num_bytes); |
442 } else { | 433 } else { |
443 // Copy captured data from audio engine buffer to local buffer. | 434 // Copy captured data from audio engine buffer to local buffer. |
444 memcpy(&capture_buffer[pos], data_ptr, num_bytes); | 435 memcpy(&capture_buffer[pos], data_ptr, num_bytes); |
445 } | 436 } |
446 | 437 |
447 buffer_frame_index += num_frames_to_read; | 438 buffer_frame_index += num_frames_to_read; |
448 } | 439 } |
449 | 440 |
450 hr = audio_capture_client->ReleaseBuffer(num_frames_to_read); | 441 hr = audio_capture_client_->ReleaseBuffer(num_frames_to_read); |
451 DLOG_IF(ERROR, FAILED(hr)) << "Failed to release capture buffer"; | 442 DLOG_IF(ERROR, FAILED(hr)) << "Failed to release capture buffer"; |
452 | 443 |
453 // Derive a delay estimate for the captured audio packet. | 444 // Derive a delay estimate for the captured audio packet. |
454 // The value contains two parts (A+B), where A is the delay of the | 445 // The value contains two parts (A+B), where A is the delay of the |
455 // first audio frame in the packet and B is the extra delay | 446 // first audio frame in the packet and B is the extra delay |
456 // contained in any stored data. Unit is in audio frames. | 447 // contained in any stored data. Unit is in audio frames. |
457 QueryPerformanceCounter(&now_count); | 448 QueryPerformanceCounter(&now_count); |
458 double audio_delay_frames = | 449 double audio_delay_frames = |
459 ((perf_count_to_100ns_units_ * now_count.QuadPart - | 450 ((perf_count_to_100ns_units_ * now_count.QuadPart - |
460 first_audio_frame_timestamp) / 10000.0) * ms_to_frame_count_ + | 451 first_audio_frame_timestamp) / 10000.0) * ms_to_frame_count_ + |
(...skipping 217 matching lines...) |
678 // that glitches do not occur between the periodic processing passes. | 669 // that glitches do not occur between the periodic processing passes. |
679 // This setting should lead to lowest possible latency. | 670 // This setting should lead to lowest possible latency. |
680 HRESULT hr = audio_client_->Initialize( | 671 HRESULT hr = audio_client_->Initialize( |
681 AUDCLNT_SHAREMODE_SHARED, | 672 AUDCLNT_SHAREMODE_SHARED, |
682 flags, | 673 flags, |
683 0, // hnsBufferDuration | 674 0, // hnsBufferDuration |
684 0, | 675 0, |
685 &format_, | 676 &format_, |
686 (effects_ & AudioParameters::DUCKING) ? &kCommunicationsSessionId : NULL); | 677 (effects_ & AudioParameters::DUCKING) ? &kCommunicationsSessionId : NULL); |
687 | 678 |
688 if (FAILED(hr)) { | 679 if (FAILED(hr)) |
689 PLOG(ERROR) << "Failed to initalize IAudioClient: " << std::hex << hr | |
690 << " : "; | |
691 return hr; | 680 return hr; |
692 } | |
693 | 681 |
694 // Retrieve the length of the endpoint buffer shared between the client | 682 // Retrieve the length of the endpoint buffer shared between the client |
695 // and the audio engine. The buffer length determines the maximum amount | 683 // and the audio engine. The buffer length determines the maximum amount |
696 // of capture data that the audio engine can read from the endpoint buffer | 684 // of capture data that the audio engine can read from the endpoint buffer |
697 // during a single processing pass. | 685 // during a single processing pass. |
698 // A typical value is 960 audio frames <=> 20ms @ 48kHz sample rate. | 686 // A typical value is 960 audio frames <=> 20ms @ 48kHz sample rate. |
699 hr = audio_client_->GetBufferSize(&endpoint_buffer_size_frames_); | 687 hr = audio_client_->GetBufferSize(&endpoint_buffer_size_frames_); |
700 if (FAILED(hr)) | 688 if (FAILED(hr)) |
701 return hr; | 689 return hr; |
702 | 690 |
(...skipping 70 matching lines...) |
773 if (FAILED(hr)) | 761 if (FAILED(hr)) |
774 return hr; | 762 return hr; |
775 | 763 |
776 // Obtain a reference to the ISimpleAudioVolume interface which enables | 764 // Obtain a reference to the ISimpleAudioVolume interface which enables |
777 // us to control the master volume level of an audio session. | 765 // us to control the master volume level of an audio session. |
778 hr = audio_client_->GetService(__uuidof(ISimpleAudioVolume), | 766 hr = audio_client_->GetService(__uuidof(ISimpleAudioVolume), |
779 simple_audio_volume_.ReceiveVoid()); | 767 simple_audio_volume_.ReceiveVoid()); |
780 return hr; | 768 return hr; |
781 } | 769 } |
782 | 770 |
783 bool WASAPIAudioInputStream::MarshalComPointers() { | |
784 DCHECK(CalledOnValidThread()); | |
785 DCHECK(!com_stream_); | |
786 HRESULT hr = CoMarshalInterThreadInterfaceInStream( | |
787 __uuidof(IAudioCaptureClient), audio_capture_client_.get(), | |
788 com_stream_.Receive()); | |
789 if (FAILED(hr)) | |
790 DLOG(ERROR) << "Marshal failed for IAudioCaptureClient: " << std::hex << hr; | |
791 DCHECK_EQ(SUCCEEDED(hr), !!com_stream_); | |
792 return SUCCEEDED(hr); | |
793 } | |
794 | |
795 void WASAPIAudioInputStream::UnmarshalComPointers( | |
796 ScopedComPtr<IAudioCaptureClient>* audio_capture_client) { | |
797 DCHECK_EQ(capture_thread_->tid(), base::PlatformThread::CurrentId()); | |
798 DCHECK(com_stream_); | |
799 HRESULT hr = CoGetInterfaceAndReleaseStream( | |
800 com_stream_.Detach(), __uuidof(IAudioCaptureClient), | |
801 audio_capture_client->ReceiveVoid()); | |
802 CHECK(SUCCEEDED(hr)); | |
803 } | |
804 | |
805 } // namespace media | 771 } // namespace media |
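
The main change under review drops the CoMarshalInterThreadInterfaceInStream()/CoGetInterfaceAndReleaseStream() hand-off (MarshalComPointers/UnmarshalComPointers) and instead has the capture thread join the multithreaded apartment via ScopedCOMInitializer::kMTA and use |audio_capture_client_| directly: a pointer to an object created in the MTA can be used from any other MTA thread without marshaling. The following is a minimal standalone sketch of the two hand-off styles, not the Chromium classes; it uses IMMDeviceEnumerator as a stand-in interface and a plain Win32 thread, with most error handling omitted.

#include <windows.h>
#include <objbase.h>
#include <mmdeviceapi.h>
#include <stdio.h>

// Worker thread: recover the interface pointer that main() marshaled into the
// stream, use it, release it. This mirrors the UnmarshalComPointers() path
// that the CL removes.
DWORD WINAPI WorkerProc(LPVOID param) {
  CoInitializeEx(NULL, COINIT_MULTITHREADED);
  IMMDeviceEnumerator* enumerator = NULL;
  HRESULT hr = CoGetInterfaceAndReleaseStream(
      static_cast<IStream*>(param), __uuidof(IMMDeviceEnumerator),
      reinterpret_cast<void**>(&enumerator));
  printf("unmarshal on worker: %s\n", SUCCEEDED(hr) ? "ok" : "failed");
  if (SUCCEEDED(hr))
    enumerator->Release();
  CoUninitialize();
  return 0;
}

int main() {
  CoInitializeEx(NULL, COINIT_MULTITHREADED);
  IMMDeviceEnumerator* enumerator = NULL;
  CoCreateInstance(__uuidof(MMDeviceEnumerator), NULL, CLSCTX_ALL,
                   __uuidof(IMMDeviceEnumerator),
                   reinterpret_cast<void**>(&enumerator));

  // Old pattern (MarshalComPointers): serialize the interface into a stream
  // so another thread can obtain its own usable pointer.
  IStream* stream = NULL;
  CoMarshalInterThreadInterfaceInStream(__uuidof(IMMDeviceEnumerator),
                                        enumerator, &stream);
  HANDLE thread = CreateThread(NULL, 0, WorkerProc, stream, 0, NULL);
  WaitForSingleObject(thread, INFINITE);
  CloseHandle(thread);

  // New pattern: because both threads initialized COM as MTA, the raw
  // |enumerator| pointer could instead be handed to the worker directly
  // (with proper AddRef/Release), which is what the CL now does with
  // |audio_capture_client_|.
  enumerator->Release();
  CoUninitialize();
  return 0;
}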
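
For reference, the event-driven shared-mode capture pattern that Run() and the audio_client_->Initialize() hunk above implement looks roughly like this self-contained sketch. It is not the Chromium code: it opens the default capture device on one thread, keeps only minimal error handling, and pulls one packet per ready event.

#include <windows.h>
#include <mmdeviceapi.h>
#include <audioclient.h>
#include <stdio.h>

int main() {
  // Enter the MTA, as the capture thread now does via ScopedCOMInitializer.
  CoInitializeEx(NULL, COINIT_MULTITHREADED);

  IMMDeviceEnumerator* enumerator = NULL;
  IMMDevice* device = NULL;
  IAudioClient* audio_client = NULL;
  IAudioCaptureClient* capture_client = NULL;
  WAVEFORMATEX* format = NULL;

  CoCreateInstance(__uuidof(MMDeviceEnumerator), NULL, CLSCTX_ALL,
                   __uuidof(IMMDeviceEnumerator),
                   reinterpret_cast<void**>(&enumerator));
  enumerator->GetDefaultAudioEndpoint(eCapture, eConsole, &device);
  device->Activate(__uuidof(IAudioClient), CLSCTX_INPROC_SERVER, NULL,
                   reinterpret_cast<void**>(&audio_client));
  audio_client->GetMixFormat(&format);

  // Shared mode, event driven, engine-chosen (minimum) buffer duration.
  audio_client->Initialize(AUDCLNT_SHAREMODE_SHARED,
                           AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
                           0, 0, format, NULL);

  // The engine signals this event once per processing pass when new capture
  // data is available, like |audio_samples_ready_event_| above.
  HANDLE samples_ready = CreateEvent(NULL, FALSE, FALSE, NULL);
  audio_client->SetEventHandle(samples_ready);
  audio_client->GetService(__uuidof(IAudioCaptureClient),
                           reinterpret_cast<void**>(&capture_client));
  audio_client->Start();

  // Pull one packet per event, mirroring the GetBuffer()/ReleaseBuffer()
  // loop in Run(); a real client would accumulate and forward the data.
  for (int i = 0; i < 50; ++i) {
    if (WaitForSingleObject(samples_ready, 2000) != WAIT_OBJECT_0)
      break;
    BYTE* data = NULL;
    UINT32 frames = 0;
    DWORD flags = 0;
    if (SUCCEEDED(capture_client->GetBuffer(&data, &frames, &flags,
                                            NULL, NULL)) && frames != 0) {
      printf("%s packet: %u frames\n",
             (flags & AUDCLNT_BUFFERFLAGS_SILENT) ? "silent" : "captured",
             frames);
      capture_client->ReleaseBuffer(frames);
    }
  }

  audio_client->Stop();
  capture_client->Release();
  audio_client->Release();
  CoTaskMemFree(format);
  device->Release();
  enumerator->Release();
  CloseHandle(samples_ready);
  CoUninitialize();
  return 0;
}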