Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "media/audio/win/audio_low_latency_input_win.h" | 5 #include "media/audio/win/audio_low_latency_input_win.h" |
| 6 | 6 |
| 7 #include <memory> | 7 #include <memory> |
| 8 | 8 |
| 9 #include "base/logging.h" | 9 #include "base/logging.h" |
| 10 #include "base/strings/utf_string_conversions.h" | 10 #include "base/strings/utf_string_conversions.h" |
| (...skipping 17 matching lines...) Expand all Loading... | |
| 28 opened_(false), | 28 opened_(false), |
| 29 started_(false), | 29 started_(false), |
| 30 frame_size_(0), | 30 frame_size_(0), |
| 31 packet_size_frames_(0), | 31 packet_size_frames_(0), |
| 32 packet_size_bytes_(0), | 32 packet_size_bytes_(0), |
| 33 endpoint_buffer_size_frames_(0), | 33 endpoint_buffer_size_frames_(0), |
| 34 device_id_(device_id), | 34 device_id_(device_id), |
| 35 perf_count_to_100ns_units_(0.0), | 35 perf_count_to_100ns_units_(0.0), |
| 36 ms_to_frame_count_(0.0), | 36 ms_to_frame_count_(0.0), |
| 37 sink_(NULL), | 37 sink_(NULL), |
| 38 audio_bus_(media::AudioBus::Create(params)) { | 38 audio_bus_(media::AudioBus::Create(params)), |
| 39 mute_done_(false) { | |
| 39 DCHECK(manager_); | 40 DCHECK(manager_); |
| 40 | 41 |
| 41 // Load the Avrt DLL if not already loaded. Required to support MMCSS. | 42 // Load the Avrt DLL if not already loaded. Required to support MMCSS. |
| 42 bool avrt_init = avrt::Initialize(); | 43 bool avrt_init = avrt::Initialize(); |
| 43 DCHECK(avrt_init) << "Failed to load the Avrt.dll"; | 44 DCHECK(avrt_init) << "Failed to load the Avrt.dll"; |
| 44 | 45 |
| 45 // Set up the desired capture format specified by the client. | 46 // Set up the desired capture format specified by the client. |
| 46 format_.nSamplesPerSec = params.sample_rate(); | 47 format_.nSamplesPerSec = params.sample_rate(); |
| 47 format_.wFormatTag = WAVE_FORMAT_PCM; | 48 format_.wFormatTag = WAVE_FORMAT_PCM; |
| 48 format_.wBitsPerSample = params.bits_per_sample(); | 49 format_.wBitsPerSample = params.bits_per_sample(); |
| (...skipping 49 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 98 HRESULT hr = SetCaptureDevice(); | 99 HRESULT hr = SetCaptureDevice(); |
| 99 if (FAILED(hr)) | 100 if (FAILED(hr)) |
| 100 return false; | 101 return false; |
| 101 | 102 |
| 102 // Obtain an IAudioClient interface which enables us to create and initialize | 103 // Obtain an IAudioClient interface which enables us to create and initialize |
| 103 // an audio stream between an audio application and the audio engine. | 104 // an audio stream between an audio application and the audio engine. |
| 104 hr = ActivateCaptureDevice(); | 105 hr = ActivateCaptureDevice(); |
| 105 if (FAILED(hr)) | 106 if (FAILED(hr)) |
| 106 return false; | 107 return false; |
| 107 | 108 |
| 108 // Retrieve the stream format which the audio engine uses for its internal | 109 // Retrieve the stream format which the audio engine uses for its internal |
| 109 // processing/mixing of shared-mode streams. This function call is for | 110 // processing/mixing of shared-mode streams. This function call is for |
| 110 // diagnostic purposes only and only in debug mode. | 111 // diagnostic purposes only and only in debug mode. |
| 111 #ifndef NDEBUG | 112 #ifndef NDEBUG |
| 112 hr = GetAudioEngineStreamFormat(); | 113 hr = GetAudioEngineStreamFormat(); |
| 113 #endif | 114 #endif |
| 114 | 115 |
| 115 // Verify that the selected audio endpoint supports the specified format | 116 // Verify that the selected audio endpoint supports the specified format |
| 116 // set during construction. | 117 // set during construction. |
| 117 if (!DesiredFormatIsSupported()) | 118 if (!DesiredFormatIsSupported()) |
| 118 return false; | 119 return false; |
| 119 | 120 |
| 120 // Initialize the audio stream between the client and the device using | 121 // Initialize the audio stream between the client and the device using |
| (...skipping 37 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 158 | 159 |
| 159 started_ = SUCCEEDED(hr); | 160 started_ = SUCCEEDED(hr); |
| 160 } | 161 } |
| 161 | 162 |
| 162 void WASAPIAudioInputStream::Stop() { | 163 void WASAPIAudioInputStream::Stop() { |
| 163 DCHECK(CalledOnValidThread()); | 164 DCHECK(CalledOnValidThread()); |
| 164 DVLOG(1) << "WASAPIAudioInputStream::Stop()"; | 165 DVLOG(1) << "WASAPIAudioInputStream::Stop()"; |
| 165 if (!started_) | 166 if (!started_) |
| 166 return; | 167 return; |
| 167 | 168 |
| 169 // We have muted system audio for capturing, so we need to unmute it when | |
| 170 // capturing stops. | |
| 171 if (device_id_ == AudioDeviceDescription::kLoopbackWithMuteDeviceId && | |
| 172 mute_done_) { | |
| 173 if (system_audio_volume_) { | |
|
henrika (OOO until Aug 14)
2016/07/26 10:04:25
What if system_audio_volume_ is NULL? Add DCHECK p
qiangchen
2016/07/26 16:53:02
Done.
| |
| 174 system_audio_volume_->SetMute(false, NULL); | |
| 175 mute_done_ = false; | |
| 176 } | |
| 177 } | |
| 178 | |
| 168 // Stops periodic AGC microphone measurements. | 179 // Stops periodic AGC microphone measurements. |
| 169 StopAgc(); | 180 StopAgc(); |
| 170 | 181 |
| 171 // Shut down the capture thread. | 182 // Shut down the capture thread. |
| 172 if (stop_capture_event_.IsValid()) { | 183 if (stop_capture_event_.IsValid()) { |
| 173 SetEvent(stop_capture_event_.Get()); | 184 SetEvent(stop_capture_event_.Get()); |
| 174 } | 185 } |
| 175 | 186 |
| 176 // Stop the input audio streaming. | 187 // Stop the input audio streaming. |
| 177 HRESULT hr = audio_client_->Stop(); | 188 HRESULT hr = audio_client_->Stop(); |
| (...skipping 88 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 266 | 277 |
| 267 return is_muted != FALSE; | 278 return is_muted != FALSE; |
| 268 } | 279 } |
| 269 | 280 |
| 270 void WASAPIAudioInputStream::Run() { | 281 void WASAPIAudioInputStream::Run() { |
| 271 ScopedCOMInitializer com_init(ScopedCOMInitializer::kMTA); | 282 ScopedCOMInitializer com_init(ScopedCOMInitializer::kMTA); |
| 272 | 283 |
| 273 // Enable MMCSS to ensure that this thread receives prioritized access to | 284 // Enable MMCSS to ensure that this thread receives prioritized access to |
| 274 // CPU resources. | 285 // CPU resources. |
| 275 DWORD task_index = 0; | 286 DWORD task_index = 0; |
| 276 HANDLE mm_task = avrt::AvSetMmThreadCharacteristics(L"Pro Audio", | 287 HANDLE mm_task = |
| 277 &task_index); | 288 avrt::AvSetMmThreadCharacteristics(L"Pro Audio", &task_index); |
| 278 bool mmcss_is_ok = | 289 bool mmcss_is_ok = |
| 279 (mm_task && avrt::AvSetMmThreadPriority(mm_task, AVRT_PRIORITY_CRITICAL)); | 290 (mm_task && avrt::AvSetMmThreadPriority(mm_task, AVRT_PRIORITY_CRITICAL)); |
| 280 if (!mmcss_is_ok) { | 291 if (!mmcss_is_ok) { |
| 281 // Failed to enable MMCSS on this thread. It is not fatal but can lead | 292 // Failed to enable MMCSS on this thread. It is not fatal but can lead |
| 282 // to reduced QoS at high load. | 293 // to reduced QoS at high load. |
| 283 DWORD err = GetLastError(); | 294 DWORD err = GetLastError(); |
| 284 LOG(WARNING) << "Failed to enable MMCSS (error code=" << err << ")."; | 295 LOG(WARNING) << "Failed to enable MMCSS (error code=" << err << ")."; |
| 285 } | 296 } |
| 286 | 297 |
| 287 // Allocate a buffer with a size that enables us to take care of cases like: | 298 // Allocate a buffer with a size that enables us to take care of cases like: |
| 288 // 1) The recorded buffer size is smaller, or does not match exactly with, | 299 // 1) The recorded buffer size is smaller, or does not match exactly with, |
| 289 // the selected packet size used in each callback. | 300 // the selected packet size used in each callback. |
| 290 // 2) The selected buffer size is larger than the recorded buffer size in | 301 // 2) The selected buffer size is larger than the recorded buffer size in |
| 291 // each event. | 302 // each event. |
| 292 size_t buffer_frame_index = 0; | 303 size_t buffer_frame_index = 0; |
| 293 size_t capture_buffer_size = std::max( | 304 size_t capture_buffer_size = |
| 294 2 * endpoint_buffer_size_frames_ * frame_size_, | 305 std::max(2 * endpoint_buffer_size_frames_ * frame_size_, |
| 295 2 * packet_size_frames_ * frame_size_); | 306 2 * packet_size_frames_ * frame_size_); |
| 296 std::unique_ptr<uint8_t[]> capture_buffer(new uint8_t[capture_buffer_size]); | 307 std::unique_ptr<uint8_t[]> capture_buffer(new uint8_t[capture_buffer_size]); |
| 297 | 308 |
| 298 LARGE_INTEGER now_count = {}; | 309 LARGE_INTEGER now_count = {}; |
| 299 bool recording = true; | 310 bool recording = true; |
| 300 bool error = false; | 311 bool error = false; |
| 301 double volume = GetVolume(); | 312 double volume = GetVolume(); |
| 302 HANDLE wait_array[2] = | 313 HANDLE wait_array[2] = {stop_capture_event_.Get(), |
| 303 { stop_capture_event_.Get(), audio_samples_ready_event_.Get() }; | 314 audio_samples_ready_event_.Get()}; |
| 304 | 315 |
| 305 base::win::ScopedComPtr<IAudioClock> audio_clock; | 316 base::win::ScopedComPtr<IAudioClock> audio_clock; |
| 306 audio_client_->GetService(__uuidof(IAudioClock), audio_clock.ReceiveVoid()); | 317 audio_client_->GetService(__uuidof(IAudioClock), audio_clock.ReceiveVoid()); |
| 307 | 318 |
| 308 while (recording && !error) { | 319 while (recording && !error) { |
| 309 HRESULT hr = S_FALSE; | 320 HRESULT hr = S_FALSE; |
| 310 | 321 |
| 311 // Wait for a close-down event or a new capture event. | 322 // Wait for a close-down event or a new capture event. |
| 312 DWORD wait_result = WaitForMultipleObjects(2, wait_array, FALSE, INFINITE); | 323 DWORD wait_result = WaitForMultipleObjects(2, wait_array, FALSE, INFINITE); |
| 313 switch (wait_result) { | 324 switch (wait_result) { |
| 314 case WAIT_FAILED: | 325 case WAIT_FAILED: |
| 315 error = true; | 326 error = true; |
| 316 break; | 327 break; |
| 317 case WAIT_OBJECT_0 + 0: | 328 case WAIT_OBJECT_0 + 0: |
| 318 // |stop_capture_event_| has been set. | 329 // |stop_capture_event_| has been set. |
| 319 recording = false; | 330 recording = false; |
| 320 break; | 331 break; |
| 321 case WAIT_OBJECT_0 + 1: | 332 case WAIT_OBJECT_0 + 1: { |
| 322 { | 333 TRACE_EVENT0("audio", "WASAPIAudioInputStream::Run_0"); |
| 323 TRACE_EVENT0("audio", "WASAPIAudioInputStream::Run_0"); | 334 // |audio_samples_ready_event_| has been set. |
| 324 // |audio_samples_ready_event_| has been set. | 335 BYTE* data_ptr = NULL; |
| 325 BYTE* data_ptr = NULL; | 336 UINT32 num_frames_to_read = 0; |
| 326 UINT32 num_frames_to_read = 0; | 337 DWORD flags = 0; |
| 327 DWORD flags = 0; | 338 UINT64 device_position = 0; |
| 328 UINT64 device_position = 0; | 339 UINT64 first_audio_frame_timestamp = 0; |
| 329 UINT64 first_audio_frame_timestamp = 0; | 340 |
| 330 | 341 // Retrieve the amount of data in the capture endpoint buffer, |
| 331 // Retrieve the amount of data in the capture endpoint buffer, | 342 // replace it with silence if required, create callbacks for each |
| 332 // replace it with silence if required, create callbacks for each | 343 // packet and store non-delivered data for the next event. |
| 333 // packet and store non-delivered data for the next event. | 344 hr = audio_capture_client_->GetBuffer(&data_ptr, &num_frames_to_read, |
| 334 hr = audio_capture_client_->GetBuffer(&data_ptr, | 345 &flags, &device_position, |
| 335 &num_frames_to_read, | 346 &first_audio_frame_timestamp); |
| 336 &flags, | 347 if (FAILED(hr)) { |
| 337 &device_position, | 348 DLOG(ERROR) << "Failed to get data from the capture buffer"; |
| 338 &first_audio_frame_timestamp); | 349 continue; |
| 339 if (FAILED(hr)) { | 350 } |
| 340 DLOG(ERROR) << "Failed to get data from the capture buffer"; | 351 |
| 341 continue; | 352 if (audio_clock) { |
| 353 // The reported timestamp from GetBuffer is not as reliable as the | |
| 354 // clock from the client. We've seen timestamps reported for | |
| 355 // USB audio devices, be off by several days. Furthermore we've | |
| 356 // seen them jump back in time every 2 seconds or so. | |
| 357 audio_clock->GetPosition(&device_position, | |
| 358 &first_audio_frame_timestamp); | |
| 359 } | |
| 360 | |
| 361 if (num_frames_to_read != 0) { | |
| 362 size_t pos = buffer_frame_index * frame_size_; | |
| 363 size_t num_bytes = num_frames_to_read * frame_size_; | |
| 364 DCHECK_GE(capture_buffer_size, pos + num_bytes); | |
| 365 | |
| 366 if (flags & AUDCLNT_BUFFERFLAGS_SILENT) { | |
| 367 // Clear out the local buffer since silence is reported. | |
| 368 memset(&capture_buffer[pos], 0, num_bytes); | |
| 369 } else { | |
| 370 // Copy captured data from audio engine buffer to local buffer. | |
| 371 memcpy(&capture_buffer[pos], data_ptr, num_bytes); | |
| 342 } | 372 } |
| 343 | 373 |
| 344 if (audio_clock) { | 374 buffer_frame_index += num_frames_to_read; |
| 345 // The reported timestamp from GetBuffer is not as reliable as the | 375 } |
| 346 // clock from the client. We've seen timestamps reported for | 376 |
| 347 // USB audio devices, be off by several days. Furthermore we've | 377 hr = audio_capture_client_->ReleaseBuffer(num_frames_to_read); |
| 348 // seen them jump back in time every 2 seconds or so. | 378 DLOG_IF(ERROR, FAILED(hr)) << "Failed to release capture buffer"; |
| 349 audio_clock->GetPosition( | 379 |
| 350 &device_position, &first_audio_frame_timestamp); | 380 // Derive a delay estimate for the captured audio packet. |
| 351 } | 381 // The value contains two parts (A+B), where A is the delay of the |
| 352 | 382 // first audio frame in the packet and B is the extra delay |
| 353 | 383 // contained in any stored data. Unit is in audio frames. |
| 354 if (num_frames_to_read != 0) { | 384 QueryPerformanceCounter(&now_count); |
| 355 size_t pos = buffer_frame_index * frame_size_; | 385 // first_audio_frame_timestamp will be 0 if we didn't get a timestamp. |
| 356 size_t num_bytes = num_frames_to_read * frame_size_; | 386 double audio_delay_frames = |
|
henrika (OOO until Aug 14)
2016/07/26 10:04:25
Indentation looks really odd here. Think I like th
qiangchen
2016/07/26 16:53:02
git cl format changed it to this way. If I change
| |
| 357 DCHECK_GE(capture_buffer_size, pos + num_bytes); | 387 first_audio_frame_timestamp == 0 |
| 358 | 388 ? num_frames_to_read |
| 359 if (flags & AUDCLNT_BUFFERFLAGS_SILENT) { | 389 : ((perf_count_to_100ns_units_ * now_count.QuadPart - |
| 360 // Clear out the local buffer since silence is reported. | 390 first_audio_frame_timestamp) / |
| 361 memset(&capture_buffer[pos], 0, num_bytes); | 391 10000.0) * |
| 362 } else { | 392 ms_to_frame_count_ + |
| 363 // Copy captured data from audio engine buffer to local buffer. | 393 buffer_frame_index - num_frames_to_read; |
| 364 memcpy(&capture_buffer[pos], data_ptr, num_bytes); | 394 |
| 365 } | 395 // Get a cached AGC volume level which is updated once every second |
| 366 | 396 // on the audio manager thread. Note that, |volume| is also updated |
| 367 buffer_frame_index += num_frames_to_read; | 397 // each time SetVolume() is called through IPC by the render-side AGC. |
| 368 } | 398 GetAgcVolume(&volume); |
| 369 | 399 |
| 370 hr = audio_capture_client_->ReleaseBuffer(num_frames_to_read); | 400 // Deliver captured data to the registered consumer using a packet |
| 371 DLOG_IF(ERROR, FAILED(hr)) << "Failed to release capture buffer"; | 401 // size which was specified at construction. |
| 372 | 402 uint32_t delay_frames = static_cast<uint32_t>(audio_delay_frames + 0.5); |
| 373 // Derive a delay estimate for the captured audio packet. | 403 while (buffer_frame_index >= packet_size_frames_) { |
| 374 // The value contains two parts (A+B), where A is the delay of the | 404 // Copy data to audio bus to match the OnData interface. |
| 375 // first audio frame in the packet and B is the extra delay | 405 uint8_t* audio_data = |
| 376 // contained in any stored data. Unit is in audio frames. | 406 reinterpret_cast<uint8_t*>(capture_buffer.get()); |
| 377 QueryPerformanceCounter(&now_count); | 407 audio_bus_->FromInterleaved(audio_data, audio_bus_->frames(), |
| 378 // first_audio_frame_timestamp will be 0 if we didn't get a timestamp. | 408 format_.wBitsPerSample / 8); |
| 379 double audio_delay_frames = first_audio_frame_timestamp == 0 ? | 409 |
| 380 num_frames_to_read : | 410 // Deliver data packet, delay estimation and volume level to |
| 381 ((perf_count_to_100ns_units_ * now_count.QuadPart - | 411 // the user. |
| 382 first_audio_frame_timestamp) / 10000.0) * ms_to_frame_count_ + | 412 sink_->OnData(this, audio_bus_.get(), delay_frames * frame_size_, |
| 383 buffer_frame_index - num_frames_to_read; | 413 volume); |
| 384 | 414 |
| 385 // Get a cached AGC volume level which is updated once every second | 415 // Store parts of the recorded data which can't be delivered |
| 386 // on the audio manager thread. Note that, |volume| is also updated | 416 // using the current packet size. The stored section will be used |
| 387 // each time SetVolume() is called through IPC by the render-side AGC. | 417 // either in the next while-loop iteration or in the next |
| 388 GetAgcVolume(&volume); | 418 // capture event. |
| 389 | 419 // TODO(tommi): If this data will be used in the next capture |
| 390 // Deliver captured data to the registered consumer using a packet | 420 // event, we will report incorrect delay estimates because |
| 391 // size which was specified at construction. | 421 // we'll use the one for the captured data that time around |
| 392 uint32_t delay_frames = | 422 // (i.e. in the future). |
| 393 static_cast<uint32_t>(audio_delay_frames + 0.5); | 423 memmove(&capture_buffer[0], &capture_buffer[packet_size_bytes_], |
| 394 while (buffer_frame_index >= packet_size_frames_) { | 424 (buffer_frame_index - packet_size_frames_) * frame_size_); |
| 395 // Copy data to audio bus to match the OnData interface. | 425 |
| 396 uint8_t* audio_data = | 426 DCHECK_GE(buffer_frame_index, packet_size_frames_); |
| 397 reinterpret_cast<uint8_t*>(capture_buffer.get()); | 427 buffer_frame_index -= packet_size_frames_; |
| 398 audio_bus_->FromInterleaved( | 428 if (delay_frames > packet_size_frames_) { |
| 399 audio_data, audio_bus_->frames(), format_.wBitsPerSample / 8); | 429 delay_frames -= packet_size_frames_; |
| 400 | 430 } else { |
| 401 // Deliver data packet, delay estimation and volume level to | 431 delay_frames = 0; |
| 402 // the user. | |
| 403 sink_->OnData( | |
| 404 this, audio_bus_.get(), delay_frames * frame_size_, volume); | |
| 405 | |
| 406 // Store parts of the recorded data which can't be delivered | |
| 407 // using the current packet size. The stored section will be used | |
| 408 // either in the next while-loop iteration or in the next | |
| 409 // capture event. | |
| 410 // TODO(tommi): If this data will be used in the next capture | |
| 411 // event, we will report incorrect delay estimates because | |
| 412 // we'll use the one for the captured data that time around | |
| 413 // (i.e. in the future). | |
| 414 memmove(&capture_buffer[0], | |
| 415 &capture_buffer[packet_size_bytes_], | |
| 416 (buffer_frame_index - packet_size_frames_) * frame_size_); | |
| 417 | |
| 418 DCHECK_GE(buffer_frame_index, packet_size_frames_); | |
| 419 buffer_frame_index -= packet_size_frames_; | |
| 420 if (delay_frames > packet_size_frames_) { | |
| 421 delay_frames -= packet_size_frames_; | |
| 422 } else { | |
| 423 delay_frames = 0; | |
| 424 } | |
| 425 } | 432 } |
| 426 } | 433 } |
| 427 break; | 434 } break; |
| 428 default: | 435 default: |
| 429 error = true; | 436 error = true; |
| 430 break; | 437 break; |
| 431 } | 438 } |
| 432 } | 439 } |
| 433 | 440 |
| 434 if (recording && error) { | 441 if (recording && error) { |
| 435 // TODO(henrika): perhaps it is worth improving the cleanup here by e.g. | 442 // TODO(henrika): perhaps it is worth improving the cleanup here by e.g. |
| 436 // stopping the audio client, joining the thread etc.? | 443 // stopping the audio client, joining the thread etc.? |
| 437 NOTREACHED() << "WASAPI capturing failed with error code " | 444 NOTREACHED() << "WASAPI capturing failed with error code " |
| 438 << GetLastError(); | 445 << GetLastError(); |
| 439 } | 446 } |
| 440 | 447 |
| 441 // Disable MMCSS. | 448 // Disable MMCSS. |
| 442 if (mm_task && !avrt::AvRevertMmThreadCharacteristics(mm_task)) { | 449 if (mm_task && !avrt::AvRevertMmThreadCharacteristics(mm_task)) { |
| 443 PLOG(WARNING) << "Failed to disable MMCSS"; | 450 PLOG(WARNING) << "Failed to disable MMCSS"; |
| 444 } | 451 } |
| 445 } | 452 } |
| 446 | 453 |
| 447 void WASAPIAudioInputStream::HandleError(HRESULT err) { | 454 void WASAPIAudioInputStream::HandleError(HRESULT err) { |
| 448 NOTREACHED() << "Error code: " << err; | 455 NOTREACHED() << "Error code: " << err; |
| 449 if (sink_) | 456 if (sink_) |
| 450 sink_->OnError(this); | 457 sink_->OnError(this); |
| 451 } | 458 } |
| 452 | 459 |
| 453 HRESULT WASAPIAudioInputStream::SetCaptureDevice() { | 460 HRESULT WASAPIAudioInputStream::SetCaptureDevice() { |
| 454 DCHECK(!endpoint_device_.get()); | 461 DCHECK(!endpoint_device_.get()); |
| 455 | 462 |
| 456 ScopedComPtr<IMMDeviceEnumerator> enumerator; | 463 ScopedComPtr<IMMDeviceEnumerator> enumerator; |
| 457 HRESULT hr = enumerator.CreateInstance(__uuidof(MMDeviceEnumerator), | 464 HRESULT hr = enumerator.CreateInstance(__uuidof(MMDeviceEnumerator), NULL, |
| 458 NULL, CLSCTX_INPROC_SERVER); | 465 CLSCTX_INPROC_SERVER); |
| 459 if (FAILED(hr)) | 466 if (FAILED(hr)) |
| 460 return hr; | 467 return hr; |
| 461 | 468 |
| 462 // Retrieve the IMMDevice by using the specified role or the specified | 469 // Retrieve the IMMDevice by using the specified role or the specified |
| 463 // unique endpoint device-identification string. | 470 // unique endpoint device-identification string. |
| 464 | 471 |
| 465 if (device_id_ == AudioDeviceDescription::kDefaultDeviceId) { | 472 if (device_id_ == AudioDeviceDescription::kDefaultDeviceId) { |
| 466 // Retrieve the default capture audio endpoint for the specified role. | 473 // Retrieve the default capture audio endpoint for the specified role. |
| 467 // Note that, in Windows Vista, the MMDevice API supports device roles | 474 // Note that, in Windows Vista, the MMDevice API supports device roles |
| 468 // but the system-supplied user interface programs do not. | 475 // but the system-supplied user interface programs do not. |
| 469 hr = enumerator->GetDefaultAudioEndpoint(eCapture, eConsole, | 476 hr = enumerator->GetDefaultAudioEndpoint(eCapture, eConsole, |
| 470 endpoint_device_.Receive()); | 477 endpoint_device_.Receive()); |
| 471 } else if (device_id_ == AudioDeviceDescription::kCommunicationsDeviceId) { | 478 } else if (device_id_ == AudioDeviceDescription::kCommunicationsDeviceId) { |
| 472 hr = enumerator->GetDefaultAudioEndpoint(eCapture, eCommunications, | 479 hr = enumerator->GetDefaultAudioEndpoint(eCapture, eCommunications, |
| 473 endpoint_device_.Receive()); | 480 endpoint_device_.Receive()); |
| 481 } else if (device_id_ == AudioDeviceDescription::kLoopbackWithMuteDeviceId) { | |
| 482 // Capture the default playback stream. | |
| 483 hr = enumerator->GetDefaultAudioEndpoint(eRender, eConsole, | |
| 484 endpoint_device_.Receive()); | |
| 485 | |
| 486 endpoint_device_->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL, NULL, | |
| 487 system_audio_volume_.ReceiveVoid()); | |
| 488 if (system_audio_volume_) { | |
| 489 BOOL muted = false; | |
| 490 system_audio_volume_->GetMute(&muted); | |
| 491 | |
| 492 // If the system audio is mute at the time of capturing, then no need to | |
|
henrika (OOO until Aug 14)
2016/07/26 10:04:25
muted
qiangchen
2016/07/26 16:53:02
Done.
| |
| 493 // mute it again, and later we do not unmute system audio when stopping | |
| 494 // capturing. | |
| 495 if (!muted) { | |
| 496 system_audio_volume_->SetMute(true, NULL); | |
| 497 mute_done_ = true; | |
| 498 } | |
| 499 } | |
| 474 } else if (device_id_ == AudioDeviceDescription::kLoopbackInputDeviceId) { | 500 } else if (device_id_ == AudioDeviceDescription::kLoopbackInputDeviceId) { |
| 475 // Capture the default playback stream. | 501 // Capture the default playback stream. |
| 476 hr = enumerator->GetDefaultAudioEndpoint(eRender, eConsole, | 502 hr = enumerator->GetDefaultAudioEndpoint(eRender, eConsole, |
| 477 endpoint_device_.Receive()); | 503 endpoint_device_.Receive()); |
| 478 } else { | 504 } else { |
| 479 hr = enumerator->GetDevice(base::UTF8ToUTF16(device_id_).c_str(), | 505 hr = enumerator->GetDevice(base::UTF8ToUTF16(device_id_).c_str(), |
| 480 endpoint_device_.Receive()); | 506 endpoint_device_.Receive()); |
| 481 } | 507 } |
| 482 | 508 |
| 483 if (FAILED(hr)) | 509 if (FAILED(hr)) |
| (...skipping 10 matching lines...) Expand all Loading... | |
| 494 DLOG(ERROR) << "Selected capture device is not active."; | 520 DLOG(ERROR) << "Selected capture device is not active."; |
| 495 hr = E_ACCESSDENIED; | 521 hr = E_ACCESSDENIED; |
| 496 } | 522 } |
| 497 | 523 |
| 498 return hr; | 524 return hr; |
| 499 } | 525 } |
| 500 | 526 |
| 501 HRESULT WASAPIAudioInputStream::ActivateCaptureDevice() { | 527 HRESULT WASAPIAudioInputStream::ActivateCaptureDevice() { |
| 502 // Creates and activates an IAudioClient COM object given the selected | 528 // Creates and activates an IAudioClient COM object given the selected |
| 503 // capture endpoint device. | 529 // capture endpoint device. |
| 504 HRESULT hr = endpoint_device_->Activate(__uuidof(IAudioClient), | 530 HRESULT hr = |
| 505 CLSCTX_INPROC_SERVER, | 531 endpoint_device_->Activate(__uuidof(IAudioClient), CLSCTX_INPROC_SERVER, |
| 506 NULL, | 532 NULL, audio_client_.ReceiveVoid()); |
| 507 audio_client_.ReceiveVoid()); | |
| 508 return hr; | 533 return hr; |
| 509 } | 534 } |
| 510 | 535 |
| 511 HRESULT WASAPIAudioInputStream::GetAudioEngineStreamFormat() { | 536 HRESULT WASAPIAudioInputStream::GetAudioEngineStreamFormat() { |
| 512 HRESULT hr = S_OK; | 537 HRESULT hr = S_OK; |
| 513 #ifndef NDEBUG | 538 #ifndef NDEBUG |
| 514 // The GetMixFormat() method retrieves the stream format that the | 539 // The GetMixFormat() method retrieves the stream format that the |
| 515 // audio engine uses for its internal processing of shared-mode streams. | 540 // audio engine uses for its internal processing of shared-mode streams. |
| 516 // The method always uses a WAVEFORMATEXTENSIBLE structure, instead | 541 // The method always uses a WAVEFORMATEXTENSIBLE structure, instead |
| 517 // of a stand-alone WAVEFORMATEX structure, to specify the format. | 542 // of a stand-alone WAVEFORMATEX structure, to specify the format. |
| 518 // An WAVEFORMATEXTENSIBLE structure can specify both the mapping of | 543 // An WAVEFORMATEXTENSIBLE structure can specify both the mapping of |
| 519 // channels to speakers and the number of bits of precision in each sample. | 544 // channels to speakers and the number of bits of precision in each sample. |
| 520 base::win::ScopedCoMem<WAVEFORMATEXTENSIBLE> format_ex; | 545 base::win::ScopedCoMem<WAVEFORMATEXTENSIBLE> format_ex; |
| 521 hr = audio_client_->GetMixFormat( | 546 hr = |
| 522 reinterpret_cast<WAVEFORMATEX**>(&format_ex)); | 547 audio_client_->GetMixFormat(reinterpret_cast<WAVEFORMATEX**>(&format_ex)); |
| 523 | 548 |
| 524 // See http://msdn.microsoft.com/en-us/windows/hardware/gg463006#EFH | 549 // See http://msdn.microsoft.com/en-us/windows/hardware/gg463006#EFH |
| 525 // for details on the WAVE file format. | 550 // for details on the WAVE file format. |
| 526 WAVEFORMATEX format = format_ex->Format; | 551 WAVEFORMATEX format = format_ex->Format; |
| 527 DVLOG(2) << "WAVEFORMATEX:"; | 552 DVLOG(2) << "WAVEFORMATEX:"; |
| 528 DVLOG(2) << " wFormatTags : 0x" << std::hex << format.wFormatTag; | 553 DVLOG(2) << " wFormatTags : 0x" << std::hex << format.wFormatTag; |
| 529 DVLOG(2) << " nChannels : " << format.nChannels; | 554 DVLOG(2) << " nChannels : " << format.nChannels; |
| 530 DVLOG(2) << " nSamplesPerSec : " << format.nSamplesPerSec; | 555 DVLOG(2) << " nSamplesPerSec : " << format.nSamplesPerSec; |
| 531 DVLOG(2) << " nAvgBytesPerSec: " << format.nAvgBytesPerSec; | 556 DVLOG(2) << " nAvgBytesPerSec: " << format.nAvgBytesPerSec; |
| 532 DVLOG(2) << " nBlockAlign : " << format.nBlockAlign; | 557 DVLOG(2) << " nBlockAlign : " << format.nBlockAlign; |
| 533 DVLOG(2) << " wBitsPerSample : " << format.wBitsPerSample; | 558 DVLOG(2) << " wBitsPerSample : " << format.wBitsPerSample; |
| 534 DVLOG(2) << " cbSize : " << format.cbSize; | 559 DVLOG(2) << " cbSize : " << format.cbSize; |
| 535 | 560 |
| 536 DVLOG(2) << "WAVEFORMATEXTENSIBLE:"; | 561 DVLOG(2) << "WAVEFORMATEXTENSIBLE:"; |
| 537 DVLOG(2) << " wValidBitsPerSample: " << | 562 DVLOG(2) << " wValidBitsPerSample: " |
| 538 format_ex->Samples.wValidBitsPerSample; | 563 << format_ex->Samples.wValidBitsPerSample; |
| 539 DVLOG(2) << " dwChannelMask : 0x" << std::hex << | 564 DVLOG(2) << " dwChannelMask : 0x" << std::hex |
| 540 format_ex->dwChannelMask; | 565 << format_ex->dwChannelMask; |
| 541 if (format_ex->SubFormat == KSDATAFORMAT_SUBTYPE_PCM) | 566 if (format_ex->SubFormat == KSDATAFORMAT_SUBTYPE_PCM) |
| 542 DVLOG(2) << " SubFormat : KSDATAFORMAT_SUBTYPE_PCM"; | 567 DVLOG(2) << " SubFormat : KSDATAFORMAT_SUBTYPE_PCM"; |
| 543 else if (format_ex->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT) | 568 else if (format_ex->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT) |
| 544 DVLOG(2) << " SubFormat : KSDATAFORMAT_SUBTYPE_IEEE_FLOAT"; | 569 DVLOG(2) << " SubFormat : KSDATAFORMAT_SUBTYPE_IEEE_FLOAT"; |
| 545 else if (format_ex->SubFormat == KSDATAFORMAT_SUBTYPE_WAVEFORMATEX) | 570 else if (format_ex->SubFormat == KSDATAFORMAT_SUBTYPE_WAVEFORMATEX) |
| 546 DVLOG(2) << " SubFormat : KSDATAFORMAT_SUBTYPE_WAVEFORMATEX"; | 571 DVLOG(2) << " SubFormat : KSDATAFORMAT_SUBTYPE_WAVEFORMATEX"; |
| 547 #endif | 572 #endif |
| 548 return hr; | 573 return hr; |
| 549 } | 574 } |
| 550 | 575 |
| 551 bool WASAPIAudioInputStream::DesiredFormatIsSupported() { | 576 bool WASAPIAudioInputStream::DesiredFormatIsSupported() { |
| 552 // An application that uses WASAPI to manage shared-mode streams can rely | 577 // An application that uses WASAPI to manage shared-mode streams can rely |
| 553 // on the audio engine to perform only limited format conversions. The audio | 578 // on the audio engine to perform only limited format conversions. The audio |
| 554 // engine can convert between a standard PCM sample size used by the | 579 // engine can convert between a standard PCM sample size used by the |
| 555 // application and the floating-point samples that the engine uses for its | 580 // application and the floating-point samples that the engine uses for its |
| 556 // internal processing. However, the format for an application stream | 581 // internal processing. However, the format for an application stream |
| 557 // typically must have the same number of channels and the same sample | 582 // typically must have the same number of channels and the same sample |
| 558 // rate as the stream format used by the device. | 583 // rate as the stream format used by the device. |
| 559 // Many audio devices support both PCM and non-PCM stream formats. However, | 584 // Many audio devices support both PCM and non-PCM stream formats. However, |
| 560 // the audio engine can mix only PCM streams. | 585 // the audio engine can mix only PCM streams. |
| 561 base::win::ScopedCoMem<WAVEFORMATEX> closest_match; | 586 base::win::ScopedCoMem<WAVEFORMATEX> closest_match; |
| 562 HRESULT hr = audio_client_->IsFormatSupported(AUDCLNT_SHAREMODE_SHARED, | 587 HRESULT hr = audio_client_->IsFormatSupported(AUDCLNT_SHAREMODE_SHARED, |
| 563 &format_, | 588 &format_, &closest_match); |
| 564 &closest_match); | |
| 565 DLOG_IF(ERROR, hr == S_FALSE) << "Format is not supported " | 589 DLOG_IF(ERROR, hr == S_FALSE) << "Format is not supported " |
| 566 << "but a closest match exists."; | 590 << "but a closest match exists."; |
| 567 return (hr == S_OK); | 591 return (hr == S_OK); |
| 568 } | 592 } |
| 569 | 593 |
| 570 HRESULT WASAPIAudioInputStream::InitializeAudioEngine() { | 594 HRESULT WASAPIAudioInputStream::InitializeAudioEngine() { |
| 571 DWORD flags; | 595 DWORD flags; |
| 572 // Use event-driven mode only for regular input devices. For loopback the | 596 // Use event-driven mode only for regular input devices. For loopback the |
| 573 // EVENTCALLBACK flag is specified when initializing | 597 // EVENTCALLBACK flag is specified when initializing |
| 574 // |audio_render_client_for_loopback_|. | 598 // |audio_render_client_for_loopback_|. |
| 575 if (device_id_ == AudioDeviceDescription::kLoopbackInputDeviceId) { | 599 if (device_id_ == AudioDeviceDescription::kLoopbackInputDeviceId || |
| 600 device_id_ == AudioDeviceDescription::kLoopbackWithMuteDeviceId) { | |
| 576 flags = AUDCLNT_STREAMFLAGS_LOOPBACK | AUDCLNT_STREAMFLAGS_NOPERSIST; | 601 flags = AUDCLNT_STREAMFLAGS_LOOPBACK | AUDCLNT_STREAMFLAGS_NOPERSIST; |
| 577 } else { | 602 } else { |
| 578 flags = AUDCLNT_STREAMFLAGS_EVENTCALLBACK | AUDCLNT_STREAMFLAGS_NOPERSIST; | 603 flags = AUDCLNT_STREAMFLAGS_EVENTCALLBACK | AUDCLNT_STREAMFLAGS_NOPERSIST; |
| 579 } | 604 } |
| 580 | 605 |
| 581 // Initialize the audio stream between the client and the device. | 606 // Initialize the audio stream between the client and the device. |
| 582 // We connect indirectly through the audio engine by using shared mode. | 607 // We connect indirectly through the audio engine by using shared mode. |
| 583 // Note that, |hnsBufferDuration| is set to 0, which ensures that the | 608 // Note that, |hnsBufferDuration| is set to 0, which ensures that the |
| 584 // buffer is never smaller than the minimum buffer size needed to ensure | 609 // buffer is never smaller than the minimum buffer size needed to ensure |
| 585 // that glitches do not occur between the periodic processing passes. | 610 // that glitches do not occur between the periodic processing passes. |
| (...skipping 53 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 639 // to MSDN: | 664 // to MSDN: |
| 640 // | 665 // |
| 641 // A pull-mode capture client does not receive any events when a stream is | 666 // A pull-mode capture client does not receive any events when a stream is |
| 642 // initialized with event-driven buffering and is loopback-enabled. To | 667 // initialized with event-driven buffering and is loopback-enabled. To |
| 643 // work around this, initialize a render stream in event-driven mode. Each | 668 // work around this, initialize a render stream in event-driven mode. Each |
| 644 // time the client receives an event for the render stream, it must signal | 669 // time the client receives an event for the render stream, it must signal |
| 645 // the capture client to run the capture thread that reads the next set of | 670 // the capture client to run the capture thread that reads the next set of |
| 646 // samples from the capture endpoint buffer. | 671 // samples from the capture endpoint buffer. |
| 647 // | 672 // |
| 648 // http://msdn.microsoft.com/en-us/library/windows/desktop/dd316551(v=vs.85).aspx | 673 // http://msdn.microsoft.com/en-us/library/windows/desktop/dd316551(v=vs.85).aspx |
| 649 if (device_id_ == AudioDeviceDescription::kLoopbackInputDeviceId) { | 674 if (device_id_ == AudioDeviceDescription::kLoopbackInputDeviceId || |
| 675 device_id_ == AudioDeviceDescription::kLoopbackWithMuteDeviceId) { | |
| 650 hr = endpoint_device_->Activate( | 676 hr = endpoint_device_->Activate( |
| 651 __uuidof(IAudioClient), CLSCTX_INPROC_SERVER, NULL, | 677 __uuidof(IAudioClient), CLSCTX_INPROC_SERVER, NULL, |
| 652 audio_render_client_for_loopback_.ReceiveVoid()); | 678 audio_render_client_for_loopback_.ReceiveVoid()); |
| 653 if (FAILED(hr)) | 679 if (FAILED(hr)) |
| 654 return hr; | 680 return hr; |
| 655 | 681 |
| 656 hr = audio_render_client_for_loopback_->Initialize( | 682 hr = audio_render_client_for_loopback_->Initialize( |
| 657 AUDCLNT_SHAREMODE_SHARED, | 683 AUDCLNT_SHAREMODE_SHARED, |
| 658 AUDCLNT_STREAMFLAGS_EVENTCALLBACK | AUDCLNT_STREAMFLAGS_NOPERSIST, | 684 AUDCLNT_STREAMFLAGS_EVENTCALLBACK | AUDCLNT_STREAMFLAGS_NOPERSIST, 0, 0, |
| 659 0, 0, &format_, NULL); | 685 &format_, NULL); |
| 660 if (FAILED(hr)) | 686 if (FAILED(hr)) |
| 661 return hr; | 687 return hr; |
| 662 | 688 |
| 663 hr = audio_render_client_for_loopback_->SetEventHandle( | 689 hr = audio_render_client_for_loopback_->SetEventHandle( |
| 664 audio_samples_ready_event_.Get()); | 690 audio_samples_ready_event_.Get()); |
| 665 } else { | 691 } else { |
| 666 hr = audio_client_->SetEventHandle(audio_samples_ready_event_.Get()); | 692 hr = audio_client_->SetEventHandle(audio_samples_ready_event_.Get()); |
| 667 } | 693 } |
| 668 | 694 |
| 669 if (FAILED(hr)) | 695 if (FAILED(hr)) |
| 670 return hr; | 696 return hr; |
| 671 | 697 |
| 672 // Get access to the IAudioCaptureClient interface. This interface | 698 // Get access to the IAudioCaptureClient interface. This interface |
| 673 // enables us to read input data from the capture endpoint buffer. | 699 // enables us to read input data from the capture endpoint buffer. |
| 674 hr = audio_client_->GetService(__uuidof(IAudioCaptureClient), | 700 hr = audio_client_->GetService(__uuidof(IAudioCaptureClient), |
| 675 audio_capture_client_.ReceiveVoid()); | 701 audio_capture_client_.ReceiveVoid()); |
| 676 if (FAILED(hr)) | 702 if (FAILED(hr)) |
| 677 return hr; | 703 return hr; |
| 678 | 704 |
| 679 // Obtain a reference to the ISimpleAudioVolume interface which enables | 705 // Obtain a reference to the ISimpleAudioVolume interface which enables |
| 680 // us to control the master volume level of an audio session. | 706 // us to control the master volume level of an audio session. |
| 681 hr = audio_client_->GetService(__uuidof(ISimpleAudioVolume), | 707 hr = audio_client_->GetService(__uuidof(ISimpleAudioVolume), |
| 682 simple_audio_volume_.ReceiveVoid()); | 708 simple_audio_volume_.ReceiveVoid()); |
| 683 return hr; | 709 return hr; |
| 684 } | 710 } |
| 685 | 711 |
| 686 } // namespace media | 712 } // namespace media |
| OLD | NEW |