OLD | NEW |
---|---|
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "media/audio/win/audio_low_latency_input_win.h" | 5 #include "media/audio/win/audio_low_latency_input_win.h" |
6 | 6 |
7 #include <math.h> | |
7 #include <memory> | 8 #include <memory> |
8 | 9 |
9 #include "base/logging.h" | 10 #include "base/logging.h" |
10 #include "base/metrics/histogram_macros.h" | 11 #include "base/metrics/histogram_macros.h" |
11 #include "base/strings/utf_string_conversions.h" | 12 #include "base/strings/utf_string_conversions.h" |
12 #include "base/trace_event/trace_event.h" | 13 #include "base/trace_event/trace_event.h" |
13 #include "media/audio/audio_device_description.h" | 14 #include "media/audio/audio_device_description.h" |
14 #include "media/audio/win/audio_manager_win.h" | 15 #include "media/audio/win/audio_manager_win.h" |
15 #include "media/audio/win/avrt_wrapper_win.h" | 16 #include "media/audio/win/avrt_wrapper_win.h" |
16 #include "media/audio/win/core_audio_util_win.h" | 17 #include "media/audio/win/core_audio_util_win.h" |
18 #include "media/base/audio_block_fifo.h" | |
17 #include "media/base/audio_bus.h" | 19 #include "media/base/audio_bus.h" |
20 #include "media/base/channel_layout.h" | |
21 #include "media/base/limits.h" | |
18 | 22 |
19 using base::win::ScopedComPtr; | 23 using base::win::ScopedComPtr; |
20 using base::win::ScopedCOMInitializer; | 24 using base::win::ScopedCOMInitializer; |
21 | 25 |
22 namespace media { | 26 namespace media { |
23 | 27 |
24 WASAPIAudioInputStream::WASAPIAudioInputStream(AudioManagerWin* manager, | 28 WASAPIAudioInputStream::WASAPIAudioInputStream(AudioManagerWin* manager, |
25 const AudioParameters& params, | 29 const AudioParameters& params, |
26 const std::string& device_id) | 30 const std::string& device_id) |
27 : manager_(manager), | 31 : manager_(manager), |
28 device_id_(device_id), | 32 device_id_(device_id) { |
29 audio_bus_(media::AudioBus::Create(params)) { | |
30 DCHECK(manager_); | 33 DCHECK(manager_); |
31 DCHECK(!device_id_.empty()); | 34 DCHECK(!device_id_.empty()); |
32 | 35 |
33 // Load the Avrt DLL if not already loaded. Required to support MMCSS. | 36 // Load the Avrt DLL if not already loaded. Required to support MMCSS. |
34 bool avrt_init = avrt::Initialize(); | 37 bool avrt_init = avrt::Initialize(); |
35 DCHECK(avrt_init) << "Failed to load the Avrt.dll"; | 38 DCHECK(avrt_init) << "Failed to load the Avrt.dll"; |
36 | 39 |
37 // Set up the desired capture format specified by the client. | 40 // Set up the desired capture format specified by the client. |
38 format_.nSamplesPerSec = params.sample_rate(); | 41 format_.nSamplesPerSec = params.sample_rate(); |
39 format_.wFormatTag = WAVE_FORMAT_PCM; | 42 format_.wFormatTag = WAVE_FORMAT_PCM; |
(...skipping 76 matching lines...) | |
116 // set during construction. | 119 // set during construction. |
117 if (!DesiredFormatIsSupported()) { | 120 if (!DesiredFormatIsSupported()) { |
118 open_result_ = OPEN_RESULT_FORMAT_NOT_SUPPORTED; | 121 open_result_ = OPEN_RESULT_FORMAT_NOT_SUPPORTED; |
119 ReportOpenResult(); | 122 ReportOpenResult(); |
120 return false; | 123 return false; |
121 } | 124 } |
122 | 125 |
123 // Initialize the audio stream between the client and the device using | 126 // Initialize the audio stream between the client and the device using |
124 // shared mode and a lowest possible glitch-free latency. | 127 // shared mode and a lowest possible glitch-free latency. |
125 hr = InitializeAudioEngine(); | 128 hr = InitializeAudioEngine(); |
129 if (SUCCEEDED(hr) && converter_) | |
130 open_result_ = OPEN_RESULT_OK_WITH_RESAMPLING; | |
126 ReportOpenResult(); // Report before we assign a value to |opened_|. | 131 ReportOpenResult(); // Report before we assign a value to |opened_|. |
127 opened_ = SUCCEEDED(hr); | 132 opened_ = SUCCEEDED(hr); |
128 DCHECK(open_result_ == OPEN_RESULT_OK || !opened_); | |
129 | 133 |
130 return opened_; | 134 return opened_; |
131 } | 135 } |
132 | 136 |
133 void WASAPIAudioInputStream::Start(AudioInputCallback* callback) { | 137 void WASAPIAudioInputStream::Start(AudioInputCallback* callback) { |
134 DCHECK(CalledOnValidThread()); | 138 DCHECK(CalledOnValidThread()); |
135 DCHECK(callback); | 139 DCHECK(callback); |
136 DLOG_IF(ERROR, !opened_) << "Open() has not been called successfully"; | 140 DLOG_IF(ERROR, !opened_) << "Open() has not been called successfully"; |
137 if (!opened_) | 141 if (!opened_) |
138 return; | 142 return; |
(...skipping 81 matching lines...) | |
220 started_ = false; | 224 started_ = false; |
221 sink_ = NULL; | 225 sink_ = NULL; |
222 } | 226 } |
223 | 227 |
224 void WASAPIAudioInputStream::Close() { | 228 void WASAPIAudioInputStream::Close() { |
225 DVLOG(1) << "WASAPIAudioInputStream::Close()"; | 229 DVLOG(1) << "WASAPIAudioInputStream::Close()"; |
226 // It is valid to call Close() before calling open or Start(). | 230 // It is valid to call Close() before calling open or Start(). |
227 // It is also valid to call Close() after Start() has been called. | 231 // It is also valid to call Close() after Start() has been called. |
228 Stop(); | 232 Stop(); |
229 | 233 |
234 if (converter_) | |
235 converter_->RemoveInput(this); | |
236 | |
230 // Inform the audio manager that we have been closed. This will cause our | 237 // Inform the audio manager that we have been closed. This will cause our |
231 // destruction. | 238 // destruction. |
232 manager_->ReleaseInputStream(this); | 239 manager_->ReleaseInputStream(this); |
233 } | 240 } |
234 | 241 |
235 double WASAPIAudioInputStream::GetMaxVolume() { | 242 double WASAPIAudioInputStream::GetMaxVolume() { |
236 // Verify that Open() has been called successfully, to ensure that an audio | 243 // Verify that Open() has been called successfully, to ensure that an audio |
237 // session exists and that an ISimpleAudioVolume interface has been created. | 244 // session exists and that an ISimpleAudioVolume interface has been created. |
238 DLOG_IF(ERROR, !opened_) << "Open() has not been called successfully"; | 245 DLOG_IF(ERROR, !opened_) << "Open() has not been called successfully"; |
239 if (!opened_) | 246 if (!opened_) |
(...skipping 73 matching lines...) | |
313 // to reduced QoS at high load. | 320 // to reduced QoS at high load. |
314 DWORD err = GetLastError(); | 321 DWORD err = GetLastError(); |
315 LOG(WARNING) << "Failed to enable MMCSS (error code=" << err << ")."; | 322 LOG(WARNING) << "Failed to enable MMCSS (error code=" << err << ")."; |
316 } | 323 } |
317 | 324 |
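For readers unfamiliar with the MMCSS registration that the avrt:: wrapper performs for this thread, here is a minimal sketch against the raw Avrt API; the task name and structure are illustrative assumptions, not taken from this CL.

```cpp
// Minimal MMCSS sketch using the raw Avrt API (link against avrt.lib).
// The CL reaches the equivalent calls through the avrt:: wrapper.
#include <windows.h>
#include <avrt.h>

void CaptureThreadBodySketch() {
  DWORD task_index = 0;
  // Ask MMCSS to boost this thread for low-latency audio work.
  HANDLE mm_task = ::AvSetMmThreadCharacteristicsW(L"Pro Audio", &task_index);
  if (!mm_task) {
    // Capture still works without MMCSS, just with weaker scheduling
    // guarantees under load; this is the "reduced QoS" warning above.
  }

  // ... event-driven capture loop runs here ...

  if (mm_task)
    ::AvRevertMmThreadCharacteristics(mm_task);
}
```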
318 // Allocate a buffer with a size that enables us to take care of cases like: | 325 // Allocate a buffer with a size that enables us to take care of cases like: |
319 // 1) The recorded buffer size is smaller, or does not match exactly with, | 326 // 1) The recorded buffer size is smaller, or does not match exactly with, |
320 // the selected packet size used in each callback. | 327 // the selected packet size used in each callback. |
321 // 2) The selected buffer size is larger than the recorded buffer size in | 328 // 2) The selected buffer size is larger than the recorded buffer size in |
322 // each event. | 329 // each event. |
323 size_t buffer_frame_index = 0; | 330 // In the case where no resampling is required, a single buffer should be |
324 size_t capture_buffer_size = | 331 // enough but in case we get buffers that don't match exactly, we'll go with |
325 std::max(2 * endpoint_buffer_size_frames_ * frame_size_, | 332 // two. Same applies if we need to resample and the buffer ratio is perfect. |
326 2 * packet_size_frames_ * frame_size_); | 333 // However if the buffer ratio is imperfect, we will need 3 buffers to safely |
327 std::unique_ptr<uint8_t[]> capture_buffer(new uint8_t[capture_buffer_size]); | 334 // be able to buffer up data in cases where a conversion requires two audio |
335 // buffers (and we need to be able to write to the third one). | |
336 DCHECK(!fifo_); | |
337 const int buffers_required = | |
338 converter_ && imperfect_buffer_size_conversion_ ? 3 : 2; | |
339 fifo_.reset(new AudioBlockFifo(format_.nChannels, packet_size_frames_, | |
340 buffers_required)); | |
341 | |
342 DVLOG(1) << "AudioBlockFifo needs " << buffers_required << " buffers"; | |
328 | 343 |
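To make the two-versus-three block rule above concrete, a hedged sketch of how the FIFO is sized and fed; the AudioBlockFifo calls mirror the ones in this CL, while the channel and frame counts are invented.

```cpp
// Sketch only: sizing and feeding the capture FIFO. Counts are examples.
#include <stdint.h>

#include "base/logging.h"
#include "media/base/audio_block_fifo.h"
#include "media/base/audio_bus.h"

void FifoUsageSketch(const uint8_t* device_data, int device_frames) {
  const int channels = 2;
  const int packet_frames = 441;      // What the client consumes per OnData().
  const bool imperfect_ratio = true;  // Device buffers don't divide evenly.

  // Two blocks cover the aligned cases; the third absorbs a conversion that
  // needs two buffered packets while a new device buffer is being pushed.
  const int blocks = imperfect_ratio ? 3 : 2;
  media::AudioBlockFifo fifo(channels, packet_frames, blocks);

  // Producer (capture thread): push raw interleaved 16-bit samples.
  fifo.Push(device_data, device_frames, 2 /* bytes per sample */);

  // Consumer: only whole packet-sized blocks ever come out.
  while (fifo.available_blocks()) {
    const media::AudioBus* packet = fifo.Consume();
    DCHECK_EQ(packet_frames, packet->frames());
  }
}
```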
329 LARGE_INTEGER now_count = {}; | 344 LARGE_INTEGER now_count = {}; |
330 bool recording = true; | 345 bool recording = true; |
331 bool error = false; | 346 bool error = false; |
332 double volume = GetVolume(); | 347 double volume = GetVolume(); |
333 HANDLE wait_array[2] = {stop_capture_event_.Get(), | 348 HANDLE wait_array[2] = {stop_capture_event_.Get(), |
334 audio_samples_ready_event_.Get()}; | 349 audio_samples_ready_event_.Get()}; |
335 | 350 |
336 base::win::ScopedComPtr<IAudioClock> audio_clock; | 351 base::win::ScopedComPtr<IAudioClock> audio_clock; |
337 audio_client_->GetService(__uuidof(IAudioClock), audio_clock.ReceiveVoid()); | 352 audio_client_->GetService(__uuidof(IAudioClock), audio_clock.ReceiveVoid()); |
(...skipping 34 matching lines...) | |
372 if (audio_clock) { | 387 if (audio_clock) { |
373 // The reported timestamp from GetBuffer is not as reliable as the | 388 // The reported timestamp from GetBuffer is not as reliable as the |
374 // clock from the client. We've seen timestamps reported for | 389 // clock from the client. We've seen timestamps reported for |
375 // USB audio devices, be off by several days. Furthermore we've | 390 // USB audio devices, be off by several days. Furthermore we've |
376 // seen them jump back in time every 2 seconds or so. | 391 // seen them jump back in time every 2 seconds or so. |
377 audio_clock->GetPosition(&device_position, | 392 audio_clock->GetPosition(&device_position, |
378 &first_audio_frame_timestamp); | 393 &first_audio_frame_timestamp); |
379 } | 394 } |
380 | 395 |
381 if (num_frames_to_read != 0) { | 396 if (num_frames_to_read != 0) { |
382 size_t pos = buffer_frame_index * frame_size_; | |
383 size_t num_bytes = num_frames_to_read * frame_size_; | |
384 DCHECK_GE(capture_buffer_size, pos + num_bytes); | |
385 | |
386 if (flags & AUDCLNT_BUFFERFLAGS_SILENT) { | 397 if (flags & AUDCLNT_BUFFERFLAGS_SILENT) { |
387 // Clear out the local buffer since silence is reported. | 398 // TODO(tommi): Is this safe? |
388 memset(&capture_buffer[pos], 0, num_bytes); | 399 memset(data_ptr, 0, num_frames_to_read * frame_size_); |
tommi (sloooow) - chröme 2017/02/17 17:09:21: I'm looking for a way to test this right now, so i
DaleCurtis 2017/02/17 17:25:18: Not currently, but feel free to add PushSilence()
tommi (sloooow) - chröme 2017/02/17 18:04:12: Done.
| |
389 } else { | |
390 // Copy captured data from audio engine buffer to local buffer. | |
391 memcpy(&capture_buffer[pos], data_ptr, num_bytes); | |
392 } | 400 } |
393 | 401 |
394 buffer_frame_index += num_frames_to_read; | 402 fifo_->Push(data_ptr, num_frames_to_read, format_.wBitsPerSample / 8); |
395 } | 403 } |
396 | 404 |
397 hr = audio_capture_client_->ReleaseBuffer(num_frames_to_read); | 405 hr = audio_capture_client_->ReleaseBuffer(num_frames_to_read); |
398 DLOG_IF(ERROR, FAILED(hr)) << "Failed to release capture buffer"; | 406 DLOG_IF(ERROR, FAILED(hr)) << "Failed to release capture buffer"; |
399 | 407 |
400 // Derive a delay estimate for the captured audio packet. | 408 // Derive a delay estimate for the captured audio packet. |
401 // The value contains two parts (A+B), where A is the delay of the | 409 // The value contains two parts (A+B), where A is the delay of the |
402 // first audio frame in the packet and B is the extra delay | 410 // first audio frame in the packet and B is the extra delay |
403 // contained in any stored data. Unit is in audio frames. | 411 // contained in any stored data. Unit is in audio frames. |
404 QueryPerformanceCounter(&now_count); | 412 QueryPerformanceCounter(&now_count); |
405 // first_audio_frame_timestamp will be 0 if we didn't get a timestamp. | 413 // first_audio_frame_timestamp will be 0 if we didn't get a timestamp. |
406 double audio_delay_frames = | 414 double audio_delay_frames = |
407 first_audio_frame_timestamp == 0 | 415 first_audio_frame_timestamp == 0 |
408 ? num_frames_to_read | 416 ? num_frames_to_read |
409 : ((perf_count_to_100ns_units_ * now_count.QuadPart - | 417 : ((perf_count_to_100ns_units_ * now_count.QuadPart - |
410 first_audio_frame_timestamp) / | 418 first_audio_frame_timestamp) / |
411 10000.0) * | 419 10000.0) * |
412 ms_to_frame_count_ + | 420 ms_to_frame_count_ + |
413 buffer_frame_index - num_frames_to_read; | 421 fifo_->GetAvailableFrames() - num_frames_to_read; |
414 | 422 |
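A worked version of the delay estimate above, using invented numbers; only the shape of the arithmetic is taken from the CL.

```cpp
// Standalone arithmetic sketch of the capture delay estimate.
#include <iostream>

int main() {
  const double ms_to_frame_count = 48.0;  // 48 kHz stream: 48 frames per ms.
  const int num_frames_to_read = 480;     // Frames in this device buffer.
  const int fifo_available_frames = 960;  // Includes the frames just pushed.

  // QPC "now" minus the IAudioClock timestamp of the packet's first frame,
  // already scaled to 100 ns units as in the CL. 50000 units == 5 ms.
  const double packet_age_100ns = 50000.0;

  const double audio_delay_frames =
      (packet_age_100ns / 10000.0) * ms_to_frame_count +  // 240 frames old
      fifo_available_frames - num_frames_to_read;         // + 480 still queued
  std::cout << audio_delay_frames << " frames = "
            << audio_delay_frames / ms_to_frame_count << " ms\n";  // 720 = 15
  return 0;
}
```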
415 // Get a cached AGC volume level which is updated once every second | 423 // Get a cached AGC volume level which is updated once every second |
416 // on the audio manager thread. Note that, |volume| is also updated | 424 // on the audio manager thread. Note that, |volume| is also updated |
417 // each time SetVolume() is called through IPC by the render-side AGC. | 425 // each time SetVolume() is called through IPC by the render-side AGC. |
418 GetAgcVolume(&volume); | 426 GetAgcVolume(&volume); |
419 | 427 |
420 // Deliver captured data to the registered consumer using a packet | 428 // Deliver captured data to the registered consumer using a packet |
421 // size which was specified at construction. | 429 // size which was specified at construction. |
422 uint32_t delay_frames = static_cast<uint32_t>(audio_delay_frames + 0.5); | 430 uint32_t delay_frames = static_cast<uint32_t>(audio_delay_frames + 0.5); |
423 while (buffer_frame_index >= packet_size_frames_) { | 431 while (fifo_->available_blocks()) { |
424 // Copy data to audio bus to match the OnData interface. | 432 if (converter_) { |
425 uint8_t* audio_data = | 433 if (imperfect_buffer_size_conversion_ && |
426 reinterpret_cast<uint8_t*>(capture_buffer.get()); | 434 fifo_->available_blocks() == 1) { |
427 audio_bus_->FromInterleaved(audio_data, audio_bus_->frames(), | 435 // Special case. We need to buffer up more audio before we can |
428 format_.wBitsPerSample / 8); | 436 // convert or else we'll suffer an underrun. |
437 break; | |
438 } | |
439 converter_->ConvertWithDelay(delay_frames, convert_bus_.get()); | |
440 sink_->OnData(this, convert_bus_.get(), delay_frames * frame_size_, | |
441 volume); | |
442 } else { | |
443 sink_->OnData(this, fifo_->Consume(), delay_frames * frame_size_, | |
444 volume); | |
445 } | |
429 | 446 |
430 // Deliver data packet, delay estimation and volume level to | |
431 // the user. | |
432 sink_->OnData(this, audio_bus_.get(), delay_frames * frame_size_, | |
433 volume); | |
434 | |
435 // Store parts of the recorded data which can't be delivered | |
436 // using the current packet size. The stored section will be used | |
437 // either in the next while-loop iteration or in the next | |
438 // capture event. | |
439 // TODO(tommi): If this data will be used in the next capture | |
440 // event, we will report incorrect delay estimates because | |
441 // we'll use the one for the captured data that time around | |
442 // (i.e. in the future). | |
443 memmove(&capture_buffer[0], &capture_buffer[packet_size_bytes_], | |
444 (buffer_frame_index - packet_size_frames_) * frame_size_); | |
445 | |
446 DCHECK_GE(buffer_frame_index, packet_size_frames_); | |
447 buffer_frame_index -= packet_size_frames_; | |
448 if (delay_frames > packet_size_frames_) { | 447 if (delay_frames > packet_size_frames_) { |
449 delay_frames -= packet_size_frames_; | 448 delay_frames -= packet_size_frames_; |
450 } else { | 449 } else { |
451 delay_frames = 0; | 450 delay_frames = 0; |
452 } | 451 } |
453 } | 452 } |
454 } break; | 453 } break; |
455 default: | 454 default: |
456 error = true; | 455 error = true; |
457 break; | 456 break; |
458 } | 457 } |
459 } | 458 } |
460 | 459 |
461 if (recording && error) { | 460 if (recording && error) { |
462 // TODO(henrika): perhaps it is worth improving the cleanup here by e.g. | 461 // TODO(henrika): perhaps it is worth improving the cleanup here by e.g. |
463 // stopping the audio client, joining the thread etc.? | 462 // stopping the audio client, joining the thread etc.? |
464 NOTREACHED() << "WASAPI capturing failed with error code " | 463 NOTREACHED() << "WASAPI capturing failed with error code " |
465 << GetLastError(); | 464 << GetLastError(); |
466 } | 465 } |
467 | 466 |
468 // Disable MMCSS. | 467 // Disable MMCSS. |
469 if (mm_task && !avrt::AvRevertMmThreadCharacteristics(mm_task)) { | 468 if (mm_task && !avrt::AvRevertMmThreadCharacteristics(mm_task)) { |
470 PLOG(WARNING) << "Failed to disable MMCSS"; | 469 PLOG(WARNING) << "Failed to disable MMCSS"; |
471 } | 470 } |
471 | |
472 fifo_.reset(); | |
472 } | 473 } |
473 | 474 |
474 void WASAPIAudioInputStream::HandleError(HRESULT err) { | 475 void WASAPIAudioInputStream::HandleError(HRESULT err) { |
475 NOTREACHED() << "Error code: " << err; | 476 NOTREACHED() << "Error code: " << err; |
476 if (sink_) | 477 if (sink_) |
477 sink_->OnError(this); | 478 sink_->OnError(this); |
478 } | 479 } |
479 | 480 |
480 HRESULT WASAPIAudioInputStream::SetCaptureDevice() { | 481 HRESULT WASAPIAudioInputStream::SetCaptureDevice() { |
481 DCHECK_EQ(OPEN_RESULT_OK, open_result_); | 482 DCHECK_EQ(OPEN_RESULT_OK, open_result_); |
(...skipping 104 matching lines...) | |
586 // engine can convert between a standard PCM sample size used by the | 587 // engine can convert between a standard PCM sample size used by the |
587 // application and the floating-point samples that the engine uses for its | 588 // application and the floating-point samples that the engine uses for its |
588 // internal processing. However, the format for an application stream | 589 // internal processing. However, the format for an application stream |
589 // typically must have the same number of channels and the same sample | 590 // typically must have the same number of channels and the same sample |
590 // rate as the stream format used by the device. | 591 // rate as the stream format used by the device. |
591 // Many audio devices support both PCM and non-PCM stream formats. However, | 592 // Many audio devices support both PCM and non-PCM stream formats. However, |
592 // the audio engine can mix only PCM streams. | 593 // the audio engine can mix only PCM streams. |
593 base::win::ScopedCoMem<WAVEFORMATEX> closest_match; | 594 base::win::ScopedCoMem<WAVEFORMATEX> closest_match; |
594 HRESULT hr = audio_client_->IsFormatSupported(AUDCLNT_SHAREMODE_SHARED, | 595 HRESULT hr = audio_client_->IsFormatSupported(AUDCLNT_SHAREMODE_SHARED, |
595 &format_, &closest_match); | 596 &format_, &closest_match); |
596 DLOG_IF(ERROR, hr == S_FALSE) << "Format is not supported " | 597 DLOG_IF(ERROR, hr == S_FALSE) |
597 << "but a closest match exists."; | 598 << "Format is not supported but a closest match exists."; |
599 | |
600 if (hr == S_FALSE && | |
601 closest_match->nSamplesPerSec >= limits::kMinSampleRate && | |
602 closest_match->nSamplesPerSec <= limits::kMaxSampleRate) { | |
603 DVLOG(1) << "Audio capture data conversion needed."; | |
604 // Ideally, we want a 1:1 ratio between the buffers we get and the buffers | |
605 // we give to OnData so that each buffer we receive from the OS can be | |
606 // directly converted to a buffer that matches with what was asked for. | |
607 const double buffer_ratio = | |
608 format_.nSamplesPerSec / static_cast<double>(packet_size_frames_); | |
609 double new_frames_per_buffer = closest_match->nSamplesPerSec / buffer_ratio; | |
610 | |
611 const AudioParameters input( | |
612 AudioParameters::AUDIO_PCM_LOW_LATENCY, | |
613 GuessChannelLayout(closest_match->nChannels), | |
614 closest_match->nSamplesPerSec, | |
615 // We need to be careful here to not pick the closest wBitsPerSample | |
616 // match as we need to use the PCM format (which might not be what | |
617 // closeest_match->wFormat is) and the internal resampler doesn't | |
618 // support all formats we might get here. So, we stick to the | |
619 // wBitsPerSample that was asked for originally (most likely 16). | |
620 format_.wBitsPerSample, static_cast<int>(new_frames_per_buffer)); | |
621 | |
622 const AudioParameters output(AudioParameters::AUDIO_PCM_LOW_LATENCY, | |
623 GuessChannelLayout(format_.nChannels), | |
624 format_.nSamplesPerSec, format_.wBitsPerSample, | |
625 packet_size_frames_); | |
626 | |
627 converter_.reset(new AudioConverter(input, output, false)); | |
628 converter_->AddInput(this); | |
629 converter_->PrimeWithSilence(); | |
630 convert_bus_ = AudioBus::Create(output); | |
631 | |
632 // Now change the format we're going to ask for to better match with what | |
633 // the OS can provide. If we succeed in opening the stream with these | |
634 // params, we can take care of the required resampling. | |
635 format_.nSamplesPerSec = closest_match->nSamplesPerSec; | |
636 format_.nChannels = closest_match->nChannels; | |
637 format_.nBlockAlign = (format_.wBitsPerSample / 8) * format_.nChannels; | |
638 format_.nAvgBytesPerSec = format_.nSamplesPerSec * format_.nBlockAlign; | |
639 | |
640 // Update our packet size assumptions based on the new format. | |
641 const auto new_bytes_per_buffer = static_cast<int>(new_frames_per_buffer) * | |
642 format_.nChannels * | |
643 (format_.wBitsPerSample / 8); | |
644 packet_size_frames_ = new_bytes_per_buffer / format_.nBlockAlign; | |
645 packet_size_bytes_ = new_bytes_per_buffer; | |
646 frame_size_ = format_.nBlockAlign; | |
647 ms_to_frame_count_ = static_cast<double>(format_.nSamplesPerSec) / 1000.0; | |
648 | |
649 imperfect_buffer_size_conversion_ = | |
650 modf(new_frames_per_buffer, &new_frames_per_buffer) != 0.0; | |
651 DVLOG_IF(1, imperfect_buffer_size_conversion_) | |
652 << "Audio capture data conversion: Need to inject fifo"; | |
653 | |
654 // Indicate that we're good to go with a close match. | |
655 hr = S_OK; | |
656 } | |
657 | |
598 return (hr == S_OK); | 658 return (hr == S_OK); |
599 } | 659 } |
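To see when imperfect_buffer_size_conversion_ flips on, here is a small arithmetic sketch; the formulas mirror DesiredFormatIsSupported() above, and the 48 kHz/128-frame request against a 44.1 kHz device is just an assumed example that happens to be imperfect.

```cpp
// Arithmetic sketch of the buffer-ratio computation. Rates are assumptions.
#include <cmath>
#include <iostream>

int main() {
  // Client asked for 128-frame packets at 48 kHz; the device's closest
  // shared-mode match runs at 44.1 kHz.
  const double requested_rate = 48000.0;
  const double requested_packet_frames = 128.0;
  const double device_rate = 44100.0;

  // Packets per second at the requested rate, then how many device-rate
  // frames feed one packet.
  const double buffer_ratio = requested_rate / requested_packet_frames;  // 375
  double new_frames_per_buffer = device_rate / buffer_ratio;  // 117.6

  double integral = 0.0;
  const bool imperfect =
      std::modf(new_frames_per_buffer, &integral) != 0.0;  // true here

  std::cout << "device frames per packet: " << new_frames_per_buffer
            << ", imperfect conversion: " << std::boolalpha << imperfect
            << "\n";
  return 0;
}
```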
600 | 660 |
601 HRESULT WASAPIAudioInputStream::InitializeAudioEngine() { | 661 HRESULT WASAPIAudioInputStream::InitializeAudioEngine() { |
602 DCHECK_EQ(OPEN_RESULT_OK, open_result_); | 662 DCHECK_EQ(OPEN_RESULT_OK, open_result_); |
603 DWORD flags; | 663 DWORD flags; |
604 // Use event-driven mode only for regular input devices. For loopback the | 664 // Use event-driven mode only for regular input devices. For loopback the |
605 // EVENTCALLBACK flag is specified when initializing | 665 // EVENTCALLBACK flag is specified when initializing |
606 // |audio_render_client_for_loopback_|. | 666 // |audio_render_client_for_loopback_|. |
607 if (device_id_ == AudioDeviceDescription::kLoopbackInputDeviceId || | 667 if (device_id_ == AudioDeviceDescription::kLoopbackInputDeviceId || |
(...skipping 123 matching lines...) | |
731 | 791 |
732 return hr; | 792 return hr; |
733 } | 793 } |
734 | 794 |
735 void WASAPIAudioInputStream::ReportOpenResult() const { | 795 void WASAPIAudioInputStream::ReportOpenResult() const { |
736 DCHECK(!opened_); // This method must be called before we set this flag. | 796 DCHECK(!opened_); // This method must be called before we set this flag. |
737 UMA_HISTOGRAM_ENUMERATION("Media.Audio.Capture.Win.Open", open_result_, | 797 UMA_HISTOGRAM_ENUMERATION("Media.Audio.Capture.Win.Open", open_result_, |
738 OPEN_RESULT_MAX + 1); | 798 OPEN_RESULT_MAX + 1); |
739 } | 799 } |
740 | 800 |
801 double WASAPIAudioInputStream::ProvideInput(AudioBus* audio_bus, | |
802 uint32_t frames_delayed) { | |
803 fifo_->Consume()->CopyTo(audio_bus); | |
804 return 1.0; | |
805 } | |
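A sketch, not part of the CL, of the pull relationship this ProvideInput() takes part in: ConvertWithDelay() fills one output-format bus by calling back into each registered input. The FifoBackedInput class and the silence fill are illustrative stand-ins.

```cpp
// Illustrative pull-model sketch built on Chromium's AudioConverter types.
#include <stdint.h>

#include <memory>

#include "media/base/audio_bus.h"
#include "media/base/audio_converter.h"
#include "media/base/audio_parameters.h"

namespace {

// Hypothetical input; in the CL the stream itself plays this role and serves
// fifo_->Consume() instead of silence.
class FifoBackedInput : public media::AudioConverter::InputCallback {
 public:
  double ProvideInput(media::AudioBus* audio_bus,
                      uint32_t frames_delayed) override {
    audio_bus->Zero();  // Stand-in for fifo_->Consume()->CopyTo(audio_bus).
    return 1.0;         // No attenuation.
  }
};

}  // namespace

void ConvertOnePacketSketch(const media::AudioParameters& input_params,
                            const media::AudioParameters& output_params,
                            uint32_t delay_frames) {
  FifoBackedInput input;
  media::AudioConverter converter(input_params, output_params,
                                  false /* disable_fifo */);
  converter.AddInput(&input);
  converter.PrimeWithSilence();

  std::unique_ptr<media::AudioBus> convert_bus =
      media::AudioBus::Create(output_params);
  // Pulls as many input blocks as needed through ProvideInput() to fill
  // one |output_params|-sized bus.
  converter.ConvertWithDelay(delay_frames, convert_bus.get());

  converter.RemoveInput(&input);
}
```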
806 | |
741 } // namespace media | 807 } // namespace media |