OLD | NEW |
---|---|
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "media/audio/win/audio_low_latency_input_win.h" | 5 #include "media/audio/win/audio_low_latency_input_win.h" |
6 | 6 |
7 #include <math.h> | |
DaleCurtis
2017/02/17 18:23:23
cmath? for modf?
http://en.cppreference.com/w/cpp
tommi (sloooow) - chröme
2017/02/17 22:21:27
Done.
| |
7 #include <memory> | 8 #include <memory> |
8 | 9 |
9 #include "base/logging.h" | 10 #include "base/logging.h" |
10 #include "base/metrics/histogram_macros.h" | 11 #include "base/metrics/histogram_macros.h" |
11 #include "base/strings/utf_string_conversions.h" | 12 #include "base/strings/utf_string_conversions.h" |
12 #include "base/trace_event/trace_event.h" | 13 #include "base/trace_event/trace_event.h" |
13 #include "media/audio/audio_device_description.h" | 14 #include "media/audio/audio_device_description.h" |
14 #include "media/audio/win/audio_manager_win.h" | 15 #include "media/audio/win/audio_manager_win.h" |
15 #include "media/audio/win/avrt_wrapper_win.h" | 16 #include "media/audio/win/avrt_wrapper_win.h" |
16 #include "media/audio/win/core_audio_util_win.h" | 17 #include "media/audio/win/core_audio_util_win.h" |
18 #include "media/base/audio_block_fifo.h" | |
17 #include "media/base/audio_bus.h" | 19 #include "media/base/audio_bus.h" |
20 #include "media/base/channel_layout.h" | |
21 #include "media/base/limits.h" | |
18 | 22 |
19 using base::win::ScopedComPtr; | 23 using base::win::ScopedComPtr; |
20 using base::win::ScopedCOMInitializer; | 24 using base::win::ScopedCOMInitializer; |
21 | 25 |
22 namespace media { | 26 namespace media { |
23 | 27 |
24 WASAPIAudioInputStream::WASAPIAudioInputStream(AudioManagerWin* manager, | 28 WASAPIAudioInputStream::WASAPIAudioInputStream(AudioManagerWin* manager, |
25 const AudioParameters& params, | 29 const AudioParameters& params, |
26 const std::string& device_id) | 30 const std::string& device_id) |
27 : manager_(manager), | 31 : manager_(manager), device_id_(device_id) { |
28 device_id_(device_id), | |
29 audio_bus_(media::AudioBus::Create(params)) { | |
30 DCHECK(manager_); | 32 DCHECK(manager_); |
31 DCHECK(!device_id_.empty()); | 33 DCHECK(!device_id_.empty()); |
32 | 34 |
33 // Load the Avrt DLL if not already loaded. Required to support MMCSS. | 35 // Load the Avrt DLL if not already loaded. Required to support MMCSS. |
34 bool avrt_init = avrt::Initialize(); | 36 bool avrt_init = avrt::Initialize(); |
35 DCHECK(avrt_init) << "Failed to load the Avrt.dll"; | 37 DCHECK(avrt_init) << "Failed to load the Avrt.dll"; |
36 | 38 |
37 // Set up the desired capture format specified by the client. | 39 // Set up the desired capture format specified by the client. |
38 format_.nSamplesPerSec = params.sample_rate(); | 40 format_.nSamplesPerSec = params.sample_rate(); |
39 format_.wFormatTag = WAVE_FORMAT_PCM; | 41 format_.wFormatTag = WAVE_FORMAT_PCM; |
(...skipping 76 matching lines...) | |
116 // set during construction. | 118 // set during construction. |
117 if (!DesiredFormatIsSupported()) { | 119 if (!DesiredFormatIsSupported()) { |
118 open_result_ = OPEN_RESULT_FORMAT_NOT_SUPPORTED; | 120 open_result_ = OPEN_RESULT_FORMAT_NOT_SUPPORTED; |
119 ReportOpenResult(); | 121 ReportOpenResult(); |
120 return false; | 122 return false; |
121 } | 123 } |
122 | 124 |
123 // Initialize the audio stream between the client and the device using | 125 // Initialize the audio stream between the client and the device using |
124 // shared mode and a lowest possible glitch-free latency. | 126 // shared mode and a lowest possible glitch-free latency. |
125 hr = InitializeAudioEngine(); | 127 hr = InitializeAudioEngine(); |
128 if (SUCCEEDED(hr) && converter_) | |
129 open_result_ = OPEN_RESULT_OK_WITH_RESAMPLING; | |
126 ReportOpenResult(); // Report before we assign a value to |opened_|. | 130 ReportOpenResult(); // Report before we assign a value to |opened_|. |
127 opened_ = SUCCEEDED(hr); | 131 opened_ = SUCCEEDED(hr); |
128 DCHECK(open_result_ == OPEN_RESULT_OK || !opened_); | |
129 | 132 |
130 return opened_; | 133 return opened_; |
131 } | 134 } |
132 | 135 |
133 void WASAPIAudioInputStream::Start(AudioInputCallback* callback) { | 136 void WASAPIAudioInputStream::Start(AudioInputCallback* callback) { |
134 DCHECK(CalledOnValidThread()); | 137 DCHECK(CalledOnValidThread()); |
135 DCHECK(callback); | 138 DCHECK(callback); |
136 DLOG_IF(ERROR, !opened_) << "Open() has not been called successfully"; | 139 DLOG_IF(ERROR, !opened_) << "Open() has not been called successfully"; |
137 if (!opened_) | 140 if (!opened_) |
138 return; | 141 return; |
(...skipping 81 matching lines...) | |
220 started_ = false; | 223 started_ = false; |
221 sink_ = NULL; | 224 sink_ = NULL; |
222 } | 225 } |
223 | 226 |
224 void WASAPIAudioInputStream::Close() { | 227 void WASAPIAudioInputStream::Close() { |
225 DVLOG(1) << "WASAPIAudioInputStream::Close()"; | 228 DVLOG(1) << "WASAPIAudioInputStream::Close()"; |
226 // It is valid to call Close() before calling open or Start(). | 229 // It is valid to call Close() before calling open or Start(). |
227 // It is also valid to call Close() after Start() has been called. | 230 // It is also valid to call Close() after Start() has been called. |
228 Stop(); | 231 Stop(); |
229 | 232 |
233 if (converter_) | |
234 converter_->RemoveInput(this); | |
235 | |
230 // Inform the audio manager that we have been closed. This will cause our | 236 // Inform the audio manager that we have been closed. This will cause our |
231 // destruction. | 237 // destruction. |
232 manager_->ReleaseInputStream(this); | 238 manager_->ReleaseInputStream(this); |
233 } | 239 } |
234 | 240 |
235 double WASAPIAudioInputStream::GetMaxVolume() { | 241 double WASAPIAudioInputStream::GetMaxVolume() { |
236 // Verify that Open() has been called successfully, to ensure that an audio | 242 // Verify that Open() has been called successfully, to ensure that an audio |
237 // session exists and that an ISimpleAudioVolume interface has been created. | 243 // session exists and that an ISimpleAudioVolume interface has been created. |
238 DLOG_IF(ERROR, !opened_) << "Open() has not been called successfully"; | 244 DLOG_IF(ERROR, !opened_) << "Open() has not been called successfully"; |
239 if (!opened_) | 245 if (!opened_) |
(...skipping 73 matching lines...) | |
313 // to reduced QoS at high load. | 319 // to reduced QoS at high load. |
314 DWORD err = GetLastError(); | 320 DWORD err = GetLastError(); |
315 LOG(WARNING) << "Failed to enable MMCSS (error code=" << err << ")."; | 321 LOG(WARNING) << "Failed to enable MMCSS (error code=" << err << ")."; |
316 } | 322 } |
317 | 323 |
318 // Allocate a buffer with a size that enables us to take care of cases like: | 324 // Allocate a buffer with a size that enables us to take care of cases like: |
319 // 1) The recorded buffer size is smaller, or does not match exactly with, | 325 // 1) The recorded buffer size is smaller, or does not match exactly with, |
320 // the selected packet size used in each callback. | 326 // the selected packet size used in each callback. |
321 // 2) The selected buffer size is larger than the recorded buffer size in | 327 // 2) The selected buffer size is larger than the recorded buffer size in |
322 // each event. | 328 // each event. |
323 size_t buffer_frame_index = 0; | 329 // In the case where no resampling is required, a single buffer should be |
324 size_t capture_buffer_size = | 330 // enough but in case we get buffers that don't match exactly, we'll go with |
325 std::max(2 * endpoint_buffer_size_frames_ * frame_size_, | 331 // two. Same applies if we need to resample and the buffer ratio is perfect. |
326 2 * packet_size_frames_ * frame_size_); | 332 // However if the buffer ratio is imperfect, we will need 3 buffers to safely |
327 std::unique_ptr<uint8_t[]> capture_buffer(new uint8_t[capture_buffer_size]); | 333 // be able to buffer up data in cases where a conversion requires two audio |
334 // buffers (and we need to be able to write to the third one). | |
335 DCHECK(!fifo_); | |
336 const int buffers_required = | |
337 converter_ && imperfect_buffer_size_conversion_ ? 3 : 2; | |
338 fifo_.reset(new AudioBlockFifo(format_.nChannels, packet_size_frames_, | |
339 buffers_required)); | |
340 | |
341 DVLOG(1) << "AudioBlockFifo needs " << buffers_required << " buffers"; | |
328 | 342 |
329 LARGE_INTEGER now_count = {}; | 343 LARGE_INTEGER now_count = {}; |
330 bool recording = true; | 344 bool recording = true; |
331 bool error = false; | 345 bool error = false; |
332 double volume = GetVolume(); | 346 double volume = GetVolume(); |
333 HANDLE wait_array[2] = {stop_capture_event_.Get(), | 347 HANDLE wait_array[2] = {stop_capture_event_.Get(), |
334 audio_samples_ready_event_.Get()}; | 348 audio_samples_ready_event_.Get()}; |
335 | 349 |
336 base::win::ScopedComPtr<IAudioClock> audio_clock; | 350 base::win::ScopedComPtr<IAudioClock> audio_clock; |
337 audio_client_->GetService(__uuidof(IAudioClock), audio_clock.ReceiveVoid()); | 351 audio_client_->GetService(__uuidof(IAudioClock), audio_clock.ReceiveVoid()); |
(...skipping 34 matching lines...) | |
372 if (audio_clock) { | 386 if (audio_clock) { |
373 // The reported timestamp from GetBuffer is not as reliable as the | 387 // The reported timestamp from GetBuffer is not as reliable as the |
374 // clock from the client. We've seen timestamps reported for | 388 // clock from the client. We've seen timestamps reported for |
375 // USB audio devices, be off by several days. Furthermore we've | 389 // USB audio devices, be off by several days. Furthermore we've |
376 // seen them jump back in time every 2 seconds or so. | 390 // seen them jump back in time every 2 seconds or so. |
377 audio_clock->GetPosition(&device_position, | 391 audio_clock->GetPosition(&device_position, |
378 &first_audio_frame_timestamp); | 392 &first_audio_frame_timestamp); |
379 } | 393 } |
380 | 394 |
381 if (num_frames_to_read != 0) { | 395 if (num_frames_to_read != 0) { |
382 size_t pos = buffer_frame_index * frame_size_; | |
383 size_t num_bytes = num_frames_to_read * frame_size_; | |
384 DCHECK_GE(capture_buffer_size, pos + num_bytes); | |
385 | |
386 if (flags & AUDCLNT_BUFFERFLAGS_SILENT) { | 396 if (flags & AUDCLNT_BUFFERFLAGS_SILENT) { |
387 // Clear out the local buffer since silence is reported. | 397 fifo_->PushSilence(num_frames_to_read); |
388 memset(&capture_buffer[pos], 0, num_bytes); | |
389 } else { | 398 } else { |
390 // Copy captured data from audio engine buffer to local buffer. | 399 fifo_->Push(data_ptr, num_frames_to_read, |
391 memcpy(&capture_buffer[pos], data_ptr, num_bytes); | 400 format_.wBitsPerSample / 8); |
392 } | 401 } |
393 | |
394 buffer_frame_index += num_frames_to_read; | |
395 } | 402 } |
396 | 403 |
397 hr = audio_capture_client_->ReleaseBuffer(num_frames_to_read); | 404 hr = audio_capture_client_->ReleaseBuffer(num_frames_to_read); |
398 DLOG_IF(ERROR, FAILED(hr)) << "Failed to release capture buffer"; | 405 DLOG_IF(ERROR, FAILED(hr)) << "Failed to release capture buffer"; |
399 | 406 |
400 // Derive a delay estimate for the captured audio packet. | 407 // Derive a delay estimate for the captured audio packet. |
401 // The value contains two parts (A+B), where A is the delay of the | 408 // The value contains two parts (A+B), where A is the delay of the |
402 // first audio frame in the packet and B is the extra delay | 409 // first audio frame in the packet and B is the extra delay |
403 // contained in any stored data. Unit is in audio frames. | 410 // contained in any stored data. Unit is in audio frames. |
404 QueryPerformanceCounter(&now_count); | 411 QueryPerformanceCounter(&now_count); |
405 // first_audio_frame_timestamp will be 0 if we didn't get a timestamp. | 412 // first_audio_frame_timestamp will be 0 if we didn't get a timestamp. |
406 double audio_delay_frames = | 413 double audio_delay_frames = |
407 first_audio_frame_timestamp == 0 | 414 first_audio_frame_timestamp == 0 |
408 ? num_frames_to_read | 415 ? num_frames_to_read |
409 : ((perf_count_to_100ns_units_ * now_count.QuadPart - | 416 : ((perf_count_to_100ns_units_ * now_count.QuadPart - |
410 first_audio_frame_timestamp) / | 417 first_audio_frame_timestamp) / |
411 10000.0) * | 418 10000.0) * |
412 ms_to_frame_count_ + | 419 ms_to_frame_count_ + |
413 buffer_frame_index - num_frames_to_read; | 420 fifo_->GetAvailableFrames() - num_frames_to_read; |
414 | 421 |
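For readability, the delay expression above can be restated as a standalone helper. This is only an illustrative sketch (not part of the patch); parameter names mirror the members used in the capture loop, and the early return mirrors the timestamp-is-zero fallback.

```cpp
#include <cstdint>

// Delay (in frames) of the first frame in the packet, plus frames already
// buffered in the FIFO. Mirrors the expression in the capture loop above.
double EstimateDelayFrames(double perf_count_to_100ns_units,
                           int64_t now_qpc,
                           uint64_t first_audio_frame_timestamp,  // 100 ns units
                           double ms_to_frame_count,
                           int fifo_available_frames,
                           int num_frames_to_read) {
  if (first_audio_frame_timestamp == 0) {
    // No usable device timestamp: fall back to the size of the packet itself.
    return num_frames_to_read;
  }
  // A: age of the packet's first frame, converted 100 ns -> ms -> frames.
  const double age_ms = (perf_count_to_100ns_units * now_qpc -
                         first_audio_frame_timestamp) / 10000.0;
  // B: frames that were already in the FIFO before this packet was pushed.
  const double previously_buffered = fifo_available_frames - num_frames_to_read;
  return age_ms * ms_to_frame_count + previously_buffered;
}
```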
415 // Get a cached AGC volume level which is updated once every second | 422 // Get a cached AGC volume level which is updated once every second |
416 // on the audio manager thread. Note that, |volume| is also updated | 423 // on the audio manager thread. Note that, |volume| is also updated |
417 // each time SetVolume() is called through IPC by the render-side AGC. | 424 // each time SetVolume() is called through IPC by the render-side AGC. |
418 GetAgcVolume(&volume); | 425 GetAgcVolume(&volume); |
419 | 426 |
420 // Deliver captured data to the registered consumer using a packet | 427 // Deliver captured data to the registered consumer using a packet |
421 // size which was specified at construction. | 428 // size which was specified at construction. |
422 uint32_t delay_frames = static_cast<uint32_t>(audio_delay_frames + 0.5); | 429 uint32_t delay_frames = static_cast<uint32_t>(audio_delay_frames + 0.5); |
423 while (buffer_frame_index >= packet_size_frames_) { | 430 while (fifo_->available_blocks()) { |
424 // Copy data to audio bus to match the OnData interface. | 431 if (converter_) { |
425 uint8_t* audio_data = | 432 if (imperfect_buffer_size_conversion_ && |
426 reinterpret_cast<uint8_t*>(capture_buffer.get()); | 433 fifo_->available_blocks() == 1) { |
427 audio_bus_->FromInterleaved(audio_data, audio_bus_->frames(), | 434 // Special case. We need to buffer up more audio before we can |
428 format_.wBitsPerSample / 8); | 435 // convert or else we'll suffer an underrun. |
436 break; | |
437 } | |
438 converter_->ConvertWithDelay(delay_frames, convert_bus_.get()); | |
439 sink_->OnData(this, convert_bus_.get(), delay_frames * frame_size_, | |
440 volume); | |
441 } else { | |
442 sink_->OnData(this, fifo_->Consume(), delay_frames * frame_size_, | |
443 volume); | |
444 } | |
429 | 445 |
430 // Deliver data packet, delay estimation and volume level to | |
431 // the user. | |
432 sink_->OnData(this, audio_bus_.get(), delay_frames * frame_size_, | |
433 volume); | |
434 | |
435 // Store parts of the recorded data which can't be delivered | |
436 // using the current packet size. The stored section will be used | |
437 // either in the next while-loop iteration or in the next | |
438 // capture event. | |
439 // TODO(tommi): If this data will be used in the next capture | |
440 // event, we will report incorrect delay estimates because | |
441 // we'll use the one for the captured data that time around | |
442 // (i.e. in the future). | |
443 memmove(&capture_buffer[0], &capture_buffer[packet_size_bytes_], | |
444 (buffer_frame_index - packet_size_frames_) * frame_size_); | |
445 | |
446 DCHECK_GE(buffer_frame_index, packet_size_frames_); | |
447 buffer_frame_index -= packet_size_frames_; | |
448 if (delay_frames > packet_size_frames_) { | 446 if (delay_frames > packet_size_frames_) { |
449 delay_frames -= packet_size_frames_; | 447 delay_frames -= packet_size_frames_; |
450 } else { | 448 } else { |
451 delay_frames = 0; | 449 delay_frames = 0; |
452 } | 450 } |
453 } | 451 } |
454 } break; | 452 } break; |
455 default: | 453 default: |
456 error = true; | 454 error = true; |
457 break; | 455 break; |
458 } | 456 } |
459 } | 457 } |
460 | 458 |
461 if (recording && error) { | 459 if (recording && error) { |
462 // TODO(henrika): perhaps it is worth improving the cleanup here by e.g. | 460 // TODO(henrika): perhaps it is worth improving the cleanup here by e.g. |
463 // stopping the audio client, joining the thread etc.? | 461 // stopping the audio client, joining the thread etc.? |
464 NOTREACHED() << "WASAPI capturing failed with error code " | 462 NOTREACHED() << "WASAPI capturing failed with error code " |
465 << GetLastError(); | 463 << GetLastError(); |
466 } | 464 } |
467 | 465 |
468 // Disable MMCSS. | 466 // Disable MMCSS. |
469 if (mm_task && !avrt::AvRevertMmThreadCharacteristics(mm_task)) { | 467 if (mm_task && !avrt::AvRevertMmThreadCharacteristics(mm_task)) { |
470 PLOG(WARNING) << "Failed to disable MMCSS"; | 468 PLOG(WARNING) << "Failed to disable MMCSS"; |
471 } | 469 } |
470 | |
471 fifo_.reset(); | |
472 } | 472 } |
473 | 473 |
474 void WASAPIAudioInputStream::HandleError(HRESULT err) { | 474 void WASAPIAudioInputStream::HandleError(HRESULT err) { |
475 NOTREACHED() << "Error code: " << err; | 475 NOTREACHED() << "Error code: " << err; |
476 if (sink_) | 476 if (sink_) |
477 sink_->OnError(this); | 477 sink_->OnError(this); |
478 } | 478 } |
479 | 479 |
480 HRESULT WASAPIAudioInputStream::SetCaptureDevice() { | 480 HRESULT WASAPIAudioInputStream::SetCaptureDevice() { |
481 DCHECK_EQ(OPEN_RESULT_OK, open_result_); | 481 DCHECK_EQ(OPEN_RESULT_OK, open_result_); |
(...skipping 104 matching lines...) | |
586 // engine can convert between a standard PCM sample size used by the | 586 // engine can convert between a standard PCM sample size used by the |
587 // application and the floating-point samples that the engine uses for its | 587 // application and the floating-point samples that the engine uses for its |
588 // internal processing. However, the format for an application stream | 588 // internal processing. However, the format for an application stream |
589 // typically must have the same number of channels and the same sample | 589 // typically must have the same number of channels and the same sample |
590 // rate as the stream format used by the device. | 590 // rate as the stream format used by the device. |
591 // Many audio devices support both PCM and non-PCM stream formats. However, | 591 // Many audio devices support both PCM and non-PCM stream formats. However, |
592 // the audio engine can mix only PCM streams. | 592 // the audio engine can mix only PCM streams. |
593 base::win::ScopedCoMem<WAVEFORMATEX> closest_match; | 593 base::win::ScopedCoMem<WAVEFORMATEX> closest_match; |
594 HRESULT hr = audio_client_->IsFormatSupported(AUDCLNT_SHAREMODE_SHARED, | 594 HRESULT hr = audio_client_->IsFormatSupported(AUDCLNT_SHAREMODE_SHARED, |
595 &format_, &closest_match); | 595 &format_, &closest_match); |
596 DLOG_IF(ERROR, hr == S_FALSE) << "Format is not supported " | 596 DLOG_IF(ERROR, hr == S_FALSE) |
597 << "but a closest match exists."; | 597 << "Format is not supported but a closest match exists."; |
598 | |
599 if (hr == S_FALSE && | |
600 closest_match->nSamplesPerSec >= limits::kMinSampleRate && | |
601 closest_match->nSamplesPerSec <= limits::kMaxSampleRate) { | |
602 DVLOG(1) << "Audio capture data conversion needed."; | |
603 // Ideally, we want a 1:1 ratio between the buffers we get and the buffers | |
604 // we give to OnData so that each buffer we receive from the OS can be | |
605 // directly converted to a buffer that matches with what was asked for. | |
606 const double buffer_ratio = | |
607 format_.nSamplesPerSec / static_cast<double>(packet_size_frames_); | |
608 double new_frames_per_buffer = closest_match->nSamplesPerSec / buffer_ratio; | |
609 | |
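A worked example makes the ratio above concrete. This is only a sketch with assumed numbers (a client asking for 48000 Hz with 480-frame packets, and a 44100 Hz closest match reported by the device), not values taken from the patch.

```cpp
#include <cmath>
#include <iostream>

int main() {
  const double requested_sample_rate = 48000.0;  // assumed client request
  const double packet_size_frames = 480.0;       // 10 ms at 48 kHz
  const double device_sample_rate = 44100.0;     // assumed closest match

  // Same computation as DesiredFormatIsSupported() above.
  const double buffer_ratio = requested_sample_rate / packet_size_frames;
  double new_frames_per_buffer = device_sample_rate / buffer_ratio;

  double integral = 0.0;
  const bool imperfect = std::modf(new_frames_per_buffer, &integral) != 0.0;

  // Prints "441 (perfect)": each 441-frame device buffer converts to exactly
  // one 480-frame client packet. Requesting 44100 Hz with 441-frame packets
  // from a 22050 Hz device would instead give 220.5 frames, an imperfect
  // ratio, which is what triggers the extra FIFO block above.
  std::cout << new_frames_per_buffer
            << (imperfect ? " (imperfect)" : " (perfect)") << std::endl;
}
```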
610 const AudioParameters input( | |
611 AudioParameters::AUDIO_PCM_LOW_LATENCY, | |
612 GuessChannelLayout(closest_match->nChannels), | |
613 closest_match->nSamplesPerSec, | |
614 // We need to be careful here to not pick the closest wBitsPerSample | |
DaleCurtis
2017/02/17 18:23:23
Missed feedback from previous patchset? Re: dead p
tommi (sloooow) - chröme
2017/02/17 22:21:27
Ah, sorry, yes. Removed the comment and now use th
DaleCurtis
2017/02/17 22:29:23
You do need to check that it's 8, 16, or 32, other
tommi (sloooow) - chröme
2017/02/17 23:14:02
iow, 24 is not supported? I'll add the checks. I
DaleCurtis
2017/02/17 23:30:54
24, if it's actually packed into 3 bytes is not su
tommi (sloooow) - chröme
2017/02/18 11:29:49
Done.
| |
615 // match as we need to use the PCM format (which might not be what | |
616 // closest_match->wFormat is) and the internal resampler doesn't | |
617 // support all formats we might get here. So, we stick to the | |
618 // wBitsPerSample that was asked for originally (most likely 16). | |
619 format_.wBitsPerSample, static_cast<int>(new_frames_per_buffer)); | |
620 | |
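Per the review exchange above, a later patchset adds a check that the sample size is one the resampler can handle (8, 16, or 32 bits; packed 24-bit samples are not supported). That code is not shown here, but a hypothetical guard along those lines might look like this; the helper name and its exact placement are assumptions.

```cpp
#include <cstdint>

// Hypothetical helper, not from the patch: bit depths the conversion path
// can accept. Packed 24-bit (3 bytes per sample) is rejected.
bool BitsPerSampleSupportedForConversion(uint16_t bits_per_sample) {
  return bits_per_sample == 8 || bits_per_sample == 16 ||
         bits_per_sample == 32;
}
```

The S_FALSE branch above would then only set up |converter_| when this holds for the bit depth it ends up using.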
621 const AudioParameters output(AudioParameters::AUDIO_PCM_LOW_LATENCY, | |
622 GuessChannelLayout(format_.nChannels), | |
623 format_.nSamplesPerSec, format_.wBitsPerSample, | |
624 packet_size_frames_); | |
625 | |
626 converter_.reset(new AudioConverter(input, output, false)); | |
627 converter_->AddInput(this); | |
628 converter_->PrimeWithSilence(); | |
629 convert_bus_ = AudioBus::Create(output); | |
630 | |
631 // Now change the format we're going to ask for to better match with what | |
632 // the OS can provide. If we succeed in opening the stream with these | |
633 // params, we can take care of the required resampling. | |
634 format_.nSamplesPerSec = closest_match->nSamplesPerSec; | |
635 format_.nChannels = closest_match->nChannels; | |
636 format_.nBlockAlign = (format_.wBitsPerSample / 8) * format_.nChannels; | |
637 format_.nAvgBytesPerSec = format_.nSamplesPerSec * format_.nBlockAlign; | |
638 | |
639 // Update our packet size assumptions based on the new format. | |
640 const auto new_bytes_per_buffer = static_cast<int>(new_frames_per_buffer) * | |
641 format_.nChannels * | |
DaleCurtis
2017/02/17 18:23:23
Replace terms with * format_.nBlockAlign?
tommi (sloooow) - chröme
2017/02/17 22:21:27
Done.
| |
642 (format_.wBitsPerSample / 8); | |
643 packet_size_frames_ = new_bytes_per_buffer / format_.nBlockAlign; | |
644 packet_size_bytes_ = new_bytes_per_buffer; | |
645 frame_size_ = format_.nBlockAlign; | |
646 ms_to_frame_count_ = static_cast<double>(format_.nSamplesPerSec) / 1000.0; | |
647 | |
648 imperfect_buffer_size_conversion_ = | |
649 modf(new_frames_per_buffer, &new_frames_per_buffer) != 0.0; | |
DaleCurtis
2017/02/17 18:23:23
std:: ?
tommi (sloooow) - chröme
2017/02/17 22:21:27
Done.
| |
650 DVLOG_IF(1, imperfect_buffer_size_conversion_) | |
651 << "Audio capture data conversion: Need to inject fifo"; | |
652 | |
653 // Indicate that we're good to go with a close match. | |
654 hr = S_OK; | |
655 } | |
656 | |
598 return (hr == S_OK); | 657 return (hr == S_OK); |
599 } | 658 } |
600 | 659 |
601 HRESULT WASAPIAudioInputStream::InitializeAudioEngine() { | 660 HRESULT WASAPIAudioInputStream::InitializeAudioEngine() { |
602 DCHECK_EQ(OPEN_RESULT_OK, open_result_); | 661 DCHECK_EQ(OPEN_RESULT_OK, open_result_); |
603 DWORD flags; | 662 DWORD flags; |
604 // Use event-driven mode only for regular input devices. For loopback the | 663 // Use event-driven mode only for regular input devices. For loopback the |
605 // EVENTCALLBACK flag is specified when initializing | 664 // EVENTCALLBACK flag is specified when initializing |
606 // |audio_render_client_for_loopback_|. | 665 // |audio_render_client_for_loopback_|. |
607 if (device_id_ == AudioDeviceDescription::kLoopbackInputDeviceId || | 666 if (device_id_ == AudioDeviceDescription::kLoopbackInputDeviceId || |
(...skipping 123 matching lines...) | |
731 | 790 |
732 return hr; | 791 return hr; |
733 } | 792 } |
734 | 793 |
735 void WASAPIAudioInputStream::ReportOpenResult() const { | 794 void WASAPIAudioInputStream::ReportOpenResult() const { |
736 DCHECK(!opened_); // This method must be called before we set this flag. | 795 DCHECK(!opened_); // This method must be called before we set this flag. |
737 UMA_HISTOGRAM_ENUMERATION("Media.Audio.Capture.Win.Open", open_result_, | 796 UMA_HISTOGRAM_ENUMERATION("Media.Audio.Capture.Win.Open", open_result_, |
738 OPEN_RESULT_MAX + 1); | 797 OPEN_RESULT_MAX + 1); |
739 } | 798 } |
740 | 799 |
800 double WASAPIAudioInputStream::ProvideInput(AudioBus* audio_bus, | |
801 uint32_t frames_delayed) { | |
802 fifo_->Consume()->CopyTo(audio_bus); | |
803 return 1.0; | |
804 } | |
805 | |
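ProvideInput() is the pull side of the resampling path set up in DesiredFormatIsSupported(): ConvertWithDelay() asks each registered input for source frames, this stream hands over the oldest FIFO block, and the 1.0 return value means no extra volume scaling. A minimal sketch of that callback shape, using stand-in types rather than the real media::AudioConverter and media::AudioBlockFifo (nothing below is Chromium API):

```cpp
#include <deque>
#include <iostream>
#include <utility>
#include <vector>

using Block = std::vector<float>;  // stand-in for one FIFO block of frames

// Stand-in converter: producing output pulls source data from a registered
// input, which is what AudioConverter::ConvertWithDelay() does via
// ProvideInput().
class PullConverter {
 public:
  struct Input {
    virtual ~Input() = default;
    virtual double ProvideInput(Block* dest) = 0;  // returns a volume scale
  };
  void AddInput(Input* input) { input_ = input; }
  void Convert(Block* output) {
    Block source;
    const double volume = input_->ProvideInput(&source);  // the pull
    output->resize(source.size());  // real code would also resample here
    for (size_t i = 0; i < source.size(); ++i)
      (*output)[i] = static_cast<float>(source[i] * volume);
  }
 private:
  Input* input_ = nullptr;
};

// Stand-in capture stream: hands the converter one buffered block per pull.
class FakeCaptureStream : public PullConverter::Input {
 public:
  void Push(Block block) { fifo_.push_back(std::move(block)); }
  double ProvideInput(Block* dest) override {
    *dest = std::move(fifo_.front());
    fifo_.pop_front();
    return 1.0;  // unity gain, as in the patch
  }
 private:
  std::deque<Block> fifo_;
};

int main() {
  FakeCaptureStream stream;
  PullConverter converter;
  converter.AddInput(&stream);
  stream.Push(Block(4, 0.5f));
  Block out;
  converter.Convert(&out);  // internally calls stream.ProvideInput()
  std::cout << out.size() << " frames pulled through the converter\n";
}
```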
741 } // namespace media | 806 } // namespace media |