OLD | NEW |
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "media/audio/win/audio_low_latency_input_win.h" | 5 #include "media/audio/win/audio_low_latency_input_win.h" |
6 | 6 |
| 7 #include <cmath> |
7 #include <memory> | 8 #include <memory> |
8 | 9 |
9 #include "base/logging.h" | 10 #include "base/logging.h" |
10 #include "base/metrics/histogram_macros.h" | 11 #include "base/metrics/histogram_macros.h" |
11 #include "base/strings/utf_string_conversions.h" | 12 #include "base/strings/utf_string_conversions.h" |
12 #include "base/trace_event/trace_event.h" | 13 #include "base/trace_event/trace_event.h" |
13 #include "media/audio/audio_device_description.h" | 14 #include "media/audio/audio_device_description.h" |
14 #include "media/audio/win/audio_manager_win.h" | 15 #include "media/audio/win/audio_manager_win.h" |
15 #include "media/audio/win/avrt_wrapper_win.h" | 16 #include "media/audio/win/avrt_wrapper_win.h" |
16 #include "media/audio/win/core_audio_util_win.h" | 17 #include "media/audio/win/core_audio_util_win.h" |
| 18 #include "media/base/audio_block_fifo.h" |
17 #include "media/base/audio_bus.h" | 19 #include "media/base/audio_bus.h" |
| 20 #include "media/base/channel_layout.h" |
| 21 #include "media/base/limits.h" |
18 | 22 |
19 using base::win::ScopedComPtr; | 23 using base::win::ScopedComPtr; |
20 using base::win::ScopedCOMInitializer; | 24 using base::win::ScopedCOMInitializer; |
21 | 25 |
22 namespace media { | 26 namespace media { |
| 27 namespace { |
| 28 bool IsSupportedFormatForConversion(const WAVEFORMATEX& format) { |
| 29 if (format.nSamplesPerSec < limits::kMinSampleRate || |
| 30 format.nSamplesPerSec > limits::kMaxSampleRate) { |
| 31 return false; |
| 32 } |
| 33 |
| 34 switch (format.wBitsPerSample) { |
| 35 case 8: |
| 36 case 16: |
| 37 case 32: |
| 38 break; |
| 39 default: |
| 40 return false; |
| 41 } |
| 42 |
| 43 if (GuessChannelLayout(format.nChannels) == CHANNEL_LAYOUT_UNSUPPORTED) { |
| 44 LOG(ERROR) << "Hardware configuration not supported for audio conversion"; |
| 45 return false; |
| 46 } |
| 47 |
| 48 return true; |
| 49 } |
| 50 } |
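For illustration, a minimal example of what the new helper accepts, using assumed format values that are not taken from this patch:

  // Sketch only: exercises IsSupportedFormatForConversion() with assumed formats.
  WAVEFORMATEX ok = {};
  ok.wFormatTag = WAVE_FORMAT_PCM;
  ok.nSamplesPerSec = 44100;  // within limits::kMinSampleRate..kMaxSampleRate
  ok.wBitsPerSample = 16;     // 8, 16 and 32 are the accepted sample sizes
  ok.nChannels = 2;           // stereo maps to a supported channel layout
  // IsSupportedFormatForConversion(ok) -> true

  WAVEFORMATEX rejected = ok;
  rejected.nChannels = 9;     // GuessChannelLayout() -> CHANNEL_LAYOUT_UNSUPPORTED
  // IsSupportedFormatForConversion(rejected) -> false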
23 | 51 |
24 WASAPIAudioInputStream::WASAPIAudioInputStream(AudioManagerWin* manager, | 52 WASAPIAudioInputStream::WASAPIAudioInputStream(AudioManagerWin* manager, |
25 const AudioParameters& params, | 53 const AudioParameters& params, |
26 const std::string& device_id) | 54 const std::string& device_id) |
27 : manager_(manager), | 55 : manager_(manager), device_id_(device_id) { |
28 device_id_(device_id), | |
29 audio_bus_(media::AudioBus::Create(params)) { | |
30 DCHECK(manager_); | 56 DCHECK(manager_); |
31 DCHECK(!device_id_.empty()); | 57 DCHECK(!device_id_.empty()); |
32 | 58 |
33 // Load the Avrt DLL if not already loaded. Required to support MMCSS. | 59 // Load the Avrt DLL if not already loaded. Required to support MMCSS. |
34 bool avrt_init = avrt::Initialize(); | 60 bool avrt_init = avrt::Initialize(); |
35 DCHECK(avrt_init) << "Failed to load the Avrt.dll"; | 61 DCHECK(avrt_init) << "Failed to load the Avrt.dll"; |
36 | 62 |
37 // Set up the desired capture format specified by the client. | 63 // Set up the desired capture format specified by the client. |
38 format_.nSamplesPerSec = params.sample_rate(); | 64 format_.nSamplesPerSec = params.sample_rate(); |
39 format_.wFormatTag = WAVE_FORMAT_PCM; | 65 format_.wFormatTag = WAVE_FORMAT_PCM; |
(...skipping 76 matching lines...)
116 // set during construction. | 142 // set during construction. |
117 if (!DesiredFormatIsSupported()) { | 143 if (!DesiredFormatIsSupported()) { |
118 open_result_ = OPEN_RESULT_FORMAT_NOT_SUPPORTED; | 144 open_result_ = OPEN_RESULT_FORMAT_NOT_SUPPORTED; |
119 ReportOpenResult(); | 145 ReportOpenResult(); |
120 return false; | 146 return false; |
121 } | 147 } |
122 | 148 |
123 // Initialize the audio stream between the client and the device using | 149 // Initialize the audio stream between the client and the device using |
124 // shared mode and the lowest possible glitch-free latency. | 150 // shared mode and the lowest possible glitch-free latency. |
125 hr = InitializeAudioEngine(); | 151 hr = InitializeAudioEngine(); |
| 152 if (SUCCEEDED(hr) && converter_) |
| 153 open_result_ = OPEN_RESULT_OK_WITH_RESAMPLING; |
126 ReportOpenResult(); // Report before we assign a value to |opened_|. | 154 ReportOpenResult(); // Report before we assign a value to |opened_|. |
127 opened_ = SUCCEEDED(hr); | 155 opened_ = SUCCEEDED(hr); |
128 DCHECK(open_result_ == OPEN_RESULT_OK || !opened_); | |
129 | 156 |
130 return opened_; | 157 return opened_; |
131 } | 158 } |
132 | 159 |
133 void WASAPIAudioInputStream::Start(AudioInputCallback* callback) { | 160 void WASAPIAudioInputStream::Start(AudioInputCallback* callback) { |
134 DCHECK(CalledOnValidThread()); | 161 DCHECK(CalledOnValidThread()); |
135 DCHECK(callback); | 162 DCHECK(callback); |
136 DLOG_IF(ERROR, !opened_) << "Open() has not been called successfully"; | 163 DLOG_IF(ERROR, !opened_) << "Open() has not been called successfully"; |
137 if (!opened_) | 164 if (!opened_) |
138 return; | 165 return; |
(...skipping 81 matching lines...)
220 started_ = false; | 247 started_ = false; |
221 sink_ = NULL; | 248 sink_ = NULL; |
222 } | 249 } |
223 | 250 |
224 void WASAPIAudioInputStream::Close() { | 251 void WASAPIAudioInputStream::Close() { |
225 DVLOG(1) << "WASAPIAudioInputStream::Close()"; | 252 DVLOG(1) << "WASAPIAudioInputStream::Close()"; |
226 // It is valid to call Close() before calling open or Start(). | 253 // It is valid to call Close() before calling open or Start(). |
227 // It is also valid to call Close() after Start() has been called. | 254 // It is also valid to call Close() after Start() has been called. |
228 Stop(); | 255 Stop(); |
229 | 256 |
| 257 if (converter_) |
| 258 converter_->RemoveInput(this); |
| 259 |
230 // Inform the audio manager that we have been closed. This will cause our | 260 // Inform the audio manager that we have been closed. This will cause our |
231 // destruction. | 261 // destruction. |
232 manager_->ReleaseInputStream(this); | 262 manager_->ReleaseInputStream(this); |
233 } | 263 } |
234 | 264 |
235 double WASAPIAudioInputStream::GetMaxVolume() { | 265 double WASAPIAudioInputStream::GetMaxVolume() { |
236 // Verify that Open() has been called successfully, to ensure that an audio | 266 // Verify that Open() has been called successfully, to ensure that an audio |
237 // session exists and that an ISimpleAudioVolume interface has been created. | 267 // session exists and that an ISimpleAudioVolume interface has been created. |
238 DLOG_IF(ERROR, !opened_) << "Open() has not been called successfully"; | 268 DLOG_IF(ERROR, !opened_) << "Open() has not been called successfully"; |
239 if (!opened_) | 269 if (!opened_) |
(...skipping 73 matching lines...)
313 // to reduced QoS at high load. | 343 // to reduced QoS at high load. |
314 DWORD err = GetLastError(); | 344 DWORD err = GetLastError(); |
315 LOG(WARNING) << "Failed to enable MMCSS (error code=" << err << ")."; | 345 LOG(WARNING) << "Failed to enable MMCSS (error code=" << err << ")."; |
316 } | 346 } |
317 | 347 |
318 // Allocate a buffer with a size that enables us to take care of cases like: | 348 // Allocate a buffer with a size that enables us to take care of cases like: |
319 // 1) The recorded buffer size is smaller, or does not match exactly with, | 349 // 1) The recorded buffer size is smaller, or does not match exactly with, |
320 // the selected packet size used in each callback. | 350 // the selected packet size used in each callback. |
321 // 2) The selected buffer size is larger than the recorded buffer size in | 351 // 2) The selected buffer size is larger than the recorded buffer size in |
322 // each event. | 352 // each event. |
323 size_t buffer_frame_index = 0; | 353 // In the case where no resampling is required, a single buffer should be |
324 size_t capture_buffer_size = | 354 // enough but in case we get buffers that don't match exactly, we'll go with |
325 std::max(2 * endpoint_buffer_size_frames_ * frame_size_, | 355 // two. Same applies if we need to resample and the buffer ratio is perfect. |
326 2 * packet_size_frames_ * frame_size_); | 356 // However if the buffer ratio is imperfect, we will need 3 buffers to safely |
327 std::unique_ptr<uint8_t[]> capture_buffer(new uint8_t[capture_buffer_size]); | 357 // be able to buffer up data in cases where a conversion requires two audio |
| 358 // buffers (and we need to be able to write to the third one). |
| 359 DCHECK(!fifo_); |
| 360 const int buffers_required = |
| 361 converter_ && imperfect_buffer_size_conversion_ ? 3 : 2; |
| 362 fifo_.reset(new AudioBlockFifo(format_.nChannels, packet_size_frames_, |
| 363 buffers_required)); |
| 364 |
| 365 DVLOG(1) << "AudioBlockFifo needs " << buffers_required << " buffers"; |
328 | 366 |
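To make the FIFO sizing above concrete, here is a minimal sketch of the push/consume cycle the capture loop performs, using only the AudioBlockFifo methods this patch calls; the channel, frame and sample-size constants are assumed example values:

  #include "media/base/audio_block_fifo.h"
  #include "media/base/audio_bus.h"

  void FifoFlowSketch(const void* os_data, int os_frames) {
    // Three blocks covers the imperfect-conversion case described above.
    media::AudioBlockFifo fifo(2 /* channels */, 480 /* frames per block */,
                               3 /* blocks */);

    // Capture event: push whatever the OS delivered; it rarely lines up
    // exactly with one 480-frame block.
    fifo.Push(os_data, os_frames, 2 /* bytes per 16-bit sample */);

    // Drain complete blocks only; a partial block stays queued until the
    // next capture event tops it up.
    while (fifo.available_blocks()) {
      const media::AudioBus* block = fifo.Consume();
      // ... hand |block| to the converter or straight to the sink ...
      (void)block;
    }
  }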
329 LARGE_INTEGER now_count = {}; | 367 LARGE_INTEGER now_count = {}; |
330 bool recording = true; | 368 bool recording = true; |
331 bool error = false; | 369 bool error = false; |
332 double volume = GetVolume(); | 370 double volume = GetVolume(); |
333 HANDLE wait_array[2] = {stop_capture_event_.Get(), | 371 HANDLE wait_array[2] = {stop_capture_event_.Get(), |
334 audio_samples_ready_event_.Get()}; | 372 audio_samples_ready_event_.Get()}; |
335 | 373 |
336 base::win::ScopedComPtr<IAudioClock> audio_clock; | 374 base::win::ScopedComPtr<IAudioClock> audio_clock; |
337 audio_client_->GetService(__uuidof(IAudioClock), audio_clock.ReceiveVoid()); | 375 audio_client_->GetService(__uuidof(IAudioClock), audio_clock.ReceiveVoid()); |
(...skipping 34 matching lines...)
372 if (audio_clock) { | 410 if (audio_clock) { |
373 // The reported timestamp from GetBuffer is not as reliable as the | 411 // The reported timestamp from GetBuffer is not as reliable as the |
374 // clock from the client. We've seen timestamps reported for | 412 // clock from the client. We've seen timestamps reported for |
375 // USB audio devices, be off by several days. Furthermore we've | 413 // USB audio devices, be off by several days. Furthermore we've |
376 // seen them jump back in time every 2 seconds or so. | 414 // seen them jump back in time every 2 seconds or so. |
377 audio_clock->GetPosition(&device_position, | 415 audio_clock->GetPosition(&device_position, |
378 &first_audio_frame_timestamp); | 416 &first_audio_frame_timestamp); |
379 } | 417 } |
380 | 418 |
381 if (num_frames_to_read != 0) { | 419 if (num_frames_to_read != 0) { |
382 size_t pos = buffer_frame_index * frame_size_; | |
383 size_t num_bytes = num_frames_to_read * frame_size_; | |
384 DCHECK_GE(capture_buffer_size, pos + num_bytes); | |
385 | |
386 if (flags & AUDCLNT_BUFFERFLAGS_SILENT) { | 420 if (flags & AUDCLNT_BUFFERFLAGS_SILENT) { |
387 // Clear out the local buffer since silence is reported. | 421 fifo_->PushSilence(num_frames_to_read); |
388 memset(&capture_buffer[pos], 0, num_bytes); | |
389 } else { | 422 } else { |
390 // Copy captured data from audio engine buffer to local buffer. | 423 fifo_->Push(data_ptr, num_frames_to_read, |
391 memcpy(&capture_buffer[pos], data_ptr, num_bytes); | 424 format_.wBitsPerSample / 8); |
392 } | 425 } |
393 | |
394 buffer_frame_index += num_frames_to_read; | |
395 } | 426 } |
396 | 427 |
397 hr = audio_capture_client_->ReleaseBuffer(num_frames_to_read); | 428 hr = audio_capture_client_->ReleaseBuffer(num_frames_to_read); |
398 DLOG_IF(ERROR, FAILED(hr)) << "Failed to release capture buffer"; | 429 DLOG_IF(ERROR, FAILED(hr)) << "Failed to release capture buffer"; |
399 | 430 |
400 // Derive a delay estimate for the captured audio packet. | 431 // Derive a delay estimate for the captured audio packet. |
401 // The value contains two parts (A+B), where A is the delay of the | 432 // The value contains two parts (A+B), where A is the delay of the |
402 // first audio frame in the packet and B is the extra delay | 433 // first audio frame in the packet and B is the extra delay |
403 // contained in any stored data. Unit is in audio frames. | 434 // contained in any stored data. Unit is in audio frames. |
404 QueryPerformanceCounter(&now_count); | 435 QueryPerformanceCounter(&now_count); |
405 // first_audio_frame_timestamp will be 0 if we didn't get a timestamp. | 436 // first_audio_frame_timestamp will be 0 if we didn't get a timestamp. |
406 double audio_delay_frames = | 437 double audio_delay_frames = |
407 first_audio_frame_timestamp == 0 | 438 first_audio_frame_timestamp == 0 |
408 ? num_frames_to_read | 439 ? num_frames_to_read |
409 : ((perf_count_to_100ns_units_ * now_count.QuadPart - | 440 : ((perf_count_to_100ns_units_ * now_count.QuadPart - |
410 first_audio_frame_timestamp) / | 441 first_audio_frame_timestamp) / |
411 10000.0) * | 442 10000.0) * |
412 ms_to_frame_count_ + | 443 ms_to_frame_count_ + |
413 buffer_frame_index - num_frames_to_read; | 444 fifo_->GetAvailableFrames() - num_frames_to_read; |
414 | 445 |
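As a worked example of the two-part (A+B) delay estimate above, with assumed numbers only (48 kHz capture, the first frame of the packet is 5 ms old, 960 frames sit in the FIFO of which 480 were pushed this event):

  #include <cstdio>

  int main() {
    const double ms_to_frame_count = 48000.0 / 1000.0;  // 48 frames per ms
    const double elapsed_100ns_units = 5 * 10000.0;     // packet captured 5 ms ago
    const int fifo_available_frames = 960;              // frames now in the FIFO
    const int num_frames_to_read = 480;                 // frames pushed this event

    const double audio_delay_frames =
        (elapsed_100ns_units / 10000.0) * ms_to_frame_count +  // A: age of packet
        fifo_available_frames - num_frames_to_read;            // B: stored backlog
    std::printf("delay: %.0f frames (%.1f ms)\n", audio_delay_frames,
                audio_delay_frames / ms_to_frame_count);       // 720 frames, 15.0 ms
    return 0;
  }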
415 // Get a cached AGC volume level which is updated once every second | 446 // Get a cached AGC volume level which is updated once every second |
416 // on the audio manager thread. Note that, |volume| is also updated | 447 // on the audio manager thread. Note that, |volume| is also updated |
417 // each time SetVolume() is called through IPC by the render-side AGC. | 448 // each time SetVolume() is called through IPC by the render-side AGC. |
418 GetAgcVolume(&volume); | 449 GetAgcVolume(&volume); |
419 | 450 |
420 // Deliver captured data to the registered consumer using a packet | 451 // Deliver captured data to the registered consumer using a packet |
421 // size which was specified at construction. | 452 // size which was specified at construction. |
422 uint32_t delay_frames = static_cast<uint32_t>(audio_delay_frames + 0.5); | 453 uint32_t delay_frames = static_cast<uint32_t>(audio_delay_frames + 0.5); |
423 while (buffer_frame_index >= packet_size_frames_) { | 454 while (fifo_->available_blocks()) { |
424 // Copy data to audio bus to match the OnData interface. | 455 if (converter_) { |
425 uint8_t* audio_data = | 456 if (imperfect_buffer_size_conversion_ && |
426 reinterpret_cast<uint8_t*>(capture_buffer.get()); | 457 fifo_->available_blocks() == 1) { |
427 audio_bus_->FromInterleaved(audio_data, audio_bus_->frames(), | 458 // Special case. We need to buffer up more audio before we can |
428 format_.wBitsPerSample / 8); | 459 // convert or else we'll suffer an underrun. |
| 460 break; |
| 461 } |
| 462 converter_->ConvertWithDelay(delay_frames, convert_bus_.get()); |
| 463 sink_->OnData(this, convert_bus_.get(), delay_frames * frame_size_, |
| 464 volume); |
| 465 } else { |
| 466 sink_->OnData(this, fifo_->Consume(), delay_frames * frame_size_, |
| 467 volume); |
| 468 } |
429 | 469 |
430 // Deliver data packet, delay estimation and volume level to | |
431 // the user. | |
432 sink_->OnData(this, audio_bus_.get(), delay_frames * frame_size_, | |
433 volume); | |
434 | |
435 // Store parts of the recorded data which can't be delivered | |
436 // using the current packet size. The stored section will be used | |
437 // either in the next while-loop iteration or in the next | |
438 // capture event. | |
439 // TODO(tommi): If this data will be used in the next capture | |
440 // event, we will report incorrect delay estimates because | |
441 // we'll use the one for the captured data that time around | |
442 // (i.e. in the future). | |
443 memmove(&capture_buffer[0], &capture_buffer[packet_size_bytes_], | |
444 (buffer_frame_index - packet_size_frames_) * frame_size_); | |
445 | |
446 DCHECK_GE(buffer_frame_index, packet_size_frames_); | |
447 buffer_frame_index -= packet_size_frames_; | |
448 if (delay_frames > packet_size_frames_) { | 470 if (delay_frames > packet_size_frames_) { |
449 delay_frames -= packet_size_frames_; | 471 delay_frames -= packet_size_frames_; |
450 } else { | 472 } else { |
451 delay_frames = 0; | 473 delay_frames = 0; |
452 } | 474 } |
453 } | 475 } |
454 } break; | 476 } break; |
455 default: | 477 default: |
456 error = true; | 478 error = true; |
457 break; | 479 break; |
458 } | 480 } |
459 } | 481 } |
460 | 482 |
461 if (recording && error) { | 483 if (recording && error) { |
462 // TODO(henrika): perhaps it is worth improving the cleanup here by e.g. | 484 // TODO(henrika): perhaps it is worth improving the cleanup here by e.g. |
463 // stopping the audio client, joining the thread etc.? | 485 // stopping the audio client, joining the thread etc.? |
464 NOTREACHED() << "WASAPI capturing failed with error code " | 486 NOTREACHED() << "WASAPI capturing failed with error code " |
465 << GetLastError(); | 487 << GetLastError(); |
466 } | 488 } |
467 | 489 |
468 // Disable MMCSS. | 490 // Disable MMCSS. |
469 if (mm_task && !avrt::AvRevertMmThreadCharacteristics(mm_task)) { | 491 if (mm_task && !avrt::AvRevertMmThreadCharacteristics(mm_task)) { |
470 PLOG(WARNING) << "Failed to disable MMCSS"; | 492 PLOG(WARNING) << "Failed to disable MMCSS"; |
471 } | 493 } |
| 494 |
| 495 fifo_.reset(); |
472 } | 496 } |
473 | 497 |
474 void WASAPIAudioInputStream::HandleError(HRESULT err) { | 498 void WASAPIAudioInputStream::HandleError(HRESULT err) { |
475 NOTREACHED() << "Error code: " << err; | 499 NOTREACHED() << "Error code: " << err; |
476 if (sink_) | 500 if (sink_) |
477 sink_->OnError(this); | 501 sink_->OnError(this); |
478 } | 502 } |
479 | 503 |
480 HRESULT WASAPIAudioInputStream::SetCaptureDevice() { | 504 HRESULT WASAPIAudioInputStream::SetCaptureDevice() { |
481 DCHECK_EQ(OPEN_RESULT_OK, open_result_); | 505 DCHECK_EQ(OPEN_RESULT_OK, open_result_); |
(...skipping 98 matching lines...)
580 return hr; | 604 return hr; |
581 } | 605 } |
582 | 606 |
583 bool WASAPIAudioInputStream::DesiredFormatIsSupported() { | 607 bool WASAPIAudioInputStream::DesiredFormatIsSupported() { |
584 // An application that uses WASAPI to manage shared-mode streams can rely | 608 // An application that uses WASAPI to manage shared-mode streams can rely |
585 // on the audio engine to perform only limited format conversions. The audio | 609 // on the audio engine to perform only limited format conversions. The audio |
586 // engine can convert between a standard PCM sample size used by the | 610 // engine can convert between a standard PCM sample size used by the |
587 // application and the floating-point samples that the engine uses for its | 611 // application and the floating-point samples that the engine uses for its |
588 // internal processing. However, the format for an application stream | 612 // internal processing. However, the format for an application stream |
589 // typically must have the same number of channels and the same sample | 613 // typically must have the same number of channels and the same sample |
590 // rate as the stream format used by the device. | 614 // rate as the stream format used by the device. |
591 // Many audio devices support both PCM and non-PCM stream formats. However, | 615 // Many audio devices support both PCM and non-PCM stream formats. However, |
592 // the audio engine can mix only PCM streams. | 616 // the audio engine can mix only PCM streams. |
593 base::win::ScopedCoMem<WAVEFORMATEX> closest_match; | 617 base::win::ScopedCoMem<WAVEFORMATEX> closest_match; |
594 HRESULT hr = audio_client_->IsFormatSupported(AUDCLNT_SHAREMODE_SHARED, | 618 HRESULT hr = audio_client_->IsFormatSupported(AUDCLNT_SHAREMODE_SHARED, |
595 &format_, &closest_match); | 619 &format_, &closest_match); |
596 DLOG_IF(ERROR, hr == S_FALSE) << "Format is not supported " | 620 DLOG_IF(ERROR, hr == S_FALSE) |
597 << "but a closest match exists."; | 621 << "Format is not supported but a closest match exists."; |
| 622 |
| 623 if (hr == S_FALSE && IsSupportedFormatForConversion(*closest_match.get())) { |
| 624 DVLOG(1) << "Audio capture data conversion needed."; |
| 625 // Ideally, we want a 1:1 ratio between the buffers we get and the buffers |
| 626 // we give to OnData so that each buffer we receive from the OS can be |
| 627 // directly converted to a buffer that matches with what was asked for. |
| 628 const double buffer_ratio = |
| 629 format_.nSamplesPerSec / static_cast<double>(packet_size_frames_); |
| 630 double new_frames_per_buffer = closest_match->nSamplesPerSec / buffer_ratio; |
| 631 |
| 632 const auto input_layout = GuessChannelLayout(closest_match->nChannels); |
| 633 DCHECK_NE(CHANNEL_LAYOUT_UNSUPPORTED, input_layout); |
| 634 const auto output_layout = GuessChannelLayout(format_.nChannels); |
| 635 DCHECK_NE(CHANNEL_LAYOUT_UNSUPPORTED, output_layout); |
| 636 |
| 637 const AudioParameters input(AudioParameters::AUDIO_PCM_LOW_LATENCY, |
| 638 input_layout, closest_match->nSamplesPerSec, |
| 639 closest_match->wBitsPerSample, |
| 640 static_cast<int>(new_frames_per_buffer)); |
| 641 |
| 642 const AudioParameters output(AudioParameters::AUDIO_PCM_LOW_LATENCY, |
| 643 output_layout, format_.nSamplesPerSec, |
| 644 format_.wBitsPerSample, packet_size_frames_); |
| 645 |
| 646 converter_.reset(new AudioConverter(input, output, false)); |
| 647 converter_->AddInput(this); |
| 648 converter_->PrimeWithSilence(); |
| 649 convert_bus_ = AudioBus::Create(output); |
| 650 |
| 651 // Now change the format we're going to ask for to better match with what |
| 652 // the OS can provide. If we succeed in opening the stream with these |
| 653 // params, we can take care of the required resampling. |
| 654 format_.wBitsPerSample = closest_match->wBitsPerSample; |
| 655 format_.nSamplesPerSec = closest_match->nSamplesPerSec; |
| 656 format_.nChannels = closest_match->nChannels; |
| 657 format_.nBlockAlign = (format_.wBitsPerSample / 8) * format_.nChannels; |
| 658 format_.nAvgBytesPerSec = format_.nSamplesPerSec * format_.nBlockAlign; |
| 659 DVLOG(1) << "Will convert audio from: \nbits: " << format_.wBitsPerSample |
| 660 << "\nsample rate: " << format_.nSamplesPerSec |
| 661 << "\nchannels: " << format_.nChannels |
| 662 << "\nblock align: " << format_.nBlockAlign |
| 663 << "\navg bytes per sec: " << format_.nAvgBytesPerSec; |
| 664 |
| 665 // Update our packet size assumptions based on the new format. |
| 666 const auto new_bytes_per_buffer = |
| 667 static_cast<int>(new_frames_per_buffer) * format_.nBlockAlign; |
| 668 packet_size_frames_ = new_bytes_per_buffer / format_.nBlockAlign; |
| 669 packet_size_bytes_ = new_bytes_per_buffer; |
| 670 frame_size_ = format_.nBlockAlign; |
| 671 ms_to_frame_count_ = static_cast<double>(format_.nSamplesPerSec) / 1000.0; |
| 672 |
| 673 imperfect_buffer_size_conversion_ = |
| 674 std::modf(new_frames_per_buffer, &new_frames_per_buffer) != 0.0; |
| 675 DVLOG_IF(1, imperfect_buffer_size_conversion_) |
| 676 << "Audio capture data conversion: Need to inject fifo"; |
| 677 |
| 678 // Indicate that we're good to go with a close match. |
| 679 hr = S_OK; |
| 680 } |
| 681 |
598 return (hr == S_OK); | 682 return (hr == S_OK); |
599 } | 683 } |
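To illustrate the buffer-ratio arithmetic above with assumed numbers (the client asks for 48 kHz in 480-frame packets and the device's closest match is 44.1 kHz; neither value comes from this patch):

  #include <cmath>
  #include <cstdio>

  int main() {
    const double requested_rate = 48000.0;   // format_.nSamplesPerSec as requested
    const double requested_packet = 480.0;   // packet_size_frames_ (10 ms)
    const double native_rate = 44100.0;      // closest_match->nSamplesPerSec

    // One native buffer should map onto one OnData() packet.
    const double buffer_ratio = requested_rate / requested_packet;  // 100 buffers/s
    double new_frames_per_buffer = native_rate / buffer_ratio;      // 441 frames

    double integral = 0.0;
    const bool imperfect = std::modf(new_frames_per_buffer, &integral) != 0.0;
    std::printf("native frames per buffer: %.1f, imperfect: %d\n",
                new_frames_per_buffer, imperfect);
    // 44.1 kHz yields exactly 441 frames (perfect ratio, 2 FIFO blocks); a
    // 22.05 kHz match for the same request would yield 220.5 (imperfect, 3).
    return 0;
  }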
600 | 684 |
601 HRESULT WASAPIAudioInputStream::InitializeAudioEngine() { | 685 HRESULT WASAPIAudioInputStream::InitializeAudioEngine() { |
602 DCHECK_EQ(OPEN_RESULT_OK, open_result_); | 686 DCHECK_EQ(OPEN_RESULT_OK, open_result_); |
603 DWORD flags; | 687 DWORD flags; |
604 // Use event-driven mode only for regular input devices. For loopback the | 688 // Use event-driven mode only for regular input devices. For loopback the |
605 // EVENTCALLBACK flag is specified when initializing | 689 // EVENTCALLBACK flag is specified when initializing |
606 // |audio_render_client_for_loopback_|. | 690 // |audio_render_client_for_loopback_|. |
607 if (device_id_ == AudioDeviceDescription::kLoopbackInputDeviceId || | 691 if (device_id_ == AudioDeviceDescription::kLoopbackInputDeviceId || |
(...skipping 123 matching lines...)
731 | 815 |
732 return hr; | 816 return hr; |
733 } | 817 } |
734 | 818 |
735 void WASAPIAudioInputStream::ReportOpenResult() const { | 819 void WASAPIAudioInputStream::ReportOpenResult() const { |
736 DCHECK(!opened_); // This method must be called before we set this flag. | 820 DCHECK(!opened_); // This method must be called before we set this flag. |
737 UMA_HISTOGRAM_ENUMERATION("Media.Audio.Capture.Win.Open", open_result_, | 821 UMA_HISTOGRAM_ENUMERATION("Media.Audio.Capture.Win.Open", open_result_, |
738 OPEN_RESULT_MAX + 1); | 822 OPEN_RESULT_MAX + 1); |
739 } | 823 } |
740 | 824 |
| 825 double WASAPIAudioInputStream::ProvideInput(AudioBus* audio_bus, |
| 826 uint32_t frames_delayed) { |
| 827 fifo_->Consume()->CopyTo(audio_bus); |
| 828 return 1.0; |
| 829 } |
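For context, a minimal sketch of the pull model that ends in ProvideInput(): ConvertWithDelay() on the capture thread asks the converter for one output block, and the converter pulls one block from each registered input. Only calls made elsewhere in this patch are used; the 44.1 kHz/48 kHz parameters and the OneShotInput name are assumed for illustration:

  #include <cstdint>
  #include <memory>

  #include "media/base/audio_bus.h"
  #include "media/base/audio_converter.h"
  #include "media/base/audio_parameters.h"
  #include "media/base/channel_layout.h"

  class OneShotInput : public media::AudioConverter::InputCallback {
   public:
    double ProvideInput(media::AudioBus* audio_bus,
                        uint32_t frames_delayed) override {
      audio_bus->Zero();  // a real input copies one FIFO block here
      return 1.0;         // 1.0 means no extra volume scaling is applied
    }
  };

  void ConverterPullSketch() {
    const media::AudioParameters input(
        media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
        media::CHANNEL_LAYOUT_STEREO, 44100, 16, 441);
    const media::AudioParameters output(
        media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
        media::CHANNEL_LAYOUT_STEREO, 48000, 16, 480);

    media::AudioConverter converter(input, output, false /* disable_fifo */);
    OneShotInput source;
    converter.AddInput(&source);
    converter.PrimeWithSilence();

    std::unique_ptr<media::AudioBus> dest = media::AudioBus::Create(output);
    converter.ConvertWithDelay(0 /* initial frames delayed */, dest.get());
    converter.RemoveInput(&source);
  }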
| 830 |
741 } // namespace media | 831 } // namespace media |