Chromium Code Reviews
| Index: media/audio/win/audio_low_latency_input_win.cc |
| diff --git a/media/audio/win/audio_low_latency_input_win.cc b/media/audio/win/audio_low_latency_input_win.cc |
| index 4e416a8a426ffca5a637227998d53e838d785ade..22355580aac45b2c656ae6066bd25fd1b72f1a3f 100644 |
| --- a/media/audio/win/audio_low_latency_input_win.cc |
| +++ b/media/audio/win/audio_low_latency_input_win.cc |
| @@ -7,6 +7,7 @@ |
| #include <memory> |
| #include "base/logging.h" |
| +#include "base/metrics/histogram_macros.h" |
| #include "base/strings/utf_string_conversions.h" |
| #include "base/trace_event/trace_event.h" |
| #include "media/audio/audio_device_description.h" |
| @@ -24,20 +25,10 @@ WASAPIAudioInputStream::WASAPIAudioInputStream(AudioManagerWin* manager, |
| const AudioParameters& params, |
| const std::string& device_id) |
| : manager_(manager), |
| - capture_thread_(NULL), |
| - opened_(false), |
| - started_(false), |
| - frame_size_(0), |
| - packet_size_frames_(0), |
| - packet_size_bytes_(0), |
| - endpoint_buffer_size_frames_(0), |
| device_id_(device_id), |
| - perf_count_to_100ns_units_(0.0), |
| - ms_to_frame_count_(0.0), |
| - sink_(NULL), |
| - audio_bus_(media::AudioBus::Create(params)), |
| - mute_done_(false) { |
| + audio_bus_(media::AudioBus::Create(params)) { |
| DCHECK(manager_); |
| + DCHECK(!device_id_.empty()); |
| // Load the Avrt DLL if not already loaded. Required to support MMCSS. |
| bool avrt_init = avrt::Initialize(); |
| @@ -89,6 +80,8 @@ WASAPIAudioInputStream::~WASAPIAudioInputStream() { |
| bool WASAPIAudioInputStream::Open() { |
| DCHECK(CalledOnValidThread()); |
| + DCHECK_EQ(OPEN_RESULT_OK, open_result_); |
| + |
| // Verify that we are not already opened. |
| if (opened_) |
| return false; |
| @@ -97,32 +90,43 @@ bool WASAPIAudioInputStream::Open() { |
| // device with the specified unique identifier or role which was |
| // set at construction. |
| HRESULT hr = SetCaptureDevice(); |
| - if (FAILED(hr)) |
| + if (FAILED(hr)) { |
| + ReportOpenResult(); |
| return false; |
| + } |
| // Obtain an IAudioClient interface which enables us to create and initialize |
| // an audio stream between an audio application and the audio engine. |
| - hr = ActivateCaptureDevice(); |
| - if (FAILED(hr)) |
| + hr = endpoint_device_->Activate(__uuidof(IAudioClient), CLSCTX_INPROC_SERVER, |
| + NULL, audio_client_.ReceiveVoid()); |
| + if (FAILED(hr)) { |
| + open_result_ = OPEN_RESULT_ACTIVATION_FAILED; |
| + ReportOpenResult(); |
| return false; |
| + } |
| +#ifndef NDEBUG |
| // Retrieve the stream format which the audio engine uses for its internal |
| // processing/mixing of shared-mode streams. This function call is for |
| // diagnostic purposes only and only in debug mode. |
| -#ifndef NDEBUG |
| hr = GetAudioEngineStreamFormat(); |
| #endif |
| // Verify that the selected audio endpoint supports the specified format |
| // set during construction. |
| - if (!DesiredFormatIsSupported()) |
| + if (!DesiredFormatIsSupported()) { |
| + open_result_ = OPEN_RESULT_FORMAT_NOT_SUPPORTED; |
| + ReportOpenResult(); |
| return false; |
| + } |
| // Initialize the audio stream between the client and the device using |
| // shared mode and a lowest possible glitch-free latency. |
| hr = InitializeAudioEngine(); |
| - |
| + ReportOpenResult(); // Report before we assign a value to |opened_|. |
| opened_ = SUCCEEDED(hr); |
| + DCHECK(open_result_ == OPEN_RESULT_OK || !opened_); |
| + |
| return opened_; |
| } |
| @@ -159,9 +163,10 @@ void WASAPIAudioInputStream::Start(AudioInputCallback* callback) { |
| // Create and start the thread that will drive the capturing by waiting for |
| // capture events. |
| - capture_thread_ = new base::DelegateSimpleThread( |
| + DCHECK(!capture_thread_.get()); |
| + capture_thread_.reset(new base::DelegateSimpleThread( |
| this, "wasapi_capture_thread", |
| - base::SimpleThread::Options(base::ThreadPriority::REALTIME_AUDIO)); |
| + base::SimpleThread::Options(base::ThreadPriority::REALTIME_AUDIO))); |
| capture_thread_->Start(); |
| // Start streaming data between the endpoint buffer and the audio engine. |
| @@ -209,7 +214,7 @@ void WASAPIAudioInputStream::Stop() { |
| if (capture_thread_) { |
| SetEvent(stop_capture_event_.Get()); |
| capture_thread_->Join(); |
| - capture_thread_ = NULL; |
| + capture_thread_.reset(); |
| } |
| started_ = false; |
| @@ -299,8 +304,8 @@ void WASAPIAudioInputStream::Run() { |
| // Enable MMCSS to ensure that this thread receives prioritized access to |
| // CPU resources. |
| DWORD task_index = 0; |
| - HANDLE mm_task = avrt::AvSetMmThreadCharacteristics(L"Pro Audio", |
| - &task_index); |
| + HANDLE mm_task = |
|
tommi (sloooow) - chrome
2017/02/07 19:34:46
The changes here and below (all of Run()), are the
|
| + avrt::AvSetMmThreadCharacteristics(L"Pro Audio", &task_index); |
| bool mmcss_is_ok = |
| (mm_task && avrt::AvSetMmThreadPriority(mm_task, AVRT_PRIORITY_CRITICAL)); |
| if (!mmcss_is_ok) { |
| @@ -316,17 +321,17 @@ void WASAPIAudioInputStream::Run() { |
| // 2) The selected buffer size is larger than the recorded buffer size in |
| // each event. |
| size_t buffer_frame_index = 0; |
| - size_t capture_buffer_size = std::max( |
| - 2 * endpoint_buffer_size_frames_ * frame_size_, |
| - 2 * packet_size_frames_ * frame_size_); |
| + size_t capture_buffer_size = |
| + std::max(2 * endpoint_buffer_size_frames_ * frame_size_, |
| + 2 * packet_size_frames_ * frame_size_); |
| std::unique_ptr<uint8_t[]> capture_buffer(new uint8_t[capture_buffer_size]); |
| LARGE_INTEGER now_count = {}; |
| bool recording = true; |
| bool error = false; |
| double volume = GetVolume(); |
| - HANDLE wait_array[2] = |
| - { stop_capture_event_.Get(), audio_samples_ready_event_.Get() }; |
| + HANDLE wait_array[2] = {stop_capture_event_.Get(), |
| + audio_samples_ready_event_.Get()}; |
| base::win::ScopedComPtr<IAudioClock> audio_clock; |
| audio_client_->GetService(__uuidof(IAudioClock), audio_clock.ReceiveVoid()); |
| @@ -344,113 +349,109 @@ void WASAPIAudioInputStream::Run() { |
| // |stop_capture_event_| has been set. |
| recording = false; |
| break; |
| - case WAIT_OBJECT_0 + 1: |
| - { |
| - TRACE_EVENT0("audio", "WASAPIAudioInputStream::Run_0"); |
| - // |audio_samples_ready_event_| has been set. |
| - BYTE* data_ptr = NULL; |
| - UINT32 num_frames_to_read = 0; |
| - DWORD flags = 0; |
| - UINT64 device_position = 0; |
| - UINT64 first_audio_frame_timestamp = 0; |
| - |
| - // Retrieve the amount of data in the capture endpoint buffer, |
| - // replace it with silence if required, create callbacks for each |
| - // packet and store non-delivered data for the next event. |
| - hr = audio_capture_client_->GetBuffer(&data_ptr, |
| - &num_frames_to_read, |
| - &flags, |
| - &device_position, |
| - &first_audio_frame_timestamp); |
| - if (FAILED(hr)) { |
| - DLOG(ERROR) << "Failed to get data from the capture buffer"; |
| - continue; |
| - } |
| - |
| - if (audio_clock) { |
| - // The reported timestamp from GetBuffer is not as reliable as the |
| - // clock from the client. We've seen timestamps reported for |
| - // USB audio devices, be off by several days. Furthermore we've |
| - // seen them jump back in time every 2 seconds or so. |
| - audio_clock->GetPosition( |
| - &device_position, &first_audio_frame_timestamp); |
| - } |
| - |
| - |
| - if (num_frames_to_read != 0) { |
| - size_t pos = buffer_frame_index * frame_size_; |
| - size_t num_bytes = num_frames_to_read * frame_size_; |
| - DCHECK_GE(capture_buffer_size, pos + num_bytes); |
| + case WAIT_OBJECT_0 + 1: { |
| + TRACE_EVENT0("audio", "WASAPIAudioInputStream::Run_0"); |
| + // |audio_samples_ready_event_| has been set. |
| + BYTE* data_ptr = NULL; |
| + UINT32 num_frames_to_read = 0; |
| + DWORD flags = 0; |
| + UINT64 device_position = 0; |
| + UINT64 first_audio_frame_timestamp = 0; |
| + |
| + // Retrieve the amount of data in the capture endpoint buffer, |
| + // replace it with silence if required, create callbacks for each |
| + // packet and store non-delivered data for the next event. |
| + hr = audio_capture_client_->GetBuffer(&data_ptr, &num_frames_to_read, |
| + &flags, &device_position, |
| + &first_audio_frame_timestamp); |
| + if (FAILED(hr)) { |
| + DLOG(ERROR) << "Failed to get data from the capture buffer"; |
| + continue; |
| + } |
| - if (flags & AUDCLNT_BUFFERFLAGS_SILENT) { |
| - // Clear out the local buffer since silence is reported. |
| - memset(&capture_buffer[pos], 0, num_bytes); |
| - } else { |
| - // Copy captured data from audio engine buffer to local buffer. |
| - memcpy(&capture_buffer[pos], data_ptr, num_bytes); |
| - } |
| + if (audio_clock) { |
| + // The reported timestamp from GetBuffer is not as reliable as the |
| + // clock from the client. We've seen timestamps reported for |
| + // USB audio devices be off by several days. Furthermore we've |
| + // seen them jump back in time every 2 seconds or so. |
| + audio_clock->GetPosition(&device_position, |
| + &first_audio_frame_timestamp); |
| + } |
| - buffer_frame_index += num_frames_to_read; |
| + if (num_frames_to_read != 0) { |
| + size_t pos = buffer_frame_index * frame_size_; |
| + size_t num_bytes = num_frames_to_read * frame_size_; |
| + DCHECK_GE(capture_buffer_size, pos + num_bytes); |
| + |
| + if (flags & AUDCLNT_BUFFERFLAGS_SILENT) { |
| + // Clear out the local buffer since silence is reported. |
| + memset(&capture_buffer[pos], 0, num_bytes); |
| + } else { |
| + // Copy captured data from audio engine buffer to local buffer. |
| + memcpy(&capture_buffer[pos], data_ptr, num_bytes); |
| } |
| - hr = audio_capture_client_->ReleaseBuffer(num_frames_to_read); |
| - DLOG_IF(ERROR, FAILED(hr)) << "Failed to release capture buffer"; |
| - |
| - // Derive a delay estimate for the captured audio packet. |
| - // The value contains two parts (A+B), where A is the delay of the |
| - // first audio frame in the packet and B is the extra delay |
| - // contained in any stored data. Unit is in audio frames. |
| - QueryPerformanceCounter(&now_count); |
| - // first_audio_frame_timestamp will be 0 if we didn't get a timestamp. |
| - double audio_delay_frames = first_audio_frame_timestamp == 0 ? |
| - num_frames_to_read : |
| - ((perf_count_to_100ns_units_ * now_count.QuadPart - |
| - first_audio_frame_timestamp) / 10000.0) * ms_to_frame_count_ + |
| - buffer_frame_index - num_frames_to_read; |
| - |
| - // Get a cached AGC volume level which is updated once every second |
| - // on the audio manager thread. Note that, |volume| is also updated |
| - // each time SetVolume() is called through IPC by the render-side AGC. |
| - GetAgcVolume(&volume); |
| - |
| - // Deliver captured data to the registered consumer using a packet |
| - // size which was specified at construction. |
| - uint32_t delay_frames = |
| - static_cast<uint32_t>(audio_delay_frames + 0.5); |
| - while (buffer_frame_index >= packet_size_frames_) { |
| - // Copy data to audio bus to match the OnData interface. |
| - uint8_t* audio_data = |
| - reinterpret_cast<uint8_t*>(capture_buffer.get()); |
| - audio_bus_->FromInterleaved( |
| - audio_data, audio_bus_->frames(), format_.wBitsPerSample / 8); |
| - |
| - // Deliver data packet, delay estimation and volume level to |
| - // the user. |
| - sink_->OnData( |
| - this, audio_bus_.get(), delay_frames * frame_size_, volume); |
| - |
| - // Store parts of the recorded data which can't be delivered |
| - // using the current packet size. The stored section will be used |
| - // either in the next while-loop iteration or in the next |
| - // capture event. |
| - // TODO(tommi): If this data will be used in the next capture |
| - // event, we will report incorrect delay estimates because |
| - // we'll use the one for the captured data that time around |
| - // (i.e. in the future). |
| - memmove(&capture_buffer[0], |
| - &capture_buffer[packet_size_bytes_], |
| - (buffer_frame_index - packet_size_frames_) * frame_size_); |
| - |
| - DCHECK_GE(buffer_frame_index, packet_size_frames_); |
| - buffer_frame_index -= packet_size_frames_; |
| - if (delay_frames > packet_size_frames_) { |
| - delay_frames -= packet_size_frames_; |
| - } else { |
| - delay_frames = 0; |
| - } |
| + buffer_frame_index += num_frames_to_read; |
| + } |
| + |
| + hr = audio_capture_client_->ReleaseBuffer(num_frames_to_read); |
| + DLOG_IF(ERROR, FAILED(hr)) << "Failed to release capture buffer"; |
| + |
| + // Derive a delay estimate for the captured audio packet. |
| + // The value contains two parts (A+B), where A is the delay of the |
| + // first audio frame in the packet and B is the extra delay |
| + // contained in any stored data. Unit is in audio frames. |
| + QueryPerformanceCounter(&now_count); |
| + // first_audio_frame_timestamp will be 0 if we didn't get a timestamp. |
| + double audio_delay_frames = |
| + first_audio_frame_timestamp == 0 |
| + ? num_frames_to_read |
| + : ((perf_count_to_100ns_units_ * now_count.QuadPart - |
| + first_audio_frame_timestamp) / |
| + 10000.0) * |
| + ms_to_frame_count_ + |
| + buffer_frame_index - num_frames_to_read; |
| + |
| + // Get a cached AGC volume level which is updated once every second |
| + // on the audio manager thread. Note that, |volume| is also updated |
| + // each time SetVolume() is called through IPC by the render-side AGC. |
| + GetAgcVolume(&volume); |
| + |
| + // Deliver captured data to the registered consumer using a packet |
| + // size which was specified at construction. |
| + uint32_t delay_frames = static_cast<uint32_t>(audio_delay_frames + 0.5); |
| + while (buffer_frame_index >= packet_size_frames_) { |
| + // Copy data to audio bus to match the OnData interface. |
| + uint8_t* audio_data = |
| + reinterpret_cast<uint8_t*>(capture_buffer.get()); |
| + audio_bus_->FromInterleaved(audio_data, audio_bus_->frames(), |
| + format_.wBitsPerSample / 8); |
| + |
| + // Deliver data packet, delay estimation and volume level to |
| + // the user. |
| + sink_->OnData(this, audio_bus_.get(), delay_frames * frame_size_, |
| + volume); |
| + |
| + // Store parts of the recorded data which can't be delivered |
| + // using the current packet size. The stored section will be used |
| + // either in the next while-loop iteration or in the next |
| + // capture event. |
| + // TODO(tommi): If this data will be used in the next capture |
| + // event, we will report incorrect delay estimates because |
| + // we'll use the one for the captured data that time around |
| + // (i.e. in the future). |
| + memmove(&capture_buffer[0], &capture_buffer[packet_size_bytes_], |
| + (buffer_frame_index - packet_size_frames_) * frame_size_); |
| + |
| + DCHECK_GE(buffer_frame_index, packet_size_frames_); |
| + buffer_frame_index -= packet_size_frames_; |
| + if (delay_frames > packet_size_frames_) { |
| + delay_frames -= packet_size_frames_; |
| + } else { |
| + delay_frames = 0; |
| } |
| } |
| - break; |
| + } break; |
| default: |
| error = true; |
| break; |
| @@ -477,13 +478,16 @@ void WASAPIAudioInputStream::HandleError(HRESULT err) { |
| } |
| HRESULT WASAPIAudioInputStream::SetCaptureDevice() { |
| + DCHECK_EQ(OPEN_RESULT_OK, open_result_); |
| DCHECK(!endpoint_device_.get()); |
| ScopedComPtr<IMMDeviceEnumerator> enumerator; |
| - HRESULT hr = enumerator.CreateInstance(__uuidof(MMDeviceEnumerator), |
| - NULL, CLSCTX_INPROC_SERVER); |
| - if (FAILED(hr)) |
| + HRESULT hr = enumerator.CreateInstance(__uuidof(MMDeviceEnumerator), NULL, |
| + CLSCTX_INPROC_SERVER); |
| + if (FAILED(hr)) { |
| + open_result_ = OPEN_RESULT_CREATE_INSTANCE; |
| return hr; |
| + } |
| // Retrieve the IMMDevice by using the specified role or the specified |
| // unique endpoint device-identification string. |
| @@ -513,34 +517,29 @@ HRESULT WASAPIAudioInputStream::SetCaptureDevice() { |
| endpoint_device_.Receive()); |
| } |
| - if (FAILED(hr)) |
| + if (FAILED(hr)) { |
| + open_result_ = OPEN_RESULT_NO_ENDPOINT; |
| return hr; |
| + } |
| // Verify that the audio endpoint device is active, i.e., the audio |
| // adapter that connects to the endpoint device is present and enabled. |
| DWORD state = DEVICE_STATE_DISABLED; |
| hr = endpoint_device_->GetState(&state); |
| - if (FAILED(hr)) |
| + if (FAILED(hr)) { |
| + open_result_ = OPEN_RESULT_NO_STATE; |
| return hr; |
| + } |
| if (!(state & DEVICE_STATE_ACTIVE)) { |
| DLOG(ERROR) << "Selected capture device is not active."; |
| + open_result_ = OPEN_RESULT_DEVICE_NOT_ACTIVE; |
| hr = E_ACCESSDENIED; |
| } |
| return hr; |
| } |
| -HRESULT WASAPIAudioInputStream::ActivateCaptureDevice() { |
| - // Creates and activates an IAudioClient COM object given the selected |
| - // capture endpoint device. |
| - HRESULT hr = endpoint_device_->Activate(__uuidof(IAudioClient), |
| - CLSCTX_INPROC_SERVER, |
| - NULL, |
| - audio_client_.ReceiveVoid()); |
| - return hr; |
| -} |
| - |
| HRESULT WASAPIAudioInputStream::GetAudioEngineStreamFormat() { |
| HRESULT hr = S_OK; |
| #ifndef NDEBUG |
| @@ -551,8 +550,8 @@ HRESULT WASAPIAudioInputStream::GetAudioEngineStreamFormat() { |
| // An WAVEFORMATEXTENSIBLE structure can specify both the mapping of |
| // channels to speakers and the number of bits of precision in each sample. |
| base::win::ScopedCoMem<WAVEFORMATEXTENSIBLE> format_ex; |
| - hr = audio_client_->GetMixFormat( |
| - reinterpret_cast<WAVEFORMATEX**>(&format_ex)); |
| + hr = |
| + audio_client_->GetMixFormat(reinterpret_cast<WAVEFORMATEX**>(&format_ex)); |
| // See http://msdn.microsoft.com/en-us/windows/hardware/gg463006#EFH |
| // for details on the WAVE file format. |
| @@ -567,10 +566,10 @@ HRESULT WASAPIAudioInputStream::GetAudioEngineStreamFormat() { |
| DVLOG(2) << " cbSize : " << format.cbSize; |
| DVLOG(2) << "WAVEFORMATEXTENSIBLE:"; |
| - DVLOG(2) << " wValidBitsPerSample: " << |
| - format_ex->Samples.wValidBitsPerSample; |
| - DVLOG(2) << " dwChannelMask : 0x" << std::hex << |
| - format_ex->dwChannelMask; |
| + DVLOG(2) << " wValidBitsPerSample: " |
| + << format_ex->Samples.wValidBitsPerSample; |
| + DVLOG(2) << " dwChannelMask : 0x" << std::hex |
| + << format_ex->dwChannelMask; |
| if (format_ex->SubFormat == KSDATAFORMAT_SUBTYPE_PCM) |
| DVLOG(2) << " SubFormat : KSDATAFORMAT_SUBTYPE_PCM"; |
| else if (format_ex->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT) |
| @@ -593,14 +592,14 @@ bool WASAPIAudioInputStream::DesiredFormatIsSupported() { |
| // the audio engine can mix only PCM streams. |
| base::win::ScopedCoMem<WAVEFORMATEX> closest_match; |
| HRESULT hr = audio_client_->IsFormatSupported(AUDCLNT_SHAREMODE_SHARED, |
| - &format_, |
| - &closest_match); |
| + &format_, &closest_match); |
| DLOG_IF(ERROR, hr == S_FALSE) << "Format is not supported " |
| << "but a closest match exists."; |
| return (hr == S_OK); |
| } |
| HRESULT WASAPIAudioInputStream::InitializeAudioEngine() { |
| + DCHECK_EQ(OPEN_RESULT_OK, open_result_); |
| DWORD flags; |
| // Use event-driven mode only for regular input devices. For loopback the |
| // EVENTCALLBACK flag is specified when initializing |
| @@ -625,8 +624,10 @@ HRESULT WASAPIAudioInputStream::InitializeAudioEngine() { |
| ? &kCommunicationsSessionId |
| : nullptr); |
| - if (FAILED(hr)) |
| + if (FAILED(hr)) { |
| + open_result_ = OPEN_RESULT_AUDIO_CLIENT_INIT_FAILED; |
| return hr; |
| + } |
| // Retrieve the length of the endpoint buffer shared between the client |
| // and the audio engine. The buffer length determines the maximum amount |
| @@ -634,8 +635,10 @@ HRESULT WASAPIAudioInputStream::InitializeAudioEngine() { |
| // during a single processing pass. |
| // A typical value is 960 audio frames <=> 20ms @ 48kHz sample rate. |
| hr = audio_client_->GetBufferSize(&endpoint_buffer_size_frames_); |
| - if (FAILED(hr)) |
| + if (FAILED(hr)) { |
| + open_result_ = OPEN_RESULT_GET_BUFFER_SIZE_FAILED; |
| return hr; |
| + } |
| DVLOG(1) << "endpoint buffer size: " << endpoint_buffer_size_frames_ |
| << " [frames]"; |
| @@ -685,15 +688,19 @@ HRESULT WASAPIAudioInputStream::InitializeAudioEngine() { |
| hr = endpoint_device_->Activate( |
| __uuidof(IAudioClient), CLSCTX_INPROC_SERVER, NULL, |
| audio_render_client_for_loopback_.ReceiveVoid()); |
| - if (FAILED(hr)) |
| + if (FAILED(hr)) { |
| + open_result_ = OPEN_RESULT_LOOPBACK_ACTIVATE_FAILED; |
| return hr; |
| + } |
| hr = audio_render_client_for_loopback_->Initialize( |
| AUDCLNT_SHAREMODE_SHARED, |
| - AUDCLNT_STREAMFLAGS_EVENTCALLBACK | AUDCLNT_STREAMFLAGS_NOPERSIST, |
| - 0, 0, &format_, NULL); |
| - if (FAILED(hr)) |
| + AUDCLNT_STREAMFLAGS_EVENTCALLBACK | AUDCLNT_STREAMFLAGS_NOPERSIST, 0, 0, |
| + &format_, NULL); |
| + if (FAILED(hr)) { |
| + open_result_ = OPEN_RESULT_LOOPBACK_INIT_FAILED; |
| return hr; |
| + } |
| hr = audio_render_client_for_loopback_->SetEventHandle( |
| audio_samples_ready_event_.Get()); |
| @@ -701,21 +708,34 @@ HRESULT WASAPIAudioInputStream::InitializeAudioEngine() { |
| hr = audio_client_->SetEventHandle(audio_samples_ready_event_.Get()); |
| } |
| - if (FAILED(hr)) |
| + if (FAILED(hr)) { |
| + open_result_ = OPEN_RESULT_SET_EVENT_HANDLE; |
| return hr; |
| + } |
| // Get access to the IAudioCaptureClient interface. This interface |
| // enables us to read input data from the capture endpoint buffer. |
| hr = audio_client_->GetService(__uuidof(IAudioCaptureClient), |
| audio_capture_client_.ReceiveVoid()); |
| - if (FAILED(hr)) |
| + if (FAILED(hr)) { |
| + open_result_ = OPEN_RESULT_NO_CAPTURE_CLIENT; |
| return hr; |
| + } |
| // Obtain a reference to the ISimpleAudioVolume interface which enables |
| // us to control the master volume level of an audio session. |
| hr = audio_client_->GetService(__uuidof(ISimpleAudioVolume), |
| simple_audio_volume_.ReceiveVoid()); |
| + if (FAILED(hr)) |
| + open_result_ = OPEN_RESULT_NO_AUDIO_VOLUME; |
| + |
| return hr; |
| } |
| +void WASAPIAudioInputStream::ReportOpenResult() const { |
| + DCHECK(!opened_); // This method must be called before we set this flag. |
| + UMA_HISTOGRAM_ENUMERATION("Media.Audio.Capture.Win.Open", open_result_, |
| + OPEN_RESULT_MAX + 1); |
| +} |
| + |
| } // namespace media |