Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "media/audio/win/audio_low_latency_output_win.h" | 5 #include "media/audio/win/audio_low_latency_output_win.h" |
| 6 | 6 |
| 7 #include <Functiondiscoverykeys_devpkey.h> | 7 #include <Functiondiscoverykeys_devpkey.h> |
| 8 | 8 |
| 9 #include "base/command_line.h" | 9 #include "base/command_line.h" |
| 10 #include "base/debug/trace_event.h" | |
| 10 #include "base/logging.h" | 11 #include "base/logging.h" |
| 11 #include "base/memory/scoped_ptr.h" | 12 #include "base/memory/scoped_ptr.h" |
| 12 #include "base/metrics/histogram.h" | 13 #include "base/metrics/histogram.h" |
| 13 #include "base/utf_string_conversions.h" | 14 #include "base/utf_string_conversions.h" |
| 14 #include "media/audio/audio_util.h" | 15 #include "media/audio/audio_util.h" |
| 15 #include "media/audio/win/audio_manager_win.h" | 16 #include "media/audio/win/audio_manager_win.h" |
| 16 #include "media/audio/win/avrt_wrapper_win.h" | 17 #include "media/audio/win/avrt_wrapper_win.h" |
| 18 #include "media/audio/win/core_audio_util_win.h" | |
| 17 #include "media/base/limits.h" | 19 #include "media/base/limits.h" |
| 18 #include "media/base/media_switches.h" | 20 #include "media/base/media_switches.h" |
| 19 | 21 |
| 20 using base::win::ScopedComPtr; | 22 using base::win::ScopedComPtr; |
| 21 using base::win::ScopedCOMInitializer; | 23 using base::win::ScopedCOMInitializer; |
| 22 using base::win::ScopedCoMem; | 24 using base::win::ScopedCoMem; |
| 23 | 25 |
| 24 namespace media { | 26 namespace media { |
| 25 | 27 |
| 26 typedef uint32 ChannelConfig; | 28 typedef uint32 ChannelConfig; |
| 27 | 29 |
| 28 // Retrieves the stream format that the audio engine uses for its internal | 30 // Compare two sets of audio parameters and return true if they are equal. |
| 29 // processing/mixing of shared-mode streams. | 31 // Note that bits_per_sample() is excluded from this comparison since Core |
| 30 static HRESULT GetMixFormat(ERole device_role, WAVEFORMATEX** device_format) { | 32 // Audio can deal with most bit depths. As an example, if the native/mixing |
| 31 // Note that we are using the IAudioClient::GetMixFormat() API to get the | 33 // bit depth is 32 bits (default), opening at 16 or 24 still works fine and |
| 32 // device format in this function. It is in fact possible to be "more native", | 34 // the audio engine will do the required conversion for us. |
| 33 // and ask the endpoint device directly for its properties. Given a reference | 35 static bool CompareAudioParameters(const media::AudioParameters& a, |
|
DaleCurtis
2013/01/31 02:34:33
Worth adding as an Equals() method on AudioParameters…
tommi (sloooow) - chröme
2013/01/31 13:42:08
Since this doesn't compare all the properties such…
henrika (OOO until Aug 14)
2013/01/31 14:29:38
I don't know if that is kosher actually since I do…
henrika (OOO until Aug 14)
2013/01/31 14:29:38
Done.
| |
| 34 // to the IMMDevice interface of an endpoint object, a client can obtain a | 36 const media::AudioParameters& b) { |
| 35 // reference to the endpoint object's property store by calling the | 37 return (a.format() == b.format() && |
| 36 // IMMDevice::OpenPropertyStore() method. However, I have not been able to | 38 a.channels() == b.channels() && |
| 37 // access any valuable information using this method on my HP Z600 desktop, | 39 a.sample_rate() == b.sample_rate() && |
| 38 // hence it feels more appropriate to use the IAudioClient::GetMixFormat() | 40 a.frames_per_buffer() == b.frames_per_buffer()); |
| 39 // approach instead. | |
| 40 | |
| 41 // Calling this function only makes sense for shared mode streams, since | |
| 42 // if the device will be opened in exclusive mode, then the application | |
| 43 // specified format is used instead. However, the result of this method can | |
| 44 // be useful for testing purposes so we don't DCHECK here. | |
| 45 DLOG_IF(WARNING, WASAPIAudioOutputStream::GetShareMode() == | |
| 46 AUDCLNT_SHAREMODE_EXCLUSIVE) << | |
| 47 "The mixing sample rate will be ignored for exclusive-mode streams."; | |
| 48 | |
| 49 // It is assumed that this static method is called from a COM thread, i.e., | |
| 50 // CoInitializeEx() is not called here again to avoid STA/MTA conflicts. | |
| 51 ScopedComPtr<IMMDeviceEnumerator> enumerator; | |
| 52 HRESULT hr = CoCreateInstance(__uuidof(MMDeviceEnumerator), | |
| 53 NULL, | |
| 54 CLSCTX_INPROC_SERVER, | |
| 55 __uuidof(IMMDeviceEnumerator), | |
| 56 enumerator.ReceiveVoid()); | |
| 57 if (FAILED(hr)) | |
| 58 return hr; | |
| 59 | |
| 60 ScopedComPtr<IMMDevice> endpoint_device; | |
| 61 hr = enumerator->GetDefaultAudioEndpoint(eRender, | |
| 62 device_role, | |
| 63 endpoint_device.Receive()); | |
| 64 if (FAILED(hr)) | |
| 65 return hr; | |
| 66 | |
| 67 ScopedComPtr<IAudioClient> audio_client; | |
| 68 hr = endpoint_device->Activate(__uuidof(IAudioClient), | |
| 69 CLSCTX_INPROC_SERVER, | |
| 70 NULL, | |
| 71 audio_client.ReceiveVoid()); | |
| 72 return SUCCEEDED(hr) ? audio_client->GetMixFormat(device_format) : hr; | |
| 73 } | 41 } |
| 74 | 42 |
| 75 // Retrieves an integer mask which corresponds to the channel layout the | 43 // Retrieves an integer mask which corresponds to the channel layout the |
| 76 // audio engine uses for its internal processing/mixing of shared-mode | 44 // audio engine uses for its internal processing/mixing of shared-mode |
| 77 // streams. This mask indicates which channels are present in the multi- | 45 // streams. This mask indicates which channels are present in the multi- |
| 78 // channel stream. The least significant bit corresponds with the Front Left | 46 // channel stream. The least significant bit corresponds with the Front Left |
| 79 // speaker, the next least significant bit corresponds to the Front Right | 47 // speaker, the next least significant bit corresponds to the Front Right |
| 80 // speaker, and so on, continuing in the order defined in KsMedia.h. | 48 // speaker, and so on, continuing in the order defined in KsMedia.h. |
| 81 // See http://msdn.microsoft.com/en-us/library/windows/hardware/ff537083(v=vs.85).aspx | 49 // See http://msdn.microsoft.com/en-us/library/windows/hardware/ff537083(v=vs.85).aspx |
| 82 // for more details. | 50 // for more details. |
| 83 static ChannelConfig GetChannelConfig() { | 51 static ChannelConfig GetChannelConfig() { |
| 84 // Use a WAVEFORMATEXTENSIBLE structure since it can specify both the | 52 ScopedComPtr<IAudioClient> client; |
|
tommi (sloooow) - chröme
2013/01/31 13:42:08
nit: pass the return value of CreateDefaultClient…
henrika (OOO until Aug 14)
2013/01/31 14:29:38
These methods are modified. Please check again. Ve…
| |
| 85 // number of channels and the mapping of channels to speakers for | 53 client = CoreAudioUtil::CreateDefaultClient(eRender, eConsole); |
| 86 // multichannel devices. | 54 if (!client) |
| 87 base::win::ScopedCoMem<WAVEFORMATPCMEX> format_ex; | 55 return 0; |
| 88 HRESULT hr = S_FALSE; | 56 |
| 89 hr = GetMixFormat(eConsole, reinterpret_cast<WAVEFORMATEX**>(&format_ex)); | 57 WAVEFORMATPCMEX format; |
| 58 HRESULT hr = CoreAudioUtil::GetSharedModeMixFormat(client, &format); | |
| 90 if (FAILED(hr)) | 59 if (FAILED(hr)) |
| 91 return 0; | 60 return 0; |
| 92 | 61 |
| 93 // The dwChannelMask member specifies which channels are present in the | 62 // The dwChannelMask member specifies which channels are present in the |
| 94 // multichannel stream. The least significant bit corresponds to the | 63 // multichannel stream. The least significant bit corresponds to the |
| 95 // front left speaker, the next least significant bit corresponds to the | 64 // front left speaker, the next least significant bit corresponds to the |
| 96 // front right speaker, and so on. | 65 // front right speaker, and so on. |
| 97 // See http://msdn.microsoft.com/en-us/library/windows/desktop/dd757714(v=vs.85).aspx | 66 // See http://msdn.microsoft.com/en-us/library/windows/desktop/dd757714(v=vs.85).aspx |
| 98 // for more details on the channel mapping. | 67 // for more details on the channel mapping. |
| 99 DVLOG(2) << "dwChannelMask: 0x" << std::hex << format_ex->dwChannelMask; | 68 DVLOG(2) << "dwChannelMask: 0x" << std::hex << format.dwChannelMask; |
| 100 | 69 return static_cast<ChannelConfig>(format.dwChannelMask); |
| 101 #if !defined(NDEBUG) | |
| 102 // See http://en.wikipedia.org/wiki/Surround_sound for more details on | |
| 103 // how to name various speaker configurations. The list below is not complete. | |
| 104 const char* speaker_config = "Undefined"; | |
| 105 switch (format_ex->dwChannelMask) { | |
| 106 case KSAUDIO_SPEAKER_MONO: | |
| 107 speaker_config = "Mono"; | |
| 108 break; | |
| 109 case KSAUDIO_SPEAKER_STEREO: | |
| 110 speaker_config = "Stereo"; | |
| 111 break; | |
| 112 case KSAUDIO_SPEAKER_5POINT1_SURROUND: | |
| 113 speaker_config = "5.1 surround"; | |
| 114 break; | |
| 115 case KSAUDIO_SPEAKER_5POINT1: | |
| 116 speaker_config = "5.1"; | |
| 117 break; | |
| 118 case KSAUDIO_SPEAKER_7POINT1_SURROUND: | |
| 119 speaker_config = "7.1 surround"; | |
| 120 break; | |
| 121 case KSAUDIO_SPEAKER_7POINT1: | |
| 122 speaker_config = "7.1"; | |
| 123 break; | |
| 124 default: | |
| 125 break; | |
| 126 } | |
| 127 DVLOG(2) << "speaker configuration: " << speaker_config; | |
| 128 #endif | |
| 129 | |
| 130 return static_cast<ChannelConfig>(format_ex->dwChannelMask); | |
| 131 } | 70 } |
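To make the mask semantics above concrete: each set bit in the returned ChannelConfig contributes one channel, in the KsMedia.h bit order. A minimal standalone sketch (not part of the CL; the SPEAKER_*/KSAUDIO_SPEAKER_* constants come from the Windows SDK's ksmedia.h):

```cpp
// Sketch: decoding a ChannelConfig mask such as the one returned by
// GetChannelConfig() above. Assumes the Windows SDK headers.
#include <windows.h>
#include <ksmedia.h>

#include <cstdint>

// The number of channels present equals the number of set bits.
static int ChannelsFromConfig(uint32_t channel_config) {
  int channels = 0;
  for (uint32_t mask = channel_config; mask != 0; mask &= (mask - 1))
    ++channels;  // Clear the lowest set bit per iteration.
  return channels;
}

// Example: KSAUDIO_SPEAKER_STEREO == SPEAKER_FRONT_LEFT |
// SPEAKER_FRONT_RIGHT == 0x3, so ChannelsFromConfig() returns 2;
// testing (mask & SPEAKER_LOW_FREQUENCY) tells whether an LFE
// channel is present.
```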
| 132 | 71 |
| 133 // Converts Microsoft's channel configuration to ChannelLayout. | 72 // Converts Microsoft's channel configuration to ChannelLayout. |
| 134 // This mapping is not perfect but the best we can do given the current | 73 // This mapping is not perfect but the best we can do given the current |
| 135 // ChannelLayout enumerator and the Windows-specific speaker configurations | 74 // ChannelLayout enumerator and the Windows-specific speaker configurations |
| 136 // defined in ksmedia.h. Don't assume that the channel ordering in | 75 // defined in ksmedia.h. Don't assume that the channel ordering in |
| 137 // ChannelLayout is exactly the same as the Windows specific configuration. | 76 // ChannelLayout is exactly the same as the Windows specific configuration. |
| 138 // As an example: KSAUDIO_SPEAKER_7POINT1_SURROUND is mapped to | 77 // As an example: KSAUDIO_SPEAKER_7POINT1_SURROUND is mapped to |
| 139 // CHANNEL_LAYOUT_7_1 but the positions of Back L, Back R and Side L, Side R | 78 // CHANNEL_LAYOUT_7_1 but the positions of Back L, Back R and Side L, Side R |
| 140 // speakers are different in these two definitions. | 79 // speakers are different in these two definitions. |
| (...skipping 24 matching lines...) | |
| 165 } | 104 } |
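The body of ChannelConfigToChannelLayout() falls inside the elided hunk above. Based only on the comment, a hypothetical sketch of its shape; the 7.1-surround pair is the one mapping the comment confirms, and the remaining cases are illustrative assumptions:

```cpp
// Hypothetical reconstruction of the elided mapping (illustrative only).
static ChannelLayout ChannelConfigToChannelLayout(ChannelConfig config) {
  switch (config) {
    case KSAUDIO_SPEAKER_MONO:
      return CHANNEL_LAYOUT_MONO;
    case KSAUDIO_SPEAKER_STEREO:
      return CHANNEL_LAYOUT_STEREO;
    case KSAUDIO_SPEAKER_7POINT1_SURROUND:
      // Confirmed by the comment above; note that Back L/R and Side L/R
      // positions differ between the two definitions.
      return CHANNEL_LAYOUT_7_1;
    default:
      DVLOG(2) << "Unsupported channel config: " << config;
      return CHANNEL_LAYOUT_UNSUPPORTED;
  }
}
```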
| 166 | 105 |
| 167 // static | 106 // static |
| 168 AUDCLNT_SHAREMODE WASAPIAudioOutputStream::GetShareMode() { | 107 AUDCLNT_SHAREMODE WASAPIAudioOutputStream::GetShareMode() { |
| 169 const CommandLine* cmd_line = CommandLine::ForCurrentProcess(); | 108 const CommandLine* cmd_line = CommandLine::ForCurrentProcess(); |
| 170 if (cmd_line->HasSwitch(switches::kEnableExclusiveAudio)) | 109 if (cmd_line->HasSwitch(switches::kEnableExclusiveAudio)) |
| 171 return AUDCLNT_SHAREMODE_EXCLUSIVE; | 110 return AUDCLNT_SHAREMODE_EXCLUSIVE; |
| 172 return AUDCLNT_SHAREMODE_SHARED; | 111 return AUDCLNT_SHAREMODE_SHARED; |
| 173 } | 112 } |
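As context for GetShareMode(): exclusive mode is strictly opt-in via the kEnableExclusiveAudio switch; assuming the switch string follows the constant's naming convention, this corresponds to launching the browser with `--enable-exclusive-audio`. Without it, every stream runs shared through the Windows audio engine's mixer.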
| 174 | 113 |
| 114 // static | |
| 115 int WASAPIAudioOutputStream::HardwareChannelCount() { | |
| 116 ScopedComPtr<IAudioClient> client; | |
|
DaleCurtis
2013/01/31 02:34:33
This section of code (115-124) is repeated pretty…
tommi (sloooow) - chröme
2013/01/31 13:42:08
+1
henrika (OOO until Aug 14)
2013/01/31 14:29:38
Done. Much less overhead now. Thanks.
| |
| 117 client = CoreAudioUtil::CreateDefaultClient(eRender, eConsole); | |
| 118 if (!client) | |
| 119 return 0; | |
| 120 | |
| 121 WAVEFORMATPCMEX format; | |
| 122 HRESULT hr = CoreAudioUtil::GetSharedModeMixFormat(client, &format); | |
| 123 if (FAILED(hr)) | |
| 124 return 0; | |
| 125 | |
| 126 // Number of channels in the stream. Corresponds to the number of bits | |
| 127 // set in the dwChannelMask. | |
| 128 DVLOG(2) << "nChannels: " << format.Format.nChannels; | |
| 129 return static_cast<int>(format.Format.nChannels); | |
| 130 } | |
| 131 | |
| 132 // static | |
| 133 ChannelLayout WASAPIAudioOutputStream::HardwareChannelLayout() { | |
| 134 return ChannelConfigToChannelLayout(GetChannelConfig()); | |
| 135 } | |
| 136 | |
| 137 // static | |
| 138 int WASAPIAudioOutputStream::HardwareSampleRate(ERole device_role) { | |
| 139 ScopedComPtr<IAudioClient> client; | |
| 140 client = CoreAudioUtil::CreateDefaultClient(eRender, device_role); | |
| 141 if (!client) | |
| 142 return 0; | |
| 143 | |
| 144 WAVEFORMATPCMEX format; | |
| 145 HRESULT hr = CoreAudioUtil::GetSharedModeMixFormat(client, &format); | |
| 146 if (FAILED(hr)) | |
| 147 return 0; | |
| 148 | |
| 149 DVLOG(2) << "nSamplesPerSec: " << format.Format.nSamplesPerSec; | |
| 150 return static_cast<int>(format.Format.nSamplesPerSec); | |
| 151 } | |
| 152 | |
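Per DaleCurtis's comment above, the CreateDefaultClient() + GetSharedModeMixFormat() preamble recurs in HardwareChannelCount(), HardwareSampleRate() and GetChannelConfig(). One possible factoring, sketched here as a hypothetical helper (the CL as shown keeps the calls inline):

```cpp
// Hypothetical helper collapsing the repeated pattern. Fills |format|
// with the shared-mode mix format of the default render device for
// |device_role|; returns false on any failure.
static bool GetDefaultRenderMixFormat(ERole device_role,
                                      WAVEFORMATPCMEX* format) {
  ScopedComPtr<IAudioClient> client =
      CoreAudioUtil::CreateDefaultClient(eRender, device_role);
  if (!client)
    return false;
  return SUCCEEDED(CoreAudioUtil::GetSharedModeMixFormat(client, format));
}
```

Each static accessor would then shrink to one helper call plus a single field read, e.g. `format.Format.nSamplesPerSec` for HardwareSampleRate().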
| 175 WASAPIAudioOutputStream::WASAPIAudioOutputStream(AudioManagerWin* manager, | 153 WASAPIAudioOutputStream::WASAPIAudioOutputStream(AudioManagerWin* manager, |
| 176 const AudioParameters& params, | 154 const AudioParameters& params, |
| 177 ERole device_role) | 155 ERole device_role) |
| 178 : creating_thread_id_(base::PlatformThread::CurrentId()), | 156 : creating_thread_id_(base::PlatformThread::CurrentId()), |
| 179 manager_(manager), | 157 manager_(manager), |
| 180 opened_(false), | 158 opened_(false), |
| 181 restart_rendering_mode_(false), | 159 audio_parameters_are_valid_(false), |
| 182 volume_(1.0), | 160 volume_(1.0), |
| 183 endpoint_buffer_size_frames_(0), | 161 endpoint_buffer_size_frames_(0), |
| 184 device_role_(device_role), | 162 device_role_(device_role), |
| 185 share_mode_(GetShareMode()), | 163 share_mode_(GetShareMode()), |
| 186 client_channel_count_(params.channels()), | |
| 187 num_written_frames_(0), | 164 num_written_frames_(0), |
| 188 source_(NULL), | 165 source_(NULL), |
| 189 audio_bus_(AudioBus::Create(params)) { | 166 audio_bus_(AudioBus::Create(params)) { |
| 190 DCHECK(manager_); | 167 DCHECK(manager_); |
| 168 DVLOG(1) << "WASAPIAudioOutputStream::WASAPIAudioOutputStream()"; | |
| 169 DVLOG_IF(1, share_mode_ == AUDCLNT_SHAREMODE_EXCLUSIVE) | |
| 170 << "Core Audio (WASAPI) EXCLUSIVE MODE is enabled."; | |
| 171 | |
| 172 if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) { | |
| 173 // Verify that the input audio parameters are identical (bit depth is | |
| 174 // excluded) to the preferred (native) audio parameters. Open() will fail | |
| 175 // if this is not the case. | |
| 176 AudioParameters preferred_params; | |
| 177 HRESULT hr = CoreAudioUtil::GetPreferredAudioParameters( | |
| 178 eRender, device_role, &preferred_params); | |
| 179 audio_parameters_are_valid_ = | |
| 180 (SUCCEEDED(hr) && CompareAudioParameters(params, preferred_params)); | |
|
DaleCurtis
2013/01/31 02:34:33
extraneous parenthesis.
henrika (OOO until Aug 14)
2013/01/31 14:29:38
Done.
| |
| 181 DLOG_IF(WARNING, !audio_parameters_are_valid_) | |
| 182 << "Input and preferred parameters are not identical."; | |
| 183 } | |
| 191 | 184 |
| 192 // Load the Avrt DLL if not already loaded. Required to support MMCSS. | 185 // Load the Avrt DLL if not already loaded. Required to support MMCSS. |
| 193 bool avrt_init = avrt::Initialize(); | 186 bool avrt_init = avrt::Initialize(); |
| 194 DCHECK(avrt_init) << "Failed to load the avrt.dll"; | 187 DCHECK(avrt_init) << "Failed to load the avrt.dll"; |
| 195 | 188 |
| 196 if (share_mode_ == AUDCLNT_SHAREMODE_EXCLUSIVE) { | |
| 197 VLOG(1) << ">> Note that EXCLUSIVE MODE is enabled <<"; | |
| 198 } | |
| 199 | |
| 200 // Set up the desired render format specified by the client. We use the | 189 // Set up the desired render format specified by the client. We use the |
| 201 // WAVE_FORMAT_EXTENSIBLE structure to ensure that multiple channel ordering | 190 // WAVE_FORMAT_EXTENSIBLE structure to ensure that multiple channel ordering |
| 202 // and high precision data can be supported. | 191 // and high precision data can be supported. |
| 203 | 192 |
| 204 // Begin with the WAVEFORMATEX structure that specifies the basic format. | 193 // Begin with the WAVEFORMATEX structure that specifies the basic format. |
| 205 WAVEFORMATEX* format = &format_.Format; | 194 WAVEFORMATEX* format = &format_.Format; |
| 206 format->wFormatTag = WAVE_FORMAT_EXTENSIBLE; | 195 format->wFormatTag = WAVE_FORMAT_EXTENSIBLE; |
| 207 format->nChannels = client_channel_count_; | 196 format->nChannels = params.channels(); |
| 208 format->nSamplesPerSec = params.sample_rate(); | 197 format->nSamplesPerSec = params.sample_rate(); |
| 209 format->wBitsPerSample = params.bits_per_sample(); | 198 format->wBitsPerSample = params.bits_per_sample(); |
| 210 format->nBlockAlign = (format->wBitsPerSample / 8) * format->nChannels; | 199 format->nBlockAlign = (format->wBitsPerSample / 8) * format->nChannels; |
| 211 format->nAvgBytesPerSec = format->nSamplesPerSec * format->nBlockAlign; | 200 format->nAvgBytesPerSec = format->nSamplesPerSec * format->nBlockAlign; |
| 212 format->cbSize = sizeof(WAVEFORMATEXTENSIBLE) - sizeof(WAVEFORMATEX); | 201 format->cbSize = sizeof(WAVEFORMATEXTENSIBLE) - sizeof(WAVEFORMATEX); |
| 213 | 202 |
| 214 // Add the parts which are unique to WAVE_FORMAT_EXTENSIBLE. | 203 // Add the parts which are unique to WAVE_FORMAT_EXTENSIBLE. |
| 215 format_.Samples.wValidBitsPerSample = params.bits_per_sample(); | 204 format_.Samples.wValidBitsPerSample = params.bits_per_sample(); |
| 216 format_.dwChannelMask = GetChannelConfig(); | 205 format_.dwChannelMask = GetChannelConfig(); |
| 217 format_.SubFormat = KSDATAFORMAT_SUBTYPE_PCM; | 206 format_.SubFormat = KSDATAFORMAT_SUBTYPE_PCM; |
| 218 | 207 |
| 219 // Size in bytes of each audio frame. | |
| 220 frame_size_ = format->nBlockAlign; | |
| 221 | |
| 222 // Store size (in different units) of audio packets which we expect to | 208 // Store size (in different units) of audio packets which we expect to |
| 223 // get from the audio endpoint device in each render event. | 209 // get from the audio endpoint device in each render event. |
| 224 packet_size_frames_ = params.GetBytesPerBuffer() / format->nBlockAlign; | 210 packet_size_frames_ = params.frames_per_buffer(); |
| 225 packet_size_bytes_ = params.GetBytesPerBuffer(); | 211 packet_size_bytes_ = params.GetBytesPerBuffer(); |
| 226 packet_size_ms_ = (1000.0 * packet_size_frames_) / params.sample_rate(); | 212 packet_size_ms_ = (1000.0 * packet_size_frames_) / params.sample_rate(); |
| 227 DVLOG(1) << "Number of bytes per audio frame : " << frame_size_; | 213 DVLOG(1) << "Number of bytes per audio frame : " << format->nBlockAlign; |
| 228 DVLOG(1) << "Number of audio frames per packet: " << packet_size_frames_; | 214 DVLOG(1) << "Number of audio frames per packet: " << packet_size_frames_; |
| 229 DVLOG(1) << "Number of bytes per packet : " << packet_size_bytes_; | 215 DVLOG(1) << "Number of bytes per packet : " << packet_size_bytes_; |
| 230 DVLOG(1) << "Number of milliseconds per packet: " << packet_size_ms_; | 216 DVLOG(1) << "Number of milliseconds per packet: " << packet_size_ms_; |
| 231 | 217 |
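A worked example of the packet-size bookkeeping above, with illustrative values rather than ones taken from the CL:

```cpp
// Illustrative numbers: 48000 Hz, 480 frames per buffer, 2 channels,
// 16 bits per sample.
//   nBlockAlign         = (16 / 8) * 2           = 4 bytes per frame
//   packet_size_frames_ = 480
//   packet_size_bytes_  = 480 * 4                = 1920 bytes
//   packet_size_ms_     = 1000.0 * 480 / 48000   = 10 ms
```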
| 232 // All events are auto-reset events and non-signaled initially. | 218 // All events are auto-reset events and non-signaled initially. |
| 233 | 219 |
| 234 // Create the event which the audio engine will signal each time | 220 // Create the event which the audio engine will signal each time |
| 235 // a buffer becomes ready to be processed by the client. | 221 // a buffer becomes ready to be processed by the client. |
| 236 audio_samples_render_event_.Set(CreateEvent(NULL, FALSE, FALSE, NULL)); | 222 audio_samples_render_event_.Set(CreateEvent(NULL, FALSE, FALSE, NULL)); |
| 237 DCHECK(audio_samples_render_event_.IsValid()); | 223 DCHECK(audio_samples_render_event_.IsValid()); |
| 238 | 224 |
| 239 // Create the event which will be set in Stop() when capturing shall stop. | 225 // Create the event which will be set in Stop() when capturing shall stop. |
| 240 stop_render_event_.Set(CreateEvent(NULL, FALSE, FALSE, NULL)); | 226 stop_render_event_.Set(CreateEvent(NULL, FALSE, FALSE, NULL)); |
| 241 DCHECK(stop_render_event_.IsValid()); | 227 DCHECK(stop_render_event_.IsValid()); |
| 242 } | 228 } |
| 243 | 229 |
| 244 WASAPIAudioOutputStream::~WASAPIAudioOutputStream() {} | 230 WASAPIAudioOutputStream::~WASAPIAudioOutputStream() {} |
| 245 | 231 |
| 246 bool WASAPIAudioOutputStream::Open() { | 232 bool WASAPIAudioOutputStream::Open() { |
| 233 DVLOG(1) << "WASAPIAudioOutputStream::Open()"; | |
| 247 DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_); | 234 DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_); |
| 248 if (opened_) | 235 if (opened_) |
| 249 return true; | 236 return true; |
| 250 | 237 |
| 251 // Channel mixing is not supported, it must be handled by ChannelMixer. | 238 |
| 252 if (format_.Format.nChannels != client_channel_count_) { | 239 // Audio parameters must be identical to the preferred set of parameters |
| 253 LOG(ERROR) << "Channel down-mixing is not supported."; | 240 // if shared mode (default) is utilized. |
| 241 if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) { | |
| 242 if (!audio_parameters_are_valid_) { | |
| 243 LOG(ERROR) << "Audio parameters are not valid."; | |
| 244 return false; | |
| 245 } | |
| 246 } | |
| 247 | |
| 248 // Create an IAudioClient interface for the default rendering IMMDevice. | |
| 249 ScopedComPtr<IAudioClient> audio_client = | |
| 250 CoreAudioUtil::CreateDefaultClient(eRender, device_role_); | |
| 251 if (!audio_client) | |
| 252 return false; | |
| 253 | |
| 254 // Extra sanity to ensure that the provided device format is still valid. | |
| 255 if (!CoreAudioUtil::IsFormatSupported(audio_client, | |
|
DaleCurtis
2013/01/31 02:34:33
Won't it just return false anyways a little further…
henrika (OOO until Aug 14)
2013/01/31 14:29:38
yes but we also get a nice summary (using DLOG) about…
| |
| 256 share_mode_, | |
| 257 &format_)) { | |
| 254 return false; | 258 return false; |
| 255 } | 259 } |
| 256 | 260 |
| 257 // Create an IMMDeviceEnumerator interface and obtain a reference to | 261 HRESULT hr = S_FALSE; |
| 258 // the IMMDevice interface of the default rendering device with the | 262 if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) { |
| 259 // specified role. | 263 // Initialize the audio stream between the client and the device in shared |
| 260 HRESULT hr = SetRenderDevice(); | 264 // mode and using event-driven buffer handling. |
| 261 if (FAILED(hr)) { | 265 hr = CoreAudioUtil::SharedModeInitialize( |
| 262 return false; | 266 audio_client, &format_, audio_samples_render_event_.Get(), |
| 267 &endpoint_buffer_size_frames_); | |
| 268 if (FAILED(hr)) | |
| 269 return false; | |
| 270 | |
| 271 // We know from experience that the best possible callback sequence is | |
| 272 // achieved when the packet size (given by the native device period) | |
| 273 // is an even multiple of the endpoint buffer size. | |
| 274 // Examples: 48kHz => 960 % 480, 44.1kHz => 896 % 448 or 882 % 441. | |
| 275 if (endpoint_buffer_size_frames_ % packet_size_frames_ != 0) { | |
| 276 DLOG(ERROR) << "Bailing out due to non-perfect timing."; | |
| 277 return false; | |
| 278 } | |
| 279 } else { | |
| 280 // TODO(henrika): break out to CoreAudioUtil::ExclusiveModeInitialize() | |
| 281 // when removing the enable-exclusive-audio flag. | |
| 282 hr = ExclusiveModeInitialization(audio_client, | |
| 283 audio_samples_render_event_.Get(), | |
| 284 &endpoint_buffer_size_frames_); | |
| 285 if (FAILED(hr)) | |
| 286 return false; | |
| 287 | |
| 288 // The buffer scheme for exclusive mode streams is not designed for max | |
| 289 // flexibility. We only allow a "perfect match" between the packet size set | |
| 290 // by the user and the actual endpoint buffer size. | |
| 291 if (endpoint_buffer_size_frames_ != packet_size_frames_) { | |
| 292 DLOG(ERROR) << "Bailing out due to non-perfect timing."; | |
| 293 return false; | |
| 294 } | |
| 263 } | 295 } |
| 264 | 296 |
| 265 // Obtain an IAudioClient interface which enables us to create and initialize | 297 // Create an IAudioRenderClient client for an initialized IAudioClient. |
| 266 // an audio stream between an audio application and the audio engine. | 298 // The IAudioRenderClient interface enables us to write output data to |
| 267 hr = ActivateRenderDevice(); | 299 // a rendering endpoint buffer. |
| 268 if (FAILED(hr)) { | 300 ScopedComPtr<IAudioRenderClient> audio_render_client = |
| 301 CoreAudioUtil::CreateRenderClient(audio_client); | |
| 302 if (!audio_render_client) | |
| 269 return false; | 303 return false; |
| 270 } | |
| 271 | 304 |
| 272 // Verify that the selected audio endpoint supports the specified format | 305 // Store valid COM interfaces. |
| 273 // set during construction. | 306 audio_client_ = audio_client; |
| 274 // In exclusive mode, the client can choose to open the stream in any audio | 307 audio_render_client_ = audio_render_client; |
| 275 // format that the endpoint device supports. In shared mode, the client must | |
| 276 // open the stream in the mix format that is currently in use by the audio | |
| 277 // engine (or a format that is similar to the mix format). The audio engine's | |
| 278 // input streams and the output mix from the engine are all in this format. | |
| 279 if (!DesiredFormatIsSupported()) { | |
| 280 return false; | |
| 281 } | |
| 282 | |
| 283 // Initialize the audio stream between the client and the device using | |
| 284 // shared or exclusive mode and a lowest possible glitch-free latency. | |
| 285 // We will enter different code paths depending on the specified share mode. | |
| 286 hr = InitializeAudioEngine(); | |
| 287 if (FAILED(hr)) { | |
| 288 return false; | |
| 289 } | |
| 290 | 308 |
| 291 opened_ = true; | 309 opened_ = true; |
| 292 return true; | 310 return true; |
| 293 } | 311 } |
| 294 | 312 |
| 295 void WASAPIAudioOutputStream::Start(AudioSourceCallback* callback) { | 313 void WASAPIAudioOutputStream::Start(AudioSourceCallback* callback) { |
| 314 DVLOG(1) << "WASAPIAudioOutputStream::Start()"; | |
| 296 DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_); | 315 DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_); |
| 297 CHECK(callback); | 316 CHECK(callback); |
| 298 CHECK(opened_); | 317 CHECK(opened_); |
| 299 | 318 |
| 300 if (render_thread_.get()) { | 319 if (render_thread_.get()) { |
| 301 CHECK_EQ(callback, source_); | 320 CHECK_EQ(callback, source_); |
| 302 return; | 321 return; |
| 303 } | 322 } |
| 304 | 323 |
| 305 if (restart_rendering_mode_) { | |
| 306 // The selected audio device has been removed or disabled and a new | |
| 307 // default device has been enabled instead. The current implementation | |
| 308 // does not to support this sequence of events. Given that Open() | |
| 309 // and Start() are usually called in one sequence; it should be a very | |
| 310 // rare event. | |
| 311 // TODO(henrika): it is possible to extend the functionality here. | |
| 312 LOG(ERROR) << "Unable to start since the selected default device has " | |
| 313 "changed since Open() was called."; | |
| 314 return; | |
| 315 } | |
| 316 | |
| 317 source_ = callback; | 324 source_ = callback; |
| 318 | 325 |
| 319 // Avoid start-up glitches by filling up the endpoint buffer with "silence" | |
| 320 // before starting the stream. | |
| 321 BYTE* data_ptr = NULL; | |
| 322 HRESULT hr = audio_render_client_->GetBuffer(endpoint_buffer_size_frames_, | |
| 323 &data_ptr); | |
| 324 if (FAILED(hr)) { | |
| 325 DLOG(ERROR) << "Failed to use rendering audio buffer: " << std::hex << hr; | |
| 326 return; | |
| 327 } | |
| 328 | |
| 329 // Using the AUDCLNT_BUFFERFLAGS_SILENT flag eliminates the need to | |
| 330 // explicitly write silence data to the rendering buffer. | |
| 331 audio_render_client_->ReleaseBuffer(endpoint_buffer_size_frames_, | |
| 332 AUDCLNT_BUFFERFLAGS_SILENT); | |
| 333 num_written_frames_ = endpoint_buffer_size_frames_; | |
| 334 | |
| 335 // Sanity check: verify that the endpoint buffer is filled with silence. | |
| 336 UINT32 num_queued_frames = 0; | |
| 337 audio_client_->GetCurrentPadding(&num_queued_frames); | |
| 338 DCHECK(num_queued_frames == num_written_frames_); | |
| 339 | |
| 340 // Create and start the thread that will drive the rendering by waiting for | 326 // Create and start the thread that will drive the rendering by waiting for |
| 341 // render events. | 327 // render events. |
| 342 render_thread_.reset( | 328 render_thread_.reset( |
| 343 new base::DelegateSimpleThread(this, "wasapi_render_thread")); | 329 new base::DelegateSimpleThread(this, "wasapi_render_thread")); |
| 344 render_thread_->Start(); | 330 render_thread_->Start(); |
| 331 if (!render_thread_->HasBeenStarted()) { | |
| 332 DLOG(ERROR) << "Failed to start WASAPI render thread."; | |
| 333 return; | |
| 334 } | |
| 335 | |
| 336 // Ensure that the endpoint buffer is prepared with silence. | |
| 337 UINT32 num_filled_frames = 0; | |
| 338 if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) { | |
| 339 if (!FillEndpointBufferWithSilence(&num_filled_frames)) { | |
| 340 DLOG(WARNING) << "Failed to prepare endpoint buffers with silence."; | |
| 341 return; | |
| 342 } | |
| 343 DCHECK_EQ(num_filled_frames, endpoint_buffer_size_frames_); | |
| 344 } | |
| 345 num_written_frames_ = num_filled_frames; | |
| 345 | 346 |
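FillEndpointBufferWithSilence() is introduced by this CL but its definition is outside this hunk. Judging from the removed inline code on the left-hand side earlier in Start(), a sketch of its likely shape (an inference, not the CL's actual body):

```cpp
// Sketch of FillEndpointBufferWithSilence(), inferred from the removed
// inline code above; the actual CL implementation may differ.
bool WASAPIAudioOutputStream::FillEndpointBufferWithSilence(
    UINT32* num_written_frames) {
  UINT32 num_queued_frames = 0;
  if (FAILED(audio_client_->GetCurrentPadding(&num_queued_frames)))
    return false;

  // Space we may write without clobbering frames the engine has not read.
  UINT32 num_frames_to_fill =
      endpoint_buffer_size_frames_ - num_queued_frames;
  BYTE* data = NULL;
  if (FAILED(audio_render_client_->GetBuffer(num_frames_to_fill, &data)))
    return false;

  // AUDCLNT_BUFFERFLAGS_SILENT tells the engine to treat the buffer as
  // silence, so no explicit zero-fill is needed.
  if (FAILED(audio_render_client_->ReleaseBuffer(
          num_frames_to_fill, AUDCLNT_BUFFERFLAGS_SILENT)))
    return false;

  *num_written_frames = num_frames_to_fill;
  return true;
}
```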
| 346 // Start streaming data between the endpoint buffer and the audio engine. | 347 // Start streaming data between the endpoint buffer and the audio engine. |
| 347 hr = audio_client_->Start(); | 348 HRESULT hr = audio_client_->Start(); |
| 348 if (FAILED(hr)) { | 349 if (FAILED(hr)) { |
| 349 SetEvent(stop_render_event_.Get()); | 350 SetEvent(stop_render_event_.Get()); |
| 350 render_thread_->Join(); | 351 render_thread_->Join(); |
| 351 render_thread_.reset(); | 352 render_thread_.reset(); |
| 352 HandleError(hr); | 353 HandleError(hr); |
| 353 } | 354 } |
| 354 } | 355 } |
| 355 | 356 |
| 356 void WASAPIAudioOutputStream::Stop() { | 357 void WASAPIAudioOutputStream::Stop() { |
| 358 DVLOG(1) << "WASAPIAudioOutputStream::Stop()"; | |
| 357 DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_); | 359 DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_); |
| 358 if (!render_thread_.get()) | 360 if (!render_thread_.get()) |
| 359 return; | 361 return; |
| 360 | 362 |
| 361 // Stop output audio streaming. | 363 // Stop output audio streaming. |
| 362 HRESULT hr = audio_client_->Stop(); | 364 HRESULT hr = audio_client_->Stop(); |
| 363 if (FAILED(hr)) { | 365 if (FAILED(hr)) { |
| 364 DLOG_IF(ERROR, hr != AUDCLNT_E_NOT_INITIALIZED) | 366 DLOG_IF(ERROR, hr != AUDCLNT_E_NOT_INITIALIZED) |
| 365 << "Failed to stop output streaming: " << std::hex << hr; | 367 << "Failed to stop output streaming: " << std::hex << hr; |
| 366 } | 368 } |
| (...skipping 22 matching lines...) | |
| 389 // would fail with AUDCLNT_E_BUFFER_ERROR at IAudioRenderClient::GetBuffer(). | 391 // would fail with AUDCLNT_E_BUFFER_ERROR at IAudioRenderClient::GetBuffer(). |
| 390 // This check is only needed for shared-mode streams. | 392 // This check is only needed for shared-mode streams. |
| 391 if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) { | 393 if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) { |
| 392 UINT32 num_queued_frames = 0; | 394 UINT32 num_queued_frames = 0; |
| 393 audio_client_->GetCurrentPadding(&num_queued_frames); | 395 audio_client_->GetCurrentPadding(&num_queued_frames); |
| 394 DCHECK_EQ(0u, num_queued_frames); | 396 DCHECK_EQ(0u, num_queued_frames); |
| 395 } | 397 } |
| 396 } | 398 } |
| 397 | 399 |
| 398 void WASAPIAudioOutputStream::Close() { | 400 void WASAPIAudioOutputStream::Close() { |
| 401 DVLOG(1) << "WASAPIAudioOutputStream::Close()"; | |
| 399 DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_); | 402 DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_); |
| 400 | 403 |
| 401 // It is valid to call Close() before calling Open() or Start(). | 404 // It is valid to call Close() before calling Open() or Start(). |
| 402 // It is also valid to call Close() after Start() has been called. | 405 // It is also valid to call Close() after Start() has been called. |
| 403 Stop(); | 406 Stop(); |
| 404 | 407 |
| 405 // Inform the audio manager that we have been closed. This will cause our | 408 // Inform the audio manager that we have been closed. This will cause our |
| 406 // destruction. | 409 // destruction. |
| 407 manager_->ReleaseOutputStream(this); | 410 manager_->ReleaseOutputStream(this); |
| 408 } | 411 } |
| 409 | 412 |
| 410 void WASAPIAudioOutputStream::SetVolume(double volume) { | 413 void WASAPIAudioOutputStream::SetVolume(double volume) { |
| 411 DVLOG(1) << "SetVolume(volume=" << volume << ")"; | 414 DVLOG(1) << "SetVolume(volume=" << volume << ")"; |
| 412 float volume_float = static_cast<float>(volume); | 415 float volume_float = static_cast<float>(volume); |
| 413 if (volume_float < 0.0f || volume_float > 1.0f) { | 416 if (volume_float < 0.0f || volume_float > 1.0f) { |
| 414 return; | 417 return; |
| 415 } | 418 } |
| 416 volume_ = volume_float; | 419 volume_ = volume_float; |
| 417 } | 420 } |
| 418 | 421 |
| 419 void WASAPIAudioOutputStream::GetVolume(double* volume) { | 422 void WASAPIAudioOutputStream::GetVolume(double* volume) { |
| 420 DVLOG(1) << "GetVolume()"; | 423 DVLOG(1) << "GetVolume()"; |
| 421 *volume = static_cast<double>(volume_); | 424 *volume = static_cast<double>(volume_); |
| 422 } | 425 } |
| 423 | 426 |
| 424 // static | |
| 425 int WASAPIAudioOutputStream::HardwareChannelCount() { | |
| 426 // Use a WAVEFORMATEXTENSIBLE structure since it can specify both the | |
| 427 // number of channels and the mapping of channels to speakers for | |
| 428 // multichannel devices. | |
| 429 base::win::ScopedCoMem<WAVEFORMATPCMEX> format_ex; | |
| 430 HRESULT hr = GetMixFormat( | |
| 431 eConsole, reinterpret_cast<WAVEFORMATEX**>(&format_ex)); | |
| 432 if (FAILED(hr)) | |
| 433 return 0; | |
| 434 | |
| 435 // Number of channels in the stream. Corresponds to the number of bits | |
| 436 // set in the dwChannelMask. | |
| 437 DVLOG(1) << "endpoint channels (out): " << format_ex->Format.nChannels; | |
| 438 | |
| 439 return static_cast<int>(format_ex->Format.nChannels); | |
| 440 } | |
| 441 | |
| 442 // static | |
| 443 ChannelLayout WASAPIAudioOutputStream::HardwareChannelLayout() { | |
| 444 return ChannelConfigToChannelLayout(GetChannelConfig()); | |
| 445 } | |
| 446 | |
| 447 // static | |
| 448 int WASAPIAudioOutputStream::HardwareSampleRate(ERole device_role) { | |
| 449 base::win::ScopedCoMem<WAVEFORMATEX> format; | |
| 450 HRESULT hr = GetMixFormat(device_role, &format); | |
| 451 if (FAILED(hr)) | |
| 452 return 0; | |
| 453 | |
| 454 DVLOG(2) << "nSamplesPerSec: " << format->nSamplesPerSec; | |
| 455 return static_cast<int>(format->nSamplesPerSec); | |
| 456 } | |
| 457 | |
| 458 void WASAPIAudioOutputStream::Run() { | 427 void WASAPIAudioOutputStream::Run() { |
| 459 ScopedCOMInitializer com_init(ScopedCOMInitializer::kMTA); | 428 ScopedCOMInitializer com_init(ScopedCOMInitializer::kMTA); |
| 460 | 429 |
| 461 // Increase the thread priority. | 430 // Increase the thread priority. |
| 462 render_thread_->SetThreadPriority(base::kThreadPriority_RealtimeAudio); | 431 render_thread_->SetThreadPriority(base::kThreadPriority_RealtimeAudio); |
| 463 | 432 |
| 464 // Enable MMCSS to ensure that this thread receives prioritized access to | 433 // Enable MMCSS to ensure that this thread receives prioritized access to |
| 465 // CPU resources. | 434 // CPU resources. |
| 466 DWORD task_index = 0; | 435 DWORD task_index = 0; |
| 467 HANDLE mm_task = avrt::AvSetMmThreadCharacteristics(L"Pro Audio", | 436 HANDLE mm_task = avrt::AvSetMmThreadCharacteristics(L"Pro Audio", |
| (...skipping 39 matching lines...) | |
| 507 FALSE, | 476 FALSE, |
| 508 INFINITE); | 477 INFINITE); |
| 509 | 478 |
| 510 switch (wait_result) { | 479 switch (wait_result) { |
| 511 case WAIT_OBJECT_0 + 0: | 480 case WAIT_OBJECT_0 + 0: |
| 512 // |stop_render_event_| has been set. | 481 // |stop_render_event_| has been set. |
| 513 playing = false; | 482 playing = false; |
| 514 break; | 483 break; |
| 515 case WAIT_OBJECT_0 + 1: | 484 case WAIT_OBJECT_0 + 1: |
| 516 { | 485 { |
| 486 TRACE_EVENT0("audio", "WASAPIAudioOutputStream::Run"); | |
| 487 | |
| 517 // |audio_samples_render_event_| has been set. | 488 // |audio_samples_render_event_| has been set. |
| 518 UINT32 num_queued_frames = 0; | 489 UINT32 num_queued_frames = 0; |
| 519 uint8* audio_data = NULL; | 490 uint8* audio_data = NULL; |
| 520 | 491 |
| 521 // Contains how much new data we can write to the buffer without | 492 // Contains how much new data we can write to the buffer without |
| 522 // the risk of overwriting previously written data that the audio | 493 // the risk of overwriting previously written data that the audio |
| 523 // engine has not yet read from the buffer. | 494 // engine has not yet read from the buffer. |
| 524 size_t num_available_frames = 0; | 495 size_t num_available_frames = 0; |
| 525 | 496 |
| 526 if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) { | 497 if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) { |
| 527 // Get the padding value which represents the amount of rendering | 498 // Get the padding value which represents the amount of rendering |
| 528 // data that is queued up to play in the endpoint buffer. | 499 // data that is queued up to play in the endpoint buffer. |
| 529 hr = audio_client_->GetCurrentPadding(&num_queued_frames); | 500 hr = audio_client_->GetCurrentPadding(&num_queued_frames); |
| 530 num_available_frames = | 501 num_available_frames = |
| 531 endpoint_buffer_size_frames_ - num_queued_frames; | 502 endpoint_buffer_size_frames_ - num_queued_frames; |
| 532 } else { | 503 } else { |
| 533 // While the stream is running, the system alternately sends one | 504 // While the stream is running, the system alternately sends one |
| 534 // buffer or the other to the client. This form of double buffering | 505 // buffer or the other to the client. This form of double buffering |
| 535 // is referred to as "ping-ponging". Each time the client receives | 506 // is referred to as "ping-ponging". Each time the client receives |
| 536 // a buffer from the system (triggers this event) the client must | 507 // a buffer from the system (triggers this event) the client must |
| 537 // process the entire buffer. Calls to the GetCurrentPadding method | 508 // process the entire buffer. Calls to the GetCurrentPadding method |
| 538 // are unnecessary because the packet size must always equal the | 509 // are unnecessary because the packet size must always equal the |
| 539 // buffer size. In contrast to the shared mode buffering scheme, | 510 // buffer size. In contrast to the shared mode buffering scheme, |
| 540 // the latency for an event-driven, exclusive-mode stream depends | 511 // the latency for an event-driven, exclusive-mode stream depends |
| 541 // directly on the buffer size. | 512 // directly on the buffer size. |
| 542 num_available_frames = endpoint_buffer_size_frames_; | 513 num_available_frames = endpoint_buffer_size_frames_; |
| 543 } | 514 } |
| 515 if (FAILED(hr)) { | |
| 516 DLOG(ERROR) << "Failed to retrieve amount of available space: " | |
| 517 << std::hex << hr; | |
| 518 continue; | |
| 519 } | |
| 544 | 520 |
| 545 // Check if there is enough available space to fit the packet size | 521 // It is my current assumption that we will always end up with a |
| 546 // specified by the client. | 522 // perfect match here where the packet size is identical to what |
| 547 if (FAILED(hr) || (num_available_frames < packet_size_frames_)) | 523 // the audio engine needs (num_available_frames). I am adding a |
| 524 // DLOG to be able to track down any deviations from this theory. | |
| 525 if ((num_available_frames > 0) && | |
| 526 (num_available_frames != packet_size_frames_)) { | |
| 527 DLOG(WARNING) << "Non-perfect timing case detected."; | |
| 548 continue; | 528 continue; |
| 529 } | |
| 549 | 530 |
| 550 // Derive the number of packets we need get from the client to | 531 // Grab all available space in the rendering endpoint buffer |
| 551 // fill up the available area in the endpoint buffer. | 532 // into which the client can write a data packet. |
| 552 // |num_packets| will always be one for exclusive-mode streams. | 533 hr = audio_render_client_->GetBuffer(packet_size_frames_, |
| 553 size_t num_packets = (num_available_frames / packet_size_frames_); | 534 &audio_data); |
| 535 if (FAILED(hr)) { | |
| 536 DLOG(ERROR) << "Failed to use rendering audio buffer: " | |
| 537 << std::hex << hr; | |
| 538 continue; | |
| 539 } | |
| 554 | 540 |
| 555 // Get data from the client/source. | 541 // Derive the audio delay which corresponds to the delay between |
| 556 for (size_t n = 0; n < num_packets; ++n) { | 542 // a render event and the time when the first audio sample in a |
| 557 // Grab all available space in the rendering endpoint buffer | 543 // packet is played out through the speaker. This delay value |
| 558 // into which the client can write a data packet. | 544 // can typically be utilized by an acoustic echo-control (AEC) |
| 559 hr = audio_render_client_->GetBuffer(packet_size_frames_, | 545 // unit at the render side. |
| 560 &audio_data); | 546 UINT64 position = 0; |
| 561 if (FAILED(hr)) { | 547 int audio_delay_bytes = 0; |
| 562 DLOG(ERROR) << "Failed to use rendering audio buffer: " | 548 hr = audio_clock->GetPosition(&position, NULL); |
| 563 << std::hex << hr; | 549 if (SUCCEEDED(hr)) { |
| 564 continue; | 550 // Stream position of the sample that is currently playing |
| 565 } | 551 // through the speaker. |
| 552 double pos_sample_playing_frames = format_.Format.nSamplesPerSec * | |
| 553 (static_cast<double>(position) / device_frequency); | |
| 566 | 554 |
| 567 // Derive the audio delay which corresponds to the delay between | 555 // Stream position of the last sample written to the endpoint |
| 568 // a render event and the time when the first audio sample in a | 556 // buffer. Note that, the packet we are about to receive in |
| 569 // packet is played out through the speaker. This delay value | 557 // the upcoming callback is also included. |
| 570 // can typically be utilized by an acoustic echo-control (AEC) | 558 size_t pos_last_sample_written_frames = |
| 571 // unit at the render side. | 559 num_written_frames_ + packet_size_frames_; |
| 572 UINT64 position = 0; | |
| 573 int audio_delay_bytes = 0; | |
| 574 hr = audio_clock->GetPosition(&position, NULL); | |
| 575 if (SUCCEEDED(hr)) { | |
| 576 // Stream position of the sample that is currently playing | |
| 577 // through the speaker. | |
| 578 double pos_sample_playing_frames = format_.Format.nSamplesPerSec * | |
| 579 (static_cast<double>(position) / device_frequency); | |
| 580 | 560 |
| 581 // Stream position of the last sample written to the endpoint | 561 // Derive the actual delay value which will be fed to the |
| 582 // buffer. Note that, the packet we are about to receive in | 562 // render client using the OnMoreData() callback. |
| 583 // the upcoming callback is also included. | 563 audio_delay_bytes = (pos_last_sample_written_frames - |
| 584 size_t pos_last_sample_written_frames = | 564 pos_sample_playing_frames) * format_.Format.nBlockAlign; |
| 585 num_written_frames_ + packet_size_frames_; | 565 } |
| 586 | 566 |
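The delay derivation above is easiest to sanity-check with numbers (all values invented for illustration; device_frequency comes from IAudioClock::GetFrequency()):

```cpp
// Walk-through with illustrative values (48 kHz, 4-byte frames):
//   device_frequency                = 10,000,000 ticks/s
//   position                        = 2,000,000 ticks   => 0.2 s
//   pos_sample_playing_frames       = 48000 * 0.2       = 9600 frames
//   num_written_frames_             = 10080 frames (21 packets of 480)
//   pos_last_sample_written_frames  = 10080 + 480       = 10560 frames
//   audio_delay_bytes = (10560 - 9600) * 4 = 3840 bytes, i.e. 960 frames
//   or 20 ms of buffered audio at 48 kHz.
```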
| 587 // Derive the actual delay value which will be fed to the | 567 // Read a data packet from the registered client source and |
| 588 // render client using the OnMoreData() callback. | 568 // deliver a delay estimate in the same callback to the client. |
| 589 audio_delay_bytes = (pos_last_sample_written_frames - | 569 // A time stamp is also stored in the AudioBuffersState. This |
| 590 pos_sample_playing_frames) * frame_size_; | 570 // time stamp can be used at the client side to compensate for |
| 591 } | 571 // the delay between the usage of the delay value and the time |
| 572 // of generation. | |
| 592 | 573 |
| 593 // Read a data packet from the registered client source and | 574 uint32 num_filled_bytes = 0; |
| 594 // deliver a delay estimate in the same callback to the client. | 575 const int bytes_per_sample = format_.Format.wBitsPerSample >> 3; |
| 595 // A time stamp is also stored in the AudioBuffersState. This | |
| 596 // time stamp can be used at the client side to compensate for | |
| 597 // the delay between the usage of the delay value and the time | |
| 598 // of generation. | |
| 599 | 576 |
| 600 uint32 num_filled_bytes = 0; | 577 int frames_filled = source_->OnMoreData( |
| 601 const int bytes_per_sample = format_.Format.wBitsPerSample >> 3; | 578 audio_bus_.get(), AudioBuffersState(0, audio_delay_bytes)); |
| 579 num_filled_bytes = frames_filled * format_.Format.nBlockAlign; | |
| 580 DCHECK_LE(num_filled_bytes, packet_size_bytes_); | |
| 602 | 581 |
| 603 int frames_filled = source_->OnMoreData( | 582 // Note: If this ever changes to output raw float the data must be |
| 604 audio_bus_.get(), AudioBuffersState(0, audio_delay_bytes)); | 583 // clipped and sanitized since it may come from an untrusted |
| 605 num_filled_bytes = frames_filled * frame_size_; | 584 // source such as NaCl. |
| 606 DCHECK_LE(num_filled_bytes, packet_size_bytes_); | 585 audio_bus_->ToInterleaved( |
| 607 // Note: If this ever changes to output raw float the data must be | 586 frames_filled, bytes_per_sample, audio_data); |
| 608 // clipped and sanitized since it may come from an untrusted | |
| 609 // source such as NaCl. | |
| 610 audio_bus_->ToInterleaved( | |
| 611 frames_filled, bytes_per_sample, audio_data); | |
| 612 | 587 |
| 613 // Perform in-place, software-volume adjustments. | 588 // Perform in-place, software-volume adjustments. |
| 614 media::AdjustVolume(audio_data, | 589 media::AdjustVolume(audio_data, |
| 615 num_filled_bytes, | 590 num_filled_bytes, |
| 616 audio_bus_->channels(), | 591 audio_bus_->channels(), |
| 617 bytes_per_sample, | 592 bytes_per_sample, |
| 618 volume_); | 593 volume_); |
| 619 | 594 |
| 620 // Zero out the part of the packet which has not been filled by | 595 // Zero out the part of the packet which has not been filled by |
| 621 // the client. Using silence is the least bad option in this | 596 // the client. Using silence is the least bad option in this |
| 622 // situation. | 597 // situation. |
| 623 if (num_filled_bytes < packet_size_bytes_) { | 598 if (num_filled_bytes < packet_size_bytes_) { |
| 624 memset(&audio_data[num_filled_bytes], 0, | 599 memset(&audio_data[num_filled_bytes], 0, |
| 625 (packet_size_bytes_ - num_filled_bytes)); | 600 (packet_size_bytes_ - num_filled_bytes)); |
| 626 } | 601 } |
| 627 | 602 |
| 628 // Release the buffer space acquired in the GetBuffer() call. | 603 // Release the buffer space acquired in the GetBuffer() call. |
| 629 DWORD flags = 0; | 604 DWORD flags = 0; |
| 630 audio_render_client_->ReleaseBuffer(packet_size_frames_, | 605 audio_render_client_->ReleaseBuffer(packet_size_frames_, |
| 631 flags); | 606 flags); |
| 632 | 607 |
| 633 num_written_frames_ += packet_size_frames_; | 608 num_written_frames_ += packet_size_frames_; |
| 634 } | |
| 635 } | 609 } |
| 636 break; | 610 break; |
| 637 default: | 611 default: |
| 638 error = true; | 612 error = true; |
| 639 break; | 613 break; |
| 640 } | 614 } |
| 641 } | 615 } |
| 642 | 616 |
| 643 if (playing && error) { | 617 if (playing && error) { |
| 644 // Stop audio rendering since something has gone wrong in our main thread | 618 // Stop audio rendering since something has gone wrong in our main thread |
| (...skipping 10 matching lines...) | |
| 655 } | 629 } |
| 656 | 630 |
| 657 void WASAPIAudioOutputStream::HandleError(HRESULT err) { | 631 void WASAPIAudioOutputStream::HandleError(HRESULT err) { |
| 658 CHECK((started() && GetCurrentThreadId() == render_thread_->tid()) || | 632 CHECK((started() && GetCurrentThreadId() == render_thread_->tid()) || |
| 659 (!started() && GetCurrentThreadId() == creating_thread_id_)); | 633 (!started() && GetCurrentThreadId() == creating_thread_id_)); |
| 660 NOTREACHED() << "Error code: " << std::hex << err; | 634 NOTREACHED() << "Error code: " << std::hex << err; |
| 661 if (source_) | 635 if (source_) |
| 662 source_->OnError(this, static_cast<int>(err)); | 636 source_->OnError(this, static_cast<int>(err)); |
| 663 } | 637 } |
| 664 | 638 |
| 665 HRESULT WASAPIAudioOutputStream::SetRenderDevice() { | 639 HRESULT WASAPIAudioOutputStream::ExclusiveModeInitialization( |
| 666 ScopedComPtr<IMMDeviceEnumerator> device_enumerator; | 640 IAudioClient* client, HANDLE event_handle, size_t* endpoint_buffer_size) { |
| 667 ScopedComPtr<IMMDevice> endpoint_device; | |
| 668 | |
| 669 // Create the IMMDeviceEnumerator interface. | |
| 670 HRESULT hr = CoCreateInstance(__uuidof(MMDeviceEnumerator), | |
| 671 NULL, | |
| 672 CLSCTX_INPROC_SERVER, | |
| 673 __uuidof(IMMDeviceEnumerator), | |
| 674 device_enumerator.ReceiveVoid()); | |
| 675 if (SUCCEEDED(hr)) { | |
| 676 // Retrieve the default render audio endpoint for the specified role. | |
| 677 // Note that, in Windows Vista, the MMDevice API supports device roles | |
| 678 // but the system-supplied user interface programs do not. | |
| 679 hr = device_enumerator->GetDefaultAudioEndpoint( | |
| 680 eRender, device_role_, endpoint_device.Receive()); | |
| 681 if (FAILED(hr)) | |
| 682 return hr; | |
| 683 | |
| 684 // Verify that the audio endpoint device is active. That is, the audio | |
| 685 // adapter that connects to the endpoint device is present and enabled. | |
| 686 DWORD state = DEVICE_STATE_DISABLED; | |
| 687 hr = endpoint_device->GetState(&state); | |
| 688 if (SUCCEEDED(hr)) { | |
| 689 if (!(state & DEVICE_STATE_ACTIVE)) { | |
| 690 DLOG(ERROR) << "Selected render device is not active."; | |
| 691 hr = E_ACCESSDENIED; | |
| 692 } | |
| 693 } | |
| 694 } | |
| 695 | |
| 696 if (SUCCEEDED(hr)) { | |
| 697 device_enumerator_ = device_enumerator; | |
| 698 endpoint_device_ = endpoint_device; | |
| 699 } | |
| 700 | |
| 701 return hr; | |
| 702 } | |
| 703 | |
| 704 HRESULT WASAPIAudioOutputStream::ActivateRenderDevice() { | |
| 705 ScopedComPtr<IAudioClient> audio_client; | |
| 706 | |
| 707 // Creates and activates an IAudioClient COM object given the selected | |
| 708 // render endpoint device. | |
| 709 HRESULT hr = endpoint_device_->Activate(__uuidof(IAudioClient), | |
| 710 CLSCTX_INPROC_SERVER, | |
| 711 NULL, | |
| 712 audio_client.ReceiveVoid()); | |
| 713 if (SUCCEEDED(hr)) { | |
| 714 // Retrieve the stream format that the audio engine uses for its internal | |
| 715 // processing/mixing of shared-mode streams. | |
| 716 audio_engine_mix_format_.Reset(NULL); | |
| 717 hr = audio_client->GetMixFormat( | |
| 718 reinterpret_cast<WAVEFORMATEX**>(&audio_engine_mix_format_)); | |
| 719 | |
| 720 if (SUCCEEDED(hr)) { | |
| 721 audio_client_ = audio_client; | |
| 722 } | |
| 723 } | |
| 724 | |
| 725 return hr; | |
| 726 } | |
| 727 | |
| 728 bool WASAPIAudioOutputStream::DesiredFormatIsSupported() { | |
| 729 // Determine, before calling IAudioClient::Initialize(), whether the audio | |
| 730 // engine supports a particular stream format. | |
| 731 // In shared mode, the audio engine always supports the mix format, | |
| 732 // which is stored in the |audio_engine_mix_format_| member and it is also | |
| 733 // possible to receive a proposed (closest) format if the current format is | |
| 734 // not supported. | |
| 735 base::win::ScopedCoMem<WAVEFORMATEXTENSIBLE> closest_match; | |
| 736 HRESULT hr = audio_client_->IsFormatSupported( | |
| 737 share_mode_, reinterpret_cast<WAVEFORMATEX*>(&format_), | |
| 738 reinterpret_cast<WAVEFORMATEX**>(&closest_match)); | |
| 739 | |
| 740 // This log can only be triggered for shared mode. | |
| 741 DLOG_IF(ERROR, hr == S_FALSE) << "Format is not supported " | |
| 742 << "but a closest match exists."; | |
| 743 // This log can be triggered both for shared and exclusive modes. | |
| 744 DLOG_IF(ERROR, hr == AUDCLNT_E_UNSUPPORTED_FORMAT) << "Unsupported format."; | |
| 745 if (hr == S_FALSE) { | |
| 746 DVLOG(1) << "wFormatTag : " << closest_match->Format.wFormatTag; | |
| 747 DVLOG(1) << "nChannels : " << closest_match->Format.nChannels; | |
| 748 DVLOG(1) << "nSamplesPerSec: " << closest_match->Format.nSamplesPerSec; | |
| 749 DVLOG(1) << "wBitsPerSample: " << closest_match->Format.wBitsPerSample; | |
| 750 } | |
| 751 | |
| 752 return (hr == S_OK); | |
| 753 } | |
| 754 | |
| 755 HRESULT WASAPIAudioOutputStream::InitializeAudioEngine() { | |
| 756 #if !defined(NDEBUG) | |
| 757 // The period between processing passes by the audio engine is fixed for a | |
| 758 // particular audio endpoint device and represents the smallest processing | |
| 759 // quantum for the audio engine. This period plus the stream latency between | |
| 760 // the buffer and endpoint device represents the minimum possible latency | |
| 761 // that an audio application can achieve in shared mode. | |
| 762 { | |
| 763 REFERENCE_TIME default_device_period = 0; | |
| 764 REFERENCE_TIME minimum_device_period = 0; | |
| 765 HRESULT hr_dbg = audio_client_->GetDevicePeriod(&default_device_period, | |
| 766 &minimum_device_period); | |
| 767 if (SUCCEEDED(hr_dbg)) { | |
| 768 // Shared mode device period. | |
| 769 DVLOG(1) << "shared mode (default) device period: " | |
| 770 << static_cast<double>(default_device_period / 10000.0) | |
| 771 << " [ms]"; | |
| 772 // Exclusive mode device period. | |
| 773 DVLOG(1) << "exclusive mode (minimum) device period: " | |
| 774 << static_cast<double>(minimum_device_period / 10000.0) | |
| 775 << " [ms]"; | |
| 776 } | |
| 777 | |
| 778 REFERENCE_TIME latency = 0; | |
| 779 hr_dbg = audio_client_->GetStreamLatency(&latency); | |
| 780 if (SUCCEEDED(hr_dbg)) { | |
| 781 DVLOG(1) << "stream latency: " << static_cast<double>(latency / 10000.0) | |
| 782 << " [ms]"; | |
| 783 } | |
| 784 } | |
| 785 #endif | |
| 786 | |
| 787 HRESULT hr = S_FALSE; | |
| 788 | |
| 789 // Perform different initialization depending on if the device shall be | |
| 790 // opened in shared mode or in exclusive mode. | |
| 791 hr = (share_mode_ == AUDCLNT_SHAREMODE_SHARED) ? | |
| 792 SharedModeInitialization() : ExclusiveModeInitialization(); | |
| 793 if (FAILED(hr)) { | |
| 794 LOG(WARNING) << "IAudioClient::Initialize() failed: " << std::hex << hr; | |
| 795 return hr; | |
| 796 } | |
| 797 | |
| 798 // Retrieve the length of the endpoint buffer. The buffer length represents | |
| 799 // the maximum amount of rendering data that the client can write to | |
| 800 // the endpoint buffer during a single processing pass. | |
| 801 // A typical value is 960 audio frames <=> 20ms @ 48kHz sample rate. | |
| 802 hr = audio_client_->GetBufferSize(&endpoint_buffer_size_frames_); | |
| 803 if (FAILED(hr)) | |
| 804 return hr; | |
| 805 DVLOG(1) << "endpoint buffer size: " << endpoint_buffer_size_frames_ | |
| 806 << " [frames]"; | |
| 807 | |
| 808 // The buffer scheme for exclusive mode streams is not designed for max | |
| 809 // flexibility. We only allow a "perfect match" between the packet size set | |
| 810 // by the user and the actual endpoint buffer size. | |
| 811 if (share_mode_ == AUDCLNT_SHAREMODE_EXCLUSIVE && | |
| 812 endpoint_buffer_size_frames_ != packet_size_frames_) { | |
| 813 hr = AUDCLNT_E_INVALID_SIZE; | |
| 814 DLOG(ERROR) << "AUDCLNT_E_INVALID_SIZE"; | |
| 815 return hr; | |
| 816 } | |
| 817 | |
| 818 // Set the event handle that the audio engine will signal each time | |
| 819 // a buffer becomes ready to be processed by the client. | |
| 820 hr = audio_client_->SetEventHandle(audio_samples_render_event_.Get()); | |
| 821 if (FAILED(hr)) | |
| 822 return hr; | |
| 823 | |
| 824 // Get access to the IAudioRenderClient interface. This interface | |
| 825 // enables us to write output data to a rendering endpoint buffer. | |
| 826 // The methods in this interface manage the movement of data packets | |
| 827 // that contain audio-rendering data. | |
| 828 hr = audio_client_->GetService(__uuidof(IAudioRenderClient), | |
| 829 audio_render_client_.ReceiveVoid()); | |
| 830 return hr; | |
| 831 } | |
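The IAudioRenderClient acquired above is driven with the GetBuffer()/ReleaseBuffer() protocol during rendering. A minimal sketch of writing one packet, assuming the member names used in this file (the copy step is a placeholder):

    // Hypothetical single-packet write of packet_size_frames_ frames.
    BYTE* audio_data = NULL;
    HRESULT hr = audio_render_client_->GetBuffer(packet_size_frames_,
                                                 &audio_data);
    if (SUCCEEDED(hr)) {
      // Fill audio_data with packet_size_frames_ * nBlockAlign bytes here.
      hr = audio_render_client_->ReleaseBuffer(packet_size_frames_, 0);
    }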
| 832 | |
| 833 HRESULT WASAPIAudioOutputStream::SharedModeInitialization() { | |
| 834 DCHECK_EQ(share_mode_, AUDCLNT_SHAREMODE_SHARED); | |
| 835 | |
| 836 // TODO(henrika): this buffer scheme is still under development. | |
| 837 // The exact details are yet to be determined based on tests with different | |
| 838 // audio clients. | |
| 839 int glitch_free_buffer_size_ms = static_cast<int>(packet_size_ms_ + 0.5); | |
| 840 if (audio_engine_mix_format_->Format.nSamplesPerSec % 8000 == 0) { | |
| 841 // Initial tests have shown that we have to add 10 ms extra to | |
| 842 // ensure that we don't run empty for any packet size. | |
| 843 glitch_free_buffer_size_ms += 10; | |
| 844 } else if (audio_engine_mix_format_->Format.nSamplesPerSec % 11025 == 0) { | |
| 845 // Initial tests have shown that we have to add 20 ms extra to | |
| 846 // ensure that we don't run empty for any packet size. | |
| 847 glitch_free_buffer_size_ms += 20; | |
| 848 } else { | |
| 849 DLOG(WARNING) << "Unsupported sample rate " | |
| 850 << audio_engine_mix_format_->Format.nSamplesPerSec << " detected"; | |
| 851 glitch_free_buffer_size_ms += 20; | |
| 852 } | |
| 853 DVLOG(1) << "glitch_free_buffer_size_ms: " << glitch_free_buffer_size_ms; | |
| 854 REFERENCE_TIME requested_buffer_duration = | |
| 855 static_cast<REFERENCE_TIME>(glitch_free_buffer_size_ms * 10000); | |
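Worked example (values illustrative): a 10 ms packet at 48 kHz takes the nSamplesPerSec % 8000 == 0 branch, so glitch_free_buffer_size_ms becomes 10 + 10 = 20, and requested_buffer_duration becomes 20 * 10000 = 200000 REFERENCE_TIME units (100-nanosecond units), i.e. 20 ms.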
| 856 | |
| 857 // Initialize the audio stream between the client and the device. | |
| 858 // We connect indirectly through the audio engine by using shared mode, | |
| 859 // and WASAPI is initialized in event-driven mode. | |
| 860 // Note that this API guarantees that the buffer is never smaller than | |
| 861 // the minimum buffer size needed to ensure glitch-free rendering. | |
| 862 // If we request a buffer size that is smaller than the audio engine's | |
| 863 // minimum required buffer size, the method sets the buffer size to this | |
| 864 // minimum rather than to the requested size. | |
| 865 HRESULT hr = S_FALSE; | |
| 866 hr = audio_client_->Initialize(AUDCLNT_SHAREMODE_SHARED, | |
| 867 AUDCLNT_STREAMFLAGS_EVENTCALLBACK | | |
| 868 AUDCLNT_STREAMFLAGS_NOPERSIST, | |
| 869 requested_buffer_duration, | |
| 870 0, | |
| 871 reinterpret_cast<WAVEFORMATEX*>(&format_), | |
| 872 NULL); | |
| 873 return hr; | |
| 874 } | |
| 875 | |
| 876 HRESULT WASAPIAudioOutputStream::ExclusiveModeInitialization() { | |
| 877 DCHECK_EQ(share_mode_, AUDCLNT_SHAREMODE_EXCLUSIVE); | 641 DCHECK_EQ(share_mode_, AUDCLNT_SHAREMODE_EXCLUSIVE); |
| 878 | 642 |
| 879 double packet_size_ms = (1000.0 * packet_size_frames_) / format_.Format.nSamplesPerSec; | 643 double packet_size_ms = (1000.0 * packet_size_frames_) / format_.Format.nSamplesPerSec; |
| 880 REFERENCE_TIME requested_buffer_duration = | 644 REFERENCE_TIME requested_buffer_duration = |
| 881 static_cast<REFERENCE_TIME>(packet_size_ms * 10000.0 + 0.5); | 645 static_cast<REFERENCE_TIME>(packet_size_ms * 10000.0 + 0.5); |
| 882 | 646 |
| 647 DWORD stream_flags = AUDCLNT_STREAMFLAGS_NOPERSIST; | |
| 648 bool use_event = (event_handle != NULL && | |
| 649 event_handle != INVALID_HANDLE_VALUE); | |
| 650 if (use_event) | |
| 651 stream_flags |= AUDCLNT_STREAMFLAGS_EVENTCALLBACK; | |
| 652 DVLOG(2) << "stream_flags: 0x" << std::hex << stream_flags; | |
| 653 | |
| 883 // Initialize the audio stream between the client and the device. | 654 // Initialize the audio stream between the client and the device. |
| 884 // For an exclusive-mode stream that uses event-driven buffering, the | 655 // For an exclusive-mode stream that uses event-driven buffering, the |
| 885 // caller must specify nonzero values for hnsPeriodicity and | 656 // caller must specify nonzero values for hnsPeriodicity and |
| 886 // hnsBufferDuration, and the values of these two parameters must be equal. | 657 // hnsBufferDuration, and the values of these two parameters must be equal. |
| 887 // The Initialize method allocates two buffers for the stream. Each buffer | 658 // The Initialize method allocates two buffers for the stream. Each buffer |
| 888 // is equal in duration to the value of the hnsBufferDuration parameter. | 659 // is equal in duration to the value of the hnsBufferDuration parameter. |
| 889 // Following the Initialize call for a rendering stream, the caller should | 660 // Following the Initialize call for a rendering stream, the caller should |
| 890 // fill the first of the two buffers before starting the stream. | 661 // fill the first of the two buffers before starting the stream. |
| 891 HRESULT hr = S_FALSE; | 662 HRESULT hr = S_FALSE; |
| 892 hr = audio_client_->Initialize(AUDCLNT_SHAREMODE_EXCLUSIVE, | 663 hr = client->Initialize(AUDCLNT_SHAREMODE_EXCLUSIVE, |
| 893 AUDCLNT_STREAMFLAGS_EVENTCALLBACK | | 664 stream_flags, |
| 894 AUDCLNT_STREAMFLAGS_NOPERSIST, | 665 requested_buffer_duration, |
| 895 requested_buffer_duration, | 666 requested_buffer_duration, |
| 896 requested_buffer_duration, | 667 reinterpret_cast<WAVEFORMATEX*>(&format_), |
| 897 reinterpret_cast<WAVEFORMATEX*>(&format_), | 668 NULL); |
| 898 NULL); | |
| 899 if (FAILED(hr)) { | 669 if (FAILED(hr)) { |
| 900 if (hr == AUDCLNT_E_BUFFER_SIZE_NOT_ALIGNED) { | 670 if (hr == AUDCLNT_E_BUFFER_SIZE_NOT_ALIGNED) { |
| 901 LOG(ERROR) << "AUDCLNT_E_BUFFER_SIZE_NOT_ALIGNED"; | 671 LOG(ERROR) << "AUDCLNT_E_BUFFER_SIZE_NOT_ALIGNED"; |
| 902 | 672 |
| 903 UINT32 aligned_buffer_size = 0; | 673 UINT32 aligned_buffer_size = 0; |
| 904 audio_client_->GetBufferSize(&aligned_buffer_size); | 674 client->GetBufferSize(&aligned_buffer_size); |
| 905 DVLOG(1) << "Use aligned buffer size instead: " << aligned_buffer_size; | 675 DVLOG(1) << "Use aligned buffer size instead: " << aligned_buffer_size; |
| 906 audio_client_.Release(); | |
| 907 | 676 |
| 908 // Calculate new aligned periodicity. Each unit of reference time | 677 // Calculate new aligned periodicity. Each unit of reference time |
| 909 // is 100 nanoseconds. | 678 // is 100 nanoseconds. |
| 910 REFERENCE_TIME aligned_buffer_duration = static_cast<REFERENCE_TIME>( | 679 REFERENCE_TIME aligned_buffer_duration = static_cast<REFERENCE_TIME>( |
| 911 (10000000.0 * aligned_buffer_size / format_.Format.nSamplesPerSec) | 680 (10000000.0 * aligned_buffer_size / format_.Format.nSamplesPerSec) |
| 912 + 0.5); | 681 + 0.5); |
| 913 | 682 |
| 914 // It is possible to re-activate and re-initialize the audio client | 683 // It is possible to re-activate and re-initialize the audio client |
| 915 // at this stage, but we bail out with an error code instead and | 684 // at this stage, but we bail out with an error code instead and |
| 916 // log a message that reports the suggested aligned buffer size | 685 // log a message that reports the suggested aligned buffer size |
| 917 // which should be used instead. | 686 // which should be used instead. |
| 918 DVLOG(1) << "aligned_buffer_duration: " | 687 DVLOG(1) << "aligned_buffer_duration: " |
| 919 << static_cast<double>(aligned_buffer_duration) / 10000.0 | 688 << static_cast<double>(aligned_buffer_duration) / 10000.0 |
| 920 << " [ms]"; | 689 << " [ms]"; |
| 921 } else if (hr == AUDCLNT_E_INVALID_DEVICE_PERIOD) { | 690 } else if (hr == AUDCLNT_E_INVALID_DEVICE_PERIOD) { |
| 922 // We will get this error if we try to use a smaller buffer size than | 691 // We will get this error if we try to use a smaller buffer size than |
| 923 // the minimum supported size (usually ~3ms on Windows 7). | 692 // the minimum supported size (usually ~3ms on Windows 7). |
| 924 LOG(ERROR) << "AUDCLNT_E_INVALID_DEVICE_PERIOD"; | 693 LOG(ERROR) << "AUDCLNT_E_INVALID_DEVICE_PERIOD"; |
| 925 } | 694 } |
| 695 return hr; | |
| 926 } | 696 } |
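To illustrate the realignment math above (numbers hypothetical): if GetBufferSize() reports an aligned size of 448 frames at 44100 Hz, then aligned_buffer_duration = static_cast<REFERENCE_TIME>(10000000.0 * 448 / 44100 + 0.5) = 101587, i.e. about 10.16 ms, which a retry could pass as both hnsBufferDuration and hnsPeriodicity.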
| 927 | 697 |
| 698 if (use_event) { | |
| 699 hr = client->SetEventHandle(event_handle); | |
| 700 if (FAILED(hr)) { | |
| 701 DVLOG(1) << "IAudioClient::SetEventHandle: " << std::hex << hr; | |
| 702 return hr; | |
| 703 } | |
| 704 } | |
| 705 | |
| 706 UINT32 buffer_size_in_frames = 0; | |
| 707 hr = client->GetBufferSize(&buffer_size_in_frames); | |
| 708 if (FAILED(hr)) { | |
| 709 DVLOG(1) << "IAudioClient::GetBufferSize: " << std::hex << hr; | |
| 710 return hr; | |
| 711 } | |
| 712 | |
| 713 *endpoint_buffer_size = static_cast<size_t>(buffer_size_in_frames); | |
| 714 DVLOG(2) << "endpoint buffer size: " << buffer_size_in_frames; | |
| 928 return hr; | 715 return hr; |
| 929 } | 716 } |
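The NEW side turns this routine into a helper that takes the audio client, an optional event handle, and an out-parameter for the endpoint buffer size. A hedged sketch of a call site, assuming the member names visible elsewhere in this file and a signature along the lines implied by the NEW column:

    // Hypothetical call site inside WASAPIAudioOutputStream; audio_client_
    // is assumed to convert to IAudioClient* via ScopedComPtr.
    size_t endpoint_buffer_size = 0;
    HRESULT hr = ExclusiveModeInitialization(
        audio_client_, audio_samples_render_event_.Get(),
        &endpoint_buffer_size);
    if (FAILED(hr))
      return hr;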
| 930 | 717 |
| 931 std::string WASAPIAudioOutputStream::GetDeviceName(LPCWSTR device_id) const { | 718 bool WASAPIAudioOutputStream::FillEndpointBufferWithSilence( |
| 932 std::string name; | 719 UINT32* num_written_frames) { |
| 933 ScopedComPtr<IMMDevice> audio_device; | 720 UINT32 num_queued_frames = 0; |
| 721 HRESULT hr = audio_client_->GetCurrentPadding(&num_queued_frames); | |
| 722 if (FAILED(hr)) | |
| 723 return false; | |
| 934 | 724 |
| 935 // Get the IMMDevice interface corresponding to the given endpoint ID string. | 725 BYTE* data = NULL; |
| 936 HRESULT hr = device_enumerator_->GetDevice(device_id, audio_device.Receive()); | 726 int num_frames_to_fill = endpoint_buffer_size_frames_ - num_queued_frames; |
| 937 if (SUCCEEDED(hr)) { | 727 hr = audio_render_client_->GetBuffer(num_frames_to_fill, &data); |
| 938 // Retrieve user-friendly name of endpoint device. | 728 if (FAILED(hr)) |
| 939 // Example: "Speakers (Realtek High Definition Audio)". | 729 return false; |
| 940 ScopedComPtr<IPropertyStore> properties; | 730 |
| 941 hr = audio_device->OpenPropertyStore(STGM_READ, properties.Receive()); | 731 // Using the AUDCLNT_BUFFERFLAGS_SILENT flag eliminates the need to |
| 942 if (SUCCEEDED(hr)) { | 732 // explicitly write silence data to the rendering buffer. |
| 943 PROPVARIANT friendly_name; | 733 DVLOG(2) << "filling up " << num_frames_to_fill << " frames with silence"; |
| 944 PropVariantInit(&friendly_name); | 734 hr = audio_render_client_->ReleaseBuffer(num_frames_to_fill, |
| 945 hr = properties->GetValue(PKEY_Device_FriendlyName, &friendly_name); | 735 AUDCLNT_BUFFERFLAGS_SILENT); |
| 946 if (SUCCEEDED(hr) && friendly_name.vt == VT_LPWSTR) { | 736 if (FAILED(hr)) |
| 947 if (friendly_name.pwszVal) | 737 return false; |
| 948 name = WideToUTF8(friendly_name.pwszVal); | 738 |
| 949 } | 739 // Get the amount of valid, unread data that the endpoint buffer |
| 950 PropVariantClear(&friendly_name); | 740 // currently contains. This amount corresponds to the number of written |
| 951 } | 741 // audio frames. |
| 952 } | 742 hr = audio_client_->GetCurrentPadding(&num_queued_frames); |
| 953 return name; | 743 if (FAILED(hr)) |
| 744 return false; | |
| 745 *num_written_frames = num_queued_frames; | |
| 746 | |
| 747 return true;  // hr is known to be a success code at this point. | |
| 954 } | 748 } |
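FillEndpointBufferWithSilence() is the kind of helper a caller would invoke to pre-roll the stream before IAudioClient::Start(), so the engine has a full (silent) buffer on its first processing pass. A minimal sketch of such a call site (the surrounding code is an assumption, not part of this diff):

    // Hypothetical pre-roll before starting the stream.
    UINT32 num_written_frames = 0;
    if (!FillEndpointBufferWithSilence(&num_written_frames))
      DLOG(WARNING) << "Failed to pre-fill the endpoint buffer with silence.";
    HRESULT hr = audio_client_->Start();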
| 955 | 749 |
| 956 } // namespace media | 750 } // namespace media |