Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "media/audio/win/audio_low_latency_output_win.h" | 5 #include "media/audio/win/audio_low_latency_output_win.h" |
| 6 | 6 |
| 7 #include <Functiondiscoverykeys_devpkey.h> | 7 #include <Functiondiscoverykeys_devpkey.h> |
| 8 | 8 |
| 9 #include "base/command_line.h" | 9 #include "base/command_line.h" |
| 10 #include "base/debug/trace_event.h" | |
| 10 #include "base/logging.h" | 11 #include "base/logging.h" |
| 11 #include "base/memory/scoped_ptr.h" | 12 #include "base/memory/scoped_ptr.h" |
| 12 #include "base/metrics/histogram.h" | 13 #include "base/metrics/histogram.h" |
| 13 #include "base/utf_string_conversions.h" | 14 #include "base/utf_string_conversions.h" |
| 14 #include "media/audio/audio_util.h" | 15 #include "media/audio/audio_util.h" |
| 15 #include "media/audio/win/audio_manager_win.h" | 16 #include "media/audio/win/audio_manager_win.h" |
| 16 #include "media/audio/win/avrt_wrapper_win.h" | 17 #include "media/audio/win/avrt_wrapper_win.h" |
| 18 #include "media/audio/win/core_audio_util_win.h" | |
| 17 #include "media/base/limits.h" | 19 #include "media/base/limits.h" |
| 18 #include "media/base/media_switches.h" | 20 #include "media/base/media_switches.h" |
| 19 | 21 |
| 20 using base::win::ScopedComPtr; | 22 using base::win::ScopedComPtr; |
| 21 using base::win::ScopedCOMInitializer; | 23 using base::win::ScopedCOMInitializer; |
| 22 using base::win::ScopedCoMem; | 24 using base::win::ScopedCoMem; |
| 23 | 25 |
| 24 namespace media { | 26 namespace media { |
| 25 | 27 |
| 26 typedef uint32 ChannelConfig; | 28 typedef uint32 ChannelConfig; |
| 27 | 29 |
| 28 // Retrieves the stream format that the audio engine uses for its internal | |
| 29 // processing/mixing of shared-mode streams. | |
| 30 static HRESULT GetMixFormat(ERole device_role, WAVEFORMATEX** device_format) { | |
| 31 // Note that we are using the IAudioClient::GetMixFormat() API to get the | |
| 32 // device format in this function. It is in fact possible to be "more native", | |
| 33 // and ask the endpoint device directly for its properties. Given a reference | |
| 34 // to the IMMDevice interface of an endpoint object, a client can obtain a | |
| 35 // reference to the endpoint object's property store by calling the | |
| 36 // IMMDevice::OpenPropertyStore() method. However, I have not been able to | |
| 37 // access any valuable information using this method on my HP Z600 desktop, | |
| 38 // hence it feels more appropriate to use the IAudioClient::GetMixFormat() | |
| 39 // approach instead. | |
| 40 | |
| 41 // Calling this function only makes sense for shared mode streams, since | |
| 42 // if the device will be opened in exclusive mode, then the application | |
| 43 // specified format is used instead. However, the result of this method can | |
| 44 // be useful for testing purposes so we don't DCHECK here. | |
| 45 DLOG_IF(WARNING, WASAPIAudioOutputStream::GetShareMode() == | |
| 46 AUDCLNT_SHAREMODE_EXCLUSIVE) << | |
| 47 "The mixing sample rate will be ignored for exclusive-mode streams."; | |
| 48 | |
| 49 // It is assumed that this static method is called from a COM thread, i.e., | |
| 50 // CoInitializeEx() is not called here again to avoid STA/MTA conflicts. | |
| 51 ScopedComPtr<IMMDeviceEnumerator> enumerator; | |
| 52 HRESULT hr = CoCreateInstance(__uuidof(MMDeviceEnumerator), | |
| 53 NULL, | |
| 54 CLSCTX_INPROC_SERVER, | |
| 55 __uuidof(IMMDeviceEnumerator), | |
| 56 enumerator.ReceiveVoid()); | |
| 57 if (FAILED(hr)) | |
| 58 return hr; | |
| 59 | |
| 60 ScopedComPtr<IMMDevice> endpoint_device; | |
| 61 hr = enumerator->GetDefaultAudioEndpoint(eRender, | |
| 62 device_role, | |
| 63 endpoint_device.Receive()); | |
| 64 if (FAILED(hr)) | |
| 65 return hr; | |
| 66 | |
| 67 ScopedComPtr<IAudioClient> audio_client; | |
| 68 hr = endpoint_device->Activate(__uuidof(IAudioClient), | |
| 69 CLSCTX_INPROC_SERVER, | |
| 70 NULL, | |
| 71 audio_client.ReceiveVoid()); | |
| 72 return SUCCEEDED(hr) ? audio_client->GetMixFormat(device_format) : hr; | |
| 73 } | |
| 74 | |
| 75 // Retrieves an integer mask which corresponds to the channel layout the | 30 // Retrieves an integer mask which corresponds to the channel layout the |
| 76 // audio engine uses for its internal processing/mixing of shared-mode | 31 // audio engine uses for its internal processing/mixing of shared-mode |
| 77 // streams. This mask indicates which channels are present in the multi- | 32 // streams. This mask indicates which channels are present in the multi- |
| 78 // channel stream. The least significant bit corresponds with the Front Left | 33 // channel stream. The least significant bit corresponds with the Front Left |
| 79 // speaker, the next least significant bit corresponds to the Front Right | 34 // speaker, the next least significant bit corresponds to the Front Right |
| 80 // speaker, and so on, continuing in the order defined in KsMedia.h. | 35 // speaker, and so on, continuing in the order defined in KsMedia.h. |
| 81 // See http://msdn.microsoft.com/en-us/library/windows/hardware/ff537083(v=vs.85).aspx | 36 // See http://msdn.microsoft.com/en-us/library/windows/hardware/ff537083(v=vs.85).aspx |
| 82 // for more details. | 37 // for more details. |
| 83 static ChannelConfig GetChannelConfig() { | 38 static ChannelConfig GetChannelConfig() { |
| 84 // Use a WAVEFORMATEXTENSIBLE structure since it can specify both the | 39 WAVEFORMATPCMEX format; |
| 85 // number of channels and the mapping of channels to speakers for | 40 return SUCCEEDED(CoreAudioUtil::GetDefaultSharedModeMixFormat( |
| 86 // multichannel devices. | 41 eRender, eConsole, &format)) ? |
| 87 base::win::ScopedCoMem<WAVEFORMATPCMEX> format_ex; | 42 static_cast<int>(format.dwChannelMask) : 0; |
| 88 HRESULT hr = S_FALSE; | 43 } |
| 89 hr = GetMixFormat(eConsole, reinterpret_cast<WAVEFORMATEX**>(&format_ex)); | |
| 90 if (FAILED(hr)) | |
| 91 return 0; | |
| 92 | 44 |
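
Note: GetChannelConfig() returns the raw dwChannelMask in the KSAUDIO_SPEAKER_* bit convention described above (bit 0 is Front Left, bit 1 is Front Right, and so on). A minimal standalone sketch of how such a mask can be read; the mask constants and the helper below are illustrative stand-ins, not part of this patch:

```cpp
#include <cstdint>
#include <iostream>

// Illustrative values only: Front Left is bit 0 and Front Right is bit 1,
// so the stereo mask is 0x3 (see ksmedia.h for the real constants).
const uint32_t kStereoMask = 0x3;    // front left | front right
const uint32_t k5Point1Mask = 0x3F;  // FL | FR | FC | LFE | BL | BR

// The number of channels in the stream equals the number of bits set
// in the mask.
int ChannelCountFromMask(uint32_t mask) {
  int count = 0;
  for (; mask != 0; mask >>= 1)
    count += (mask & 1);
  return count;
}

int main() {
  std::cout << "stereo: " << ChannelCountFromMask(kStereoMask) << "\n";   // 2
  std::cout << "5.1:    " << ChannelCountFromMask(k5Point1Mask) << "\n";  // 6
  return 0;
}
```
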
| 93 // The dwChannelMask member specifies which channels are present in the | 45 // Compare two sets of audio parameters and return true if they are equal. |
| 94 // multichannel stream. The least significant bit corresponds to the | 46 // Note that bits_per_sample() is excluded from this comparison since Core |
| 95 // front left speaker, the next least significant bit corresponds to the | 47 // Audio can deal with most bit depths. As an example, if the native/mixing |
| 96 // front right speaker, and so on. | 48 // bit depth is 32 bits (default), opening at 16 or 24 still works fine and |
| 97 // See http://msdn.microsoft.com/en-us/library/windows/desktop/dd757714(v=vs.85).aspx | 49 // the audio engine will do the required conversion for us. |
| 98 // for more details on the channel mapping. | 50 static bool CompareAudioParametersNoBitDepth(const media::AudioParameters& a, |
| 99 DVLOG(2) << "dwChannelMask: 0x" << std::hex << format_ex->dwChannelMask; | 51 const media::AudioParameters& b) { |
| 100 | 52 return (a.format() == b.format() && |
| 101 #if !defined(NDEBUG) | 53 a.channels() == b.channels() && |
| 102 // See http://en.wikipedia.org/wiki/Surround_sound for more details on | 54 a.sample_rate() == b.sample_rate() && |
| 103 // how to name various speaker configurations. The list below is not complete. | 55 a.frames_per_buffer() == b.frames_per_buffer()); |
| 104 const char* speaker_config = "Undefined"; | |
| 105 switch (format_ex->dwChannelMask) { | |
| 106 case KSAUDIO_SPEAKER_MONO: | |
| 107 speaker_config = "Mono"; | |
| 108 break; | |
| 109 case KSAUDIO_SPEAKER_STEREO: | |
| 110 speaker_config = "Stereo"; | |
| 111 break; | |
| 112 case KSAUDIO_SPEAKER_5POINT1_SURROUND: | |
| 113 speaker_config = "5.1 surround"; | |
| 114 break; | |
| 115 case KSAUDIO_SPEAKER_5POINT1: | |
| 116 speaker_config = "5.1"; | |
| 117 break; | |
| 118 case KSAUDIO_SPEAKER_7POINT1_SURROUND: | |
| 119 speaker_config = "7.1 surround"; | |
| 120 break; | |
| 121 case KSAUDIO_SPEAKER_7POINT1: | |
| 122 speaker_config = "7.1"; | |
| 123 break; | |
| 124 default: | |
| 125 break; | |
| 126 } | |
| 127 DVLOG(2) << "speaker configuration: " << speaker_config; | |
| 128 #endif | |
| 129 | |
| 130 return static_cast<ChannelConfig>(format_ex->dwChannelMask); | |
| 131 } | 56 } |
| 132 | 57 |
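
Note: CompareAudioParametersNoBitDepth() intentionally ignores bits_per_sample() because the shared-mode engine converts bit depth on behalf of the client. A small usage-style sketch of the same idea with a simplified stand-in struct (not the real media::AudioParameters):

```cpp
#include <iostream>

// Simplified stand-in for media::AudioParameters; the fields are
// illustrative, not the real class.
struct Params {
  int format;
  int channels;
  int sample_rate;
  int bits_per_sample;   // intentionally not compared
  int frames_per_buffer;
};

// Equality test that ignores bit depth, mirroring the helper in the patch.
bool EqualIgnoringBitDepth(const Params& a, const Params& b) {
  return a.format == b.format &&
         a.channels == b.channels &&
         a.sample_rate == b.sample_rate &&
         a.frames_per_buffer == b.frames_per_buffer;
}

int main() {
  Params native = {1, 2, 48000, 32, 480};  // e.g. a 32-bit mix format
  Params client = {1, 2, 48000, 16, 480};  // client opens at 16 bits
  std::cout << EqualIgnoringBitDepth(native, client) << "\n";  // 1 (match)
  return 0;
}
```
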
| 133 // Converts Microsoft's channel configuration to ChannelLayout. | 58 // Converts Microsoft's channel configuration to ChannelLayout. |
| 134 // This mapping is not perfect but the best we can do given the current | 59 // This mapping is not perfect but the best we can do given the current |
| 135 // ChannelLayout enumerator and the Windows-specific speaker configurations | 60 // ChannelLayout enumerator and the Windows-specific speaker configurations |
| 136 // defined in ksmedia.h. Don't assume that the channel ordering in | 61 // defined in ksmedia.h. Don't assume that the channel ordering in |
| 137 // ChannelLayout is exactly the same as the Windows specific configuration. | 62 // ChannelLayout is exactly the same as the Windows specific configuration. |
| 138 // As an example: KSAUDIO_SPEAKER_7POINT1_SURROUND is mapped to | 63 // As an example: KSAUDIO_SPEAKER_7POINT1_SURROUND is mapped to |
| 139 // CHANNEL_LAYOUT_7_1 but the positions of Back L, Back R and Side L, Side R | 64 // CHANNEL_LAYOUT_7_1 but the positions of Back L, Back R and Side L, Side R |
| 140 // speakers are different in these two definitions. | 65 // speakers are different in these two definitions. |
| (...skipping 24 matching lines...) | |
| 165 } | 90 } |
| 166 | 91 |
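
Note: the body of ChannelConfigToChannelLayout() is collapsed in this diff. Per the comment above it is a best-effort switch over KSAUDIO_SPEAKER_* masks; the sketch below only shows the general shape, with an illustrative subset of enum values and mask constants rather than the patch's actual mapping table:

```cpp
#include <cstdint>

// Illustrative subset of a ChannelLayout-style enum.
enum ChannelLayout {
  CHANNEL_LAYOUT_UNSUPPORTED,
  CHANNEL_LAYOUT_MONO,
  CHANNEL_LAYOUT_STEREO,
  CHANNEL_LAYOUT_7_1,
};

// Illustrative mask values in the ksmedia.h bit convention.
const uint32_t kMonoMask = 0x4;              // front center only
const uint32_t kStereoMask = 0x3;            // front left | front right
const uint32_t k7Point1SurroundMask = 0x63F;

// Best-effort mapping: masks with no ChannelLayout counterpart fall back to
// an "unsupported" value. As the patch comment warns, even when a case
// matches, the channel ordering may differ between the Windows definition
// and ChannelLayout.
ChannelLayout LayoutFromMask(uint32_t mask) {
  switch (mask) {
    case kMonoMask:
      return CHANNEL_LAYOUT_MONO;
    case kStereoMask:
      return CHANNEL_LAYOUT_STEREO;
    case k7Point1SurroundMask:
      return CHANNEL_LAYOUT_7_1;
    default:
      return CHANNEL_LAYOUT_UNSUPPORTED;
  }
}

int main() {
  return LayoutFromMask(kStereoMask) == CHANNEL_LAYOUT_STEREO ? 0 : 1;
}
```
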
| 167 // static | 92 // static |
| 168 AUDCLNT_SHAREMODE WASAPIAudioOutputStream::GetShareMode() { | 93 AUDCLNT_SHAREMODE WASAPIAudioOutputStream::GetShareMode() { |
| 169 const CommandLine* cmd_line = CommandLine::ForCurrentProcess(); | 94 const CommandLine* cmd_line = CommandLine::ForCurrentProcess(); |
| 170 if (cmd_line->HasSwitch(switches::kEnableExclusiveAudio)) | 95 if (cmd_line->HasSwitch(switches::kEnableExclusiveAudio)) |
| 171 return AUDCLNT_SHAREMODE_EXCLUSIVE; | 96 return AUDCLNT_SHAREMODE_EXCLUSIVE; |
| 172 return AUDCLNT_SHAREMODE_SHARED; | 97 return AUDCLNT_SHAREMODE_SHARED; |
| 173 } | 98 } |
| 174 | 99 |
| 100 // static | |
| 101 int WASAPIAudioOutputStream::HardwareChannelCount() { | |
| 102 WAVEFORMATPCMEX format; | |
| 103 return SUCCEEDED(CoreAudioUtil::GetDefaultSharedModeMixFormat( | |
| 104 eRender, eConsole, &format)) ? | |

tommi (sloooow) - chröme (2013/01/31 16:18:24): strange indentation since eRender is aligned with

| 105 static_cast<int>(format.Format.nChannels) : 0; | |
| 106 } | |
| 107 | |
| 108 // static | |
| 109 ChannelLayout WASAPIAudioOutputStream::HardwareChannelLayout() { | |
| 110 return ChannelConfigToChannelLayout(GetChannelConfig()); | |
| 111 } | |
| 112 | |
| 113 // static | |
| 114 int WASAPIAudioOutputStream::HardwareSampleRate(ERole device_role) { | |
| 115 WAVEFORMATPCMEX format; | |
| 116 return SUCCEEDED(CoreAudioUtil::GetDefaultSharedModeMixFormat( | |
| 117 eRender, device_role, &format)) ? | |
|

tommi (sloooow) - chröme (2013/01/31 16:18:24): same here. Btw, this is the same call as above.

| 118 static_cast<int>(format.Format.nSamplesPerSec) : 0; | |
| 119 } | |
| 120 | |
| 175 WASAPIAudioOutputStream::WASAPIAudioOutputStream(AudioManagerWin* manager, | 121 WASAPIAudioOutputStream::WASAPIAudioOutputStream(AudioManagerWin* manager, |
| 176 const AudioParameters& params, | 122 const AudioParameters& params, |
| 177 ERole device_role) | 123 ERole device_role) |
| 178 : creating_thread_id_(base::PlatformThread::CurrentId()), | 124 : creating_thread_id_(base::PlatformThread::CurrentId()), |
| 179 manager_(manager), | 125 manager_(manager), |
| 180 opened_(false), | 126 opened_(false), |
| 181 restart_rendering_mode_(false), | 127 audio_parmeters_are_valid_(false), |
| 182 volume_(1.0), | 128 volume_(1.0), |
| 183 endpoint_buffer_size_frames_(0), | 129 endpoint_buffer_size_frames_(0), |
| 184 device_role_(device_role), | 130 device_role_(device_role), |
| 185 share_mode_(GetShareMode()), | 131 share_mode_(GetShareMode()), |
| 186 client_channel_count_(params.channels()), | |
| 187 num_written_frames_(0), | 132 num_written_frames_(0), |
| 188 source_(NULL), | 133 source_(NULL), |
| 189 audio_bus_(AudioBus::Create(params)) { | 134 audio_bus_(AudioBus::Create(params)) { |
| 190 DCHECK(manager_); | 135 DCHECK(manager_); |
| 136 DVLOG(1) << "WASAPIAudioOutputStream::WASAPIAudioOutputStream()"; | |
| 137 DVLOG_IF(1, share_mode_ == AUDCLNT_SHAREMODE_EXCLUSIVE) | |
| 138 << "Core Audio (WASAPI) EXCLUSIVE MODE is enabled."; | |
| 139 | |
| 140 if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) { | |
| 141 // Verify that the input audio parameters are identical (bit depth is | |
| 142 // excluded) to the preferred (native) audio parameters. Open() will fail | |
| 143 // if this is not the case. | |
| 144 AudioParameters preferred_params; | |
| 145 HRESULT hr = CoreAudioUtil::GetPreferredAudioParameters( | |
| 146 eRender, device_role, &preferred_params); | |
| 147 audio_parmeters_are_valid_ = SUCCEEDED(hr) && | |
| 148 CompareAudioParametersNoBitDepth(params, preferred_params); | |
| 149 DLOG_IF(WARNING, !audio_parmeters_are_valid_) | |
| 150 << "Input and preferred parameters are not identical."; | |
| 151 } | |
| 191 | 152 |
| 192 // Load the Avrt DLL if not already loaded. Required to support MMCSS. | 153 // Load the Avrt DLL if not already loaded. Required to support MMCSS. |
| 193 bool avrt_init = avrt::Initialize(); | 154 bool avrt_init = avrt::Initialize(); |
| 194 DCHECK(avrt_init) << "Failed to load the avrt.dll"; | 155 DCHECK(avrt_init) << "Failed to load the avrt.dll"; |
| 195 | 156 |
| 196 if (share_mode_ == AUDCLNT_SHAREMODE_EXCLUSIVE) { | |
| 197 VLOG(1) << ">> Note that EXCLUSIVE MODE is enabled <<"; | |
| 198 } | |
| 199 | |
| 200 // Set up the desired render format specified by the client. We use the | 157 // Set up the desired render format specified by the client. We use the |
| 201 // WAVE_FORMAT_EXTENSIBLE structure to ensure that multiple channel ordering | 158 // WAVE_FORMAT_EXTENSIBLE structure to ensure that multiple channel ordering |
| 202 // and high precision data can be supported. | 159 // and high precision data can be supported. |
| 203 | 160 |
| 204 // Begin with the WAVEFORMATEX structure that specifies the basic format. | 161 // Begin with the WAVEFORMATEX structure that specifies the basic format. |
| 205 WAVEFORMATEX* format = &format_.Format; | 162 WAVEFORMATEX* format = &format_.Format; |
| 206 format->wFormatTag = WAVE_FORMAT_EXTENSIBLE; | 163 format->wFormatTag = WAVE_FORMAT_EXTENSIBLE; |
| 207 format->nChannels = client_channel_count_; | 164 format->nChannels = params.channels(); |
| 208 format->nSamplesPerSec = params.sample_rate(); | 165 format->nSamplesPerSec = params.sample_rate(); |
| 209 format->wBitsPerSample = params.bits_per_sample(); | 166 format->wBitsPerSample = params.bits_per_sample(); |
| 210 format->nBlockAlign = (format->wBitsPerSample / 8) * format->nChannels; | 167 format->nBlockAlign = (format->wBitsPerSample / 8) * format->nChannels; |
| 211 format->nAvgBytesPerSec = format->nSamplesPerSec * format->nBlockAlign; | 168 format->nAvgBytesPerSec = format->nSamplesPerSec * format->nBlockAlign; |
| 212 format->cbSize = sizeof(WAVEFORMATEXTENSIBLE) - sizeof(WAVEFORMATEX); | 169 format->cbSize = sizeof(WAVEFORMATEXTENSIBLE) - sizeof(WAVEFORMATEX); |
| 213 | 170 |
| 214 // Add the parts which are unique to WAVE_FORMAT_EXTENSIBLE. | 171 // Add the parts which are unique to WAVE_FORMAT_EXTENSIBLE. |
| 215 format_.Samples.wValidBitsPerSample = params.bits_per_sample(); | 172 format_.Samples.wValidBitsPerSample = params.bits_per_sample(); |
| 216 format_.dwChannelMask = GetChannelConfig(); | 173 format_.dwChannelMask = GetChannelConfig(); |
| 217 format_.SubFormat = KSDATAFORMAT_SUBTYPE_PCM; | 174 format_.SubFormat = KSDATAFORMAT_SUBTYPE_PCM; |
| 218 | 175 |
| 219 // Size in bytes of each audio frame. | |
| 220 frame_size_ = format->nBlockAlign; | |
| 221 | |
| 222 // Store size (in different units) of audio packets which we expect to | 176 // Store size (in different units) of audio packets which we expect to |
| 223 // get from the audio endpoint device in each render event. | 177 // get from the audio endpoint device in each render event. |
| 224 packet_size_frames_ = params.GetBytesPerBuffer() / format->nBlockAlign; | 178 packet_size_frames_ = params.frames_per_buffer(); |
| 225 packet_size_bytes_ = params.GetBytesPerBuffer(); | 179 packet_size_bytes_ = params.GetBytesPerBuffer(); |
| 226 packet_size_ms_ = (1000.0 * packet_size_frames_) / params.sample_rate(); | 180 packet_size_ms_ = (1000.0 * packet_size_frames_) / params.sample_rate(); |
| 227 DVLOG(1) << "Number of bytes per audio frame : " << frame_size_; | 181 DVLOG(1) << "Number of bytes per audio frame : " << format->nBlockAlign; |
| 228 DVLOG(1) << "Number of audio frames per packet: " << packet_size_frames_; | 182 DVLOG(1) << "Number of audio frames per packet: " << packet_size_frames_; |
| 229 DVLOG(1) << "Number of bytes per packet : " << packet_size_bytes_; | 183 DVLOG(1) << "Number of bytes per packet : " << packet_size_bytes_; |
| 230 DVLOG(1) << "Number of milliseconds per packet: " << packet_size_ms_; | 184 DVLOG(1) << "Number of milliseconds per packet: " << packet_size_ms_; |
| 231 | 185 |
| 232 // All events are auto-reset events and non-signaled initially. | 186 // All events are auto-reset events and non-signaled initially. |
| 233 | 187 |
| 234 // Create the event which the audio engine will signal each time | 188 // Create the event which the audio engine will signal each time |
| 235 // a buffer becomes ready to be processed by the client. | 189 // a buffer becomes ready to be processed by the client. |
| 236 audio_samples_render_event_.Set(CreateEvent(NULL, FALSE, FALSE, NULL)); | 190 audio_samples_render_event_.Set(CreateEvent(NULL, FALSE, FALSE, NULL)); |
| 237 DCHECK(audio_samples_render_event_.IsValid()); | 191 DCHECK(audio_samples_render_event_.IsValid()); |
| 238 | 192 |
| 239 // Create the event which will be set in Stop() when capturing shall stop. | 193 // Create the event which will be set in Stop() when capturing shall stop. |
| 240 stop_render_event_.Set(CreateEvent(NULL, FALSE, FALSE, NULL)); | 194 stop_render_event_.Set(CreateEvent(NULL, FALSE, FALSE, NULL)); |
| 241 DCHECK(stop_render_event_.IsValid()); | 195 DCHECK(stop_render_event_.IsValid()); |
| 242 } | 196 } |
| 243 | 197 |
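
Note: the packet bookkeeping in the constructor is plain arithmetic on the client parameters. A small worked sketch of those formulas, assuming an example 48 kHz, stereo, 16-bit stream with a 480-frame buffer (the numbers are illustrative, not taken from the patch):

```cpp
#include <iostream>

int main() {
  // Example client parameters (not from the patch): 48 kHz, stereo,
  // 16 bits per sample, 480 frames per buffer.
  const int sample_rate = 48000;
  const int channels = 2;
  const int bits_per_sample = 16;
  const int frames_per_buffer = 480;

  // nBlockAlign: size in bytes of one audio frame (all channels).
  const int block_align = (bits_per_sample / 8) * channels;        // 4 bytes

  // Packet size expressed in frames, bytes and milliseconds, matching the
  // three DVLOGs in the constructor.
  const int packet_size_frames = frames_per_buffer;                // 480
  const int packet_size_bytes = frames_per_buffer * block_align;   // 1920
  const double packet_size_ms =
      (1000.0 * packet_size_frames) / sample_rate;                 // 10 ms

  std::cout << block_align << " " << packet_size_frames << " "
            << packet_size_bytes << " " << packet_size_ms << "\n";
  return 0;
}
```
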
| 244 WASAPIAudioOutputStream::~WASAPIAudioOutputStream() {} | 198 WASAPIAudioOutputStream::~WASAPIAudioOutputStream() {} |
| 245 | 199 |
| 246 bool WASAPIAudioOutputStream::Open() { | 200 bool WASAPIAudioOutputStream::Open() { |
| 201 DVLOG(1) << "WASAPIAudioOutputStream::Open()"; | |
| 247 DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_); | 202 DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_); |
| 248 if (opened_) | 203 if (opened_) |
| 249 return true; | 204 return true; |
| 250 | 205 |
| 251 // Channel mixing is not supported, it must be handled by ChannelMixer. | 206 |
| 252 if (format_.Format.nChannels != client_channel_count_) { | 207 // Audio parameters must be identical to the preferred set of parameters |
| 253 LOG(ERROR) << "Channel down-mixing is not supported."; | 208 // if shared mode (default) is utilized. |
| 209 if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) { | |
| 210 if (!audio_parmeters_are_valid_) { | |
| 211 LOG(ERROR) << "Audio parameters are not valid."; | |
| 212 return false; | |
| 213 } | |
| 214 } | |
| 215 | |
| 216 // Create an IAudioClient interface for the default rendering IMMDevice. | |
| 217 ScopedComPtr<IAudioClient> audio_client = | |
| 218 CoreAudioUtil::CreateDefaultClient(eRender, device_role_); | |
| 219 if (!audio_client) | |
| 220 return false; | |
| 221 | |
| 222 // Extra sanity to ensure that the provided device format is still valid. | |
| 223 if (!CoreAudioUtil::IsFormatSupported(audio_client, | |
| 224 share_mode_, | |
| 225 &format_)) { | |
| 254 return false; | 226 return false; |
| 255 } | 227 } |
| 256 | 228 |
| 257 // Create an IMMDeviceEnumerator interface and obtain a reference to | 229 HRESULT hr = S_FALSE; |
| 258 // the IMMDevice interface of the default rendering device with the | 230 if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) { |
| 259 // specified role. | 231 // Initialize the audio stream between the client and the device in shared |
| 260 HRESULT hr = SetRenderDevice(); | 232 // mode and using event-driven buffer handling. |
| 261 if (FAILED(hr)) { | 233 hr = CoreAudioUtil::SharedModeInitialize( |
| 262 return false; | 234 audio_client, &format_, audio_samples_render_event_.Get(), |
| 235 &endpoint_buffer_size_frames_); | |
| 236 if (FAILED(hr)) | |
| 237 return false; | |
| 238 | |
| 239 // We know from experience that the best possible callback sequence is | |
| 240 // achieved when the packet size (given by the native device period) | |
| 241 // is an even multiple of the endpoint buffer size. | |
| 242 // Examples: 48kHz => 960 % 480, 44.1kHz => 896 % 448 or 882 % 441. | |
| 243 if (endpoint_buffer_size_frames_ % packet_size_frames_ != 0) { | |
| 244 DLOG(ERROR) << "Bailing out due to non-perfect timing."; | |
| 245 return false; | |
| 246 } | |
| 247 } else { | |
| 248 // TODO(henrika): break out to CoreAudioUtil::ExclusiveModeInitialize() | |
| 249 // when removing the enable-exclusive-audio flag. | |
| 250 hr = ExclusiveModeInitialization(audio_client, | |
| 251 audio_samples_render_event_.Get(), | |
| 252 &endpoint_buffer_size_frames_); | |
| 253 if (FAILED(hr)) | |
| 254 return false; | |
| 255 | |
| 256 // The buffer scheme for exclusive mode streams is not designed for max | |
| 257 // flexibility. We only allow a "perfect match" between the packet size set | |
| 258 // by the user and the actual endpoint buffer size. | |
| 259 if (endpoint_buffer_size_frames_ != packet_size_frames_) { | |
| 260 DLOG(ERROR) << "Bailing out due to non-perfect timing."; | |
| 261 return false; | |
| 262 } | |
| 263 } | 263 } |
| 264 | 264 |
| 265 // Obtain an IAudioClient interface which enables us to create and initialize | 265 // Create an IAudioRenderClient client for an initialized IAudioClient. |
| 266 // an audio stream between an audio application and the audio engine. | 266 // The IAudioRenderClient interface enables us to write output data to |
| 267 hr = ActivateRenderDevice(); | 267 // a rendering endpoint buffer. |
| 268 if (FAILED(hr)) { | 268 ScopedComPtr<IAudioRenderClient> audio_render_client = |
| 269 CoreAudioUtil::CreateRenderClient(audio_client); | |
| 270 if (!audio_render_client) | |
| 269 return false; | 271 return false; |
| 270 } | |
| 271 | 272 |
| 272 // Verify that the selected audio endpoint supports the specified format | 273 // Store valid COM interfaces. |
| 273 // set during construction. | 274 audio_client_ = audio_client; |
| 274 // In exclusive mode, the client can choose to open the stream in any audio | 275 audio_render_client_ = audio_render_client; |
| 275 // format that the endpoint device supports. In shared mode, the client must | |
| 276 // open the stream in the mix format that is currently in use by the audio | |
| 277 // engine (or a format that is similar to the mix format). The audio engine's | |
| 278 // input streams and the output mix from the engine are all in this format. | |
| 279 if (!DesiredFormatIsSupported()) { | |
| 280 return false; | |
| 281 } | |
| 282 | |
| 283 // Initialize the audio stream between the client and the device using | |
| 284 // shared or exclusive mode and a lowest possible glitch-free latency. | |
| 285 // We will enter different code paths depending on the specified share mode. | |
| 286 hr = InitializeAudioEngine(); | |
| 287 if (FAILED(hr)) { | |
| 288 return false; | |
| 289 } | |
| 290 | 276 |
| 291 opened_ = true; | 277 opened_ = true; |
| 292 return true; | 278 return true; |
| 293 } | 279 } |
| 294 | 280 |
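
Note: Open() bails out when the endpoint buffer and the client packet size do not line up: the endpoint buffer must be an even multiple of the packet size in shared mode, and an exact match in exclusive mode. A minimal sketch of that check using the sample figures quoted in the code comment; the helper name is invented for illustration:

```cpp
#include <iostream>

// Shared mode: the endpoint buffer must be an even multiple of the packet
// size; exclusive mode requires an exact match (see Open() above).
bool TimingIsPerfect(bool shared_mode,
                     int endpoint_buffer_size_frames,
                     int packet_size_frames) {
  if (shared_mode)
    return endpoint_buffer_size_frames % packet_size_frames == 0;
  return endpoint_buffer_size_frames == packet_size_frames;
}

int main() {
  // Figures from the comment in Open(): 48 kHz => 960 % 480 == 0,
  // 44.1 kHz => 896 % 448 == 0 or 882 % 441 == 0.
  std::cout << TimingIsPerfect(true, 960, 480) << "\n";  // 1
  std::cout << TimingIsPerfect(true, 896, 448) << "\n";  // 1
  std::cout << TimingIsPerfect(true, 882, 441) << "\n";  // 1
  // A mismatch, e.g. a 441-frame packet against a 960-frame endpoint
  // buffer, makes Open() return false.
  std::cout << TimingIsPerfect(true, 960, 441) << "\n";  // 0
  return 0;
}
```
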
| 295 void WASAPIAudioOutputStream::Start(AudioSourceCallback* callback) { | 281 void WASAPIAudioOutputStream::Start(AudioSourceCallback* callback) { |
| 282 DVLOG(1) << "WASAPIAudioOutputStream::Start()"; | |
| 296 DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_); | 283 DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_); |
| 297 CHECK(callback); | 284 CHECK(callback); |
| 298 CHECK(opened_); | 285 CHECK(opened_); |
| 299 | 286 |
| 300 if (render_thread_.get()) { | 287 if (render_thread_.get()) { |
| 301 CHECK_EQ(callback, source_); | 288 CHECK_EQ(callback, source_); |
| 302 return; | 289 return; |
| 303 } | 290 } |
| 304 | 291 |
| 305 if (restart_rendering_mode_) { | |
| 306 // The selected audio device has been removed or disabled and a new | |
| 307 // default device has been enabled instead. The current implementation | |
| 308 // does not to support this sequence of events. Given that Open() | |
| 309 // and Start() are usually called in one sequence; it should be a very | |
| 310 // rare event. | |
| 311 // TODO(henrika): it is possible to extend the functionality here. | |
| 312 LOG(ERROR) << "Unable to start since the selected default device has " | |
| 313 "changed since Open() was called."; | |
| 314 return; | |
| 315 } | |
| 316 | |
| 317 source_ = callback; | 292 source_ = callback; |
| 318 | 293 |
| 319 // Avoid start-up glitches by filling up the endpoint buffer with "silence" | |
| 320 // before starting the stream. | |
| 321 BYTE* data_ptr = NULL; | |
| 322 HRESULT hr = audio_render_client_->GetBuffer(endpoint_buffer_size_frames_, | |
| 323 &data_ptr); | |
| 324 if (FAILED(hr)) { | |
| 325 DLOG(ERROR) << "Failed to use rendering audio buffer: " << std::hex << hr; | |
| 326 return; | |
| 327 } | |
| 328 | |
| 329 // Using the AUDCLNT_BUFFERFLAGS_SILENT flag eliminates the need to | |
| 330 // explicitly write silence data to the rendering buffer. | |
| 331 audio_render_client_->ReleaseBuffer(endpoint_buffer_size_frames_, | |
| 332 AUDCLNT_BUFFERFLAGS_SILENT); | |
| 333 num_written_frames_ = endpoint_buffer_size_frames_; | |
| 334 | |
| 335 // Sanity check: verify that the endpoint buffer is filled with silence. | |
| 336 UINT32 num_queued_frames = 0; | |
| 337 audio_client_->GetCurrentPadding(&num_queued_frames); | |
| 338 DCHECK(num_queued_frames == num_written_frames_); | |
| 339 | |
| 340 // Create and start the thread that will drive the rendering by waiting for | 294 // Create and start the thread that will drive the rendering by waiting for |
| 341 // render events. | 295 // render events. |
| 342 render_thread_.reset( | 296 render_thread_.reset( |
| 343 new base::DelegateSimpleThread(this, "wasapi_render_thread")); | 297 new base::DelegateSimpleThread(this, "wasapi_render_thread")); |
| 344 render_thread_->Start(); | 298 render_thread_->Start(); |
| 299 if (!render_thread_->HasBeenStarted()) { | |
| 300 DLOG(ERROR) << "Failed to start WASAPI render thread."; | |
| 301 return; | |
| 302 } | |
| 303 | |
| 304 // Ensure that the endpoint buffer is prepared with silence. | |
| 305 if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) { | |
| 306 if (!CoreAudioUtil::FillRenderEndpointBufferWithSilence( | |
| 307 audio_client_, audio_render_client_)) { | |
| 308 DLOG(WARNING) << "Failed to prepare endpoint buffers with silence."; | |
| 309 return; | |
| 310 } | |
| 311 } | |
| 312 num_written_frames_ = endpoint_buffer_size_frames_; | |
| 345 | 313 |
| 346 // Start streaming data between the endpoint buffer and the audio engine. | 314 // Start streaming data between the endpoint buffer and the audio engine. |
| 347 hr = audio_client_->Start(); | 315 HRESULT hr = audio_client_->Start(); |
| 348 if (FAILED(hr)) { | 316 if (FAILED(hr)) { |
| 349 SetEvent(stop_render_event_.Get()); | 317 SetEvent(stop_render_event_.Get()); |
| 350 render_thread_->Join(); | 318 render_thread_->Join(); |
| 351 render_thread_.reset(); | 319 render_thread_.reset(); |
|
tommi (sloooow) - chröme
2013/01/31 16:18:24
nit: prefer to indent the parameters to the functi
henrika (OOO until Aug 14)
2013/02/01 10:55:56
Got it. Now using 'C' in CoreAudioUtil as referenc
| |
| 352 HandleError(hr); | 320 HandleError(hr); |
| 353 } | 321 } |
| 354 } | 322 } |
| 355 | 323 |
| 356 void WASAPIAudioOutputStream::Stop() { | 324 void WASAPIAudioOutputStream::Stop() { |
| 325 DVLOG(1) << "WASAPIAudioOutputStream::Stop()"; | |
| 357 DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_); | 326 DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_); |
| 358 if (!render_thread_.get()) | 327 if (!render_thread_.get()) |
| 359 return; | 328 return; |
| 360 | 329 |
| 361 // Stop output audio streaming. | 330 // Stop output audio streaming. |
| 362 HRESULT hr = audio_client_->Stop(); | 331 HRESULT hr = audio_client_->Stop(); |
| 363 if (FAILED(hr)) { | 332 if (FAILED(hr)) { |
| 364 DLOG_IF(ERROR, hr != AUDCLNT_E_NOT_INITIALIZED) | 333 DLOG_IF(ERROR, hr != AUDCLNT_E_NOT_INITIALIZED) |
| 365 << "Failed to stop output streaming: " << std::hex << hr; | 334 << "Failed to stop output streaming: " << std::hex << hr; |
| 366 } | 335 } |
| (...skipping 22 matching lines...) | |
| 389 // would fail with AUDCLNT_E_BUFFER_ERROR at IAudioRenderClient::GetBuffer(). | 358 // would fail with AUDCLNT_E_BUFFER_ERROR at IAudioRenderClient::GetBuffer(). |
| 390 // This check is only needed for shared-mode streams. | 359 // This check is only needed for shared-mode streams. |
| 391 if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) { | 360 if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) { |
| 392 UINT32 num_queued_frames = 0; | 361 UINT32 num_queued_frames = 0; |
| 393 audio_client_->GetCurrentPadding(&num_queued_frames); | 362 audio_client_->GetCurrentPadding(&num_queued_frames); |
| 394 DCHECK_EQ(0u, num_queued_frames); | 363 DCHECK_EQ(0u, num_queued_frames); |
| 395 } | 364 } |
| 396 } | 365 } |
| 397 | 366 |
| 398 void WASAPIAudioOutputStream::Close() { | 367 void WASAPIAudioOutputStream::Close() { |
| 368 DVLOG(1) << "WASAPIAudioOutputStream::Close()"; | |
| 399 DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_); | 369 DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_); |
| 400 | 370 |
| 401 // It is valid to call Close() before calling Open() or Start(). | 371 // It is valid to call Close() before calling Open() or Start(). |
| 402 // It is also valid to call Close() after Start() has been called. | 372 // It is also valid to call Close() after Start() has been called. |
| 403 Stop(); | 373 Stop(); |
| 404 | 374 |
| 405 // Inform the audio manager that we have been closed. This will cause our | 375 // Inform the audio manager that we have been closed. This will cause our |
| 406 // destruction. | 376 // destruction. |
| 407 manager_->ReleaseOutputStream(this); | 377 manager_->ReleaseOutputStream(this); |
| 408 } | 378 } |
| 409 | 379 |
| 410 void WASAPIAudioOutputStream::SetVolume(double volume) { | 380 void WASAPIAudioOutputStream::SetVolume(double volume) { |
| 411 DVLOG(1) << "SetVolume(volume=" << volume << ")"; | 381 DVLOG(1) << "SetVolume(volume=" << volume << ")"; |
| 412 float volume_float = static_cast<float>(volume); | 382 float volume_float = static_cast<float>(volume); |
| 413 if (volume_float < 0.0f || volume_float > 1.0f) { | 383 if (volume_float < 0.0f || volume_float > 1.0f) { |
| 414 return; | 384 return; |
| 415 } | 385 } |
| 416 volume_ = volume_float; | 386 volume_ = volume_float; |
| 417 } | 387 } |
| 418 | 388 |
| 419 void WASAPIAudioOutputStream::GetVolume(double* volume) { | 389 void WASAPIAudioOutputStream::GetVolume(double* volume) { |
| 420 DVLOG(1) << "GetVolume()"; | 390 DVLOG(1) << "GetVolume()"; |
| 421 *volume = static_cast<double>(volume_); | 391 *volume = static_cast<double>(volume_); |
| 422 } | 392 } |
| 423 | 393 |
| 424 // static | |
| 425 int WASAPIAudioOutputStream::HardwareChannelCount() { | |
| 426 // Use a WAVEFORMATEXTENSIBLE structure since it can specify both the | |
| 427 // number of channels and the mapping of channels to speakers for | |
| 428 // multichannel devices. | |
| 429 base::win::ScopedCoMem<WAVEFORMATPCMEX> format_ex; | |
| 430 HRESULT hr = GetMixFormat( | |
| 431 eConsole, reinterpret_cast<WAVEFORMATEX**>(&format_ex)); | |
| 432 if (FAILED(hr)) | |
| 433 return 0; | |
| 434 | |
| 435 // Number of channels in the stream. Corresponds to the number of bits | |
| 436 // set in the dwChannelMask. | |
| 437 DVLOG(1) << "endpoint channels (out): " << format_ex->Format.nChannels; | |
| 438 | |
| 439 return static_cast<int>(format_ex->Format.nChannels); | |
| 440 } | |
| 441 | |
| 442 // static | |
| 443 ChannelLayout WASAPIAudioOutputStream::HardwareChannelLayout() { | |
| 444 return ChannelConfigToChannelLayout(GetChannelConfig()); | |
| 445 } | |
| 446 | |
| 447 // static | |
| 448 int WASAPIAudioOutputStream::HardwareSampleRate(ERole device_role) { | |
| 449 base::win::ScopedCoMem<WAVEFORMATEX> format; | |
| 450 HRESULT hr = GetMixFormat(device_role, &format); | |
| 451 if (FAILED(hr)) | |
| 452 return 0; | |
| 453 | |
| 454 DVLOG(2) << "nSamplesPerSec: " << format->nSamplesPerSec; | |
| 455 return static_cast<int>(format->nSamplesPerSec); | |
| 456 } | |
| 457 | |
| 458 void WASAPIAudioOutputStream::Run() { | 394 void WASAPIAudioOutputStream::Run() { |
| 459 ScopedCOMInitializer com_init(ScopedCOMInitializer::kMTA); | 395 ScopedCOMInitializer com_init(ScopedCOMInitializer::kMTA); |
| 460 | 396 |
| 461 // Increase the thread priority. | 397 // Increase the thread priority. |
| 462 render_thread_->SetThreadPriority(base::kThreadPriority_RealtimeAudio); | 398 render_thread_->SetThreadPriority(base::kThreadPriority_RealtimeAudio); |
| 463 | 399 |
| 464 // Enable MMCSS to ensure that this thread receives prioritized access to | 400 // Enable MMCSS to ensure that this thread receives prioritized access to |
| 465 // CPU resources. | 401 // CPU resources. |
| 466 DWORD task_index = 0; | 402 DWORD task_index = 0; |
| 467 HANDLE mm_task = avrt::AvSetMmThreadCharacteristics(L"Pro Audio", | 403 HANDLE mm_task = avrt::AvSetMmThreadCharacteristics(L"Pro Audio", |
| (...skipping 39 matching lines...) | |
| 507 FALSE, | 443 FALSE, |
| 508 INFINITE); | 444 INFINITE); |
| 509 | 445 |
| 510 switch (wait_result) { | 446 switch (wait_result) { |
| 511 case WAIT_OBJECT_0 + 0: | 447 case WAIT_OBJECT_0 + 0: |
| 512 // |stop_render_event_| has been set. | 448 // |stop_render_event_| has been set. |
| 513 playing = false; | 449 playing = false; |
| 514 break; | 450 break; |
| 515 case WAIT_OBJECT_0 + 1: | 451 case WAIT_OBJECT_0 + 1: |
| 516 { | 452 { |
| 453 TRACE_EVENT0("audio", "WASAPIAudioOutputStream::Run"); | |
| 454 | |
| 517 // |audio_samples_render_event_| has been set. | 455 // |audio_samples_render_event_| has been set. |
| 518 UINT32 num_queued_frames = 0; | 456 UINT32 num_queued_frames = 0; |
| 519 uint8* audio_data = NULL; | 457 uint8* audio_data = NULL; |
| 520 | 458 |
| 521 // Contains how much new data we can write to the buffer without | 459 // Contains how much new data we can write to the buffer without |
| 522 // the risk of overwriting previously written data that the audio | 460 // the risk of overwriting previously written data that the audio |
| 523 // engine has not yet read from the buffer. | 461 // engine has not yet read from the buffer. |
| 524 size_t num_available_frames = 0; | 462 size_t num_available_frames = 0; |
| 525 | 463 |
| 526 if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) { | 464 if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) { |
| 527 // Get the padding value which represents the amount of rendering | 465 // Get the padding value which represents the amount of rendering |
| 528 // data that is queued up to play in the endpoint buffer. | 466 // data that is queued up to play in the endpoint buffer. |
| 529 hr = audio_client_->GetCurrentPadding(&num_queued_frames); | 467 hr = audio_client_->GetCurrentPadding(&num_queued_frames); |
| 530 num_available_frames = | 468 num_available_frames = |
| 531 endpoint_buffer_size_frames_ - num_queued_frames; | 469 endpoint_buffer_size_frames_ - num_queued_frames; |
| 532 } else { | 470 } else { |
| 533 // While the stream is running, the system alternately sends one | 471 // While the stream is running, the system alternately sends one |
| 534 // buffer or the other to the client. This form of double buffering | 472 // buffer or the other to the client. This form of double buffering |
| 535 // is referred to as "ping-ponging". Each time the client receives | 473 // is referred to as "ping-ponging". Each time the client receives |
| 536 // a buffer from the system (triggers this event) the client must | 474 // a buffer from the system (triggers this event) the client must |
| 537 // process the entire buffer. Calls to the GetCurrentPadding method | 475 // process the entire buffer. Calls to the GetCurrentPadding method |
| 538 // are unnecessary because the packet size must always equal the | 476 // are unnecessary because the packet size must always equal the |
| 539 // buffer size. In contrast to the shared mode buffering scheme, | 477 // buffer size. In contrast to the shared mode buffering scheme, |
| 540 // the latency for an event-driven, exclusive-mode stream depends | 478 // the latency for an event-driven, exclusive-mode stream depends |
| 541 // directly on the buffer size. | 479 // directly on the buffer size. |
| 542 num_available_frames = endpoint_buffer_size_frames_; | 480 num_available_frames = endpoint_buffer_size_frames_; |
| 543 } | 481 } |
| 482 if (FAILED(hr)) { | |
| 483 DLOG(ERROR) << "Failed to retrieve amount of available space: " | |
| 484 << std::hex << hr; | |
| 485 continue; | |
| 486 } | |
| 544 | 487 |
| 545 // Check if there is enough available space to fit the packet size | 488 // It is my current assumption that we will always end up with a |
| 546 // specified by the client. | 489 // perfect match here where the packet size is identical to what |
| 547 if (FAILED(hr) || (num_available_frames < packet_size_frames_)) | 490 // the audio engine needs (num_available_frames). I am adding a |
| 491 // DLOG to be able to track down any deviations from this theory. | |
| 492 if ((num_available_frames > 0) && | |
| 493 (num_available_frames != packet_size_frames_)) { | |
| 494 DLOG(WARNING) << "Non-perfect timing case detected."; | |
| 548 continue; | 495 continue; |
| 496 } | |
| 549 | 497 |
| 550 // Derive the number of packets we need get from the client to | 498 // Grab all available space in the rendering endpoint buffer |
| 551 // fill up the available area in the endpoint buffer. | 499 // into which the client can write a data packet. |
| 552 // |num_packets| will always be one for exclusive-mode streams. | 500 hr = audio_render_client_->GetBuffer(packet_size_frames_, |
| 553 size_t num_packets = (num_available_frames / packet_size_frames_); | 501 &audio_data); |
| 502 if (FAILED(hr)) { | |
| 503 DLOG(ERROR) << "Failed to use rendering audio buffer: " | |
| 504 << std::hex << hr; | |
| 505 continue; | |
| 506 } | |
| 554 | 507 |
| 555 // Get data from the client/source. | 508 // Derive the audio delay which corresponds to the delay between |
| 556 for (size_t n = 0; n < num_packets; ++n) { | 509 // a render event and the time when the first audio sample in a |
| 557 // Grab all available space in the rendering endpoint buffer | 510 // packet is played out through the speaker. This delay value |
| 558 // into which the client can write a data packet. | 511 // can typically be utilized by an acoustic echo-control (AEC) |
| 559 hr = audio_render_client_->GetBuffer(packet_size_frames_, | 512 // unit at the render side. |
| 560 &audio_data); | 513 UINT64 position = 0; |
| 561 if (FAILED(hr)) { | 514 int audio_delay_bytes = 0; |
| 562 DLOG(ERROR) << "Failed to use rendering audio buffer: " | 515 hr = audio_clock->GetPosition(&position, NULL); |
| 563 << std::hex << hr; | 516 if (SUCCEEDED(hr)) { |
| 564 continue; | 517 // Stream position of the sample that is currently playing |
| 565 } | 518 // through the speaker. |
| 519 double pos_sample_playing_frames = format_.Format.nSamplesPerSec * | |
| 520 (static_cast<double>(position) / device_frequency); | |
| 566 | 521 |
| 567 // Derive the audio delay which corresponds to the delay between | 522 // Stream position of the last sample written to the endpoint |
| 568 // a render event and the time when the first audio sample in a | 523 // buffer. Note that, the packet we are about to receive in |
| 569 // packet is played out through the speaker. This delay value | 524 // the upcoming callback is also included. |
| 570 // can typically be utilized by an acoustic echo-control (AEC) | 525 size_t pos_last_sample_written_frames = |
| 571 // unit at the render side. | 526 num_written_frames_ + packet_size_frames_; |
| 572 UINT64 position = 0; | |
| 573 int audio_delay_bytes = 0; | |
| 574 hr = audio_clock->GetPosition(&position, NULL); | |
| 575 if (SUCCEEDED(hr)) { | |
| 576 // Stream position of the sample that is currently playing | |
| 577 // through the speaker. | |
| 578 double pos_sample_playing_frames = format_.Format.nSamplesPerSec * | |
| 579 (static_cast<double>(position) / device_frequency); | |
| 580 | 527 |
| 581 // Stream position of the last sample written to the endpoint | 528 // Derive the actual delay value which will be fed to the |
| 582 // buffer. Note that, the packet we are about to receive in | 529 // render client using the OnMoreData() callback. |
| 583 // the upcoming callback is also included. | 530 audio_delay_bytes = (pos_last_sample_written_frames - |
| 584 size_t pos_last_sample_written_frames = | 531 pos_sample_playing_frames) * format_.Format.nBlockAlign; |
| 585 num_written_frames_ + packet_size_frames_; | 532 } |
| 586 | 533 |
| 587 // Derive the actual delay value which will be fed to the | 534 // Read a data packet from the registered client source and |
| 588 // render client using the OnMoreData() callback. | 535 // deliver a delay estimate in the same callback to the client. |
| 589 audio_delay_bytes = (pos_last_sample_written_frames - | 536 // A time stamp is also stored in the AudioBuffersState. This |
| 590 pos_sample_playing_frames) * frame_size_; | 537 // time stamp can be used at the client side to compensate for |
| 591 } | 538 // the delay between the usage of the delay value and the time |
| 539 // of generation. | |
| 592 | 540 |
| 593 // Read a data packet from the registered client source and | 541 uint32 num_filled_bytes = 0; |
| 594 // deliver a delay estimate in the same callback to the client. | 542 const int bytes_per_sample = format_.Format.wBitsPerSample >> 3; |
| 595 // A time stamp is also stored in the AudioBuffersState. This | |
| 596 // time stamp can be used at the client side to compensate for | |
| 597 // the delay between the usage of the delay value and the time | |
| 598 // of generation. | |
| 599 | 543 |
| 600 uint32 num_filled_bytes = 0; | 544 int frames_filled = source_->OnMoreData( |
| 601 const int bytes_per_sample = format_.Format.wBitsPerSample >> 3; | 545 audio_bus_.get(), AudioBuffersState(0, audio_delay_bytes)); |
| 546 num_filled_bytes = frames_filled * format_.Format.nBlockAlign; | |
| 547 DCHECK_LE(num_filled_bytes, packet_size_bytes_); | |
| 602 | 548 |
| 603 int frames_filled = source_->OnMoreData( | 549 // Note: If this ever changes to output raw float the data must be |
| 604 audio_bus_.get(), AudioBuffersState(0, audio_delay_bytes)); | 550 // clipped and sanitized since it may come from an untrusted |
| 605 num_filled_bytes = frames_filled * frame_size_; | 551 // source such as NaCl. |
| 606 DCHECK_LE(num_filled_bytes, packet_size_bytes_); | 552 audio_bus_->ToInterleaved( |
| 607 // Note: If this ever changes to output raw float the data must be | 553 frames_filled, bytes_per_sample, audio_data); |
| 608 // clipped and sanitized since it may come from an untrusted | |
| 609 // source such as NaCl. | |
| 610 audio_bus_->ToInterleaved( | |
| 611 frames_filled, bytes_per_sample, audio_data); | |
| 612 | 554 |
| 613 // Perform in-place, software-volume adjustments. | 555 // Perform in-place, software-volume adjustments. |
| 614 media::AdjustVolume(audio_data, | 556 media::AdjustVolume(audio_data, |
| 615 num_filled_bytes, | 557 num_filled_bytes, |
| 616 audio_bus_->channels(), | 558 audio_bus_->channels(), |
| 617 bytes_per_sample, | 559 bytes_per_sample, |
| 618 volume_); | 560 volume_); |
| 619 | 561 |
| 620 // Zero out the part of the packet which has not been filled by | 562 // Zero out the part of the packet which has not been filled by |
| 621 // the client. Using silence is the least bad option in this | 563 // the client. Using silence is the least bad option in this |
| 622 // situation. | 564 // situation. |
| 623 if (num_filled_bytes < packet_size_bytes_) { | 565 if (num_filled_bytes < packet_size_bytes_) { |
| 624 memset(&audio_data[num_filled_bytes], 0, | 566 memset(&audio_data[num_filled_bytes], 0, |
| 625 (packet_size_bytes_ - num_filled_bytes)); | 567 (packet_size_bytes_ - num_filled_bytes)); |
| 626 } | 568 } |
| 627 | 569 |
| 628 // Release the buffer space acquired in the GetBuffer() call. | 570 // Release the buffer space acquired in the GetBuffer() call. |
| 629 DWORD flags = 0; | 571 DWORD flags = 0; |
| 630 audio_render_client_->ReleaseBuffer(packet_size_frames_, | 572 audio_render_client_->ReleaseBuffer(packet_size_frames_, |
| 631 flags); | 573 flags); |
| 632 | 574 |
| 633 num_written_frames_ += packet_size_frames_; | 575 num_written_frames_ += packet_size_frames_; |
| 634 } | |
| 635 } | 576 } |
| 636 break; | 577 break; |
| 637 default: | 578 default: |
| 638 error = true; | 579 error = true; |
| 639 break; | 580 break; |
| 640 } | 581 } |
| 641 } | 582 } |
| 642 | 583 |
| 643 if (playing && error) { | 584 if (playing && error) { |
| 644 // Stop audio rendering since something has gone wrong in our main thread | 585 // Stop audio rendering since something has gone wrong in our main thread |
| (...skipping 10 matching lines...) | |
| 655 } | 596 } |
| 656 | 597 |
| 657 void WASAPIAudioOutputStream::HandleError(HRESULT err) { | 598 void WASAPIAudioOutputStream::HandleError(HRESULT err) { |
| 658 CHECK((started() && GetCurrentThreadId() == render_thread_->tid()) || | 599 CHECK((started() && GetCurrentThreadId() == render_thread_->tid()) || |
| 659 (!started() && GetCurrentThreadId() == creating_thread_id_)); | 600 (!started() && GetCurrentThreadId() == creating_thread_id_)); |
| 660 NOTREACHED() << "Error code: " << std::hex << err; | 601 NOTREACHED() << "Error code: " << std::hex << err; |
| 661 if (source_) | 602 if (source_) |
| 662 source_->OnError(this, static_cast<int>(err)); | 603 source_->OnError(this, static_cast<int>(err)); |
| 663 } | 604 } |
| 664 | 605 |
| 665 HRESULT WASAPIAudioOutputStream::SetRenderDevice() { | 606 HRESULT WASAPIAudioOutputStream::ExclusiveModeInitialization( |
| 666 ScopedComPtr<IMMDeviceEnumerator> device_enumerator; | 607 IAudioClient* client, HANDLE event_handle, size_t* endpoint_buffer_size) { |
| 667 ScopedComPtr<IMMDevice> endpoint_device; | |
| 668 | |
| 669 // Create the IMMDeviceEnumerator interface. | |
| 670 HRESULT hr = CoCreateInstance(__uuidof(MMDeviceEnumerator), | |
| 671 NULL, | |
| 672 CLSCTX_INPROC_SERVER, | |
| 673 __uuidof(IMMDeviceEnumerator), | |
| 674 device_enumerator.ReceiveVoid()); | |
| 675 if (SUCCEEDED(hr)) { | |
| 676 // Retrieve the default render audio endpoint for the specified role. | |
| 677 // Note that, in Windows Vista, the MMDevice API supports device roles | |
| 678 // but the system-supplied user interface programs do not. | |
| 679 hr = device_enumerator->GetDefaultAudioEndpoint( | |
| 680 eRender, device_role_, endpoint_device.Receive()); | |
| 681 if (FAILED(hr)) | |
| 682 return hr; | |
| 683 | |
| 684 // Verify that the audio endpoint device is active. That is, the audio | |
| 685 // adapter that connects to the endpoint device is present and enabled. | |
| 686 DWORD state = DEVICE_STATE_DISABLED; | |
| 687 hr = endpoint_device->GetState(&state); | |
| 688 if (SUCCEEDED(hr)) { | |
| 689 if (!(state & DEVICE_STATE_ACTIVE)) { | |
| 690 DLOG(ERROR) << "Selected render device is not active."; | |
| 691 hr = E_ACCESSDENIED; | |
| 692 } | |
| 693 } | |
| 694 } | |
| 695 | |
| 696 if (SUCCEEDED(hr)) { | |
| 697 device_enumerator_ = device_enumerator; | |
| 698 endpoint_device_ = endpoint_device; | |
| 699 } | |
| 700 | |
| 701 return hr; | |
| 702 } | |
| 703 | |
| 704 HRESULT WASAPIAudioOutputStream::ActivateRenderDevice() { | |
| 705 ScopedComPtr<IAudioClient> audio_client; | |
| 706 | |
| 707 // Creates and activates an IAudioClient COM object given the selected | |
| 708 // render endpoint device. | |
| 709 HRESULT hr = endpoint_device_->Activate(__uuidof(IAudioClient), | |
| 710 CLSCTX_INPROC_SERVER, | |
| 711 NULL, | |
| 712 audio_client.ReceiveVoid()); | |
| 713 if (SUCCEEDED(hr)) { | |
| 714 // Retrieve the stream format that the audio engine uses for its internal | |
| 715 // processing/mixing of shared-mode streams. | |
| 716 audio_engine_mix_format_.Reset(NULL); | |
| 717 hr = audio_client->GetMixFormat( | |
| 718 reinterpret_cast<WAVEFORMATEX**>(&audio_engine_mix_format_)); | |
| 719 | |
| 720 if (SUCCEEDED(hr)) { | |
| 721 audio_client_ = audio_client; | |
| 722 } | |
| 723 } | |
| 724 | |
| 725 return hr; | |
| 726 } | |
| 727 | |
| 728 bool WASAPIAudioOutputStream::DesiredFormatIsSupported() { | |
| 729 // Determine, before calling IAudioClient::Initialize(), whether the audio | |
| 730 // engine supports a particular stream format. | |
| 731 // In shared mode, the audio engine always supports the mix format, | |
| 732 // which is stored in the |audio_engine_mix_format_| member and it is also | |
| 733 // possible to receive a proposed (closest) format if the current format is | |
| 734 // not supported. | |
| 735 base::win::ScopedCoMem<WAVEFORMATEXTENSIBLE> closest_match; | |
| 736 HRESULT hr = audio_client_->IsFormatSupported( | |
| 737 share_mode_, reinterpret_cast<WAVEFORMATEX*>(&format_), | |
| 738 reinterpret_cast<WAVEFORMATEX**>(&closest_match)); | |
| 739 | |
| 740 // This log can only be triggered for shared mode. | |
| 741 DLOG_IF(ERROR, hr == S_FALSE) << "Format is not supported " | |
| 742 << "but a closest match exists."; | |
| 743 // This log can be triggered both for shared and exclusive modes. | |
| 744 DLOG_IF(ERROR, hr == AUDCLNT_E_UNSUPPORTED_FORMAT) << "Unsupported format."; | |
| 745 if (hr == S_FALSE) { | |
| 746 DVLOG(1) << "wFormatTag : " << closest_match->Format.wFormatTag; | |
| 747 DVLOG(1) << "nChannels : " << closest_match->Format.nChannels; | |
| 748 DVLOG(1) << "nSamplesPerSec: " << closest_match->Format.nSamplesPerSec; | |
| 749 DVLOG(1) << "wBitsPerSample: " << closest_match->Format.wBitsPerSample; | |
| 750 } | |
| 751 | |
| 752 return (hr == S_OK); | |
| 753 } | |
| 754 | |
| 755 HRESULT WASAPIAudioOutputStream::InitializeAudioEngine() { | |
| 756 #if !defined(NDEBUG) | |
| 757 // The period between processing passes by the audio engine is fixed for a | |
| 758 // particular audio endpoint device and represents the smallest processing | |
| 759 // quantum for the audio engine. This period plus the stream latency between | |
| 760 // the buffer and endpoint device represents the minimum possible latency | |
| 761 // that an audio application can achieve in shared mode. | |
| 762 { | |
| 763 REFERENCE_TIME default_device_period = 0; | |
| 764 REFERENCE_TIME minimum_device_period = 0; | |
| 765 HRESULT hr_dbg = audio_client_->GetDevicePeriod(&default_device_period, | |
| 766 &minimum_device_period); | |
| 767 if (SUCCEEDED(hr_dbg)) { | |
| 768 // Shared mode device period. | |
| 769 DVLOG(1) << "shared mode (default) device period: " | |
| 770 << static_cast<double>(default_device_period / 10000.0) | |
| 771 << " [ms]"; | |
| 772 // Exclusive mode device period. | |
| 773 DVLOG(1) << "exclusive mode (minimum) device period: " | |
| 774 << static_cast<double>(minimum_device_period / 10000.0) | |
| 775 << " [ms]"; | |
| 776 } | |
| 777 | |
| 778 REFERENCE_TIME latency = 0; | |
| 779 hr_dbg = audio_client_->GetStreamLatency(&latency); | |
| 780 if (SUCCEEDED(hr_dbg)) { | |
| 781 DVLOG(1) << "stream latency: " << static_cast<double>(latency / 10000.0) | |
| 782 << " [ms]"; | |
| 783 } | |
| 784 } | |
| 785 #endif | |
| 786 | |
| 787 HRESULT hr = S_FALSE; | |
| 788 | |
| 789 // Perform different initialization depending on if the device shall be | |
| 790 // opened in shared mode or in exclusive mode. | |
| 791 hr = (share_mode_ == AUDCLNT_SHAREMODE_SHARED) ? | |
| 792 SharedModeInitialization() : ExclusiveModeInitialization(); | |
| 793 if (FAILED(hr)) { | |
| 794 LOG(WARNING) << "IAudioClient::Initialize() failed: " << std::hex << hr; | |
| 795 return hr; | |
| 796 } | |
| 797 | |
| 798 // Retrieve the length of the endpoint buffer. The buffer length represents | |
| 799 // the maximum amount of rendering data that the client can write to | |
| 800 // the endpoint buffer during a single processing pass. | |
| 801 // A typical value is 960 audio frames <=> 20ms @ 48kHz sample rate. | |
| 802 hr = audio_client_->GetBufferSize(&endpoint_buffer_size_frames_); | |
| 803 if (FAILED(hr)) | |
| 804 return hr; | |
| 805 DVLOG(1) << "endpoint buffer size: " << endpoint_buffer_size_frames_ | |
| 806 << " [frames]"; | |
| 807 | |
| 808 // The buffer scheme for exclusive mode streams is not designed for max | |
| 809 // flexibility. We only allow a "perfect match" between the packet size set | |
| 810 // by the user and the actual endpoint buffer size. | |
| 811 if (share_mode_ == AUDCLNT_SHAREMODE_EXCLUSIVE && | |
| 812 endpoint_buffer_size_frames_ != packet_size_frames_) { | |
| 813 hr = AUDCLNT_E_INVALID_SIZE; | |
| 814 DLOG(ERROR) << "AUDCLNT_E_INVALID_SIZE"; | |
| 815 return hr; | |
| 816 } | |
| 817 | |
| 818 // Set the event handle that the audio engine will signal each time | |
| 819 // a buffer becomes ready to be processed by the client. | |
| 820 hr = audio_client_->SetEventHandle(audio_samples_render_event_.Get()); | |
| 821 if (FAILED(hr)) | |
| 822 return hr; | |
| 823 | |
| 824 // Get access to the IAudioRenderClient interface. This interface | |
| 825 // enables us to write output data to a rendering endpoint buffer. | |
| 826 // The methods in this interface manage the movement of data packets | |
| 827 // that contain audio-rendering data. | |
| 828 hr = audio_client_->GetService(__uuidof(IAudioRenderClient), | |
| 829 audio_render_client_.ReceiveVoid()); | |
| 830 return hr; | |
| 831 } | |
| 832 | |
| 833 HRESULT WASAPIAudioOutputStream::SharedModeInitialization() { | |
| 834 DCHECK_EQ(share_mode_, AUDCLNT_SHAREMODE_SHARED); | |
| 835 | |
| 836 // TODO(henrika): this buffer scheme is still under development. | |
| 837 // The exact details are yet to be determined based on tests with different | |
| 838 // audio clients. | |
| 839 int glitch_free_buffer_size_ms = static_cast<int>(packet_size_ms_ + 0.5); | |
| 840 if (audio_engine_mix_format_->Format.nSamplesPerSec % 8000 == 0) { | |
| 841 // Initial tests have shown that we have to add 10 ms extra to | |
| 842 // ensure that we don't run empty for any packet size. | |
| 843 glitch_free_buffer_size_ms += 10; | |
| 844 } else if (audio_engine_mix_format_->Format.nSamplesPerSec % 11025 == 0) { | |
| 845 // Initial tests have shown that we have to add 20 ms extra to | |
| 846 // ensure that we don't run empty for any packet size. | |
| 847 glitch_free_buffer_size_ms += 20; | |
| 848 } else { | |
| 849 DLOG(WARNING) << "Unsupported sample rate " | |
| 850 << audio_engine_mix_format_->Format.nSamplesPerSec << " detected"; | |
| 851 glitch_free_buffer_size_ms += 20; | |
| 852 } | |
| 853 DVLOG(1) << "glitch_free_buffer_size_ms: " << glitch_free_buffer_size_ms; | |
| 854 REFERENCE_TIME requested_buffer_duration = | |
| 855 static_cast<REFERENCE_TIME>(glitch_free_buffer_size_ms * 10000); | |
| 856 | |
| 857 // Initialize the audio stream between the client and the device. | |
| 858 // We connect indirectly through the audio engine by using shared mode | |
| 859 // and WASAPI is initialized in an event driven mode. | |
| 860 // Note that this API ensures that the buffer is never smaller than the | |
| 861 // minimum buffer size needed to ensure glitch-free rendering. | |
| 862 // If we requests a buffer size that is smaller than the audio engine's | |
| 863 // minimum required buffer size, the method sets the buffer size to this | |
| 864 // minimum buffer size rather than to the buffer size requested. | |
| 865 HRESULT hr = S_FALSE; | |
| 866 hr = audio_client_->Initialize(AUDCLNT_SHAREMODE_SHARED, | |
| 867 AUDCLNT_STREAMFLAGS_EVENTCALLBACK | | |
| 868 AUDCLNT_STREAMFLAGS_NOPERSIST, | |
| 869 requested_buffer_duration, | |
| 870 0, | |
| 871 reinterpret_cast<WAVEFORMATEX*>(&format_), | |
| 872 NULL); | |
| 873 return hr; | |
| 874 } | |
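The requested_buffer_duration computation above relies on REFERENCE_TIME being expressed in 100-nanosecond units, so one millisecond equals 10,000 units. A small sketch of that conversion (the helper name is illustrative only, not part of the CL):

```cpp
#include <audioclient.h>  // Declares REFERENCE_TIME (LONGLONG, 100 ns units).

// 1 ms == 10,000 REFERENCE_TIME units of 100 ns each.
REFERENCE_TIME MillisecondsToReferenceTime(int milliseconds) {
  return static_cast<REFERENCE_TIME>(milliseconds) * 10000;
}

// Example: a hypothetical 10 ms packet on a 48 kHz mix format gets 10 ms of
// extra headroom added above, so the requested duration would be
// MillisecondsToReferenceTime(20) == 200000 units.
```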
| 875 | |
| 876 HRESULT WASAPIAudioOutputStream::ExclusiveModeInitialization() { | |
| 877 DCHECK_EQ(share_mode_, AUDCLNT_SHAREMODE_EXCLUSIVE); | 608 DCHECK_EQ(share_mode_, AUDCLNT_SHAREMODE_EXCLUSIVE); |
| 878 | 609 |
| 879 float f = (1000.0 * packet_size_frames_) / format_.Format.nSamplesPerSec; | 610 float f = (1000.0 * packet_size_frames_) / format_.Format.nSamplesPerSec; |
| 880 REFERENCE_TIME requested_buffer_duration = | 611 REFERENCE_TIME requested_buffer_duration = |
| 881 static_cast<REFERENCE_TIME>(f * 10000.0 + 0.5); | 612 static_cast<REFERENCE_TIME>(f * 10000.0 + 0.5); |
| 882 | 613 |
| 614 DWORD stream_flags = AUDCLNT_STREAMFLAGS_NOPERSIST; | |
| 615 bool use_event = (event_handle != NULL && | |
| 616 event_handle != INVALID_HANDLE_VALUE); | |
| 617 if (use_event) | |
| 618 stream_flags |= AUDCLNT_STREAMFLAGS_EVENTCALLBACK; | |
| 619 DVLOG(2) << "stream_flags: 0x" << std::hex << stream_flags; | |
| 620 | |
| 883 // Initialize the audio stream between the client and the device. | 621 // Initialize the audio stream between the client and the device. |
| 884 // For an exclusive-mode stream that uses event-driven buffering, the | 622 // For an exclusive-mode stream that uses event-driven buffering, the |
| 885 // caller must specify nonzero values for hnsPeriodicity and | 623 // caller must specify nonzero values for hnsPeriodicity and |
| 886 // hnsBufferDuration, and the values of these two parameters must be equal. | 624 // hnsBufferDuration, and the values of these two parameters must be equal. |
| 887 // The Initialize method allocates two buffers for the stream. Each buffer | 625 // The Initialize method allocates two buffers for the stream. Each buffer |
| 888 // is equal in duration to the value of the hnsBufferDuration parameter. | 626 // is equal in duration to the value of the hnsBufferDuration parameter. |
| 889 // Following the Initialize call for a rendering stream, the caller should | 627 // Following the Initialize call for a rendering stream, the caller should |
| 890 // fill the first of the two buffers before starting the stream. | 628 // fill the first of the two buffers before starting the stream. |
| 891 HRESULT hr = S_FALSE; | 629 HRESULT hr = S_FALSE; |
| 892 hr = audio_client_->Initialize(AUDCLNT_SHAREMODE_EXCLUSIVE, | 630 hr = client->Initialize(AUDCLNT_SHAREMODE_EXCLUSIVE, |
| 893 AUDCLNT_STREAMFLAGS_EVENTCALLBACK | | 631 stream_flags, |
| 894 AUDCLNT_STREAMFLAGS_NOPERSIST, | 632 requested_buffer_duration, |
| 895 requested_buffer_duration, | 633 requested_buffer_duration, |
| 896 requested_buffer_duration, | 634 reinterpret_cast<WAVEFORMATEX*>(&format_), |
| 897 reinterpret_cast<WAVEFORMATEX*>(&format_), | 635 NULL); |
| 898 NULL); | |
| 899 if (FAILED(hr)) { | 636 if (FAILED(hr)) { |
| 900 if (hr == AUDCLNT_E_BUFFER_SIZE_NOT_ALIGNED) { | 637 if (hr == AUDCLNT_E_BUFFER_SIZE_NOT_ALIGNED) { |
| 901 LOG(ERROR) << "AUDCLNT_E_BUFFER_SIZE_NOT_ALIGNED"; | 638 LOG(ERROR) << "AUDCLNT_E_BUFFER_SIZE_NOT_ALIGNED"; |
| 902 | 639 |
| 903 UINT32 aligned_buffer_size = 0; | 640 UINT32 aligned_buffer_size = 0; |
| 904 audio_client_->GetBufferSize(&aligned_buffer_size); | 641 client->GetBufferSize(&aligned_buffer_size); |
| 905 DVLOG(1) << "Use aligned buffer size instead: " << aligned_buffer_size; | 642 DVLOG(1) << "Use aligned buffer size instead: " << aligned_buffer_size; |
| 906 audio_client_.Release(); | |
| 907 | 643 |
| 908 // Calculate new aligned periodicity. Each unit of reference time | 644 // Calculate new aligned periodicity. Each unit of reference time |
| 909 // is 100 nanoseconds. | 645 // is 100 nanoseconds. |
| 910 REFERENCE_TIME aligned_buffer_duration = static_cast<REFERENCE_TIME>( | 646 REFERENCE_TIME aligned_buffer_duration = static_cast<REFERENCE_TIME>( |
| 911 (10000000.0 * aligned_buffer_size / format_.Format.nSamplesPerSec) | 647 (10000000.0 * aligned_buffer_size / format_.Format.nSamplesPerSec) |
| 912 + 0.5); | 648 + 0.5); |
| 913 | 649 |
| 914 // It is possible to re-activate and re-initialize the audio client | 650 // It is possible to re-activate and re-initialize the audio client |
| 915 // at this stage, but we bail out with an error code instead and | 651 // at this stage, but we bail out with an error code instead and |
| 916 // log a message that informs about the suggested | 652 // log a message that informs about the suggested |
| 917 // aligned buffer size that should be used. | 653 // aligned buffer size that should be used. |
| 918 DVLOG(1) << "aligned_buffer_duration: " | 654 DVLOG(1) << "aligned_buffer_duration: " |
| 919 << static_cast<double>(aligned_buffer_duration / 10000.0) | 655 << static_cast<double>(aligned_buffer_duration / 10000.0) |
| 920 << " [ms]"; | 656 << " [ms]"; |
| 921 } else if (hr == AUDCLNT_E_INVALID_DEVICE_PERIOD) { | 657 } else if (hr == AUDCLNT_E_INVALID_DEVICE_PERIOD) { |
| 922 // We will get this error if we try to use a smaller buffer size than | 658 // We will get this error if we try to use a smaller buffer size than |
| 923 // the minimum supported size (usually ~3ms on Windows 7). | 659 // the minimum supported size (usually ~3ms on Windows 7). |
| 924 LOG(ERROR) << "AUDCLNT_E_INVALID_DEVICE_PERIOD"; | 660 LOG(ERROR) << "AUDCLNT_E_INVALID_DEVICE_PERIOD"; |
| 925 } | 661 } |
| 662 return hr; | |
| 926 } | 663 } |
| 927 | 664 |
| 665 if (use_event) { | |
| 666 hr = client->SetEventHandle(event_handle); | |
| 667 if (FAILED(hr)) { | |
| 668 DVLOG(1) << "IAudioClient::SetEventHandle: " << std::hex << hr; | |
| 669 return hr; | |
| 670 } | |
| 671 } | |
| 672 | |
| 673 UINT32 buffer_size_in_frames = 0; | |
| 674 hr = client->GetBufferSize(&buffer_size_in_frames); | |
| 675 if (FAILED(hr)) { | |
| 676 DVLOG(1) << "IAudioClient::GetBufferSize: " << std::hex << hr; | |
| 677 return hr; | |
| 678 } | |
| 679 | |
| 680 *endpoint_buffer_size = static_cast<size_t>(buffer_size_in_frames); | |
| 681 DVLOG(2) << "endpoint buffer size: " << buffer_size_in_frames; | |
| 928 return hr; | 682 return hr; |
| 929 } | 683 } |
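The AUDCLNT_E_BUFFER_SIZE_NOT_ALIGNED branch above converts the aligned frame count back into a REFERENCE_TIME duration as 10,000,000 * frames / sample_rate. Here is a standalone sketch of that conversion with a worked example; the helper name and the numbers are hypothetical, not taken from the CL.

```cpp
#include <audioclient.h>

// Converts a buffer size reported by IAudioClient::GetBufferSize() into a
// REFERENCE_TIME duration (100 ns units), rounding to the nearest unit.
REFERENCE_TIME FramesToReferenceTime(UINT32 frames, DWORD samples_per_sec) {
  return static_cast<REFERENCE_TIME>(
      (10000000.0 * frames / samples_per_sec) + 0.5);
}

// Example: an aligned buffer of 448 frames at 44100 Hz yields
// FramesToReferenceTime(448, 44100) == 101587, i.e. roughly 10.16 ms,
// which is the kind of value the DVLOG above would report.
```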
| 930 | 684 |
| 931 std::string WASAPIAudioOutputStream::GetDeviceName(LPCWSTR device_id) const { | |
| 932 std::string name; | |
| 933 ScopedComPtr<IMMDevice> audio_device; | |
| 934 | |
| 935 // Get the IMMDevice interface corresponding to the given endpoint ID string. | |
| 936 HRESULT hr = device_enumerator_->GetDevice(device_id, audio_device.Receive()); | |
| 937 if (SUCCEEDED(hr)) { | |
| 938 // Retrieve user-friendly name of endpoint device. | |
| 939 // Example: "Speakers (Realtek High Definition Audio)". | |
| 940 ScopedComPtr<IPropertyStore> properties; | |
| 941 hr = audio_device->OpenPropertyStore(STGM_READ, properties.Receive()); | |
| 942 if (SUCCEEDED(hr)) { | |
| 943 PROPVARIANT friendly_name; | |
| 944 PropVariantInit(&friendly_name); | |
| 945 hr = properties->GetValue(PKEY_Device_FriendlyName, &friendly_name); | |
| 946 if (SUCCEEDED(hr) && friendly_name.vt == VT_LPWSTR) { | |
| 947 if (friendly_name.pwszVal) | |
| 948 name = WideToUTF8(friendly_name.pwszVal); | |
| 949 } | |
| 950 PropVariantClear(&friendly_name); | |
| 951 } | |
| 952 } | |
| 953 return name; | |
| 954 } | |
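GetDeviceName() reads PKEY_Device_FriendlyName from the endpoint's property store. For readers who want to try the same lookup outside the stream class, here is a self-contained sketch that enumerates all active render endpoints; it assumes COM is already initialized on the calling thread and uses raw COM pointers instead of Chromium's ScopedComPtr helpers.

```cpp
#include <objbase.h>
#include <mmdeviceapi.h>
#include <Functiondiscoverykeys_devpkey.h>
#include <string>
#include <vector>

// Returns the friendly names of all active render endpoints, e.g.
// "Speakers (Realtek High Definition Audio)".
std::vector<std::wstring> GetActiveRenderDeviceNames() {
  std::vector<std::wstring> names;
  IMMDeviceEnumerator* enumerator = NULL;
  if (FAILED(CoCreateInstance(__uuidof(MMDeviceEnumerator), NULL, CLSCTX_ALL,
                              __uuidof(IMMDeviceEnumerator),
                              reinterpret_cast<void**>(&enumerator)))) {
    return names;
  }
  IMMDeviceCollection* collection = NULL;
  if (SUCCEEDED(enumerator->EnumAudioEndpoints(eRender, DEVICE_STATE_ACTIVE,
                                               &collection))) {
    UINT count = 0;
    collection->GetCount(&count);
    for (UINT i = 0; i < count; ++i) {
      IMMDevice* device = NULL;
      if (FAILED(collection->Item(i, &device)))
        continue;
      IPropertyStore* properties = NULL;
      if (SUCCEEDED(device->OpenPropertyStore(STGM_READ, &properties))) {
        PROPVARIANT friendly_name;
        PropVariantInit(&friendly_name);
        if (SUCCEEDED(properties->GetValue(PKEY_Device_FriendlyName,
                                           &friendly_name)) &&
            friendly_name.vt == VT_LPWSTR && friendly_name.pwszVal) {
          names.push_back(friendly_name.pwszVal);
        }
        PropVariantClear(&friendly_name);
        properties->Release();
      }
      device->Release();
    }
    collection->Release();
  }
  enumerator->Release();
  return names;
}
```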
| 955 | |
| 956 } // namespace media | 685 } // namespace media |