Chromium Code Reviews

Side by Side Diff: media/audio/win/core_audio_util_win.cc

Issue 12049070: Avoids irregular OnMoreData callbacks on Windows using Core Audio (Closed)
Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Moved FillRenderEndpointBufferWithSilence to CoreAudioUtil
Created 7 years, 10 months ago
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "media/audio/win/core_audio_util_win.h" 5 #include "media/audio/win/core_audio_util_win.h"
6 6
7 #include <Audioclient.h> 7 #include <Audioclient.h>
8 #include <Functiondiscoverykeys_devpkey.h> 8 #include <Functiondiscoverykeys_devpkey.h>
9 9
10 #include "base/command_line.h" 10 #include "base/command_line.h"
(...skipping 120 matching lines...)
131 } 131 }
132 132
133 AUDCLNT_SHAREMODE CoreAudioUtil::GetShareMode() { 133 AUDCLNT_SHAREMODE CoreAudioUtil::GetShareMode() {
134 const CommandLine* cmd_line = CommandLine::ForCurrentProcess(); 134 const CommandLine* cmd_line = CommandLine::ForCurrentProcess();
135 if (cmd_line->HasSwitch(switches::kEnableExclusiveAudio)) 135 if (cmd_line->HasSwitch(switches::kEnableExclusiveAudio))
136 return AUDCLNT_SHAREMODE_EXCLUSIVE; 136 return AUDCLNT_SHAREMODE_EXCLUSIVE;
137 return AUDCLNT_SHAREMODE_SHARED; 137 return AUDCLNT_SHAREMODE_SHARED;
138 } 138 }
139 139
140 int CoreAudioUtil::NumberOfActiveDevices(EDataFlow data_flow) { 140 int CoreAudioUtil::NumberOfActiveDevices(EDataFlow data_flow) {
141 DCHECK(CoreAudioUtil::IsSupported()); 141 DCHECK(IsSupported());
142 // Create the IMMDeviceEnumerator interface. 142 // Create the IMMDeviceEnumerator interface.
143 ScopedComPtr<IMMDeviceEnumerator> device_enumerator = 143 ScopedComPtr<IMMDeviceEnumerator> device_enumerator =
144 CreateDeviceEnumerator(); 144 CreateDeviceEnumerator();
145 if (!device_enumerator) 145 if (!device_enumerator)
146 return 0; 146 return 0;
147 147
148 // Generate a collection of active (present and not disabled) audio endpoint 148 // Generate a collection of active (present and not disabled) audio endpoint
149 // devices for the specified data-flow direction. 149 // devices for the specified data-flow direction.
150 // This method will succeed even if all devices are disabled. 150 // This method will succeed even if all devices are disabled.
151 ScopedComPtr<IMMDeviceCollection> collection; 151 ScopedComPtr<IMMDeviceCollection> collection;
152 HRESULT hr = device_enumerator->EnumAudioEndpoints(data_flow, 152 HRESULT hr = device_enumerator->EnumAudioEndpoints(data_flow,
153 DEVICE_STATE_ACTIVE, 153 DEVICE_STATE_ACTIVE,
154 collection.Receive()); 154 collection.Receive());
155 if (FAILED(hr)) { 155 if (FAILED(hr)) {
156 LOG(ERROR) << "IMMDeviceCollection::EnumAudioEndpoints: " << std::hex << hr; 156 LOG(ERROR) << "IMMDeviceCollection::EnumAudioEndpoints: " << std::hex << hr;
157 return 0; 157 return 0;
158 } 158 }
159 159
160 // Retrieve the number of active audio devices for the specified direction 160 // Retrieve the number of active audio devices for the specified direction
161 UINT number_of_active_devices = 0; 161 UINT number_of_active_devices = 0;
162 collection->GetCount(&number_of_active_devices); 162 collection->GetCount(&number_of_active_devices);
163 DVLOG(2) << ((data_flow == eCapture) ? "[in ] " : "[out] ") 163 DVLOG(2) << ((data_flow == eCapture) ? "[in ] " : "[out] ")
164 << "number of devices: " << number_of_active_devices; 164 << "number of devices: " << number_of_active_devices;
165 return static_cast<int>(number_of_active_devices); 165 return static_cast<int>(number_of_active_devices);
166 } 166 }
167 167
168 ScopedComPtr<IMMDeviceEnumerator> CoreAudioUtil::CreateDeviceEnumerator() { 168 ScopedComPtr<IMMDeviceEnumerator> CoreAudioUtil::CreateDeviceEnumerator() {
169 DCHECK(CoreAudioUtil::IsSupported()); 169 DCHECK(IsSupported());
170 ScopedComPtr<IMMDeviceEnumerator> device_enumerator; 170 ScopedComPtr<IMMDeviceEnumerator> device_enumerator;
171 HRESULT hr = CoCreateInstance(__uuidof(MMDeviceEnumerator), 171 HRESULT hr = CoCreateInstance(__uuidof(MMDeviceEnumerator),
172 NULL, 172 NULL,
173 CLSCTX_INPROC_SERVER, 173 CLSCTX_INPROC_SERVER,
174 __uuidof(IMMDeviceEnumerator), 174 __uuidof(IMMDeviceEnumerator),
175 device_enumerator.ReceiveVoid()); 175 device_enumerator.ReceiveVoid());
176 // CO_E_NOTINITIALIZED is the most likely reason for failure and if that 176 // CO_E_NOTINITIALIZED is the most likely reason for failure and if that
177 // happens we might as well die here. 177 // happens we might as well die here.
178 CHECK(SUCCEEDED(hr)); 178 CHECK(SUCCEEDED(hr));
179 return device_enumerator; 179 return device_enumerator;
180 } 180 }
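Aside on the CO_E_NOTINITIALIZED comment above: a minimal caller sketch, assuming the usual Chromium pattern of base::win::ScopedCOMInitializer (an assumption, it is not referenced in this file), showing that COM is expected to be initialized on the calling thread before these helpers run:

// Illustration only; assumes base/win/scoped_com_initializer.h.
// Without COM initialized on this thread, CoCreateInstance() returns
// CO_E_NOTINITIALIZED and the CHECK above fires.
base::win::ScopedCOMInitializer com_init;
ScopedComPtr<IMMDeviceEnumerator> enumerator =
    CoreAudioUtil::CreateDeviceEnumerator();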
181 181
182 ScopedComPtr<IMMDevice> CoreAudioUtil::CreateDefaultDevice(EDataFlow data_flow, 182 ScopedComPtr<IMMDevice> CoreAudioUtil::CreateDefaultDevice(EDataFlow data_flow,
183 ERole role) { 183 ERole role) {
184 DCHECK(CoreAudioUtil::IsSupported()); 184 DCHECK(IsSupported());
185 ScopedComPtr<IMMDevice> endpoint_device; 185 ScopedComPtr<IMMDevice> endpoint_device;
186 186
187 // Create the IMMDeviceEnumerator interface. 187 // Create the IMMDeviceEnumerator interface.
188 ScopedComPtr<IMMDeviceEnumerator> device_enumerator = 188 ScopedComPtr<IMMDeviceEnumerator> device_enumerator =
189 CreateDeviceEnumerator(); 189 CreateDeviceEnumerator();
190 if (!device_enumerator) 190 if (!device_enumerator)
191 return endpoint_device; 191 return endpoint_device;
192 192
193 // Retrieve the default audio endpoint for the specified data-flow 193 // Retrieve the default audio endpoint for the specified data-flow
194 // direction and role. 194 // direction and role.
(...skipping 14 matching lines...) Expand all
209 if (!(state & DEVICE_STATE_ACTIVE)) { 209 if (!(state & DEVICE_STATE_ACTIVE)) {
210 DVLOG(1) << "Selected endpoint device is not active"; 210 DVLOG(1) << "Selected endpoint device is not active";
211 endpoint_device.Release(); 211 endpoint_device.Release();
212 } 212 }
213 } 213 }
214 return endpoint_device; 214 return endpoint_device;
215 } 215 }
216 216
217 ScopedComPtr<IMMDevice> CoreAudioUtil::CreateDevice( 217 ScopedComPtr<IMMDevice> CoreAudioUtil::CreateDevice(
218 const std::string& device_id) { 218 const std::string& device_id) {
219 DCHECK(CoreAudioUtil::IsSupported()); 219 DCHECK(IsSupported());
220 ScopedComPtr<IMMDevice> endpoint_device; 220 ScopedComPtr<IMMDevice> endpoint_device;
221 221
222 // Create the IMMDeviceEnumerator interface. 222 // Create the IMMDeviceEnumerator interface.
223 ScopedComPtr<IMMDeviceEnumerator> device_enumerator = 223 ScopedComPtr<IMMDeviceEnumerator> device_enumerator =
224 CreateDeviceEnumerator(); 224 CreateDeviceEnumerator();
225 if (!device_enumerator) 225 if (!device_enumerator)
226 return endpoint_device; 226 return endpoint_device;
227 227
228 // Retrieve an audio device specified by an endpoint device-identification 228 // Retrieve an audio device specified by an endpoint device-identification
229 // string. 229 // string.
230 HRESULT hr = device_enumerator->GetDevice(UTF8ToUTF16(device_id).c_str(), 230 HRESULT hr = device_enumerator->GetDevice(UTF8ToUTF16(device_id).c_str(),
231 endpoint_device.Receive()); 231 endpoint_device.Receive());
232 DVLOG_IF(1, FAILED(hr)) << "IMMDeviceEnumerator::GetDevice: " 232 DVLOG_IF(1, FAILED(hr)) << "IMMDeviceEnumerator::GetDevice: "
233 << std::hex << hr; 233 << std::hex << hr;
234 return endpoint_device; 234 return endpoint_device;
235 } 235 }
236 236
237 HRESULT CoreAudioUtil::GetDeviceName(IMMDevice* device, AudioDeviceName* name) { 237 HRESULT CoreAudioUtil::GetDeviceName(IMMDevice* device, AudioDeviceName* name) {
238 DCHECK(CoreAudioUtil::IsSupported()); 238 DCHECK(IsSupported());
239 239
240 // Retrieve unique name of endpoint device. 240 // Retrieve unique name of endpoint device.
241 // Example: "{0.0.1.00000000}.{8db6020f-18e3-4f25-b6f5-7726c9122574}". 241 // Example: "{0.0.1.00000000}.{8db6020f-18e3-4f25-b6f5-7726c9122574}".
242 AudioDeviceName device_name; 242 AudioDeviceName device_name;
243 ScopedCoMem<WCHAR> endpoint_device_id; 243 ScopedCoMem<WCHAR> endpoint_device_id;
244 HRESULT hr = device->GetId(&endpoint_device_id); 244 HRESULT hr = device->GetId(&endpoint_device_id);
245 if (FAILED(hr)) 245 if (FAILED(hr))
246 return hr; 246 return hr;
247 WideToUTF8(endpoint_device_id, wcslen(endpoint_device_id), 247 WideToUTF8(endpoint_device_id, wcslen(endpoint_device_id),
248 &device_name.unique_id); 248 &device_name.unique_id);
(...skipping 14 matching lines...)
263 &device_name.device_name); 263 &device_name.device_name);
264 } 264 }
265 265
266 *name = device_name; 266 *name = device_name;
267 DVLOG(2) << "friendly name: " << device_name.device_name; 267 DVLOG(2) << "friendly name: " << device_name.device_name;
268 DVLOG(2) << "unique id : " << device_name.unique_id; 268 DVLOG(2) << "unique id : " << device_name.unique_id;
269 return hr; 269 return hr;
270 } 270 }
271 271
272 std::string CoreAudioUtil::GetFriendlyName(const std::string& device_id) { 272 std::string CoreAudioUtil::GetFriendlyName(const std::string& device_id) {
273 DCHECK(CoreAudioUtil::IsSupported()); 273 DCHECK(IsSupported());
274 ScopedComPtr<IMMDevice> audio_device = CreateDevice(device_id); 274 ScopedComPtr<IMMDevice> audio_device = CreateDevice(device_id);
275 if (!audio_device) 275 if (!audio_device)
276 return std::string(); 276 return std::string();
277 277
278 AudioDeviceName device_name; 278 AudioDeviceName device_name;
279 HRESULT hr = GetDeviceName(audio_device, &device_name); 279 HRESULT hr = GetDeviceName(audio_device, &device_name);
280 if (FAILED(hr)) 280 if (FAILED(hr))
281 return std::string(); 281 return std::string();
282 282
283 return device_name.device_name; 283 return device_name.device_name;
284 } 284 }
285 285
286 bool CoreAudioUtil::DeviceIsDefault(EDataFlow flow, 286 bool CoreAudioUtil::DeviceIsDefault(EDataFlow flow,
287 ERole role, 287 ERole role,
288 std::string device_id) { 288 std::string device_id) {
289 DCHECK(CoreAudioUtil::IsSupported()); 289 DCHECK(IsSupported());
290 ScopedComPtr<IMMDevice> device = CreateDefaultDevice(flow, role); 290 ScopedComPtr<IMMDevice> device = CreateDefaultDevice(flow, role);
291 if (!device) 291 if (!device)
292 return false; 292 return false;
293 293
294 ScopedCoMem<WCHAR> default_device_id; 294 ScopedCoMem<WCHAR> default_device_id;
295 HRESULT hr = device->GetId(&default_device_id); 295 HRESULT hr = device->GetId(&default_device_id);
296 if (FAILED(hr)) 296 if (FAILED(hr))
297 return false; 297 return false;
298 298
299 std::string str_default; 299 std::string str_default;
300 WideToUTF8(default_device_id, wcslen(default_device_id), &str_default); 300 WideToUTF8(default_device_id, wcslen(default_device_id), &str_default);
301 if (device_id.compare(str_default) != 0) 301 if (device_id.compare(str_default) != 0)
302 return false; 302 return false;
303 return true; 303 return true;
304 } 304 }
305 305
306 EDataFlow CoreAudioUtil::GetDataFlow(IMMDevice* device) { 306 EDataFlow CoreAudioUtil::GetDataFlow(IMMDevice* device) {
307 DCHECK(CoreAudioUtil::IsSupported()); 307 DCHECK(IsSupported());
308 ScopedComPtr<IMMEndpoint> endpoint; 308 ScopedComPtr<IMMEndpoint> endpoint;
309 HRESULT hr = device->QueryInterface(endpoint.Receive()); 309 HRESULT hr = device->QueryInterface(endpoint.Receive());
310 if (FAILED(hr)) { 310 if (FAILED(hr)) {
311 DVLOG(1) << "IMMDevice::QueryInterface: " << std::hex << hr; 311 DVLOG(1) << "IMMDevice::QueryInterface: " << std::hex << hr;
312 return eAll; 312 return eAll;
313 } 313 }
314 314
315 EDataFlow data_flow; 315 EDataFlow data_flow;
316 hr = endpoint->GetDataFlow(&data_flow); 316 hr = endpoint->GetDataFlow(&data_flow);
317 if (FAILED(hr)) { 317 if (FAILED(hr)) {
318 DVLOG(1) << "IMMEndpoint::GetDataFlow: " << std::hex << hr; 318 DVLOG(1) << "IMMEndpoint::GetDataFlow: " << std::hex << hr;
319 return eAll; 319 return eAll;
320 } 320 }
321 return data_flow; 321 return data_flow;
322 } 322 }
323 323
324 ScopedComPtr<IAudioClient> CoreAudioUtil::CreateClient( 324 ScopedComPtr<IAudioClient> CoreAudioUtil::CreateClient(
325 IMMDevice* audio_device) { 325 IMMDevice* audio_device) {
326 DCHECK(CoreAudioUtil::IsSupported()); 326 DCHECK(IsSupported());
327 327
328 // Creates and activates an IAudioClient COM object given the selected 328 // Creates and activates an IAudioClient COM object given the selected
329 // endpoint device. 329 // endpoint device.
330 ScopedComPtr<IAudioClient> audio_client; 330 ScopedComPtr<IAudioClient> audio_client;
331 HRESULT hr = audio_device->Activate(__uuidof(IAudioClient), 331 HRESULT hr = audio_device->Activate(__uuidof(IAudioClient),
332 CLSCTX_INPROC_SERVER, 332 CLSCTX_INPROC_SERVER,
333 NULL, 333 NULL,
334 audio_client.ReceiveVoid()); 334 audio_client.ReceiveVoid());
335 DVLOG_IF(1, FAILED(hr)) << "IMMDevice::Activate: " << std::hex << hr; 335 DVLOG_IF(1, FAILED(hr)) << "IMMDevice::Activate: " << std::hex << hr;
336 return audio_client; 336 return audio_client;
337 } 337 }
338 338
339 ScopedComPtr<IAudioClient> CoreAudioUtil::CreateDefaultClient( 339 ScopedComPtr<IAudioClient> CoreAudioUtil::CreateDefaultClient(
340 EDataFlow data_flow, ERole role) { 340 EDataFlow data_flow, ERole role) {
341 DCHECK(CoreAudioUtil::IsSupported()); 341 DCHECK(IsSupported());
342 ScopedComPtr<IMMDevice> default_device(CreateDefaultDevice(data_flow, role)); 342 ScopedComPtr<IMMDevice> default_device(CreateDefaultDevice(data_flow, role));
343 return (default_device ? CreateClient(default_device) : 343 return (default_device ? CreateClient(default_device) :
344 ScopedComPtr<IAudioClient>()); 344 ScopedComPtr<IAudioClient>());
345 } 345 }
346 346
347 HRESULT CoreAudioUtil::GetSharedModeMixFormat( 347 HRESULT CoreAudioUtil::GetSharedModeMixFormat(
348 IAudioClient* client, WAVEFORMATPCMEX* format) { 348 IAudioClient* client, WAVEFORMATPCMEX* format) {
349 DCHECK(CoreAudioUtil::IsSupported()); 349 DCHECK(IsSupported());
350 ScopedCoMem<WAVEFORMATPCMEX> format_pcmex; 350 ScopedCoMem<WAVEFORMATPCMEX> format_pcmex;
351 HRESULT hr = client->GetMixFormat( 351 HRESULT hr = client->GetMixFormat(
352 reinterpret_cast<WAVEFORMATEX**>(&format_pcmex)); 352 reinterpret_cast<WAVEFORMATEX**>(&format_pcmex));
353 if (FAILED(hr)) 353 if (FAILED(hr))
354 return hr; 354 return hr;
355 355
356 size_t bytes = sizeof(WAVEFORMATEX) + format_pcmex->Format.cbSize; 356 size_t bytes = sizeof(WAVEFORMATEX) + format_pcmex->Format.cbSize;
357 DCHECK_EQ(bytes, sizeof(WAVEFORMATPCMEX)); 357 DCHECK_EQ(bytes, sizeof(WAVEFORMATPCMEX));
358 358
359 memcpy(format, format_pcmex, bytes); 359 memcpy(format, format_pcmex, bytes);
360 360
361 DVLOG(2) << "wFormatTag: 0x" << std::hex << format->Format.wFormatTag 361 DVLOG(2) << "wFormatTag: 0x" << std::hex << format->Format.wFormatTag
362 << ", nChannels: " << std::dec << format->Format.nChannels 362 << ", nChannels: " << std::dec << format->Format.nChannels
363 << ", nSamplesPerSec: " << format->Format.nSamplesPerSec 363 << ", nSamplesPerSec: " << format->Format.nSamplesPerSec
364 << ", nAvgBytesPerSec: " << format->Format.nAvgBytesPerSec 364 << ", nAvgBytesPerSec: " << format->Format.nAvgBytesPerSec
365 << ", nBlockAlign: " << format->Format.nBlockAlign 365 << ", nBlockAlign: " << format->Format.nBlockAlign
366 << ", wBitsPerSample: " << format->Format.wBitsPerSample 366 << ", wBitsPerSample: " << format->Format.wBitsPerSample
367 << ", cbSize: " << format->Format.cbSize 367 << ", cbSize: " << format->Format.cbSize
368 << ", wValidBitsPerSample: " << format->Samples.wValidBitsPerSample 368 << ", wValidBitsPerSample: " << format->Samples.wValidBitsPerSample
369 << ", dwChannelMask: 0x" << std::hex << format->dwChannelMask; 369 << ", dwChannelMask: 0x" << std::hex << format->dwChannelMask;
370 370
371 return hr; 371 return hr;
372 } 372 }
373 373
374 HRESULT CoreAudioUtil::GetDefaultSharedModeMixFormat(
375 EDataFlow data_flow, ERole role, WAVEFORMATPCMEX* format) {
tommi (sloooow) - chröme 2013/01/31 16:18:24 indent
henrika (OOO until Aug 14) 2013/02/01 10:55:56 Done.
376 DCHECK(IsSupported());
377 ScopedComPtr<IAudioClient> client;
tommi (sloooow) - chröme 2013/01/31 16:18:24 pass CreateDefaultClient to the constructor
henrika (OOO until Aug 14) 2013/02/01 10:55:56 Done.
378 client = CreateDefaultClient(data_flow, role);
379 if (!client) {
380 // Map NULL-pointer to new error code which can be different from the
381 // actual error code. The exact value is not important here.
382 return AUDCLNT_E_ENDPOINT_CREATE_FAILED;
383 }
384 return CoreAudioUtil::GetSharedModeMixFormat(client, format);
385 }
386
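Note on tommi's comment at line 377 above (marked 'Done.'): applying it collapses the declare-then-assign pair into direct construction, matching the pattern CreateDefaultClient() itself uses at line 342, roughly:

  ScopedComPtr<IAudioClient> client(CreateDefaultClient(data_flow, role));

And, as a hedged illustration of the new helper's intended use (caller names chosen here for the example only, not taken from this patch):

// Illustration only: query the default render device's shared-mode mix
// format without creating the IAudioClient by hand.
WAVEFORMATPCMEX mix_format;
if (SUCCEEDED(CoreAudioUtil::GetDefaultSharedModeMixFormat(
        eRender, eConsole, &mix_format))) {
  DVLOG(1) << "nSamplesPerSec: " << mix_format.Format.nSamplesPerSec
           << ", nChannels: " << mix_format.Format.nChannels;
}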
374 bool CoreAudioUtil::IsFormatSupported(IAudioClient* client, 387 bool CoreAudioUtil::IsFormatSupported(IAudioClient* client,
375 AUDCLNT_SHAREMODE share_mode, 388 AUDCLNT_SHAREMODE share_mode,
376 const WAVEFORMATPCMEX* format) { 389 const WAVEFORMATPCMEX* format) {
377 DCHECK(CoreAudioUtil::IsSupported()); 390 DCHECK(IsSupported());
378 ScopedCoMem<WAVEFORMATEXTENSIBLE> closest_match; 391 ScopedCoMem<WAVEFORMATEXTENSIBLE> closest_match;
379 HRESULT hr = client->IsFormatSupported( 392 HRESULT hr = client->IsFormatSupported(
380 share_mode, reinterpret_cast<const WAVEFORMATEX*>(format), 393 share_mode, reinterpret_cast<const WAVEFORMATEX*>(format),
381 reinterpret_cast<WAVEFORMATEX**>(&closest_match)); 394 reinterpret_cast<WAVEFORMATEX**>(&closest_match));
382 395
383 // This log can only be triggered for shared mode. 396 // This log can only be triggered for shared mode.
384 DLOG_IF(ERROR, hr == S_FALSE) << "Format is not supported " 397 DLOG_IF(ERROR, hr == S_FALSE) << "Format is not supported "
385 << "but a closest match exists."; 398 << "but a closest match exists.";
386 // This log can be triggered both for shared and exclusive modes. 399 // This log can be triggered both for shared and exclusive modes.
387 DLOG_IF(ERROR, hr == AUDCLNT_E_UNSUPPORTED_FORMAT) << "Unsupported format."; 400 DLOG_IF(ERROR, hr == AUDCLNT_E_UNSUPPORTED_FORMAT) << "Unsupported format.";
388 if (hr == S_FALSE) { 401 if (hr == S_FALSE) {
389 DVLOG(2) << "wFormatTag: " << closest_match->Format.wFormatTag 402 DVLOG(2) << "wFormatTag: " << closest_match->Format.wFormatTag
390 << ", nChannels: " << closest_match->Format.nChannels 403 << ", nChannels: " << closest_match->Format.nChannels
391 << ", nSamplesPerSec: " << closest_match->Format.nSamplesPerSec 404 << ", nSamplesPerSec: " << closest_match->Format.nSamplesPerSec
392 << ", wBitsPerSample: " << closest_match->Format.wBitsPerSample; 405 << ", wBitsPerSample: " << closest_match->Format.wBitsPerSample;
393 } 406 }
394 407
395 return (hr == S_OK); 408 return (hr == S_OK);
396 } 409 }
397 410
398 HRESULT CoreAudioUtil::GetDevicePeriod(IAudioClient* client, 411 HRESULT CoreAudioUtil::GetDevicePeriod(IAudioClient* client,
399 AUDCLNT_SHAREMODE share_mode, 412 AUDCLNT_SHAREMODE share_mode,
400 REFERENCE_TIME* device_period) { 413 REFERENCE_TIME* device_period) {
401 DCHECK(CoreAudioUtil::IsSupported()); 414 DCHECK(IsSupported());
402 415
403 // Get the period of the engine thread. 416 // Get the period of the engine thread.
404 REFERENCE_TIME default_period = 0; 417 REFERENCE_TIME default_period = 0;
405 REFERENCE_TIME minimum_period = 0; 418 REFERENCE_TIME minimum_period = 0;
406 HRESULT hr = client->GetDevicePeriod(&default_period, &minimum_period); 419 HRESULT hr = client->GetDevicePeriod(&default_period, &minimum_period);
407 if (FAILED(hr)) 420 if (FAILED(hr))
408 return hr; 421 return hr;
409 422
410 *device_period = (share_mode == AUDCLNT_SHAREMODE_SHARED) ? default_period : 423 *device_period = (share_mode == AUDCLNT_SHAREMODE_SHARED) ? default_period :
411 minimum_period; 424 minimum_period;
412 DVLOG(2) << "device_period: " 425 DVLOG(2) << "device_period: "
413 << RefererenceTimeToTimeDelta(*device_period).InMillisecondsF() 426 << RefererenceTimeToTimeDelta(*device_period).InMillisecondsF()
414 << " [ms]"; 427 << " [ms]";
415 return hr; 428 return hr;
416 } 429 }
417 430
418 HRESULT CoreAudioUtil::GetPreferredAudioParameters( 431 HRESULT CoreAudioUtil::GetPreferredAudioParameters(
419 IAudioClient* client, AudioParameters* params) { 432 IAudioClient* client, AudioParameters* params) {
420 DCHECK(CoreAudioUtil::IsSupported()); 433 DCHECK(IsSupported());
421 WAVEFORMATPCMEX format; 434 WAVEFORMATPCMEX format;
422 HRESULT hr = GetSharedModeMixFormat(client, &format); 435 HRESULT hr = GetSharedModeMixFormat(client, &format);
423 if (FAILED(hr)) 436 if (FAILED(hr))
424 return hr; 437 return hr;
425 438
426 REFERENCE_TIME default_period = 0; 439 REFERENCE_TIME default_period = 0;
427 hr = GetDevicePeriod(client, AUDCLNT_SHAREMODE_SHARED, &default_period); 440 hr = GetDevicePeriod(client, AUDCLNT_SHAREMODE_SHARED, &default_period);
428 if (FAILED(hr)) 441 if (FAILED(hr))
429 return hr; 442 return hr;
430 443
(...skipping 30 matching lines...)
461 sample_rate, 474 sample_rate,
462 bits_per_sample, 475 bits_per_sample,
463 frames_per_buffer); 476 frames_per_buffer);
464 477
465 *params = audio_params; 478 *params = audio_params;
466 return hr; 479 return hr;
467 } 480 }
468 481
469 HRESULT CoreAudioUtil::GetPreferredAudioParameters( 482 HRESULT CoreAudioUtil::GetPreferredAudioParameters(
470 EDataFlow data_flow, ERole role, AudioParameters* params) { 483 EDataFlow data_flow, ERole role, AudioParameters* params) {
471 DCHECK(CoreAudioUtil::IsSupported()); 484 DCHECK(IsSupported());
472
473 ScopedComPtr<IAudioClient> client = CreateDefaultClient(data_flow, role); 485 ScopedComPtr<IAudioClient> client = CreateDefaultClient(data_flow, role);
474 if (!client) { 486 if (!client) {
475 // Map NULL-pointer to new error code which can be different from the 487 // Map NULL-pointer to new error code which can be different from the
476 // actual error code. The exact value is not important here. 488 // actual error code. The exact value is not important here.
477 return AUDCLNT_E_ENDPOINT_CREATE_FAILED; 489 return AUDCLNT_E_ENDPOINT_CREATE_FAILED;
478 } 490 }
479 return GetPreferredAudioParameters(client, params); 491 return GetPreferredAudioParameters(client, params);
480 } 492 }
481 493
482 HRESULT CoreAudioUtil::SharedModeInitialize(IAudioClient* client, 494 HRESULT CoreAudioUtil::SharedModeInitialize(IAudioClient* client,
483 const WAVEFORMATPCMEX* format, 495 const WAVEFORMATPCMEX* format,
484 HANDLE event_handle, 496 HANDLE event_handle,
485 size_t* endpoint_buffer_size) { 497 size_t* endpoint_buffer_size) {
486 DCHECK(CoreAudioUtil::IsSupported()); 498 DCHECK(IsSupported());
487
488 DWORD stream_flags = AUDCLNT_STREAMFLAGS_NOPERSIST; 499 DWORD stream_flags = AUDCLNT_STREAMFLAGS_NOPERSIST;
489 500
490 // Enable event-driven streaming if a valid event handle is provided. 501 // Enable event-driven streaming if a valid event handle is provided.
491 // After the stream starts, the audio engine will signal the event handle 502 // After the stream starts, the audio engine will signal the event handle
492 // to notify the client each time a buffer becomes ready to process. 503 // to notify the client each time a buffer becomes ready to process.
493 // Event-driven buffering is supported for both rendering and capturing. 504 // Event-driven buffering is supported for both rendering and capturing.
494 // Both shared-mode and exclusive-mode streams can use event-driven buffering. 505 // Both shared-mode and exclusive-mode streams can use event-driven buffering.
495 bool use_event = (event_handle != NULL && 506 bool use_event = (event_handle != NULL &&
496 event_handle != INVALID_HANDLE_VALUE); 507 event_handle != INVALID_HANDLE_VALUE);
497 if (use_event) 508 if (use_event)
(...skipping 33 matching lines...)
531 // TODO(henrika): utilize when delay measurements are added. 542 // TODO(henrika): utilize when delay measurements are added.
532 REFERENCE_TIME latency = 0; 543 REFERENCE_TIME latency = 0;
533 hr = client->GetStreamLatency(&latency); 544 hr = client->GetStreamLatency(&latency);
534 DVLOG(2) << "stream latency: " 545 DVLOG(2) << "stream latency: "
535 << RefererenceTimeToTimeDelta(latency).InMillisecondsF() << " [ms]"; 546 << RefererenceTimeToTimeDelta(latency).InMillisecondsF() << " [ms]";
536 return hr; 547 return hr;
537 } 548 }
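Note on the event-driven streaming comment above: a hypothetical caller sketch (audio_client and format are illustrative names, not from this patch; error handling omitted) of how a stream might wire an event handle into SharedModeInitialize() to get per-buffer notifications from the audio engine:

// Illustration only; assumes base/win/scoped_handle.h.
base::win::ScopedHandle audio_samples_event(
    CreateEvent(NULL, FALSE, FALSE, NULL));  // Auto-reset, not signaled.
size_t endpoint_buffer_size = 0;
HRESULT hr = CoreAudioUtil::SharedModeInitialize(
    audio_client, &format, audio_samples_event.Get(), &endpoint_buffer_size);
// After IAudioClient::Start(), the engine signals the event each time a
// buffer becomes ready to process.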
538 549
539 ScopedComPtr<IAudioRenderClient> CoreAudioUtil::CreateRenderClient( 550 ScopedComPtr<IAudioRenderClient> CoreAudioUtil::CreateRenderClient(
540 IAudioClient* client) { 551 IAudioClient* client) {
541 DCHECK(CoreAudioUtil::IsSupported()); 552 DCHECK(IsSupported());
542 553
543 // Get access to the IAudioRenderClient interface. This interface 554 // Get access to the IAudioRenderClient interface. This interface
544 // enables us to write output data to a rendering endpoint buffer. 555 // enables us to write output data to a rendering endpoint buffer.
545 ScopedComPtr<IAudioRenderClient> audio_render_client; 556 ScopedComPtr<IAudioRenderClient> audio_render_client;
546 HRESULT hr = client->GetService(__uuidof(IAudioRenderClient), 557 HRESULT hr = client->GetService(__uuidof(IAudioRenderClient),
547 audio_render_client.ReceiveVoid()); 558 audio_render_client.ReceiveVoid());
548 if (FAILED(hr)) { 559 if (FAILED(hr)) {
549 DVLOG(1) << "IAudioClient::GetService: " << std::hex << hr; 560 DVLOG(1) << "IAudioClient::GetService: " << std::hex << hr;
550 return ScopedComPtr<IAudioRenderClient>(); 561 return ScopedComPtr<IAudioRenderClient>();
551 } 562 }
552
553 // TODO(henrika): verify that this scheme is the same for shared mode and
554 // exclusive mode streams.
555
556 // Avoid start-up glitches by filling up the endpoint buffer with "silence"
557 // before starting the stream.
558 UINT32 endpoint_buffer_size = 0;
559 hr = client->GetBufferSize(&endpoint_buffer_size);
560 DVLOG_IF(1, FAILED(hr)) << "IAudioClient::GetBufferSize: " << std::hex << hr;
561
562 BYTE* data = NULL;
563 hr = audio_render_client->GetBuffer(endpoint_buffer_size, &data);
564 DVLOG_IF(1, FAILED(hr)) << "IAudioRenderClient::GetBuffer: "
565 << std::hex << hr;
566 if (SUCCEEDED(hr)) {
567 // Using the AUDCLNT_BUFFERFLAGS_SILENT flag eliminates the need to
568 // explicitly write silence data to the rendering buffer.
569 hr = audio_render_client->ReleaseBuffer(endpoint_buffer_size,
570 AUDCLNT_BUFFERFLAGS_SILENT);
571 DVLOG_IF(1, FAILED(hr)) << "IAudioRenderClient::ReleaseBuffer: "
572 << std::hex << hr;
573 }
574
575 // Sanity check: verify that the endpoint buffer is filled with silence.
576 UINT32 num_queued_frames = 0;
577 client->GetCurrentPadding(&num_queued_frames);
578 DCHECK(num_queued_frames == endpoint_buffer_size);
579
580 return audio_render_client; 563 return audio_render_client;
581 } 564 }
582 565
583 ScopedComPtr<IAudioCaptureClient> CoreAudioUtil::CreateCaptureClient( 566 ScopedComPtr<IAudioCaptureClient> CoreAudioUtil::CreateCaptureClient(
584 IAudioClient* client) { 567 IAudioClient* client) {
585 DCHECK(CoreAudioUtil::IsSupported()); 568 DCHECK(IsSupported());
586 569
587 // Get access to the IAudioCaptureClient interface. This interface 570 // Get access to the IAudioCaptureClient interface. This interface
588 // enables us to read input data from a capturing endpoint buffer. 571 // enables us to read input data from a capturing endpoint buffer.
589 ScopedComPtr<IAudioCaptureClient> audio_capture_client; 572 ScopedComPtr<IAudioCaptureClient> audio_capture_client;
590 HRESULT hr = client->GetService(__uuidof(IAudioCaptureClient), 573 HRESULT hr = client->GetService(__uuidof(IAudioCaptureClient),
591 audio_capture_client.ReceiveVoid()); 574 audio_capture_client.ReceiveVoid());
592 if (FAILED(hr)) { 575 if (FAILED(hr)) {
593 DVLOG(1) << "IAudioClient::GetService: " << std::hex << hr; 576 DVLOG(1) << "IAudioClient::GetService: " << std::hex << hr;
594 return ScopedComPtr<IAudioCaptureClient>(); 577 return ScopedComPtr<IAudioCaptureClient>();
595 } 578 }
596 return audio_capture_client; 579 return audio_capture_client;
597 } 580 }
598 581
582 bool CoreAudioUtil::FillRenderEndpointBufferWithSilence(
583 IAudioClient* client, IAudioRenderClient* render_client) {
584 DCHECK(IsSupported());
585
586 UINT32 endpoint_buffer_size = 0;
587 if (FAILED(client->GetBufferSize(&endpoint_buffer_size)))
588 return false;
589
590 UINT32 num_queued_frames = 0;
591 if (FAILED(client->GetCurrentPadding(&num_queued_frames)))
592 return false;
593
594 BYTE* data = NULL;
595 int num_frames_to_fill = endpoint_buffer_size - num_queued_frames;
596 if (FAILED(render_client->GetBuffer(num_frames_to_fill, &data)))
597 return false;
598
599 // Using the AUDCLNT_BUFFERFLAGS_SILENT flag eliminates the need to
600 // explicitly write silence data to the rendering buffer.
601 DVLOG(2) << "filling up " << num_frames_to_fill << " frames with silence";
602 if (FAILED(render_client->ReleaseBuffer(num_frames_to_fill,
tommi (sloooow) - chröme 2013/01/31 16:18:24 return SUCCEEDED(render_client->ReleaseBuffer(...)
henrika (OOO until Aug 14) 2013/02/01 10:55:56 Done.
603 AUDCLNT_BUFFERFLAGS_SILENT)))
604 return false;
605 return true;
606 }
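Note on tommi's ReleaseBuffer comment above (marked 'Done.'): with the suggestion applied, the last statements reduce to a single return, roughly:

  DVLOG(2) << "filling up " << num_frames_to_fill << " frames with silence";
  return SUCCEEDED(render_client->ReleaseBuffer(num_frames_to_fill,
                                                AUDCLNT_BUFFERFLAGS_SILENT));

A hypothetical call site (names illustrative only), replacing the pre-fill that CreateRenderClient() did before this patch:

// Illustration only: pre-fill the endpoint buffer with silence before
// starting the stream to avoid start-up glitches.
if (!CoreAudioUtil::FillRenderEndpointBufferWithSilence(audio_client,
                                                        audio_render_client)) {
  DLOG(WARNING) << "Failed to pre-fill endpoint buffer with silence.";
}
audio_client->Start();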
607
599 } // namespace media 608 } // namespace media
