Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | |
| 2 // Use of this source code is governed by a BSD-style license that can be | |
| 3 // found in the LICENSE file. | |
| 4 | |
| 5 #include "media/audio/mac/audio_synchronized_mac.h" | |
| 6 | |
| 7 #include <CoreServices/CoreServices.h> | |
| 8 #include <algorithm> | |
| 9 | |
| 10 #include "base/basictypes.h" | |
| 11 #include "base/logging.h" | |
| 12 #include "base/mac/mac_logging.h" | |
| 13 #include "media/audio/audio_util.h" | |
| 14 #include "media/audio/mac/audio_manager_mac.h" | |
| 15 | |
| 16 namespace media { | |
| 17 | |
| 18 static const int kHardwareBufferSize = 128; | |
| 19 static const int kFifoSize = 16384; | |
| 20 | |
| 21 // TODO(crogers): handle the non-stereo case. | |
| 22 static const int kChannels = 2; | |
| 23 | |
| 24 // This value was determined empirically for minimum latency while still | |
| 25 // guarding against FIFO under-runs. | |
| 26 // TODO(crogers): refine this, taking into account different input/output | |
| 27 // sample-rate combinations. | |
| 28 static const int kTargetDelayFrames = 256; | |
| 29 | |
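(For scale: at an output sample rate of 48 kHz, which is only an assumed example and not something this file fixes, 256 frames correspond to roughly 256 / 48000, about 5.3 ms of buffered audio.)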
| 30 static void ZeroBufferList(AudioBufferList* io_data) { | |
| 31 for (UInt32 i = 0; i < io_data->mNumberBuffers; ++i) | |
| 32 memset(io_data->mBuffers[i].mData, 0, io_data->mBuffers[i].mDataByteSize); | |
| 33 } | |
| 34 | |
| 35 AudioSynchronizedStream::AudioSynchronizedStream( | |
| 36 AudioManagerMac* manager, | |
| 37 const AudioParameters&, | |
| 38 AudioDeviceID input_id, | |
| 39 AudioDeviceID output_id) | |
| 40 : manager_(manager), | |
| 41 input_id_(input_id), | |
| 42 output_id_(output_id), | |
| 43 input_data_(NULL), | |
| 44 fifo_(kChannels, kFifoSize), | |
| 45 is_fifo_initialized_(false), | |
| 46 fifo_rate_compensation_(1.0), | |
| 47 output_sample_rate_(0), | |
| 48 input_unit_(0), | |
| 49 varispeed_unit_(0), | |
| 50 output_unit_(0), | |
| 51 first_input_time_(-1), | |
| 52 first_output_time_(-1), | |
| 53 in_to_out_sample_offset_(0), | |
| 54 is_running_(false), | |
| 55 hardware_buffer_size_(kHardwareBufferSize), | |
| 56 channels_(kChannels) { | |
| 57 // TODO(crogers): actually do something with |params|. | |
| 58 // We at least need to verify the sample-rate matches the hardware | |
| 59 // sample-rate of the output device, and we should take into account | |
| 60 // the |channels|. For now we're limited to stereo output. | |
| 61 } | |
| 62 | |
| 63 AudioSynchronizedStream::~AudioSynchronizedStream() { | |
| 64 DCHECK(!input_unit_); | |
| 65 DCHECK(!output_unit_); | |
| 66 DCHECK(!varispeed_unit_); | |
| 67 } | |
| 68 | |
| 69 bool AudioSynchronizedStream::Open() { | |
| 70 // Create the input, output, and varispeed AudioUnits. | |
| 71 OSStatus result = CreateAudioUnits(); | |
| 72 if (result != noErr) { | |
| 73 LOG(ERROR) << "Cannot create AudioUnits."; | |
| 74 return false; | |
| 75 } | |
| 76 | |
| 77 result = SetupInput(input_id_); | |
| 78 if (result != noErr) { | |
| 79 LOG(ERROR) << "Error configuring input AudioUnit."; | |
| 80 return false; | |
| 81 } | |
| 82 | |
| 83 result = SetupOutput(output_id_); | |
| 84 if (result != noErr) { | |
| 85 LOG(ERROR) << "Error configuring output AudioUnit."; | |
| 86 return false; | |
| 87 } | |
| 88 | |
| 89 result = SetupCallbacks(); | |
| 90 if (result != noErr) { | |
| 91 LOG(ERROR) << "Error setting up callbacks on AudioUnits."; | |
| 92 return false; | |
| 93 } | |
| 94 | |
| 95 result = SetupStreamFormats(); | |
| 96 if (result != noErr) { | |
| 97 LOG(ERROR) << "Error configuring stream formats on AudioUnits."; | |
| 98 return false; | |
| 99 } | |
| 100 | |
| 101 AllocateInputData(); | |
| 102 | |
| 103 // Final initialization of the AudioUnits. | |
| 104 result = AudioUnitInitialize(input_unit_); | |
| 105 if (result != noErr) { | |
| 106 LOG(ERROR) << "Error initializing input AudioUnit."; | |
| 107 return false; | |
| 108 } | |
| 109 | |
| 110 result = AudioUnitInitialize(output_unit_); | |
| 111 if (result != noErr) { | |
| 112 LOG(ERROR) << "Error initializing output AudioUnit."; | |
| 113 return false; | |
| 114 } | |
| 115 | |
| 116 result = AudioUnitInitialize(varispeed_unit_); | |
| 117 if (result != noErr) { | |
| 118 LOG(ERROR) << "Error initializing varispeed AudioUnit."; | |
| 119 return false; | |
| 120 } | |
| 121 | |
| 122 ComputeThruOffset(); | |
| 123 | |
| 124 return true; | |
| 125 } | |
| 126 | |
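In terms of data flow (inferred from the render callbacks later in this file): InputProc pushes captured frames from the input AUHAL into fifo_, VarispeedProc pulls from fifo_ at a rate-compensated speed, and OutputProc renders the varispeed unit into the output buffer and then hands that same bus to source_->OnMoreIOData() for in-place processing.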
| 127 void AudioSynchronizedStream::Close() { | |
| 128 DCHECK(!is_running_); | |
| 129 | |
| 130 if (input_data_) { | |
| 131 for (UInt32 i = 0; i < input_data_->mNumberBuffers; ++i) | |
| 132 free(input_data_->mBuffers[i].mData); | |
| 133 free(input_data_); | |
| 134 input_data_ = 0; | |
| 135 } | |
| 136 | |
| 137 if (input_unit_) { | |
| 138 AudioUnitUninitialize(input_unit_); | |
| 139 CloseComponent(input_unit_); | |
| 140 } | |
| 141 | |
| 142 if (output_unit_) { | |
| 143 AudioUnitUninitialize(output_unit_); | |
| 144 CloseComponent(output_unit_); | |
| 145 } | |
| 146 | |
| 147 if (varispeed_unit_) { | |
| 148 AudioUnitUninitialize(varispeed_unit_); | |
| 149 CloseComponent(varispeed_unit_); | |
| 150 } | |
| 151 | |
| 152 input_unit_ = NULL; | |
| 153 output_unit_ = NULL; | |
| 154 varispeed_unit_ = NULL; | |
| 155 | |
| 156 // Inform the audio manager that we have been closed. This can cause our | |
| 157 // destruction. | |
| 158 manager_->ReleaseOutputStream(this); | |
| 159 } | |
| 160 | |
| 161 void AudioSynchronizedStream::Start(AudioSourceCallback* callback) { | |
| 162 DCHECK(callback); | |
| 163 DCHECK(input_unit_); | |
| 164 DCHECK(output_unit_); | |
| 165 DCHECK(varispeed_unit_); | |
| 166 | |
| 167 if (is_running_ || !input_unit_ || !output_unit_ || !varispeed_unit_) | |
| 168 return; | |
| 169 | |
| 170 source_ = callback; | |
| 171 | |
| 172 OSStatus result = noErr; | |
| 173 | |
| 174 if (!is_running_) { | |
| 175 first_input_time_ = -1; | |
| 176 first_output_time_ = -1; | |
| 177 | |
| 178 result = AudioOutputUnitStart(input_unit_); | |
| 179 OSSTATUS_DCHECK(result == noErr, result); | |
| 180 | |
| 181 if (result == noErr) { | |
| 182 result = AudioOutputUnitStart(output_unit_); | |
| 183 OSSTATUS_DCHECK(result == noErr, result); | |
| 184 } | |
| 185 } | |
| 186 | |
| 187 is_running_ = true; | |
| 188 } | |
| 189 | |
| 190 void AudioSynchronizedStream::Stop() { | |
| 191 OSStatus result = noErr; | |
| 192 if (is_running_) { | |
| 193 result = AudioOutputUnitStop(input_unit_); | |
| 194 OSSTATUS_DCHECK(result == noErr, result); | |
| 195 | |
| 196 if (result == noErr) { | |
| 197 result = AudioOutputUnitStop(output_unit_); | |
| 198 OSSTATUS_DCHECK(result == noErr, result); | |
| 199 } | |
| 200 } | |
| 201 | |
| 202 if (result == noErr) | |
| 203 is_running_ = false; | |
| 204 } | |
| 205 | |
| 206 bool AudioSynchronizedStream::IsRunning() { | |
| 207 return is_running_; | |
| 208 } | |
| 209 | |
| 210 // TODO(crogers): | |
| 211 // implement - or remove SetVolume()/GetVolume() from AudioOutputStream. | |
|
scherkus (not reviewing)
2012/09/13 13:06:03
nit: some of this comment can go on previous line
Chris Rogers
2012/09/15 00:06:08
Done.
| |
| 212 void AudioSynchronizedStream::SetVolume(double volume) {} | |
| 213 void AudioSynchronizedStream::GetVolume(double* volume) {} | |
| 214 | |
| 215 OSStatus AudioSynchronizedStream::SetOutputDeviceAsCurrent( | |
| 216 AudioDeviceID output_id) { | |
| 217 OSStatus result = noErr; | |
| 218 | |
| 219 // Get the default output device if device is unknown. | |
| 220 if (output_id == kAudioDeviceUnknown) { | |
| 221 AudioObjectPropertyAddress pa; | |
| 222 pa.mSelector = kAudioHardwarePropertyDefaultOutputDevice; | |
| 223 pa.mScope = kAudioObjectPropertyScopeGlobal; | |
| 224 pa.mElement = kAudioObjectPropertyElementMaster; | |
| 225 UInt32 size = sizeof(output_id); | |
| 226 | |
| 227 result = AudioObjectGetPropertyData( | |
| 228 kAudioObjectSystemObject, | |
| 229 &pa, | |
| 230 0, | |
| 231 0, | |
| 232 &size, | |
| 233 &output_id); | |
| 234 | |
| 235 OSSTATUS_DCHECK(result == noErr, result); | |
| 236 if (result != noErr) | |
| 237 return result; | |
| 238 } | |
| 239 | |
| 240 // Set the render frame size. | |
| 241 UInt32 frame_size = hardware_buffer_size_; | |
| 242 AudioObjectPropertyAddress pa; | |
| 243 pa.mSelector = kAudioDevicePropertyBufferFrameSize; | |
| 244 pa.mScope = kAudioDevicePropertyScopeInput; | |
| 245 pa.mElement = kAudioObjectPropertyElementMaster; | |
| 246 result = AudioObjectSetPropertyData( | |
| 247 output_id, | |
| 248 &pa, | |
| 249 0, | |
| 250 0, | |
| 251 sizeof(frame_size), | |
| 252 &frame_size); | |
| 253 | |
| 254 OSSTATUS_DCHECK(result == noErr, result); | |
| 255 if (result != noErr) | |
| 256 return result; | |
| 257 | |
| 258 output_info_.Initialize(output_id, false); | |
| 259 | |
| 260 // Set the Current Device to the Default Output Unit. | |
| 261 result = AudioUnitSetProperty( | |
| 262 output_unit_, | |
| 263 kAudioOutputUnitProperty_CurrentDevice, | |
| 264 kAudioUnitScope_Global, | |
| 265 0, | |
| 266 &output_info_.id_, | |
| 267 sizeof(output_info_.id_)); | |
| 268 | |
| 269 OSSTATUS_DCHECK(result == noErr, result); | |
| 270 return result; | |
| 271 } | |
| 272 | |
| 273 OSStatus AudioSynchronizedStream::SetInputDeviceAsCurrent( | |
| 274 AudioDeviceID input_id) { | |
| 275 OSStatus result = noErr; | |
| 276 | |
| 277 // Get the default input device if device is unknown. | |
| 278 if (input_id == kAudioDeviceUnknown) { | |
| 279 AudioObjectPropertyAddress pa; | |
| 280 pa.mSelector = kAudioHardwarePropertyDefaultInputDevice; | |
| 281 pa.mScope = kAudioObjectPropertyScopeGlobal; | |
| 282 pa.mElement = kAudioObjectPropertyElementMaster; | |
| 283 UInt32 size = sizeof(input_id); | |
| 284 | |
| 285 result = AudioObjectGetPropertyData( | |
| 286 kAudioObjectSystemObject, | |
| 287 &pa, | |
| 288 0, | |
| 289 0, | |
| 290 &size, | |
| 291 &input_id); | |
| 292 | |
| 293 OSSTATUS_DCHECK(result == noErr, result); | |
| 294 if (result != noErr) | |
| 295 return result; | |
| 296 } | |
| 297 | |
| 298 // Set the render frame size. | |
| 299 UInt32 frame_size = hardware_buffer_size_; | |
| 300 AudioObjectPropertyAddress pa; | |
| 301 pa.mSelector = kAudioDevicePropertyBufferFrameSize; | |
| 302 pa.mScope = kAudioDevicePropertyScopeInput; | |
| 303 pa.mElement = kAudioObjectPropertyElementMaster; | |
| 304 result = AudioObjectSetPropertyData( | |
| 305 input_id, | |
| 306 &pa, | |
| 307 0, | |
| 308 0, | |
| 309 sizeof(frame_size), | |
| 310 &frame_size); | |
| 311 | |
| 312 OSSTATUS_DCHECK(result == noErr, result); | |
| 313 if (result != noErr) | |
| 314 return result; | |
| 315 | |
| 316 input_info_.Initialize(input_id, true); | |
| 317 | |
| 318 // Set the Current Device to the AUHAL. | |
| 319 // This should be done only after I/O has been enabled on the AUHAL. | |
| 320 result = AudioUnitSetProperty( | |
| 321 input_unit_, | |
| 322 kAudioOutputUnitProperty_CurrentDevice, | |
| 323 kAudioUnitScope_Global, | |
| 324 0, | |
| 325 &input_info_.id_, | |
| 326 sizeof(input_info_.id_)); | |
| 327 | |
| 328 OSSTATUS_DCHECK(result == noErr, result); | |
| 329 return result; | |
| 330 } | |
| 331 | |
| 332 OSStatus AudioSynchronizedStream::CreateAudioUnits() { | |
| 333 // Q: Why do we need a varispeed unit? | |
| 334 // A: If the input device and the output device are running at | |
| 335 // different sample rates and/or on different clocks, we will need | |
| 336 // to compensate to avoid a pitch change and | |
| 337 // to avoid buffer under and over runs. | |
| 338 ComponentDescription varispeed_desc; | |
| 339 varispeed_desc.componentType = kAudioUnitType_FormatConverter; | |
| 340 varispeed_desc.componentSubType = kAudioUnitSubType_Varispeed; | |
| 341 varispeed_desc.componentManufacturer = kAudioUnitManufacturer_Apple; | |
| 342 varispeed_desc.componentFlags = 0; | |
| 343 varispeed_desc.componentFlagsMask = 0; | |
| 344 | |
| 345 Component varispeed_comp = FindNextComponent(NULL, &varispeed_desc); | |
| 346 if (varispeed_comp == NULL) | |
| 347 return -1; | |
| 348 | |
| 349 OSStatus result = OpenAComponent(varispeed_comp, &varispeed_unit_); | |
| 350 OSSTATUS_DCHECK(result == noErr, result); | |
| 351 if (result != noErr) | |
| 352 return result; | |
| 353 | |
| 354 // Open input AudioUnit. | |
| 355 ComponentDescription input_desc; | |
| 356 input_desc.componentType = kAudioUnitType_Output; | |
| 357 input_desc.componentSubType = kAudioUnitSubType_HALOutput; | |
| 358 input_desc.componentManufacturer = kAudioUnitManufacturer_Apple; | |
| 359 input_desc.componentFlags = 0; | |
| 360 input_desc.componentFlagsMask = 0; | |
| 361 | |
| 362 Component input_comp = FindNextComponent(NULL, &input_desc); | |
| 363 if (input_comp == NULL) | |
| 364 return -1; | |
| 365 | |
| 366 result = OpenAComponent(input_comp, &input_unit_); | |
| 367 OSSTATUS_DCHECK(result == noErr, result); | |
| 368 if (result != noErr) | |
| 369 return result; | |
| 370 | |
| 371 // Open output AudioUnit. | |
| 372 ComponentDescription output_desc; | |
| 373 output_desc.componentType = kAudioUnitType_Output; | |
| 374 output_desc.componentSubType = kAudioUnitSubType_DefaultOutput; | |
| 375 output_desc.componentManufacturer = kAudioUnitManufacturer_Apple; | |
| 376 output_desc.componentFlags = 0; | |
| 377 output_desc.componentFlagsMask = 0; | |
| 378 | |
| 379 Component output_comp = FindNextComponent(NULL, &output_desc); | |
| 380 if (output_comp == NULL) | |
| 381 return -1; | |
| 382 | |
| 383 result = OpenAComponent(output_comp, &output_unit_); | |
| 384 OSSTATUS_DCHECK(result == noErr, result); | |
| 385 if (result != noErr) | |
| 386 return result; | |
| 387 | |
| 388 return noErr; | |
| 389 } | |
| 390 | |
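A side note on the lookups above: FindNextComponent and OpenAComponent are Carbon Component Manager calls; for audio units the newer AudioComponent API (available since OS X 10.6) does the same job. The sketch below is purely illustrative and not part of the patch under review; CreateVarispeedUnit is a hypothetical helper name.

```cpp
// Illustrative only: AudioComponent equivalent of the Component Manager
// lookup used in CreateAudioUnits().  Not part of the patch under review.
#include <AudioUnit/AudioUnit.h>

static OSStatus CreateVarispeedUnit(AudioUnit* out_unit) {
  AudioComponentDescription desc = {0};
  desc.componentType = kAudioUnitType_FormatConverter;
  desc.componentSubType = kAudioUnitSubType_Varispeed;
  desc.componentManufacturer = kAudioUnitManufacturer_Apple;

  AudioComponent comp = AudioComponentFindNext(NULL, &desc);
  if (!comp)
    return -1;  // Mirrors the error convention used in the file.
  return AudioComponentInstanceNew(comp, out_unit);
}
```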
| 391 OSStatus AudioSynchronizedStream::SetupInput(AudioDeviceID input_id) { | |
| 392 // The AUHAL used for input needs to be initialized | |
| 393 // before anything is done to it. | |
| 394 OSStatus result = AudioUnitInitialize(input_unit_); | |
| 395 OSSTATUS_DCHECK(result == noErr, result); | |
| 396 if (result != noErr) | |
| 397 return result; | |
| 398 | |
| 399 // We must enable the Audio Unit (AUHAL) for input and disable output | |
| 400 // BEFORE setting the AUHAL's current device. | |
| 401 result = EnableIO(); | |
| 402 OSSTATUS_DCHECK(result == noErr, result); | |
| 403 if (result != noErr) | |
| 404 return result; | |
| 405 | |
| 406 result = SetInputDeviceAsCurrent(input_id); | |
| 407 OSSTATUS_DCHECK(result == noErr, result); | |
| 408 | |
| 409 return result; | |
| 410 } | |
| 411 | |
| 412 OSStatus AudioSynchronizedStream::EnableIO() { | |
| 413 // Enable input on the AUHAL. | |
| 414 UInt32 enable_io = 1; | |
| 415 OSStatus result = AudioUnitSetProperty( | |
| 416 input_unit_, | |
| 417 kAudioOutputUnitProperty_EnableIO, | |
| 418 kAudioUnitScope_Input, | |
| 419 1, // input element | |
| 420 &enable_io, | |
| 421 sizeof(enable_io)); | |
| 422 | |
| 423 OSSTATUS_DCHECK(result == noErr, result); | |
| 424 if (result != noErr) | |
| 425 return result; | |
| 426 | |
| 427 // Disable Output on the AUHAL. | |
| 428 enable_io = 0; | |
| 429 result = AudioUnitSetProperty( | |
| 430 input_unit_, | |
| 431 kAudioOutputUnitProperty_EnableIO, | |
| 432 kAudioUnitScope_Output, | |
| 433 0, // output element | |
| 434 &enable_io, | |
| 435 sizeof(enable_io)); | |
| 436 | |
| 437 OSSTATUS_DCHECK(result == noErr, result); | |
| 438 return result; | |
| 439 } | |
| 440 | |
| 441 OSStatus AudioSynchronizedStream::SetupOutput(AudioDeviceID output_id) { | |
| 442 OSStatus result = noErr; | |
| 443 | |
| 444 result = SetOutputDeviceAsCurrent(output_id); | |
| 445 OSSTATUS_DCHECK(result == noErr, result); | |
| 446 if (result != noErr) | |
| 447 return result; | |
| 448 | |
| 449 // Tell the output unit not to reset timestamps. | |
| 450 // Otherwise sample rate changes will cause sync loss. | |
| 451 UInt32 start_at_zero = 0; | |
| 452 result = AudioUnitSetProperty( | |
| 453 output_unit_, | |
| 454 kAudioOutputUnitProperty_StartTimestampsAtZero, | |
| 455 kAudioUnitScope_Global, | |
| 456 0, | |
| 457 &start_at_zero, | |
| 458 sizeof(start_at_zero)); | |
| 459 | |
| 460 OSSTATUS_DCHECK(result == noErr, result); | |
| 461 | |
| 462 return result; | |
| 463 } | |
| 464 | |
| 465 OSStatus AudioSynchronizedStream::SetupCallbacks() { | |
| 466 // Set the input callback. | |
| 467 AURenderCallbackStruct callback; | |
| 468 callback.inputProc = InputProc; | |
| 469 callback.inputProcRefCon = this; | |
| 470 OSStatus result = AudioUnitSetProperty( | |
| 471 input_unit_, | |
| 472 kAudioOutputUnitProperty_SetInputCallback, | |
| 473 kAudioUnitScope_Global, | |
| 474 0, | |
| 475 &callback, | |
| 476 sizeof(callback)); | |
| 477 | |
| 478 OSSTATUS_DCHECK(result == noErr, result); | |
| 479 if (result != noErr) | |
| 480 return result; | |
| 481 | |
| 482 // Set the output callback. | |
| 483 callback.inputProc = OutputProc; | |
| 484 callback.inputProcRefCon = this; | |
| 485 result = AudioUnitSetProperty( | |
| 486 output_unit_, | |
| 487 kAudioUnitProperty_SetRenderCallback, | |
| 488 kAudioUnitScope_Input, | |
| 489 0, | |
| 490 &callback, | |
| 491 sizeof(callback)); | |
| 492 | |
| 493 OSSTATUS_DCHECK(result == noErr, result); | |
| 494 if (result != noErr) | |
| 495 return result; | |
| 496 | |
| 497 // Set the varispeed callback. | |
| 498 callback.inputProc = VarispeedProc; | |
| 499 callback.inputProcRefCon = this; | |
| 500 result = AudioUnitSetProperty( | |
| 501 varispeed_unit_, | |
| 502 kAudioUnitProperty_SetRenderCallback, | |
| 503 kAudioUnitScope_Input, | |
| 504 0, | |
| 505 &callback, | |
| 506 sizeof(callback)); | |
| 507 | |
| 508 OSSTATUS_DCHECK(result == noErr, result); | |
| 509 | |
| 510 return result; | |
| 511 } | |
| 512 | |
| 513 OSStatus AudioSynchronizedStream::SetupStreamFormats() { | |
| 514 AudioStreamBasicDescription asbd, asbd_dev1_in, asbd_dev2_out; | |
| 515 | |
| 516 // Get the stream format (hardware side of the input AUHAL). | |
| 517 UInt32 property_size = sizeof(asbd_dev1_in); | |
| 518 OSStatus result = AudioUnitGetProperty( | |
| 519 input_unit_, | |
| 520 kAudioUnitProperty_StreamFormat, | |
| 521 kAudioUnitScope_Input, | |
| 522 1, | |
| 523 &asbd_dev1_in, | |
| 524 &property_size); | |
| 525 | |
| 526 OSSTATUS_DCHECK(result == noErr, result); | |
| 527 if (result != noErr) | |
| 528 return result; | |
| 529 | |
| 530 // Get the Stream Format (client side). | |
| 531 property_size = sizeof(asbd); | |
| 532 result = AudioUnitGetProperty( | |
| 533 input_unit_, | |
| 534 kAudioUnitProperty_StreamFormat, | |
| 535 kAudioUnitScope_Output, | |
| 536 1, | |
| 537 &asbd, | |
| 538 &property_size); | |
| 539 | |
| 540 OSSTATUS_DCHECK(result == noErr, result); | |
| 541 if (result != noErr) | |
| 542 return result; | |
| 543 | |
| 544 // Get the stream format (hardware side of the output unit). | |
| 545 property_size = sizeof(asbd_dev2_out); | |
| 546 result = AudioUnitGetProperty( | |
| 547 output_unit_, | |
| 548 kAudioUnitProperty_StreamFormat, | |
| 549 kAudioUnitScope_Output, | |
| 550 0, | |
| 551 &asbd_dev2_out, | |
| 552 &property_size); | |
| 553 | |
| 554 OSSTATUS_DCHECK(result == noErr, result); | |
| 555 if (result != noErr) | |
| 556 return result; | |
| 557 | |
| 558 // Set the format of all the AUs to the input/output devices' channel count. | |
| 559 // For the simple case, this is set to the lower of the input device's | |
| 560 // and output device's channel counts. | |
| 561 asbd.mChannelsPerFrame = std::min(asbd_dev1_in.mChannelsPerFrame, | |
| 562 asbd_dev2_out.mChannelsPerFrame); | |
| 563 | |
| 564 // We must get the sample rate of the input device and set it to the | |
| 565 // stream format of AUHAL. | |
| 566 Float64 rate = 0; | |
| 567 property_size = sizeof(rate); | |
| 568 | |
| 569 AudioObjectPropertyAddress pa; | |
| 570 pa.mSelector = kAudioDevicePropertyNominalSampleRate; | |
| 571 pa.mScope = kAudioObjectPropertyScopeWildcard; | |
| 572 pa.mElement = kAudioObjectPropertyElementMaster; | |
| 573 result = AudioObjectGetPropertyData( | |
| 574 input_info_.id_, | |
| 575 &pa, | |
| 576 0, | |
| 577 0, | |
| 578 &property_size, | |
| 579 &rate); | |
| 580 | |
| 581 OSSTATUS_DCHECK(result == noErr, result); | |
| 582 if (result != noErr) | |
| 583 return result; | |
| 584 | |
| 585 asbd.mSampleRate = rate; | |
| 586 property_size = sizeof(asbd); | |
| 587 | |
| 588 // Set the new formats to the AUs... | |
| 589 result = AudioUnitSetProperty( | |
| 590 input_unit_, | |
| 591 kAudioUnitProperty_StreamFormat, | |
| 592 kAudioUnitScope_Output, | |
| 593 1, | |
| 594 &asbd, | |
| 595 property_size); | |
| 596 | |
| 597 OSSTATUS_DCHECK(result == noErr, result); | |
| 598 if (result != noErr) | |
| 599 return result; | |
| 600 | |
| 601 result = AudioUnitSetProperty( | |
| 602 varispeed_unit_, | |
| 603 kAudioUnitProperty_StreamFormat, | |
| 604 kAudioUnitScope_Input, | |
| 605 0, | |
| 606 &asbd, | |
| 607 property_size); | |
| 608 | |
| 609 OSSTATUS_DCHECK(result == noErr, result); | |
| 610 if (result != noErr) | |
| 611 return result; | |
| 612 | |
| 613 // Set the correct sample rate for the output device, | |
| 614 // but keep the channel count the same. | |
| 615 property_size = sizeof(rate); | |
| 616 | |
| 617 pa.mSelector = kAudioDevicePropertyNominalSampleRate; | |
| 618 pa.mScope = kAudioObjectPropertyScopeWildcard; | |
| 619 pa.mElement = kAudioObjectPropertyElementMaster; | |
| 620 result = AudioObjectGetPropertyData( | |
| 621 output_info_.id_, | |
| 622 &pa, | |
| 623 0, | |
| 624 0, | |
| 625 &property_size, | |
| 626 &rate); | |
| 627 | |
| 628 OSSTATUS_DCHECK(result == noErr, result); | |
| 629 if (result != noErr) | |
| 630 return result; | |
| 631 | |
| 632 output_sample_rate_ = rate; | |
| 633 | |
| 634 asbd.mSampleRate = rate; | |
| 635 property_size = sizeof(asbd); | |
| 636 | |
| 637 // Set the new audio stream formats for the rest of the AUs... | |
| 638 result = AudioUnitSetProperty( | |
| 639 varispeed_unit_, | |
| 640 kAudioUnitProperty_StreamFormat, | |
| 641 kAudioUnitScope_Output, | |
| 642 0, | |
| 643 &asbd, | |
| 644 property_size); | |
| 645 | |
| 646 OSSTATUS_DCHECK(result == noErr, result); | |
| 647 if (result != noErr) | |
| 648 return result; | |
| 649 | |
| 650 result = AudioUnitSetProperty( | |
| 651 output_unit_, | |
| 652 kAudioUnitProperty_StreamFormat, | |
| 653 kAudioUnitScope_Input, | |
| 654 0, | |
| 655 &asbd, | |
| 656 property_size); | |
| 657 | |
| 658 OSSTATUS_DCHECK(result == noErr, result); | |
| 659 return result; | |
| 660 } | |
| 661 | |
| 662 void AudioSynchronizedStream::AllocateInputData() { | |
| 663 // Allocate storage for the AudioBufferList used for the | |
| 664 // input data from the input AudioUnit. | |
| 665 // We allocate enough space for one AudioBuffer per channel. | |
| 666 UInt32 malloc_size = offsetof(AudioBufferList, mBuffers[0]) + | |
|
scherkus (not reviewing)
2012/09/13 13:06:03
for the sake of other chromium developers can you
Chris Rogers
2012/09/15 00:06:08
Done.
| |
| 667 (sizeof(AudioBuffer) * channels_); | |
| 668 | |
| 669 input_data_ = static_cast<AudioBufferList*>(malloc(malloc_size)); | |
|
scherkus (not reviewing)
2012/09/13 13:06:03
any reason to prefer malloc/free over new/delete h
Chris Rogers
2012/09/15 00:06:08
For the AudioBufferList allocation, I'm using mall
| |
| 670 input_data_->mNumberBuffers = channels_; | |
| 671 | |
| 672 // Allocate buffers for AudioBufferList. | |
| 673 UInt32 buffer_size_bytes = hardware_buffer_size_ * sizeof(Float32); | |
| 674 for (UInt32 i = 0; i < input_data_->mNumberBuffers; ++i) { | |
| 675 input_data_->mBuffers[i].mNumberChannels = 1; | |
| 676 input_data_->mBuffers[i].mDataByteSize = buffer_size_bytes; | |
| 677 input_data_->mBuffers[i].mData = malloc(buffer_size_bytes); | |
|
Chris Rogers
2012/09/15 00:06:08
I've removed the malloc() here and am now using an
| |
| 678 } | |
| 679 } | |
| 680 | |
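Regarding the malloc/free question in the review thread inside AllocateInputData() above: AudioBufferList ends in a variable-length array of AudioBuffer, so its true size is computed with offsetof plus one AudioBuffer per channel, which is why a raw malloc is more natural than new here. A standalone sketch of the same pattern, with the matching teardown in one place (illustrative only; AllocateBufferList and FreeBufferList are hypothetical helper names, not Chromium code):

```cpp
// Illustrative sketch of the variable-length AudioBufferList allocation
// pattern used by AllocateInputData(), plus the matching free.
#include <CoreAudio/CoreAudioTypes.h>
#include <cstddef>
#include <cstdlib>

static AudioBufferList* AllocateBufferList(UInt32 channels, UInt32 frames) {
  // AudioBufferList declares "AudioBuffer mBuffers[1]" as a flexible tail,
  // so the byte size is offsetof(...) + channels * sizeof(AudioBuffer).
  size_t bytes =
      offsetof(AudioBufferList, mBuffers[0]) + channels * sizeof(AudioBuffer);
  AudioBufferList* list = static_cast<AudioBufferList*>(malloc(bytes));
  list->mNumberBuffers = channels;
  for (UInt32 i = 0; i < channels; ++i) {
    list->mBuffers[i].mNumberChannels = 1;
    list->mBuffers[i].mDataByteSize = frames * sizeof(Float32);
    list->mBuffers[i].mData = malloc(frames * sizeof(Float32));
  }
  return list;
}

static void FreeBufferList(AudioBufferList* list) {
  for (UInt32 i = 0; i < list->mNumberBuffers; ++i)
    free(list->mBuffers[i].mData);
  free(list);
}
```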
| 681 void AudioSynchronizedStream::ComputeThruOffset() { | |
| 682 // The initial latency will at least be the safety offsets | |
| 683 // of the devices + the buffer sizes. | |
| 684 in_to_out_sample_offset_ = SInt32(input_info_.buffer_size_frames_ + | |
| 685 output_info_.buffer_size_frames_); | |
| 686 } | |
| 687 | |
| 688 OSStatus AudioSynchronizedStream::HandleInputCallback( | |
| 689 AudioUnitRenderActionFlags* io_action_flags, | |
| 690 const AudioTimeStamp* time_stamp, | |
| 691 UInt32 bus_number, | |
| 692 UInt32 number_of_frames, | |
| 693 AudioBufferList* io_data) { | |
| 694 if (first_input_time_ < 0.0) | |
| 695 first_input_time_ = time_stamp->mSampleTime; | |
| 696 | |
| 697 // Get the new audio input data. | |
| 698 OSStatus result = AudioUnitRender( | |
| 699 input_unit_, | |
| 700 io_action_flags, | |
| 701 time_stamp, | |
| 702 bus_number, | |
| 703 number_of_frames, | |
| 704 input_data_); | |
| 705 | |
| 706 OSSTATUS_DCHECK(result == noErr, result); | |
| 707 if (result != noErr) | |
| 708 return result; | |
| 709 | |
| 710 // Buffer input into FIFO. | |
| 711 if (is_fifo_initialized_) { | |
| 712 // TODO(crogers): remove this locking once AudioFifo becomes thread-safe. | |
| 713 if (fifo_lock_.Try()) { | |
| 714 AudioBus bus(channels_, number_of_frames, input_data_); | |
| 715 fifo_.Push(&bus); | |
| 716 fifo_lock_.Release(); | |
| 717 } | |
| 718 } | |
| 719 | |
| 720 return result; | |
| 721 } | |
| 722 | |
| 723 OSStatus AudioSynchronizedStream::HandleVarispeedCallback( | |
| 724 AudioUnitRenderActionFlags* io_action_flags, | |
| 725 const AudioTimeStamp* time_stamp, | |
| 726 UInt32 bus_number, | |
| 727 UInt32 number_of_frames, | |
| 728 AudioBufferList* io_data) { | |
| 729 // Create a wrapper bus on the AudioBufferList. | |
| 730 AudioBus bus(channels_, number_of_frames, io_data); | |
| 731 | |
| 732 if (fifo_.frames() < static_cast<int>(number_of_frames)) { | |
|
scherkus (not reviewing)
2012/09/13 13:06:03
FYI: fifo_ not locked
See my comment on line 817
Chris Rogers
2012/09/15 00:06:08
I've completely removed this fifo_lock_ and have a
| |
| 733 // We don't DCHECK here, since this is a possible run-time condition | |
| 734 // if the machine is bogged down. | |
| 735 bus.Zero(); | |
| 736 return noErr; | |
| 737 } | |
| 738 | |
| 739 // TODO(crogers): remove this locking once AudioFifo becomes thread-safe. | |
| 740 if (fifo_lock_.Try()) { | |
| 741 // Read from the FIFO to feed the varispeed. | |
| 742 fifo_.Consume(&bus, 0, number_of_frames); | |
| 743 fifo_lock_.Release(); | |
| 744 } | |
| 745 | |
| 746 // Calculate a varispeed rate scalar factor to compensate for drift between | |
| 747 // input and output. We use the actual number of frames still in the FIFO | |
| 748 // compared with the ideal value of kTargetDelayFrames. | |
| 749 size_t n = fifo_.frames(); | |
|
scherkus (not reviewing)
2012/09/13 13:06:03
not locked
Chris Rogers
2012/09/15 00:06:08
Fixed in AudioFifo thread-safety
| |
| 750 int delta = n - kTargetDelayFrames; | |
| 751 double sample_rate = output_sample_rate_; | |
| 752 double x = (sample_rate + delta) / sample_rate; | |
| 753 | |
| 754 fifo_rate_compensation_ = x; | |
| 755 | |
| 756 return noErr; | |
| 757 } | |
| 758 | |
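For concreteness, the compensation computed at the end of HandleVarispeedCallback() works out as in the sketch below; all numbers are hypothetical and only stand in for output_sample_rate_, kTargetDelayFrames, and fifo_.frames().

```cpp
// Hypothetical walk-through of the fifo_rate_compensation_ formula above.
#include <cstdio>

int main() {
  const double sample_rate = 48000.0;   // stands in for output_sample_rate_
  const int target_delay_frames = 256;  // stands in for kTargetDelayFrames
  const int frames_in_fifo = 320;       // FIFO running 64 frames too deep

  const int delta = frames_in_fifo - target_delay_frames;   // +64
  const double rate = (sample_rate + delta) / sample_rate;  // ~1.00133

  // A value above 1.0 makes the varispeed consume input slightly faster than
  // real time, draining the FIFO back toward the target depth; a value below
  // 1.0 does the opposite when the FIFO is running shallow.
  std::printf("fifo_rate_compensation_ = %f\n", rate);
  return 0;
}
```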
| 759 OSStatus AudioSynchronizedStream::HandleOutputCallback( | |
| 760 AudioUnitRenderActionFlags* io_action_flags, | |
| 761 const AudioTimeStamp* time_stamp, | |
| 762 UInt32 bus_number, | |
| 763 UInt32 number_of_frames, | |
| 764 AudioBufferList* io_data) { | |
| 765 if (first_input_time_ < 0.0) { | |
| 766 // Input callback hasn't run yet -> silence. | |
| 767 ZeroBufferList(io_data); | |
| 768 return noErr; | |
| 769 } | |
| 770 | |
| 771 AudioTimeStamp input_ts; | |
| 772 OSStatus result = AudioDeviceGetCurrentTime(input_info_.id_, &input_ts); | |
| 773 | |
| 774 if (result != noErr) { | |
| 775 ZeroBufferList(io_data); | |
| 776 return noErr; | |
| 777 } | |
| 778 | |
| 779 AudioTimeStamp output_ts; | |
| 780 result = AudioDeviceGetCurrentTime(output_info_.id_, &output_ts); | |
| 781 | |
| 782 OSSTATUS_DCHECK(result == noErr, result); | |
| 783 if (result != noErr) | |
| 784 return result; | |
| 785 | |
| 786 // Use the varispeed playback rate to offset small discrepancies | |
| 787 // in hardware clocks, and also any differences in sample-rate | |
| 788 // between input and output devices. | |
| 789 | |
| 790 // Adjust for rate scalars of the input and output devices. | |
| 791 double rate = input_ts.mRateScalar / output_ts.mRateScalar; | |
| 792 | |
| 793 // Adjust for FIFO drift. | |
| 794 rate *= fifo_rate_compensation_; | |
| 795 | |
| 796 result = AudioUnitSetParameter( | |
| 797 varispeed_unit_, | |
| 798 kVarispeedParam_PlaybackRate, | |
| 799 kAudioUnitScope_Global, | |
| 800 0, | |
| 801 rate, | |
| 802 0); | |
| 803 | |
| 804 OSSTATUS_DCHECK(result == noErr, result); | |
| 805 if (result != noErr) | |
| 806 return result; | |
| 807 | |
| 808 // Get the delta between the devices and add it to the offset. | |
| 809 if (first_output_time_ < 0.) { | |
|
scherkus (not reviewing)
2012/09/13 13:06:03
0.0
Chris Rogers
2012/09/15 00:06:08
This code has now been removed.
| |
| 810 first_output_time_ = time_stamp->mSampleTime; | |
| 811 ComputeThruOffset(); | |
| 812 | |
| 813 // Buffer initial silence corresponding to I/O delay. | |
| 814 unsigned n = static_cast<unsigned>(in_to_out_sample_offset_); | |
|
scherkus (not reviewing)
2012/09/13 13:06:03
s/unsigned/int
Chris Rogers
2012/09/15 00:06:08
Removed/simplified
| |
| 815 AudioBus silence(channels_, n); | |
| 816 silence.Zero(); | |
| 817 fifo_.Push(&silence); | |
|
scherkus (not reviewing)
2012/09/13 13:06:03
FYI: not locked
to confirm.. the output+varispeed
Chris Rogers
2012/09/15 00:06:08
The AudioFifo now has changes to make it lock-free
| |
| 818 is_fifo_initialized_ = true; | |
| 819 | |
| 820 ZeroBufferList(io_data); | |
| 821 return noErr; | |
| 822 } | |
| 823 | |
| 824 // Render to the output using the varispeed. | |
| 825 result = AudioUnitRender( | |
| 826 varispeed_unit_, | |
| 827 io_action_flags, | |
| 828 time_stamp, | |
| 829 0, | |
| 830 number_of_frames, | |
| 831 io_data); | |
| 832 | |
| 833 OSSTATUS_DCHECK(result == noErr, result); | |
| 834 if (result != noErr) | |
| 835 return result; | |
| 836 | |
| 837 // Create a wrapper bus on the AudioBufferList. | |
| 838 AudioBus bus(channels_, number_of_frames, io_data); | |
| 839 | |
| 840 // Process in-place! | |
| 841 source_->OnMoreIOData(&bus, &bus, AudioBuffersState(0, 0)); | |
| 842 | |
| 843 return noErr; | |
| 844 } | |
| 845 | |
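The review threads above note that fifo_ is touched from more than one real-time callback and that AudioFifo was later reworked to be lock-free. For background only, the sketch below shows the general shape of a single-producer/single-consumer ring buffer that avoids locks; it is a generic illustration, not Chromium's media::AudioFifo, and SpscRingBuffer is a hypothetical name.

```cpp
// Generic single-producer/single-consumer ring buffer: one thread may call
// Push() and a different thread may call Pop(), with no locks involved.
#include <atomic>
#include <cstddef>
#include <vector>

class SpscRingBuffer {
 public:
  explicit SpscRingBuffer(size_t capacity)
      : buffer_(capacity + 1), head_(0), tail_(0) {}

  // Producer thread only.
  bool Push(float value) {
    size_t head = head_.load(std::memory_order_relaxed);
    size_t next = (head + 1) % buffer_.size();
    if (next == tail_.load(std::memory_order_acquire))
      return false;  // Full.
    buffer_[head] = value;
    head_.store(next, std::memory_order_release);
    return true;
  }

  // Consumer thread only.
  bool Pop(float* value) {
    size_t tail = tail_.load(std::memory_order_relaxed);
    if (tail == head_.load(std::memory_order_acquire))
      return false;  // Empty.
    *value = buffer_[tail];
    tail_.store((tail + 1) % buffer_.size(), std::memory_order_release);
    return true;
  }

 private:
  std::vector<float> buffer_;
  std::atomic<size_t> head_;
  std::atomic<size_t> tail_;
};
```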
| 846 OSStatus AudioSynchronizedStream::InputProc( | |
| 847 void* user_data, | |
| 848 AudioUnitRenderActionFlags* io_action_flags, | |
| 849 const AudioTimeStamp* time_stamp, | |
| 850 UInt32 bus_number, | |
| 851 UInt32 number_of_frames, | |
| 852 AudioBufferList* io_data) { | |
| 853 AudioSynchronizedStream* stream = | |
| 854 static_cast<AudioSynchronizedStream*>(user_data); | |
| 855 DCHECK(stream); | |
| 856 | |
| 857 return stream->HandleInputCallback( | |
| 858 io_action_flags, | |
| 859 time_stamp, | |
| 860 bus_number, | |
| 861 number_of_frames, | |
| 862 io_data); | |
| 863 } | |
| 864 | |
| 865 OSStatus AudioSynchronizedStream::VarispeedProc( | |
| 866 void* user_data, | |
| 867 AudioUnitRenderActionFlags* io_action_flags, | |
| 868 const AudioTimeStamp* time_stamp, | |
| 869 UInt32 bus_number, | |
| 870 UInt32 number_of_frames, | |
| 871 AudioBufferList* io_data) { | |
| 872 AudioSynchronizedStream* stream = | |
| 873 static_cast<AudioSynchronizedStream*>(user_data); | |
| 874 DCHECK(stream); | |
| 875 | |
| 876 return stream->HandleVarispeedCallback( | |
| 877 io_action_flags, | |
| 878 time_stamp, | |
| 879 bus_number, | |
| 880 number_of_frames, | |
| 881 io_data); | |
| 882 } | |
| 883 | |
| 884 OSStatus AudioSynchronizedStream::OutputProc( | |
| 885 void* user_data, | |
| 886 AudioUnitRenderActionFlags* io_action_flags, | |
| 887 const AudioTimeStamp* time_stamp, | |
| 888 UInt32 bus_number, | |
| 889 UInt32 number_of_frames, | |
| 890 AudioBufferList* io_data) { | |
| 891 AudioSynchronizedStream* stream = | |
| 892 static_cast<AudioSynchronizedStream*>(user_data); | |
| 893 DCHECK(stream); | |
| 894 | |
| 895 return stream->HandleOutputCallback( | |
| 896 io_action_flags, | |
| 897 time_stamp, | |
| 898 bus_number, | |
| 899 number_of_frames, | |
| 900 io_data); | |
| 901 } | |
| 902 | |
| 903 void AudioSynchronizedStream::AudioDeviceInfo::Initialize( | |
| 904 AudioDeviceID id, bool is_input) { | |
| 905 id_ = id; | |
| 906 is_input_ = is_input; | |
| 907 if (id_ == kAudioDeviceUnknown) | |
| 908 return; | |
| 909 | |
| 910 UInt32 property_size = sizeof(buffer_size_frames_); | |
| 911 | |
| 912 AudioObjectPropertyAddress pa; | |
| 913 pa.mSelector = kAudioDevicePropertyBufferFrameSize; | |
| 914 pa.mScope = kAudioObjectPropertyScopeWildcard; | |
| 915 pa.mElement = kAudioObjectPropertyElementMaster; | |
| 916 OSStatus result = AudioObjectGetPropertyData( | |
| 917 id_, | |
| 918 &pa, | |
| 919 0, | |
| 920 0, | |
| 921 &property_size, | |
| 922 &buffer_size_frames_); | |
| 923 | |
| 924 OSSTATUS_DCHECK(result == noErr, result); | |
| 925 } | |
| 926 | |
| 927 } // namespace media | |