| Index: media/audio/mac/audio_synchronized_mac.cc |
| =================================================================== |
| --- media/audio/mac/audio_synchronized_mac.cc (revision 0) |
| +++ media/audio/mac/audio_synchronized_mac.cc (revision 0) |
| @@ -0,0 +1,927 @@ |
| +// Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| +// Use of this source code is governed by a BSD-style license that can be |
| +// found in the LICENSE file. |
| + |
| +#include "media/audio/mac/audio_synchronized_mac.h" |
| + |
| +#include <CoreServices/CoreServices.h> |
| +#include <algorithm> |
| +#include <cstdlib> |
| +#include <cstring> |
| + |
| +#include "base/basictypes.h" |
| +#include "base/logging.h" |
| +#include "base/mac/mac_logging.h" |
| +#include "media/audio/audio_util.h" |
| +#include "media/audio/mac/audio_manager_mac.h" |
| + |
| +namespace media { |
| + |
| +static const int kHardwareBufferSize = 128; |
| +static const int kFifoSize = 16384; |
| + |
| +// TODO(crogers): handle the non-stereo case. |
| +static const int kChannels = 2; |
| + |
| +// This value was determined empirically for minimum latency while still |
| +// guarding against FIFO under-runs. |
| +// TODO(crogers): refine this, taking into account different input/output |
| +// sample-rate combinations. |
| +static const int kTargetDelayFrames = 256; |
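(For scale: at a 48 kHz output rate, 256 frames is about 5.3 ms of buffered audio.)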
| + |
| +static void ZeroBufferList(AudioBufferList* io_data) { |
| + for (UInt32 i = 0; i < io_data->mNumberBuffers; ++i) |
| + memset(io_data->mBuffers[i].mData, 0, io_data->mBuffers[i].mDataByteSize); |
| +} |
| + |
| +AudioSynchronizedStream::AudioSynchronizedStream( |
| + AudioManagerMac* manager, |
| + const AudioParameters& params, |
| + AudioDeviceID input_id, |
| + AudioDeviceID output_id) |
| + : manager_(manager), |
| + input_id_(input_id), |
| + output_id_(output_id), |
| + input_data_(NULL), |
| + fifo_(kChannels, kFifoSize), |
| + is_fifo_initialized_(false), |
| + fifo_rate_compensation_(1.0), |
| + output_sample_rate_(0), |
| + input_unit_(0), |
| + varispeed_unit_(0), |
| + output_unit_(0), |
| + first_input_time_(-1), |
| + first_output_time_(-1), |
| + in_to_out_sample_offset_(0), |
| + is_running_(false), |
| + hardware_buffer_size_(kHardwareBufferSize), |
| + channels_(kChannels) { |
| + // TODO(crogers): actually do something with |params|. |
| + // We at least need to verify the sample-rate matches the hardware |
| + // sample-rate of the output device, and we should take into account |
| + // the |channels|. For now we're limited to stereo output. |
| +} |
| + |
| +AudioSynchronizedStream::~AudioSynchronizedStream() { |
| + DCHECK(!input_unit_); |
| + DCHECK(!output_unit_); |
| + DCHECK(!varispeed_unit_); |
| +} |
| + |
| +bool AudioSynchronizedStream::Open() { |
| + // Create the input, output, and varispeed AudioUnits. |
| + OSStatus result = CreateAudioUnits(); |
| + if (result != noErr) { |
| + LOG(ERROR) << "Cannot create AudioUnits."; |
| + return false; |
| + } |
| + |
| + result = SetupInput(input_id_); |
| + if (result != noErr) { |
| + LOG(ERROR) << "Error configuring input AudioUnit."; |
| + return false; |
| + } |
| + |
| + result = SetupOutput(output_id_); |
| + if (result != noErr) { |
| + LOG(ERROR) << "Error configuring output AudioUnit."; |
| + return false; |
| + } |
| + |
| + result = SetupCallbacks(); |
| + if (result != noErr) { |
| + LOG(ERROR) << "Error setting up callbacks on AudioUnits."; |
| + return false; |
| + } |
| + |
| + result = SetupStreamFormats(); |
| + if (result != noErr) { |
| + LOG(ERROR) << "Error configuring stream formats on AudioUnits."; |
| + return false; |
| + } |
| + |
| + AllocateInputData(); |
| + |
| + // Final initialization of the AudioUnits. |
| + result = AudioUnitInitialize(input_unit_); |
| + if (result != noErr) { |
| + LOG(ERROR) << "Error initializing input AudioUnit."; |
| + return false; |
| + } |
| + |
| + result = AudioUnitInitialize(output_unit_); |
| + if (result != noErr) { |
| + LOG(ERROR) << "Error initializing output AudioUnit."; |
| + return false; |
| + } |
| + |
| + result = AudioUnitInitialize(varispeed_unit_); |
| + if (result != noErr) { |
| + LOG(ERROR) << "Error initializing varispeed AudioUnit."; |
| + return false; |
| + } |
| + |
| + ComputeThruOffset(); |
| + |
| + return true; |
| +} |
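A hypothetical caller sequence for this class (an editorial sketch, not part of the patch), assuming the caller already has an AudioManagerMac, AudioParameters, and an AudioSourceCallback; the helper name RunSynchronizedStream is illustrative:

    #include "media/audio/mac/audio_synchronized_mac.h"

    void RunSynchronizedStream(
        media::AudioManagerMac* manager,
        const media::AudioParameters& params,
        media::AudioOutputStream::AudioSourceCallback* source) {
      media::AudioSynchronizedStream* stream =
          new media::AudioSynchronizedStream(
              manager, params, kAudioDeviceUnknown, kAudioDeviceUnknown);
      if (stream->Open()) {
        stream->Start(source);  // OnMoreIOData() now runs via the output callback.
        // ... audio flows until the caller decides to stop ...
        stream->Stop();
      }
      stream->Close();  // Returns the stream to the manager, which may delete it.
    }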
| + |
| +void AudioSynchronizedStream::Close() { |
| + DCHECK(!is_running_); |
| + |
| + if (input_data_) { |
| + for (UInt32 i = 0; i < input_data_->mNumberBuffers; ++i) |
| + free(input_data_->mBuffers[i].mData); |
| + free(input_data_); |
| + input_data_ = NULL; |
| + } |
| + |
| + if (input_unit_) { |
| + AudioUnitUninitialize(input_unit_); |
| + CloseComponent(input_unit_); |
| + } |
| + |
| + if (output_unit_) { |
| + AudioUnitUninitialize(output_unit_); |
| + CloseComponent(output_unit_); |
| + } |
| + |
| + if (varispeed_unit_) { |
| + AudioUnitUninitialize(varispeed_unit_); |
| + CloseComponent(varispeed_unit_); |
| + } |
| + |
| + input_unit_ = NULL; |
| + output_unit_ = NULL; |
| + varispeed_unit_ = NULL; |
| + |
| + // Inform the audio manager that we have been closed. This can cause our |
| + // destruction. |
| + manager_->ReleaseOutputStream(this); |
| +} |
| + |
| +void AudioSynchronizedStream::Start(AudioSourceCallback* callback) { |
| + DCHECK(callback); |
| + DCHECK(input_unit_); |
| + DCHECK(output_unit_); |
| + DCHECK(varispeed_unit_); |
| + |
| + if (is_running_ || !input_unit_ || !output_unit_ || !varispeed_unit_) |
| + return; |
| + |
| + source_ = callback; |
| + |
| + first_input_time_ = -1; |
| + first_output_time_ = -1; |
| + |
| + OSStatus result = AudioOutputUnitStart(input_unit_); |
| + OSSTATUS_DCHECK(result == noErr, result); |
| + |
| + if (result == noErr) { |
| + result = AudioOutputUnitStart(output_unit_); |
| + OSSTATUS_DCHECK(result == noErr, result); |
| + } |
| + |
| + // Only mark the stream as running if both units actually started. |
| + is_running_ = result == noErr; |
| +} |
| + |
| +void AudioSynchronizedStream::Stop() { |
| + OSStatus result = noErr; |
| + if (is_running_) { |
| + result = AudioOutputUnitStop(input_unit_); |
| + OSSTATUS_DCHECK(result == noErr, result); |
| + |
| + if (result == noErr) { |
| + result = AudioOutputUnitStop(output_unit_); |
| + OSSTATUS_DCHECK(result == noErr, result); |
| + } |
| + } |
| + |
| + if (result == noErr) |
| + is_running_ = false; |
| +} |
| + |
| +bool AudioSynchronizedStream::IsRunning() { |
| + return is_running_; |
| +} |
| + |
| +// TODO(crogers): |
| +// implement - or remove SetVolume()/GetVolume() from AudioOutputStream. |
|
scherkus (not reviewing)
2012/09/13 13:06:03
nit: some of this comment can go on previous line
Chris Rogers
2012/09/15 00:06:08
Done.
|
| +void AudioSynchronizedStream::SetVolume(double volume) {} |
| +void AudioSynchronizedStream::GetVolume(double* volume) {} |
| + |
| +OSStatus AudioSynchronizedStream::SetOutputDeviceAsCurrent( |
| + AudioDeviceID output_id) { |
| + OSStatus result = noErr; |
| + |
| + // Get the default output device if device is unknown. |
| + if (output_id == kAudioDeviceUnknown) { |
| + AudioObjectPropertyAddress pa; |
| + pa.mSelector = kAudioHardwarePropertyDefaultOutputDevice; |
| + pa.mScope = kAudioObjectPropertyScopeGlobal; |
| + pa.mElement = kAudioObjectPropertyElementMaster; |
| + UInt32 size = sizeof(output_id); |
| + |
| + result = AudioObjectGetPropertyData( |
| + kAudioObjectSystemObject, |
| + &pa, |
| + 0, |
| + 0, |
| + &size, |
| + &output_id); |
| + |
| + OSSTATUS_DCHECK(result == noErr, result); |
| + if (result != noErr) |
| + return result; |
| + } |
| + |
| + // Set the render frame size. |
| + UInt32 frame_size = hardware_buffer_size_; |
| + AudioObjectPropertyAddress pa; |
| + pa.mSelector = kAudioDevicePropertyBufferFrameSize; |
| + pa.mScope = kAudioDevicePropertyScopeOutput; |
| + pa.mElement = kAudioObjectPropertyElementMaster; |
| + result = AudioObjectSetPropertyData( |
| + output_id, |
| + &pa, |
| + 0, |
| + 0, |
| + sizeof(frame_size), |
| + &frame_size); |
| + |
| + OSSTATUS_DCHECK(result == noErr, result); |
| + if (result != noErr) |
| + return result; |
| + |
| + output_info_.Initialize(output_id, false); |
| + |
| + // Set the Current Device to the Default Output Unit. |
| + result = AudioUnitSetProperty( |
| + output_unit_, |
| + kAudioOutputUnitProperty_CurrentDevice, |
| + kAudioUnitScope_Global, |
| + 0, |
| + &output_info_.id_, |
| + sizeof(output_info_.id_)); |
| + |
| + OSSTATUS_DCHECK(result == noErr, result); |
| + return result; |
| +} |
| + |
| +OSStatus AudioSynchronizedStream::SetInputDeviceAsCurrent( |
| + AudioDeviceID input_id) { |
| + OSStatus result = noErr; |
| + |
| + // Get the default input device if device is unknown. |
| + if (input_id == kAudioDeviceUnknown) { |
| + AudioObjectPropertyAddress pa; |
| + pa.mSelector = kAudioHardwarePropertyDefaultInputDevice; |
| + pa.mScope = kAudioObjectPropertyScopeGlobal; |
| + pa.mElement = kAudioObjectPropertyElementMaster; |
| + UInt32 size = sizeof(input_id); |
| + |
| + result = AudioObjectGetPropertyData( |
| + kAudioObjectSystemObject, |
| + &pa, |
| + 0, |
| + 0, |
| + &size, |
| + &input_id); |
| + |
| + OSSTATUS_DCHECK(result == noErr, result); |
| + if (result != noErr) |
| + return result; |
| + } |
| + |
| + // Set the render frame size. |
| + UInt32 frame_size = hardware_buffer_size_; |
| + AudioObjectPropertyAddress pa; |
| + pa.mSelector = kAudioDevicePropertyBufferFrameSize; |
| + pa.mScope = kAudioDevicePropertyScopeInput; |
| + pa.mElement = kAudioObjectPropertyElementMaster; |
| + result = AudioObjectSetPropertyData( |
| + input_id, |
| + &pa, |
| + 0, |
| + 0, |
| + sizeof(frame_size), |
| + &frame_size); |
| + |
| + OSSTATUS_DCHECK(result == noErr, result); |
| + if (result != noErr) |
| + return result; |
| + |
| + input_info_.Initialize(input_id, true); |
| + |
| + // Set the Current Device to the AUHAL. |
| + // This should be done only after I/O has been enabled on the AUHAL. |
| + result = AudioUnitSetProperty( |
| + input_unit_, |
| + kAudioOutputUnitProperty_CurrentDevice, |
| + kAudioUnitScope_Global, |
| + 0, |
| + &input_info_.id_, |
| + sizeof(input_info_.id_)); |
| + |
| + OSSTATUS_DCHECK(result == noErr, result); |
| + return result; |
| +} |
| + |
| +OSStatus AudioSynchronizedStream::CreateAudioUnits() { |
| + // Q: Why do we need a varispeed unit? |
| + // A: If the input device and the output device are running at |
| + // different sample rates and/or on different clocks, we need to |
| + // compensate to avoid a pitch change and to avoid buffer |
| + // under-runs and over-runs. |
| + ComponentDescription varispeed_desc; |
| + varispeed_desc.componentType = kAudioUnitType_FormatConverter; |
| + varispeed_desc.componentSubType = kAudioUnitSubType_Varispeed; |
| + varispeed_desc.componentManufacturer = kAudioUnitManufacturer_Apple; |
| + varispeed_desc.componentFlags = 0; |
| + varispeed_desc.componentFlagsMask = 0; |
| + |
| + Component varispeed_comp = FindNextComponent(NULL, &varispeed_desc); |
| + if (varispeed_comp == NULL) |
| + return -1; |
| + |
| + OSStatus result = OpenAComponent(varispeed_comp, &varispeed_unit_); |
| + OSSTATUS_DCHECK(result == noErr, result); |
| + if (result != noErr) |
| + return result; |
| + |
| + // Open input AudioUnit. |
| + ComponentDescription input_desc; |
| + input_desc.componentType = kAudioUnitType_Output; |
| + input_desc.componentSubType = kAudioUnitSubType_HALOutput; |
| + input_desc.componentManufacturer = kAudioUnitManufacturer_Apple; |
| + input_desc.componentFlags = 0; |
| + input_desc.componentFlagsMask = 0; |
| + |
| + Component input_comp = FindNextComponent(NULL, &input_desc); |
| + if (input_comp == NULL) |
| + return -1; |
| + |
| + result = OpenAComponent(input_comp, &input_unit_); |
| + OSSTATUS_DCHECK(result == noErr, result); |
| + if (result != noErr) |
| + return result; |
| + |
| + // Open output AudioUnit. |
| + ComponentDescription output_desc; |
| + output_desc.componentType = kAudioUnitType_Output; |
| + output_desc.componentSubType = kAudioUnitSubType_DefaultOutput; |
| + output_desc.componentManufacturer = kAudioUnitManufacturer_Apple; |
| + output_desc.componentFlags = 0; |
| + output_desc.componentFlagsMask = 0; |
| + |
| + Component output_comp = FindNextComponent(NULL, &output_desc); |
| + if (output_comp == NULL) |
| + return -1; |
| + |
| + result = OpenAComponent(output_comp, &output_unit_); |
| + OSSTATUS_DCHECK(result == noErr, result); |
| + if (result != noErr) |
| + return result; |
| + |
| + return noErr; |
| +} |
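To make the Q/A above concrete: if a 44.1 kHz input device feeds a 48 kHz output device, the varispeed performs the 44.1 kHz to 48 kHz conversion; and even when both devices report the same nominal rate, independent hardware clocks typically differ by tens of parts-per-million, which would slowly fill or drain the FIFO unless the varispeed playback rate is continuously trimmed (see HandleOutputCallback below).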
| + |
| +OSStatus AudioSynchronizedStream::SetupInput(AudioDeviceID input_id) { |
| + // The AUHAL used for input needs to be initialized |
| + // before anything is done to it. |
| + OSStatus result = AudioUnitInitialize(input_unit_); |
| + OSSTATUS_DCHECK(result == noErr, result); |
| + if (result != noErr) |
| + return result; |
| + |
| + // We must enable the Audio Unit (AUHAL) for input and disable output |
| + // BEFORE setting the AUHAL's current device. |
| + result = EnableIO(); |
| + OSSTATUS_DCHECK(result == noErr, result); |
| + if (result != noErr) |
| + return result; |
| + |
| + result = SetInputDeviceAsCurrent(input_id); |
| + OSSTATUS_DCHECK(result == noErr, result); |
| + |
| + return result; |
| +} |
| + |
| +OSStatus AudioSynchronizedStream::EnableIO() { |
| + // Enable input on the AUHAL. |
| + UInt32 enable_io = 1; |
| + OSStatus result = AudioUnitSetProperty( |
| + input_unit_, |
| + kAudioOutputUnitProperty_EnableIO, |
| + kAudioUnitScope_Input, |
| + 1, // input element |
| + &enable_io, |
| + sizeof(enable_io)); |
| + |
| + OSSTATUS_DCHECK(result == noErr, result); |
| + if (result != noErr) |
| + return result; |
| + |
| + // Disable Output on the AUHAL. |
| + enable_io = 0; |
| + result = AudioUnitSetProperty( |
| + input_unit_, |
| + kAudioOutputUnitProperty_EnableIO, |
| + kAudioUnitScope_Output, |
| + 0, // output element |
| + &enable_io, |
| + sizeof(enable_io)); |
| + |
| + OSSTATUS_DCHECK(result == noErr, result); |
| + return result; |
| +} |
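A note on the element numbers above: by AUHAL convention, element 1 is the input (microphone-facing) bus and element 0 is the output (speaker-facing) bus, so input is enabled on element 1 and output is disabled on element 0.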
| + |
| +OSStatus AudioSynchronizedStream::SetupOutput(AudioDeviceID output_id) { |
| + OSStatus result = noErr; |
| + |
| + result = SetOutputDeviceAsCurrent(output_id); |
| + OSSTATUS_DCHECK(result == noErr, result); |
| + if (result != noErr) |
| + return result; |
| + |
| + // Tell the output unit not to reset timestamps. |
| + // Otherwise sample rate changes will cause sync loss. |
| + UInt32 start_at_zero = 0; |
| + result = AudioUnitSetProperty( |
| + output_unit_, |
| + kAudioOutputUnitProperty_StartTimestampsAtZero, |
| + kAudioUnitScope_Global, |
| + 0, |
| + &start_at_zero, |
| + sizeof(start_at_zero)); |
| + |
| + OSSTATUS_DCHECK(result == noErr, result); |
| + |
| + return result; |
| +} |
| + |
| +OSStatus AudioSynchronizedStream::SetupCallbacks() { |
| + // Set the input callback. |
| + AURenderCallbackStruct callback; |
| + callback.inputProc = InputProc; |
| + callback.inputProcRefCon = this; |
| + OSStatus result = AudioUnitSetProperty( |
| + input_unit_, |
| + kAudioOutputUnitProperty_SetInputCallback, |
| + kAudioUnitScope_Global, |
| + 0, |
| + &callback, |
| + sizeof(callback)); |
| + |
| + OSSTATUS_DCHECK(result == noErr, result); |
| + if (result != noErr) |
| + return result; |
| + |
| + // Set the output callback. |
| + callback.inputProc = OutputProc; |
| + callback.inputProcRefCon = this; |
| + result = AudioUnitSetProperty( |
| + output_unit_, |
| + kAudioUnitProperty_SetRenderCallback, |
| + kAudioUnitScope_Input, |
| + 0, |
| + &callback, |
| + sizeof(callback)); |
| + |
| + OSSTATUS_DCHECK(result == noErr, result); |
| + if (result != noErr) |
| + return result; |
| + |
| + // Set the varispeed callback. |
| + callback.inputProc = VarispeedProc; |
| + callback.inputProcRefCon = this; |
| + result = AudioUnitSetProperty( |
| + varispeed_unit_, |
| + kAudioUnitProperty_SetRenderCallback, |
| + kAudioUnitScope_Input, |
| + 0, |
| + &callback, |
| + sizeof(callback)); |
| + |
| + OSSTATUS_DCHECK(result == noErr, result); |
| + |
| + return result; |
| +} |
| + |
| +OSStatus AudioSynchronizedStream::SetupStreamFormats() { |
| + AudioStreamBasicDescription asbd, asbd_dev1_in, asbd_dev2_out; |
| + |
| + // Get the hardware input stream format (device side of the input AUHAL). |
| + UInt32 property_size = sizeof(asbd_dev1_in); |
| + OSStatus result = AudioUnitGetProperty( |
| + input_unit_, |
| + kAudioUnitProperty_StreamFormat, |
| + kAudioUnitScope_Input, |
| + 1, |
| + &asbd_dev1_in, |
| + &property_size); |
| + |
| + OSSTATUS_DCHECK(result == noErr, result); |
| + if (result != noErr) |
| + return result; |
| + |
| + // Get the client-side stream format of the input AUHAL. |
| + property_size = sizeof(asbd); |
| + result = AudioUnitGetProperty( |
| + input_unit_, |
| + kAudioUnitProperty_StreamFormat, |
| + kAudioUnitScope_Output, |
| + 1, |
| + &asbd, |
| + &property_size); |
| + |
| + OSSTATUS_DCHECK(result == noErr, result); |
| + if (result != noErr) |
| + return result; |
| + |
| + // Get the hardware output stream format (device side of the output unit). |
| + property_size = sizeof(asbd_dev2_out); |
| + result = AudioUnitGetProperty( |
| + output_unit_, |
| + kAudioUnitProperty_StreamFormat, |
| + kAudioUnitScope_Output, |
| + 0, |
| + &asbd_dev2_out, |
| + &property_size); |
| + |
| + OSSTATUS_DCHECK(result == noErr, result); |
| + if (result != noErr) |
| + return result; |
| + |
| + // Set the format of all the AUs to the channel count of the devices. |
| + // For the simple case, use the lower of the input device and output |
| + // device channel counts. |
| + asbd.mChannelsPerFrame = std::min(asbd_dev1_in.mChannelsPerFrame, |
| + asbd_dev2_out.mChannelsPerFrame); |
| + |
| + // Get the sample rate of the input device and set it on the |
| + // client-side stream format of the AUHAL. |
| + Float64 rate = 0; |
| + property_size = sizeof(rate); |
| + |
| + AudioObjectPropertyAddress pa; |
| + pa.mSelector = kAudioDevicePropertyNominalSampleRate; |
| + pa.mScope = kAudioObjectPropertyScopeWildcard; |
| + pa.mElement = kAudioObjectPropertyElementMaster; |
| + result = AudioObjectGetPropertyData( |
| + input_info_.id_, |
| + &pa, |
| + 0, |
| + 0, |
| + &property_size, |
| + &rate); |
| + |
| + OSSTATUS_DCHECK(result == noErr, result); |
| + if (result != noErr) |
| + return result; |
| + |
| + asbd.mSampleRate = rate; |
| + property_size = sizeof(asbd); |
| + |
| + // Set the new formats to the AUs... |
| + result = AudioUnitSetProperty( |
| + input_unit_, |
| + kAudioUnitProperty_StreamFormat, |
| + kAudioUnitScope_Output, |
| + 1, |
| + &asbd, |
| + property_size); |
| + |
| + OSSTATUS_DCHECK(result == noErr, result); |
| + if (result != noErr) |
| + return result; |
| + |
| + result = AudioUnitSetProperty( |
| + varispeed_unit_, |
| + kAudioUnitProperty_StreamFormat, |
| + kAudioUnitScope_Input, |
| + 0, |
| + &asbd, |
| + property_size); |
| + |
| + OSSTATUS_DCHECK(result == noErr, result); |
| + if (result != noErr) |
| + return result; |
| + |
| + // Set the correct sample rate for the output device, |
| + // but keep the channel count the same. |
| + property_size = sizeof(rate); |
| + |
| + pa.mSelector = kAudioDevicePropertyNominalSampleRate; |
| + pa.mScope = kAudioObjectPropertyScopeWildcard; |
| + pa.mElement = kAudioObjectPropertyElementMaster; |
| + result = AudioObjectGetPropertyData( |
| + output_info_.id_, |
| + &pa, |
| + 0, |
| + 0, |
| + &property_size, |
| + &rate); |
| + |
| + OSSTATUS_DCHECK(result == noErr, result); |
| + if (result != noErr) |
| + return result; |
| + |
| + output_sample_rate_ = rate; |
| + |
| + asbd.mSampleRate = rate; |
| + property_size = sizeof(asbd); |
| + |
| + // Set the new audio stream formats for the rest of the AUs... |
| + result = AudioUnitSetProperty( |
| + varispeed_unit_, |
| + kAudioUnitProperty_StreamFormat, |
| + kAudioUnitScope_Output, |
| + 0, |
| + &asbd, |
| + property_size); |
| + |
| + OSSTATUS_DCHECK(result == noErr, result); |
| + if (result != noErr) |
| + return result; |
| + |
| + result = AudioUnitSetProperty( |
| + output_unit_, |
| + kAudioUnitProperty_StreamFormat, |
| + kAudioUnitScope_Input, |
| + 0, |
| + &asbd, |
| + property_size); |
| + |
| + OSSTATUS_DCHECK(result == noErr, result); |
| + return result; |
| +} |
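The resulting signal path, as a sketch (with the rates as configured in this function):

    input device --(input rate)--> input AUHAL --> FIFO -->
        varispeed --(converts to output rate)--> output unit --> output device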
| + |
| +void AudioSynchronizedStream::AllocateInputData() { |
| + // Allocate storage for the AudioBufferList used for the |
| + // input data from the input AudioUnit. |
| + // We allocate enough space for one AudioBuffer per channel. |
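| + // AudioBufferList is a variable-length struct: it declares a one-element |
| + // mBuffers array, so storage for channels_ buffers is the offset of that |
| + // array plus sizeof(AudioBuffer) for each channel. |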
| + UInt32 malloc_size = offsetof(AudioBufferList, mBuffers[0]) + |
|
scherkus (not reviewing)
2012/09/13 13:06:03
for the sake of other chromium developers can you document what's going on here?
Chris Rogers
2012/09/15 00:06:08
Done.
|
| + (sizeof(AudioBuffer) * channels_); |
| + |
| + input_data_ = static_cast<AudioBufferList*>(malloc(malloc_size)); |
|
scherkus (not reviewing)
2012/09/13 13:06:03
any reason to prefer malloc/free over new/delete here?
Chris Rogers
2012/09/15 00:06:08
For the AudioBufferList allocation, I'm using malloc().
|
| + input_data_->mNumberBuffers = channels_; |
| + |
| + // Allocate buffers for AudioBufferList. |
| + UInt32 buffer_size_bytes = hardware_buffer_size_ * sizeof(Float32); |
| + for (UInt32 i = 0; i < input_data_->mNumberBuffers; ++i) { |
| + input_data_->mBuffers[i].mNumberChannels = 1; |
| + input_data_->mBuffers[i].mDataByteSize = buffer_size_bytes; |
| + input_data_->mBuffers[i].mData = malloc(buffer_size_bytes); |
|
Chris Rogers
2012/09/15 00:06:08
I've removed the malloc() here and am now using an AudioBus.
|
| + } |
| +} |
| + |
| +void AudioSynchronizedStream::ComputeThruOffset() { |
| + // The initial latency will at least be the safety offsets |
| + // of the devices + the buffer sizes. |
| + in_to_out_sample_offset_ = SInt32(input_info_.buffer_size_frames_ + |
| + output_info_.buffer_size_frames_); |
| +} |
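Assuming both devices accepted the 128-frame buffer size requested earlier, this initial offset works out to 256 frames; HandleOutputCallback pushes that much silence into the FIFO before real output begins.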
| + |
| +OSStatus AudioSynchronizedStream::HandleInputCallback( |
| + AudioUnitRenderActionFlags* io_action_flags, |
| + const AudioTimeStamp* time_stamp, |
| + UInt32 bus_number, |
| + UInt32 number_of_frames, |
| + AudioBufferList* io_data) { |
| + if (first_input_time_ < 0.0) |
| + first_input_time_ = time_stamp->mSampleTime; |
| + |
| + // Get the new audio input data. |
| + OSStatus result = AudioUnitRender( |
| + input_unit_, |
| + io_action_flags, |
| + time_stamp, |
| + bus_number, |
| + number_of_frames, |
| + input_data_); |
| + |
| + OSSTATUS_DCHECK(result == noErr, result); |
| + if (result != noErr) |
| + return result; |
| + |
| + // Buffer input into FIFO. |
| + if (is_fifo_initialized_) { |
| + // TODO(crogers): remove this locking once AudioFifo becomes thread-safe. |
| + if (fifo_lock_.Try()) { |
| + AudioBus bus(channels_, number_of_frames, input_data_); |
| + fifo_.Push(&bus); |
| + fifo_lock_.Release(); |
| + } |
| + } |
| + |
| + return result; |
| +} |
| + |
| +OSStatus AudioSynchronizedStream::HandleVarispeedCallback( |
| + AudioUnitRenderActionFlags* io_action_flags, |
| + const AudioTimeStamp* time_stamp, |
| + UInt32 bus_number, |
| + UInt32 number_of_frames, |
| + AudioBufferList* io_data) { |
| + // Create a wrapper bus on the AudioBufferList. |
| + AudioBus bus(channels_, number_of_frames, io_data); |
| + |
| + if (fifo_.frames() < static_cast<int>(number_of_frames)) { |
|
scherkus (not reviewing)
2012/09/13 13:06:03
FYI: fifo_ not locked
See my comment on line 817
Chris Rogers
2012/09/15 00:06:08
I've completely removed this fifo_lock_ and have a thread-safe AudioFifo now.
|
| + // We don't DCHECK here, since this is a possible run-time condition |
| + // if the machine is bogged down. |
| + bus.Zero(); |
| + return noErr; |
| + } |
| + |
| + // TODO(crogers): remove this locking once AudioFifo becomes thread-safe. |
| + if (fifo_lock_.Try()) { |
| + // Read from the FIFO to feed the varispeed. |
| + fifo_.Consume(&bus, 0, number_of_frames); |
| + fifo_lock_.Release(); |
| + } |
| + |
| + // Calculate a varispeed rate scalar factor to compensate for drift between |
| + // input and output. We use the actual number of frames still in the FIFO |
| + // compared with the ideal value of kTargetDelayFrames. |
| + int n = fifo_.frames(); |
|
scherkus (not reviewing)
2012/09/13 13:06:03
not locked
Chris Rogers
2012/09/15 00:06:08
Fixed in AudioFifo thread-safety
|
| + int delta = n - kTargetDelayFrames; |
| + double sample_rate = output_sample_rate_; |
| + double x = (sample_rate + delta) / sample_rate; |
| + |
| + fifo_rate_compensation_ = x; |
| + |
| + return noErr; |
| +} |
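A minimal editorial sketch of the compensation arithmetic above, assuming a 48 kHz output and kTargetDelayFrames == 256:

    // Returns the varispeed playback-rate scalar for a given FIFO depth.
    double ComputeRateCompensation(int fifo_frames, double sample_rate) {
      int delta = fifo_frames - 256;  // 256 == kTargetDelayFrames.
      return (sample_rate + delta) / sample_rate;
    }

For example, ComputeRateCompensation(304, 48000.0) == 48048.0 / 48000.0, approximately 1.001, so the varispeed consumes input about 0.1% faster to drain the 48-frame backlog; a depth of 208 frames gives approximately 0.999 and slows consumption down.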
| + |
| +OSStatus AudioSynchronizedStream::HandleOutputCallback( |
| + AudioUnitRenderActionFlags* io_action_flags, |
| + const AudioTimeStamp* time_stamp, |
| + UInt32 bus_number, |
| + UInt32 number_of_frames, |
| + AudioBufferList* io_data) { |
| + if (first_input_time_ < 0.0) { |
| + // Input callback hasn't run yet -> silence. |
| + ZeroBufferList(io_data); |
| + return noErr; |
| + } |
| + |
| + AudioTimeStamp input_ts; |
| + OSStatus result = AudioDeviceGetCurrentTime(input_info_.id_, &input_ts); |
| + |
| + if (result != noErr) { |
| + ZeroBufferList(io_data); |
| + return noErr; |
| + } |
| + |
| + AudioTimeStamp output_ts; |
| + result = AudioDeviceGetCurrentTime(output_info_.id_, &output_ts); |
| + |
| + OSSTATUS_DCHECK(result == noErr, result); |
| + if (result != noErr) |
| + return result; |
| + |
| + // Use the varispeed playback rate to offset small discrepancies |
| + // in hardware clocks, and also any differences in sample-rate |
| + // between input and output devices. |
| + |
| + // Adjust for rate scalars of the input and output devices. |
| + double rate = input_ts.mRateScalar / output_ts.mRateScalar; |
| + |
| + // Adjust for FIFO drift. |
| + rate *= fifo_rate_compensation_; |
| + |
| + result = AudioUnitSetParameter( |
| + varispeed_unit_, |
| + kVarispeedParam_PlaybackRate, |
| + kAudioUnitScope_Global, |
| + 0, |
| + rate, |
| + 0); |
| + |
| + OSSTATUS_DCHECK(result == noErr, result); |
| + if (result != noErr) |
| + return result; |
| + |
| + // Get the delta between the devices and add it to the offset. |
| + if (first_output_time_ < 0.) { |
|
scherkus (not reviewing)
2012/09/13 13:06:03
0.0
Chris Rogers
2012/09/15 00:06:08
This code has now been removed.
|
| + first_output_time_ = time_stamp->mSampleTime; |
| + ComputeThruOffset(); |
| + |
| + // Buffer initial silence corresponding to I/O delay. |
| + unsigned n = static_cast<unsigned>(in_to_out_sample_offset_); |
|
scherkus (not reviewing)
2012/09/13 13:06:03
s/unsigned/int
Chris Rogers
2012/09/15 00:06:08
Removed/simplified
|
| + AudioBus silence(channels_, n); |
| + silence.Zero(); |
| + fifo_.Push(&silence); |
|
scherkus (not reviewing)
2012/09/13 13:06:03
FYI: not locked
to confirm.. the output+varispeed
Chris Rogers
2012/09/15 00:06:08
The AudioFifo now has changes to make it lock-free
|
| + is_fifo_initialized_ = true; |
| + |
| + ZeroBufferList(io_data); |
| + return noErr; |
| + } |
| + |
| + // Render to the output using the varispeed. |
| + result = AudioUnitRender( |
| + varispeed_unit_, |
| + io_action_flags, |
| + time_stamp, |
| + 0, |
| + number_of_frames, |
| + io_data); |
| + |
| + OSSTATUS_DCHECK(result == noErr, result); |
| + if (result != noErr) |
| + return result; |
| + |
| + // Create a wrapper bus on the AudioBufferList. |
| + AudioBus bus(channels_, number_of_frames, io_data); |
| + |
| + // Process in-place! |
| + source_->OnMoreIOData(&bus, &bus, AudioBuffersState(0, 0)); |
| + |
| + return noErr; |
| +} |
| + |
| +OSStatus AudioSynchronizedStream::InputProc( |
| + void* user_data, |
| + AudioUnitRenderActionFlags* io_action_flags, |
| + const AudioTimeStamp* time_stamp, |
| + UInt32 bus_number, |
| + UInt32 number_of_frames, |
| + AudioBufferList* io_data) { |
| + AudioSynchronizedStream* stream = |
| + static_cast<AudioSynchronizedStream*>(user_data); |
| + DCHECK(stream); |
| + |
| + return stream->HandleInputCallback( |
| + io_action_flags, |
| + time_stamp, |
| + bus_number, |
| + number_of_frames, |
| + io_data); |
| +} |
| + |
| +OSStatus AudioSynchronizedStream::VarispeedProc( |
| + void* user_data, |
| + AudioUnitRenderActionFlags* io_action_flags, |
| + const AudioTimeStamp* time_stamp, |
| + UInt32 bus_number, |
| + UInt32 number_of_frames, |
| + AudioBufferList* io_data) { |
| + AudioSynchronizedStream* stream = |
| + static_cast<AudioSynchronizedStream*>(user_data); |
| + DCHECK(stream); |
| + |
| + return stream->HandleVarispeedCallback( |
| + io_action_flags, |
| + time_stamp, |
| + bus_number, |
| + number_of_frames, |
| + io_data); |
| +} |
| + |
| +OSStatus AudioSynchronizedStream::OutputProc( |
| + void* user_data, |
| + AudioUnitRenderActionFlags* io_action_flags, |
| + const AudioTimeStamp* time_stamp, |
| + UInt32 bus_number, |
| + UInt32 number_of_frames, |
| + AudioBufferList* io_data) { |
| + AudioSynchronizedStream* stream = |
| + static_cast<AudioSynchronizedStream*>(user_data); |
| + DCHECK(stream); |
| + |
| + return stream->HandleOutputCallback( |
| + io_action_flags, |
| + time_stamp, |
| + bus_number, |
| + number_of_frames, |
| + io_data); |
| +} |
| + |
| +void AudioSynchronizedStream::AudioDeviceInfo::Initialize( |
| + AudioDeviceID id, bool is_input) { |
| + id_ = id; |
| + is_input_ = is_input; |
| + if (id_ == kAudioDeviceUnknown) |
| + return; |
| + |
| + UInt32 property_size = sizeof(buffer_size_frames_); |
| + |
| + AudioObjectPropertyAddress pa; |
| + pa.mSelector = kAudioDevicePropertyBufferFrameSize; |
| + pa.mScope = kAudioObjectPropertyScopeWildcard; |
| + pa.mElement = kAudioObjectPropertyElementMaster; |
| + OSStatus result = AudioObjectGetPropertyData( |
| + id_, |
| + &pa, |
| + 0, |
| + 0, |
| + &property_size, |
| + &buffer_size_frames_); |
| + |
| + OSSTATUS_DCHECK(result == noErr, result); |
| +} |
| + |
| +} // namespace media |