Chromium Code Reviews| Index: media/audio/mac/audio_synchronized_mac.cc |
| =================================================================== |
| --- media/audio/mac/audio_synchronized_mac.cc (revision 0) |
| +++ media/audio/mac/audio_synchronized_mac.cc (revision 0) |
| @@ -0,0 +1,871 @@ |
| +// Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| +// Use of this source code is governed by a BSD-style license that can be |
| +// found in the LICENSE file. |
| + |
| +#include "audio_synchronized_mac.h" |
|
scherkus (not reviewing)
2012/09/12 14:05:29
should be full path
Chris Rogers
2012/09/13 01:03:05
Done.
|
| + |
| +#include <CoreServices/CoreServices.h> |
| + |
| +#if CHROME |
| + #include "base/basictypes.h" |
| + #include "base/logging.h" |
| + #include "base/mac/mac_logging.h" |
| + #include "media/audio/audio_util.h" |
| + #include "media/audio/mac/audio_manager_mac.h" |
| +#endif |
| + |
| +namespace media { |
| + |
// Hardware I/O buffer size in frames, requested from both the input and
// output devices to keep end-to-end latency low.
const int kHardwareBufferSize = 128;

// Capacity (in frames) of the FIFO that buffers captured input before the
// varispeed converter consumes it.
const int kFIFOSize = 16384;

// TODO(crogers): handle the non-stereo case.
const int kFIFOChannels = 2;

// This value is empirically determined for minimum latency
// while still guarding against FIFO under-runs.
// TODO(crogers): refine this, taking into account different input/output
// sample-rate combinations.
const int kTargetDelayFrames = 256;
| + |
| +static void MakeBufferSilent(AudioBufferList* io_data) { |
| + for (UInt32 i = 0; i < io_data->mNumberBuffers; ++i) |
| + memset(io_data->mBuffers[i].mData, 0, io_data->mBuffers[i].mDataByteSize); |
| +} |
| + |
// Constructs a stream that captures from |input_id| and renders to
// |output_id|, resampling through a varispeed unit to compensate for
// clock drift between the two devices.  Nothing is allocated or opened
// here; Open() does the real work.
AudioSynchronizedStream::AudioSynchronizedStream(
    AudioManagerMac* manager,
    const AudioParameters&,
    AudioDeviceID input_id,
    AudioDeviceID output_id)
    : manager_(manager),
      input_id_(input_id),
      output_id_(output_id),
      input_buffer_(NULL),
      fifo_(kFIFOChannels, kFIFOSize),
      is_fifo_initialized_(false),
      fifo_rate_compensation_(1.0),
      output_sample_rate_(0),
      input_unit_(0),
      varispeed_unit_(0),
      output_unit_(0),
      first_input_time_(-1),
      first_output_time_(-1),
      in_to_out_sample_offset_(0),
      is_running_(false),
      hardware_frame_size_(kHardwareBufferSize) {
  // TODO(crogers): actually do something with |params|
  // We at least need to verify the sample-rate matches
  // the hardware sample-rate of the output device, and we
  // should take into account the |channels|.
  // For now we're limited to stereo output.
}
| + |
AudioSynchronizedStream::~AudioSynchronizedStream() {
  // Close() must have run first; it tears down the AudioUnits and NULLs
  // these members out.
  DCHECK_EQ(input_unit_, 0);
  DCHECK_EQ(output_unit_, 0);
  DCHECK_EQ(varispeed_unit_, 0);
}
| + |
// Creates and configures the input AUHAL, varispeed converter, and default
// output unit.  The call order below matters: the AUHAL must have I/O
// enabled before its current device is set (see SetupInput()), and stream
// formats can only be set once all units exist.  Returns false on any
// CoreAudio error.
bool AudioSynchronizedStream::Open() {
  // Create the input, output, and varispeed AudioUnits.
  OSStatus result = CreateAudioUnits();
  if (result != noErr) {
    LOG(ERROR) << "Cannot create AudioUnits.";
    return false;
  }

  result = SetupInput(input_id_);
  if (result != noErr) {
    LOG(ERROR) << "Error configuring input AudioUnit.";
    return false;
  }

  result = SetupOutput(output_id_);
  if (result != noErr) {
    LOG(ERROR) << "Error configuring output AudioUnit.";
    return false;
  }

  result = SetupCallbacks();
  if (result != noErr) {
    LOG(ERROR) << "Error setting up callbacks on AudioUnits.";
    return false;
  }

  result = SetupStreamFormats();
  if (result != noErr) {
    LOG(ERROR) << "Error configuring stream formats on AudioUnits.";
    return false;
  }

  // Final initialization of the AudioUnits.
  // NOTE(review): input_unit_ was already initialized inside SetupInput()
  // (required by the AUHAL before configuration); this second call appears
  // redundant -- confirm it is harmless before removing.
  result = AudioUnitInitialize(input_unit_);
  if (result != noErr) {
    LOG(ERROR) << "Error initializing input AudioUnit.";
    return false;
  }

  result = AudioUnitInitialize(output_unit_);
  if (result != noErr) {
    LOG(ERROR) << "Error initializing output AudioUnit.";
    return false;
  }

  result = AudioUnitInitialize(varispeed_unit_);
  if (result != noErr) {
    LOG(ERROR) << "Error initializing varispeed AudioUnit.";
    return false;
  }

  // Compute the initial input->output latency offset from the device
  // buffer sizes.
  ComputeThruOffset();

  return true;
}
| + |
| +void AudioSynchronizedStream::Close() { |
| + DCHECK(!is_running_); |
| + |
| + if (input_buffer_) { |
| + for (UInt32 i = 0; i < input_buffer_->mNumberBuffers; ++i) |
| + free(input_buffer_->mBuffers[i].mData); |
| + free(input_buffer_); |
| + input_buffer_ = 0; |
| + } |
| + |
| + AudioUnitUninitialize(input_unit_); |
|
no longer working on chromium
2012/09/12 09:11:56
Is it all right to call AudioUnitUninitialize if i
Chris Rogers
2012/09/13 01:03:05
I've added extra checking here. It *is* ok to cal
no longer working on chromium
2012/09/17 08:26:01
good, thanks.
|
| + AudioUnitUninitialize(output_unit_); |
| + AudioUnitUninitialize(varispeed_unit_); |
| + input_unit_ = 0; |
|
scherkus (not reviewing)
2012/09/12 14:05:29
s/0/NULL/?
Chris Rogers
2012/09/13 01:03:05
Done.
|
| + output_unit_ = 0; |
| + varispeed_unit_ = 0; |
| + |
| + // Inform the audio manager that we have been closed. This can cause our |
| + // destruction. |
| + manager_->ReleaseOutputStream(this); |
| +} |
| + |
| +void AudioSynchronizedStream::Start(AudioSourceCallback* callback) { |
| + bool good = input_unit_ && output_unit_ && varispeed_unit_; |
| + |
| + DCHECK(good); |
|
scherkus (not reviewing)
2012/09/12 14:05:29
a more informative DCHECK would be to DCHECK on ea
Chris Rogers
2012/09/13 01:03:05
Done.
|
| + DCHECK(callback); |
| + |
| + if (!good) |
|
no longer working on chromium
2012/09/12 09:11:56
early return if (!good || is_running_)
Chris Rogers
2012/09/13 01:03:05
Done.
|
| + return; |
| + |
| + source_ = callback; |
| + |
| + OSStatus result = noErr; |
| + |
| + if (!is_running_) { |
| + result = AudioOutputUnitStart(input_unit_); |
| + OSSTATUS_DCHECK(result == noErr, result); |
| + |
| + if (result == noErr) { |
| + result = AudioOutputUnitStart(output_unit_); |
| + OSSTATUS_DCHECK(result == noErr, result); |
| + } |
| + |
| + first_input_time_ = -1; |
| + first_output_time_ = -1; |
| + } |
| + |
| + if (result == noErr) |
| + is_running_ = true; |
|
no longer working on chromium
2012/09/12 09:11:56
I think we should set is_running_ to true here reg
Chris Rogers
2012/09/13 01:03:05
Done.
|
| +} |
| + |
| +void AudioSynchronizedStream::Stop() { |
| + OSStatus result = noErr; |
| + if (is_running_) { |
| + result = AudioOutputUnitStop(input_unit_); |
| + OSSTATUS_DCHECK(result == noErr, result); |
| + |
| + if (result == noErr) { |
| + result = AudioOutputUnitStop(output_unit_); |
| + OSSTATUS_DCHECK(result == noErr, result); |
| + } |
| + |
| + first_input_time_ = -1; |
|
no longer working on chromium
2012/09/12 09:11:56
we are setting the values to -1 in both Start() an
Chris Rogers
2012/09/13 01:03:05
Done.
|
| + first_output_time_ = -1; |
| + } |
| + |
| + if (result == noErr) |
| + is_running_ = false; |
| +} |
| + |
// Returns true between a successful Start() and the matching Stop().
bool AudioSynchronizedStream::IsRunning() {
  return is_running_;
}
| + |
| +OSStatus AudioSynchronizedStream::SetOutputDeviceAsCurrent( |
| + AudioDeviceID output_id) { |
| + OSStatus result = noErr; |
| + |
| + // Get the default output device if device is unknown. |
| + if (output_id == kAudioDeviceUnknown) { |
| + AudioObjectPropertyAddress pa; |
| + pa.mSelector = kAudioHardwarePropertyDefaultOutputDevice; |
| + pa.mScope = kAudioObjectPropertyScopeGlobal; |
| + pa.mElement = kAudioObjectPropertyElementMaster; |
| + UInt32 size = sizeof(AudioDeviceID); |
|
scherkus (not reviewing)
2012/09/12 14:05:29
if possible sizeof(variable) instead of sizeof(typ
Chris Rogers
2012/09/13 01:03:05
Done.
|
| + |
| + result = AudioObjectGetPropertyData( |
| + kAudioObjectSystemObject, |
| + &pa, |
| + 0, |
| + 0, |
| + &size, |
| + &output_id); |
| + |
| + OSSTATUS_DCHECK(result == noErr, result); |
| + if (result != noErr) |
| + return result; |
| + } |
| + |
| + // Set the render frame size. |
| + UInt32 frame_size = hardware_frame_size_; |
| + AudioObjectPropertyAddress pa; |
| + pa.mSelector = kAudioDevicePropertyBufferFrameSize; |
| + pa.mScope = kAudioDevicePropertyScopeInput; |
| + pa.mElement = kAudioObjectPropertyElementMaster; |
| + result = AudioObjectSetPropertyData( |
| + output_id, |
| + &pa, |
| + 0, |
| + 0, |
| + sizeof(frame_size), |
| + &frame_size); |
| + |
| + OSSTATUS_DCHECK(result == noErr, result); |
| + if (result != noErr) |
| + return result; |
| + |
| + output_info_.Initialize(output_id, false); |
| + |
| + // Set the Current Device to the Default Output Unit. |
| + result = AudioUnitSetProperty( |
| + output_unit_, |
| + kAudioOutputUnitProperty_CurrentDevice, |
| + kAudioUnitScope_Global, |
| + 0, |
| + &output_info_.id_, |
| + sizeof(output_info_.id_)); |
| + |
| + OSSTATUS_DCHECK(result == noErr, result); |
| + return result; |
| +} |
| + |
| +OSStatus AudioSynchronizedStream::SetInputDeviceAsCurrent( |
| + AudioDeviceID input_id) { |
| + OSStatus result = noErr; |
| + |
| + // Get the default input device if device is unknown. |
| + if (input_id == kAudioDeviceUnknown) { |
| + AudioObjectPropertyAddress pa; |
| + pa.mSelector = kAudioHardwarePropertyDefaultInputDevice; |
| + pa.mScope = kAudioObjectPropertyScopeGlobal; |
| + pa.mElement = kAudioObjectPropertyElementMaster; |
| + UInt32 size = sizeof(AudioDeviceID); |
|
scherkus (not reviewing)
2012/09/12 14:05:29
if possible sizeof(variable) instead of sizeof(typ
Chris Rogers
2012/09/13 01:03:05
Done.
|
| + |
| + result = AudioObjectGetPropertyData( |
| + kAudioObjectSystemObject, |
| + &pa, |
| + 0, |
| + 0, |
| + &size, |
| + &input_id); |
| + |
| + OSSTATUS_DCHECK(result == noErr, result); |
| + if (result != noErr) |
| + return result; |
| + } |
| + |
| + // Set the render frame size. |
| + UInt32 frame_size = hardware_frame_size_; |
| + AudioObjectPropertyAddress pa; |
| + pa.mSelector = kAudioDevicePropertyBufferFrameSize; |
| + pa.mScope = kAudioDevicePropertyScopeInput; |
| + pa.mElement = kAudioObjectPropertyElementMaster; |
| + result = AudioObjectSetPropertyData( |
| + input_id, |
| + &pa, |
| + 0, |
| + 0, |
| + sizeof(frame_size), |
| + &frame_size); |
| + |
| + OSSTATUS_DCHECK(result == noErr, result); |
| + if (result != noErr) |
| + return result; |
| + |
| + input_info_.Initialize(input_id, true); |
| + |
| + // Set the Current Device to the AUHAL. |
| + // This should be done only after I/O has been enabled on the AUHAL. |
| + result = AudioUnitSetProperty( |
| + input_unit_, |
| + kAudioOutputUnitProperty_CurrentDevice, |
| + kAudioUnitScope_Global, |
| + 0, |
| + &input_info_.id_, |
| + sizeof(input_info_.id_)); |
| + |
| + OSSTATUS_DCHECK(result == noErr, result); |
| + return result; |
| +} |
| + |
| +OSStatus AudioSynchronizedStream::CreateAudioUnits() { |
| + // Q: Why do we need a varispeed unit? |
| + // A: If the input device and the output device are running at |
| + // different sample rates and/or on different clocks, we will need |
| + // to compensate to avoid a pitch change and |
| + // to avoid buffer under and over runs. |
| + ComponentDescription varispeed_desc; |
| + varispeed_desc.componentType = kAudioUnitType_FormatConverter; |
| + varispeed_desc.componentSubType = kAudioUnitSubType_Varispeed; |
| + varispeed_desc.componentManufacturer = kAudioUnitManufacturer_Apple; |
| + varispeed_desc.componentFlags = 0; |
| + varispeed_desc.componentFlagsMask = 0; |
| + |
| + Component varispeed_comp = FindNextComponent(NULL, &varispeed_desc); |
| + if (varispeed_comp == NULL) |
| + return -1; |
| + OpenAComponent(varispeed_comp, &varispeed_unit_); |
| + |
| + // Open input AudioUnit. |
| + ComponentDescription input_desc; |
| + input_desc.componentType = kAudioUnitType_Output; |
| + input_desc.componentSubType = kAudioUnitSubType_HALOutput; |
| + input_desc.componentManufacturer = kAudioUnitManufacturer_Apple; |
| + input_desc.componentFlags = 0; |
| + input_desc.componentFlagsMask = 0; |
| + |
| + Component input_comp = FindNextComponent(NULL, &input_desc); |
| + if (input_comp == NULL) |
| + return -1; |
| + |
| + OpenAComponent(input_comp, &input_unit_); |
|
no longer working on chromium
2012/09/12 09:11:56
why don't we care about the return value here?
Chris Rogers
2012/09/13 01:03:05
We do ;) added checks and early return for these
Chris Rogers
2012/09/13 01:03:05
Done.
|
| + |
| + // Open output AudioUnit. |
| + ComponentDescription output_desc; |
| + output_desc.componentType = kAudioUnitType_Output; |
| + output_desc.componentSubType = kAudioUnitSubType_DefaultOutput; |
| + output_desc.componentManufacturer = kAudioUnitManufacturer_Apple; |
| + output_desc.componentFlags = 0; |
| + output_desc.componentFlagsMask = 0; |
| + |
| + Component output_comp = FindNextComponent(NULL, &output_desc); |
| + if (output_comp == NULL) |
| + return -1; |
| + OpenAComponent(output_comp, &output_unit_); |
|
no longer working on chromium
2012/09/12 09:11:56
ditto
Chris Rogers
2012/09/13 01:03:05
Done.
|
| + |
| + return noErr; |
| +} |
| + |
// Prepares the input AUHAL: initialize first (required before any
// configuration), enable input / disable output, then bind the device.
OSStatus AudioSynchronizedStream::SetupInput(AudioDeviceID input_id) {
  // The AUHAL used for input needs to be initialized
  // before anything is done to it.
  OSStatus result = AudioUnitInitialize(input_unit_);
  OSSTATUS_DCHECK(result == noErr, result);
  if (result != noErr)
    return result;

  // We must enable the Audio Unit (AUHAL) for input and disable output
  // BEFORE setting the AUHAL's current device.
  result = EnableIO();
  OSSTATUS_DCHECK(result == noErr, result);
  if (result != noErr)
    return result;

  result = SetInputDeviceAsCurrent(input_id);
  OSSTATUS_DCHECK(result == noErr, result);

  return result;
}
| + |
| +OSStatus AudioSynchronizedStream::EnableIO() { |
|
no longer working on chromium
2012/09/12 09:11:56
can we change the name to something else like Setu
Chris Rogers
2012/09/13 01:03:05
Leaving name for now, since the property name we'r
|
| + // Enable input on the AUHAL. |
| + UInt32 enable_io = 1; |
| + OSStatus result = AudioUnitSetProperty( |
| + input_unit_, |
| + kAudioOutputUnitProperty_EnableIO, |
| + kAudioUnitScope_Input, |
| + 1, // input element |
| + &enable_io, |
| + sizeof(enable_io)); |
| + |
| + OSSTATUS_DCHECK(result == noErr, result); |
| + if (result != noErr) |
| + return result; |
| + |
| + // Disable Output on the AUHAL. |
| + enable_io = 0; |
| + result = AudioUnitSetProperty( |
| + input_unit_, |
| + kAudioOutputUnitProperty_EnableIO, |
| + kAudioUnitScope_Output, |
| + 0, // output element |
| + &enable_io, |
| + sizeof(enable_io)); |
| + |
| + OSSTATUS_DCHECK(result == noErr, result); |
| + return result; |
| +} |
| + |
| +OSStatus AudioSynchronizedStream::SetupOutput(AudioDeviceID output_id) { |
| + OSStatus result = noErr; |
| + |
| + result = SetOutputDeviceAsCurrent(output_id); |
| + OSSTATUS_DCHECK(result == noErr, result); |
| + if (result != noErr) |
| + return result; |
| + |
| + // Tell the output unit not to reset timestamps. |
| + // Otherwise sample rate changes will cause sync loss. |
| + UInt32 start_at_zero = 0; |
| + result = AudioUnitSetProperty( |
| + output_unit_, |
| + kAudioOutputUnitProperty_StartTimestampsAtZero, |
| + kAudioUnitScope_Global, |
| + 0, |
|
scherkus (not reviewing)
2012/09/12 14:05:29
these zeros and ones aren't very telling -- is the
Chris Rogers
2012/09/13 01:03:05
For now I haven't done anything about this. I cou
|
| + &start_at_zero, |
| + sizeof(start_at_zero)); |
| + |
| + OSSTATUS_DCHECK(result == noErr, result); |
| + |
| + return result; |
| +} |
| + |
| +OSStatus AudioSynchronizedStream::SetupCallbacks() { |
| + // Set the input callback. |
| + AURenderCallbackStruct callback; |
| + callback.inputProc = InputProc; |
| + callback.inputProcRefCon = this; |
| + OSStatus result = AudioUnitSetProperty( |
| + input_unit_, |
| + kAudioOutputUnitProperty_SetInputCallback, |
| + kAudioUnitScope_Global, |
| + 0, |
|
no longer working on chromium
2012/09/12 09:11:56
shouldn't the AudioUnitElement to be 1 here?
Chris Rogers
2012/09/13 01:03:05
No, since it's "global" scope there isn't any elem
|
| + &callback, |
| + sizeof(callback)); |
| + |
| + OSSTATUS_DCHECK(result == noErr, result); |
| + if (result != noErr) |
| + return result; |
| + |
| + // Set the output callback. |
| + callback.inputProc = OutputProc; |
| + callback.inputProcRefCon = this; |
| + result = AudioUnitSetProperty( |
| + output_unit_, |
| + kAudioUnitProperty_SetRenderCallback, |
| + kAudioUnitScope_Input, |
| + 0, |
| + &callback, |
| + sizeof(callback)); |
| + |
| + OSSTATUS_DCHECK(result == noErr, result); |
| + if (result != noErr) |
| + return result; |
| + |
| + // Set the varispeed callback. |
| + callback.inputProc = VarispeedProc; |
| + callback.inputProcRefCon = this; |
| + result = AudioUnitSetProperty( |
| + varispeed_unit_, |
| + kAudioUnitProperty_SetRenderCallback, |
| + kAudioUnitScope_Input, |
| + 0, |
| + &callback, |
| + sizeof(callback)); |
| + |
| + OSSTATUS_DCHECK(result == noErr, result); |
| + |
| + return result; |
| +} |
| + |
// Negotiates a common stream format across the input AUHAL, the varispeed
// converter, and the output unit, then allocates the non-interleaved
// input buffers.  The channel count is the min of the two devices'; the
// input side runs at the input device's rate and the output side at the
// output device's rate, with the varispeed unit converting between them.
OSStatus AudioSynchronizedStream::SetupStreamFormats() {
  UInt32 buffer_size_frames, buffer_size_bytes;

  AudioStreamBasicDescription asbd, asbd_dev1_in, asbd_dev2_out;
  AudioObjectPropertyAddress pa;

  // Get the size of the IO buffer(s).
  UInt32 property_size = sizeof(buffer_size_frames);
  OSStatus result = AudioUnitGetProperty(
      input_unit_,
      kAudioDevicePropertyBufferFrameSize,
      kAudioUnitScope_Global,
      0,
      &buffer_size_frames,
      &property_size);

  OSSTATUS_DCHECK(result == noErr, result);
  if (result != noErr)
    return result;

  // Per-channel byte size of one callback's worth of Float32 samples.
  buffer_size_bytes = buffer_size_frames * sizeof(Float32);

  // Get the input device's hardware-side stream format (element 1 is the
  // AUHAL's input bus).
  property_size = sizeof(asbd_dev1_in);
  result = AudioUnitGetProperty(
      input_unit_,
      kAudioUnitProperty_StreamFormat,
      kAudioUnitScope_Input,
      1,
      &asbd_dev1_in,
      &property_size);

  OSSTATUS_DCHECK(result == noErr, result);
  if (result != noErr)
    return result;

  // Get the client-side stream format the AUHAL delivers to us.
  property_size = sizeof(asbd);
  result = AudioUnitGetProperty(
      input_unit_,
      kAudioUnitProperty_StreamFormat,
      kAudioUnitScope_Output,
      1,
      &asbd,
      &property_size);

  OSSTATUS_DCHECK(result == noErr, result);
  if (result != noErr)
    return result;

  // Get the output device's hardware-side stream format.
  property_size = sizeof(asbd_dev2_out);
  result = AudioUnitGetProperty(
      output_unit_,
      kAudioUnitProperty_StreamFormat,
      kAudioUnitScope_Output,
      0,
      &asbd_dev2_out,
      &property_size);

  OSSTATUS_DCHECK(result == noErr, result);
  if (result != noErr)
    return result;

  // Set the format of all the AUs to the input/output devices channel count.
  // For a simple case, you want to set this to
  // the lower of count of the channels in the input device vs output device.
  asbd.mChannelsPerFrame =
      ((asbd_dev1_in.mChannelsPerFrame < asbd_dev2_out.mChannelsPerFrame) ?
      asbd_dev1_in.mChannelsPerFrame : asbd_dev2_out.mChannelsPerFrame);

  // We must get the sample rate of the input device
  // and set it to the stream format of AUHAL.
  property_size = sizeof(Float64);
  Float64 rate = 0;

  pa.mSelector = kAudioDevicePropertyNominalSampleRate;
  pa.mScope = kAudioObjectPropertyScopeWildcard;
  pa.mElement = kAudioObjectPropertyElementMaster;
  result = AudioObjectGetPropertyData(
      input_info_.id_,
      &pa,
      0,
      0,
      &property_size,
      &rate);

  OSSTATUS_DCHECK(result == noErr, result);
  if (result != noErr)
    return result;

  asbd.mSampleRate = rate;
  property_size = sizeof(asbd);

  // Set the new formats to the AUs: the AUHAL's client-side output and the
  // varispeed's input both run at the *input* device's rate.
  result = AudioUnitSetProperty(
      input_unit_,
      kAudioUnitProperty_StreamFormat,
      kAudioUnitScope_Output,
      1,
      &asbd,
      property_size);

  OSSTATUS_DCHECK(result == noErr, result);
  if (result != noErr)
    return result;

  result = AudioUnitSetProperty(
      varispeed_unit_,
      kAudioUnitProperty_StreamFormat,
      kAudioUnitScope_Input,
      0,
      &asbd,
      property_size);

  OSSTATUS_DCHECK(result == noErr, result);
  if (result != noErr)
    return result;

  // Set the correct sample rate for the output device,
  // but keep the channel count the same.
  property_size = sizeof(Float64);

  pa.mSelector = kAudioDevicePropertyNominalSampleRate;
  pa.mScope = kAudioObjectPropertyScopeWildcard;
  pa.mElement = kAudioObjectPropertyElementMaster;
  result = AudioObjectGetPropertyData(
      output_info_.id_,
      &pa,
      0,
      0,
      &property_size,
      &rate);

  OSSTATUS_DCHECK(result == noErr, result);
  if (result != noErr)
    return result;

  // Cached for the FIFO drift compensation in VarispeedProc().
  output_sample_rate_ = rate;

  asbd.mSampleRate = rate;
  property_size = sizeof(asbd);

  // The varispeed's output and the output unit's input both run at the
  // *output* device's rate.
  result = AudioUnitSetProperty(
      varispeed_unit_,
      kAudioUnitProperty_StreamFormat,
      kAudioUnitScope_Output,
      0,
      &asbd,
      property_size);

  OSSTATUS_DCHECK(result == noErr, result);
  if (result != noErr)
    return result;

  result = AudioUnitSetProperty(
      output_unit_,
      kAudioUnitProperty_StreamFormat,
      kAudioUnitScope_Input,
      0,
      &asbd,
      property_size);

  OSSTATUS_DCHECK(result == noErr, result);
  if (result != noErr)
    return result;

  // Calculate number of buffers from channels: one AudioBuffer per channel
  // (non-interleaved layout).
  UInt32 malloc_size = offsetof(AudioBufferList, mBuffers[0]) +
      (sizeof(AudioBuffer) * asbd.mChannelsPerFrame);

  // Allocate the buffer list.
  // NOTE(review): malloc() results are not checked -- confirm whether
  // allocation failure should be handled here.
  input_buffer_ = static_cast<AudioBufferList*>(malloc(malloc_size));
  input_buffer_->mNumberBuffers = asbd.mChannelsPerFrame;

  // Pre-allocate each channel's buffer; freed in Close().
  for (UInt32 i = 0; i < input_buffer_->mNumberBuffers; ++i) {
    input_buffer_->mBuffers[i].mNumberChannels = 1;
    input_buffer_->mBuffers[i].mDataByteSize = buffer_size_bytes;
    input_buffer_->mBuffers[i].mData = malloc(buffer_size_bytes);
  }

  return result;
}
| + |
| +void AudioSynchronizedStream::ComputeThruOffset() { |
| + // The initial latency will at least be the safety offsets |
| + // of the devices + the buffer sizes. |
| + in_to_out_sample_offset_ = SInt32(input_info_.buffer_size_frames_ + |
| + output_info_.buffer_size_frames_); |
| +} |
| + |
// TODO(review): debug-only frame-callback counter used by InputProc() and
// OutputProc(); remove along with its printf uses before landing.
static int count = 0;
| + |
| +OSStatus AudioSynchronizedStream::InputProc( |
| + void* user_data, |
| + AudioUnitRenderActionFlags* io_action_flags, |
| + const AudioTimeStamp* time_stamp, |
| + UInt32 bus_number, |
| + UInt32 number_of_frames, |
| + AudioBufferList* io_data) { |
| + AudioSynchronizedStream* This = |
|
scherkus (not reviewing)
2012/09/12 14:05:29
hrmmm... can you follow the pattern you've done in
Chris Rogers
2012/09/13 01:03:05
Yes, much better this way :)
Done
|
| + static_cast<AudioSynchronizedStream*>(user_data); |
| + if (This->first_input_time_ < 0.) |
|
scherkus (not reviewing)
2012/09/12 14:05:29
0.0? 0.0f? or just 0?
Chris Rogers
2012/09/13 01:03:05
Done.
|
| + This->first_input_time_ = time_stamp->mSampleTime; |
| + |
| + // Get the new audio input data. |
| + OSStatus result = AudioUnitRender( |
| + This->input_unit_, |
| + io_action_flags, |
| + time_stamp, |
| + bus_number, |
| + number_of_frames, |
| + This->input_buffer_); |
| + |
| + OSSTATUS_DCHECK(result == noErr, result); |
| + if (result != noErr) |
| + return result; |
| + |
| + // Buffer input into FIFO. |
| + if (This->is_fifo_initialized_) { |
| + AudioBus bus(This->input_buffer_, number_of_frames); |
| +// printf("pushing %d : n = %d\n", |
|
no longer working on chromium
2012/09/12 09:11:56
remove
|
| +// (int)number_of_frames, (int)This->fifo_.framesInFifo()); |
| +++count; |
| + This->fifo_.Push(&bus); |
| + } |
| + |
| + return result; |
| +} |
| + |
| +OSStatus AudioSynchronizedStream::VarispeedProc( |
| + void* user_data, |
| + AudioUnitRenderActionFlags* io_action_flags, |
| + const AudioTimeStamp* time_stamp, |
| + UInt32 bus_number, |
| + UInt32 number_of_frames, |
| + AudioBufferList* io_data) { |
| + AudioSynchronizedStream* This = |
|
no longer working on chromium
2012/09/12 09:11:56
use stream as the name?
scherkus (not reviewing)
2012/09/12 14:05:29
ditto for This
Chris Rogers
2012/09/13 01:03:05
Done.
|
| + static_cast<AudioSynchronizedStream*>(user_data); |
| + |
| + // Create a wrapper bus on the AudioBufferList. |
| + AudioBus bus(io_data, number_of_frames); |
| + |
| + if (This->fifo_.frames() < number_of_frames) { |
| + // We don't DCHECK here, since this is a possible run-time condition |
| + // if the machine is bogged down. |
| + bus.Zero(); |
| + return noErr; |
| + } |
| + |
| + // Read from the FIFO to feed the varispeed. |
| + This->fifo_.Consume(&bus, 0, number_of_frames); |
| + |
| + // Calculate a varispeed rate scalar factor to compensate for drift between |
| + // input and output. We use the actual number of frames still in the FIFO |
| + // compared with the ideal value of kTargetDelayFrames. |
| + size_t n = This->fifo_.frames(); |
| + int delta = n - kTargetDelayFrames; |
| + double sample_rate = This->output_sample_rate_; |
| + double x = (sample_rate + delta) / sample_rate; |
| + |
| + // printf("n = %d : x = %f\n", (int)n, x); |
|
scherkus (not reviewing)
2012/09/12 14:05:29
??
Chris Rogers
2012/09/13 01:03:05
Done.
|
| + |
| + This->fifo_rate_compensation_ = x; |
| + |
| + return noErr; |
| +} |
| + |
| +OSStatus AudioSynchronizedStream::OutputProc( |
| + void* user_data, |
| + AudioUnitRenderActionFlags* io_action_flags, |
| + const AudioTimeStamp* time_stamp, |
| + UInt32 bus_number, |
| + UInt32 number_of_frames, |
| + AudioBufferList* io_data) { |
| + AudioSynchronizedStream* This = |
|
scherkus (not reviewing)
2012/09/12 14:05:29
ditto for This
Chris Rogers
2012/09/13 01:03:05
Done.
|
| + static_cast<AudioSynchronizedStream*>(user_data); |
| + |
| + if (This->first_input_time_ < 0.) { |
|
scherkus (not reviewing)
2012/09/12 14:05:29
0.0? 0.0f? or just 0?
Chris Rogers
2012/09/13 01:03:05
Done.
|
| + // Input callback hasn't run yet -> silence. |
| + MakeBufferSilent(io_data); |
| + return noErr; |
| + } |
| + |
| + --count; |
| + printf("%d: number_of_frames = %d\n", (int)count, (int)number_of_frames); |
| + |
| + AudioTimeStamp input_ts; |
| + OSStatus result = AudioDeviceGetCurrentTime( |
| + This->input_info_.id_, &input_ts); |
| + |
| + if (result != noErr) { |
| + MakeBufferSilent(io_data); |
| + return noErr; |
| + } |
| + |
| + AudioTimeStamp output_ts; |
| + result = AudioDeviceGetCurrentTime(This->output_info_.id_, &output_ts); |
| + |
| + OSSTATUS_DCHECK(result == noErr, result); |
| + if (result != noErr) |
| + return result; |
| + |
| + // Use the varispeed playback rate to offset small discrepancies |
| + // in hardware clocks, and also any differences in sample-rate |
| + // between input and output devices. |
| + |
| + // Adjust for rate scalars of the input and output devices. |
| + Float64 rate = input_ts.mRateScalar / output_ts.mRateScalar; |
| + |
| + // Adjust for FIFO drift. |
| + rate *= This->fifo_rate_compensation_; |
| + |
| + result = AudioUnitSetParameter( |
| + This->varispeed_unit_, |
| + kVarispeedParam_PlaybackRate, |
| + kAudioUnitScope_Global, |
| + 0, |
| + rate, |
| + 0); |
| + |
| + OSSTATUS_DCHECK(result == noErr, result); |
| + if (result != noErr) |
| + return result; |
| + |
| + // Get the delta between the devices and add it to the offset. |
| + if (This->first_output_time_ < 0.) { |
|
scherkus (not reviewing)
2012/09/12 14:05:29
0.0? 0.0f? or just 0?
Chris Rogers
2012/09/13 01:03:05
Done.
|
| + This->first_output_time_ = time_stamp->mSampleTime; |
| + This->ComputeThruOffset(); |
| + |
| + // Buffer initial silence corresponding to I/O delay. |
| + unsigned n = static_cast<unsigned>(This->in_to_out_sample_offset_); |
| + AudioBus silence(kFIFOChannels, n); |
| + This->fifo_.Push(&silence); |
| + This->is_fifo_initialized_ = true; |
| + |
| + MakeBufferSilent(io_data); |
| + return noErr; |
| + } |
| + |
| + // Render to the output using the varispeed. |
| + result = AudioUnitRender( |
| + This->varispeed_unit_, |
| + io_action_flags, |
| + time_stamp, |
| + 0, |
| + number_of_frames, |
| + io_data); |
| + |
| + OSSTATUS_DCHECK(result == noErr, result); |
| + |
| + return noErr; |
| +} |
| + |
| +void AudioSynchronizedStream::AudioDeviceInfo::Initialize( |
| + AudioDeviceID id, bool is_input) { |
| + id_ = id; |
| + is_input_ = is_input; |
| + if (id_ == kAudioDeviceUnknown) |
| + return; |
| + |
| + UInt32 property_size = sizeof(UInt32); |
|
scherkus (not reviewing)
2012/09/12 14:05:29
if possible sizeof(variable) instead of sizeof(typ
Chris Rogers
2012/09/13 01:03:05
Done.
|
| + |
| + AudioObjectPropertyAddress pa; |
| + pa.mSelector = kAudioDevicePropertyBufferFrameSize; |
| + pa.mScope = kAudioObjectPropertyScopeWildcard; |
| + pa.mElement = kAudioObjectPropertyElementMaster; |
| + OSStatus result = AudioObjectGetPropertyData( |
| + id_, |
| + &pa, |
| + 0, |
| + 0, |
| + &property_size, |
| + &buffer_size_frames_); |
| + |
| + OSSTATUS_DCHECK(result == noErr, result); |
| +} |
| + |
| +} // namespace media |