Index: media/audio/mac/audio_low_latency_input_mac.cc |
diff --git a/media/audio/mac/audio_low_latency_input_mac.cc b/media/audio/mac/audio_low_latency_input_mac.cc |
index f1dbdf786fb2910b21c043dddad598cb98b54b30..d3585c9652a933d412c7366a689850eeafba1b1c 100644 |
--- a/media/audio/mac/audio_low_latency_input_mac.cc |
+++ b/media/audio/mac/audio_low_latency_input_mac.cc |
@@ -10,6 +10,7 @@ |
#include "base/logging.h" |
#include "base/mac/mac_logging.h" |
#include "media/audio/mac/audio_manager_mac.h" |
+#include "media/base/audio_block_fifo.h" |
#include "media/base/audio_bus.h" |
#include "media/base/data_buffer.h" |
@@ -31,6 +32,23 @@ static std::ostream& operator<<(std::ostream& os, |
return os; |
} |
+static void WrapBufferList(AudioBufferList* buffer_list, |
+ AudioBus* bus, |
+ int frames) { |
+ DCHECK(buffer_list); |
+ DCHECK(bus); |
+ const int channels = bus->channels(); |
+ const int buffer_list_channels = buffer_list->mNumberBuffers; |
+ CHECK_EQ(channels, buffer_list_channels); |
+ |
+ // Copy pointers from AudioBufferList. |
+ for (int i = 0; i < channels; ++i) |
+ bus->SetChannelData(i, static_cast<float*>(buffer_list->mBuffers[i].mData)); |
+ |
+ // Finally set the actual length. |
+ bus->set_frames(frames); |
+} |
+ |
// See "Technical Note TN2091 - Device input using the HAL Output Audio Unit" |
// http://developer.apple.com/library/mac/#technotes/tn2091/_index.html |
// for more details and background regarding this implementation. |
@@ -46,43 +64,46 @@ AUAudioInputStream::AUAudioInputStream(AudioManagerMac* manager, |
started_(false), |
hardware_latency_frames_(0), |
number_of_channels_in_frame_(0), |
- fifo_(input_params.channels(), |
- number_of_frames_, |
- kNumberOfBlocksBufferInFifo) { |
+ audio_wrapper_(AudioBus::CreateWrapper(input_params.channels())) { |
DCHECK(manager_); |
// Set up the desired (output) format specified by the client. |
format_.mSampleRate = input_params.sample_rate(); |
format_.mFormatID = kAudioFormatLinearPCM; |
- format_.mFormatFlags = kLinearPCMFormatFlagIsPacked | |
- kLinearPCMFormatFlagIsSignedInteger; |
- format_.mBitsPerChannel = input_params.bits_per_sample(); |
+ format_.mFormatFlags = |
+ kAudioFormatFlagsNativeFloatPacked | kLinearPCMFormatFlagIsNonInterleaved; |
+ size_t bytes_per_sample = sizeof(Float32); |
+ format_.mBitsPerChannel = bytes_per_sample * 8; |
format_.mChannelsPerFrame = input_params.channels(); |
- format_.mFramesPerPacket = 1; // uncompressed audio |
- format_.mBytesPerPacket = (format_.mBitsPerChannel * |
- input_params.channels()) / 8; |
- format_.mBytesPerFrame = format_.mBytesPerPacket; |
+ format_.mFramesPerPacket = 1; |
+ format_.mBytesPerFrame = bytes_per_sample; |
+ format_.mBytesPerPacket = format_.mBytesPerFrame * format_.mFramesPerPacket; |
format_.mReserved = 0; |
DVLOG(1) << "Desired ouput format: " << format_; |
- // Derive size (in bytes) of the buffers that we will render to. |
- UInt32 data_byte_size = number_of_frames_ * format_.mBytesPerFrame; |
- DVLOG(1) << "Size of data buffer in bytes : " << data_byte_size; |
+ // Allocate AudioBufferList based on the number of channels. |
+ audio_buffer_list_.reset(static_cast<AudioBufferList*>(malloc( |
+ sizeof(AudioBufferList) + (input_params.channels() - 1) * sizeof(AudioBuffer)))); |
+ audio_buffer_list_->mNumberBuffers = input_params.channels(); |
// Allocate AudioBuffers to be used as storage for the received audio. |
// The AudioBufferList structure works as a placeholder for the |
// AudioBuffer structure, which holds a pointer to the actual data buffer. |
- audio_data_buffer_.reset(new uint8[data_byte_size]); |
- audio_buffer_list_.mNumberBuffers = 1; |
- |
- AudioBuffer* audio_buffer = audio_buffer_list_.mBuffers; |
- audio_buffer->mNumberChannels = input_params.channels(); |
- audio_buffer->mDataByteSize = data_byte_size; |
- audio_buffer->mData = audio_data_buffer_.get(); |
+ UInt32 data_byte_size = number_of_frames_ * format_.mBytesPerFrame; |
+ audio_data_buffer_.reset(static_cast<float*>(base::AlignedAlloc( |
+ data_byte_size * audio_buffer_list_->mNumberBuffers, |
+ AudioBus::kChannelAlignment))); |
+ AudioBuffer* audio_buffer = audio_buffer_list_->mBuffers; |
+ for (UInt32 i = 0; i < audio_buffer_list_->mNumberBuffers; ++i) { |
+ audio_buffer[i].mNumberChannels = 1; |
+ audio_buffer[i].mDataByteSize = data_byte_size; |
+ audio_buffer[i].mData = audio_data_buffer_.get() + i * number_of_frames_; |
+ } |
} |
-AUAudioInputStream::~AUAudioInputStream() {} |
+AUAudioInputStream::~AUAudioInputStream() { |
+} |
// Obtain and open the AUHAL AudioOutputUnit for recording. |
bool AUAudioInputStream::Open() { |
@@ -165,23 +186,6 @@ bool AUAudioInputStream::Open() { |
return false; |
} |
- // Register the input procedure for the AUHAL. |
- // This procedure will be called when the AUHAL has received new data |
- // from the input device. |
- AURenderCallbackStruct callback; |
- callback.inputProc = InputProc; |
- callback.inputProcRefCon = this; |
- result = AudioUnitSetProperty(audio_unit_, |
- kAudioOutputUnitProperty_SetInputCallback, |
- kAudioUnitScope_Global, |
- 0, |
- &callback, |
- sizeof(callback)); |
- if (result) { |
- HandleError(result); |
- return false; |
- } |
- |
// Set up the the desired (output) format. |
// For obtaining input from a device, the device format is always expressed |
// on the output scope of the AUHAL's Element 1. |
@@ -229,6 +233,23 @@ bool AUAudioInputStream::Open() { |
} |
} |
+ // Register the input procedure for the AUHAL. |
+ // This procedure will be called when the AUHAL has received new data |
+ // from the input device. |
+ AURenderCallbackStruct callback; |
+ callback.inputProc = InputProc; |
+ callback.inputProcRefCon = this; |
+ result = AudioUnitSetProperty(audio_unit_, |
+ kAudioOutputUnitProperty_SetInputCallback, |
+ kAudioUnitScope_Global, |
+ 0, |
+ &callback, |
+ sizeof(callback)); |
+ if (result) { |
+ HandleError(result); |
+ return false; |
+ } |
+ |
// Finally, initialize the audio unit and ensure that it is ready to render. |
// Allocates memory according to the maximum number of audio frames |
// it can produce in response to a single render call. |
@@ -342,9 +363,9 @@ void AUAudioInputStream::SetVolume(double volume) { |
Float32 volume_float32 = static_cast<Float32>(volume); |
AudioObjectPropertyAddress property_address = { |
- kAudioDevicePropertyVolumeScalar, |
- kAudioDevicePropertyScopeInput, |
- kAudioObjectPropertyElementMaster |
+ kAudioDevicePropertyVolumeScalar, |
+ kAudioDevicePropertyScopeInput, |
+ kAudioObjectPropertyElementMaster |
}; |
// Try to set the volume for master volume channel. |
@@ -390,15 +411,15 @@ void AUAudioInputStream::SetVolume(double volume) { |
double AUAudioInputStream::GetVolume() { |
// Verify that we have a valid device. |
- if (input_device_id_ == kAudioObjectUnknown){ |
+ if (input_device_id_ == kAudioObjectUnknown) { |
NOTREACHED() << "Device ID is unknown"; |
return 0.0; |
} |
AudioObjectPropertyAddress property_address = { |
- kAudioDevicePropertyVolumeScalar, |
- kAudioDevicePropertyScopeInput, |
- kAudioObjectPropertyElementMaster |
+ kAudioDevicePropertyVolumeScalar, |
+ kAudioDevicePropertyScopeInput, |
+ kAudioObjectPropertyElementMaster |
}; |
if (AudioObjectHasProperty(input_device_id_, &property_address)) { |
@@ -406,12 +427,8 @@ double AUAudioInputStream::GetVolume() { |
// master channel. |
Float32 volume_float32 = 0.0; |
UInt32 size = sizeof(volume_float32); |
- OSStatus result = AudioObjectGetPropertyData(input_device_id_, |
- &property_address, |
- 0, |
- NULL, |
- &size, |
- &volume_float32); |
+ OSStatus result = AudioObjectGetPropertyData( |
+ input_device_id_, &property_address, 0, NULL, &size, &volume_float32); |
if (result == noErr) |
return static_cast<double>(volume_float32); |
} else { |
@@ -472,9 +489,8 @@ OSStatus AUAudioInputStream::InputProc(void* user_data, |
return result; |
// Deliver recorded data to the consumer as a callback. |
- return audio_input->Provide(number_of_frames, |
- audio_input->audio_buffer_list(), |
- time_stamp); |
+ return audio_input->Provide( |
+ number_of_frames, audio_input->audio_buffer_list(), time_stamp); |
} |
OSStatus AUAudioInputStream::Provide(UInt32 number_of_frames, |
@@ -491,22 +507,42 @@ OSStatus AUAudioInputStream::Provide(UInt32 number_of_frames, |
AudioBuffer& buffer = io_data->mBuffers[0]; |
uint8* audio_data = reinterpret_cast<uint8*>(buffer.mData); |
- uint32 capture_delay_bytes = static_cast<uint32> |
- ((capture_latency_frames + 0.5) * format_.mBytesPerFrame); |
+ uint32 capture_delay_bytes = static_cast<uint32>( |
+ (capture_latency_frames + 0.5) * format_.mBytesPerFrame); |
DCHECK(audio_data); |
if (!audio_data) |
return kAudioUnitErr_InvalidElement; |
- // Copy captured (and interleaved) data into FIFO. |
- fifo_.Push(audio_data, number_of_frames, format_.mBitsPerChannel / 8); |
+ // Wrap the output AudioBufferList to |audio_wrapper_|. |
+ WrapBufferList(io_data, audio_wrapper_.get(), number_of_frames); |
+ |
+ // If the stream parameters change for any reason, we need to insert a FIFO |
+ // since the OnMoreData() pipeline can't handle frame size changes. |
+ if (number_of_frames != number_of_frames_) { |
+ // Create a FIFO on the fly to handle any discrepancies in callback rates. |
+ if (!fifo_) { |
+ fifo_.reset(new AudioBlockFifo(audio_wrapper_->channels(), |
+ number_of_frames_, |
+ kNumberOfBlocksBufferInFifo)); |
+ } |
+ } |
+ // When FIFO does not kick in, data will be directly passed to the callback. |
+ if (!fifo_) { |
+ CHECK_EQ(audio_wrapper_->frames(), static_cast<int>(number_of_frames_)); |
+ sink_->OnData( |
+ this, audio_wrapper_.get(), capture_delay_bytes, normalized_volume); |
+ return noErr; |
+ } |
+ |
+ // Compensate the audio delay caused by the FIFO. |
+ capture_delay_bytes += fifo_->GetAvailableFrames() * format_.mBytesPerFrame; |
+ |
+ fifo_->Push(audio_wrapper_.get()); |
// Consume and deliver the data when the FIFO has a block of available data. |
- while (fifo_.available_blocks()) { |
- const AudioBus* audio_bus = fifo_.Consume(); |
+ while (fifo_->available_blocks()) { |
+ const AudioBus* audio_bus = fifo_->Consume(); |
DCHECK_EQ(audio_bus->frames(), static_cast<int>(number_of_frames_)); |
- |
- // Compensate the audio delay caused by the FIFO. |
- capture_delay_bytes += fifo_.GetAvailableFrames() * format_.mBytesPerFrame; |
sink_->OnData(this, audio_bus, capture_delay_bytes, normalized_volume); |
} |
@@ -519,9 +555,9 @@ int AUAudioInputStream::HardwareSampleRate() { |
UInt32 info_size = sizeof(device_id); |
AudioObjectPropertyAddress default_input_device_address = { |
- kAudioHardwarePropertyDefaultInputDevice, |
- kAudioObjectPropertyScopeGlobal, |
- kAudioObjectPropertyElementMaster |
+ kAudioHardwarePropertyDefaultInputDevice, |
+ kAudioObjectPropertyScopeGlobal, |
+ kAudioObjectPropertyElementMaster |
}; |
OSStatus result = AudioObjectGetPropertyData(kAudioObjectSystemObject, |
&default_input_device_address, |
@@ -536,10 +572,8 @@ int AUAudioInputStream::HardwareSampleRate() { |
info_size = sizeof(nominal_sample_rate); |
AudioObjectPropertyAddress nominal_sample_rate_address = { |
- kAudioDevicePropertyNominalSampleRate, |
- kAudioObjectPropertyScopeGlobal, |
- kAudioObjectPropertyElementMaster |
- }; |
+ kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, |
+ kAudioObjectPropertyElementMaster}; |
result = AudioObjectGetPropertyData(device_id, |
&nominal_sample_rate_address, |
0, |
@@ -572,9 +606,9 @@ double AUAudioInputStream::GetHardwareLatency() { |
// Get input audio device latency. |
AudioObjectPropertyAddress property_address = { |
- kAudioDevicePropertyLatency, |
- kAudioDevicePropertyScopeInput, |
- kAudioObjectPropertyElementMaster |
+ kAudioDevicePropertyLatency, |
+ kAudioDevicePropertyScopeInput, |
+ kAudioObjectPropertyElementMaster |
}; |
UInt32 device_latency_frames = 0; |
size = sizeof(device_latency_frames); |
@@ -586,19 +620,19 @@ double AUAudioInputStream::GetHardwareLatency() { |
&device_latency_frames); |
DLOG_IF(WARNING, result != noErr) << "Could not get audio device latency."; |
- return static_cast<double>((audio_unit_latency_sec * |
- format_.mSampleRate) + device_latency_frames); |
+ return static_cast<double>((audio_unit_latency_sec * format_.mSampleRate) + |
+ device_latency_frames); |
} |
double AUAudioInputStream::GetCaptureLatency( |
const AudioTimeStamp* input_time_stamp) { |
// Get the delay between between the actual recording instant and the time |
// when the data packet is provided as a callback. |
- UInt64 capture_time_ns = AudioConvertHostTimeToNanos( |
- input_time_stamp->mHostTime); |
+ UInt64 capture_time_ns = |
+ AudioConvertHostTimeToNanos(input_time_stamp->mHostTime); |
UInt64 now_ns = AudioConvertHostTimeToNanos(AudioGetCurrentHostTime()); |
- double delay_frames = static_cast<double> |
- (1e-9 * (now_ns - capture_time_ns) * format_.mSampleRate); |
+ double delay_frames = static_cast<double>(1e-9 * (now_ns - capture_time_ns) * |
+ format_.mSampleRate); |
// Total latency is composed by the dynamic latency and the fixed |
// hardware latency. |
@@ -608,18 +642,14 @@ double AUAudioInputStream::GetCaptureLatency( |
int AUAudioInputStream::GetNumberOfChannelsFromStream() { |
// Get the stream format, to be able to read the number of channels. |
AudioObjectPropertyAddress property_address = { |
- kAudioDevicePropertyStreamFormat, |
- kAudioDevicePropertyScopeInput, |
- kAudioObjectPropertyElementMaster |
+ kAudioDevicePropertyStreamFormat, |
+ kAudioDevicePropertyScopeInput, |
+ kAudioObjectPropertyElementMaster |
}; |
AudioStreamBasicDescription stream_format; |
UInt32 size = sizeof(stream_format); |
- OSStatus result = AudioObjectGetPropertyData(input_device_id_, |
- &property_address, |
- 0, |
- NULL, |
- &size, |
- &stream_format); |
+ OSStatus result = AudioObjectGetPropertyData( |
+ input_device_id_, &property_address, 0, NULL, &size, &stream_format); |
if (result != noErr) { |
DLOG(WARNING) << "Could not get stream format"; |
return 0; |
@@ -629,8 +659,8 @@ int AUAudioInputStream::GetNumberOfChannelsFromStream() { |
} |
void AUAudioInputStream::HandleError(OSStatus err) { |
- NOTREACHED() << "error " << GetMacOSStatusErrorString(err) |
- << " (" << err << ")"; |
+ NOTREACHED() << "error " << GetMacOSStatusErrorString(err) << " (" << err |
+ << ")"; |
if (sink_) |
sink_->OnError(this); |
} |
@@ -638,13 +668,12 @@ void AUAudioInputStream::HandleError(OSStatus err) { |
bool AUAudioInputStream::IsVolumeSettableOnChannel(int channel) { |
Boolean is_settable = false; |
AudioObjectPropertyAddress property_address = { |
- kAudioDevicePropertyVolumeScalar, |
- kAudioDevicePropertyScopeInput, |
- static_cast<UInt32>(channel) |
+ kAudioDevicePropertyVolumeScalar, |
+ kAudioDevicePropertyScopeInput, |
+ static_cast<UInt32>(channel) |
}; |
- OSStatus result = AudioObjectIsPropertySettable(input_device_id_, |
- &property_address, |
- &is_settable); |
+ OSStatus result = AudioObjectIsPropertySettable( |
+ input_device_id_, &property_address, &is_settable); |
return (result == noErr) ? is_settable : false; |
} |