OLD | NEW |
1 // Copyright (c) 2011 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2011 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "media/audio/mac/audio_low_latency_output_mac.h" | 5 #include "media/audio/mac/audio_low_latency_output_mac.h" |
6 | 6 |
7 #include <CoreServices/CoreServices.h> | 7 #include <CoreServices/CoreServices.h> |
8 | 8 |
9 #include "base/basictypes.h" | 9 #include "base/basictypes.h" |
10 #include "base/logging.h" | 10 #include "base/logging.h" |
(...skipping 29 matching lines...) Expand all Loading... |
40 // 4) At some point some thread will call Stop(), which we handle by directly | 40 // 4) At some point some thread will call Stop(), which we handle by directly |
41 // stopping the default output Audio Unit. | 41 // stopping the default output Audio Unit. |
42 // 5) The same thread that called Stop() will call Close() where we clean up | 42 // 5) The same thread that called Stop() will call Close() where we clean up |
43 // and notify the audio manager, which likely will destroy this object. | 43 // and notify the audio manager, which likely will destroy this object. |
44 | 44 |
45 AUAudioOutputStream::AUAudioOutputStream( | 45 AUAudioOutputStream::AUAudioOutputStream( |
46 AudioManagerMac* manager, const AudioParameters& params) | 46 AudioManagerMac* manager, const AudioParameters& params) |
47 : manager_(manager), | 47 : manager_(manager), |
48 source_(NULL), | 48 source_(NULL), |
49 output_unit_(0), | 49 output_unit_(0), |
50 volume_(1) { | 50 output_device_id_(kAudioObjectUnknown), |
| 51 volume_(1), |
| 52 hardware_latency_frames_(0) { |
51 // We must have a manager. | 53 // We must have a manager. |
52 DCHECK(manager_); | 54 DCHECK(manager_); |
53 // A frame is one sample across all channels. In interleaved audio the per | 55 // A frame is one sample across all channels. In interleaved audio the per |
54 // frame fields identify the set of n |channels|. In uncompressed audio, a | 56 // frame fields identify the set of n |channels|. In uncompressed audio, a |
55 // packet is always one frame. | 57 // packet is always one frame. |
56 format_.mSampleRate = params.sample_rate; | 58 format_.mSampleRate = params.sample_rate; |
57 format_.mFormatID = kAudioFormatLinearPCM; | 59 format_.mFormatID = kAudioFormatLinearPCM; |
58 format_.mFormatFlags = kLinearPCMFormatFlagIsPacked | | 60 format_.mFormatFlags = kLinearPCMFormatFlagIsPacked | |
59 kLinearPCMFormatFlagIsSignedInteger; | 61 kLinearPCMFormatFlagIsSignedInteger; |
60 format_.mBitsPerChannel = params.bits_per_sample; | 62 format_.mBitsPerChannel = params.bits_per_sample; |
61 format_.mChannelsPerFrame = params.channels; | 63 format_.mChannelsPerFrame = params.channels; |
62 format_.mFramesPerPacket = 1; | 64 format_.mFramesPerPacket = 1; |
63 format_.mBytesPerPacket = (format_.mBitsPerChannel * params.channels) / 8; | 65 format_.mBytesPerPacket = (format_.mBitsPerChannel * params.channels) / 8; |
64 format_.mBytesPerFrame = format_.mBytesPerPacket; | 66 format_.mBytesPerFrame = format_.mBytesPerPacket; |
65 format_.mReserved = 0; | 67 format_.mReserved = 0; |
66 | 68 |
67 // Calculate the number of sample frames per callback. | 69 // Calculate the number of sample frames per callback. |
68 number_of_frames_ = params.GetPacketSize() / format_.mBytesPerPacket; | 70 number_of_frames_ = params.GetPacketSize() / format_.mBytesPerPacket; |
69 } | 71 } |
70 | 72 |
71 AUAudioOutputStream::~AUAudioOutputStream() { | 73 AUAudioOutputStream::~AUAudioOutputStream() { |
72 } | 74 } |
73 | 75 |
74 bool AUAudioOutputStream::Open() { | 76 bool AUAudioOutputStream::Open() { |
| 77 // Obtain the current output device selected by the user. |
| 78 UInt32 size = sizeof(output_device_id_); |
| 79 AudioObjectPropertyAddress default_output_device_address = { |
| 80 kAudioHardwarePropertyDefaultOutputDevice, |
| 81 kAudioObjectPropertyScopeGlobal, |
| 82 kAudioObjectPropertyElementMaster |
| 83 }; |
| 84 OSStatus result = AudioObjectGetPropertyData(kAudioObjectSystemObject, |
| 85 &default_output_device_address, |
| 86 0, |
| 87 0, |
| 88 &size, |
| 89 &output_device_id_); |
| 90 DCHECK_EQ(result, 0); |
| 91 if (result) |
| 92 return false; |
| 93 |
75 // Open and initialize the DefaultOutputUnit. | 94 // Open and initialize the DefaultOutputUnit. |
76 Component comp; | 95 Component comp; |
77 ComponentDescription desc; | 96 ComponentDescription desc; |
78 | 97 |
79 desc.componentType = kAudioUnitType_Output; | 98 desc.componentType = kAudioUnitType_Output; |
80 desc.componentSubType = kAudioUnitSubType_DefaultOutput; | 99 desc.componentSubType = kAudioUnitSubType_DefaultOutput; |
81 desc.componentManufacturer = kAudioUnitManufacturer_Apple; | 100 desc.componentManufacturer = kAudioUnitManufacturer_Apple; |
82 desc.componentFlags = 0; | 101 desc.componentFlags = 0; |
83 desc.componentFlagsMask = 0; | 102 desc.componentFlagsMask = 0; |
84 comp = FindNextComponent(0, &desc); | 103 comp = FindNextComponent(0, &desc); |
85 DCHECK(comp); | 104 DCHECK(comp); |
86 | 105 |
87 OSStatus result = OpenAComponent(comp, &output_unit_); | 106 result = OpenAComponent(comp, &output_unit_); |
88 DCHECK_EQ(result, 0); | 107 DCHECK_EQ(result, 0); |
89 if (result) | 108 if (result) |
90 return false; | 109 return false; |
91 | 110 |
92 result = AudioUnitInitialize(output_unit_); | 111 result = AudioUnitInitialize(output_unit_); |
93 | 112 |
94 DCHECK_EQ(result, 0); | 113 DCHECK_EQ(result, 0); |
95 if (result) | 114 if (result) |
96 return false; | 115 return false; |
97 | 116 |
| 117 hardware_latency_frames_ = GetHardwareLatency(); |
| 118 |
98 return Configure(); | 119 return Configure(); |
99 } | 120 } |
100 | 121 |
101 bool AUAudioOutputStream::Configure() { | 122 bool AUAudioOutputStream::Configure() { |
102 // Set the render callback. | 123 // Set the render callback. |
103 AURenderCallbackStruct input; | 124 AURenderCallbackStruct input; |
104 input.inputProc = InputProc; | 125 input.inputProc = InputProc; |
105 input.inputProcRefCon = this; | 126 input.inputProcRefCon = this; |
106 OSStatus result = AudioUnitSetProperty( | 127 OSStatus result = AudioUnitSetProperty( |
107 output_unit_, | 128 output_unit_, |
(...skipping 70 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
178 void AUAudioOutputStream::GetVolume(double* volume) { | 199 void AUAudioOutputStream::GetVolume(double* volume) { |
179 if (!output_unit_) | 200 if (!output_unit_) |
180 return; | 201 return; |
181 *volume = volume_; | 202 *volume = volume_; |
182 } | 203 } |
183 | 204 |
184 // Pulls on our provider to get rendered audio stream. | 205 // Pulls on our provider to get rendered audio stream. |
185 // Note to future hackers of this function: Do not add locks here because this | 206 // Note to future hackers of this function: Do not add locks here because this |
186 // is running on a real-time thread (for low-latency). | 207 // is running on a real-time thread (for low-latency). |
187 OSStatus AUAudioOutputStream::Render(UInt32 number_of_frames, | 208 OSStatus AUAudioOutputStream::Render(UInt32 number_of_frames, |
188 AudioBufferList* io_data) { | 209 AudioBufferList* io_data, |
| 210 const AudioTimeStamp* output_time_stamp) { |
| 211 // Update the playout latency. |
| 212 double playout_latency_frames = GetPlayoutLatency(output_time_stamp); |
| 213 |
189 AudioBuffer& buffer = io_data->mBuffers[0]; | 214 AudioBuffer& buffer = io_data->mBuffers[0]; |
190 uint8* audio_data = reinterpret_cast<uint8*>(buffer.mData); | 215 uint8* audio_data = reinterpret_cast<uint8*>(buffer.mData); |
| 216 uint32 hardware_pending_bytes = static_cast<uint32> |
| 217 ((playout_latency_frames + 0.5) * format_.mBytesPerFrame); |
191 uint32 filled = source_->OnMoreData( | 218 uint32 filled = source_->OnMoreData( |
192 this, audio_data, buffer.mDataByteSize, AudioBuffersState(0, 0)); | 219 this, audio_data, buffer.mDataByteSize, |
| 220 AudioBuffersState(0, hardware_pending_bytes)); |
193 | 221 |
194 // Handle channel order for 5.1 audio. | 222 // Handle channel order for 5.1 audio. |
195 if (format_.mChannelsPerFrame == 6) { | 223 if (format_.mChannelsPerFrame == 6) { |
196 if (format_.mBitsPerChannel == 8) { | 224 if (format_.mBitsPerChannel == 8) { |
197 SwizzleCoreAudioLayout5_1(reinterpret_cast<uint8*>(audio_data), filled); | 225 SwizzleCoreAudioLayout5_1(reinterpret_cast<uint8*>(audio_data), filled); |
198 } else if (format_.mBitsPerChannel == 16) { | 226 } else if (format_.mBitsPerChannel == 16) { |
199 SwizzleCoreAudioLayout5_1(reinterpret_cast<int16*>(audio_data), filled); | 227 SwizzleCoreAudioLayout5_1(reinterpret_cast<int16*>(audio_data), filled); |
200 } else if (format_.mBitsPerChannel == 32) { | 228 } else if (format_.mBitsPerChannel == 32) { |
201 SwizzleCoreAudioLayout5_1(reinterpret_cast<int32*>(audio_data), filled); | 229 SwizzleCoreAudioLayout5_1(reinterpret_cast<int32*>(audio_data), filled); |
202 } | 230 } |
203 } | 231 } |
204 | 232 |
205 return noErr; | 233 return noErr; |
206 } | 234 } |
207 | 235 |
208 // DefaultOutputUnit callback | 236 // DefaultOutputUnit callback |
209 OSStatus AUAudioOutputStream::InputProc(void* user_data, | 237 OSStatus AUAudioOutputStream::InputProc(void* user_data, |
210 AudioUnitRenderActionFlags*, | 238 AudioUnitRenderActionFlags*, |
211 const AudioTimeStamp*, | 239 const AudioTimeStamp* output_time_stamp, |
212 UInt32, | 240 UInt32, |
213 UInt32 number_of_frames, | 241 UInt32 number_of_frames, |
214 AudioBufferList* io_data) { | 242 AudioBufferList* io_data) { |
215 AUAudioOutputStream* audio_output = | 243 AUAudioOutputStream* audio_output = |
216 static_cast<AUAudioOutputStream*>(user_data); | 244 static_cast<AUAudioOutputStream*>(user_data); |
217 DCHECK(audio_output); | 245 DCHECK(audio_output); |
218 if (!audio_output) | 246 if (!audio_output) |
219 return -1; | 247 return -1; |
220 | 248 |
221 return audio_output->Render(number_of_frames, io_data); | 249 return audio_output->Render(number_of_frames, io_data, output_time_stamp); |
222 } | 250 } |
223 | 251 |
224 double AUAudioOutputStream::HardwareSampleRate() { | 252 double AUAudioOutputStream::HardwareSampleRate() { |
225 // Determine the default output device's sample-rate. | 253 // Determine the default output device's sample-rate. |
226 AudioDeviceID device_id = kAudioDeviceUnknown; | 254 AudioDeviceID device_id = kAudioObjectUnknown; |
227 UInt32 info_size = sizeof(device_id); | 255 UInt32 info_size = sizeof(device_id); |
228 | 256 |
229 AudioObjectPropertyAddress default_output_device_address = { | 257 AudioObjectPropertyAddress default_output_device_address = { |
230 kAudioHardwarePropertyDefaultOutputDevice, | 258 kAudioHardwarePropertyDefaultOutputDevice, |
231 kAudioObjectPropertyScopeGlobal, | 259 kAudioObjectPropertyScopeGlobal, |
232 kAudioObjectPropertyElementMaster | 260 kAudioObjectPropertyElementMaster |
233 }; | 261 }; |
234 OSStatus result = AudioObjectGetPropertyData(kAudioObjectSystemObject, | 262 OSStatus result = AudioObjectGetPropertyData(kAudioObjectSystemObject, |
235 &default_output_device_address, | 263 &default_output_device_address, |
236 0, | 264 0, |
(...skipping 17 matching lines...) Expand all Loading... |
254 0, | 282 0, |
255 0, | 283 0, |
256 &info_size, | 284 &info_size, |
257 &nominal_sample_rate); | 285 &nominal_sample_rate); |
258 DCHECK_EQ(result, 0); | 286 DCHECK_EQ(result, 0); |
259 if (result) | 287 if (result) |
260 return 0.0; // error | 288 return 0.0; // error |
261 | 289 |
262 return nominal_sample_rate; | 290 return nominal_sample_rate; |
263 } | 291 } |
| 292 |
| 293 double AUAudioOutputStream::GetHardwareLatency() { |
| 294 if (!output_unit_ || output_device_id_ == kAudioObjectUnknown) { |
| 295 DLOG(WARNING) << "Audio unit object is NULL or device ID is unknown"; |
| 296 return 0.0; |
| 297 } |
| 298 |
| 299 // Get audio unit latency. |
| 300 Float64 audio_unit_latency_sec = 0.0; |
| 301 UInt32 size = sizeof(audio_unit_latency_sec); |
| 302 OSStatus result = AudioUnitGetProperty(output_unit_, |
| 303 kAudioUnitProperty_Latency, |
| 304 kAudioUnitScope_Global, |
| 305 0, |
| 306 &audio_unit_latency_sec, |
| 307 &size); |
| 308 DLOG_IF(WARNING, result != noErr) << "Could not get audio unit latency."; |
| 309 |
| 310 // Get output audio device latency. |
| 311 AudioObjectPropertyAddress property_address = { |
| 312 kAudioDevicePropertyLatency, |
| 313 kAudioDevicePropertyScopeOutput, |
| 314 kAudioObjectPropertyElementMaster |
| 315 }; |
| 316 UInt32 device_latency_frames = 0; |
| 317 size = sizeof(device_latency_frames); |
| 318 result = AudioObjectGetPropertyData(output_device_id_, |
| 319 &property_address, |
| 320 0, |
| 321 NULL, |
| 322 &size, |
| 323 &device_latency_frames); |
| 324 DLOG_IF(WARNING, result != noErr) << "Could not get audio device latency."; |
| 325 |
| 326 // Get the stream latency. |
| 327 property_address.mSelector = kAudioDevicePropertyStreams; |
| 328 UInt32 stream_latency_frames = 0; |
| 329 result = AudioObjectGetPropertyDataSize(output_device_id_, |
| 330 &property_address, |
| 331 0, |
| 332 NULL, |
| 333 &size); |
| 334 if (!result) { |
| 335 scoped_ptr_malloc<AudioStreamID> |
| 336 streams(reinterpret_cast<AudioStreamID*>(malloc(size))); |
| 337 AudioStreamID* stream_ids = streams.get(); |
| 338 result = AudioObjectGetPropertyData(output_device_id_, |
| 339 &property_address, |
| 340 0, |
| 341 NULL, |
| 342 &size, |
| 343 stream_ids); |
| 344 if (!result) { |
| 345 property_address.mSelector = kAudioStreamPropertyLatency; |
| 346 result = AudioObjectGetPropertyData(stream_ids[0], |
| 347 &property_address, |
| 348 0, |
| 349 NULL, |
| 350 &size, |
| 351 &stream_latency_frames); |
| 352 } |
| 353 } |
| 354 DLOG_IF(WARNING, result != noErr) << "Could not get audio stream latency."; |
| 355 |
| 356 return static_cast<double>((audio_unit_latency_sec * |
| 357 format_.mSampleRate) + device_latency_frames + stream_latency_frames); |
| 358 } |
| 359 |
| 360 double AUAudioOutputStream::GetPlayoutLatency( |
| 361 const AudioTimeStamp* output_time_stamp) { |
| 362 // Get the delay between the moment getting the callback and the scheduled |
| 363 // time stamp that tells when the data is going to be played out. |
| 364 UInt64 output_time_ns = AudioConvertHostTimeToNanos( |
| 365 output_time_stamp->mHostTime); |
| 366 UInt64 now_ns = AudioConvertHostTimeToNanos(AudioGetCurrentHostTime()); |
| 367 double delay_frames = static_cast<double> |
| 368 (1e-9 * (output_time_ns - now_ns) * format_.mSampleRate); |
| 369 |
| 370 return (delay_frames + hardware_latency_frames_); |
| 371 } |
OLD | NEW |