// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "media/audio/mac/audio_low_latency_output_mac.h"

#include <CoreServices/CoreServices.h>

#include "base/basictypes.h"
#include "base/command_line.h"
#include "base/logging.h"
#include "base/mac/mac_logging.h"
#include "media/audio/mac/audio_manager_mac.h"
#include "media/base/media_switches.h"

namespace media {

static std::ostream& operator<<(std::ostream& os,
                                const AudioStreamBasicDescription& format) {
  os << "sample rate       : " << format.mSampleRate << std::endl
     << "format ID         : " << format.mFormatID << std::endl
     << "format flags      : " << format.mFormatFlags << std::endl
     << "bytes per packet  : " << format.mBytesPerPacket << std::endl
     << "frames per packet : " << format.mFramesPerPacket << std::endl
     << "bytes per frame   : " << format.mBytesPerFrame << std::endl
     << "channels per frame: " << format.mChannelsPerFrame << std::endl
     << "bits per channel  : " << format.mBitsPerChannel;
  return os;
}

static AudioObjectPropertyAddress kDefaultOutputDeviceAddress = {
  kAudioHardwarePropertyDefaultOutputDevice,
  kAudioObjectPropertyScopeGlobal,
  kAudioObjectPropertyElementMaster
};

// Overview of operation:
// 1) An object of AUAudioOutputStream is created by the AudioManager
//    factory: audio_man->MakeAudioStream().
// 2) Next some thread will call Open(); at that point the underlying
//    default output Audio Unit is created and configured.
// 3) Then some thread will call Start(source).  The Audio Unit is then
//    started; it creates its own thread which periodically calls the source
//    for more data as buffers are consumed.
// 4) At some point some thread will call Stop(), which we handle by directly
//    stopping the default output Audio Unit.
// 5) The same thread that called Stop() will call Close(), where we clean up
//    and notify the audio manager, which likely will destroy this object.
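//
// A minimal usage sketch of the sequence above (illustrative only; the
// factory arguments and |source| are assumed, and error handling is elided):
//
//   AudioOutputStream* stream = audio_man->MakeAudioStream(...);
//   if (stream->Open()) {
//     stream->Start(source);  // |source| implements AudioSourceCallback.
//     // ... audio plays; the Audio Unit thread pulls data from |source| ...
//     stream->Stop();
//   }
//   stream->Close();  // May destroy |stream|; do not touch it afterwards.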

AUAudioOutputStream::AUAudioOutputStream(
    AudioManagerMac* manager, const AudioParameters& params)
    : manager_(manager),
      source_(NULL),
      output_unit_(0),
      output_device_id_(kAudioObjectUnknown),
      volume_(1),
      hardware_latency_frames_(0),
      stopped_(false),
      audio_bus_(AudioBus::Create(params)) {
  // We must have a manager.
  DCHECK(manager_);

  // A frame is one sample across all channels. In interleaved audio the per
  // frame fields identify the set of n |channels|. In uncompressed audio, a
  // packet is always one frame.
  format_.mSampleRate = params.sample_rate();
  format_.mFormatID = kAudioFormatLinearPCM;
  format_.mFormatFlags = kLinearPCMFormatFlagIsPacked |
                         kLinearPCMFormatFlagIsSignedInteger;
  format_.mBitsPerChannel = params.bits_per_sample();
  format_.mChannelsPerFrame = params.channels();
  format_.mFramesPerPacket = 1;
  format_.mBytesPerPacket = (format_.mBitsPerChannel * params.channels()) / 8;
  format_.mBytesPerFrame = format_.mBytesPerPacket;
  format_.mReserved = 0;
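  // For example, 16-bit stereo PCM gives mBytesPerPacket = (16 * 2) / 8 = 4
  // bytes, and since each packet holds exactly one frame (mFramesPerPacket is
  // 1 above), mBytesPerFrame is also 4 bytes.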

  DVLOG(1) << "Desired output format: " << format_;

  // Calculate the number of sample frames per callback.
  number_of_frames_ = params.frames_per_buffer();
  DVLOG(1) << "Number of frames per callback: " << number_of_frames_;
}

AUAudioOutputStream::~AUAudioOutputStream() {
}

bool AUAudioOutputStream::Open() {
  // Obtain the current output device selected by the user.
  UInt32 size = sizeof(output_device_id_);
  OSStatus result = AudioObjectGetPropertyData(kAudioObjectSystemObject,
                                               &kDefaultOutputDeviceAddress,
                                               0,
                                               0,
                                               &size,
                                               &output_device_id_);
  if (result != noErr || output_device_id_ == kAudioObjectUnknown) {
    OSSTATUS_DLOG(ERROR, result)
        << "Could not get default audio output device.";
    return false;
  }

  // Open and initialize the DefaultOutputUnit.
  AudioComponent comp;
  AudioComponentDescription desc;

  desc.componentType = kAudioUnitType_Output;
  desc.componentSubType = kAudioUnitSubType_DefaultOutput;
  desc.componentManufacturer = kAudioUnitManufacturer_Apple;
  desc.componentFlags = 0;
  desc.componentFlagsMask = 0;
  comp = AudioComponentFindNext(0, &desc);
  if (!comp)
    return false;

  result = AudioComponentInstanceNew(comp, &output_unit_);
  if (result != noErr) {
    OSSTATUS_DLOG(ERROR, result) << "AudioComponentInstanceNew() failed.";
    return false;
  }

  result = AudioUnitInitialize(output_unit_);
  if (result != noErr) {
    OSSTATUS_DLOG(ERROR, result) << "AudioUnitInitialize() failed.";
    return false;
  }

  hardware_latency_frames_ = GetHardwareLatency();

  return Configure();
}

bool AUAudioOutputStream::Configure() {
  // Set the render callback.
  AURenderCallbackStruct input;
  input.inputProc = InputProc;
  input.inputProcRefCon = this;
  OSStatus result = AudioUnitSetProperty(
      output_unit_,
      kAudioUnitProperty_SetRenderCallback,
      kAudioUnitScope_Global,
      0,
      &input,
      sizeof(input));
  if (result != noErr) {
    OSSTATUS_DLOG(ERROR, result)
        << "AudioUnitSetProperty(kAudioUnitProperty_SetRenderCallback) failed.";
    return false;
  }

  // Set the stream format.
  result = AudioUnitSetProperty(
      output_unit_,
      kAudioUnitProperty_StreamFormat,
      kAudioUnitScope_Input,
      0,
      &format_,
      sizeof(format_));
  if (result != noErr) {
    OSSTATUS_DLOG(ERROR, result)
        << "AudioUnitSetProperty(kAudioUnitProperty_StreamFormat) failed.";
    return false;
  }

  // Set the buffer frame size.
  // WARNING: Setting this value changes the frame size for all audio units in
  // the current process.  It's imperative that the input and output frame
  // sizes be the same as the frames_per_buffer() returned by
  // GetDefaultOutputStreamParameters.
  // See http://crbug.com/154352 for details.
  const AudioParameters hw_params =
      manager_->GetDefaultOutputStreamParameters();
  if (number_of_frames_ != static_cast<size_t>(hw_params.frames_per_buffer())) {
    DLOG(ERROR) << "Audio buffer size does not match hardware buffer size.";
    return false;
  }

  UInt32 buffer_size = number_of_frames_;
  result = AudioUnitSetProperty(
      output_unit_,
      kAudioDevicePropertyBufferFrameSize,
      kAudioUnitScope_Output,
      0,
      &buffer_size,
      sizeof(buffer_size));
  if (result != noErr) {
    OSSTATUS_DLOG(ERROR, result)
        << "AudioUnitSetProperty(kAudioDevicePropertyBufferFrameSize) failed.";
    return false;
  }

  return true;
}

void AUAudioOutputStream::Close() {
  if (output_unit_)
    AudioComponentInstanceDispose(output_unit_);

  // Inform the audio manager that we have been closed. This can cause our
  // destruction.
  manager_->ReleaseOutputStream(this);
}

void AUAudioOutputStream::Start(AudioSourceCallback* callback) {
  DCHECK(callback);
  if (!output_unit_) {
    DLOG(ERROR) << "Open() has not been called successfully";
    return;
  }

  stopped_ = false;
  {
    base::AutoLock auto_lock(source_lock_);
    source_ = callback;
  }

  AudioOutputUnitStart(output_unit_);
}

void AUAudioOutputStream::Stop() {
  if (stopped_)
    return;

  AudioOutputUnitStop(output_unit_);

  base::AutoLock auto_lock(source_lock_);
  source_ = NULL;
  stopped_ = true;
}

void AUAudioOutputStream::SetVolume(double volume) {
  if (!output_unit_)
    return;
  volume_ = static_cast<float>(volume);

  // TODO(crogers): set volume property
}

void AUAudioOutputStream::GetVolume(double* volume) {
  if (!output_unit_)
    return;
  *volume = volume_;
}

// Pulls on our provider to get the rendered audio stream.
// Note to future hackers of this function: Do not add locks here because this
// is running on a real-time thread (for low-latency).
OSStatus AUAudioOutputStream::Render(UInt32 number_of_frames,
                                     AudioBufferList* io_data,
                                     const AudioTimeStamp* output_time_stamp) {
  // Update the playout latency.
  double playout_latency_frames = GetPlayoutLatency(output_time_stamp);

  AudioBuffer& buffer = io_data->mBuffers[0];
  uint8* audio_data = reinterpret_cast<uint8*>(buffer.mData);
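  // Round the playout latency to the nearest whole frame and convert it to
  // bytes; this value is passed to the source below as the amount of data
  // already queued ahead of the frames rendered in this callback.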
  uint32 hardware_pending_bytes = static_cast<uint32>
      ((playout_latency_frames + 0.5) * format_.mBytesPerFrame);

  // Unfortunately AUAudioInputStream and AUAudioOutputStream share the frame
  // size set by kAudioDevicePropertyBufferFrameSize above on a per process
  // basis.  What this means is that the |number_of_frames| value may be larger
  // or smaller than the value set during Configure().  In this case either
  // audio input or audio output will be broken, so just output silence.
  // TODO(crogers): Figure out what can trigger a change in |number_of_frames|.
  // See http://crbug.com/154352 for details.
  if (number_of_frames != static_cast<UInt32>(audio_bus_->frames())) {
    memset(audio_data, 0, number_of_frames * format_.mBytesPerFrame);
    return noErr;
  }

  int frames_filled = 0;
  {
    // Render() shouldn't be called except between AudioOutputUnitStart() and
    // AudioOutputUnitStop() calls, but crash reports have shown otherwise:
    // http://crbug.com/178765.  We use |source_lock_| to prevent races and
    // crashes in Render() when |source_| is cleared.
    base::AutoLock auto_lock(source_lock_);
    if (!source_) {
      memset(audio_data, 0, number_of_frames * format_.mBytesPerFrame);
      return noErr;
    }

    frames_filled = source_->OnMoreData(
        audio_bus_.get(), AudioBuffersState(0, hardware_pending_bytes));
  }

  // Note: If this ever changes to output raw float the data must be clipped
  // and sanitized since it may come from an untrusted source such as NaCl.
  audio_bus_->Scale(volume_);
  audio_bus_->ToInterleaved(
      frames_filled, format_.mBitsPerChannel / 8, audio_data);

  return noErr;
}

// DefaultOutputUnit callback
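// This is the render callback registered with
// kAudioUnitProperty_SetRenderCallback in Configure().  It runs on the Audio
// Unit's real-time rendering thread and simply forwards to Render() on the
// stream instance passed as |user_data|.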
OSStatus AUAudioOutputStream::InputProc(void* user_data,
                                        AudioUnitRenderActionFlags*,
                                        const AudioTimeStamp* output_time_stamp,
                                        UInt32,
                                        UInt32 number_of_frames,
                                        AudioBufferList* io_data) {
  AUAudioOutputStream* audio_output =
      static_cast<AUAudioOutputStream*>(user_data);
  if (!audio_output)
    return -1;

  return audio_output->Render(number_of_frames, io_data, output_time_stamp);
}

int AUAudioOutputStream::HardwareSampleRate() {
  // Determine the default output device's sample-rate.
  AudioDeviceID device_id = kAudioObjectUnknown;
  UInt32 info_size = sizeof(device_id);
  OSStatus result = AudioObjectGetPropertyData(kAudioObjectSystemObject,
                                               &kDefaultOutputDeviceAddress,
                                               0,
                                               0,
                                               &info_size,
                                               &device_id);
  if (result != noErr || device_id == kAudioObjectUnknown) {
    OSSTATUS_DLOG(WARNING, result)
        << "Could not get default audio output device.";
    return 0;
  }

  Float64 nominal_sample_rate;
  info_size = sizeof(nominal_sample_rate);

  AudioObjectPropertyAddress nominal_sample_rate_address = {
    kAudioDevicePropertyNominalSampleRate,
    kAudioObjectPropertyScopeGlobal,
    kAudioObjectPropertyElementMaster
  };
  result = AudioObjectGetPropertyData(device_id,
                                      &nominal_sample_rate_address,
                                      0,
                                      0,
                                      &info_size,
                                      &nominal_sample_rate);
  if (result != noErr) {
    OSSTATUS_DLOG(WARNING, result)
        << "Could not get default sample rate for device: " << device_id;
    return 0;
  }

  return static_cast<int>(nominal_sample_rate);
}

double AUAudioOutputStream::GetHardwareLatency() {
  if (!output_unit_ || output_device_id_ == kAudioObjectUnknown) {
    DLOG(WARNING) << "Audio unit object is NULL or device ID is unknown";
    return 0.0;
  }

  // Get audio unit latency.
  Float64 audio_unit_latency_sec = 0.0;
  UInt32 size = sizeof(audio_unit_latency_sec);
  OSStatus result = AudioUnitGetProperty(output_unit_,
                                         kAudioUnitProperty_Latency,
                                         kAudioUnitScope_Global,
                                         0,
                                         &audio_unit_latency_sec,
                                         &size);
  if (result != noErr) {
    OSSTATUS_DLOG(WARNING, result) << "Could not get audio unit latency";
    return 0.0;
  }

  // Get output audio device latency.
  AudioObjectPropertyAddress property_address = {
    kAudioDevicePropertyLatency,
    kAudioDevicePropertyScopeOutput,
    kAudioObjectPropertyElementMaster
  };
  UInt32 device_latency_frames = 0;
  size = sizeof(device_latency_frames);
  result = AudioObjectGetPropertyData(output_device_id_,
                                      &property_address,
                                      0,
                                      NULL,
                                      &size,
                                      &device_latency_frames);
  if (result != noErr) {
    OSSTATUS_DLOG(WARNING, result) << "Could not get audio device latency";
    return 0.0;
  }

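  // The total hardware latency in frames is the audio unit latency (reported
  // in seconds) converted to frames at the stream sample rate, plus the
  // device latency, which is already reported in frames.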
  return static_cast<double>((audio_unit_latency_sec *
      format_.mSampleRate) + device_latency_frames);
}

double AUAudioOutputStream::GetPlayoutLatency(
    const AudioTimeStamp* output_time_stamp) {
  // Ensure mHostTime is valid.
  if ((output_time_stamp->mFlags & kAudioTimeStampHostTimeValid) == 0)
    return 0;

  // Get the delay between the moment getting the callback and the scheduled
  // time stamp that tells when the data is going to be played out.
  UInt64 output_time_ns = AudioConvertHostTimeToNanos(
      output_time_stamp->mHostTime);
  UInt64 now_ns = AudioConvertHostTimeToNanos(AudioGetCurrentHostTime());

  // Prevent unsigned wraparound (which would report a huge delay) when the
  // scheduled output time is already in the past; occurs regularly on the
  // bots, probably less so in the wild.
  if (now_ns > output_time_ns)
    return 0;

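  // Convert the remaining time until playout from nanoseconds to frames at
  // the stream sample rate; e.g. 10 ms until playout at 48 kHz corresponds to
  // 0.010 * 48000 = 480 frames of delay.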
  double delay_frames = static_cast<double>
      (1e-9 * (output_time_ns - now_ns) * format_.mSampleRate);

  return (delay_frames + hardware_latency_frames_);
}

}  // namespace media