// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "media/audio/mac/audio_unified_mac.h"

#include <CoreServices/CoreServices.h>

#include "base/basictypes.h"
#include "base/logging.h"
#include "base/mac/mac_logging.h"
#include "media/audio/mac/audio_manager_mac.h"

namespace media {

// TODO(crogers): support more than hard-coded stereo input.
// Ideally we would like to receive this value as a constructor argument.
static const int kDefaultInputChannels = 2;

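// AudioHardwareUnifiedStream drives synchronized input and output on a single
// "unified" audio device through one HAL IOProc, so both directions share the
// same device clock. Expected lifetime: Open() -> Start() -> Stop() -> Close().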
AudioHardwareUnifiedStream::AudioHardwareUnifiedStream(
    AudioManagerMac* manager, const AudioParameters& params)
    : manager_(manager),
      source_(NULL),
      client_input_channels_(kDefaultInputChannels),
      volume_(1.0f),
      input_channels_(0),
      output_channels_(0),
      input_channels_per_frame_(0),
      output_channels_per_frame_(0),
      io_proc_id_(0),
      device_(kAudioObjectUnknown),
      is_playing_(false) {
  DCHECK(manager_);

  // A frame is one sample across all channels. In interleaved audio the
  // per-frame fields describe one set of |channels| samples. In uncompressed
  // audio, a packet is always one frame.
  format_.mSampleRate = params.sample_rate();
  format_.mFormatID = kAudioFormatLinearPCM;
  format_.mFormatFlags = kLinearPCMFormatFlagIsPacked |
                         kLinearPCMFormatFlagIsSignedInteger;
  format_.mBitsPerChannel = params.bits_per_sample();
  format_.mChannelsPerFrame = params.channels();
  format_.mFramesPerPacket = 1;
  format_.mBytesPerPacket = (format_.mBitsPerChannel * params.channels()) / 8;
  format_.mBytesPerFrame = format_.mBytesPerPacket;
  format_.mReserved = 0;
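  // For example, 16-bit stereo gives mBytesPerPacket = (16 * 2) / 8 = 4, so a
  // 10 ms buffer at 48000 Hz is 480 frames (1920 bytes).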

  // Calculate the number of sample frames per callback.
  number_of_frames_ = params.GetBytesPerBuffer() / format_.mBytesPerPacket;

  input_bus_ = AudioBus::Create(client_input_channels_,
                                params.frames_per_buffer());
  output_bus_ = AudioBus::Create(params);
}

AudioHardwareUnifiedStream::~AudioHardwareUnifiedStream() {
  DCHECK_EQ(device_, kAudioObjectUnknown);
}

bool AudioHardwareUnifiedStream::Open() {
  // Obtain the current output device selected by the user.
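  // HAL properties are addressed by a (selector, scope, element) triple;
  // kAudioObjectPropertyElementMaster addresses the object as a whole.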
  AudioObjectPropertyAddress pa;
  pa.mSelector = kAudioHardwarePropertyDefaultOutputDevice;
  pa.mScope = kAudioObjectPropertyScopeGlobal;
  pa.mElement = kAudioObjectPropertyElementMaster;

  UInt32 size = sizeof(device_);

  OSStatus result = AudioObjectGetPropertyData(
      kAudioObjectSystemObject,
      &pa,
      0,
      0,
      &size,
      &device_);

  if ((result != kAudioHardwareNoError) || (device_ == kAudioDeviceUnknown)) {
    LOG(ERROR) << "Cannot open unified AudioDevice.";
    return false;
  }

  // The requested sample-rate must match the hardware sample-rate.
  Float64 sample_rate = 0.0;
  size = sizeof(sample_rate);

  pa.mSelector = kAudioDevicePropertyNominalSampleRate;
  pa.mScope = kAudioObjectPropertyScopeWildcard;
  pa.mElement = kAudioObjectPropertyElementMaster;

  result = AudioObjectGetPropertyData(
      device_,
      &pa,
      0,
      0,
      &size,
      &sample_rate);

  if (result != noErr || sample_rate != format_.mSampleRate) {
    LOG(ERROR) << "Requested sample-rate: " << format_.mSampleRate
               << " must match the hardware sample-rate: " << sample_rate;
    return false;
  }
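  // This path performs no sample-rate conversion, so a mismatch here cannot
  // be compensated for later.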

  // Configure buffer frame size.
  UInt32 frame_size = number_of_frames_;

  pa.mSelector = kAudioDevicePropertyBufferFrameSize;
  pa.mScope = kAudioDevicePropertyScopeInput;
  pa.mElement = kAudioObjectPropertyElementMaster;
  result = AudioObjectSetPropertyData(
      device_,
      &pa,
      0,
      0,
      sizeof(frame_size),
      &frame_size);

  if (result != noErr) {
    LOG(ERROR) << "Unable to set input buffer frame size: " << frame_size;
    return false;
  }

  pa.mScope = kAudioDevicePropertyScopeOutput;
  result = AudioObjectSetPropertyData(
      device_,
      &pa,
      0,
      0,
      sizeof(frame_size),
      &frame_size);

  if (result != noErr) {
    LOG(ERROR) << "Unable to set output buffer frame size: " << frame_size;
    return false;
  }
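  // kAudioDevicePropertyBufferFrameSize determines how many sample frames the
  // HAL delivers (and requests) per IOProc callback in each direction.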

  DVLOG(1) << "Sample rate: " << sample_rate;
  DVLOG(1) << "Frame size: " << frame_size;

  // Determine the number of input and output channels.
  // We handle both the interleaved and non-interleaved cases.
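  // An AudioBufferList with a single buffer carrying N channels is
  // interleaved; one with N single-channel buffers is non-interleaved.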

  // Get input stream configuration.
  pa.mSelector = kAudioDevicePropertyStreamConfiguration;
  pa.mScope = kAudioDevicePropertyScopeInput;
  pa.mElement = kAudioObjectPropertyElementMaster;

  result = AudioObjectGetPropertyDataSize(device_, &pa, 0, 0, &size);
  OSSTATUS_DCHECK(result == noErr, result);

  if (result == noErr && size > 0) {
    // Allocate storage.
    scoped_ptr<uint8[]> input_list_storage(new uint8[size]);
    AudioBufferList& input_list =
        *reinterpret_cast<AudioBufferList*>(input_list_storage.get());

    result = AudioObjectGetPropertyData(
        device_,
        &pa,
        0,
        0,
        &size,
        &input_list);
    OSSTATUS_DCHECK(result == noErr, result);

    if (result == noErr) {
      // Determine number of input channels.
      input_channels_per_frame_ = input_list.mNumberBuffers > 0 ?
          input_list.mBuffers[0].mNumberChannels : 0;
      if (input_channels_per_frame_ == 1 && input_list.mNumberBuffers > 1) {
        // Non-interleaved.
        input_channels_ = input_list.mNumberBuffers;
      } else {
        // Interleaved.
        input_channels_ = input_channels_per_frame_;
      }
    }
  }

  DVLOG(1) << "Input channels: " << input_channels_;
  DVLOG(1) << "Input channels per frame: " << input_channels_per_frame_;

  // The hardware must have at least the requested input channels.
  if (result != noErr || client_input_channels_ > input_channels_) {
    LOG(ERROR) << "AudioDevice does not support requested input channels.";
    return false;
  }

  // Get output stream configuration.
  pa.mSelector = kAudioDevicePropertyStreamConfiguration;
  pa.mScope = kAudioDevicePropertyScopeOutput;
  pa.mElement = kAudioObjectPropertyElementMaster;

  result = AudioObjectGetPropertyDataSize(device_, &pa, 0, 0, &size);
  OSSTATUS_DCHECK(result == noErr, result);

  if (result == noErr && size > 0) {
    // Allocate storage.
    scoped_ptr<uint8[]> output_list_storage(new uint8[size]);
    AudioBufferList& output_list =
        *reinterpret_cast<AudioBufferList*>(output_list_storage.get());

    result = AudioObjectGetPropertyData(
        device_,
        &pa,
        0,
        0,
        &size,
        &output_list);
    OSSTATUS_DCHECK(result == noErr, result);

    if (result == noErr) {
      // Determine number of output channels.
      output_channels_per_frame_ = output_list.mNumberBuffers > 0 ?
          output_list.mBuffers[0].mNumberChannels : 0;
      if (output_channels_per_frame_ == 1 && output_list.mNumberBuffers > 1) {
        // Non-interleaved.
        output_channels_ = output_list.mNumberBuffers;
      } else {
        // Interleaved.
        output_channels_ = output_channels_per_frame_;
      }
    }
  }

  DVLOG(1) << "Output channels: " << output_channels_;
  DVLOG(1) << "Output channels per frame: " << output_channels_per_frame_;

  // The hardware must have at least the requested output channels.
  if (result != noErr ||
      output_channels_ < static_cast<int>(format_.mChannelsPerFrame)) {
    LOG(ERROR) << "AudioDevice does not support requested output channels.";
    return false;
  }

  // Setup the I/O proc.
  result = AudioDeviceCreateIOProcID(device_, RenderProc, this, &io_proc_id_);
  if (result != noErr) {
    LOG(ERROR) << "Error creating IOProc.";
    return false;
  }

  return true;
}

void AudioHardwareUnifiedStream::Close() {
  DCHECK(!is_playing_);

  OSStatus result = AudioDeviceDestroyIOProcID(device_, io_proc_id_);
  OSSTATUS_DCHECK(result == noErr, result);

  io_proc_id_ = 0;
  device_ = kAudioObjectUnknown;

  // Inform the audio manager that we have been closed. This can cause our
  // destruction.
  manager_->ReleaseOutputStream(this);
}

void AudioHardwareUnifiedStream::Start(AudioSourceCallback* callback) {
  DCHECK(callback);
  DCHECK_NE(device_, kAudioObjectUnknown);
  DCHECK(!is_playing_);
  if (device_ == kAudioObjectUnknown || is_playing_)
    return;

  source_ = callback;

  OSStatus result = AudioDeviceStart(device_, io_proc_id_);
  OSSTATUS_DCHECK(result == noErr, result);

  if (result == noErr)
    is_playing_ = true;
}

void AudioHardwareUnifiedStream::Stop() {
  if (!is_playing_)
    return;

  if (device_ != kAudioObjectUnknown) {
    OSStatus result = AudioDeviceStop(device_, io_proc_id_);
    OSSTATUS_DCHECK(result == noErr, result);
  }

  is_playing_ = false;
  source_ = NULL;
}

void AudioHardwareUnifiedStream::SetVolume(double volume) {
  volume_ = static_cast<float>(volume);
  // TODO(crogers): set volume property
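  // A device-level implementation would presumably set
  // kAudioDevicePropertyVolumeScalar; for now the value is only cached.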
}

void AudioHardwareUnifiedStream::GetVolume(double* volume) {
  *volume = volume_;
}

// Pulls on our provider with optional input, asking it to render output.
// Note to future hackers of this function: Do not add locks here because this
// is running on a real-time thread (for low-latency).
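// Memory allocation and other potentially blocking calls are equally unsafe
// on this thread.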
OSStatus AudioHardwareUnifiedStream::Render(
    AudioDeviceID device,
    const AudioTimeStamp* now,
    const AudioBufferList* input_data,
    const AudioTimeStamp* input_time,
    AudioBufferList* output_data,
    const AudioTimeStamp* output_time) {
  // Convert the input data accounting for possible interleaving.
  // TODO(crogers): it's better to simply memcpy() if source is already planar.
  if (input_channels_ >= client_input_channels_) {
    for (int channel_index = 0; channel_index < client_input_channels_;
         ++channel_index) {
      float* source;

      int source_channel_index = channel_index;

      if (input_channels_per_frame_ > 1) {
        // Interleaved.
        source = static_cast<float*>(input_data->mBuffers[0].mData) +
            source_channel_index;
      } else {
        // Non-interleaved.
        source = static_cast<float*>(
            input_data->mBuffers[source_channel_index].mData);
      }

      float* p = input_bus_->channel(channel_index);
      for (int i = 0; i < number_of_frames_; ++i) {
        p[i] = *source;
        source += input_channels_per_frame_;
      }
    }
  } else if (input_channels_) {
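    // The hardware provides fewer input channels than the client requested;
    // feed silence rather than partial data.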
    input_bus_->Zero();
  }

  // Give the client optional input data and have it render the output data.
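  // AudioBuffersState(0, 0) reports no pending bytes and no hardware delay.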
  source_->OnMoreIOData(input_bus_.get(),
                        output_bus_.get(),
                        AudioBuffersState(0, 0));

  // TODO(crogers): handle final Core Audio 5.1 layout for 5.1 audio.

  // Handle interleaving as necessary.
  // TODO(crogers): it's better to simply memcpy() if dest is already planar.

  for (int channel_index = 0;
       channel_index < static_cast<int>(format_.mChannelsPerFrame);
       ++channel_index) {
    float* dest;

    int dest_channel_index = channel_index;

    if (output_channels_per_frame_ > 1) {
      // Interleaved.
      dest = static_cast<float*>(output_data->mBuffers[0].mData) +
          dest_channel_index;
    } else {
      // Non-interleaved.
      dest = static_cast<float*>(
          output_data->mBuffers[dest_channel_index].mData);
    }

    float* p = output_bus_->channel(channel_index);
    for (int i = 0; i < number_of_frames_; ++i) {
      *dest = p[i];
      dest += output_channels_per_frame_;
    }
  }

  return noErr;
}

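// Static trampoline registered with the HAL; forwards to the instance's
// Render() through the |user_data| pointer supplied to
// AudioDeviceCreateIOProcID().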
OSStatus AudioHardwareUnifiedStream::RenderProc(
    AudioDeviceID device,
    const AudioTimeStamp* now,
    const AudioBufferList* input_data,
    const AudioTimeStamp* input_time,
    AudioBufferList* output_data,
    const AudioTimeStamp* output_time,
    void* user_data) {
  AudioHardwareUnifiedStream* audio_output =
      static_cast<AudioHardwareUnifiedStream*>(user_data);
  DCHECK(audio_output);
  if (!audio_output)
    return -1;

  return audio_output->Render(
      device,
      now,
      input_data,
      input_time,
      output_data,
      output_time);
}

}  // namespace media