OLD | NEW |
(Empty) | |
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. |
| 4 |
| 5 #include "media/audio/mac/audio_unified_mac.h" |
| 6 |
| 7 #include <CoreServices/CoreServices.h> |
| 8 |
| 9 #include "base/basictypes.h" |
| 10 #include "base/logging.h" |
| 11 #include "base/mac/mac_logging.h" |
| 12 #include "media/audio/audio_util.h" |
| 13 #include "media/audio/mac/audio_manager_mac.h" |
| 14 |
namespace media {

// Number of input channels the client-facing input bus is created with.
// TODO(crogers): support more than hard-coded stereo input.
// Ideally we would like to receive this value as a constructor argument.
static const int kDefaultInputChannels = 2;

// Constructs a unified (synchronized input + output) stream owned by
// |manager|. Input is currently fixed at kDefaultInputChannels; output
// geometry comes from |params|. No hardware is touched until Open().
AudioHardwareUnifiedStream::AudioHardwareUnifiedStream(
    AudioManagerMac* manager, const AudioParameters& params)
    : manager_(manager),
      source_(NULL),
      client_input_channels_(kDefaultInputChannels),
      volume_(1.0f),
      input_channels_(0),
      output_channels_(0),
      input_channels_per_frame_(0),
      output_channels_per_frame_(0),
      io_proc_id_(0),
      device_(kAudioObjectUnknown),
      is_playing_(false) {
  DCHECK(manager_);

  // Describe the requested format as packed signed-integer linear PCM.
  // A frame is one sample across all channels. In interleaved audio the per
  // frame fields identify the set of n |channels|. In uncompressed audio, a
  // packet is always one frame, so bytes-per-packet == bytes-per-frame.
  format_.mSampleRate = params.sample_rate();
  format_.mFormatID = kAudioFormatLinearPCM;
  format_.mFormatFlags = kLinearPCMFormatFlagIsPacked |
      kLinearPCMFormatFlagIsSignedInteger;
  format_.mBitsPerChannel = params.bits_per_sample();
  format_.mChannelsPerFrame = params.channels();
  format_.mFramesPerPacket = 1;
  format_.mBytesPerPacket = (format_.mBitsPerChannel * params.channels()) / 8;
  format_.mBytesPerFrame = format_.mBytesPerPacket;
  format_.mReserved = 0;

  // Calculate the number of sample frames per callback.
  // Presumably equals params.frames_per_buffer() since a packet is one
  // frame — TODO confirm against AudioParameters::GetBytesPerBuffer().
  number_of_frames_ = params.GetBytesPerBuffer() / format_.mBytesPerPacket;

  // Client-visible busses: input is fixed at |client_input_channels_|;
  // output is shaped by |params|.
  input_bus_ = AudioBus::Create(client_input_channels_,
                                params.frames_per_buffer());
  output_bus_ = AudioBus::Create(params);
}
| 57 |
| 58 AudioHardwareUnifiedStream::~AudioHardwareUnifiedStream() { |
| 59 DCHECK_EQ(device_, kAudioObjectUnknown); |
| 60 } |
| 61 |
| 62 bool AudioHardwareUnifiedStream::Open() { |
| 63 // Obtain the current output device selected by the user. |
| 64 AudioObjectPropertyAddress pa; |
| 65 pa.mSelector = kAudioHardwarePropertyDefaultOutputDevice; |
| 66 pa.mScope = kAudioObjectPropertyScopeGlobal; |
| 67 pa.mElement = kAudioObjectPropertyElementMaster; |
| 68 |
| 69 UInt32 size = sizeof(device_); |
| 70 |
| 71 OSStatus result = AudioObjectGetPropertyData( |
| 72 kAudioObjectSystemObject, |
| 73 &pa, |
| 74 0, |
| 75 0, |
| 76 &size, |
| 77 &device_); |
| 78 |
| 79 if ((result != kAudioHardwareNoError) || (device_ == kAudioDeviceUnknown)) { |
| 80 LOG(ERROR) << "Cannot open unified AudioDevice."; |
| 81 return false; |
| 82 } |
| 83 |
| 84 // The requested sample-rate must match the hardware sample-rate. |
| 85 Float64 sample_rate = 0.0; |
| 86 size = sizeof(sample_rate); |
| 87 |
| 88 pa.mSelector = kAudioDevicePropertyNominalSampleRate; |
| 89 pa.mScope = kAudioObjectPropertyScopeWildcard; |
| 90 pa.mElement = kAudioObjectPropertyElementMaster; |
| 91 |
| 92 result = AudioObjectGetPropertyData( |
| 93 device_, |
| 94 &pa, |
| 95 0, |
| 96 0, |
| 97 &size, |
| 98 &sample_rate); |
| 99 |
| 100 if (result != noErr || sample_rate != format_.mSampleRate) { |
| 101 LOG(ERROR) << "Requested sample-rate: " << format_.mSampleRate |
| 102 << " must match the hardware sample-rate: " << sample_rate; |
| 103 return false; |
| 104 } |
| 105 |
| 106 // Configure buffer frame size. |
| 107 UInt32 frame_size = number_of_frames_; |
| 108 |
| 109 pa.mSelector = kAudioDevicePropertyBufferFrameSize; |
| 110 pa.mScope = kAudioDevicePropertyScopeInput; |
| 111 pa.mElement = kAudioObjectPropertyElementMaster; |
| 112 result = AudioObjectSetPropertyData( |
| 113 device_, |
| 114 &pa, |
| 115 0, |
| 116 0, |
| 117 sizeof(frame_size), |
| 118 &frame_size); |
| 119 |
| 120 if (result != noErr) { |
| 121 LOG(ERROR) << "Unable to set input buffer frame size: " << frame_size; |
| 122 return false; |
| 123 } |
| 124 |
| 125 pa.mScope = kAudioDevicePropertyScopeOutput; |
| 126 result = AudioObjectSetPropertyData( |
| 127 device_, |
| 128 &pa, |
| 129 0, |
| 130 0, |
| 131 sizeof(frame_size), |
| 132 &frame_size); |
| 133 |
| 134 if (result != noErr) { |
| 135 LOG(ERROR) << "Unable to set output buffer frame size: " << frame_size; |
| 136 return false; |
| 137 } |
| 138 |
| 139 DVLOG(1) << "Sample rate: " << sample_rate; |
| 140 DVLOG(1) << "Frame size: " << frame_size; |
| 141 |
| 142 // Determine the number of input and output channels. |
| 143 // We handle both the interleaved and non-interleaved cases. |
| 144 |
| 145 // Get input stream configuration. |
| 146 pa.mSelector = kAudioDevicePropertyStreamConfiguration; |
| 147 pa.mScope = kAudioDevicePropertyScopeInput; |
| 148 pa.mElement = kAudioObjectPropertyElementMaster; |
| 149 |
| 150 result = AudioObjectGetPropertyDataSize(device_, &pa, 0, 0, &size); |
| 151 OSSTATUS_DCHECK(result == noErr, result); |
| 152 |
| 153 if (result == noErr && size > 0) { |
| 154 // Allocate storage. |
| 155 scoped_array<uint8> input_list_storage(new uint8[size]); |
| 156 AudioBufferList& input_list = |
| 157 *reinterpret_cast<AudioBufferList*>(input_list_storage.get()); |
| 158 |
| 159 result = AudioObjectGetPropertyData( |
| 160 device_, |
| 161 &pa, |
| 162 0, |
| 163 0, |
| 164 &size, |
| 165 &input_list); |
| 166 OSSTATUS_DCHECK(result == noErr, result); |
| 167 |
| 168 if (result == noErr) { |
| 169 // Determine number of input channels. |
| 170 input_channels_per_frame_ = input_list.mNumberBuffers > 0 ? |
| 171 input_list.mBuffers[0].mNumberChannels : 0; |
| 172 if (input_channels_per_frame_ == 1 && input_list.mNumberBuffers > 1) { |
| 173 // Non-interleaved. |
| 174 input_channels_ = input_list.mNumberBuffers; |
| 175 } else { |
| 176 // Interleaved. |
| 177 input_channels_ = input_channels_per_frame_; |
| 178 } |
| 179 } |
| 180 } |
| 181 |
| 182 DVLOG(1) << "Input channels: " << input_channels_; |
| 183 DVLOG(1) << "Input channels per frame: " << input_channels_per_frame_; |
| 184 |
| 185 // The hardware must have at least the requested input channels. |
| 186 if (result != noErr || client_input_channels_ > input_channels_) { |
| 187 LOG(ERROR) << "AudioDevice does not support requested input channels."; |
| 188 return false; |
| 189 } |
| 190 |
| 191 // Get output stream configuration. |
| 192 pa.mSelector = kAudioDevicePropertyStreamConfiguration; |
| 193 pa.mScope = kAudioDevicePropertyScopeOutput; |
| 194 pa.mElement = kAudioObjectPropertyElementMaster; |
| 195 |
| 196 result = AudioObjectGetPropertyDataSize(device_, &pa, 0, 0, &size); |
| 197 OSSTATUS_DCHECK(result == noErr, result); |
| 198 |
| 199 if (result == noErr && size > 0) { |
| 200 // Allocate storage. |
| 201 scoped_array<uint8> output_list_storage(new uint8[size]); |
| 202 AudioBufferList& output_list = |
| 203 *reinterpret_cast<AudioBufferList*>(output_list_storage.get()); |
| 204 |
| 205 result = AudioObjectGetPropertyData( |
| 206 device_, |
| 207 &pa, |
| 208 0, |
| 209 0, |
| 210 &size, |
| 211 &output_list); |
| 212 OSSTATUS_DCHECK(result == noErr, result); |
| 213 |
| 214 if (result == noErr) { |
| 215 // Determine number of output channels. |
| 216 output_channels_per_frame_ = output_list.mBuffers[0].mNumberChannels; |
| 217 if (output_channels_per_frame_ == 1 && output_list.mNumberBuffers > 1) { |
| 218 // Non-interleaved. |
| 219 output_channels_ = output_list.mNumberBuffers; |
| 220 } else { |
| 221 // Interleaved. |
| 222 output_channels_ = output_channels_per_frame_; |
| 223 } |
| 224 } |
| 225 } |
| 226 |
| 227 DVLOG(1) << "Output channels: " << output_channels_; |
| 228 DVLOG(1) << "Output channels per frame: " << output_channels_per_frame_; |
| 229 |
| 230 // The hardware must have at least the requested output channels. |
| 231 if (result != noErr || |
| 232 output_channels_ < static_cast<int>(format_.mChannelsPerFrame)) { |
| 233 LOG(ERROR) << "AudioDevice does not support requested output channels."; |
| 234 return false; |
| 235 } |
| 236 |
| 237 // Setup the I/O proc. |
| 238 result = AudioDeviceCreateIOProcID(device_, RenderProc, this, &io_proc_id_); |
| 239 if (result != noErr) { |
| 240 LOG(ERROR) << "Error creating IOProc."; |
| 241 return false; |
| 242 } |
| 243 |
| 244 return true; |
| 245 } |
| 246 |
| 247 void AudioHardwareUnifiedStream::Close() { |
| 248 DCHECK(!is_playing_); |
| 249 |
| 250 OSStatus result = AudioDeviceDestroyIOProcID(device_, io_proc_id_); |
| 251 OSSTATUS_DCHECK(result == noErr, result); |
| 252 |
| 253 io_proc_id_ = 0; |
| 254 device_ = kAudioObjectUnknown; |
| 255 |
| 256 // Inform the audio manager that we have been closed. This can cause our |
| 257 // destruction. |
| 258 manager_->ReleaseOutputStream(this); |
| 259 } |
| 260 |
| 261 void AudioHardwareUnifiedStream::Start(AudioSourceCallback* callback) { |
| 262 DCHECK(callback); |
| 263 DCHECK_NE(device_, kAudioObjectUnknown); |
| 264 DCHECK(!is_playing_); |
| 265 if (device_ == kAudioObjectUnknown || is_playing_) |
| 266 return; |
| 267 |
| 268 source_ = callback; |
| 269 |
| 270 OSStatus result = AudioDeviceStart(device_, io_proc_id_); |
| 271 OSSTATUS_DCHECK(result == noErr, result); |
| 272 |
| 273 if (result == noErr) |
| 274 is_playing_ = true; |
| 275 } |
| 276 |
| 277 void AudioHardwareUnifiedStream::Stop() { |
| 278 if (!is_playing_) |
| 279 return; |
| 280 |
| 281 source_ = NULL; |
| 282 |
| 283 if (device_ != kAudioObjectUnknown) { |
| 284 OSStatus result = AudioDeviceStop(device_, io_proc_id_); |
| 285 OSSTATUS_DCHECK(result == noErr, result); |
| 286 } |
| 287 |
| 288 is_playing_ = false; |
| 289 } |
| 290 |
| 291 void AudioHardwareUnifiedStream::SetVolume(double volume) { |
| 292 volume_ = static_cast<float>(volume); |
| 293 // TODO(crogers): set volume property |
| 294 } |
| 295 |
| 296 void AudioHardwareUnifiedStream::GetVolume(double* volume) { |
| 297 *volume = volume_; |
| 298 } |
| 299 |
// Pulls on our provider with optional input, asking it to render output.
// Note to future hackers of this function: Do not add locks here because this
// is running on a real-time thread (for low-latency).
OSStatus AudioHardwareUnifiedStream::Render(
    AudioDeviceID device,
    const AudioTimeStamp* now,
    const AudioBufferList* input_data,
    const AudioTimeStamp* input_time,
    AudioBufferList* output_data,
    const AudioTimeStamp* output_time) {
  // Convert the input data accounting for possible interleaving.
  // The mData pointers are read as float samples — assumes the HAL delivers
  // Float32 here (TODO confirm against the device's stream format).
  // TODO(crogers): it's better to simply memcpy() if source is already planar.
  if (input_channels_ >= client_input_channels_) {
    for (int channel_index = 0; channel_index < client_input_channels_;
         ++channel_index) {
      float* source;

      int source_channel_index = channel_index;

      if (input_channels_per_frame_ > 1) {
        // Interleaved: every channel lives in mBuffers[0]; start at this
        // channel's offset and stride by the channel count below.
        source = static_cast<float*>(input_data->mBuffers[0].mData) +
            source_channel_index;
      } else {
        // Non-interleaved: one buffer per channel.
        source = static_cast<float*>(
            input_data->mBuffers[source_channel_index].mData);
      }

      // De-interleave this channel into the planar |input_bus_|.
      float* p = input_bus_->channel(channel_index);
      for (int i = 0; i < number_of_frames_; ++i) {
        p[i] = *source;
        source += input_channels_per_frame_;
      }
    }
  } else if (input_channels_) {
    // Fewer hardware input channels than the client expects: feed silence.
    input_bus_->Zero();
  }

  // Give the client optional input data and have it render the output data.
  // NOTE(review): Stop() clears |source_| from another thread before calling
  // AudioDeviceStop(), so a callback racing with Stop() could observe a NULL
  // |source_| here — verify shutdown ordering.
  source_->OnMoreIOData(input_bus_.get(),
                        output_bus_.get(),
                        AudioBuffersState(0, 0));

  // TODO(crogers): handle final Core Audio 5.1 layout for 5.1 audio.

  // Handle interleaving as necessary: copy the planar |output_bus_| into the
  // device's output buffers, mirroring the input conversion above.
  // TODO(crogers): it's better to simply memcpy() if dest is already planar.

  for (int channel_index = 0;
       channel_index < static_cast<int>(format_.mChannelsPerFrame);
       ++channel_index) {
    float* dest;

    int dest_channel_index = channel_index;

    if (output_channels_per_frame_ > 1) {
      // Interleaved.
      dest = static_cast<float*>(output_data->mBuffers[0].mData) +
          dest_channel_index;
    } else {
      // Non-interleaved.
      dest = static_cast<float*>(
          output_data->mBuffers[dest_channel_index].mData);
    }

    float* p = output_bus_->channel(channel_index);
    for (int i = 0; i < number_of_frames_; ++i) {
      *dest = p[i];
      dest += output_channels_per_frame_;
    }
  }

  return noErr;
}
| 375 |
| 376 OSStatus AudioHardwareUnifiedStream::RenderProc( |
| 377 AudioDeviceID device, |
| 378 const AudioTimeStamp* now, |
| 379 const AudioBufferList* input_data, |
| 380 const AudioTimeStamp* input_time, |
| 381 AudioBufferList* output_data, |
| 382 const AudioTimeStamp* output_time, |
| 383 void* user_data) { |
| 384 AudioHardwareUnifiedStream* audio_output = |
| 385 static_cast<AudioHardwareUnifiedStream*>(user_data); |
| 386 DCHECK(audio_output); |
| 387 if (!audio_output) |
| 388 return -1; |
| 389 |
| 390 return audio_output->Render( |
| 391 device, |
| 392 now, |
| 393 input_data, |
| 394 input_time, |
| 395 output_data, |
| 396 output_time); |
| 397 } |
| 398 |
| 399 } // namespace media |
OLD | NEW |