Chromium Code Reviews

Side by Side Diff: media/audio/mac/audio_synchronized_mac.cc

Issue 10909185: Add Mac OS X synchronized audio I/O back-end (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src/
Patch Set: Created 8 years, 3 months ago
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "media/audio/mac/audio_synchronized_mac.h"
6
7 #include <CoreServices/CoreServices.h>
8 #include <algorithm>
no longer working on chromium 2012/09/17 20:47:46 nit, add an empty line?
9
10 #include "base/basictypes.h"
11 #include "base/debug/trace_event.h"
12 #include "base/logging.h"
13 #include "base/mac/mac_logging.h"
14 #include "media/audio/audio_util.h"
15 #include "media/audio/mac/audio_manager_mac.h"
16
17 namespace media {
18
19 static const int kHardwareBufferSize = 128;
20 static const int kFifoSize = 16384;
21
22 // TODO(crogers): handle the non-stereo case.
23 static const int kChannels = 2;
24
25 // This value was determined empirically for minimum latency while still
26 // guarding against FIFO under-runs.
27 static const int kBaseTargetFifoFrames = 256 + 64;
28
29 // If the input and output sample-rates don't match, then we need to maintain
30 // an additional safety margin due to the callback timing jitter and the
31 // varispeed buffering. This value was empirically tuned.
32 static const int kAdditionalTargetFifoFrames = 128;
33
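The two FIFO constants above set the extra buffering, and therefore the added latency, of this back-end. A minimal worked example of that arithmetic, assuming a 48 kHz hardware rate purely for illustration (the patch itself does not fix the rate):

#include <cstdio>

int main() {
  const int kBaseTargetFifoFrames = 256 + 64;     // matched input/output rates
  const int kAdditionalTargetFifoFrames = 128;    // extra margin when rates differ
  const double kAssumedSampleRate = 48000.0;      // assumption for illustration only

  double base_ms = 1000.0 * kBaseTargetFifoFrames / kAssumedSampleRate;
  double resampled_ms = 1000.0 *
      (kBaseTargetFifoFrames + kAdditionalTargetFifoFrames) / kAssumedSampleRate;

  // Prints roughly 6.7 ms for the matched case and 9.3 ms with the margin.
  printf("target FIFO latency: %.1f ms (matched), %.1f ms (resampled)\n",
         base_ms, resampled_ms);
  return 0;
}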
34 static void ZeroBufferList(AudioBufferList* io_data) {
35 for (unsigned i = 0; i < io_data->mNumberBuffers; ++i)
scherkus (not reviewing) 2012/09/17 14:51:17 s/unsigned/int/ or size_t etc...
Chris Rogers 2012/09/17 20:44:23 Done.
36 memset(io_data->mBuffers[i].mData, 0, io_data->mBuffers[i].mDataByteSize);
37 }
38
39 AudioSynchronizedStream::AudioSynchronizedStream(
40 AudioManagerMac* manager,
41 const AudioParameters& params,
42 AudioDeviceID input_id,
43 AudioDeviceID output_id)
44 : manager_(manager),
45 params_(params),
46 input_sample_rate_(0),
47 output_sample_rate_(0),
48 input_id_(input_id),
49 output_id_(output_id),
50 input_buffer_list_(NULL),
51 fifo_(kChannels, kFifoSize),
52 fifo_rate_compensation_(1.0),
53 ave_delta_(0.0),
54 target_fifo_frames_(kBaseTargetFifoFrames),
55 input_unit_(0),
56 varispeed_unit_(0),
57 output_unit_(0),
58 first_input_time_(-1),
59 is_running_(false),
60 hardware_buffer_size_(kHardwareBufferSize),
61 channels_(kChannels) {
62 }
63
64 AudioSynchronizedStream::~AudioSynchronizedStream() {
65 DCHECK(!input_unit_);
66 DCHECK(!output_unit_);
67 DCHECK(!varispeed_unit_);
68 }
69
70 bool AudioSynchronizedStream::Open() {
71 if (params_.channels() != kChannels) {
72 LOG(ERROR) << "Only stereo output is currently supported.";
73 return false;
74 }
75
76 // Create the input, output, and varispeed AudioUnits.
77 OSStatus result = CreateAudioUnits();
78 if (result != noErr) {
79 LOG(ERROR) << "Cannot create AudioUnits.";
80 return false;
81 }
82
83 result = SetupInput(input_id_);
84 if (result != noErr) {
85 LOG(ERROR) << "Error configuring input AudioUnit.";
86 return false;
87 }
88
89 result = SetupOutput(output_id_);
90 if (result != noErr) {
91 LOG(ERROR) << "Error configuring output AudioUnit.";
92 return false;
93 }
94
95 result = SetupCallbacks();
96 if (result != noErr) {
97 LOG(ERROR) << "Error setting up callbacks on AudioUnits.";
98 return false;
99 }
100
101 result = SetupStreamFormats();
102 if (result != noErr) {
103 LOG(ERROR) << "Error configuring stream formats on AudioUnits.";
104 return false;
105 }
106
107 AllocateInputData();
108
109 // Final initialization of the AudioUnits.
110 result = AudioUnitInitialize(input_unit_);
111 if (result != noErr) {
112 LOG(ERROR) << "Error initializing input AudioUnit.";
113 return false;
114 }
115
116 result = AudioUnitInitialize(output_unit_);
117 if (result != noErr) {
118 LOG(ERROR) << "Error initializing output AudioUnit.";
119 return false;
120 }
121
122 result = AudioUnitInitialize(varispeed_unit_);
123 if (result != noErr) {
124 LOG(ERROR) << "Error initializing varispeed AudioUnit.";
125 return false;
126 }
127
128 if (input_sample_rate_ != output_sample_rate_) {
129 // Add extra safety margin.
130 target_fifo_frames_ += kAdditionalTargetFifoFrames;
131 }
132
133 // Buffer initial silence corresponding to target I/O buffering.
134 fifo_.Clear();
135 AudioBus silence(channels_, target_fifo_frames_);
136 silence.Zero();
137 fifo_.Push(&silence);
138
139 return true;
140 }
141
142 void AudioSynchronizedStream::Close() {
143 DCHECK(!is_running_);
144
145 if (input_buffer_list_) {
146 free(input_buffer_list_);
147 input_buffer_list_ = 0;
148 input_bus_.reset(NULL);
149 }
150
151 if (input_unit_) {
152 AudioUnitUninitialize(input_unit_);
153 CloseComponent(input_unit_);
154 }
155
156 if (output_unit_) {
157 AudioUnitUninitialize(output_unit_);
158 CloseComponent(output_unit_);
159 }
160
161 if (varispeed_unit_) {
162 AudioUnitUninitialize(varispeed_unit_);
163 CloseComponent(varispeed_unit_);
164 }
165
166 input_unit_ = NULL;
167 output_unit_ = NULL;
168 varispeed_unit_ = NULL;
169
170 // Inform the audio manager that we have been closed. This can cause our
171 // destruction.
172 manager_->ReleaseOutputStream(this);
173 }
174
175 void AudioSynchronizedStream::Start(AudioSourceCallback* callback) {
176 DCHECK(callback);
177 DCHECK(input_unit_);
178 DCHECK(output_unit_);
179 DCHECK(varispeed_unit_);
180
181 if (is_running_ || !input_unit_ || !output_unit_ || !varispeed_unit_)
182 return;
183
184 source_ = callback;
185
186 // Reset state variables each time we Start().
187 fifo_rate_compensation_ = 1.0;
188 ave_delta_ = 0.0;
189
190 OSStatus result = noErr;
191
192 if (!is_running_) {
193 first_input_time_ = -1;
194
195 result = AudioOutputUnitStart(input_unit_);
196 OSSTATUS_DCHECK(result == noErr, result);
197
198 if (result == noErr) {
199 result = AudioOutputUnitStart(output_unit_);
200 OSSTATUS_DCHECK(result == noErr, result);
201 }
202 }
203
204 is_running_ = true;
205 }
206
207 void AudioSynchronizedStream::Stop() {
208 OSStatus result = noErr;
209 if (is_running_) {
210 result = AudioOutputUnitStop(input_unit_);
211 OSSTATUS_DCHECK(result == noErr, result);
212
213 if (result == noErr) {
214 result = AudioOutputUnitStop(output_unit_);
215 OSSTATUS_DCHECK(result == noErr, result);
216 }
217 }
218
219 if (result == noErr)
220 is_running_ = false;
221 }
222
223 bool AudioSynchronizedStream::IsRunning() {
224 return is_running_;
225 }
226
227 // TODO(crogers): implement - or remove from AudioOutputStream.
228 void AudioSynchronizedStream::SetVolume(double volume) {}
229 void AudioSynchronizedStream::GetVolume(double* volume) {}
230
231 OSStatus AudioSynchronizedStream::SetOutputDeviceAsCurrent(
232 AudioDeviceID output_id) {
233 OSStatus result = noErr;
234
235 // Get the default output device if device is unknown.
236 if (output_id == kAudioDeviceUnknown) {
237 AudioObjectPropertyAddress pa;
238 pa.mSelector = kAudioHardwarePropertyDefaultOutputDevice;
239 pa.mScope = kAudioObjectPropertyScopeGlobal;
240 pa.mElement = kAudioObjectPropertyElementMaster;
241 UInt32 size = sizeof(output_id);
242
243 result = AudioObjectGetPropertyData(
244 kAudioObjectSystemObject,
245 &pa,
246 0,
247 0,
248 &size,
249 &output_id);
250
251 OSSTATUS_DCHECK(result == noErr, result);
252 if (result != noErr)
253 return result;
254 }
255
256 // Set the render frame size.
257 UInt32 frame_size = hardware_buffer_size_;
258 AudioObjectPropertyAddress pa;
259 pa.mSelector = kAudioDevicePropertyBufferFrameSize;
260 pa.mScope = kAudioDevicePropertyScopeOutput;
261 pa.mElement = kAudioObjectPropertyElementMaster;
262 result = AudioObjectSetPropertyData(
263 output_id,
264 &pa,
265 0,
266 0,
267 sizeof(frame_size),
268 &frame_size);
269
270 OSSTATUS_DCHECK(result == noErr, result);
271 if (result != noErr)
272 return result;
273
274 output_info_.Initialize(output_id, false);
275
276 // Set the Current Device to the Default Output Unit.
277 result = AudioUnitSetProperty(
278 output_unit_,
279 kAudioOutputUnitProperty_CurrentDevice,
280 kAudioUnitScope_Global,
281 0,
282 &output_info_.id_,
283 sizeof(output_info_.id_));
284
285 OSSTATUS_DCHECK(result == noErr, result);
286 return result;
287 }
288
289 OSStatus AudioSynchronizedStream::SetInputDeviceAsCurrent(
290 AudioDeviceID input_id) {
291 OSStatus result = noErr;
292
293 // Get the default input device if device is unknown.
294 if (input_id == kAudioDeviceUnknown) {
295 AudioObjectPropertyAddress pa;
296 pa.mSelector = kAudioHardwarePropertyDefaultInputDevice;
297 pa.mScope = kAudioObjectPropertyScopeGlobal;
298 pa.mElement = kAudioObjectPropertyElementMaster;
299 UInt32 size = sizeof(input_id);
300
301 result = AudioObjectGetPropertyData(
302 kAudioObjectSystemObject,
303 &pa,
304 0,
305 0,
306 &size,
307 &input_id);
308
309 OSSTATUS_DCHECK(result == noErr, result);
310 if (result != noErr)
311 return result;
312 }
313
314 // Set the render frame size.
315 UInt32 frame_size = hardware_buffer_size_;
316 AudioObjectPropertyAddress pa;
317 pa.mSelector = kAudioDevicePropertyBufferFrameSize;
318 pa.mScope = kAudioDevicePropertyScopeInput;
319 pa.mElement = kAudioObjectPropertyElementMaster;
320 result = AudioObjectSetPropertyData(
321 input_id,
322 &pa,
323 0,
324 0,
325 sizeof(frame_size),
326 &frame_size);
327
328 OSSTATUS_DCHECK(result == noErr, result);
329 if (result != noErr)
330 return result;
331
332 input_info_.Initialize(input_id, true);
333
334 // Set the Current Device to the AUHAL.
335 // This should be done only after I/O has been enabled on the AUHAL.
336 result = AudioUnitSetProperty(
337 input_unit_,
338 kAudioOutputUnitProperty_CurrentDevice,
339 kAudioUnitScope_Global,
340 0,
341 &input_info_.id_,
342 sizeof(input_info_.id_));
343
344 OSSTATUS_DCHECK(result == noErr, result);
345 return result;
346 }
347
348 OSStatus AudioSynchronizedStream::CreateAudioUnits() {
349 // Q: Why do we need a varispeed unit?
350 // A: If the input device and the output device are running at
351 // different sample rates and/or on different clocks, we will need
352 // to compensate to avoid a pitch change and
353 // to avoid buffer under-runs and over-runs.
354 ComponentDescription varispeed_desc;
355 varispeed_desc.componentType = kAudioUnitType_FormatConverter;
356 varispeed_desc.componentSubType = kAudioUnitSubType_Varispeed;
357 varispeed_desc.componentManufacturer = kAudioUnitManufacturer_Apple;
358 varispeed_desc.componentFlags = 0;
359 varispeed_desc.componentFlagsMask = 0;
360
361 Component varispeed_comp = FindNextComponent(NULL, &varispeed_desc);
362 if (varispeed_comp == NULL)
363 return -1;
364
365 OSStatus result = OpenAComponent(varispeed_comp, &varispeed_unit_);
366 OSSTATUS_DCHECK(result == noErr, result);
367 if (result != noErr)
368 return result;
369
370 // Open input AudioUnit.
371 ComponentDescription input_desc;
372 input_desc.componentType = kAudioUnitType_Output;
373 input_desc.componentSubType = kAudioUnitSubType_HALOutput;
374 input_desc.componentManufacturer = kAudioUnitManufacturer_Apple;
375 input_desc.componentFlags = 0;
376 input_desc.componentFlagsMask = 0;
377
378 Component input_comp = FindNextComponent(NULL, &input_desc);
379 if (input_comp == NULL)
380 return -1;
381
382 result = OpenAComponent(input_comp, &input_unit_);
383 OSSTATUS_DCHECK(result == noErr, result);
384 if (result != noErr)
385 return result;
386
387 // Open output AudioUnit.
388 ComponentDescription output_desc;
389 output_desc.componentType = kAudioUnitType_Output;
390 output_desc.componentSubType = kAudioUnitSubType_DefaultOutput;
391 output_desc.componentManufacturer = kAudioUnitManufacturer_Apple;
392 output_desc.componentFlags = 0;
393 output_desc.componentFlagsMask = 0;
394
395 Component output_comp = FindNextComponent(NULL, &output_desc);
396 if (output_comp == NULL)
397 return -1;
398
399 result = OpenAComponent(output_comp, &output_unit_);
400 OSSTATUS_DCHECK(result == noErr, result);
401 if (result != noErr)
402 return result;
403
404 return noErr;
405 }
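CreateAudioUnits() above discovers and opens the three units with the Component Manager calls from CoreServices (FindNextComponent/OpenAComponent), which are deprecated on newer SDKs. As a hedged sketch only, not part of this patch, the same varispeed lookup with the 10.6+ AudioComponent API would look roughly like this (OpenVarispeedUnit is a hypothetical helper name):

#include <AudioUnit/AudioUnit.h>

// Opens an Apple varispeed unit via the AudioComponent API.
static OSStatus OpenVarispeedUnit(AudioUnit* out_unit) {
  AudioComponentDescription desc = {0};
  desc.componentType = kAudioUnitType_FormatConverter;
  desc.componentSubType = kAudioUnitSubType_Varispeed;
  desc.componentManufacturer = kAudioUnitManufacturer_Apple;

  AudioComponent comp = AudioComponentFindNext(NULL, &desc);
  if (!comp)
    return -1;  // Mirrors the error convention used in CreateAudioUnits().

  // AudioComponentInstance and AudioUnit share the same underlying type,
  // so the result can be used anywhere the patch expects an AudioUnit.
  return AudioComponentInstanceNew(comp, out_unit);
}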
406
407 OSStatus AudioSynchronizedStream::SetupInput(AudioDeviceID input_id) {
408 // The AUHAL used for input needs to be initialized
409 // before anything is done to it.
410 OSStatus result = AudioUnitInitialize(input_unit_);
411 OSSTATUS_DCHECK(result == noErr, result);
412 if (result != noErr)
413 return result;
414
415 // We must enable the Audio Unit (AUHAL) for input and disable output
416 // BEFORE setting the AUHAL's current device.
417 result = EnableIO();
418 OSSTATUS_DCHECK(result == noErr, result);
419 if (result != noErr)
420 return result;
421
422 result = SetInputDeviceAsCurrent(input_id);
423 OSSTATUS_DCHECK(result == noErr, result);
424
425 return result;
426 }
427
428 OSStatus AudioSynchronizedStream::EnableIO() {
429 // Enable input on the AUHAL.
430 UInt32 enable_io = 1;
431 OSStatus result = AudioUnitSetProperty(
432 input_unit_,
433 kAudioOutputUnitProperty_EnableIO,
434 kAudioUnitScope_Input,
435 1, // input element
436 &enable_io,
437 sizeof(enable_io));
438
439 OSSTATUS_DCHECK(result == noErr, result);
440 if (result != noErr)
441 return result;
442
443 // Disable Output on the AUHAL.
444 enable_io = 0;
445 result = AudioUnitSetProperty(
446 input_unit_,
447 kAudioOutputUnitProperty_EnableIO,
448 kAudioUnitScope_Output,
449 0, // output element
450 &enable_io,
451 sizeof(enable_io));
452
453 OSSTATUS_DCHECK(result == noErr, result);
454 return result;
455 }
456
457 OSStatus AudioSynchronizedStream::SetupOutput(AudioDeviceID output_id) {
458 OSStatus result = noErr;
459
460 result = SetOutputDeviceAsCurrent(output_id);
461 OSSTATUS_DCHECK(result == noErr, result);
462 if (result != noErr)
463 return result;
464
465 // Tell the output unit not to reset timestamps.
466 // Otherwise sample rate changes will cause sync loss.
467 UInt32 start_at_zero = 0;
468 result = AudioUnitSetProperty(
469 output_unit_,
470 kAudioOutputUnitProperty_StartTimestampsAtZero,
471 kAudioUnitScope_Global,
472 0,
473 &start_at_zero,
474 sizeof(start_at_zero));
475
476 OSSTATUS_DCHECK(result == noErr, result);
477
478 return result;
479 }
480
481 OSStatus AudioSynchronizedStream::SetupCallbacks() {
482 // Set the input callback.
483 AURenderCallbackStruct callback;
484 callback.inputProc = InputProc;
485 callback.inputProcRefCon = this;
486 OSStatus result = AudioUnitSetProperty(
487 input_unit_,
488 kAudioOutputUnitProperty_SetInputCallback,
489 kAudioUnitScope_Global,
490 0,
491 &callback,
492 sizeof(callback));
493
494 OSSTATUS_DCHECK(result == noErr, result);
495 if (result != noErr)
496 return result;
497
498 // Set the output callback.
499 callback.inputProc = OutputProc;
500 callback.inputProcRefCon = this;
501 result = AudioUnitSetProperty(
502 output_unit_,
503 kAudioUnitProperty_SetRenderCallback,
504 kAudioUnitScope_Input,
505 0,
506 &callback,
507 sizeof(callback));
508
509 OSSTATUS_DCHECK(result == noErr, result);
510 if (result != noErr)
511 return result;
512
513 // Set the varispeed callback.
514 callback.inputProc = VarispeedProc;
515 callback.inputProcRefCon = this;
516 result = AudioUnitSetProperty(
517 varispeed_unit_,
518 kAudioUnitProperty_SetRenderCallback,
519 kAudioUnitScope_Input,
520 0,
521 &callback,
522 sizeof(callback));
523
524 OSSTATUS_DCHECK(result == noErr, result);
525
526 return result;
527 }
528
529 OSStatus AudioSynchronizedStream::SetupStreamFormats() {
530 AudioStreamBasicDescription asbd, asbd_dev1_in, asbd_dev2_out;
531
532 // Get the input device's stream format (hardware side of the input AUHAL).
533 UInt32 property_size = sizeof(asbd_dev1_in);
534 OSStatus result = AudioUnitGetProperty(
535 input_unit_,
536 kAudioUnitProperty_StreamFormat,
537 kAudioUnitScope_Input,
538 1,
539 &asbd_dev1_in,
540 &property_size);
541
542 OSSTATUS_DCHECK(result == noErr, result);
543 if (result != noErr)
544 return result;
545
546 // Get the client-side stream format of the input AUHAL.
547 property_size = sizeof(asbd);
548 result = AudioUnitGetProperty(
549 input_unit_,
550 kAudioUnitProperty_StreamFormat,
551 kAudioUnitScope_Output,
552 1,
553 &asbd,
554 &property_size);
555
556 OSSTATUS_DCHECK(result == noErr, result);
557 if (result != noErr)
558 return result;
559
560 // Get the output device's stream format (hardware side of the output unit).
561 property_size = sizeof(asbd_dev2_out);
562 result = AudioUnitGetProperty(
563 output_unit_,
564 kAudioUnitProperty_StreamFormat,
565 kAudioUnitScope_Output,
566 0,
567 &asbd_dev2_out,
568 &property_size);
569
570 OSSTATUS_DCHECK(result == noErr, result);
571 if (result != noErr)
572 return result;
573
574 // Set the channel count of the format used by all the AUs to match the
575 // devices. For the simple case, use the lower of the input device's and
576 // output device's channel counts.
577 asbd.mChannelsPerFrame = std::min(asbd_dev1_in.mChannelsPerFrame,
578 asbd_dev2_out.mChannelsPerFrame);
579
580 // We must get the input device's sample rate and set it on the
581 // AUHAL's stream format.
582 Float64 rate = 0;
583 property_size = sizeof(rate);
584
585 AudioObjectPropertyAddress pa;
586 pa.mSelector = kAudioDevicePropertyNominalSampleRate;
587 pa.mScope = kAudioObjectPropertyScopeWildcard;
588 pa.mElement = kAudioObjectPropertyElementMaster;
589 result = AudioObjectGetPropertyData(
590 input_info_.id_,
591 &pa,
592 0,
593 0,
594 &property_size,
595 &rate);
596
597 OSSTATUS_DCHECK(result == noErr, result);
598 if (result != noErr)
599 return result;
600
601 input_sample_rate_ = rate;
602
603 asbd.mSampleRate = rate;
604 property_size = sizeof(asbd);
605
606 // Set the new formats to the AUs...
607 result = AudioUnitSetProperty(
608 input_unit_,
609 kAudioUnitProperty_StreamFormat,
610 kAudioUnitScope_Output,
611 1,
612 &asbd,
613 property_size);
614
615 OSSTATUS_DCHECK(result == noErr, result);
616 if (result != noErr)
617 return result;
618
619 result = AudioUnitSetProperty(
620 varispeed_unit_,
621 kAudioUnitProperty_StreamFormat,
622 kAudioUnitScope_Input,
623 0,
624 &asbd,
625 property_size);
626
627 OSSTATUS_DCHECK(result == noErr, result);
628 if (result != noErr)
629 return result;
630
631 // Set the correct sample rate for the output device,
632 // but keep the channel count the same.
633 property_size = sizeof(rate);
634
635 pa.mSelector = kAudioDevicePropertyNominalSampleRate;
636 pa.mScope = kAudioObjectPropertyScopeWildcard;
637 pa.mElement = kAudioObjectPropertyElementMaster;
638 result = AudioObjectGetPropertyData(
639 output_info_.id_,
640 &pa,
641 0,
642 0,
643 &property_size,
644 &rate);
645
646 OSSTATUS_DCHECK(result == noErr, result);
647 if (result != noErr)
648 return result;
649
650 output_sample_rate_ = rate;
651
652 // The requested sample-rate must match the hardware sample-rate.
653 if (output_sample_rate_ != params_.sample_rate()) {
654 LOG(ERROR) << "Requested sample-rate: " << params_.sample_rate()
655 << " must match the hardware sample-rate: " << output_sample_rate_;
656 return kAudioDeviceUnsupportedFormatError;
657 }
658
659 asbd.mSampleRate = rate;
660 property_size = sizeof(asbd);
661
662 // Set the new audio stream formats for the rest of the AUs...
663 result = AudioUnitSetProperty(
664 varispeed_unit_,
665 kAudioUnitProperty_StreamFormat,
666 kAudioUnitScope_Output,
667 0,
668 &asbd,
669 property_size);
670
671 OSSTATUS_DCHECK(result == noErr, result);
672 if (result != noErr)
673 return result;
674
675 result = AudioUnitSetProperty(
676 output_unit_,
677 kAudioUnitProperty_StreamFormat,
678 kAudioUnitScope_Input,
679 0,
680 &asbd,
681 property_size);
682
683 OSSTATUS_DCHECK(result == noErr, result);
684 return result;
685 }
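SetupStreamFormats() only adjusts the channel count and sample rate of formats it queried from the units; the buffer handling in AllocateInputData() below then assumes non-interleaved Float32 data (one AudioBuffer per channel). For reference, a hand-built AudioStreamBasicDescription for that kind of format would look roughly like the sketch below; MakeFloatNonInterleavedASBD is a hypothetical helper and the values are illustrative, not taken from the patch:

#include <CoreAudio/CoreAudioTypes.h>

static AudioStreamBasicDescription MakeFloatNonInterleavedASBD(
    Float64 sample_rate, UInt32 channels) {
  AudioStreamBasicDescription asbd = {0};
  asbd.mSampleRate = sample_rate;
  asbd.mFormatID = kAudioFormatLinearPCM;
  asbd.mFormatFlags =
      kAudioFormatFlagsNativeFloatPacked | kAudioFormatFlagIsNonInterleaved;
  asbd.mChannelsPerFrame = channels;
  asbd.mFramesPerPacket = 1;
  asbd.mBitsPerChannel = 32;
  // Non-interleaved: each AudioBuffer carries one channel, so the per-frame
  // and per-packet sizes describe a single Float32 sample.
  asbd.mBytesPerFrame = sizeof(Float32);
  asbd.mBytesPerPacket = sizeof(Float32);
  return asbd;
}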
686
687 void AudioSynchronizedStream::AllocateInputData() {
688 // Allocate storage for the AudioBufferList used for the
689 // input data from the input AudioUnit.
690 // We allocate enough space for one AudioBuffer per channel.
691 size_t malloc_size = offsetof(AudioBufferList, mBuffers[0]) +
692 (sizeof(AudioBuffer) * channels_);
693
694 input_buffer_list_ = static_cast<AudioBufferList*>(malloc(malloc_size));
695 input_buffer_list_->mNumberBuffers = channels_;
696
697 input_bus_ = AudioBus::Create(channels_, hardware_buffer_size_);
698
699 // Allocate buffers for AudioBufferList.
700 UInt32 buffer_size_bytes = input_bus_->frames() * sizeof(Float32);
701 for (unsigned i = 0; i < input_buffer_list_->mNumberBuffers; ++i) {
scherkus (not reviewing) 2012/09/17 14:51:17 ditto for unsigned
Chris Rogers 2012/09/17 20:44:23 Done.
702 input_buffer_list_->mBuffers[i].mNumberChannels = 1;
703 input_buffer_list_->mBuffers[i].mDataByteSize = buffer_size_bytes;
704 input_buffer_list_->mBuffers[i].mData = input_bus_->channel(i);
705 }
706 }
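AudioBufferList is a variable-length struct, so AllocateInputData() sizes it as the fixed header plus one AudioBuffer per channel. A small standalone sketch of the same size arithmetic, with stereo assumed only for illustration:

#include <CoreAudio/CoreAudioTypes.h>
#include <cstddef>
#include <cstdio>

int main() {
  const int channels = 2;  // stereo, matching kChannels in the patch
  size_t malloc_size =
      offsetof(AudioBufferList, mBuffers[0]) + sizeof(AudioBuffer) * channels;
  printf("header %zu bytes + %d x %zu bytes per AudioBuffer = %zu bytes\n",
         offsetof(AudioBufferList, mBuffers[0]), channels,
         sizeof(AudioBuffer), malloc_size);
  return 0;
}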
707
708 OSStatus AudioSynchronizedStream::HandleInputCallback(
709 AudioUnitRenderActionFlags* io_action_flags,
710 const AudioTimeStamp* time_stamp,
711 UInt32 bus_number,
712 UInt32 number_of_frames,
713 AudioBufferList* io_data) {
714 TRACE_EVENT0("audio", "AudioSynchronizedStream::HandleInputCallback");
715
716 if (first_input_time_ < 0.0)
717 first_input_time_ = time_stamp->mSampleTime;
718
719 // Get the new audio input data.
720 OSStatus result = AudioUnitRender(
721 input_unit_,
722 io_action_flags,
723 time_stamp,
724 bus_number,
725 number_of_frames,
726 input_buffer_list_);
727
728 OSSTATUS_DCHECK(result == noErr, result);
729 if (result != noErr)
730 return result;
731
732 // Buffer input into FIFO.
733 int available_frames = fifo_.max_frames() - fifo_.frames();
734 if (input_bus_->frames() < available_frames)
no longer working on chromium 2012/09/17 20:47:46 <=?
Chris Rogers 2012/09/17 22:00:42 Done.
735 fifo_.Push(input_bus_.get());
736
737 return result;
738 }
739
740 OSStatus AudioSynchronizedStream::HandleVarispeedCallback(
741 AudioUnitRenderActionFlags* io_action_flags,
742 const AudioTimeStamp* time_stamp,
743 UInt32 bus_number,
744 UInt32 number_of_frames,
745 AudioBufferList* io_data) {
746 // Create a wrapper bus on the AudioBufferList.
747 AudioBus bus(channels_, number_of_frames, io_data);
748
749 if (fifo_.frames() < static_cast<int>(number_of_frames)) {
no longer working on chromium 2012/09/17 20:47:46 nit, do this before creating the bus.
Chris Rogers 2012/09/17 22:00:42 Can't do that because we also need access to the bus.
750 // We don't DCHECK here, since this is a possible run-time condition
751 // if the machine is bogged down.
752 bus.Zero();
753 return noErr;
754 }
755
756 // Read from the FIFO to feed the varispeed.
757 fifo_.Consume(&bus, 0, number_of_frames);
758
759 return noErr;
760 }
761
762 OSStatus AudioSynchronizedStream::HandleOutputCallback(
763 AudioUnitRenderActionFlags* io_action_flags,
764 const AudioTimeStamp* time_stamp,
765 UInt32 bus_number,
766 UInt32 number_of_frames,
767 AudioBufferList* io_data) {
768 if (first_input_time_ < 0.0) {
769 // Input callback hasn't run yet -> silence.
770 ZeroBufferList(io_data);
771 return noErr;
772 }
773
774 // Use the varispeed playback rate to offset small discrepancies
775 // in hardware clocks, and also any differences in sample-rate
776 // between input and output devices.
777
778 // Calculate a varispeed rate scalar factor to compensate for drift between
779 // input and output. We use the actual number of frames still in the FIFO
780 // compared with the ideal value of |target_fifo_frames_|.
781 int delta = fifo_.frames() - target_fifo_frames_;
782
783 // Average |delta| because it can jitter back/forth quite frequently
784 // by +/- the hardware buffer-size *if* the input and output callbacks are
785 // happening at almost exactly the same time. Also, if the input and output
786 // sample-rates are different then |delta| will jitter quite a bit due to
787 // the rate conversion happening in the varispeed, plus the jittering of
788 // the callbacks. The average value is what's important here.
789 ave_delta_ += (delta - ave_delta_) * 0.1;
790
791 // Compute a rate compensation which always attracts us back to the
792 // |target_fifo_frames_| over a period of kCorrectionTimeSeconds.
793 const double kCorrectionTimeSeconds = 0.100;
no longer working on chromium 2012/09/17 20:47:46 0.1?
Chris Rogers 2012/09/17 22:00:42 Done.
794 double correction_time_frames = kCorrectionTimeSeconds * output_sample_rate_;
795 fifo_rate_compensation_ =
no longer working on chromium 2012/09/17 20:47:46 why this needs to be a member? can it be a local variable?
Chris Rogers 2012/09/17 22:00:42 Good point, but actually I intend to maintain this
796 (correction_time_frames + ave_delta_) / correction_time_frames;
797
798 // Adjust for FIFO drift.
799 OSStatus result = AudioUnitSetParameter(
800 varispeed_unit_,
801 kVarispeedParam_PlaybackRate,
802 kAudioUnitScope_Global,
803 0,
804 fifo_rate_compensation_,
805 0);
806
807 OSSTATUS_DCHECK(result == noErr, result);
808 if (result != noErr)
809 return result;
810
811 // Render to the output using the varispeed.
812 result = AudioUnitRender(
813 varispeed_unit_,
814 io_action_flags,
815 time_stamp,
816 0,
817 number_of_frames,
818 io_data);
819
820 OSSTATUS_DCHECK(result == noErr, result);
821 if (result != noErr)
822 return result;
823
824 // Create a wrapper bus on the AudioBufferList.
825 AudioBus bus(channels_, number_of_frames, io_data);
826
827 // Process in-place!
828 source_->OnMoreIOData(&bus, &bus, AudioBuffersState(0, 0));
829
830 return noErr;
831 }
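The drift compensation in HandleOutputCallback() boils down to two lines of arithmetic: an exponential average of the FIFO level error and a playback-rate scalar that pulls the level back to |target_fifo_frames_| over roughly kCorrectionTimeSeconds. Below is a self-contained sketch of just that math with a toy simulation loop; the 48 kHz rate, 128-frame callbacks, and initial 96-frame overshoot are assumptions for illustration, not measurements:

#include <cstdio>

int main() {
  const int kTargetFifoFrames = 256 + 64;
  const double kOutputSampleRate = 48000.0;   // assumed for illustration
  const double kCorrectionTimeSeconds = 0.1;
  const double correction_time_frames =
      kCorrectionTimeSeconds * kOutputSampleRate;

  double ave_delta = 0.0;
  double fifo_frames = kTargetFifoFrames + 96.0;  // pretend the FIFO ran ahead

  for (int callback = 0; callback < 50; ++callback) {
    // Smooth the instantaneous error, as HandleOutputCallback() does.
    double delta = fifo_frames - kTargetFifoFrames;
    ave_delta += (delta - ave_delta) * 0.1;

    // Rate > 1.0 makes the varispeed drain the FIFO faster; < 1.0 lets it refill.
    double rate = (correction_time_frames + ave_delta) / correction_time_frames;

    // Toy model: per 128-frame render the varispeed consumes 128 * rate frames
    // while the input callback pushes 128.
    fifo_frames += 128.0 - 128.0 * rate;

    if (callback % 10 == 0)
      printf("callback %2d: fifo=%6.1f rate=%.5f\n", callback, fifo_frames, rate);
  }
  return 0;
}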
832
833 OSStatus AudioSynchronizedStream::InputProc(
834 void* user_data,
835 AudioUnitRenderActionFlags* io_action_flags,
836 const AudioTimeStamp* time_stamp,
837 UInt32 bus_number,
838 UInt32 number_of_frames,
839 AudioBufferList* io_data) {
840 AudioSynchronizedStream* stream =
841 static_cast<AudioSynchronizedStream*>(user_data);
842 DCHECK(stream);
843
844 return stream->HandleInputCallback(
845 io_action_flags,
846 time_stamp,
847 bus_number,
848 number_of_frames,
849 io_data);
850 }
851
852 OSStatus AudioSynchronizedStream::VarispeedProc(
853 void* user_data,
854 AudioUnitRenderActionFlags* io_action_flags,
855 const AudioTimeStamp* time_stamp,
856 UInt32 bus_number,
857 UInt32 number_of_frames,
858 AudioBufferList* io_data) {
859 AudioSynchronizedStream* stream =
860 static_cast<AudioSynchronizedStream*>(user_data);
861 DCHECK(stream);
862
863 return stream->HandleVarispeedCallback(
864 io_action_flags,
865 time_stamp,
866 bus_number,
867 number_of_frames,
868 io_data);
869 }
870
871 OSStatus AudioSynchronizedStream::OutputProc(
872 void* user_data,
873 AudioUnitRenderActionFlags* io_action_flags,
874 const AudioTimeStamp* time_stamp,
875 UInt32 bus_number,
876 UInt32 number_of_frames,
877 AudioBufferList* io_data) {
878 AudioSynchronizedStream* stream =
879 static_cast<AudioSynchronizedStream*>(user_data);
880 DCHECK(stream);
881
882 return stream->HandleOutputCallback(
883 io_action_flags,
884 time_stamp,
885 bus_number,
886 number_of_frames,
887 io_data);
888 }
889
890 void AudioSynchronizedStream::AudioDeviceInfo::Initialize(
891 AudioDeviceID id, bool is_input) {
892 id_ = id;
893 is_input_ = is_input;
894 if (id_ == kAudioDeviceUnknown)
895 return;
896
897 UInt32 property_size = sizeof(buffer_size_frames_);
898
899 AudioObjectPropertyAddress pa;
900 pa.mSelector = kAudioDevicePropertyBufferFrameSize;
901 pa.mScope = kAudioObjectPropertyScopeWildcard;
902 pa.mElement = kAudioObjectPropertyElementMaster;
903 OSStatus result = AudioObjectGetPropertyData(
904 id_,
905 &pa,
906 0,
907 0,
908 &property_size,
909 &buffer_size_frames_);
910
911 OSSTATUS_DCHECK(result == noErr, result);
912 }
913
914 } // namespace media