| Index: media/audio/mac/audio_low_latency_input_mac.cc
|
| diff --git a/media/audio/mac/audio_low_latency_input_mac.cc b/media/audio/mac/audio_low_latency_input_mac.cc
|
| index 5b78a80b319f2ee9df00e5e40a531a2d8767d13c..e7d1a1b2d6b5a715cf946ab1f4aa1dca7fbc5add 100644
|
| --- a/media/audio/mac/audio_low_latency_input_mac.cc
|
| +++ b/media/audio/mac/audio_low_latency_input_mac.cc
|
| @@ -64,9 +64,6 @@ AUAudioInputStream::AUAudioInputStream(
|
| // Set number of sample frames per callback used by the internal audio layer.
|
| // An internal FIFO is then utilized to adapt the internal size to the size
|
| // requested by the client.
|
| - // Note that we use the same native buffer size as for the output side here
|
| - // since the AUHAL implementation requires that both capture and render side
|
| - // use the same buffer size. See http://crbug.com/154352 for more details.
|
| number_of_frames_ = output_params.frames_per_buffer();
|
| DVLOG(1) << "Size of data buffer in frames : " << number_of_frames_;
|
|
|
| @@ -233,23 +230,38 @@ bool AUAudioInputStream::Open() {
|
| }
|
|
|
| // Set the desired number of frames in the IO buffer (output scope).
|
| - // WARNING: Setting this value changes the frame size for all audio units in
|
| - // the current process. It's imperative that the input and output frame sizes
|
| - // be the same as the frames_per_buffer() returned by
|
| - // GetInputStreamParameters().
|
| - // TODO(henrika): Due to http://crrev.com/159666 this is currently not true
|
| - // and should be fixed, a CHECK() should be added at that time.
|
| - result = AudioUnitSetProperty(audio_unit_,
|
| + // WARNING: Setting this value changes the frame size for all input audio
|
| + // units in the current process. As a result, the AURenderCallback must be
|
| + // able to handle arbitrary buffer sizes and use the internal FIFO to adapt.
|
| + UInt32 buffer_size = 0;
|
| + UInt32 property_size = sizeof(buffer_size);
|
| + result = AudioUnitGetProperty(audio_unit_,
|
| kAudioDevicePropertyBufferFrameSize,
|
| kAudioUnitScope_Output,
|
| 1,
|
| - &number_of_frames_, // size is set in the ctor
|
| - sizeof(number_of_frames_));
|
| - if (result) {
|
| + &buffer_size,
|
| + &property_size);
|
| + if (result != noErr) {
|
| HandleError(result);
|
| return false;
|
| }
|
|
|
| + // Only set the buffer size if this is the only active input stream or the
|
| + // requested buffer size is lower than the device's current buffer size.
|
| + if (manager_->input_stream_count() == 1 || number_of_frames_ < buffer_size) {
|
| + buffer_size = number_of_frames_;
|
| + result = AudioUnitSetProperty(audio_unit_,
|
| + kAudioDevicePropertyBufferFrameSize,
|
| + kAudioUnitScope_Output,
|
| + 1,
|
| + &buffer_size,
|
| + sizeof(buffer_size));
|
| + if (result != noErr) {
|
| + HandleError(result);
|
| + return false;
|
| + }
|
| + }
|
| +
|
| // Finally, initialize the audio unit and ensure that it is ready to render.
|
| // Allocates memory according to the maximum number of audio frames
|
| // it can produce in response to a single render call.
|
|
|