OLD | NEW |
---|---|
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "media/audio/mac/audio_low_latency_input_mac.h" | 5 #include "media/audio/mac/audio_low_latency_input_mac.h" |
6 | 6 |
7 #include <CoreServices/CoreServices.h> | 7 #include <CoreServices/CoreServices.h> |
8 | 8 |
9 #include "base/basictypes.h" | 9 #include "base/basictypes.h" |
10 #include "base/logging.h" | 10 #include "base/logging.h" |
11 #include "base/mac/mac_logging.h" | 11 #include "base/mac/mac_logging.h" |
12 #include "media/audio/mac/audio_manager_mac.h" | 12 #include "media/audio/mac/audio_manager_mac.h" |
13 #include "media/base/audio_bus.h" | 13 #include "media/base/audio_bus.h" |
14 #include "media/base/audio_fifo.h" | |
14 #include "media/base/data_buffer.h" | 15 #include "media/base/data_buffer.h" |
15 | 16 |
16 namespace media { | 17 namespace media { |
17 | 18 |
18 static std::ostream& operator<<(std::ostream& os, | 19 static std::ostream& operator<<(std::ostream& os, |
19 const AudioStreamBasicDescription& format) { | 20 const AudioStreamBasicDescription& format) { |
20 os << "sample rate : " << format.mSampleRate << std::endl | 21 os << "sample rate : " << format.mSampleRate << std::endl |
21 << "format ID : " << format.mFormatID << std::endl | 22 << "format ID : " << format.mFormatID << std::endl |
22 << "format flags : " << format.mFormatFlags << std::endl | 23 << "format flags : " << format.mFormatFlags << std::endl |
23 << "bytes per packet : " << format.mBytesPerPacket << std::endl | 24 << "bytes per packet : " << format.mBytesPerPacket << std::endl |
24 << "frames per packet : " << format.mFramesPerPacket << std::endl | 25 << "frames per packet : " << format.mFramesPerPacket << std::endl |
25 << "bytes per frame : " << format.mBytesPerFrame << std::endl | 26 << "bytes per frame : " << format.mBytesPerFrame << std::endl |
26 << "channels per frame: " << format.mChannelsPerFrame << std::endl | 27 << "channels per frame: " << format.mChannelsPerFrame << std::endl |
27 << "bits per channel : " << format.mBitsPerChannel; | 28 << "bits per channel : " << format.mBitsPerChannel; |
28 return os; | 29 return os; |
29 } | 30 } |
30 | 31 |
31 // See "Technical Note TN2091 - Device input using the HAL Output Audio Unit" | 32 // See "Technical Note TN2091 - Device input using the HAL Output Audio Unit" |
32 // http://developer.apple.com/library/mac/#technotes/tn2091/_index.html | 33 // http://developer.apple.com/library/mac/#technotes/tn2091/_index.html |
33 // for more details and background regarding this implementation. | 34 // for more details and background regarding this implementation. |
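For readers unfamiliar with the AUHAL recording setup the note refers to: the essential step is enabling IO on the input element (1) and disabling it on the output element (0) before the current device is set. A minimal sketch of that step, assuming `audio_unit` is an already opened AUHAL instance (illustrative only, not code from this patch):

```cpp
// Enable input on element 1 and disable output on element 0 of the AUHAL,
// as TN2091 describes; without this the unit cannot capture audio.
UInt32 enable_io = 1;
OSStatus status = AudioUnitSetProperty(audio_unit,
                                       kAudioOutputUnitProperty_EnableIO,
                                       kAudioUnitScope_Input,
                                       1,  // input element
                                       &enable_io, sizeof(enable_io));
if (status == noErr) {
  UInt32 disable_io = 0;
  status = AudioUnitSetProperty(audio_unit,
                                kAudioOutputUnitProperty_EnableIO,
                                kAudioUnitScope_Output,
                                0,  // output element
                                &disable_io, sizeof(disable_io));
}
```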
34 | 35 |
35 AUAudioInputStream::AUAudioInputStream(AudioManagerMac* manager, | 36 AUAudioInputStream::AUAudioInputStream(AudioManagerMac* manager, |
36 const AudioParameters& input_params, | 37 const AudioParameters& input_params, |
37 const AudioParameters& output_params, | |
38 AudioDeviceID audio_device_id) | 38 AudioDeviceID audio_device_id) |
39 : manager_(manager), | 39 : manager_(manager), |
40 number_of_frames_(input_params.frames_per_buffer()), | |
40 sink_(NULL), | 41 sink_(NULL), |
41 audio_unit_(0), | 42 audio_unit_(0), |
42 input_device_id_(audio_device_id), | 43 input_device_id_(audio_device_id), |
43 started_(false), | 44 started_(false), |
44 hardware_latency_frames_(0), | 45 hardware_latency_frames_(0), |
45 fifo_delay_bytes_(0), | |
46 number_of_channels_in_frame_(0), | 46 number_of_channels_in_frame_(0), |
47 audio_bus_(media::AudioBus::Create(input_params)) { | 47 audio_bus_(media::AudioBus::Create(input_params)), |
48 audio_wrapper_(media::AudioBus::Create(input_params)) { | |
48 DCHECK(manager_); | 49 DCHECK(manager_); |
49 | 50 |
50 // Set up the desired (output) format specified by the client. | 51 // Set up the desired (output) format specified by the client. |
51 format_.mSampleRate = input_params.sample_rate(); | 52 format_.mSampleRate = input_params.sample_rate(); |
52 format_.mFormatID = kAudioFormatLinearPCM; | 53 format_.mFormatID = kAudioFormatLinearPCM; |
53 format_.mFormatFlags = kLinearPCMFormatFlagIsPacked | | 54 format_.mFormatFlags = kLinearPCMFormatFlagIsPacked | |
54 kLinearPCMFormatFlagIsSignedInteger; | 55 kLinearPCMFormatFlagIsSignedInteger; |
55 format_.mBitsPerChannel = input_params.bits_per_sample(); | 56 format_.mBitsPerChannel = input_params.bits_per_sample(); |
56 format_.mChannelsPerFrame = input_params.channels(); | 57 format_.mChannelsPerFrame = input_params.channels(); |
57 format_.mFramesPerPacket = 1; // uncompressed audio | 58 format_.mFramesPerPacket = 1; // uncompressed audio |
58 format_.mBytesPerPacket = (format_.mBitsPerChannel * | 59 format_.mBytesPerPacket = (format_.mBitsPerChannel * |
59 input_params.channels()) / 8; | 60 input_params.channels()) / 8; |
60 format_.mBytesPerFrame = format_.mBytesPerPacket; | 61 format_.mBytesPerFrame = format_.mBytesPerPacket; |
61 format_.mReserved = 0; | 62 format_.mReserved = 0; |
62 | 63 |
63 DVLOG(1) << "Desired ouput format: " << format_; | 64 DVLOG(1) << "Desired ouput format: " << format_; |
64 | 65 |
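As a quick sanity check of the packing math above: with the common 16-bit stereo input parameters, mBytesPerPacket = mBytesPerFrame = (16 * 2) / 8 = 4 bytes, i.e. one interleaved frame of packed signed-integer samples.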
65 // Set number of sample frames per callback used by the internal audio layer. | |
66 // An internal FIFO is then utilized to adapt the internal size to the size | |
67 // requested by the client. | |
68 number_of_frames_ = output_params.frames_per_buffer(); | |
69 DVLOG(1) << "Size of data buffer in frames : " << number_of_frames_; | |
70 | |
71 // Derive size (in bytes) of the buffers that we will render to. | 66 // Derive size (in bytes) of the buffers that we will render to. |
72 UInt32 data_byte_size = number_of_frames_ * format_.mBytesPerFrame; | 67 UInt32 data_byte_size = number_of_frames_ * format_.mBytesPerFrame; |
73 DVLOG(1) << "Size of data buffer in bytes : " << data_byte_size; | 68 DVLOG(1) << "Size of data buffer in bytes : " << data_byte_size; |
74 | 69 |
75 // Allocate AudioBuffers to be used as storage for the received audio. | 70 // Allocate AudioBuffers to be used as storage for the received audio. |
76 // The AudioBufferList structure works as a placeholder for the | 71 // The AudioBufferList structure works as a placeholder for the |
77 // AudioBuffer structure, which holds a pointer to the actual data buffer. | 72 // AudioBuffer structure, which holds a pointer to the actual data buffer. |
78 audio_data_buffer_.reset(new uint8[data_byte_size]); | 73 audio_data_buffer_.reset(new uint8[data_byte_size]); |
79 audio_buffer_list_.mNumberBuffers = 1; | 74 audio_buffer_list_.mNumberBuffers = 1; |
80 | 75 |
81 AudioBuffer* audio_buffer = audio_buffer_list_.mBuffers; | 76 AudioBuffer* audio_buffer = audio_buffer_list_.mBuffers; |
82 audio_buffer->mNumberChannels = input_params.channels(); | 77 audio_buffer->mNumberChannels = input_params.channels(); |
83 audio_buffer->mDataByteSize = data_byte_size; | 78 audio_buffer->mDataByteSize = data_byte_size; |
84 audio_buffer->mData = audio_data_buffer_.get(); | 79 audio_buffer->mData = audio_data_buffer_.get(); |
85 | |
86 // Set up an internal FIFO buffer that will accumulate recorded audio frames | |
87 // until a requested size is ready to be sent to the client. | |
88 // It is not possible to ask for less than |kAudioFramesPerCallback| number of | |
89 // audio frames. | |
90 size_t requested_size_frames = | |
91 input_params.GetBytesPerBuffer() / format_.mBytesPerPacket; | |
92 if (requested_size_frames < number_of_frames_) { | |
93 // For devices that only support a low sample rate like 8kHz, we adjust the | |
94 // buffer size to match number_of_frames_. The value of number_of_frames_ | |
95 // in this case has not been calculated based on hardware settings but | |
96 // rather our hardcoded defaults (see ChooseBufferSize). | |
97 requested_size_frames = number_of_frames_; | |
98 } | |
99 | |
100 requested_size_bytes_ = requested_size_frames * format_.mBytesPerFrame; | |
101 DVLOG(1) << "Requested buffer size in bytes : " << requested_size_bytes_; | |
102 DVLOG_IF(0, requested_size_frames > number_of_frames_) << "FIFO is used"; | |
103 | |
104 const int number_of_bytes = number_of_frames_ * format_.mBytesPerFrame; | |
105 fifo_delay_bytes_ = requested_size_bytes_ - number_of_bytes; | |
106 | |
107 // Allocate some extra memory to avoid memory reallocations. | |
108 // Ensure that the size is an even multiple of |number_of_frames_| and | 
109 // larger than |requested_size_frames|. | |
110 // Example: number_of_frames_=128, requested_size_frames=480 => | |
111 // allocated space equals 4*128=512 audio frames | |
112 const int max_forward_capacity = number_of_bytes * | |
113 ((requested_size_frames / number_of_frames_) + 1); | |
114 fifo_.reset(new media::SeekableBuffer(0, max_forward_capacity)); | |
115 | |
116 data_ = new media::DataBuffer(requested_size_bytes_); | |
117 } | 80 } |
118 | 81 |
119 AUAudioInputStream::~AUAudioInputStream() {} | 82 AUAudioInputStream::~AUAudioInputStream() {} |
120 | 83 |
121 // Obtain and open the AUHAL AudioOutputUnit for recording. | 84 // Obtain and open the AUHAL AudioOutputUnit for recording. |
122 bool AUAudioInputStream::Open() { | 85 bool AUAudioInputStream::Open() { |
123 // Verify that we are not already opened. | 86 // Verify that we are not already opened. |
124 if (audio_unit_) | 87 if (audio_unit_) |
125 return false; | 88 return false; |
126 | 89 |
127 // Verify that we have a valid device. | 90 // Verify that we have a valid device. |
128 if (input_device_id_ == kAudioObjectUnknown) { | 91 if (input_device_id_ == kAudioObjectUnknown) { |
129 NOTREACHED() << "Device ID is unknown"; | 92 NOTREACHED() << "Device ID is unknown"; |
130 return false; | 93 return false; |
131 } | 94 } |
132 | 95 |
133 // Start by obtaining an AudioOuputUnit using an AUHAL component description. | 96 // Start by obtaining an AudioOuputUnit using an AUHAL component description. |
134 | 97 |
135 Component comp; | 98 // Description for the Audio Unit we want to use (AUHAL in this case). |
136 ComponentDescription desc; | 99 AudioComponentDescription desc = { |
100 kAudioUnitType_Output, | |
101 kAudioUnitSubType_HALOutput, | |
102 kAudioUnitManufacturer_Apple, | |
103 0, | |
104 0 | |
105 }; | |
137 | 106 |
138 // Description for the Audio Unit we want to use (AUHAL in this case). | 107 AudioComponent comp = AudioComponentFindNext(0, &desc); |
139 desc.componentType = kAudioUnitType_Output; | |
140 desc.componentSubType = kAudioUnitSubType_HALOutput; | |
141 desc.componentManufacturer = kAudioUnitManufacturer_Apple; | |
142 desc.componentFlags = 0; | |
143 desc.componentFlagsMask = 0; | |
144 comp = FindNextComponent(0, &desc); | |
145 DCHECK(comp); | 108 DCHECK(comp); |
146 | 109 |
147 // Get access to the service provided by the specified Audio Unit. | 110 // Get access to the service provided by the specified Audio Unit. |
148 OSStatus result = OpenAComponent(comp, &audio_unit_); | 111 OSStatus result = AudioComponentInstanceNew(comp, &audio_unit_); |
149 if (result) { | 112 if (result) { |
150 HandleError(result); | 113 HandleError(result); |
151 return false; | 114 return false; |
152 } | 115 } |
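Side note on this change: FindNextComponent()/OpenAComponent() belong to the long-deprecated Component Manager, while AudioComponentFindNext()/AudioComponentInstanceNew() are the AudioToolbox replacements available since OS X 10.6. If the teardown path still calls CloseComponent(), it would need the matching update; a minimal sketch of what that would look like (an assumption, not shown in this diff):

```cpp
// Hypothetical teardown counterpart for an AUHAL obtained via
// AudioComponentInstanceNew(): uninitialize, then dispose of the instance.
AudioUnitUninitialize(audio_unit_);
AudioComponentInstanceDispose(audio_unit_);
audio_unit_ = 0;
```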
153 | 116 |
154 // Enable IO on the input scope of the Audio Unit. | 117 // Enable IO on the input scope of the Audio Unit. |
155 | 118 |
156 // After creating the AUHAL object, we must enable IO on the input scope | 119 // After creating the AUHAL object, we must enable IO on the input scope |
157 // of the Audio Unit to obtain the device input. Input must be explicitly | 120 // of the Audio Unit to obtain the device input. Input must be explicitly |
158 // enabled with the kAudioOutputUnitProperty_EnableIO property on Element 1 | 121 // enabled with the kAudioOutputUnitProperty_EnableIO property on Element 1 |
(...skipping 361 matching lines...) |
520 // The AGC volume level is updated once every second on a separate thread. | 483 // The AGC volume level is updated once every second on a separate thread. |
521 // Note that, |volume| is also updated each time SetVolume() is called | 484 // Note that, |volume| is also updated each time SetVolume() is called |
522 // through IPC by the render-side AGC. | 485 // through IPC by the render-side AGC. |
523 double normalized_volume = 0.0; | 486 double normalized_volume = 0.0; |
524 GetAgcVolume(&normalized_volume); | 487 GetAgcVolume(&normalized_volume); |
525 | 488 |
526 AudioBuffer& buffer = io_data->mBuffers[0]; | 489 AudioBuffer& buffer = io_data->mBuffers[0]; |
527 uint8* audio_data = reinterpret_cast<uint8*>(buffer.mData); | 490 uint8* audio_data = reinterpret_cast<uint8*>(buffer.mData); |
528 uint32 capture_delay_bytes = static_cast<uint32> | 491 uint32 capture_delay_bytes = static_cast<uint32> |
529 ((capture_latency_frames + 0.5) * format_.mBytesPerFrame); | 492 ((capture_latency_frames + 0.5) * format_.mBytesPerFrame); |
530 // Account for the extra delay added by the FIFO. | |
531 capture_delay_bytes += fifo_delay_bytes_; | |
532 DCHECK(audio_data); | 493 DCHECK(audio_data); |
533 if (!audio_data) | 494 if (!audio_data) |
534 return kAudioUnitErr_InvalidElement; | 495 return kAudioUnitErr_InvalidElement; |
535 | 496 |
536 // Accumulate captured audio in FIFO until we can match the output size | 497 if (number_of_frames != number_of_frames_) { |
537 // requested by the client. | 498 // Create a FIFO on the fly to handle any discrepancies in callback rates. |
538 fifo_->Append(audio_data, buffer.mDataByteSize); | 499 if (!fifo_) { |
500 VLOG(1) << "Audio frame size changed from " << number_of_frames_ << " to " | |
501 << number_of_frames << "; adding FIFO to compensate."; | |
502 fifo_.reset(new AudioFifo( | |
503 format_.mChannelsPerFrame, number_of_frames_ + number_of_frames)); | |
504 } | |
539 | 505 |
540 // Deliver recorded data to the client as soon as the FIFO contains a | 506 if (audio_wrapper_->frames() != static_cast<int>(number_of_frames)) { |
541 // sufficient amount. | 507 audio_wrapper_ = media::AudioBus::Create(format_.mChannelsPerFrame, |
542 if (fifo_->forward_bytes() >= requested_size_bytes_) { | 508 number_of_frames); |
543 // Read from FIFO into temporary data buffer. | 509 } |
544 fifo_->Read(data_->writable_data(), requested_size_bytes_); | 510 } |
545 | 511 |
546 // Copy captured (and interleaved) data into deinterleaved audio bus. | 512 // Copy captured (and interleaved) data into deinterleaved audio bus. |
547 audio_bus_->FromInterleaved( | 513 audio_wrapper_->FromInterleaved( |
DaleCurtis
2014/06/27 00:00:42
You should be able to set it up such that the AUHA
no longer working on chromium
2014/06/27 20:25:37
I guess you meant opening the device with format:
DaleCurtis
2014/06/28 00:45:50
Ah, that's unfortunate.
| |
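The thread above is truncated; one plausible reading of the suggestion is to have the AUHAL itself deliver deinterleaved float data (the AudioBus layout), so the FromInterleaved() call this comment is attached to would be unnecessary. A rough sketch of that idea, reusing names from the surrounding code and clearly not part of the patch (the follow-up replies suggest it did not work out in practice):

```cpp
// Hypothetical: request planar 32-bit float from the AUHAL on the output
// scope of the input element (1), matching AudioBus's in-memory layout.
AudioStreamBasicDescription fmt = {};
fmt.mSampleRate = input_params.sample_rate();
fmt.mFormatID = kAudioFormatLinearPCM;
fmt.mFormatFlags = kAudioFormatFlagsNativeFloatPacked |
                   kAudioFormatFlagIsNonInterleaved;
fmt.mChannelsPerFrame = input_params.channels();
fmt.mFramesPerPacket = 1;
fmt.mBitsPerChannel = 32;
fmt.mBytesPerPacket = fmt.mBytesPerFrame = sizeof(Float32);  // per channel
OSStatus status = AudioUnitSetProperty(audio_unit_,
                                       kAudioUnitProperty_StreamFormat,
                                       kAudioUnitScope_Output,
                                       1,  // input element
                                       &fmt, sizeof(fmt));
```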
548 data_->data(), audio_bus_->frames(), format_.mBitsPerChannel / 8); | 514 audio_data, audio_wrapper_->frames(), format_.mBitsPerChannel / 8); |
549 | 515 |
550 // Deliver data packet, delay estimation and volume level to the user. | 516 // When FIFO does not kick in, data will be directly passed to the callback. |
551 sink_->OnData( | 517 if (!fifo_) { |
552 this, audio_bus_.get(), capture_delay_bytes, normalized_volume); | 518 CHECK_EQ(audio_wrapper_->frames(), static_cast<int>(number_of_frames_)); |
519 sink_->OnData(this, audio_wrapper_.get(), capture_delay_bytes, | |
DaleCurtis
2014/06/27 00:00:42
clang-format? I think this style is frowned upon.
no longer working on chromium
2014/06/27 20:25:37
Done.
| |
520 normalized_volume); | |
521 return noErr; | |
522 } | |
523 | |
524 // Compensate the audio delay caused by the FIFO. | |
525 capture_delay_bytes += fifo_->frames() * format_.mBytesPerFrame; | |
526 fifo_->Push(audio_wrapper_.get()); | |
527 if (fifo_->frames() >= static_cast<int>(number_of_frames_)) { | |
528 // Consume the audio from the FIFO. | |
529 fifo_->Consume(audio_bus_.get(), 0, audio_bus_->frames()); | |
DaleCurtis
2014/06/27 00:00:42
We could consider making a FIFO::Peek() object whi
no longer working on chromium
2014/06/27 20:25:37
It is great if we can avoid the copying.
I took a
DaleCurtis
2014/06/28 00:45:50
Ah I forgot about the circular buffer properties,
no longer working on chromium
2014/06/29 20:30:00
Both are good ideas, and I like the second one par
DaleCurtis
2014/06/30 21:00:29
I think that's a great idea. Thanks! I don't thin
| |
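For readers following the (truncated) thread: the idea is to avoid the extra Consume() copy a few lines up by handing the sink a view into the FIFO's own storage. media::AudioFifo has no such method, so the sketch below is purely hypothetical; as the later replies hint, the circular buffer's wrap-around means a contiguous view is not always available, which is presumably why the copy stayed.

```cpp
// Hypothetical Peek()/Drop() style API on the FIFO (does not exist in
// media::AudioFifo): expose the buffered frames directly instead of
// copying them into |audio_bus_| first.
if (fifo_->frames() >= static_cast<int>(number_of_frames_)) {
  media::AudioBus* view = fifo_->Peek(number_of_frames_);  // hypothetical
  sink_->OnData(this, view, capture_delay_bytes, normalized_volume);
  fifo_->Drop(number_of_frames_);                          // hypothetical
}
```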
530 DCHECK(fifo_->frames() < static_cast<int>(number_of_frames_)); | |
531 | |
532 sink_->OnData(this, audio_bus_.get(), capture_delay_bytes, | |
533 normalized_volume); | |
553 } | 534 } |
554 | 535 |
555 return noErr; | 536 return noErr; |
556 } | 537 } |
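A quick worked example of the FIFO delay compensation earlier in this callback: with the 16-bit stereo format (4 bytes per frame), 128 frames left in the FIFO add 128 * 4 = 512 bytes to capture_delay_bytes before the new frames are pushed, so the sink sees the extra buffering as added latency.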
557 | 538 |
558 int AUAudioInputStream::HardwareSampleRate() { | 539 int AUAudioInputStream::HardwareSampleRate() { |
559 // Determine the default input device's sample-rate. | 540 // Determine the default input device's sample-rate. |
560 AudioDeviceID device_id = kAudioObjectUnknown; | 541 AudioDeviceID device_id = kAudioObjectUnknown; |
561 UInt32 info_size = sizeof(device_id); | 542 UInt32 info_size = sizeof(device_id); |
562 | 543 |
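The rest of HardwareSampleRate() is collapsed in this view; the conventional CoreAudio sequence for this query, sketched below for context (it may not match the hidden lines exactly), is to resolve the default input device and then read its nominal sample rate.

```cpp
// Resolve the default input device.
AudioObjectPropertyAddress default_input_address = {
    kAudioHardwarePropertyDefaultInputDevice,
    kAudioObjectPropertyScopeGlobal,
    kAudioObjectPropertyElementMaster};
OSStatus result = AudioObjectGetPropertyData(kAudioObjectSystemObject,
                                             &default_input_address,
                                             0, NULL, &info_size, &device_id);
if (result != noErr || device_id == kAudioObjectUnknown)
  return 0;

// Read the device's nominal (hardware) sample rate.
Float64 nominal_sample_rate = 0.0;
info_size = sizeof(nominal_sample_rate);
AudioObjectPropertyAddress sample_rate_address = {
    kAudioDevicePropertyNominalSampleRate,
    kAudioObjectPropertyScopeGlobal,
    kAudioObjectPropertyElementMaster};
result = AudioObjectGetPropertyData(device_id, &sample_rate_address,
                                    0, NULL, &info_size, &nominal_sample_rate);
return (result == noErr) ? static_cast<int>(nominal_sample_rate) : 0;
```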
(...skipping 121 matching lines...) |
684 kAudioDevicePropertyScopeInput, | 665 kAudioDevicePropertyScopeInput, |
685 static_cast<UInt32>(channel) | 666 static_cast<UInt32>(channel) |
686 }; | 667 }; |
687 OSStatus result = AudioObjectIsPropertySettable(input_device_id_, | 668 OSStatus result = AudioObjectIsPropertySettable(input_device_id_, |
688 &property_address, | 669 &property_address, |
689 &is_settable); | 670 &is_settable); |
690 return (result == noErr) ? is_settable : false; | 671 return (result == noErr) ? is_settable : false; |
691 } | 672 } |
692 | 673 |
693 } // namespace media | 674 } // namespace media |