// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "media/base/audio_buffer_converter.h"

#include <algorithm>
#include <cmath>

#include "base/logging.h"
#include "media/base/audio_buffer.h"
#include "media/base/audio_bus.h"
#include "media/base/audio_decoder_config.h"
#include "media/base/audio_timestamp_helper.h"
#include "media/base/buffers.h"
#include "media/base/sinc_resampler.h"
#include "media/base/vector_math.h"

namespace media {

// Is the config presented by |buffer| a config change from |params|?
static bool IsConfigChange(const AudioParameters& params,
                           const scoped_refptr<AudioBuffer>& buffer) {
  return buffer->sample_rate() != params.sample_rate() ||
         buffer->channel_count() != params.channels() ||
         buffer->channel_layout() != params.channel_layout();
}

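// Note: |input_params_| starts out identical to |output_params_|, so no
// AudioConverter exists (and buffers pass straight through) until the first
// input buffer with a different config arrives.
//
// Sketch of how a caller might drive this class (names are illustrative):
//   converter->AddInput(decoded_buffer);
//   while (converter->HasNextBuffer())
//     converted_buffers.push_back(converter->GetNextBuffer());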
AudioBufferConverter::AudioBufferConverter(const AudioParameters& output_params)
    : output_params_(output_params),
      input_params_(output_params),
      offset_into_queue_(0),
      input_frames_(0),
      buffered_input_frames_(0.0),
      io_sample_rate_ratio_(1.0),
      timestamp_helper_(output_params_.sample_rate()),
      is_flushing_(false) {}

AudioBufferConverter::~AudioBufferConverter() {}

void AudioBufferConverter::AddInput(const scoped_refptr<AudioBuffer>& buffer) {
  // On EOS flush any remaining buffered data.
  if (buffer->end_of_stream()) {
    Flush();
    queued_outputs_.push_back(buffer);
    return;
  }

  // We'll need a new |audio_converter_| if there was a config change.
  if (IsConfigChange(input_params_, buffer))
    ResetConverter(buffer);

  // Pass straight through if there's no work to be done.
  if (!audio_converter_) {
    queued_outputs_.push_back(buffer);
    return;
  }

  if (timestamp_helper_.base_timestamp() == kNoTimestamp())
    timestamp_helper_.SetBaseTimestamp(buffer->timestamp());

  queued_inputs_.push_back(buffer);
  input_frames_ += buffer->frame_count();

  ConvertIfPossible();
}

bool AudioBufferConverter::HasNextBuffer() { return !queued_outputs_.empty(); }

scoped_refptr<AudioBuffer> AudioBufferConverter::GetNextBuffer() {
  DCHECK(!queued_outputs_.empty());
  scoped_refptr<AudioBuffer> out = queued_outputs_.front();
  queued_outputs_.pop_front();
  return out;
}

void AudioBufferConverter::Reset() {
  audio_converter_.reset();
  queued_inputs_.clear();
  queued_outputs_.clear();
  timestamp_helper_.SetBaseTimestamp(kNoTimestamp());
  input_params_ = output_params_;
  input_frames_ = 0;
  buffered_input_frames_ = 0.0;
  offset_into_queue_ = 0;
}

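// AudioConverter::InputCallback implementation. |audio_converter_| pulls
// input through this method while Convert() runs; frames are copied out of
// the front of |queued_inputs_|, resuming at |offset_into_queue_| within a
// partially consumed buffer. The return value is the volume to apply to the
// provided audio (always full volume here).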
double AudioBufferConverter::ProvideInput(AudioBus* audio_bus,
                                          base::TimeDelta buffer_delay) {
  DCHECK(is_flushing_ || input_frames_ >= audio_bus->frames());

  int requested_frames_left = audio_bus->frames();
  int dest_index = 0;

  while (requested_frames_left > 0 && !queued_inputs_.empty()) {
    scoped_refptr<AudioBuffer> input_buffer = queued_inputs_.front();

    int frames_to_read =
        std::min(requested_frames_left,
                 input_buffer->frame_count() - offset_into_queue_);
    input_buffer->ReadFrames(
        frames_to_read, offset_into_queue_, dest_index, audio_bus);
    offset_into_queue_ += frames_to_read;

    if (offset_into_queue_ == input_buffer->frame_count()) {
      // We've consumed all the frames in |input_buffer|.
      queued_inputs_.pop_front();
      offset_into_queue_ = 0;
    }

    requested_frames_left -= frames_to_read;
    dest_index += frames_to_read;
  }

  // If we're flushing, zero any extra space; otherwise we should always have
  // enough data to completely fulfill the request.
  if (is_flushing_ && requested_frames_left > 0) {
    audio_bus->ZeroFramesPartial(audio_bus->frames() - requested_frames_left,
                                 requested_frames_left);
  } else {
    DCHECK_EQ(requested_frames_left, 0);
  }

  input_frames_ -= (audio_bus->frames() - requested_frames_left);
  DCHECK_GE(input_frames_, 0);

  buffered_input_frames_ += audio_bus->frames() - requested_frames_left;

  // Full volume.
  return 1.0;
}

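// Called from AddInput() when the incoming buffer's config differs from
// |input_params_|: flushes anything buffered under the old config, then
// rebuilds |input_params_| (and |audio_converter_|, if a conversion is still
// required) to match |buffer|'s channel layout and sample rate.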
void AudioBufferConverter::ResetConverter(
    const scoped_refptr<AudioBuffer>& buffer) {
  Flush();
  audio_converter_.reset();
  input_params_.Reset(
      input_params_.format(),
      buffer->channel_layout(),
      buffer->channel_count(),
      0,
      buffer->sample_rate(),
      input_params_.bits_per_sample(),
      // This is arbitrary, but small buffer sizes result in a lot of tiny
      // ProvideInput calls, so we'll use at least the SincResampler's default
      // request size.
      std::max(buffer->frame_count(),
               static_cast<int>(SincResampler::kDefaultRequestSize)));

  io_sample_rate_ratio_ = static_cast<double>(input_params_.sample_rate()) /
                          output_params_.sample_rate();

  // If |buffer| matches |output_params_| we don't need an AudioConverter at
  // all, and can early-out here.
  if (!IsConfigChange(output_params_, buffer))
    return;

  audio_converter_.reset(
      new AudioConverter(input_params_, output_params_, false));
  audio_converter_->AddInput(this);
}

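// Pushes as much queued input through |audio_converter_| as whole conversion
// chunks allow (or everything that's left, when flushing) and appends the
// converted audio to |queued_outputs_| as a single AudioBuffer.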
void AudioBufferConverter::ConvertIfPossible() {
  DCHECK(audio_converter_);

  int request_frames = 0;

  if (is_flushing_) {
    // If we're flushing we want to convert *everything* even if this means
    // we'll have to pad some silence in ProvideInput().
    request_frames =
        ceil((buffered_input_frames_ + input_frames_) / io_sample_rate_ratio_);
  } else {
    // How many calls to ProvideInput() we can satisfy completely.
    int chunks = input_frames_ / input_params_.frames_per_buffer();

    // How many output frames that corresponds to:
    request_frames = chunks * audio_converter_->ChunkSize();
  }

  if (!request_frames)
    return;

  scoped_refptr<AudioBuffer> output_buffer =
      AudioBuffer::CreateBuffer(kSampleFormatPlanarF32,
                                output_params_.channel_layout(),
                                output_params_.sample_rate(),
                                request_frames);
  scoped_ptr<AudioBus> output_bus =
      AudioBus::CreateWrapper(output_buffer->channel_count());

  int frames_remaining = request_frames;

  // The AudioConverter wants requests of a fixed size, so we'll slide an
  // AudioBus of that size across the |output_buffer|.
  while (frames_remaining != 0) {
    int frames_this_iteration =
        std::min(output_params_.frames_per_buffer(), frames_remaining);

    int offset_into_buffer = output_buffer->frame_count() - frames_remaining;

    // Wrap the portion of the AudioBuffer in an AudioBus so the AudioConverter
    // can fill it.
    output_bus->set_frames(frames_this_iteration);
    for (int ch = 0; ch < output_buffer->channel_count(); ++ch) {
      output_bus->SetChannelData(
          ch,
          reinterpret_cast<float*>(output_buffer->channel_data()[ch]) +
              offset_into_buffer);
    }

    // Do the actual conversion.
    audio_converter_->Convert(output_bus.get());
    frames_remaining -= frames_this_iteration;
    buffered_input_frames_ -= frames_this_iteration * io_sample_rate_ratio_;
  }

  // Compute the timestamp.
  output_buffer->set_timestamp(timestamp_helper_.GetTimestamp());
  output_buffer->set_duration(
      timestamp_helper_.GetFrameDuration(request_frames));
  timestamp_helper_.AddFrames(request_frames);

  queued_outputs_.push_back(output_buffer);
}

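// Drains whatever input is still queued with one final conversion pass;
// ProvideInput() pads the tail of that pass with silence, so the last output
// buffer may end in silence rather than real data.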
void AudioBufferConverter::Flush() {
  if (!audio_converter_)
    return;
  is_flushing_ = true;
  ConvertIfPossible();
  is_flushing_ = false;
  DCHECK_EQ(input_frames_, 0);
  DCHECK_EQ(offset_into_queue_, 0);
  DCHECK_LT(buffered_input_frames_, 1.0);
  DCHECK(queued_inputs_.empty());
  buffered_input_frames_ = 0.0;
}

}  // namespace media