Chromium Code Reviews
| Index: media/filters/ffmpeg_audio_decoder.cc |
| diff --git a/media/filters/ffmpeg_audio_decoder.cc b/media/filters/ffmpeg_audio_decoder.cc |
| index a3e0a61c6dbfd9c1cae9c40c1235e789b808995c..de4038cc34f84fb69c733ab8ea143a1752fa1129 100644 |
| --- a/media/filters/ffmpeg_audio_decoder.cc |
| +++ b/media/filters/ffmpeg_audio_decoder.cc |
| @@ -8,6 +8,7 @@ |
| #include "base/callback_helpers.h" |
| #include "base/location.h" |
| #include "base/message_loop_proxy.h" |
| +#include "media/base/audio_bus.h" |
| #include "media/base/audio_decoder_config.h" |
| #include "media/base/data_buffer.h" |
| #include "media/base/decoder_buffer.h" |
| @@ -149,6 +150,21 @@ void FFmpegAudioDecoder::DoInitialize( |
| return; |
| } |
| + // Some codecs will only output float data, so we need to convert to integer |
| + // before returning the decoded buffer. |
| + if (codec_context_->sample_fmt == AV_SAMPLE_FMT_FLTP || |
| + codec_context_->sample_fmt == AV_SAMPLE_FMT_FLT) { |
| + DCHECK_EQ(static_cast<size_t>(config.bits_per_channel()), 8 * sizeof(float)); |
| + |
| + // Preallocate the AudioBus for float conversions. We can treat interleaved |
| + // float data as a single planar channel since our output is expected in an |
| + // interleaved format anyway. |
| + int channels = codec_context_->channels; |
| + if (codec_context_->sample_fmt == AV_SAMPLE_FMT_FLT) |
| + channels = 1; |
| + converter_bus_ = AudioBus::CreateWrapper(channels); |
| + } |
| + |
| // Success! |
| av_frame_ = avcodec_alloc_frame(); |
| bits_per_channel_ = config.bits_per_channel(); |
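
The hunk above decides at initialization time whether decoded output will need a float-to-integer pass and preallocates an AudioBus wrapper for it. The trick of treating AV_SAMPLE_FMT_FLT (interleaved float) as a single planar channel works because interleaving one channel is the identity operation, so converting C interleaved channels of N frames is byte-for-byte the same as converting one channel of N * C frames. Below is a standalone sketch of that idea; ToInterleavedS16() is a simplified, hypothetical stand-in for AudioBus::ToInterleaved(), and unlike the patch (which sizes output samples from bits_per_channel_) it hard-codes 16-bit output for brevity.

// Standalone sketch, not part of the patch: why AV_SAMPLE_FMT_FLT data can be
// wrapped as a single planar channel before conversion.
#include <cstdint>
#include <cstdio>
#include <vector>

// Hypothetical stand-in for AudioBus::ToInterleaved(): clamp [-1, 1] floats
// and write interleaved 16-bit samples.
static void ToInterleavedS16(const std::vector<const float*>& channels,
                             int frames, int16_t* dest) {
  for (int frame = 0; frame < frames; ++frame) {
    for (size_t ch = 0; ch < channels.size(); ++ch) {
      float sample = channels[ch][frame];
      if (sample < -1.0f) sample = -1.0f;
      if (sample > 1.0f) sample = 1.0f;
      *dest++ = static_cast<int16_t>(sample * 32767.0f);
    }
  }
}

int main() {
  // Two channels, three frames of already-interleaved float data (FLT layout).
  const float interleaved[] = {0.1f, -0.1f, 0.2f, -0.2f, 0.3f, -0.3f};
  const int channels = 2;
  const int frames = 3;

  // Wrap the interleaved buffer as one planar channel of frames * channels
  // samples, mirroring the AV_SAMPLE_FMT_FLT branch in the hunk above.
  std::vector<const float*> wrapper = {interleaved};
  std::vector<int16_t> output(frames * channels);
  ToInterleavedS16(wrapper, frames * channels, output.data());

  for (int16_t v : output)
    printf("%d ", v);  // 3276 -3276 6553 -6553 9830 -9830
  printf("\n");
  return 0;
}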
| @@ -295,7 +311,6 @@ void FFmpegAudioDecoder::DoDecodeBuffer( |
| } |
| } |
| - const uint8* decoded_audio_data = NULL; |
| int decoded_audio_size = 0; |
| if (frame_decoded) { |
| int output_sample_rate = av_frame_->sample_rate; |
| @@ -309,27 +324,57 @@ void FFmpegAudioDecoder::DoDecodeBuffer( |
| break; |
| } |
| - decoded_audio_data = av_frame_->data[0]; |
| decoded_audio_size = av_samples_get_buffer_size( |
| NULL, codec_context_->channels, av_frame_->nb_samples, |
| codec_context_->sample_fmt, 1); |
| } |
| - scoped_refptr<DataBuffer> output; |
| - |
| + int start_sample = 0; |
| if (decoded_audio_size > 0 && output_bytes_to_drop_ > 0) { |
| + DCHECK_EQ(decoded_audio_size % bytes_per_frame_, 0) |
| + << "Decoder didn't output full frames"; |
| + |
| int dropped_size = std::min(decoded_audio_size, output_bytes_to_drop_); |
| - decoded_audio_data += dropped_size; |
| + start_sample = dropped_size / bytes_per_frame_; |
| decoded_audio_size -= dropped_size; |
| output_bytes_to_drop_ -= dropped_size; |
| } |
| + scoped_refptr<DataBuffer> output; |
| if (decoded_audio_size > 0) { |
| DCHECK_EQ(decoded_audio_size % bytes_per_frame_, 0) |
| << "Decoder didn't output full frames"; |
| - // Copy the audio samples into an output buffer. |
| - output = new DataBuffer(decoded_audio_data, decoded_audio_size); |
| + // Convert float data using an AudioBus. |
| + if (converter_bus_) { |
| + // Set up the AudioBus as a wrapper of the AVFrame data and then use |
| + // AudioBus::ToInterleaved() to convert the data as necessary. |
|
scherkus (not reviewing)  2012/12/06 17:25:32
don't we immediately call FromInterleaved() in ARI

DaleCurtis  2012/12/06 22:15:41
Not quite immediately, but yes. The data first go
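
The exchange above is about an extra conversion hop: this patch interleaves the float samples into the integer output buffer, and, as the comment notes, the data is later turned back into planar form downstream via AudioBus::FromInterleaved(). A minimal standalone sketch of that round trip, with hypothetical helpers and a 16-bit intermediate format assumed for brevity:

// Standalone sketch of the round trip discussed above: planar float ->
// interleaved int16 (this decoder) -> planar float again (downstream).
// Both helpers are simplified illustrations, not Chromium APIs.
#include <cstdint>
#include <vector>

static void InterleaveS16(const std::vector<const float*>& in, int frames,
                          int16_t* out) {
  for (int f = 0; f < frames; ++f)
    for (size_t ch = 0; ch < in.size(); ++ch)
      *out++ = static_cast<int16_t>(in[ch][f] * 32767.0f);
}

static void DeinterleaveS16(const int16_t* in, int frames,
                            std::vector<std::vector<float>>* out) {
  for (int f = 0; f < frames; ++f)
    for (size_t ch = 0; ch < out->size(); ++ch)
      (*out)[ch][f] = *in++ / 32768.0f;
}

int main() {
  const float left[] = {0.5f, -0.5f};
  const float right[] = {0.25f, -0.25f};
  std::vector<const float*> planar = {left, right};

  // Decoder side: quantize and interleave into the output buffer.
  std::vector<int16_t> interleaved(2 * 2);
  InterleaveS16(planar, 2, interleaved.data());

  // Renderer side: split back into planar float channels.  The quantization
  // step and the extra copy are the cost the reviewers are pointing at.
  std::vector<std::vector<float>> roundtrip(2, std::vector<float>(2));
  DeinterleaveS16(interleaved.data(), 2, &roundtrip);
  return 0;
}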
| + int skip_frames = start_sample; |
| + int total_frames = av_frame_->nb_samples - start_sample; |
| + if (codec_context_->sample_fmt == AV_SAMPLE_FMT_FLT) { |
| + DCHECK_EQ(converter_bus_->channels(), 1); |
| + total_frames *= codec_context_->channels; |
| + skip_frames *= codec_context_->channels; |
| + } |
| + converter_bus_->set_frames(total_frames); |
| + DCHECK_EQ(decoded_audio_size, |
| + converter_bus_->frames() * bytes_per_frame_); |
| + |
| + for (int i = 0; i < converter_bus_->channels(); ++i) { |
| + converter_bus_->SetChannelData(i, reinterpret_cast<float*>( |
| + av_frame_->extended_data[i]) + skip_frames); |
| + } |
| + |
| + output = new DataBuffer(decoded_audio_size); |
| + output->SetDataSize(decoded_audio_size); |
| + converter_bus_->ToInterleaved( |
| + converter_bus_->frames(), bits_per_channel_ / 8, |
| + output->GetWritableData()); |
| + } else { |
| + output = new DataBuffer( |
| + av_frame_->extended_data[0] + start_sample * bytes_per_frame_, |
| + decoded_audio_size); |
| + } |
| base::TimeDelta timestamp = GetNextOutputTimestamp(); |
| total_frames_decoded_ += decoded_audio_size / bytes_per_frame_; |
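
For the planar (AV_SAMPLE_FMT_FLTP) case, the new code points the wrapper at each AVFrame channel plane, offset by the frames already consumed by output_bytes_to_drop_, and interleaves straight into the output buffer. Below is a self-contained sketch of that shape; the arrays and the inline conversion loop are simplified stand-ins for AVFrame, AudioBus, and DataBuffer, and 16-bit output is assumed where the patch derives the sample size from bits_per_channel_.

// Standalone sketch of the planar (AV_SAMPLE_FMT_FLTP) path above: offset each
// channel plane by the dropped frames, then interleave into one output buffer.
#include <cstdint>
#include <vector>

int main() {
  const int channels = 2;
  const int nb_samples = 4;   // Frames decoded into this AVFrame.
  const int start_sample = 1; // Frames consumed by output_bytes_to_drop_.

  // Stand-ins for av_frame_->extended_data[i]: one float plane per channel.
  const float plane0[] = {0.0f, 0.1f, 0.2f, 0.3f};
  const float plane1[] = {0.0f, -0.1f, -0.2f, -0.3f};
  const float* planes[] = {plane0, plane1};

  const int total_frames = nb_samples - start_sample;

  // Equivalent of SetChannelData(i, extended_data[i] + skip_frames) followed
  // by ToInterleaved(): walk each plane from the offset and write interleaved
  // 16-bit samples into the output buffer.
  std::vector<int16_t> output(total_frames * channels);
  int16_t* dest = output.data();
  for (int frame = 0; frame < total_frames; ++frame) {
    for (int ch = 0; ch < channels; ++ch) {
      const float* channel_data = planes[ch] + start_sample;
      *dest++ = static_cast<int16_t>(channel_data[frame] * 32767.0f);
    }
  }
  // output now holds frames 1..3 of both channels, interleaved L R L R L R.
  return 0;
}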