Index: services/media/framework_ffmpeg/ffmpeg_audio_decoder.cc |
diff --git a/services/media/framework_ffmpeg/ffmpeg_audio_decoder.cc b/services/media/framework_ffmpeg/ffmpeg_audio_decoder.cc |
new file mode 100644 |
index 0000000000000000000000000000000000000000..d73b491b34c2ee27cd980fddce3b2659461594c8 |
--- /dev/null |
+++ b/services/media/framework_ffmpeg/ffmpeg_audio_decoder.cc |
@@ -0,0 +1,208 @@ |
+// Copyright 2016 The Chromium Authors. All rights reserved. |
+// Use of this source code is governed by a BSD-style license that can be |
+// found in the LICENSE file. |
+ |
+#include "base/logging.h" |
+#include "services/media/framework_ffmpeg/ffmpeg_audio_decoder.h" |
+ |
+namespace mojo { |
+namespace media { |
+ |
+FfmpegAudioDecoder::FfmpegAudioDecoder(AVCodecContext* av_codec_context) : |
+ FfmpegDecoderBase(av_codec_context) { |
+ DCHECK(av_codec_context_); |
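+  // Have ffmpeg allocate frame buffers through AllocateBufferForAvFrame (the |
+  // opaque pointer lets that static callback find this decoder), and request |
+  // reference-counted frames so each buffer remains valid until its last |
+  // reference is released, at which point ReleaseBufferForAvFrame runs. |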
+ av_codec_context_->opaque = this; |
+ av_codec_context_->get_buffer2 = AllocateBufferForAvFrame; |
+ av_codec_context_->refcounted_frames = 1; |
+ |
+ if (av_sample_fmt_is_planar(av_codec_context->sample_fmt)) { |
johngro 2016/03/01 18:05:35: DCHECK(av_codec_context_->channels); There is a n…
dalesat 2016/03/03 20:41:10: Done.
+ // Prepare for interleaving. |
+ stream_type_ = output_stream_type(); |
+ lpcm_util_ = LpcmUtil::Create(*stream_type_->lpcm()); |
+ // Because we'll be copying the output frames when we interleave, we use |
+  // the default allocator to make buffers for the non-interleaved frames. |
+ // When we interleave, we'll get the output buffer from the provided |
+ // allocator. |
+ allocator_ = PayloadAllocator::GetDefault(); |
+ } |
+} |
+ |
+FfmpegAudioDecoder::~FfmpegAudioDecoder() {} |
+ |
+int FfmpegAudioDecoder::Decode( |
+ PayloadAllocator* allocator, |
+ bool* frame_decoded_out) { |
+ DCHECK(allocator); |
+ DCHECK(frame_decoded_out); |
+ DCHECK(av_codec_context_); |
+ DCHECK(av_frame_); |
+ |
+ // These get set in AllocateBufferForAvFrame. |
+ packet_size_ = 0; |
+ packet_buffer_ = nullptr; |
+ |
+ // Use the provided allocator unless we intend to interleave later. |
+ if (!lpcm_util_) { |
+ allocator_ = allocator; |
+ } |
+ |
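+  // avcodec_decode_audio4 returns the number of bytes consumed from the |
+  // packet (or a negative error code) and sets frame_decoded to nonzero if a |
+  // complete frame was decoded. |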
+ int frame_decoded = 0; |
+ int input_bytes_used = avcodec_decode_audio4( |
+ av_codec_context_.get(), |
+ av_frame_.get(), |
+ &frame_decoded, |
+ &av_packet_); |
+ *frame_decoded_out = frame_decoded != 0; |
+ |
+ // Unless we are interleaving, we're done with this allocator. |
+ if (!lpcm_util_) { |
+ allocator_ = nullptr; |
+ } |
+ |
+ // Make sure allocation occurred as expected. |
+ DCHECK(!frame_decoded || packet_size_ != 0); |
+ DCHECK(!frame_decoded || packet_buffer_ == av_frame_->data[0]); |
+ |
+ return input_bytes_used; |
+} |
+ |
+PacketPtr FfmpegAudioDecoder::CreateOutputPacket(PayloadAllocator* allocator) { |
+ DCHECK(allocator); |
+ DCHECK(av_frame_); |
+ |
+ int64_t presentation_time = av_frame_->pts; |
+ if (presentation_time == AV_NOPTS_VALUE) { |
+ presentation_time = next_presentation_time_; |
johngro 2016/03/01 01:31:38: I don't think that you should do this. If the gen…
dalesat 2016/03/01 20:43:01: We should have design discussions like this in ano…
+ } |
+  // TODO(dalesat): Are we sure all decoders use frames as the time unit? |
+ |
+ uint64_t payload_size; |
+  void* payload_buffer; |
+ |
+ if (lpcm_util_) { |
+ // We need to interleave. The non-interleaved frames are in a buffer that |
+ // was allocated from allocator_. That buffer will get released later in |
+ // ReleaseBufferForAvFrame. We need a new buffer for the interleaved frames, |
+ // which we get from the provided allocator. |
+ DCHECK(stream_type_); |
+ DCHECK(stream_type_->lpcm()); |
+ payload_size = stream_type_->lpcm()->min_buffer_size(av_frame_->nb_samples); |
+ payload_buffer = allocator->AllocatePayloadBuffer(payload_size); |
+ |
+ lpcm_util_->Interleave( |
+ av_frame_->data[0], |
+ packet_size_, |
+ payload_buffer, |
+ av_frame_->nb_samples); |
+ } else { |
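+    // No interleaving needed. The decoded samples already live in a buffer |
+    // obtained from the provided allocator (see Decode), so hand that buffer |
+    // to the packet directly. |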
+ payload_size = packet_size_; |
+ payload_buffer = av_frame_->data[0]; |
johngro 2016/03/01 18:05:35: So, I think that ffmpeg is going to end up freeing…
dalesat 2016/03/03 20:41:10: This code actually works (ReleaseBufferForAvFrame…
+ } |
+ |
+ return Packet::Create( |
+ presentation_time, |
+ av_frame_->nb_samples, |
+ false, // The base class is responsible for end-of-stream. |
+ payload_size, |
+ payload_buffer, |
+ allocator); |
+} |
+ |
+int FfmpegAudioDecoder::AllocateBufferForAvFrame( |
+ AVCodecContext* av_codec_context, |
+ AVFrame* av_frame, |
+ int flags) { |
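+  // Called by ffmpeg (via the get_buffer2 hook) when the decoder needs a |
+  // buffer for a decoded frame. The buffer comes from allocator_: the |
+  // provided allocator when the samples are already interleaved, or the |
+  // default allocator when they'll be interleaved later (see the constructor |
+  // and Decode). |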
+ // CODEC_CAP_DR1 is required in order to do allocation this way. |
+ DCHECK(av_codec_context->codec->capabilities & CODEC_CAP_DR1); |
+ |
+ FfmpegAudioDecoder* self = |
+ reinterpret_cast<FfmpegAudioDecoder*>(av_codec_context->opaque); |
+ DCHECK(self); |
+ DCHECK(self->allocator_); |
+ DCHECK(self->packet_size_ == 0) << "multiple allocations per decode"; |
+ |
+ AVSampleFormat av_sample_format = |
+ static_cast<AVSampleFormat>(av_frame->format); |
+ |
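+  // Total buffer size needed for all channels of this frame, honoring the |
+  // channel alignment requirement. |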
+ int buffer_size = av_samples_get_buffer_size( |
+ &av_frame->linesize[0], |
+ av_codec_context->channels, |
+ av_frame->nb_samples, |
+ av_sample_format, |
+ FfmpegAudioDecoder::kChannelAlign); |
+ if (buffer_size < 0) { |
johngro 2016/03/01 18:05:35: <= 0. Also, need to make sure that the rest of the…
dalesat 2016/03/03 20:41:10: Opted to tolerate buffer_size == 0
+ return buffer_size; |
+ } |
+ |
+ uint8_t* buffer = static_cast<uint8_t*>( |
+ self->allocator_->AllocatePayloadBuffer(buffer_size)); |
+ |
+ if (!av_sample_fmt_is_planar(av_sample_format)) { |
+ // Samples are interleaved. There's just one buffer. |
+ av_frame->data[0] = buffer; |
+ } else { |
+ // Samples are not interleaved. There's one buffer per channel. |
+ int channels = av_codec_context->channels; |
+ int bytes_per_channel = buffer_size / channels; |
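+      // av_samples_get_buffer_size sized the buffer as one aligned plane per |
+      // channel, so this division is exact. |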
+ uint8_t* channel_buffer = buffer; |
+ |
+ if (channels <= AV_NUM_DATA_POINTERS) { |
+ // The buffer pointers will fit in av_frame->data. |
+ DCHECK_EQ(av_frame->extended_data, av_frame->data); |
+ for (int channel = 0; channel < channels; ++channel) { |
+ av_frame->data[channel] = channel_buffer; |
+ channel_buffer += bytes_per_channel; |
+ } |
+ } else { |
+ // Too many channels for av_frame->data. We have to use |
+      // av_frame->extended_data. |
+ av_frame->extended_data = static_cast<uint8_t**>( |
+ av_malloc(channels * sizeof(*av_frame->extended_data))); |
+ |
+ // The first AV_NUM_DATA_POINTERS go in both data and extended_data. |
+ int channel = 0; |
+ for (; channel < AV_NUM_DATA_POINTERS; ++channel) { |
+ av_frame->extended_data[channel] = av_frame->data[channel] = |
+ channel_buffer; |
+ channel_buffer += bytes_per_channel; |
+ } |
+ |
+ // The rest go only in extended_data. |
+ for (; channel < channels; ++channel) { |
+ av_frame->extended_data[channel] = channel_buffer; |
+ channel_buffer += bytes_per_channel; |
+ } |
+ } |
+ } |
+ |
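+  // Wrap the buffer in a reference-counted AVBuffer; ffmpeg calls |
+  // ReleaseBufferForAvFrame when the last reference to it is released. |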
+ av_frame->buf[0] = av_buffer_create( |
+ buffer, |
+ buffer_size, |
+ ReleaseBufferForAvFrame, |
+ self, |
+ 0); // flags |
+ |
+ // We lose the buffer above before CreatePacket gets called, so we save the |
johngro 2016/03/01 18:05:35: Why do we lose the buffer? Isn't the pointer to i…
dalesat 2016/03/03 20:41:10: Done.
+ // size here. |
+ self->packet_size_ = buffer_size; |
+ |
+ // This is just to make sure the buffer is used as we intended. |
+ self->packet_buffer_ = buffer; |
+ |
+ return 0; |
+} |
+ |
+void FfmpegAudioDecoder::ReleaseBufferForAvFrame( |
+ void* opaque, |
+ uint8_t* buffer) { |
+ FfmpegAudioDecoder* self = reinterpret_cast<FfmpegAudioDecoder*>(opaque); |
+ if (self->allocator_ != nullptr) { |
+ // Either the decoder is releasing this buffer before returning from |
+ // avcodec_decode_audio4, or we're interleaving. In either case, we need |
+ // to release this buffer, because it won't end up in an output packet. |
+ self->allocator_->ReleasePayloadBuffer(self->packet_size_, buffer); |
+ } |
+} |
+ |
+} // namespace media |
+} // namespace mojo |