Chromium Code Reviews

Side by Side Diff: services/media/framework_ffmpeg/ffmpeg_audio_decoder.cc

Issue 1686363002: Motown: ffmpeg implementations of framework 'parts' (Closed) Base URL: https://github.com/domokit/mojo.git@master
Patch Set: Retype some const unique_ptr<T>& parameters to const T&. Created 4 years, 10 months ago
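The patch set title reflects a common C++ guideline: a function that only inspects an object should take `const T&` rather than `const std::unique_ptr<T>&`, because the latter needlessly forces every caller to own the object through a unique_ptr. A minimal sketch with hypothetical names (Widget is illustrative, not a class from this patch):

```cpp
#include <memory>

struct Widget { int value = 0; };

// Before: callers can only pass Widgets owned by a unique_ptr.
int Get(const std::unique_ptr<Widget>& w) { return w->value; }

// After: any Widget works, regardless of how it is owned.
int GetRef(const Widget& w) { return w.value; }

int main() {
  auto owned = std::make_unique<Widget>();
  Widget on_stack;
  GetRef(*owned);    // unique_ptr-owned object: dereference and pass.
  GetRef(on_stack);  // Stack object: works too.
}
```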
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/logging.h"
#include "services/media/framework_ffmpeg/ffmpeg_audio_decoder.h"

namespace mojo {
namespace media {

FfmpegAudioDecoder::FfmpegAudioDecoder(AVCodecContext* av_codec_context)
    : FfmpegDecoderBase(av_codec_context) {
  DCHECK(av_codec_context_);
  av_codec_context_->opaque = this;
  av_codec_context_->get_buffer2 = AllocateBufferForAvFrame;
  av_codec_context_->refcounted_frames = 1;

  if (av_sample_fmt_is_planar(av_codec_context->sample_fmt)) {
johngro 2016/03/01 18:05:35 DCHECK(av_codec_context_->channels); There is a n…
dalesat 2016/03/03 20:41:10 Done.
    // Prepare for interleaving.
    stream_type_ = output_stream_type();
    lpcm_util_ = LpcmUtil::Create(*stream_type_->lpcm());
    // Because we'll be copying the output frames when we interleave, we use
    // the default allocator to make buffers for the non-interleaved frames.
    // When we interleave, we'll get the output buffer from the provided
    // allocator.
    allocator_ = PayloadAllocator::GetDefault();
  }
}
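For context on the branch above: av_sample_fmt_is_planar() distinguishes FFmpeg's two sample layouts, and only the planar one needs the interleaving path. A small standalone illustration (not part of the patch):

```cpp
extern "C" {
#include <libavutil/samplefmt.h>
}
#include <cassert>

int main() {
  // Interleaved: one buffer, samples alternate L0 R0 L1 R1 ...
  assert(!av_sample_fmt_is_planar(AV_SAMPLE_FMT_S16));
  // Planar: one buffer per channel, data[0] = L0 L1 ..., data[1] = R0 R1 ...
  assert(av_sample_fmt_is_planar(AV_SAMPLE_FMT_S16P));
}
```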

FfmpegAudioDecoder::~FfmpegAudioDecoder() {}

int FfmpegAudioDecoder::Decode(
    PayloadAllocator* allocator,
    bool* frame_decoded_out) {
  DCHECK(allocator);
  DCHECK(frame_decoded_out);
  DCHECK(av_codec_context_);
  DCHECK(av_frame_);

  // These get set in AllocateBufferForAvFrame.
  packet_size_ = 0;
  packet_buffer_ = nullptr;

  // Use the provided allocator unless we intend to interleave later.
  if (!lpcm_util_) {
    allocator_ = allocator;
  }

  int frame_decoded = 0;
  int input_bytes_used = avcodec_decode_audio4(
      av_codec_context_.get(),
      av_frame_.get(),
      &frame_decoded,
      &av_packet_);
  *frame_decoded_out = frame_decoded != 0;

  // Unless we are interleaving, we're done with this allocator.
  if (!lpcm_util_) {
    allocator_ = nullptr;
  }

  // Make sure allocation occurred as expected.
  DCHECK(!frame_decoded || packet_size_ != 0);
  DCHECK(!frame_decoded || packet_buffer_ == av_frame_->data[0]);

  return input_bytes_used;
}
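Decode() returns the number of input bytes consumed, mirroring avcodec_decode_audio4's contract: one packet can yield several frames, so callers loop until the packet is drained. A hedged sketch of that caller-side loop (hypothetical helper, using the same FFmpeg API this patch targets; the API was later superseded by avcodec_send_packet/avcodec_receive_frame):

```cpp
extern "C" {
#include <libavcodec/avcodec.h>
}

// Hypothetical caller: drain one AVPacket, decoding as many frames as it
// holds. avcodec_decode_audio4 returns bytes consumed, or a negative error.
bool DecodeWholePacket(AVCodecContext* ctx, AVPacket* packet, AVFrame* frame) {
  while (packet->size > 0) {
    int got_frame = 0;
    int used = avcodec_decode_audio4(ctx, frame, &got_frame, packet);
    if (used < 0) {
      return false;  // Decode error.
    }
    packet->data += used;
    packet->size -= used;
    if (got_frame) {
      // ... consume the frame, then drop our reference so the buffer's
      // free callback (ReleaseBufferForAvFrame here) can eventually run.
      av_frame_unref(frame);
    }
  }
  return true;
}
```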

PacketPtr FfmpegAudioDecoder::CreateOutputPacket(PayloadAllocator* allocator) {
  DCHECK(allocator);
  DCHECK(av_frame_);

  int64_t presentation_time = av_frame_->pts;
  if (presentation_time == AV_NOPTS_VALUE) {
    presentation_time = next_presentation_time_;
johngro 2016/03/01 01:31:38 I don't think that you should do this. If the gen…
dalesat 2016/03/01 20:43:01 We should have design discussions like this in ano…
  }
  // TODO(dalesat): Are we sure all decoders use frames as time unit?

  uint64_t payload_size;
  void* payload_buffer;

  if (lpcm_util_) {
    // We need to interleave. The non-interleaved frames are in a buffer that
    // was allocated from allocator_. That buffer will get released later in
    // ReleaseBufferForAvFrame. We need a new buffer for the interleaved
    // frames, which we get from the provided allocator.
    DCHECK(stream_type_);
    DCHECK(stream_type_->lpcm());
    payload_size = stream_type_->lpcm()->min_buffer_size(av_frame_->nb_samples);
    payload_buffer = allocator->AllocatePayloadBuffer(payload_size);

    lpcm_util_->Interleave(
        av_frame_->data[0],
        packet_size_,
        payload_buffer,
        av_frame_->nb_samples);
  } else {
    payload_size = packet_size_;
    payload_buffer = av_frame_->data[0];
johngro 2016/03/01 18:05:35 So, I think that ffmpeg is going to end up freeing…
dalesat 2016/03/03 20:41:10 This code actually works (ReleaseBufferForAvFrame…
  }

  return Packet::Create(
      presentation_time,
      av_frame_->nb_samples,
      false,  // The base class is responsible for end-of-stream.
      payload_size,
      payload_buffer,
      allocator);
}
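LpcmUtil::Interleave is project code that doesn't appear in this diff; as an assumption about what it does, interleaving boils down to the per-sample copy below (illustrative only: float samples, channel planes laid out back to back as AllocateBufferForAvFrame arranges them):

```cpp
#include <cstddef>
#include <cstdint>

// Illustrative only: copy planar samples into one interleaved buffer.
// `in` points at channel 0's plane; planes sit bytes_per_channel apart,
// matching the layout AllocateBufferForAvFrame builds below.
void InterleavePlanarFloat(const uint8_t* in,
                           size_t bytes_per_channel,
                           float* out,
                           size_t frame_count,
                           size_t channel_count) {
  for (size_t ch = 0; ch < channel_count; ++ch) {
    const float* plane =
        reinterpret_cast<const float*>(in + ch * bytes_per_channel);
    for (size_t frame = 0; frame < frame_count; ++frame) {
      out[frame * channel_count + ch] = plane[frame];
    }
  }
}
```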

int FfmpegAudioDecoder::AllocateBufferForAvFrame(
    AVCodecContext* av_codec_context,
    AVFrame* av_frame,
    int flags) {
  // CODEC_CAP_DR1 is required in order to do allocation this way.
  DCHECK(av_codec_context->codec->capabilities & CODEC_CAP_DR1);

  FfmpegAudioDecoder* self =
      reinterpret_cast<FfmpegAudioDecoder*>(av_codec_context->opaque);
  DCHECK(self);
  DCHECK(self->allocator_);
  DCHECK(self->packet_size_ == 0) << "multiple allocations per decode";

  AVSampleFormat av_sample_format =
      static_cast<AVSampleFormat>(av_frame->format);

  int buffer_size = av_samples_get_buffer_size(
      &av_frame->linesize[0],
      av_codec_context->channels,
      av_frame->nb_samples,
      av_sample_format,
      FfmpegAudioDecoder::kChannelAlign);
  if (buffer_size < 0) {
johngro 2016/03/01 18:05:35 <= 0 Also, need to make sure that the rest of the…
dalesat 2016/03/03 20:41:10 Opted to tolerate buffer_size == 0.
    return buffer_size;
  }

  uint8_t* buffer = static_cast<uint8_t*>(
      self->allocator_->AllocatePayloadBuffer(buffer_size));

  if (!av_sample_fmt_is_planar(av_sample_format)) {
    // Samples are interleaved. There's just one buffer.
    av_frame->data[0] = buffer;
  } else {
    // Samples are not interleaved. There's one buffer per channel.
    int channels = av_codec_context->channels;
    int bytes_per_channel = buffer_size / channels;
    uint8_t* channel_buffer = buffer;

    if (channels <= AV_NUM_DATA_POINTERS) {
      // The buffer pointers will fit in av_frame->data.
      DCHECK_EQ(av_frame->extended_data, av_frame->data);
      for (int channel = 0; channel < channels; ++channel) {
        av_frame->data[channel] = channel_buffer;
        channel_buffer += bytes_per_channel;
      }
    } else {
      // Too many channels for av_frame->data. We have to use
      // av_frame->extended_data.
      av_frame->extended_data = static_cast<uint8_t**>(
          av_malloc(channels * sizeof(*av_frame->extended_data)));

      // The first AV_NUM_DATA_POINTERS go in both data and extended_data.
      int channel = 0;
      for (; channel < AV_NUM_DATA_POINTERS; ++channel) {
        av_frame->extended_data[channel] = av_frame->data[channel] =
            channel_buffer;
        channel_buffer += bytes_per_channel;
      }

      // The rest go only in extended_data.
      for (; channel < channels; ++channel) {
        av_frame->extended_data[channel] = channel_buffer;
        channel_buffer += bytes_per_channel;
      }
    }
  }

  av_frame->buf[0] = av_buffer_create(
      buffer,
      buffer_size,
      ReleaseBufferForAvFrame,
      self,
      0);  // flags

  // We lose the buffer above before CreateOutputPacket gets called, so we
johngro 2016/03/01 18:05:35 Why do we lose the buffer? Isn't the pointer to i…
dalesat 2016/03/03 20:41:10 Done.
  // save the size here.
  self->packet_size_ = buffer_size;

  // This is just to make sure the buffer is used as we intended.
  self->packet_buffer_ = buffer;

  return 0;
}
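av_buffer_create wraps an externally allocated buffer in FFmpeg's reference counting, and the callback passed in (ReleaseBufferForAvFrame above) runs only when the last reference is dropped; with refcounted_frames = 1 set in the constructor, the decoded frame itself holds such a reference. A minimal standalone sketch of that contract:

```cpp
#include <cstdio>
extern "C" {
#include <libavutil/buffer.h>
#include <libavutil/mem.h>
}

static void FreeIt(void* opaque, uint8_t* data) {
  std::printf("last reference dropped; freeing buffer\n");
  av_free(data);
}

int main() {
  uint8_t* raw = static_cast<uint8_t*>(av_malloc(4096));
  AVBufferRef* ref = av_buffer_create(raw, 4096, FreeIt,
                                      /*opaque=*/nullptr, /*flags=*/0);
  AVBufferRef* second = av_buffer_ref(ref);  // Refcount: 2.
  av_buffer_unref(&ref);     // Refcount: 1. FreeIt has not run yet.
  av_buffer_unref(&second);  // Refcount: 0. FreeIt runs now.
}
```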

void FfmpegAudioDecoder::ReleaseBufferForAvFrame(
    void* opaque,
    uint8_t* buffer) {
  FfmpegAudioDecoder* self = reinterpret_cast<FfmpegAudioDecoder*>(opaque);
  if (self->allocator_ != nullptr) {
    // Either the decoder is releasing this buffer before returning from
    // avcodec_decode_audio4, or we're interleaving. In either case, we need
    // to release this buffer, because it won't end up in an output packet.
    self->allocator_->ReleasePayloadBuffer(self->packet_size_, buffer);
  }
}

}  // namespace media
}  // namespace mojo