Chromium Code Reviews

Side by Side Diff: services/media/framework_ffmpeg/ffmpeg_audio_decoder.cc

Issue 1686363002: Motown: ffmpeg implementations of framework 'parts' (Closed)
Base URL: https://github.com/domokit/mojo.git@master
Patch Set: Changed the way AVBuffer allocation/deallocation is done in the ffmpeg audio decoder.
1 // Copyright 2016 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "base/logging.h"
6 #include "services/media/framework_ffmpeg/ffmpeg_audio_decoder.h"
7
8 namespace mojo {
9 namespace media {
10
11 FfmpegAudioDecoder::FfmpegAudioDecoder(AvCodecContextPtr av_codec_context) :
12 FfmpegDecoderBase(std::move(av_codec_context)) {
13 DCHECK(context());
14 DCHECK(context()->channels > 0);
15
16 context()->opaque = this;
17 context()->get_buffer2 = AllocateBufferForAvFrame;
18 context()->refcounted_frames = 1;
19
20 if (av_sample_fmt_is_planar(context()->sample_fmt)) {
21 // Prepare for interleaving.
22 stream_type_ = output_stream_type();
23 lpcm_util_ = LpcmUtil::Create(*stream_type_->lpcm());
24 }
25 }
26
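The constructor only installs the interleaving helper when the codec's output format is planar. For reference, here is a standalone sketch (not part of the reviewed file) of how av_sample_fmt_is_planar distinguishes the two layouts; the format constants and the function are standard libavutil:

extern "C" {
#include "libavutil/samplefmt.h"
}

// Planar formats (e.g. AV_SAMPLE_FMT_FLTP, common for AAC) store each
// channel's samples contiguously and need interleaving before they can be
// treated as LPCM; packed formats (e.g. AV_SAMPLE_FMT_S16) are already
// interleaved.
bool NeedsInterleaving(AVSampleFormat format) {
  return av_sample_fmt_is_planar(format) != 0;
}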
27 FfmpegAudioDecoder::~FfmpegAudioDecoder() {}
28
29 int FfmpegAudioDecoder::Decode(
30 PayloadAllocator* allocator,
31 bool* frame_decoded_out) {
32 DCHECK(allocator);
33 DCHECK(frame_decoded_out);
34 DCHECK(context());
35 DCHECK(frame());
36
37 // Use the provided allocator (for allocations in AllocateBufferForAvFrame)
38 // unless we intend to interleave later, in which case use the default
39 // allocator. We'll interleave into a buffer from the provided allocator
40 // in CreateOutputPacket.
41 allocator_ = lpcm_util_ ? PayloadAllocator::GetDefault() : allocator;
42
43 int frame_decoded = 0;
44 int input_bytes_used = avcodec_decode_audio4(
45 context().get(),
46 frame().get(),
47 &frame_decoded,
48 &packet());
49 *frame_decoded_out = frame_decoded != 0;
50
51 // We're done with this allocator.
52 allocator_ = nullptr;
53
54 return input_bytes_used;
55 }
56
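Decode() is a thin wrapper around avcodec_decode_audio4. As a rough standalone sketch of that API's contract (assuming the FFmpeg 2.x-era API this CL targets; not part of the reviewed file): the return value is the number of input bytes consumed (negative on error), *got_frame reports whether a complete frame was produced, and a packet may take several calls to drain:

extern "C" {
#include "libavcodec/avcodec.h"
}

// Illustrative decode loop; error handling and frame hand-off are elided.
int DecodeWholePacket(AVCodecContext* context, AVFrame* frame, AVPacket* packet) {
  while (packet->size > 0) {
    int got_frame = 0;
    int bytes_used = avcodec_decode_audio4(context, frame, &got_frame, packet);
    if (bytes_used < 0) {
      return bytes_used;  // Decode error.
    }
    if (got_frame) {
      // A complete frame is now in |frame|; hand it off to the caller here.
    }
    packet->data += bytes_used;
    packet->size -= bytes_used;
  }
  return 0;
}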
57 PacketPtr FfmpegAudioDecoder::CreateOutputPacket(PayloadAllocator* allocator) {
58 DCHECK(allocator);
59 DCHECK(frame());
60
61 int64_t presentation_time = frame()->pts;
62 if (presentation_time == AV_NOPTS_VALUE) {
63 // TODO(dalesat): Adjust next_presentation_time_ for seek/non-zero start.
64 presentation_time = next_presentation_time_;
65 next_presentation_time_ += frame()->nb_samples;
66 }
67
68 uint64_t payload_size;
69 void* payload_buffer;
70
71 AvBufferContext* av_buffer_context =
72 reinterpret_cast<AvBufferContext*>(av_buffer_get_opaque(frame()->buf[0]));
73
74 if (lpcm_util_) {
75 // We need to interleave. The non-interleaved frames are in a buffer that
76 // was allocated from the default allocator. That buffer will get released
77 // later in ReleaseBufferForAvFrame. We need a new buffer for the
78 // interleaved frames, which we get from the provided allocator.
79 DCHECK(stream_type_);
80 DCHECK(stream_type_->lpcm());
81 payload_size = stream_type_->lpcm()->min_buffer_size(frame()->nb_samples);
82 payload_buffer = allocator->AllocatePayloadBuffer(payload_size);
83
84 lpcm_util_->Interleave(
85 av_buffer_context->buffer(),
86 av_buffer_context->size(),
87 payload_buffer,
88 frame()->nb_samples);
89 } else {
90 // We don't need to interleave. The interleaved frames are in a buffer that
91 // was allocated from the correct allocator. We take ownership of the buffer
92 // by calling Release here so that ReleaseBufferForAvFrame won't release it.
93 payload_size = av_buffer_context->size();
94 payload_buffer = av_buffer_context->Release();
95 }
96
97 return Packet::Create(
98 presentation_time,
99 frame()->nb_samples,
100 false, // The base class is responsible for end-of-stream.
101 payload_size,
102 payload_buffer,
103 allocator);
104 }
105
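The interleave branch above copies planar samples into a buffer obtained from the provided allocator. LpcmUtil::Interleave is defined elsewhere in the framework; purely as an illustration of the operation (a hypothetical helper, assuming planar float input), interleaving converts channel-major storage into frame-major storage:

#include <cstddef>
#include <cstdint>

// Hypothetical illustration only; the real work is done by LpcmUtil.
void InterleaveFloatPlanar(const uint8_t* in,  // planar source, channels back to back
                           size_t in_size,     // total size of the planar buffer in bytes
                           float* out,         // interleaved destination
                           int frame_count,
                           int channel_count) {
  const size_t bytes_per_channel = in_size / channel_count;
  for (int channel = 0; channel < channel_count; ++channel) {
    const float* channel_samples =
        reinterpret_cast<const float*>(in + channel * bytes_per_channel);
    for (int frame = 0; frame < frame_count; ++frame) {
      out[frame * channel_count + channel] = channel_samples[frame];
    }
  }
}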
106 PacketPtr FfmpegAudioDecoder::CreateOutputEndOfStreamPacket() {
107 return Packet::CreateEndOfStream(next_presentation_time_);
108 }
109
110 int FfmpegAudioDecoder::AllocateBufferForAvFrame(
111 AVCodecContext* av_codec_context,
112 AVFrame* av_frame,
113 int flags) {
114 // CODEC_CAP_DR1 is required in order to do allocation this way.
115 DCHECK(av_codec_context->codec->capabilities & CODEC_CAP_DR1);
116
117 FfmpegAudioDecoder* self =
118 reinterpret_cast<FfmpegAudioDecoder*>(av_codec_context->opaque);
119 DCHECK(self);
120 DCHECK(self->allocator_);
121
122 AVSampleFormat av_sample_format =
123 static_cast<AVSampleFormat>(av_frame->format);
124
125 int buffer_size = av_samples_get_buffer_size(
126 &av_frame->linesize[0],
127 av_codec_context->channels,
128 av_frame->nb_samples,
129 av_sample_format,
130 FfmpegAudioDecoder::kChannelAlign);
131 if (buffer_size < 0) {
132 LOG(WARNING) << "av_samples_get_buffer_size failed";
133 return buffer_size;
134 }
135
136 AvBufferContext* av_buffer_context =
137 new AvBufferContext(buffer_size, self->allocator_);
138 uint8_t* buffer = av_buffer_context->buffer();
139
140 if (!av_sample_fmt_is_planar(av_sample_format)) {
141 // Samples are interleaved. There's just one buffer.
142 av_frame->data[0] = buffer;
143 } else {
144 // Samples are not interleaved. There's one buffer per channel.
145 int channels = av_codec_context->channels;
146 int bytes_per_channel = buffer_size / channels;
147 uint8_t* channel_buffer = buffer;
148
149 DCHECK(buffer != nullptr || bytes_per_channel == 0);
150
151 if (channels <= AV_NUM_DATA_POINTERS) {
152 // The buffer pointers will fit in av_frame->data.
153 DCHECK_EQ(av_frame->extended_data, av_frame->data);
154 for (int channel = 0; channel < channels; ++channel) {
155 av_frame->data[channel] = channel_buffer;
156 channel_buffer += bytes_per_channel;
157 }
158 } else {
159 // Too many channels for av_frame->data. We have to use
160 // av_frame->extended_data.
161 av_frame->extended_data = static_cast<uint8_t**>(
162 av_malloc(channels * sizeof(*av_frame->extended_data)));
163
164 // The first AV_NUM_DATA_POINTERS go in both data and extended_data.
165 int channel = 0;
166 for (; channel < AV_NUM_DATA_POINTERS; ++channel) {
167 av_frame->extended_data[channel] = av_frame->data[channel] =
168 channel_buffer;
169 channel_buffer += bytes_per_channel;
170 }
171
172 // The rest go only in extended_data.
173 for (; channel < channels; ++channel) {
174 av_frame->extended_data[channel] = channel_buffer;
175 channel_buffer += bytes_per_channel;
176 }
177 }
178 }
179
180 av_frame->buf[0] = av_buffer_create(
181 buffer,
182 buffer_size,
183 ReleaseBufferForAvFrame,
184 av_buffer_context,
185 0); // flags
186
187 return 0;
188 }
189
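AllocateBufferForAvFrame hands the buffer to FFmpeg via av_buffer_create, which makes the deallocation path refcounted: ReleaseBufferForAvFrame runs only when the last AVBufferRef to the buffer is dropped. A minimal standalone sketch of that contract (the names below are illustrative, not part of the reviewed file):

extern "C" {
#include "libavutil/buffer.h"
#include "libavutil/mem.h"
}

// Free callback with the signature av_buffer_create expects; in the decoder,
// this role is played by ReleaseBufferForAvFrame.
static void FreeExampleBuffer(void* opaque, uint8_t* data) {
  av_free(data);
}

void BufferRefcountExample() {
  uint8_t* data = static_cast<uint8_t*>(av_malloc(4096));
  AVBufferRef* ref = av_buffer_create(data, 4096, FreeExampleBuffer, nullptr, 0);
  AVBufferRef* second_ref = av_buffer_ref(ref);  // Refcount is now 2.
  av_buffer_unref(&ref);         // Buffer stays alive; second_ref still holds it.
  av_buffer_unref(&second_ref);  // Last reference gone; FreeExampleBuffer runs.
}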
190 void FfmpegAudioDecoder::ReleaseBufferForAvFrame(
191 void* opaque,
192 uint8_t* buffer) {
193 AvBufferContext* av_buffer_context =
194 reinterpret_cast<AvBufferContext*>(opaque);
195 DCHECK(av_buffer_context);
196 // Either this buffer has already been released to someone else's ownership,
197 // or it's the same as the buffer parameter.
johngro 2016/03/03 23:03:16 this still makes me vaguely uneasy... The FFmpeg
198 DCHECK(
199 av_buffer_context->buffer() == nullptr ||
200 av_buffer_context->buffer() == buffer);
201 delete av_buffer_context;
202 }
203
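The DCHECK above encodes the ownership contract of AvBufferContext: either Release() has already handed the payload to another owner (buffer() is null), or the context still owns the buffer being freed. The real class is declared in ffmpeg_audio_decoder.h and allocates through PayloadAllocator; the following is only a hypothetical sketch of that contract, using plain new[]/delete[]:

#include <cstdint>

// Hypothetical sketch; not the class under review.
class AvBufferContextSketch {
 public:
  explicit AvBufferContextSketch(uint64_t size)
      : size_(size), buffer_(size == 0 ? nullptr : new uint8_t[size]) {}

  // Deleting a null buffer_ is a no-op, so nothing is freed after Release().
  ~AvBufferContextSketch() { delete[] buffer_; }

  uint8_t* buffer() const { return buffer_; }
  uint64_t size() const { return size_; }

  // Transfers ownership of the payload to the caller and nulls buffer().
  uint8_t* Release() {
    uint8_t* result = buffer_;
    buffer_ = nullptr;
    return result;
  }

 private:
  uint64_t size_;
  uint8_t* buffer_;
};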
204 } // namespace media
205 } // namespace mojo