| OLD | NEW |
| 1 // Copyright 2016 The Chromium Authors. All rights reserved. | 1 // Copyright 2016 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "base/logging.h" | 5 #include "base/logging.h" |
| 6 #include "services/media/framework_ffmpeg/ffmpeg_audio_decoder.h" | 6 #include "services/media/framework_ffmpeg/ffmpeg_audio_decoder.h" |
| 7 | 7 |
| 8 namespace mojo { | 8 namespace mojo { |
| 9 namespace media { | 9 namespace media { |
| 10 | 10 |
| 11 FfmpegAudioDecoder::FfmpegAudioDecoder(AvCodecContextPtr av_codec_context) : | 11 FfmpegAudioDecoder::FfmpegAudioDecoder(AvCodecContextPtr av_codec_context) |
| 12 FfmpegDecoderBase(std::move(av_codec_context)) { | 12 : FfmpegDecoderBase(std::move(av_codec_context)) { |
| 13 DCHECK(context()); | 13 DCHECK(context()); |
| 14 DCHECK(context()->channels > 0); | 14 DCHECK(context()->channels > 0); |
| 15 | 15 |
| 16 context()->opaque = this; | 16 context()->opaque = this; |
| 17 context()->get_buffer2 = AllocateBufferForAvFrame; | 17 context()->get_buffer2 = AllocateBufferForAvFrame; |
| 18 context()->refcounted_frames = 1; | 18 context()->refcounted_frames = 1; |
| 19 | 19 |
| 20 if (av_sample_fmt_is_planar(context()->sample_fmt)) { | 20 if (av_sample_fmt_is_planar(context()->sample_fmt)) { |
| 21 // Prepare for interleaving. | 21 // Prepare for interleaving. |
| 22 stream_type_ = output_stream_type(); | 22 stream_type_ = output_stream_type(); |
| 23 lpcm_util_ = LpcmUtil::Create(*stream_type_->lpcm()); | 23 lpcm_util_ = LpcmUtil::Create(*stream_type_->lpcm()); |
| 24 } | 24 } |
| 25 } | 25 } |
| 26 | 26 |
| 27 FfmpegAudioDecoder::~FfmpegAudioDecoder() {} | 27 FfmpegAudioDecoder::~FfmpegAudioDecoder() {} |
| 28 | 28 |
| 29 void FfmpegAudioDecoder::Flush() { | 29 void FfmpegAudioDecoder::Flush() { |
| 30 FfmpegDecoderBase::Flush(); | 30 FfmpegDecoderBase::Flush(); |
| 31 next_pts_= Packet::kUnknownPts; | 31 next_pts_ = Packet::kUnknownPts; |
| 32 } | 32 } |
| 33 | 33 |
| 34 int FfmpegAudioDecoder::Decode( | 34 int FfmpegAudioDecoder::Decode(const AVPacket& av_packet, |
| 35 const AVPacket& av_packet, | 35 const ffmpeg::AvFramePtr& av_frame_ptr, |
| 36 const ffmpeg::AvFramePtr& av_frame_ptr, | 36 PayloadAllocator* allocator, |
| 37 PayloadAllocator* allocator, | 37 bool* frame_decoded_out) { |
| 38 bool* frame_decoded_out) { | |
| 39 DCHECK(allocator); | 38 DCHECK(allocator); |
| 40 DCHECK(frame_decoded_out); | 39 DCHECK(frame_decoded_out); |
| 41 DCHECK(context()); | 40 DCHECK(context()); |
| 42 DCHECK(av_frame_ptr); | 41 DCHECK(av_frame_ptr); |
| 43 | 42 |
| 44 if (next_pts_ == Packet::kUnknownPts) { | 43 if (next_pts_ == Packet::kUnknownPts) { |
| 45 if (av_packet.pts == AV_NOPTS_VALUE) { | 44 if (av_packet.pts == AV_NOPTS_VALUE) { |
| 46 next_pts_ = 0; | 45 next_pts_ = 0; |
| 47 } else { | 46 } else { |
| 48 next_pts_ = av_packet.pts; | 47 next_pts_ = av_packet.pts; |
| 49 } | 48 } |
| 50 } | 49 } |
| 51 | 50 |
| 52 // Use the provided allocator (for allocations in AllocateBufferForAvFrame) | 51 // Use the provided allocator (for allocations in AllocateBufferForAvFrame) |
| 53 // unless we intend to interleave later, in which case use the default | 52 // unless we intend to interleave later, in which case use the default |
| 54 // allocator. We'll interleave into a buffer from the provided allocator | 53 // allocator. We'll interleave into a buffer from the provided allocator |
| 55 // in CreateOutputPacket. | 54 // in CreateOutputPacket. |
| 56 allocator_ = lpcm_util_ ? PayloadAllocator::GetDefault() : allocator; | 55 allocator_ = lpcm_util_ ? PayloadAllocator::GetDefault() : allocator; |
| 57 | 56 |
| 58 int frame_decoded = 0; | 57 int frame_decoded = 0; |
| 59 int input_bytes_used = avcodec_decode_audio4( | 58 int input_bytes_used = avcodec_decode_audio4( |
| 60 context().get(), | 59 context().get(), av_frame_ptr.get(), &frame_decoded, &av_packet); |
| 61 av_frame_ptr.get(), | |
| 62 &frame_decoded, | |
| 63 &av_packet); | |
| 64 *frame_decoded_out = frame_decoded != 0; | 60 *frame_decoded_out = frame_decoded != 0; |
| 65 | 61 |
| 66 // We're done with this allocator. | 62 // We're done with this allocator. |
| 67 allocator_ = nullptr; | 63 allocator_ = nullptr; |
| 68 | 64 |
| 69 return input_bytes_used; | 65 return input_bytes_used; |
| 70 } | 66 } |
| 71 | 67 |
| 72 PacketPtr FfmpegAudioDecoder::CreateOutputPacket( | 68 PacketPtr FfmpegAudioDecoder::CreateOutputPacket(const AVFrame& av_frame, |
| 73 const AVFrame& av_frame, | 69 PayloadAllocator* allocator) { |
| 74 PayloadAllocator* allocator) { | |
| 75 DCHECK(allocator); | 70 DCHECK(allocator); |
| 76 | 71 |
| 77 int64_t pts = av_frame.pts; | 72 int64_t pts = av_frame.pts; |
| 78 if (pts == AV_NOPTS_VALUE) { | 73 if (pts == AV_NOPTS_VALUE) { |
| 79 pts = next_pts_; | 74 pts = next_pts_; |
| 80 next_pts_ += av_frame.nb_samples; | 75 next_pts_ += av_frame.nb_samples; |
| 81 } | 76 } |
| 82 | 77 |
| 83 uint64_t payload_size; | 78 uint64_t payload_size; |
| 84 void *payload_buffer; | 79 void* payload_buffer; |
| 85 | 80 |
| 86 AvBufferContext* av_buffer_context = | 81 AvBufferContext* av_buffer_context = |
| 87 reinterpret_cast<AvBufferContext*>(av_buffer_get_opaque(av_frame.buf[0])); | 82 reinterpret_cast<AvBufferContext*>(av_buffer_get_opaque(av_frame.buf[0])); |
| 88 | 83 |
| 89 if (lpcm_util_) { | 84 if (lpcm_util_) { |
| 90 // We need to interleave. The non-interleaved frames are in a buffer that | 85 // We need to interleave. The non-interleaved frames are in a buffer that |
| 91 // was allocated from the default allocator. That buffer will get released | 86 // was allocated from the default allocator. That buffer will get released |
| 92 // later in ReleaseBufferForAvFrame. We need a new buffer for the | 87 // later in ReleaseBufferForAvFrame. We need a new buffer for the |
| 93 // interleaved frames, which we get from the provided allocator. | 88 // interleaved frames, which we get from the provided allocator. |
| 94 DCHECK(stream_type_); | 89 DCHECK(stream_type_); |
| 95 DCHECK(stream_type_->lpcm()); | 90 DCHECK(stream_type_->lpcm()); |
| 96 payload_size = stream_type_->lpcm()->min_buffer_size(av_frame.nb_samples); | 91 payload_size = stream_type_->lpcm()->min_buffer_size(av_frame.nb_samples); |
| 97 payload_buffer = allocator->AllocatePayloadBuffer(payload_size); | 92 payload_buffer = allocator->AllocatePayloadBuffer(payload_size); |
| 98 | 93 |
| 99 lpcm_util_->Interleave( | 94 lpcm_util_->Interleave(av_buffer_context->buffer(), |
| 100 av_buffer_context->buffer(), | 95 av_buffer_context->size(), payload_buffer, |
| 101 av_buffer_context->size(), | 96 av_frame.nb_samples); |
| 102 payload_buffer, | |
| 103 av_frame.nb_samples); | |
| 104 } else { | 97 } else { |
| 105 // We don't need to interleave. The interleaved frames are in a buffer that | 98 // We don't need to interleave. The interleaved frames are in a buffer that |
| 106 // was allocated from the correct allocator. We take ownership of the buffer | 99 // was allocated from the correct allocator. We take ownership of the buffer |
| 107 // by calling Release here so that ReleaseBufferForAvFrame won't release it. | 100 // by calling Release here so that ReleaseBufferForAvFrame won't release it. |
| 108 payload_size = av_buffer_context->size(); | 101 payload_size = av_buffer_context->size(); |
| 109 payload_buffer = av_buffer_context->Release(); | 102 payload_buffer = av_buffer_context->Release(); |
| 110 } | 103 } |
| 111 | 104 |
| 112 return Packet::Create( | 105 return Packet::Create( |
| 113 pts, | 106 pts, |
| 114 false, // The base class is responsible for end-of-stream. | 107 false, // The base class is responsible for end-of-stream. |
| 115 payload_size, | 108 payload_size, payload_buffer, allocator); |
| 116 payload_buffer, | |
| 117 allocator); | |
| 118 } | 109 } |
| 119 | 110 |
| 120 PacketPtr FfmpegAudioDecoder::CreateOutputEndOfStreamPacket() { | 111 PacketPtr FfmpegAudioDecoder::CreateOutputEndOfStreamPacket() { |
| 121 return Packet::CreateEndOfStream(next_pts_); | 112 return Packet::CreateEndOfStream(next_pts_); |
| 122 } | 113 } |
| 123 | 114 |
| 124 int FfmpegAudioDecoder::AllocateBufferForAvFrame( | 115 int FfmpegAudioDecoder::AllocateBufferForAvFrame( |
| 125 AVCodecContext* av_codec_context, | 116 AVCodecContext* av_codec_context, |
| 126 AVFrame* av_frame, | 117 AVFrame* av_frame, |
| 127 int flags) { | 118 int flags) { |
| 128 // CODEC_CAP_DR1 is required in order to do allocation this way. | 119 // CODEC_CAP_DR1 is required in order to do allocation this way. |
| 129 DCHECK(av_codec_context->codec->capabilities & CODEC_CAP_DR1); | 120 DCHECK(av_codec_context->codec->capabilities & CODEC_CAP_DR1); |
| 130 | 121 |
| 131 FfmpegAudioDecoder* self = | 122 FfmpegAudioDecoder* self = |
| 132 reinterpret_cast<FfmpegAudioDecoder*>(av_codec_context->opaque); | 123 reinterpret_cast<FfmpegAudioDecoder*>(av_codec_context->opaque); |
| 133 DCHECK(self); | 124 DCHECK(self); |
| 134 DCHECK(self->allocator_); | 125 DCHECK(self->allocator_); |
| 135 | 126 |
| 136 AVSampleFormat av_sample_format = | 127 AVSampleFormat av_sample_format = |
| 137 static_cast<AVSampleFormat>(av_frame->format); | 128 static_cast<AVSampleFormat>(av_frame->format); |
| 138 | 129 |
| 139 int buffer_size = av_samples_get_buffer_size( | 130 int buffer_size = av_samples_get_buffer_size( |
| 140 &av_frame->linesize[0], | 131 &av_frame->linesize[0], av_codec_context->channels, av_frame->nb_samples, |
| 141 av_codec_context->channels, | 132 av_sample_format, FfmpegAudioDecoder::kChannelAlign); |
| 142 av_frame->nb_samples, | |
| 143 av_sample_format, | |
| 144 FfmpegAudioDecoder::kChannelAlign); | |
| 145 if (buffer_size < 0) { | 133 if (buffer_size < 0) { |
| 146 LOG(WARNING) << "av_samples_get_buffer_size failed"; | 134 LOG(WARNING) << "av_samples_get_buffer_size failed"; |
| 147 return buffer_size; | 135 return buffer_size; |
| 148 } | 136 } |
| 149 | 137 |
| 150 AvBufferContext* av_buffer_context = | 138 AvBufferContext* av_buffer_context = |
| 151 new AvBufferContext(buffer_size, self->allocator_); | 139 new AvBufferContext(buffer_size, self->allocator_); |
| 152 uint8_t* buffer = av_buffer_context->buffer(); | 140 uint8_t* buffer = av_buffer_context->buffer(); |
| 153 | 141 |
| 154 if (!av_sample_fmt_is_planar(av_sample_format)) { | 142 if (!av_sample_fmt_is_planar(av_sample_format)) { |
| (...skipping 30 matching lines...) |
| 185 | 173 |
| 186 // The rest go only in extended_data. | 174 // The rest go only in extended_data. |
| 187 for (; channel < channels; ++channel) { | 175 for (; channel < channels; ++channel) { |
| 188 av_frame->extended_data[channel] = channel_buffer; | 176 av_frame->extended_data[channel] = channel_buffer; |
| 189 channel_buffer += bytes_per_channel; | 177 channel_buffer += bytes_per_channel; |
| 190 } | 178 } |
| 191 } | 179 } |
| 192 } | 180 } |
| 193 | 181 |
| 194 av_frame->buf[0] = av_buffer_create( | 182 av_frame->buf[0] = av_buffer_create( |
| 195 buffer, | 183 buffer, buffer_size, ReleaseBufferForAvFrame, av_buffer_context, |
| 196 buffer_size, | 184 0); // flags |
| 197 ReleaseBufferForAvFrame, | |
| 198 av_buffer_context, | |
| 199 0); // flags | |
| 200 | 185 |
| 201 return 0; | 186 return 0; |
| 202 } | 187 } |
| 203 | 188 |
| 204 void FfmpegAudioDecoder::ReleaseBufferForAvFrame( | 189 void FfmpegAudioDecoder::ReleaseBufferForAvFrame(void* opaque, |
| 205 void* opaque, | 190 uint8_t* buffer) { |
| 206 uint8_t* buffer) { | |
| 207 AvBufferContext* av_buffer_context = | 191 AvBufferContext* av_buffer_context = |
| 208 reinterpret_cast<AvBufferContext*>(opaque); | 192 reinterpret_cast<AvBufferContext*>(opaque); |
| 209 DCHECK(av_buffer_context); | 193 DCHECK(av_buffer_context); |
| 210 // Either this buffer has already been released to someone else's ownership, | 194 // Either this buffer has already been released to someone else's ownership, |
| 211 // or it's the same as the buffer parameter. | 195 // or it's the same as the buffer parameter. |
| 212 DCHECK( | 196 DCHECK(av_buffer_context->buffer() == nullptr || |
| 213 av_buffer_context->buffer() == nullptr || | 197 av_buffer_context->buffer() == buffer); |
| 214 av_buffer_context->buffer() == buffer); | |
| 215 delete av_buffer_context; | 198 delete av_buffer_context; |
| 216 } | 199 } |
| 217 | 200 |
| 218 } // namespace media | 201 } // namespace media |
| 219 } // namespace mojo | 202 } // namespace mojo |