OLD | NEW |
---|---|
1 // Copyright 2016 The Chromium Authors. All rights reserved. | 1 // Copyright 2016 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "base/logging.h" | 5 #include "base/logging.h" |
6 #include "services/media/framework_ffmpeg/ffmpeg_audio_decoder.h" | 6 #include "services/media/framework_ffmpeg/ffmpeg_audio_decoder.h" |
7 | 7 |
8 namespace mojo { | 8 namespace mojo { |
9 namespace media { | 9 namespace media { |
10 | 10 |
(...skipping 10 matching lines...) | |
21 // Prepare for interleaving. | 21 // Prepare for interleaving. |
22 stream_type_ = output_stream_type(); | 22 stream_type_ = output_stream_type(); |
23 lpcm_util_ = LpcmUtil::Create(*stream_type_->lpcm()); | 23 lpcm_util_ = LpcmUtil::Create(*stream_type_->lpcm()); |
24 } | 24 } |
25 } | 25 } |
26 | 26 |
27 FfmpegAudioDecoder::~FfmpegAudioDecoder() {} | 27 FfmpegAudioDecoder::~FfmpegAudioDecoder() {} |
28 | 28 |
29 void FfmpegAudioDecoder::Flush() { | 29 void FfmpegAudioDecoder::Flush() { |
30 FfmpegDecoderBase::Flush(); | 30 FfmpegDecoderBase::Flush(); |
31 next_presentation_time_ = Packet::kUnknownPresentationTime; | 31 next_pts_ = Packet::kUnknownPts; |
32 } | 32 } |
33 | 33 |
34 int FfmpegAudioDecoder::Decode( | 34 int FfmpegAudioDecoder::Decode( |
35 const AVPacket& av_packet, | 35 const AVPacket& av_packet, |
36 const AvFramePtr& av_frame_ptr, | 36 const AvFramePtr& av_frame_ptr, |
37 PayloadAllocator* allocator, | 37 PayloadAllocator* allocator, |
38 bool* frame_decoded_out) { | 38 bool* frame_decoded_out) { |
39 DCHECK(allocator); | 39 DCHECK(allocator); |
40 DCHECK(frame_decoded_out); | 40 DCHECK(frame_decoded_out); |
41 DCHECK(context()); | 41 DCHECK(context()); |
42 DCHECK(av_frame_ptr); | 42 DCHECK(av_frame_ptr); |
43 | 43 |
44 if (next_presentation_time_ == Packet::kUnknownPresentationTime) { | 44 if (next_pts_ == Packet::kUnknownPts) { |
45 if (av_packet.pts == AV_NOPTS_VALUE) { | 45 if (av_packet.pts == AV_NOPTS_VALUE) { |
46 next_presentation_time_ = 0; | 46 next_pts_ = 0; |
johngro
2016/03/21 19:47:56
I'm still a bit confused by this logic; e.g., why is
dalesat
2016/03/21 22:30:59
Acknowledged.
| |
47 } else { | 47 } else { |
48 next_presentation_time_ = av_packet.pts; | 48 next_pts_ = av_packet.pts; |
49 } | 49 } |
50 } | 50 } |
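The bookkeeping above (the part the review comment questions) amounts to: seed next_pts_ from the first packet, falling back to 0 when FFmpeg reports AV_NOPTS_VALUE, then (in CreateOutputPacket below) advance it by each frame's sample count whenever a decoded frame lacks its own PTS. A minimal sketch of that policy in isolation; PtsTracker, SeedFromPacket, PtsForFrame, and the value of the kUnknownPts sentinel are hypothetical stand-ins, not names from this CL:

```cpp
#include <cstdint>

extern "C" {
#include <libavcodec/avcodec.h>  // AV_NOPTS_VALUE
}

// Hypothetical helper mirroring the next_pts_ policy in this CL: PTS is
// counted in samples, an unknown PTS defaults to 0 at the start of the
// stream, and frames without a PTS are assumed to follow the previous frame
// contiguously.
class PtsTracker {
 public:
  static constexpr int64_t kUnknownPts = -1;  // assumed sentinel value

  // Called per input packet (see FfmpegAudioDecoder::Decode).
  void SeedFromPacket(int64_t packet_pts) {
    if (next_pts_ != kUnknownPts)
      return;
    next_pts_ = (packet_pts == AV_NOPTS_VALUE) ? 0 : packet_pts;
  }

  // Called per decoded frame (see CreateOutputPacket): returns the PTS to
  // stamp on the output packet and advances by the frame's sample count.
  int64_t PtsForFrame(int64_t frame_pts, int nb_samples) {
    if (frame_pts != AV_NOPTS_VALUE)
      return frame_pts;
    int64_t pts = next_pts_;
    next_pts_ += nb_samples;
    return pts;
  }

 private:
  int64_t next_pts_ = kUnknownPts;
};
```

Whether 0 is the right seed for a stream whose first packet carries no PTS is exactly the question raised in the comment above; the sketch only restates the CL's current behavior.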
51 | 51 |
52 // Use the provided allocator (for allocations in AllocateBufferForAvFrame) | 52 // Use the provided allocator (for allocations in AllocateBufferForAvFrame) |
53 // unless we intend to interleave later, in which case use the default | 53 // unless we intend to interleave later, in which case use the default |
54 // allocator. We'll interleave into a buffer from the provided allocator | 54 // allocator. We'll interleave into a buffer from the provided allocator |
55 // in CreateOutputPacket. | 55 // in CreateOutputPacket. |
56 allocator_ = lpcm_util_ ? PayloadAllocator::GetDefault() : allocator; | 56 allocator_ = lpcm_util_ ? PayloadAllocator::GetDefault() : allocator; |
57 | 57 |
58 int frame_decoded = 0; | 58 int frame_decoded = 0; |
59 int input_bytes_used = avcodec_decode_audio4( | 59 int input_bytes_used = avcodec_decode_audio4( |
60 context().get(), | 60 context().get(), |
61 av_frame_ptr.get(), | 61 av_frame_ptr.get(), |
62 &frame_decoded, | 62 &frame_decoded, |
63 &av_packet); | 63 &av_packet); |
64 *frame_decoded_out = frame_decoded != 0; | 64 *frame_decoded_out = frame_decoded != 0; |
65 | 65 |
66 // We're done with this allocator. | 66 // We're done with this allocator. |
67 allocator_ = nullptr; | 67 allocator_ = nullptr; |
68 | 68 |
69 return input_bytes_used; | 69 return input_bytes_used; |
70 } | 70 } |
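A usage note on the return value: avcodec_decode_audio4() may consume only part of the packet, so the caller is expected to loop until all input bytes are used. That loop lives in FfmpegDecoderBase, which is not part of this diff, so the sketch below is illustrative only (DecodePacket is a hypothetical driver, not the base class's actual code):

```cpp
#include "services/media/framework_ffmpeg/ffmpeg_audio_decoder.h"

extern "C" {
#include <libavcodec/avcodec.h>
}

// Illustrative only: shows the conventional avcodec_decode_audio4 pattern of
// feeding one AVPacket until it is fully consumed, calling Decode repeatedly.
void DecodePacket(FfmpegAudioDecoder* decoder,
                  AVPacket av_packet,
                  const AvFramePtr& av_frame_ptr,
                  PayloadAllocator* allocator) {
  while (av_packet.size > 0) {
    bool frame_decoded = false;
    int bytes_used =
        decoder->Decode(av_packet, av_frame_ptr, allocator, &frame_decoded);
    if (bytes_used < 0)
      break;  // decode error; real code would surface it
    if (frame_decoded) {
      // ... hand av_frame_ptr to CreateOutputPacket ...
    }
    av_packet.data += bytes_used;
    av_packet.size -= bytes_used;
  }
}
```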
71 | 71 |
72 PacketPtr FfmpegAudioDecoder::CreateOutputPacket( | 72 PacketPtr FfmpegAudioDecoder::CreateOutputPacket( |
73 const AVFrame& av_frame, | 73 const AVFrame& av_frame, |
74 PayloadAllocator* allocator) { | 74 PayloadAllocator* allocator) { |
75 DCHECK(allocator); | 75 DCHECK(allocator); |
76 | 76 |
77 int64_t presentation_time = av_frame.pts; | 77 int64_t pts = av_frame.pts; |
78 if (presentation_time == AV_NOPTS_VALUE) { | 78 if (pts == AV_NOPTS_VALUE) { |
79 presentation_time = next_presentation_time_; | 79 pts = next_pts_; |
80 next_presentation_time_ += av_frame.nb_samples; | 80 next_pts_ += av_frame.nb_samples; |
81 } | 81 } |
82 | 82 |
83 uint64_t payload_size; | 83 uint64_t payload_size; |
84 void *payload_buffer; | 84 void *payload_buffer; |
85 | 85 |
86 AvBufferContext* av_buffer_context = | 86 AvBufferContext* av_buffer_context = |
87 reinterpret_cast<AvBufferContext*>(av_buffer_get_opaque(av_frame.buf[0])); | 87 reinterpret_cast<AvBufferContext*>(av_buffer_get_opaque(av_frame.buf[0])); |
88 | 88 |
89 if (lpcm_util_) { | 89 if (lpcm_util_) { |
90 // We need to interleave. The non-interleaved frames are in a buffer that | 90 // We need to interleave. The non-interleaved frames are in a buffer that |
(...skipping 12 matching lines...) | |
103 av_frame.nb_samples); | 103 av_frame.nb_samples); |
104 } else { | 104 } else { |
105 // We don't need to interleave. The interleaved frames are in a buffer that | 105 // We don't need to interleave. The interleaved frames are in a buffer that |
106 // was allocated from the correct allocator. We take ownership of the buffer | 106 // was allocated from the correct allocator. We take ownership of the buffer |
107 // by calling Release here so that ReleaseBufferForAvFrame won't release it. | 107 // by calling Release here so that ReleaseBufferForAvFrame won't release it. |
108 payload_size = av_buffer_context->size(); | 108 payload_size = av_buffer_context->size(); |
109 payload_buffer = av_buffer_context->Release(); | 109 payload_buffer = av_buffer_context->Release(); |
110 } | 110 } |
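The interleave branch itself is elided above ("skipping 12 matching lines"), but the underlying operation is the standard planar-to-interleaved copy: for planar sample formats FFmpeg returns one plane per channel in extended_data, while the output payload wants samples interleaved frame by frame. A hedged sketch of that copy, independent of LpcmUtil::Interleave's actual signature (InterleaveFrame and its parameters are illustrative):

```cpp
#include <cstdint>
#include <cstring>

extern "C" {
#include <libavutil/frame.h>
#include <libavutil/samplefmt.h>
}

// Illustrative planar-to-interleaved copy; the CL delegates this work to
// LpcmUtil::Interleave, whose exact interface is not shown in this diff.
void InterleaveFrame(const AVFrame& av_frame, void* out) {
  const int channels = av_frame.channels;
  const int bytes_per_sample = av_get_bytes_per_sample(
      static_cast<AVSampleFormat>(av_frame.format));
  uint8_t* dest = static_cast<uint8_t*>(out);

  for (int sample = 0; sample < av_frame.nb_samples; ++sample) {
    for (int channel = 0; channel < channels; ++channel) {
      // extended_data[channel] points at that channel's plane.
      std::memcpy(dest,
                  av_frame.extended_data[channel] + sample * bytes_per_sample,
                  bytes_per_sample);
      dest += bytes_per_sample;
    }
  }
}
```

In the CL the destination buffer comes from the provided allocator (per the comment in Decode), with the elided lines computing payload_size and performing the copy via LpcmUtil.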
111 | 111 |
112 return Packet::Create( | 112 return Packet::Create( |
113 presentation_time, | 113 pts, |
114 av_frame.nb_samples, | |
115 false, // The base class is responsible for end-of-stream. | 114 false, // The base class is responsible for end-of-stream. |
116 payload_size, | 115 payload_size, |
117 payload_buffer, | 116 payload_buffer, |
118 allocator); | 117 allocator); |
119 } | 118 } |
120 | 119 |
121 PacketPtr FfmpegAudioDecoder::CreateOutputEndOfStreamPacket() { | 120 PacketPtr FfmpegAudioDecoder::CreateOutputEndOfStreamPacket() { |
122 return Packet::CreateEndOfStream(next_presentation_time_); | 121 return Packet::CreateEndOfStream(next_pts_); |
123 } | 122 } |
124 | 123 |
125 int FfmpegAudioDecoder::AllocateBufferForAvFrame( | 124 int FfmpegAudioDecoder::AllocateBufferForAvFrame( |
126 AVCodecContext* av_codec_context, | 125 AVCodecContext* av_codec_context, |
127 AVFrame* av_frame, | 126 AVFrame* av_frame, |
128 int flags) { | 127 int flags) { |
129 // CODEC_CAP_DR1 is required in order to do allocation this way. | 128 // CODEC_CAP_DR1 is required in order to do allocation this way. |
130 DCHECK(av_codec_context->codec->capabilities & CODEC_CAP_DR1); | 129 DCHECK(av_codec_context->codec->capabilities & CODEC_CAP_DR1); |
131 | 130 |
132 FfmpegAudioDecoder* self = | 131 FfmpegAudioDecoder* self = |
(...skipping 78 matching lines...) | |
211 // Either this buffer has already been released to someone else's ownership, | 210 // Either this buffer has already been released to someone else's ownership, |
212 // or it's the same as the buffer parameter. | 211 // or it's the same as the buffer parameter. |
213 DCHECK( | 212 DCHECK( |
214 av_buffer_context->buffer() == nullptr || | 213 av_buffer_context->buffer() == nullptr || |
215 av_buffer_context->buffer() == buffer); | 214 av_buffer_context->buffer() == buffer); |
216 delete av_buffer_context; | 215 delete av_buffer_context; |
217 } | 216 } |
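AllocateBufferForAvFrame and ReleaseBufferForAvFrame follow FFmpeg's custom get_buffer2 pattern (which CODEC_CAP_DR1 permits): the allocation callback wraps our payload memory in an AVBufferRef whose opaque pointer carries the AvBufferContext, so the release callback can recover it later with av_buffer_get_opaque, as seen above. The 78 elided lines presumably do that wiring; below is a hedged sketch of the general pattern with a simplified, hypothetical BufferContext (GetBufferForAvFrame and FreeBufferForAvFrame are illustrative names, and the sketch covers only the single-plane, interleaved case):

```cpp
#include <cstdint>
#include <cstdlib>

extern "C" {
#include <libavcodec/avcodec.h>
#include <libavutil/buffer.h>
#include <libavutil/samplefmt.h>
}

// Simplified stand-in for AvBufferContext: owns a buffer until Release()
// transfers ownership (e.g. to an output Packet).
struct BufferContext {
  uint8_t* buffer;
  size_t size;
  uint8_t* Release() { uint8_t* b = buffer; buffer = nullptr; return b; }
};

static void FreeBufferForAvFrame(void* opaque, uint8_t* /*buffer*/) {
  BufferContext* context = static_cast<BufferContext*>(opaque);
  if (context->buffer != nullptr)
    std::free(context->buffer);  // ownership was never handed off
  delete context;
}

// Illustrative get_buffer2 callback: allocate one contiguous buffer and wrap
// it in a single AVBufferRef carried on plane 0.
static int GetBufferForAvFrame(AVCodecContext* av_codec_context,
                               AVFrame* av_frame,
                               int /*flags*/) {
  int size = av_samples_get_buffer_size(
      nullptr, av_codec_context->channels, av_frame->nb_samples,
      static_cast<AVSampleFormat>(av_frame->format), /*align=*/1);
  if (size < 0)
    return size;

  BufferContext* context = new BufferContext{
      static_cast<uint8_t*>(std::malloc(size)), static_cast<size_t>(size)};
  if (context->buffer == nullptr) {
    delete context;
    return AVERROR(ENOMEM);
  }

  av_frame->data[0] = context->buffer;
  av_frame->extended_data = av_frame->data;
  av_frame->linesize[0] = size;
  av_frame->buf[0] = av_buffer_create(context->buffer, size,
                                      FreeBufferForAvFrame, context,
                                      /*flags=*/0);
  return av_frame->buf[0] != nullptr ? 0 : AVERROR(ENOMEM);
}
```

The real implementation allocates from allocator_ (set per-call in Decode) rather than malloc and must also handle planar formats; the AVBufferRef/opaque wiring is the part the release path above depends on.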
218 | 217 |
219 } // namespace media | 218 } // namespace media |
220 } // namespace mojo | 219 } // namespace mojo |