OLD | NEW |
---|---|
(Empty) | |
1 /* | |
2 * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved. | |
3 * | |
4 * Use of this source code is governed by a BSD-style license | |
5 * that can be found in the LICENSE file in the root of the source | |
6 * tree. An additional intellectual property rights grant can be found | |
7 * in the file PATENTS. All contributing project authors may | |
8 * be found in the AUTHORS file in the root of the source tree. | |
9 * | |
10 */ | |
11 | |
12 #include "webrtc/modules/video_coding/codecs/h264/h264_decoder_impl.h" | |
13 | |
14 #include <algorithm> | |
15 | |
16 extern "C" { | |
17 #include "third_party/ffmpeg/libavcodec/avcodec.h" | |
18 #include "third_party/ffmpeg/libavformat/avformat.h" | |
19 #include "third_party/ffmpeg/libavutil/imgutils.h" | |
20 } // extern "C" | |
21 | |
22 #include "webrtc/base/checks.h" | |
23 #include "webrtc/base/criticalsection.h" | |
24 #include "webrtc/base/logging.h" | |
25 | |
namespace webrtc {

namespace {

// The only pixel format this decoder produces/accepts; both InitDecode and
// AVGetBuffer2 CHECK against it (planar YUV 4:2:0 / I420).
const AVPixelFormat kPixelFormat = AV_PIX_FMT_YUV420P;
// Indices into AVFrame::data[] / AVFrame::linesize[] for the three I420
// planes, matching FFmpeg's plane ordering for AV_PIX_FMT_YUV420P.
const size_t kYPlaneIndex = 0;
const size_t kUPlaneIndex = 1;
const size_t kVPlaneIndex = 2;
#if !defined(WEBRTC_CHROMIUM_BUILD)

// Guards against registering the lock manager / calling av_register_all more
// than once. Only touched by InitializeFFmpeg, which is assumed to run on a
// single thread (see TODO on InitializeFFmpeg).
bool ffmpeg_initialized = false;

// Called by FFmpeg to do mutex operations if initialized using
// |InitializeFFmpeg|. |*lock| is an opaque slot FFmpeg hands back to us on
// obtain/release/destroy; we store an rtc::CriticalSection in it.
// Returns 0 on success, -1 on an unrecognized operation.
int LockManagerOperation(void** lock, AVLockOp op)
    EXCLUSIVE_LOCK_FUNCTION() UNLOCK_FUNCTION() {
  switch (op) {
    case AV_LOCK_CREATE:
      *lock = new rtc::CriticalSection();
      return 0;
    case AV_LOCK_OBTAIN:
      static_cast<rtc::CriticalSection*>(*lock)->Enter();
      return 0;
    case AV_LOCK_RELEASE:
      static_cast<rtc::CriticalSection*>(*lock)->Leave();
      return 0;
    case AV_LOCK_DESTROY:
      delete static_cast<rtc::CriticalSection*>(*lock);
      *lock = nullptr;
      return 0;
  }
  RTC_NOTREACHED() << "Unrecognized AVLockOp.";
  return -1;
}
61 | |
62 // TODO(hbos): Assumed to be called on a single thread. Should DCHECK that | |
63 // InitializeFFmpeg is only called on one thread or make it thread safe. | |
64 // See https://bugs.chromium.org/p/webrtc/issues/detail?id=5427. | |
65 void InitializeFFmpeg() { | |
66 if (!ffmpeg_initialized) { | |
67 if (av_lockmgr_register(LockManagerOperation) < 0) { | |
68 RTC_NOTREACHED() << "av_lockmgr_register failed."; | |
69 return; | |
70 } | |
71 av_register_all(); | |
72 ffmpeg_initialized = true; | |
73 } | |
74 } | |
75 | |
76 #endif // !defined(WEBRTC_CHROMIUM_BUILD) | |
77 | |
78 // Called by FFmpeg when it is done with a frame buffer, see AVGetBuffer2. | |
79 void AVFreeBuffer2(void* opaque, uint8_t* data) { | |
80 VideoFrame* video_frame = static_cast<VideoFrame*>(opaque); | |
81 delete video_frame; | |
82 } | |
83 | |
// Called by FFmpeg when it needs a frame buffer to store decoded frames in.
// The VideoFrames returned by FFmpeg at Decode originate from here. They are
// reference counted and freed by FFmpeg using |AVFreeBuffer2|.
// TODO(hbos): Use a frame pool for better performance instead of create/free.
// Could be owned by decoder, static_cast<H264DecoderImpl*>(context->opaque).
// Consider verifying that the buffer was allocated by us to avoid unsafe type
// cast. See https://bugs.chromium.org/p/webrtc/issues/detail?id=5428.
int AVGetBuffer2(AVCodecContext* context, AVFrame* av_frame, int flags) {
  RTC_CHECK_EQ(context->pix_fmt, kPixelFormat);  // Same as in InitDecode.
  // width/height and coded_width/coded_height can be different due to cropping
  // or |lowres|.
  int width = std::max(context->width, context->coded_width);
  int height = std::max(context->height, context->coded_height);
  // See |lowres|, if used the decoder scales the image by 1/2^(lowres). This
  // has implications on which resolutions are valid, but we don't use it.
  RTC_CHECK_EQ(context->lowres, 0);

  RTC_CHECK_GE(width, 0);
  RTC_CHECK_GE(height, 0);
  // NOTE: per FFmpeg docs, av_image_check_size returns >= 0 if the dimensions
  // are valid and a negative AVERROR code otherwise (from review discussion).
  int ret = av_image_check_size(static_cast<unsigned int>(width),
                                static_cast<unsigned int>(height), 0, nullptr);
  if (ret < 0) {
    LOG(LS_ERROR) << "Invalid picture size " << width << "x" << height;
    return ret;
  }

  // The video frame is stored in |video_frame|. |av_frame| is FFmpeg's version
  // of a video frame and will be set up to reference |video_frame|'s buffers.
  VideoFrame* video_frame = new VideoFrame();
  // I420 strides: full-width luma, half-width (rounded up) chroma.
  int stride_y = width;
  int stride_uv = (width + 1) / 2;
  RTC_CHECK_EQ(0, video_frame->CreateEmptyFrame(
      width, height, stride_y, stride_uv, stride_uv));
  int total_size = video_frame->allocated_size(kYPlane) +
                   video_frame->allocated_size(kUPlane) +
                   video_frame->allocated_size(kVPlane);
  // Sanity-check that the allocation matches the expected I420 layout.
  RTC_DCHECK_EQ(total_size, stride_y * height +
                (stride_uv + stride_uv) * ((height + 1) / 2));

  // FFmpeg expects the initial allocation to be zero-initialized according to
  // http://crbug.com/390941.
  // Using a single |av_frame->buf| - YUV is required to be a continuous blob of
  // memory. We can zero-initialize with one memset operation for all planes.
  // The DCHECKs below establish that the three planes really are contiguous.
  RTC_DCHECK_EQ(video_frame->buffer(kUPlane),
      video_frame->buffer(kYPlane) + video_frame->allocated_size(kYPlane));
  RTC_DCHECK_EQ(video_frame->buffer(kVPlane),
      video_frame->buffer(kUPlane) + video_frame->allocated_size(kUPlane));
  memset(video_frame->buffer(kYPlane), 0, total_size);

  RTC_DCHECK_EQ(av_frame->width, width);
  RTC_DCHECK_EQ(av_frame->height, height);
  av_frame->format = context->pix_fmt;
  // Carry the caller-set opaque value (our NTP-derived timestamp, set in
  // Decode) through FFmpeg's reordering.
  av_frame->reordered_opaque = context->reordered_opaque;

  // Set |av_frame| members as required by FFmpeg: point each plane at the
  // VideoFrame's buffers using the plane-index constants declared above.
  av_frame->data[kYPlaneIndex] = video_frame->buffer(kYPlane);
  av_frame->linesize[kYPlaneIndex] = video_frame->stride(kYPlane);
  av_frame->data[kUPlaneIndex] = video_frame->buffer(kUPlane);
  av_frame->linesize[kUPlaneIndex] = video_frame->stride(kUPlane);
  av_frame->data[kVPlaneIndex] = video_frame->buffer(kVPlane);
  av_frame->linesize[kVPlaneIndex] = video_frame->stride(kVPlane);
  RTC_DCHECK_EQ(av_frame->extended_data, av_frame->data);

  // Hand ownership of |video_frame| to FFmpeg's refcounted buffer; it is
  // deleted by AVFreeBuffer2 when the last reference is dropped.
  av_frame->buf[0] = av_buffer_create(av_frame->data[kYPlaneIndex],
                                      total_size,
                                      AVFreeBuffer2,
                                      static_cast<void*>(video_frame),
                                      0);
  RTC_CHECK(av_frame->buf[0]);
  return 0;
}
155 | |
156 } // namespace | |
157 | |
// Constructs an uninitialized decoder; InitDecode and
// RegisterDecodeCompleteCallback must be called before Decode.
H264DecoderImpl::H264DecoderImpl()
    : decoded_image_callback_(nullptr) {
}
161 | |
// Frees the FFmpeg context/frame via Release (safe even if never initialized).
H264DecoderImpl::~H264DecoderImpl() {
  Release();
}
165 | |
166 int32_t H264DecoderImpl::InitDecode(const VideoCodec* codec_settings, | |
167 int32_t number_of_cores) { | |
168 if (codec_settings && | |
169 codec_settings->codecType != kVideoCodecH264) { | |
170 return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; | |
171 } | |
172 | |
173 // In Chromium FFmpeg will be initialized outside of WebRTC and we should not | |
174 // attempt to do so ourselves or it will be initialized twice. | |
175 // TODO(hbos): Put behind a different flag in case non-chromium project wants | |
176 // to initialize externally. | |
177 // See https://bugs.chromium.org/p/webrtc/issues/detail?id=5427. | |
178 #if !defined(WEBRTC_CHROMIUM_BUILD) | |
179 // Make sure FFmpeg has been initialized. | |
180 InitializeFFmpeg(); | |
181 #endif | |
182 | |
183 // Release necessary in case of re-initializing. | |
184 int32_t ret = Release(); | |
185 if (ret != WEBRTC_VIDEO_CODEC_OK) | |
186 return ret; | |
187 RTC_DCHECK(!av_context_); | |
188 | |
189 // Initialize AVCodecContext. | |
190 av_context_.reset(avcodec_alloc_context3(nullptr)); | |
191 | |
192 av_context_->codec_type = AVMEDIA_TYPE_VIDEO; | |
193 av_context_->codec_id = AV_CODEC_ID_H264; | |
194 if (codec_settings) { | |
195 av_context_->coded_width = codec_settings->width; | |
196 av_context_->coded_height = codec_settings->height; | |
197 } | |
198 av_context_->pix_fmt = kPixelFormat; | |
199 av_context_->extradata = nullptr; | |
200 av_context_->extradata_size = 0; | |
201 | |
202 av_context_->thread_count = 1; | |
203 av_context_->thread_type = FF_THREAD_SLICE; | |
204 | |
205 // FFmpeg will get video buffers from our AVGetBuffer2, memory managed by us. | |
206 av_context_->get_buffer2 = AVGetBuffer2; | |
207 // get_buffer2 is called with the context, there |opaque| can be used to get a | |
208 // pointer |this|. | |
209 av_context_->opaque = this; | |
210 // Use ref counted frames (av_frame_unref). | |
211 av_context_->refcounted_frames = 1; // true | |
212 | |
213 AVCodec* codec = avcodec_find_decoder(av_context_->codec_id); | |
214 if (!codec) { | |
215 // This is an indication that FFmpeg has not been initialized or it has not | |
216 // been compiled/initialized with the correct set of codecs. | |
217 LOG(LS_ERROR) << "FFmpeg H.264 decoder not found."; | |
218 Release(); | |
219 return WEBRTC_VIDEO_CODEC_ERROR; | |
220 } | |
221 int res = avcodec_open2(av_context_.get(), codec, nullptr); | |
222 if (res < 0) { | |
223 LOG(LS_ERROR) << "avcodec_open2 error: " << res; | |
224 Release(); | |
225 return WEBRTC_VIDEO_CODEC_ERROR; | |
226 } | |
227 | |
228 av_frame_.reset(av_frame_alloc()); | |
229 return WEBRTC_VIDEO_CODEC_OK; | |
230 } | |
231 | |
// Frees the codec context and scratch frame (closing the decoder). Safe to
// call repeatedly or before initialization; always reports success.
int32_t H264DecoderImpl::Release() {
  av_context_.reset();
  av_frame_.reset();
  return WEBRTC_VIDEO_CODEC_OK;
}
237 | |
238 int32_t H264DecoderImpl::Reset() { | |
239 if (!IsInitialized()) | |
240 return WEBRTC_VIDEO_CODEC_UNINITIALIZED; | |
241 InitDecode(nullptr, 1); | |
242 return WEBRTC_VIDEO_CODEC_OK; | |
243 } | |
244 | |
// Stores the sink that Decode will deliver decoded frames to. The pointer is
// not owned; the caller must keep |callback| alive while decoding.
int32_t H264DecoderImpl::RegisterDecodeCompleteCallback(
    DecodedImageCallback* callback) {
  decoded_image_callback_ = callback;
  return WEBRTC_VIDEO_CODEC_OK;
}
250 | |
251 int32_t H264DecoderImpl::Decode(const EncodedImage& input_image, | |
252 bool /*missing_frames*/, | |
253 const RTPFragmentationHeader* /*fragmentation*/, | |
254 const CodecSpecificInfo* codec_specific_info, | |
255 int64_t /*render_time_ms*/) { | |
256 if (!IsInitialized()) | |
257 return WEBRTC_VIDEO_CODEC_UNINITIALIZED; | |
258 if (!decoded_image_callback_) { | |
259 LOG(LS_WARNING) << "InitDecode() has been called, but a callback function " | |
260 "has not been set with RegisterDecodeCompleteCallback()"; | |
261 return WEBRTC_VIDEO_CODEC_UNINITIALIZED; | |
262 } | |
263 if (!input_image._buffer || !input_image._length) | |
264 return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; | |
265 if (codec_specific_info && | |
266 codec_specific_info->codecType != kVideoCodecH264) { | |
267 return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; | |
268 } | |
269 | |
270 AVPacket packet; | |
271 av_init_packet(&packet); | |
272 // TODO(hbos): "The input buffer must be AV_INPUT_BUFFER_PADDING_SIZE larger | |
273 // than the actual read bytes because some optimized bitstream readers read 32 | |
274 // or 64 bits at once and could read over the end." See avcodec_decode_video2. | |
275 // - Is this an issue? Do we have to make sure EncodedImage is allocated with | |
276 // additional bytes or do we have to do an otherwise unnecessary copy? Might | |
277 // only be a problem with non-mul-32 frame widths? | |
278 // ("If the first 23 bits of the additional bytes are not 0, then damaged MPEG | |
279 // bitstreams could cause overread and segfault.") | |
280 // See issue: https://bugs.chromium.org/p/webrtc/issues/detail?id=5424 | |
281 packet.data = input_image._buffer; | |
282 packet.size = input_image._length; | |
283 av_context_->reordered_opaque = input_image.ntp_time_ms_ * 1000; // ms -> μs | |
284 | |
285 int frame_decoded = 0; | |
286 int result = avcodec_decode_video2(av_context_.get(), | |
287 av_frame_.get(), | |
288 &frame_decoded, | |
289 &packet); | |
290 if (result < 0) { | |
291 LOG(LS_ERROR) << "avcodec_decode_video2 error: " << result; | |
292 return WEBRTC_VIDEO_CODEC_ERROR; | |
293 } | |
294 // |result| is number of bytes used, which should be all of them. | |
295 if (result != packet.size) { | |
296 LOG(LS_ERROR) << "avcodec_decode_video2 consumed " << result << " bytes " | |
297 "when " << packet.size << " bytes were expected."; | |
298 return WEBRTC_VIDEO_CODEC_ERROR; | |
299 } | |
300 | |
301 if (!frame_decoded) { | |
302 LOG(LS_WARNING) << "avcodec_decode_video2 successful but no frame was " | |
303 "decoded."; | |
304 return WEBRTC_VIDEO_CODEC_OK; | |
305 } | |
306 | |
307 // Obtain the |video_frame| containing the decoded image. | |
308 VideoFrame* video_frame = static_cast<VideoFrame*>( | |
309 av_buffer_get_opaque(av_frame_->buf[0])); | |
310 RTC_DCHECK(video_frame); | |
311 RTC_CHECK_EQ(av_frame_->data[kYPlane], video_frame->buffer(kYPlane)); | |
312 RTC_CHECK_EQ(av_frame_->data[kUPlane], video_frame->buffer(kUPlane)); | |
313 RTC_CHECK_EQ(av_frame_->data[kVPlane], video_frame->buffer(kVPlane)); | |
314 video_frame->set_timestamp(input_image._timeStamp); | |
315 | |
316 // Return decoded frame. | |
317 int32_t ret = decoded_image_callback_->Decoded(*video_frame); | |
318 // Stop referencing it, possibly freeing |video_frame|. | |
319 av_frame_unref(av_frame_.get()); | |
320 video_frame = nullptr; | |
321 | |
322 if (ret) { | |
323 LOG(LS_WARNING) << "DecodedImageCallback::Decoded returned " << ret; | |
324 return ret; | |
325 } | |
326 return WEBRTC_VIDEO_CODEC_OK; | |
327 } | |
328 | |
329 bool H264DecoderImpl::IsInitialized() const { | |
330 return av_context_ != nullptr; | |
331 } | |
332 | |
333 } // namespace webrtc | |
OLD | NEW |