OLD | NEW |
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "media/filters/ffmpeg_video_decoder.h" | 5 #include "media/filters/ffmpeg_video_decoder.h" |
6 | 6 |
7 #include <algorithm> | 7 #include <algorithm> |
8 #include <string> | 8 #include <string> |
9 | 9 |
10 #include "base/bind.h" | 10 #include "base/bind.h" |
(...skipping 44 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
55 return decode_threads; | 55 return decode_threads; |
56 } | 56 } |
57 | 57 |
58 FFmpegVideoDecoder::FFmpegVideoDecoder( | 58 FFmpegVideoDecoder::FFmpegVideoDecoder( |
59 const scoped_refptr<base::MessageLoopProxy>& message_loop) | 59 const scoped_refptr<base::MessageLoopProxy>& message_loop) |
60 : message_loop_(message_loop), | 60 : message_loop_(message_loop), |
61 weak_factory_(this), | 61 weak_factory_(this), |
62 state_(kUninitialized), | 62 state_(kUninitialized), |
63 codec_context_(NULL), | 63 codec_context_(NULL), |
64 av_frame_(NULL), | 64 av_frame_(NULL), |
65 demuxer_stream_(NULL) { | 65 demuxer_stream_(NULL) {} |
66 } | |
67 | 66 |
68 int FFmpegVideoDecoder::GetVideoBuffer(AVCodecContext* codec_context, | 67 int FFmpegVideoDecoder::GetVideoBuffer(AVCodecContext* codec_context, |
69 AVFrame* frame) { | 68 AVFrame* frame) { |
70 // Don't use |codec_context_| here! With threaded decoding, | 69 // Don't use |codec_context_| here! With threaded decoding, |
71 // it will contain unsynchronized width/height/pix_fmt values, | 70 // it will contain unsynchronized width/height/pix_fmt values, |
72 // whereas |codec_context| contains the current thread's | 71 // whereas |codec_context| contains the current thread's |
73 // updated width/height/pix_fmt, which can change for adaptive | 72 // updated width/height/pix_fmt, which can change for adaptive |
74 // content. | 73 // content. |
75 VideoFrame::Format format = PixelFormatToVideoFormat(codec_context->pix_fmt); | 74 VideoFrame::Format format = PixelFormatToVideoFormat(codec_context->pix_fmt); |
76 if (format == VideoFrame::INVALID) | 75 if (format == VideoFrame::INVALID) return AVERROR(EINVAL); |
77 return AVERROR(EINVAL); | |
78 DCHECK(format == VideoFrame::YV12 || format == VideoFrame::YV16); | 76 DCHECK(format == VideoFrame::YV12 || format == VideoFrame::YV16); |
79 | 77 |
80 gfx::Size size(codec_context->width, codec_context->height); | 78 gfx::Size size(codec_context->width, codec_context->height); |
81 int ret; | 79 int ret; |
82 if ((ret = av_image_check_size(size.width(), size.height(), 0, NULL)) < 0) | 80 if ((ret = av_image_check_size(size.width(), size.height(), 0, NULL)) < 0) |
83 return ret; | 81 return ret; |
84 | 82 |
85 gfx::Size natural_size; | 83 gfx::Size natural_size; |
86 if (codec_context->sample_aspect_ratio.num > 0) { | 84 if (codec_context->sample_aspect_ratio.num > 0) { |
87 natural_size = GetNaturalSize(size, | 85 natural_size = GetNaturalSize(size, codec_context->sample_aspect_ratio.num, |
88 codec_context->sample_aspect_ratio.num, | |
89 codec_context->sample_aspect_ratio.den); | 86 codec_context->sample_aspect_ratio.den); |
90 } else { | 87 } else { |
91 natural_size = demuxer_stream_->video_decoder_config().natural_size(); | 88 natural_size = demuxer_stream_->video_decoder_config().natural_size(); |
92 } | 89 } |
93 | 90 |
94 if (!VideoFrame::IsValidConfig(format, size, gfx::Rect(size), natural_size)) | 91 if (!VideoFrame::IsValidConfig(format, size, gfx::Rect(size), natural_size)) |
95 return AVERROR(EINVAL); | 92 return AVERROR(EINVAL); |
96 | 93 |
97 scoped_refptr<VideoFrame> video_frame = | 94 scoped_refptr<VideoFrame> video_frame = VideoFrame::CreateFrame( |
98 VideoFrame::CreateFrame(format, size, gfx::Rect(size), natural_size, | 95 format, size, gfx::Rect(size), natural_size, kNoTimestamp()); |
99 kNoTimestamp()); | |
100 | 96 |
101 for (int i = 0; i < 3; i++) { | 97 for (int i = 0; i < 3; i++) { |
102 frame->base[i] = video_frame->data(i); | 98 frame->base[i] = video_frame->data(i); |
103 frame->data[i] = video_frame->data(i); | 99 frame->data[i] = video_frame->data(i); |
104 frame->linesize[i] = video_frame->stride(i); | 100 frame->linesize[i] = video_frame->stride(i); |
105 } | 101 } |
106 | 102 |
107 frame->opaque = NULL; | 103 frame->opaque = NULL; |
108 video_frame.swap(reinterpret_cast<VideoFrame**>(&frame->opaque)); | 104 video_frame.swap(reinterpret_cast<VideoFrame**>(&frame->opaque)); |
109 frame->type = FF_BUFFER_TYPE_USER; | 105 frame->type = FF_BUFFER_TYPE_USER; |
110 frame->pkt_pts = codec_context->pkt ? codec_context->pkt->pts : | 106 frame->pkt_pts = |
111 AV_NOPTS_VALUE; | 107 codec_context->pkt ? codec_context->pkt->pts : AV_NOPTS_VALUE; |
112 frame->width = codec_context->width; | 108 frame->width = codec_context->width; |
113 frame->height = codec_context->height; | 109 frame->height = codec_context->height; |
114 frame->format = codec_context->pix_fmt; | 110 frame->format = codec_context->pix_fmt; |
115 | 111 |
116 return 0; | 112 return 0; |
117 } | 113 } |
118 | 114 |
119 static int GetVideoBufferImpl(AVCodecContext* s, AVFrame* frame) { | 115 static int GetVideoBufferImpl(AVCodecContext* s, AVFrame* frame) { |
120 FFmpegVideoDecoder* vd = static_cast<FFmpegVideoDecoder*>(s->opaque); | 116 FFmpegVideoDecoder* vd = static_cast<FFmpegVideoDecoder*>(s->opaque); |
121 return vd->GetVideoBuffer(s, frame); | 117 return vd->GetVideoBuffer(s, frame); |
(...skipping 54 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
176 | 172 |
177 ReadFromDemuxerStream(); | 173 ReadFromDemuxerStream(); |
178 } | 174 } |
179 | 175 |
180 void FFmpegVideoDecoder::Reset(const base::Closure& closure) { | 176 void FFmpegVideoDecoder::Reset(const base::Closure& closure) { |
181 DCHECK(message_loop_->BelongsToCurrentThread()); | 177 DCHECK(message_loop_->BelongsToCurrentThread()); |
182 DCHECK(reset_cb_.is_null()); | 178 DCHECK(reset_cb_.is_null()); |
183 reset_cb_ = BindToCurrentLoop(closure); | 179 reset_cb_ = BindToCurrentLoop(closure); |
184 | 180 |
185 // Defer the reset if a read is pending. | 181 // Defer the reset if a read is pending. |
186 if (!read_cb_.is_null()) | 182 if (!read_cb_.is_null()) return; |
187 return; | |
188 | 183 |
189 DoReset(); | 184 DoReset(); |
190 } | 185 } |
191 | 186 |
192 void FFmpegVideoDecoder::DoReset() { | 187 void FFmpegVideoDecoder::DoReset() { |
193 DCHECK(read_cb_.is_null()); | 188 DCHECK(read_cb_.is_null()); |
194 | 189 |
195 avcodec_flush_buffers(codec_context_); | 190 avcodec_flush_buffers(codec_context_); |
196 state_ = kNormal; | 191 state_ = kNormal; |
197 base::ResetAndReturn(&reset_cb_).Run(); | 192 base::ResetAndReturn(&reset_cb_).Run(); |
198 } | 193 } |
199 | 194 |
200 void FFmpegVideoDecoder::Stop(const base::Closure& closure) { | 195 void FFmpegVideoDecoder::Stop(const base::Closure& closure) { |
201 DCHECK(message_loop_->BelongsToCurrentThread()); | 196 DCHECK(message_loop_->BelongsToCurrentThread()); |
202 base::ScopedClosureRunner runner(BindToCurrentLoop(closure)); | 197 base::ScopedClosureRunner runner(BindToCurrentLoop(closure)); |
203 | 198 |
204 if (state_ == kUninitialized) | 199 if (state_ == kUninitialized) return; |
205 return; | |
206 | 200 |
207 if (!read_cb_.is_null()) | 201 if (!read_cb_.is_null()) base::ResetAndReturn(&read_cb_).Run(kOk, NULL); |
208 base::ResetAndReturn(&read_cb_).Run(kOk, NULL); | |
209 | 202 |
210 ReleaseFFmpegResources(); | 203 ReleaseFFmpegResources(); |
211 state_ = kUninitialized; | 204 state_ = kUninitialized; |
212 } | 205 } |
213 | 206 |
214 FFmpegVideoDecoder::~FFmpegVideoDecoder() { | 207 FFmpegVideoDecoder::~FFmpegVideoDecoder() { |
215 DCHECK_EQ(kUninitialized, state_); | 208 DCHECK_EQ(kUninitialized, state_); |
216 DCHECK(!codec_context_); | 209 DCHECK(!codec_context_); |
217 DCHECK(!av_frame_); | 210 DCHECK(!av_frame_); |
218 } | 211 } |
219 | 212 |
220 void FFmpegVideoDecoder::ReadFromDemuxerStream() { | 213 void FFmpegVideoDecoder::ReadFromDemuxerStream() { |
221 DCHECK_NE(state_, kUninitialized); | 214 DCHECK_NE(state_, kUninitialized); |
222 DCHECK_NE(state_, kDecodeFinished); | 215 DCHECK_NE(state_, kDecodeFinished); |
223 DCHECK_NE(state_, kError); | 216 DCHECK_NE(state_, kError); |
224 DCHECK(!read_cb_.is_null()); | 217 DCHECK(!read_cb_.is_null()); |
225 | 218 |
226 demuxer_stream_->Read(base::Bind( | 219 demuxer_stream_->Read( |
227 &FFmpegVideoDecoder::BufferReady, weak_this_)); | 220 base::Bind(&FFmpegVideoDecoder::BufferReady, weak_this_)); |
228 } | 221 } |
229 | 222 |
230 void FFmpegVideoDecoder::BufferReady( | 223 void FFmpegVideoDecoder::BufferReady( |
231 DemuxerStream::Status status, | 224 DemuxerStream::Status status, const scoped_refptr<DecoderBuffer>& buffer) { |
232 const scoped_refptr<DecoderBuffer>& buffer) { | |
233 DCHECK(message_loop_->BelongsToCurrentThread()); | 225 DCHECK(message_loop_->BelongsToCurrentThread()); |
234 DCHECK_NE(state_, kDecodeFinished); | 226 DCHECK_NE(state_, kDecodeFinished); |
235 DCHECK_NE(state_, kError); | 227 DCHECK_NE(state_, kError); |
236 DCHECK_EQ(status != DemuxerStream::kOk, !buffer.get()) << status; | 228 DCHECK_EQ(status != DemuxerStream::kOk, !buffer.get()) << status; |
237 | 229 |
238 if (state_ == kUninitialized) | 230 if (state_ == kUninitialized) return; |
239 return; | |
240 | 231 |
241 DCHECK(!read_cb_.is_null()); | 232 DCHECK(!read_cb_.is_null()); |
242 | 233 |
243 if (!reset_cb_.is_null()) { | 234 if (!reset_cb_.is_null()) { |
244 base::ResetAndReturn(&read_cb_).Run(kOk, NULL); | 235 base::ResetAndReturn(&read_cb_).Run(kOk, NULL); |
245 DoReset(); | 236 DoReset(); |
246 return; | 237 return; |
247 } | 238 } |
248 | 239 |
249 if (status == DemuxerStream::kAborted) { | 240 if (status == DemuxerStream::kAborted) { |
(...skipping 27 matching lines...) Expand all Loading... |
277 // are discarded. | 268 // are discarded. |
278 // kFlushCodec: There isn't any more input data. Call avcodec_decode_video2 | 269 // kFlushCodec: There isn't any more input data. Call avcodec_decode_video2 |
279 // until no more data is returned to flush out remaining | 270 // until no more data is returned to flush out remaining |
280 // frames. The input buffer is ignored at this point. | 271 // frames. The input buffer is ignored at this point. |
281 // kDecodeFinished: All calls return empty frames. | 272 // kDecodeFinished: All calls return empty frames. |
282 // kError: Unexpected error happened. | 273 // kError: Unexpected error happened. |
283 // | 274 // |
284 // These are the possible state transitions. | 275 // These are the possible state transitions. |
285 // | 276 // |
286 // kNormal -> kFlushCodec: | 277 // kNormal -> kFlushCodec: |
287 // When buffer->IsEndOfStream() is first true. | 278 // When buffer->is_end_of_stream() is first true. |
288 // kNormal -> kError: | 279 // kNormal -> kError: |
289 // A decoding error occurs and decoding needs to stop. | 280 // A decoding error occurs and decoding needs to stop. |
290 // kFlushCodec -> kDecodeFinished: | 281 // kFlushCodec -> kDecodeFinished: |
291 // When avcodec_decode_video2() returns 0 data. | 282 // When avcodec_decode_video2() returns 0 data. |
292 // kFlushCodec -> kError: | 283 // kFlushCodec -> kError: |
293 // When avcodec_decode_video2() errors out. | 284 // When avcodec_decode_video2() errors out. |
294 // (any state) -> kNormal: | 285 // (any state) -> kNormal: |
295 // Any time Reset() is called. | 286 // Any time Reset() is called. |
296 | 287 |
297 // Transition to kFlushCodec on the first end of stream buffer. | 288 // Transition to kFlushCodec on the first end of stream buffer. |
298 if (state_ == kNormal && buffer->IsEndOfStream()) { | 289 if (state_ == kNormal && buffer->is_end_of_stream()) { |
299 state_ = kFlushCodec; | 290 state_ = kFlushCodec; |
300 } | 291 } |
301 | 292 |
302 scoped_refptr<VideoFrame> video_frame; | 293 scoped_refptr<VideoFrame> video_frame; |
303 if (!Decode(buffer, &video_frame)) { | 294 if (!Decode(buffer, &video_frame)) { |
304 state_ = kError; | 295 state_ = kError; |
305 base::ResetAndReturn(&read_cb_).Run(kDecodeError, NULL); | 296 base::ResetAndReturn(&read_cb_).Run(kDecodeError, NULL); |
306 return; | 297 return; |
307 } | 298 } |
308 | 299 |
309 // Any successful decode counts! | 300 // Any successful decode counts! |
310 if (!buffer->IsEndOfStream() && buffer->GetDataSize() > 0) { | 301 if (!buffer->is_end_of_stream() && buffer->get_data_size() > 0) { |
311 PipelineStatistics statistics; | 302 PipelineStatistics statistics; |
312 statistics.video_bytes_decoded = buffer->GetDataSize(); | 303 statistics.video_bytes_decoded = buffer->get_data_size(); |
313 statistics_cb_.Run(statistics); | 304 statistics_cb_.Run(statistics); |
314 } | 305 } |
315 | 306 |
316 if (!video_frame.get()) { | 307 if (!video_frame.get()) { |
317 if (state_ == kFlushCodec) { | 308 if (state_ == kFlushCodec) { |
318 DCHECK(buffer->IsEndOfStream()); | 309 DCHECK(buffer->is_end_of_stream()); |
319 state_ = kDecodeFinished; | 310 state_ = kDecodeFinished; |
320 base::ResetAndReturn(&read_cb_).Run(kOk, VideoFrame::CreateEmptyFrame()); | 311 base::ResetAndReturn(&read_cb_).Run(kOk, VideoFrame::CreateEmptyFrame()); |
321 return; | 312 return; |
322 } | 313 } |
323 | 314 |
324 ReadFromDemuxerStream(); | 315 ReadFromDemuxerStream(); |
325 return; | 316 return; |
326 } | 317 } |
327 | 318 |
328 base::ResetAndReturn(&read_cb_).Run(kOk, video_frame); | 319 base::ResetAndReturn(&read_cb_).Run(kOk, video_frame); |
329 } | 320 } |
330 | 321 |
331 bool FFmpegVideoDecoder::Decode( | 322 bool FFmpegVideoDecoder::Decode(const scoped_refptr<DecoderBuffer>& buffer, |
332 const scoped_refptr<DecoderBuffer>& buffer, | 323 scoped_refptr<VideoFrame>* video_frame) { |
333 scoped_refptr<VideoFrame>* video_frame) { | |
334 DCHECK(video_frame); | 324 DCHECK(video_frame); |
335 | 325 |
336 // Reset frame to default values. | 326 // Reset frame to default values. |
337 avcodec_get_frame_defaults(av_frame_); | 327 avcodec_get_frame_defaults(av_frame_); |
338 | 328 |
339 // Create a packet for input data. | 329 // Create a packet for input data. |
340 // Due to FFmpeg API changes we no longer have const read-only pointers. | 330 // Due to FFmpeg API changes we no longer have const read-only pointers. |
341 AVPacket packet; | 331 AVPacket packet; |
342 av_init_packet(&packet); | 332 av_init_packet(&packet); |
343 if (buffer->IsEndOfStream()) { | 333 if (buffer->is_end_of_stream()) { |
344 packet.data = NULL; | 334 packet.data = NULL; |
345 packet.size = 0; | 335 packet.size = 0; |
346 } else { | 336 } else { |
347 packet.data = const_cast<uint8*>(buffer->GetData()); | 337 packet.data = const_cast<uint8*>(buffer->get_data()); |
348 packet.size = buffer->GetDataSize(); | 338 packet.size = buffer->get_data_size(); |
349 | 339 |
350 // Let FFmpeg handle presentation timestamp reordering. | 340 // Let FFmpeg handle presentation timestamp reordering. |
351 codec_context_->reordered_opaque = buffer->GetTimestamp().InMicroseconds(); | 341 codec_context_->reordered_opaque = buffer->get_timestamp().InMicroseconds(); |
352 | 342 |
353 // This is for codecs not using get_buffer to initialize | 343 // This is for codecs not using get_buffer to initialize |
354 // |av_frame_->reordered_opaque| | 344 // |av_frame_->reordered_opaque| |
355 av_frame_->reordered_opaque = codec_context_->reordered_opaque; | 345 av_frame_->reordered_opaque = codec_context_->reordered_opaque; |
356 } | 346 } |
357 | 347 |
358 int frame_decoded = 0; | 348 int frame_decoded = 0; |
359 int result = avcodec_decode_video2(codec_context_, | 349 int result = |
360 av_frame_, | 350 avcodec_decode_video2(codec_context_, av_frame_, &frame_decoded, &packet); |
361 &frame_decoded, | |
362 &packet); | |
363 // Log the problem if we can't decode a video frame and exit early. | 351 // Log the problem if we can't decode a video frame and exit early. |
364 if (result < 0) { | 352 if (result < 0) { |
365 LOG(ERROR) << "Error decoding video: " << buffer->AsHumanReadableString(); | 353 LOG(ERROR) |
| 354 << "Error decoding video: " << buffer->as_human_readable_string(); |
366 *video_frame = NULL; | 355 *video_frame = NULL; |
367 return false; | 356 return false; |
368 } | 357 } |
369 | 358 |
370 // If no frame was produced then signal that more data is required to | 359 // If no frame was produced then signal that more data is required to |
371 // produce more frames. This can happen under two circumstances: | 360 // produce more frames. This can happen under two circumstances: |
372 // 1) Decoder was recently initialized/flushed | 361 // 1) Decoder was recently initialized/flushed |
373 // 2) End of stream was reached and all internal frames have been output | 362 // 2) End of stream was reached and all internal frames have been output |
374 if (frame_decoded == 0) { | 363 if (frame_decoded == 0) { |
375 *video_frame = NULL; | 364 *video_frame = NULL; |
(...skipping 69 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
445 if (!codec || avcodec_open2(codec_context_, codec, NULL) < 0) { | 434 if (!codec || avcodec_open2(codec_context_, codec, NULL) < 0) { |
446 ReleaseFFmpegResources(); | 435 ReleaseFFmpegResources(); |
447 return false; | 436 return false; |
448 } | 437 } |
449 | 438 |
450 av_frame_ = avcodec_alloc_frame(); | 439 av_frame_ = avcodec_alloc_frame(); |
451 return true; | 440 return true; |
452 } | 441 } |
453 | 442 |
454 } // namespace media | 443 } // namespace media |
OLD | NEW |