OLD | NEW |
---|---|
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "media/filters/ffmpeg_video_decoder.h" | 5 #include "media/filters/ffmpeg_video_decoder.h" |
6 | 6 |
7 #include "base/bind.h" | 7 #include "base/bind.h" |
8 #include "base/callback_helpers.h" | 8 #include "base/callback_helpers.h" |
9 #include "base/command_line.h" | 9 #include "base/command_line.h" |
10 #include "base/message_loop.h" | 10 #include "base/message_loop.h" |
(...skipping 43 matching lines...) | |
54 : message_loop_factory_cb_(message_loop_cb), | 54 : message_loop_factory_cb_(message_loop_cb), |
55 message_loop_(NULL), | 55 message_loop_(NULL), |
56 state_(kUninitialized), | 56 state_(kUninitialized), |
57 codec_context_(NULL), | 57 codec_context_(NULL), |
58 av_frame_(NULL), | 58 av_frame_(NULL), |
59 frame_rate_numerator_(0), | 59 frame_rate_numerator_(0), |
60 frame_rate_denominator_(0), | 60 frame_rate_denominator_(0), |
61 decryptor_(NULL) { | 61 decryptor_(NULL) { |
62 } | 62 } |
63 | 63 |
64 int FFmpegVideoDecoder::GetVideoBuffer(AVFrame *frame) { | |
65 VideoFrame::Format format = PixelFormatToVideoFormat(codec_context_->pix_fmt); | |
66 if (format == VideoFrame::INVALID) | |
67 return AVERROR(EINVAL); | |
68 DCHECK(format == VideoFrame::YV12 || format == VideoFrame::YV16); | |
69 int w = codec_context_->width, h = codec_context_->height; | |
70 avcodec_align_dimensions(codec_context_, &w, &h); | |
71 int ret; | |
72 if ((ret = av_image_check_size(w, h, 0, NULL)) < 0) | |
73 return ret; | |
74 int strides[4]; | |
75 if ((ret = av_image_fill_linesizes(strides, codec_context_->pix_fmt, w)) < 0) | |
76 return ret; | |
77 scoped_refptr<VideoFrame> buf = VideoFrame::CreateFrame(format, | |
78 codec_context_->width, codec_context_->height, strides[0], | |
79 strides[1], h, kNoTimestamp(), kNoTimestamp()); | |
80 | |
81 for (int i = 0; i < 3; i++) { | |
82 frame->base[i] = buf->data(i); | |
83 frame->data[i] = buf->data(i); | |
84 frame->linesize[i] = buf->stride(i); | |
85 } | |
86 | |
87 frame->opaque = buf.release(); | |
88 frame->type = FF_BUFFER_TYPE_USER; | |
89 frame->pkt_pts = codec_context_->pkt ? codec_context_->pkt->pts : AV_NOPTS_VALUE; | |
90 frame->width = codec_context_->width; | |
91 frame->height = codec_context_->height; | |
92 frame->format = codec_context_->pix_fmt; | |
93 | |
94 return 0; | |
95 } | |
96 | |
97 static int callbackGetVideoBuffer(AVCodecContext *s, AVFrame *frame) { | |
98 FFmpegVideoDecoder *vd = static_cast<FFmpegVideoDecoder *>(s->opaque); | |
99 return vd->GetVideoBuffer(frame); | |
100 } | |
101 | |
102 static void callbackReleaseVideoBuffer(AVCodecContext *s, AVFrame *frame) { | |
103 // We're releasing the reference to the buffer allocated in | |
104 // GetVideoBuffer() here, so the explicit Release() here is | |
105 // intentional. Would be nice if scoped_refptr::adopt existed. | |
106 scoped_refptr<VideoFrame> buf = static_cast<VideoFrame *>(frame->opaque); | |
107 buf->Release(); | |
108 memset(frame->data, 0, sizeof(frame->data)); | |
109 frame->opaque = NULL; | |
110 } | |
111 | |
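For readers skimming the new GetVideoBuffer()/callbackReleaseVideoBuffer() pair, the key idea is a reference-count handoff: the reference released from the scoped_refptr in GetVideoBuffer() is parked in AVFrame::opaque, and callbackReleaseVideoBuffer() drops that same reference once FFmpeg is done with the frame. Below is a minimal, self-contained sketch of that pattern; it uses simplified stand-in types (Buffer, Frame) rather than Chromium's scoped_refptr/VideoFrame or FFmpeg's AVFrame, so every name in it is illustrative only.

```cpp
// Illustrative sketch only -- simplified stand-ins, not the CL's types.
#include <cstdio>

// Stand-in for a ref-counted video frame (what scoped_refptr would manage).
struct Buffer {
  int refs = 1;                                  // creator holds one reference
  void AddRef() { ++refs; }
  void Release() { if (--refs == 0) delete this; }
};

// Stand-in for the AVFrame::opaque slot used by the decoder.
struct Frame {
  void* opaque = nullptr;
};

// Analogue of GetVideoBuffer(): allocate the buffer ourselves and park the
// creating reference in frame->opaque for the codec to carry around.
int GetBuffer(Frame* frame) {
  Buffer* buf = new Buffer();
  frame->opaque = buf;
  return 0;
}

// Analogue of callbackReleaseVideoBuffer(): drop the reference that
// GetBuffer() parked in opaque once the codec no longer needs the frame.
void ReleaseBuffer(Frame* frame) {
  static_cast<Buffer*>(frame->opaque)->Release();
  frame->opaque = nullptr;
}

int main() {
  Frame frame;
  GetBuffer(&frame);
  // A consumer wanting to keep the frame would AddRef() here, much like
  // assigning av_frame_->opaque to a scoped_refptr in GetVideoFrame().
  ReleaseBuffer(&frame);                         // balances GetBuffer()
  std::printf("reference handoff balanced\n");
  return 0;
}
```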
64 void FFmpegVideoDecoder::Initialize(const scoped_refptr<DemuxerStream>& stream, | 112 void FFmpegVideoDecoder::Initialize(const scoped_refptr<DemuxerStream>& stream, |
65 const PipelineStatusCB& status_cb, | 113 const PipelineStatusCB& status_cb, |
66 const StatisticsCB& statistics_cb) { | 114 const StatisticsCB& statistics_cb) { |
67 if (!message_loop_) { | 115 if (!message_loop_) { |
68 message_loop_ = message_loop_factory_cb_.Run(); | 116 message_loop_ = message_loop_factory_cb_.Run(); |
69 message_loop_factory_cb_.Reset(); | 117 message_loop_factory_cb_.Reset(); |
70 | 118 |
71 message_loop_->PostTask(FROM_HERE, base::Bind( | 119 message_loop_->PostTask(FROM_HERE, base::Bind( |
72 &FFmpegVideoDecoder::Initialize, this, | 120 &FFmpegVideoDecoder::Initialize, this, |
73 stream, status_cb, statistics_cb)); | 121 stream, status_cb, statistics_cb)); |
(...skipping 23 matching lines...) | |
97 | 145 |
98 // Initialize AVCodecContext structure. | 146 // Initialize AVCodecContext structure. |
99 codec_context_ = avcodec_alloc_context(); | 147 codec_context_ = avcodec_alloc_context(); |
100 VideoDecoderConfigToAVCodecContext(config, codec_context_); | 148 VideoDecoderConfigToAVCodecContext(config, codec_context_); |
101 | 149 |
102 // Enable motion vector search (potentially slow), strong deblocking filter | 150 // Enable motion vector search (potentially slow), strong deblocking filter |
103 // for damaged macroblocks, and set our error detection sensitivity. | 151 // for damaged macroblocks, and set our error detection sensitivity. |
104 codec_context_->error_concealment = FF_EC_GUESS_MVS | FF_EC_DEBLOCK; | 152 codec_context_->error_concealment = FF_EC_GUESS_MVS | FF_EC_DEBLOCK; |
105 codec_context_->err_recognition = AV_EF_CAREFUL; | 153 codec_context_->err_recognition = AV_EF_CAREFUL; |
106 codec_context_->thread_count = GetThreadCount(codec_context_->codec_id); | 154 codec_context_->thread_count = GetThreadCount(codec_context_->codec_id); |
155 codec_context_->opaque = this; | |
156 codec_context_->flags |= CODEC_FLAG_EMU_EDGE; | |
157 codec_context_->get_buffer = callbackGetVideoBuffer; | |
Ami GONE FROM CHROMIUM 2012/06/09 02:51:23: I still think it should be possible to use base::B | |
158 codec_context_->release_buffer = callbackReleaseVideoBuffer; | |
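A side note on the get_buffer/release_buffer wiring above (and on the inline review comment): in this FFmpeg API generation these slots are plain C function pointers, so a bound C++ callback object cannot be installed in them directly; the usual bridge is what the patch does, i.e. stash `this` in codec_context_->opaque and dispatch from a static trampoline. The sketch below shows only that bridge, with made-up types and names; it is not FFmpeg's real AVCodecContext and not the CL's code.

```cpp
// Illustrative sketch only -- made-up types, not FFmpeg's AVCodecContext.
struct CodecContext {
  void* opaque = nullptr;
  int (*get_buffer)(CodecContext* ctx) = nullptr;  // plain C function pointer
};

class Decoder {
 public:
  int GetVideoBuffer() { return 0; }  // the real per-object work lives here

  // A non-static member function cannot be stored in a C function-pointer
  // slot, so a static trampoline recovers the object from ctx->opaque.
  static int Trampoline(CodecContext* ctx) {
    return static_cast<Decoder*>(ctx->opaque)->GetVideoBuffer();
  }

  void Attach(CodecContext* ctx) {
    ctx->opaque = this;
    ctx->get_buffer = &Decoder::Trampoline;
  }
};

int main() {
  CodecContext ctx;
  Decoder decoder;
  decoder.Attach(&ctx);
  return ctx.get_buffer(&ctx);  // dispatches back into decoder.GetVideoBuffer()
}
```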
107 | 159 |
108 AVCodec* codec = avcodec_find_decoder(codec_context_->codec_id); | 160 AVCodec* codec = avcodec_find_decoder(codec_context_->codec_id); |
109 if (!codec) { | 161 if (!codec) { |
110 status_cb.Run(PIPELINE_ERROR_DECODE); | 162 status_cb.Run(PIPELINE_ERROR_DECODE); |
111 return; | 163 return; |
112 } | 164 } |
113 | 165 |
114 if (avcodec_open2(codec_context_, codec, NULL) < 0) { | 166 if (avcodec_open2(codec_context_, codec, NULL) < 0) { |
115 status_cb.Run(PIPELINE_ERROR_DECODE); | 167 status_cb.Run(PIPELINE_ERROR_DECODE); |
116 return; | 168 return; |
(...skipping 238 matching lines...) | |
355 // The decoder is in a bad state and not decoding correctly. | 407 // The decoder is in a bad state and not decoding correctly. |
356 // Checking for NULL avoids a crash in CopyPlane(). | 408 // Checking for NULL avoids a crash in CopyPlane(). |
357 if (!av_frame_->data[VideoFrame::kYPlane] || | 409 if (!av_frame_->data[VideoFrame::kYPlane] || |
358 !av_frame_->data[VideoFrame::kUPlane] || | 410 !av_frame_->data[VideoFrame::kUPlane] || |
359 !av_frame_->data[VideoFrame::kVPlane]) { | 411 !av_frame_->data[VideoFrame::kVPlane]) { |
360 LOG(ERROR) << "Video frame was produced yet has invalid frame data."; | 412 LOG(ERROR) << "Video frame was produced yet has invalid frame data."; |
361 *video_frame = NULL; | 413 *video_frame = NULL; |
362 return false; | 414 return false; |
363 } | 415 } |
364 | 416 |
365 // We've got a frame! Make sure we have a place to store it. | 417 if (!av_frame_->opaque) { |
366 *video_frame = AllocateVideoFrame(); | 418 LOG(ERROR) << "VideoFrame object associated with frame data not set."; |
367 if (!(*video_frame)) { | |
368 LOG(ERROR) << "Failed to allocate video frame"; | |
369 return false; | 419 return false; |
370 } | 420 } |
421 *video_frame = static_cast<VideoFrame *>(av_frame_->opaque); | |
371 | 422 |
372 // Determine timestamp and calculate the duration based on the repeat picture | 423 // Determine timestamp and calculate the duration based on the repeat picture |
373 // count. According to FFmpeg docs, the total duration can be calculated as | 424 // count. According to FFmpeg docs, the total duration can be calculated as |
374 // follows: | 425 // follows: |
375 // fps = 1 / time_base | 426 // fps = 1 / time_base |
376 // | 427 // |
377 // duration = (1 / fps) + (repeat_pict) / (2 * fps) | 428 // duration = (1 / fps) + (repeat_pict) / (2 * fps) |
378 // = (2 + repeat_pict) / (2 * fps) | 429 // = (2 + repeat_pict) / (2 * fps) |
379 // = (2 + repeat_pict) / (2 * (1 / time_base)) | 430 // = (2 + repeat_pict) / (2 * (1 / time_base)) |
380 DCHECK_LE(av_frame_->repeat_pict, 2); // Sanity check. | 431 DCHECK_LE(av_frame_->repeat_pict, 2); // Sanity check. |
381 AVRational doubled_time_base; | 432 AVRational doubled_time_base; |
382 doubled_time_base.num = frame_rate_denominator_; | 433 doubled_time_base.num = frame_rate_denominator_; |
383 doubled_time_base.den = frame_rate_numerator_ * 2; | 434 doubled_time_base.den = frame_rate_numerator_ * 2; |
384 | 435 |
385 (*video_frame)->SetTimestamp( | 436 (*video_frame)->SetTimestamp( |
386 base::TimeDelta::FromMicroseconds(av_frame_->reordered_opaque)); | 437 base::TimeDelta::FromMicroseconds(av_frame_->reordered_opaque)); |
387 (*video_frame)->SetDuration( | 438 (*video_frame)->SetDuration( |
388 ConvertFromTimeBase(doubled_time_base, 2 + av_frame_->repeat_pict)); | 439 ConvertFromTimeBase(doubled_time_base, 2 + av_frame_->repeat_pict)); |
389 | 440 |
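A quick worked check of the duration formula, with illustrative numbers that are not from the CL: for 29.97 fps content, frame_rate_numerator_ = 30000 and frame_rate_denominator_ = 1001, so doubled_time_base = 1001/60000. With repeat_pict = 1, the computed duration is (2 + 1) * 1001 / 60000 s = 3003/60000 s ≈ 50.05 ms, i.e. one and a half frame periods, which is the expected result for a single repeated field.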
390 // Copy the frame data since FFmpeg reuses internal buffers for AVFrame | |
391 // output, meaning the data is only valid until the next | |
392 // avcodec_decode_video() call. | |
393 int y_rows = codec_context_->height; | |
394 int uv_rows = codec_context_->height; | |
395 if (codec_context_->pix_fmt == PIX_FMT_YUV420P) { | |
396 uv_rows /= 2; | |
397 } | |
398 | |
399 CopyYPlane(av_frame_->data[0], av_frame_->linesize[0], y_rows, *video_frame); | |
400 CopyUPlane(av_frame_->data[1], av_frame_->linesize[1], uv_rows, *video_frame); | |
401 CopyVPlane(av_frame_->data[2], av_frame_->linesize[2], uv_rows, *video_frame); | |
402 | |
403 return true; | 441 return true; |
404 } | 442 } |
405 | 443 |
406 void FFmpegVideoDecoder::DeliverFrame( | 444 void FFmpegVideoDecoder::DeliverFrame( |
407 const scoped_refptr<VideoFrame>& video_frame) { | 445 const scoped_refptr<VideoFrame>& video_frame) { |
408 // Reset the callback before running to protect against reentrancy. | 446 // Reset the callback before running to protect against reentrancy. |
409 base::ResetAndReturn(&read_cb_).Run(kOk, video_frame); | 447 base::ResetAndReturn(&read_cb_).Run(kOk, video_frame); |
410 } | 448 } |
411 | 449 |
412 void FFmpegVideoDecoder::ReleaseFFmpegResources() { | 450 void FFmpegVideoDecoder::ReleaseFFmpegResources() { |
413 if (codec_context_) { | 451 if (codec_context_) { |
414 av_free(codec_context_->extradata); | 452 av_free(codec_context_->extradata); |
415 avcodec_close(codec_context_); | 453 avcodec_close(codec_context_); |
416 av_free(codec_context_); | 454 av_free(codec_context_); |
417 codec_context_ = NULL; | 455 codec_context_ = NULL; |
418 } | 456 } |
419 if (av_frame_) { | 457 if (av_frame_) { |
420 av_free(av_frame_); | 458 av_free(av_frame_); |
421 av_frame_ = NULL; | 459 av_frame_ = NULL; |
422 } | 460 } |
423 } | 461 } |
424 | 462 |
425 scoped_refptr<VideoFrame> FFmpegVideoDecoder::AllocateVideoFrame() { | |
426 VideoFrame::Format format = PixelFormatToVideoFormat(codec_context_->pix_fmt); | |
427 size_t width = codec_context_->width; | |
428 size_t height = codec_context_->height; | |
429 | |
430 return VideoFrame::CreateFrame(format, width, height, | |
431 kNoTimestamp(), kNoTimestamp()); | |
432 } | |
433 | |
434 } // namespace media | 463 } // namespace media |