| OLD | NEW |
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "media/filters/ffmpeg_video_decoder.h" | 5 #include "media/filters/ffmpeg_video_decoder.h" |
| 6 | 6 |
| 7 #include "base/bind.h" | 7 #include "base/bind.h" |
| 8 #include "base/callback_helpers.h" | 8 #include "base/callback_helpers.h" |
| 9 #include "base/command_line.h" | 9 #include "base/command_line.h" |
| 10 #include "base/message_loop.h" | 10 #include "base/message_loop.h" |
| (...skipping 44 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 55 : message_loop_factory_cb_(message_loop_cb), | 55 : message_loop_factory_cb_(message_loop_cb), |
| 56 message_loop_(NULL), | 56 message_loop_(NULL), |
| 57 state_(kUninitialized), | 57 state_(kUninitialized), |
| 58 codec_context_(NULL), | 58 codec_context_(NULL), |
| 59 av_frame_(NULL), | 59 av_frame_(NULL), |
| 60 frame_rate_numerator_(0), | 60 frame_rate_numerator_(0), |
| 61 frame_rate_denominator_(0), | 61 frame_rate_denominator_(0), |
| 62 decryptor_(NULL) { | 62 decryptor_(NULL) { |
| 63 } | 63 } |
| 64 | 64 |
| 65 int FFmpegVideoDecoder::GetVideoBuffer(AVCodecContext* codec_context, |
| 66 AVFrame* frame) { |
| 67 // Don't use |codec_context_| here! With threaded decoding, |
| 68 // it will contain unsynchronized width/height/pix_fmt values, |
| 69 // whereas |codec_context| contains the current thread's |
| 70 // updated width/height/pix_fmt, which can change for adaptive |
| 71 // content. |
| 72 VideoFrame::Format format = PixelFormatToVideoFormat(codec_context->pix_fmt); |
| 73 if (format == VideoFrame::INVALID) |
| 74 return AVERROR(EINVAL); |
| 75 DCHECK(format == VideoFrame::YV12 || format == VideoFrame::YV16); |
| 76 |
| 77 int width = codec_context->width; |
| 78 int height = codec_context->height; |
| 79 int ret; |
| 80 if ((ret = av_image_check_size(width, height, 0, NULL)) < 0) |
| 81 return ret; |
| 82 |
| 83 scoped_refptr<VideoFrame> video_frame = |
| 84 VideoFrame::CreateFrame(format, width, height, |
| 85 kNoTimestamp(), kNoTimestamp()); |
| 86 |
| 87 for (int i = 0; i < 3; i++) { |
| 88 frame->base[i] = video_frame->data(i); |
| 89 frame->data[i] = video_frame->data(i); |
| 90 frame->linesize[i] = video_frame->stride(i); |
| 91 } |
| 92 |
| 93 frame->opaque = video_frame.release(); |
| 94 frame->type = FF_BUFFER_TYPE_USER; |
| 95 frame->pkt_pts = codec_context->pkt ? codec_context->pkt->pts : |
| 96 AV_NOPTS_VALUE; |
| 97 frame->width = codec_context->width; |
| 98 frame->height = codec_context->height; |
| 99 frame->format = codec_context->pix_fmt; |
| 100 |
| 101 return 0; |
| 102 } |
| 103 |
| 104 static int GetVideoBufferImpl(AVCodecContext* s, AVFrame* frame) { |
| 105 FFmpegVideoDecoder* vd = static_cast<FFmpegVideoDecoder*>(s->opaque); |
| 106 return vd->GetVideoBuffer(s, frame); |
| 107 } |
| 108 |
| 109 static void ReleaseVideoBufferImpl(AVCodecContext* s, AVFrame* frame) { |
| 110 // We're releasing the reference to the buffer allocated in |
| 111 // GetVideoBuffer() here, so the explicit Release() here is |
| 112 // intentional. |
| 113 scoped_refptr<VideoFrame> video_frame = |
| 114 static_cast<VideoFrame*>(frame->opaque); |
| 115 video_frame->Release(); |
| 116 |
| 117 // The FFmpeg API expects us to zero the data pointers in |
| 118 // this callback. |
| 119 memset(frame->data, 0, sizeof(frame->data)); |
| 120 frame->opaque = NULL; |
| 121 } |
| 122 |
| 65 void FFmpegVideoDecoder::Initialize(const scoped_refptr<DemuxerStream>& stream, | 123 void FFmpegVideoDecoder::Initialize(const scoped_refptr<DemuxerStream>& stream, |
| 66 const PipelineStatusCB& status_cb, | 124 const PipelineStatusCB& status_cb, |
| 67 const StatisticsCB& statistics_cb) { | 125 const StatisticsCB& statistics_cb) { |
| 68 // Ensure FFmpeg has been initialized | 126 // Ensure FFmpeg has been initialized |
| 69 FFmpegGlue::GetInstance(); | 127 FFmpegGlue::GetInstance(); |
| 70 | 128 |
| 71 if (!message_loop_) { | 129 if (!message_loop_) { |
| 72 message_loop_ = message_loop_factory_cb_.Run(); | 130 message_loop_ = message_loop_factory_cb_.Run(); |
| 73 message_loop_factory_cb_.Reset(); | 131 message_loop_factory_cb_.Reset(); |
| 74 | 132 |
| (...skipping 26 matching lines...) Expand all Loading... |
| 101 | 159 |
| 102 // Initialize AVCodecContext structure. | 160 // Initialize AVCodecContext structure. |
| 103 codec_context_ = avcodec_alloc_context3(NULL); | 161 codec_context_ = avcodec_alloc_context3(NULL); |
| 104 VideoDecoderConfigToAVCodecContext(config, codec_context_); | 162 VideoDecoderConfigToAVCodecContext(config, codec_context_); |
| 105 | 163 |
| 106 // Enable motion vector search (potentially slow), strong deblocking filter | 164 // Enable motion vector search (potentially slow), strong deblocking filter |
| 107 // for damaged macroblocks, and set our error detection sensitivity. | 165 // for damaged macroblocks, and set our error detection sensitivity. |
| 108 codec_context_->error_concealment = FF_EC_GUESS_MVS | FF_EC_DEBLOCK; | 166 codec_context_->error_concealment = FF_EC_GUESS_MVS | FF_EC_DEBLOCK; |
| 109 codec_context_->err_recognition = AV_EF_CAREFUL; | 167 codec_context_->err_recognition = AV_EF_CAREFUL; |
| 110 codec_context_->thread_count = GetThreadCount(codec_context_->codec_id); | 168 codec_context_->thread_count = GetThreadCount(codec_context_->codec_id); |
| 169 codec_context_->opaque = this; |
| 170 codec_context_->flags |= CODEC_FLAG_EMU_EDGE; |
| 171 codec_context_->get_buffer = GetVideoBufferImpl; |
| 172 codec_context_->release_buffer = ReleaseVideoBufferImpl; |
| 111 | 173 |
| 112 AVCodec* codec = avcodec_find_decoder(codec_context_->codec_id); | 174 AVCodec* codec = avcodec_find_decoder(codec_context_->codec_id); |
| 113 if (!codec) { | 175 if (!codec) { |
| 114 status_cb.Run(PIPELINE_ERROR_DECODE); | 176 status_cb.Run(PIPELINE_ERROR_DECODE); |
| 115 return; | 177 return; |
| 116 } | 178 } |
| 117 | 179 |
| 118 if (avcodec_open2(codec_context_, codec, NULL) < 0) { | 180 if (avcodec_open2(codec_context_, codec, NULL) < 0) { |
| 119 status_cb.Run(PIPELINE_ERROR_DECODE); | 181 status_cb.Run(PIPELINE_ERROR_DECODE); |
| 120 return; | 182 return; |
| (...skipping 241 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 362 // The decoder is in a bad state and not decoding correctly. | 424 // The decoder is in a bad state and not decoding correctly. |
| 363 // Checking for NULL avoids a crash in CopyPlane(). | 425 // Checking for NULL avoids a crash in CopyPlane(). |
| 364 if (!av_frame_->data[VideoFrame::kYPlane] || | 426 if (!av_frame_->data[VideoFrame::kYPlane] || |
| 365 !av_frame_->data[VideoFrame::kUPlane] || | 427 !av_frame_->data[VideoFrame::kUPlane] || |
| 366 !av_frame_->data[VideoFrame::kVPlane]) { | 428 !av_frame_->data[VideoFrame::kVPlane]) { |
| 367 LOG(ERROR) << "Video frame was produced yet has invalid frame data."; | 429 LOG(ERROR) << "Video frame was produced yet has invalid frame data."; |
| 368 *video_frame = NULL; | 430 *video_frame = NULL; |
| 369 return false; | 431 return false; |
| 370 } | 432 } |
| 371 | 433 |
| 372 // We've got a frame! Make sure we have a place to store it. | 434 if (!av_frame_->opaque) { |
| 373 *video_frame = AllocateVideoFrame(); | 435 LOG(ERROR) << "VideoFrame object associated with frame data not set."; |
| 374 if (!(*video_frame)) { | |
| 375 LOG(ERROR) << "Failed to allocate video frame"; | |
| 376 return false; | 436 return false; |
| 377 } | 437 } |
| 438 *video_frame = static_cast<VideoFrame*>(av_frame_->opaque); |
| 378 | 439 |
| 379 // Determine timestamp and calculate the duration based on the repeat picture | 440 // Determine timestamp and calculate the duration based on the repeat picture |
| 380 // count. According to FFmpeg docs, the total duration can be calculated as | 441 // count. According to FFmpeg docs, the total duration can be calculated as |
| 381 // follows: | 442 // follows: |
| 382 // fps = 1 / time_base | 443 // fps = 1 / time_base |
| 383 // | 444 // |
| 384 // duration = (1 / fps) + (repeat_pict) / (2 * fps) | 445 // duration = (1 / fps) + (repeat_pict) / (2 * fps) |
| 385 // = (2 + repeat_pict) / (2 * fps) | 446 // = (2 + repeat_pict) / (2 * fps) |
| 386 // = (2 + repeat_pict) / (2 * (1 / time_base)) | 447 // = (2 + repeat_pict) / (2 * (1 / time_base)) |
| 387 DCHECK_LE(av_frame_->repeat_pict, 2); // Sanity check. | 448 DCHECK_LE(av_frame_->repeat_pict, 2); // Sanity check. |
| 388 AVRational doubled_time_base; | 449 AVRational doubled_time_base; |
| 389 doubled_time_base.num = frame_rate_denominator_; | 450 doubled_time_base.num = frame_rate_denominator_; |
| 390 doubled_time_base.den = frame_rate_numerator_ * 2; | 451 doubled_time_base.den = frame_rate_numerator_ * 2; |
| 391 | 452 |
| 392 (*video_frame)->SetTimestamp( | 453 (*video_frame)->SetTimestamp( |
| 393 base::TimeDelta::FromMicroseconds(av_frame_->reordered_opaque)); | 454 base::TimeDelta::FromMicroseconds(av_frame_->reordered_opaque)); |
| 394 (*video_frame)->SetDuration( | 455 (*video_frame)->SetDuration( |
| 395 ConvertFromTimeBase(doubled_time_base, 2 + av_frame_->repeat_pict)); | 456 ConvertFromTimeBase(doubled_time_base, 2 + av_frame_->repeat_pict)); |
| 396 | 457 |
| 397 // Copy the frame data since FFmpeg reuses internal buffers for AVFrame | |
| 398 // output, meaning the data is only valid until the next | |
| 399 // avcodec_decode_video() call. | |
| 400 int y_rows = codec_context_->height; | |
| 401 int uv_rows = codec_context_->height; | |
| 402 if (codec_context_->pix_fmt == PIX_FMT_YUV420P) { | |
| 403 uv_rows /= 2; | |
| 404 } | |
| 405 | |
| 406 CopyYPlane(av_frame_->data[0], av_frame_->linesize[0], y_rows, *video_frame); | |
| 407 CopyUPlane(av_frame_->data[1], av_frame_->linesize[1], uv_rows, *video_frame); | |
| 408 CopyVPlane(av_frame_->data[2], av_frame_->linesize[2], uv_rows, *video_frame); | |
| 409 | |
| 410 return true; | 458 return true; |
| 411 } | 459 } |
| 412 | 460 |
| 413 void FFmpegVideoDecoder::DeliverFrame( | 461 void FFmpegVideoDecoder::DeliverFrame( |
| 414 const scoped_refptr<VideoFrame>& video_frame) { | 462 const scoped_refptr<VideoFrame>& video_frame) { |
| 415 // Reset the callback before running to protect against reentrancy. | 463 // Reset the callback before running to protect against reentrancy. |
| 416 base::ResetAndReturn(&read_cb_).Run(kOk, video_frame); | 464 base::ResetAndReturn(&read_cb_).Run(kOk, video_frame); |
| 417 } | 465 } |
| 418 | 466 |
| 419 void FFmpegVideoDecoder::ReleaseFFmpegResources() { | 467 void FFmpegVideoDecoder::ReleaseFFmpegResources() { |
| 420 if (codec_context_) { | 468 if (codec_context_) { |
| 421 av_free(codec_context_->extradata); | 469 av_free(codec_context_->extradata); |
| 422 avcodec_close(codec_context_); | 470 avcodec_close(codec_context_); |
| 423 av_free(codec_context_); | 471 av_free(codec_context_); |
| 424 codec_context_ = NULL; | 472 codec_context_ = NULL; |
| 425 } | 473 } |
| 426 if (av_frame_) { | 474 if (av_frame_) { |
| 427 av_free(av_frame_); | 475 av_free(av_frame_); |
| 428 av_frame_ = NULL; | 476 av_frame_ = NULL; |
| 429 } | 477 } |
| 430 } | 478 } |
| 431 | 479 |
| 432 scoped_refptr<VideoFrame> FFmpegVideoDecoder::AllocateVideoFrame() { | |
| 433 VideoFrame::Format format = PixelFormatToVideoFormat(codec_context_->pix_fmt); | |
| 434 size_t width = codec_context_->width; | |
| 435 size_t height = codec_context_->height; | |
| 436 | |
| 437 return VideoFrame::CreateFrame(format, width, height, | |
| 438 kNoTimestamp(), kNoTimestamp()); | |
| 439 } | |
| 440 | |
| 441 } // namespace media | 480 } // namespace media |
| OLD | NEW |