Chromium Code Reviews| Index: media/filters/ffmpeg_video_decoder.cc |
| diff --git a/media/filters/ffmpeg_video_decoder.cc b/media/filters/ffmpeg_video_decoder.cc |
| index bc2346ddf8843526fde8ae980e847488e7ba0b13..ac9e29f485c106c9f9dbc5f0d8f3b0b90cab51dd 100644 |
| --- a/media/filters/ffmpeg_video_decoder.cc |
| +++ b/media/filters/ffmpeg_video_decoder.cc |
| @@ -54,12 +54,25 @@ static int GetThreadCount(AVCodecID codec_id) { |
| return decode_threads; |
| } |
| +static int GetVideoBufferImpl(struct AVCodecContext* s, |
|
wolenetz
2014/05/16 19:45:58
Curious - why do we need this Impl vs just assigning the member function directly? [text truncated in extraction]
DaleCurtis
2014/05/16 19:54:26
Because video uses a frame pool which is owned by the decoder, so a static trampoline is needed to recover the FFmpegVideoDecoder from the codec context's opaque pointer. [text truncated in extraction]
|
| + AVFrame* frame, |
| + int flags) { |
| + FFmpegVideoDecoder* decoder = static_cast<FFmpegVideoDecoder*>(s->opaque); |
| + return decoder->GetVideoBuffer(s, frame, flags); |
| +} |
| + |
| +static void ReleaseVideoBufferImpl(void* opaque, uint8* data) { |
| + scoped_refptr<VideoFrame> video_frame; |
| + video_frame.swap(reinterpret_cast<VideoFrame**>(&opaque)); |
| +} |
| + |
| FFmpegVideoDecoder::FFmpegVideoDecoder( |
| const scoped_refptr<base::SingleThreadTaskRunner>& task_runner) |
| : task_runner_(task_runner), state_(kUninitialized) {} |
| -int FFmpegVideoDecoder::GetVideoBuffer(AVCodecContext* codec_context, |
| - AVFrame* frame) { |
| +int FFmpegVideoDecoder::GetVideoBuffer(struct AVCodecContext* codec_context, |
| + AVFrame* frame, |
| + int flags) { |
| // Don't use |codec_context_| here! With threaded decoding, |
| // it will contain unsynchronized width/height/pix_fmt values, |
| // whereas |codec_context| contains the current threads's |
| @@ -108,31 +121,24 @@ int FFmpegVideoDecoder::GetVideoBuffer(AVCodecContext* codec_context, |
| frame->linesize[i] = video_frame->stride(i); |
| } |
| - frame->opaque = NULL; |
| - video_frame.swap(reinterpret_cast<VideoFrame**>(&frame->opaque)); |
| - frame->type = FF_BUFFER_TYPE_USER; |
| frame->width = coded_size.width(); |
| frame->height = coded_size.height(); |
| frame->format = codec_context->pix_fmt; |
| + // Now create an AVBufferRef for the data just allocated. It will own the |
| + // reference to the VideoFrame object. |
| + void* opaque = NULL; |
| + video_frame.swap(reinterpret_cast<VideoFrame**>(&opaque)); |
| + frame->type = FF_BUFFER_TYPE_USER; |
| + frame->buf[0] = |
| + av_buffer_create(frame->data[0], |
| + VideoFrame::AllocationSize(format, coded_size), |
| + ReleaseVideoBufferImpl, |
| + opaque, |
| + 0); |
| return 0; |
| } |
| -static int GetVideoBufferImpl(AVCodecContext* s, AVFrame* frame) { |
| - FFmpegVideoDecoder* decoder = static_cast<FFmpegVideoDecoder*>(s->opaque); |
| - return decoder->GetVideoBuffer(s, frame); |
| -} |
| - |
| -static void ReleaseVideoBufferImpl(AVCodecContext* s, AVFrame* frame) { |
| - scoped_refptr<VideoFrame> video_frame; |
| - video_frame.swap(reinterpret_cast<VideoFrame**>(&frame->opaque)); |
| - |
| - // The FFmpeg API expects us to zero the data pointers in |
| - // this callback |
| - memset(frame->data, 0, sizeof(frame->data)); |
| - frame->opaque = NULL; |
| -} |
| - |
| void FFmpegVideoDecoder::Initialize(const VideoDecoderConfig& config, |
| bool low_delay, |
| const PipelineStatusCB& status_cb) { |
| @@ -272,9 +278,6 @@ bool FFmpegVideoDecoder::FFmpegDecode( |
| scoped_refptr<VideoFrame>* video_frame) { |
| DCHECK(video_frame); |
| - // Reset frame to default values. |
| - avcodec_get_frame_defaults(av_frame_.get()); |
| - |
| // Create a packet for input data. |
| // Due to FFmpeg API changes we no longer have const read-only pointers. |
| AVPacket packet; |
| @@ -323,18 +326,17 @@ bool FFmpegVideoDecoder::FFmpegDecode( |
| !av_frame_->data[VideoFrame::kVPlane]) { |
| LOG(ERROR) << "Video frame was produced yet has invalid frame data."; |
| *video_frame = NULL; |
| + av_frame_unref(av_frame_.get()); |
| return false; |
| } |
| - if (!av_frame_->opaque) { |
| - LOG(ERROR) << "VideoFrame object associated with frame data not set."; |
| - return false; |
| - } |
| - *video_frame = static_cast<VideoFrame*>(av_frame_->opaque); |
| + *video_frame = |
| + reinterpret_cast<VideoFrame*>(av_buffer_get_opaque(av_frame_->buf[0])); |
| (*video_frame)->set_timestamp( |
| base::TimeDelta::FromMicroseconds(av_frame_->reordered_opaque)); |
| + av_frame_unref(av_frame_.get()); |
| return true; |
| } |
| @@ -351,15 +353,12 @@ bool FFmpegVideoDecoder::ConfigureDecoder(bool low_delay) { |
| codec_context_.reset(avcodec_alloc_context3(NULL)); |
| VideoDecoderConfigToAVCodecContext(config_, codec_context_.get()); |
| - // Enable motion vector search (potentially slow), strong deblocking filter |
| - // for damaged macroblocks, and set our error detection sensitivity. |
| - codec_context_->error_concealment = FF_EC_GUESS_MVS | FF_EC_DEBLOCK; |
| codec_context_->thread_count = GetThreadCount(codec_context_->codec_id); |
| codec_context_->thread_type = low_delay ? FF_THREAD_SLICE : FF_THREAD_FRAME; |
| codec_context_->opaque = this; |
| codec_context_->flags |= CODEC_FLAG_EMU_EDGE; |
| - codec_context_->get_buffer = GetVideoBufferImpl; |
| - codec_context_->release_buffer = ReleaseVideoBufferImpl; |
| + codec_context_->get_buffer2 = GetVideoBufferImpl; |
| + codec_context_->refcounted_frames = 1; |
| AVCodec* codec = avcodec_find_decoder(codec_context_->codec_id); |
| if (!codec || avcodec_open2(codec_context_.get(), codec, NULL) < 0) { |