Chromium Code Reviews

Unified Diff: media/filters/vpx_video_decoder.cc

Issue 11644078: Add wrapper class to media for support of VP9 video, and add a command line flag to enable the supp… (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Roll libvpx_revision to pick up a bug fix. Created 8 years ago
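Note: this file only adds the decoder wrapper itself; the command-line flag mentioned in the issue description is defined elsewhere in the CL (media/base/media_switches.*) and is not shown in this diff. As a rough, hypothetical sketch of how such a flag could gate creation of the new decoder, where the flag name kEnableVp9Playback and the helper function are assumptions and not part of this patch:

// Hypothetical helper, not part of this patch: returns a VpxVideoDecoder only
// when the (assumed) --enable-vp9-playback switch is present on the command
// line; otherwise the existing FFmpeg path is used.
#include "base/command_line.h"
#include "media/base/media_switches.h"
#include "media/filters/vpx_video_decoder.h"

namespace media {

scoped_refptr<VideoDecoder> MaybeCreateVpxVideoDecoder(
    const scoped_refptr<base::MessageLoopProxy>& message_loop) {
  const CommandLine* cmd_line = CommandLine::ForCurrentProcess();
  if (!cmd_line->HasSwitch(switches::kEnableVp9Playback))  // assumed flag name
    return NULL;
  return new VpxVideoDecoder(message_loop);
}

}  // namespace media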
Index: media/filters/vpx_video_decoder.cc
diff --git a/media/filters/ffmpeg_video_decoder.cc b/media/filters/vpx_video_decoder.cc
similarity index 34%
copy from media/filters/ffmpeg_video_decoder.cc
copy to media/filters/vpx_video_decoder.cc
index 6f6c49506d54ebe41f26fd474d913bdbd67c202a..662b8cc62bf8d2c0ec67cbfd56fc7825676bee4d 100644
--- a/media/filters/ffmpeg_video_decoder.cc
+++ b/media/filters/vpx_video_decoder.cc
@@ -2,46 +2,45 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/filters/ffmpeg_video_decoder.h"
-
-#include <algorithm>
-#include <string>
+#include "media/filters/vpx_video_decoder.h"
#include "base/bind.h"
#include "base/callback_helpers.h"
#include "base/command_line.h"
#include "base/location.h"
+#include "base/logging.h"
#include "base/message_loop_proxy.h"
#include "base/string_number_conversions.h"
#include "media/base/bind_to_loop.h"
#include "media/base/decoder_buffer.h"
#include "media/base/demuxer_stream.h"
-#include "media/base/limits.h"
#include "media/base/media_switches.h"
#include "media/base/pipeline.h"
#include "media/base/video_decoder_config.h"
#include "media/base/video_frame.h"
#include "media/base/video_util.h"
-#include "media/ffmpeg/ffmpeg_common.h"
-#include "media/filters/ffmpeg_glue.h"
+
+// Include libvpx header files.
+// VPX_CODEC_DISABLE_COMPAT excludes parts of the libvpx API that provide
+// backwards compatibility for legacy applications using the library.
+#define VPX_CODEC_DISABLE_COMPAT 1
+extern "C" {
+#include "third_party/libvpx/libvpx.h"
+}
namespace media {
// Always try to use three threads for video decoding. There is little reason
// not to since current day CPUs tend to be multi-core and we measured
// performance benefits on older machines such as P4s with hyperthreading.
-//
-// Handling decoding on separate threads also frees up the pipeline thread to
-// continue processing. Although it'd be nice to have the option of a single
-// decoding thread, FFmpeg treats having one thread the same as having zero
-// threads (i.e., avcodec_decode_video() will execute on the calling thread).
-// Yet another reason for having two threads :)
static const int kDecodeThreads = 2;
static const int kMaxDecodeThreads = 16;
-// Returns the number of threads given the FFmpeg CodecID. Also inspects the
-// command line for a valid --video-threads flag.
-static int GetThreadCount(CodecID codec_id) {
+// Returns the number of threads.
+static int GetThreadCount() {
+ // TODO(scherkus): De-duplicate this function and the one used by
+ // FFmpegVideoDecoder.
+
// Refer to http://crbug.com/93932 for tsan suppressions on decoding.
int decode_threads = kDecodeThreads;
@@ -55,91 +54,27 @@ static int GetThreadCount(CodecID codec_id) {
return decode_threads;
}
-FFmpegVideoDecoder::FFmpegVideoDecoder(
+VpxVideoDecoder::VpxVideoDecoder(
const scoped_refptr<base::MessageLoopProxy>& message_loop)
: message_loop_(message_loop),
state_(kUninitialized),
- codec_context_(NULL),
- av_frame_(NULL) {
-}
-
-int FFmpegVideoDecoder::GetVideoBuffer(AVCodecContext* codec_context,
- AVFrame* frame) {
- // Don't use |codec_context_| here! With threaded decoding,
- // it will contain unsynchronized width/height/pix_fmt values,
- // whereas |codec_context| contains the current threads's
- // updated width/height/pix_fmt, which can change for adaptive
- // content.
- VideoFrame::Format format = PixelFormatToVideoFormat(codec_context->pix_fmt);
- if (format == VideoFrame::INVALID)
- return AVERROR(EINVAL);
- DCHECK(format == VideoFrame::YV12 || format == VideoFrame::YV16);
-
- gfx::Size size(codec_context->width, codec_context->height);
- int ret;
- if ((ret = av_image_check_size(size.width(), size.height(), 0, NULL)) < 0)
- return ret;
-
- gfx::Size natural_size;
- if (codec_context->sample_aspect_ratio.num > 0) {
- natural_size = GetNaturalSize(size,
- codec_context->sample_aspect_ratio.num,
- codec_context->sample_aspect_ratio.den);
- } else {
- natural_size = demuxer_stream_->video_decoder_config().natural_size();
- }
-
- if (!VideoFrame::IsValidConfig(format, size, gfx::Rect(size), natural_size))
- return AVERROR(EINVAL);
-
- scoped_refptr<VideoFrame> video_frame =
- VideoFrame::CreateFrame(format, size, gfx::Rect(size), natural_size,
- kNoTimestamp());
-
- for (int i = 0; i < 3; i++) {
- frame->base[i] = video_frame->data(i);
- frame->data[i] = video_frame->data(i);
- frame->linesize[i] = video_frame->stride(i);
- }
-
- frame->opaque = NULL;
- video_frame.swap(reinterpret_cast<VideoFrame**>(&frame->opaque));
- frame->type = FF_BUFFER_TYPE_USER;
- frame->pkt_pts = codec_context->pkt ? codec_context->pkt->pts :
- AV_NOPTS_VALUE;
- frame->width = codec_context->width;
- frame->height = codec_context->height;
- frame->format = codec_context->pix_fmt;
-
- return 0;
-}
-
-static int GetVideoBufferImpl(AVCodecContext* s, AVFrame* frame) {
- FFmpegVideoDecoder* vd = static_cast<FFmpegVideoDecoder*>(s->opaque);
- return vd->GetVideoBuffer(s, frame);
+ vpx_codec_(NULL) {
}
-static void ReleaseVideoBufferImpl(AVCodecContext* s, AVFrame* frame) {
- scoped_refptr<VideoFrame> video_frame;
- video_frame.swap(reinterpret_cast<VideoFrame**>(&frame->opaque));
-
- // The FFmpeg API expects us to zero the data pointers in
- // this callback
- memset(frame->data, 0, sizeof(frame->data));
- frame->opaque = NULL;
+VpxVideoDecoder::~VpxVideoDecoder() {
+ DCHECK_EQ(kUninitialized, state_);
+ CloseDecoder();
}
-void FFmpegVideoDecoder::Initialize(const scoped_refptr<DemuxerStream>& stream,
- const PipelineStatusCB& status_cb,
- const StatisticsCB& statistics_cb) {
+void VpxVideoDecoder::Initialize(
+ const scoped_refptr<DemuxerStream>& stream,
+ const PipelineStatusCB& status_cb,
+ const StatisticsCB& statistics_cb) {
DCHECK(message_loop_->BelongsToCurrentThread());
- PipelineStatusCB initialize_cb = BindToCurrentLoop(status_cb);
-
- FFmpegGlue::InitializeFFmpeg();
DCHECK(!demuxer_stream_) << "Already initialized.";
if (!stream) {
- initialize_cb.Run(PIPELINE_ERROR_DECODE);
+ status_cb.Run(PIPELINE_ERROR_DECODE);
return;
}
@@ -147,16 +82,57 @@ void FFmpegVideoDecoder::Initialize(const scoped_refptr<DemuxerStream>& stream,
statistics_cb_ = statistics_cb;
if (!ConfigureDecoder()) {
- initialize_cb.Run(DECODER_ERROR_NOT_SUPPORTED);
+ status_cb.Run(DECODER_ERROR_NOT_SUPPORTED);
return;
}
// Success!
state_ = kNormal;
- initialize_cb.Run(PIPELINE_OK);
+ status_cb.Run(PIPELINE_OK);
}
-void FFmpegVideoDecoder::Read(const ReadCB& read_cb) {
+bool VpxVideoDecoder::ConfigureDecoder() {
+ const VideoDecoderConfig& config = demuxer_stream_->video_decoder_config();
+ if (!config.IsValidConfig()) {
+ DLOG(ERROR) << "Invalid video stream config: "
+ << config.AsHumanReadableString();
+ return false;
+ }
+
+ if (config.codec() != kCodecVP9)
+ return false;
+
+ CloseDecoder();
+
+ vpx_codec_ = new vpx_codec_ctx();
+ vpx_codec_dec_cfg_t vpx_config = {0};
+ vpx_config.w = config.coded_size().width();
+ vpx_config.h = config.coded_size().height();
+ vpx_config.threads = GetThreadCount();
+
+ vpx_codec_err_t status = vpx_codec_dec_init(vpx_codec_,
+ vpx_codec_vp9_dx(),
+ &vpx_config,
+ 0);
+ if (status != VPX_CODEC_OK) {
+ LOG(ERROR) << "vpx_codec_dec_init failed, status=" << status;
+ delete vpx_codec_;
+ vpx_codec_ = NULL;
+ return false;
+ }
+
+ return true;
+}
+
+void VpxVideoDecoder::CloseDecoder() {
+ if (vpx_codec_) {
+ vpx_codec_destroy(vpx_codec_);
+ delete vpx_codec_;
+ vpx_codec_ = NULL;
+ }
+}
+
+void VpxVideoDecoder::Read(const ReadCB& read_cb) {
DCHECK(message_loop_->BelongsToCurrentThread());
DCHECK(!read_cb.is_null());
CHECK_NE(state_, kUninitialized);
@@ -165,14 +141,14 @@ void FFmpegVideoDecoder::Read(const ReadCB& read_cb) {
// Return empty frames if decoding has finished.
if (state_ == kDecodeFinished) {
- base::ResetAndReturn(&read_cb_).Run(kOk, VideoFrame::CreateEmptyFrame());
+ read_cb.Run(kOk, VideoFrame::CreateEmptyFrame());
return;
}
ReadFromDemuxerStream();
}
-void FFmpegVideoDecoder::Reset(const base::Closure& closure) {
+void VpxVideoDecoder::Reset(const base::Closure& closure) {
DCHECK(message_loop_->BelongsToCurrentThread());
DCHECK(reset_cb_.is_null());
reset_cb_ = BindToCurrentLoop(closure);
@@ -184,44 +160,31 @@ void FFmpegVideoDecoder::Reset(const base::Closure& closure) {
DoReset();
}
-void FFmpegVideoDecoder::DoReset() {
- DCHECK(read_cb_.is_null());
-
- avcodec_flush_buffers(codec_context_);
- state_ = kNormal;
- base::ResetAndReturn(&reset_cb_).Run();
-}
-
-void FFmpegVideoDecoder::Stop(const base::Closure& closure) {
+void VpxVideoDecoder::Stop(const base::Closure& closure) {
DCHECK(message_loop_->BelongsToCurrentThread());
- base::ScopedClosureRunner runner(BindToCurrentLoop(closure));
- if (state_ == kUninitialized)
+ if (state_ == kUninitialized) {
+ closure.Run();
return;
+ }
if (!read_cb_.is_null())
base::ResetAndReturn(&read_cb_).Run(kOk, NULL);
- ReleaseFFmpegResources();
state_ = kUninitialized;
+ closure.Run();
}
-FFmpegVideoDecoder::~FFmpegVideoDecoder() {
- DCHECK_EQ(kUninitialized, state_);
- DCHECK(!codec_context_);
- DCHECK(!av_frame_);
-}
-
-void FFmpegVideoDecoder::ReadFromDemuxerStream() {
+void VpxVideoDecoder::ReadFromDemuxerStream() {
DCHECK_NE(state_, kUninitialized);
DCHECK_NE(state_, kDecodeFinished);
DCHECK(!read_cb_.is_null());
demuxer_stream_->Read(base::Bind(
- &FFmpegVideoDecoder::BufferReady, this));
+ &VpxVideoDecoder::DoDecryptOrDecodeBuffer, this));
}
-void FFmpegVideoDecoder::BufferReady(
+void VpxVideoDecoder::DoDecryptOrDecodeBuffer(
DemuxerStream::Status status,
const scoped_refptr<DecoderBuffer>& buffer) {
DCHECK(message_loop_->BelongsToCurrentThread());
@@ -258,7 +221,7 @@ void FFmpegVideoDecoder::BufferReady(
DecodeBuffer(buffer);
}
-void FFmpegVideoDecoder::DecodeBuffer(
+void VpxVideoDecoder::DecodeBuffer(
const scoped_refptr<DecoderBuffer>& buffer) {
DCHECK(message_loop_->BelongsToCurrentThread());
DCHECK_NE(state_, kUninitialized);
@@ -267,34 +230,11 @@ void FFmpegVideoDecoder::DecodeBuffer(
DCHECK(!read_cb_.is_null());
DCHECK(buffer);
- // During decode, because reads are issued asynchronously, it is possible to
- // receive multiple end of stream buffers since each read is acked. When the
- // first end of stream buffer is read, FFmpeg may still have frames queued
- // up in the decoder so we need to go through the decode loop until it stops
- // giving sensible data. After that, the decoder should output empty
- // frames. There are three states the decoder can be in:
- //
- // kNormal: This is the starting state. Buffers are decoded. Decode errors
- // are discarded.
- // kFlushCodec: There isn't any more input data. Call avcodec_decode_video2
- // until no more data is returned to flush out remaining
- // frames. The input buffer is ignored at this point.
- // kDecodeFinished: All calls return empty frames.
- //
- // These are the possible state transitions.
- //
- // kNormal -> kFlushCodec:
- // When buffer->IsEndOfStream() is first true.
- // kNormal -> kDecodeFinished:
- // A decoding error occurs and decoding needs to stop.
- // kFlushCodec -> kDecodeFinished:
- // When avcodec_decode_video2() returns 0 data or errors out.
- // (any state) -> kNormal:
- // Any time Reset() is called.
-
- // Transition to kFlushCodec on the first end of stream buffer.
+ // Transition to kDecodeFinished on the first end of stream buffer.
if (state_ == kNormal && buffer->IsEndOfStream()) {
- state_ = kFlushCodec;
+ state_ = kDecodeFinished;
+ base::ResetAndReturn(&read_cb_).Run(kOk, VideoFrame::CreateEmptyFrame());
+ return;
}
scoped_refptr<VideoFrame> video_frame;
@@ -311,15 +251,8 @@ void FFmpegVideoDecoder::DecodeBuffer(
statistics_cb_.Run(statistics);
}
- // If we didn't get a frame then we've either completely finished decoding or
- // we need more data.
+ // If we didn't get a frame we need more data.
if (!video_frame) {
- if (state_ == kFlushCodec) {
- state_ = kDecodeFinished;
- base::ResetAndReturn(&read_cb_).Run(kOk, VideoFrame::CreateEmptyFrame());
- return;
- }
-
ReadFromDemuxerStream();
return;
}
@@ -327,125 +260,80 @@ void FFmpegVideoDecoder::DecodeBuffer(
base::ResetAndReturn(&read_cb_).Run(kOk, video_frame);
}
-bool FFmpegVideoDecoder::Decode(
+bool VpxVideoDecoder::Decode(
const scoped_refptr<DecoderBuffer>& buffer,
scoped_refptr<VideoFrame>* video_frame) {
DCHECK(video_frame);
- // Create a packet for input data.
- // Due to FFmpeg API changes we no longer have const read-only pointers.
- AVPacket packet;
- av_init_packet(&packet);
- packet.data = const_cast<uint8*>(buffer->GetData());
- packet.size = buffer->GetDataSize();
-
- // Let FFmpeg handle presentation timestamp reordering.
- codec_context_->reordered_opaque = buffer->GetTimestamp().InMicroseconds();
-
- // Reset frame to default values.
- avcodec_get_frame_defaults(av_frame_);
-
- // This is for codecs not using get_buffer to initialize
- // |av_frame_->reordered_opaque|
- av_frame_->reordered_opaque = codec_context_->reordered_opaque;
-
- int frame_decoded = 0;
- int result = avcodec_decode_video2(codec_context_,
- av_frame_,
- &frame_decoded,
- &packet);
- // Log the problem if we can't decode a video frame and exit early.
- if (result < 0) {
- LOG(ERROR) << "Error decoding a video frame with timestamp: "
- << buffer->GetTimestamp().InMicroseconds() << " us, duration: "
- << buffer->GetDuration().InMicroseconds() << " us, packet size: "
- << buffer->GetDataSize() << " bytes";
- *video_frame = NULL;
+ // Pass |buffer| to libvpx.
+ int64 timestamp = buffer->GetTimestamp().InMicroseconds();
+ void* user_priv = reinterpret_cast<void*>(&timestamp);
+ vpx_codec_err_t status = vpx_codec_decode(vpx_codec_,
+ buffer->GetData(),
+ buffer->GetDataSize(),
+ user_priv,
+ 0);
+ if (status != VPX_CODEC_OK) {
+ LOG(ERROR) << "vpx_codec_decode() failed, status=" << status;
return false;
}
- // If no frame was produced then signal that more data is required to
- // produce more frames. This can happen under two circumstances:
- // 1) Decoder was recently initialized/flushed
- // 2) End of stream was reached and all internal frames have been output
- if (frame_decoded == 0) {
+ // Gets pointer to decoded data.
+ vpx_codec_iter_t iter = NULL;
+ const vpx_image_t* vpx_image = vpx_codec_get_frame(vpx_codec_, &iter);
+ if (!vpx_image) {
*video_frame = NULL;
return true;
}
- // TODO(fbarchard): Work around for FFmpeg http://crbug.com/27675
- // The decoder is in a bad state and not decoding correctly.
- // Checking for NULL avoids a crash in CopyPlane().
- if (!av_frame_->data[VideoFrame::kYPlane] ||
- !av_frame_->data[VideoFrame::kUPlane] ||
- !av_frame_->data[VideoFrame::kVPlane]) {
- LOG(ERROR) << "Video frame was produced yet has invalid frame data.";
- *video_frame = NULL;
+ if (vpx_image->user_priv != reinterpret_cast<void*>(&timestamp)) {
+ LOG(ERROR) << "Invalid output timestamp.";
return false;
}
- if (!av_frame_->opaque) {
- LOG(ERROR) << "VideoFrame object associated with frame data not set.";
- return false;
- }
- *video_frame = static_cast<VideoFrame*>(av_frame_->opaque);
-
- (*video_frame)->SetTimestamp(
- base::TimeDelta::FromMicroseconds(av_frame_->reordered_opaque));
-
+ CopyVpxImageTo(vpx_image, video_frame);
+ (*video_frame)->SetTimestamp(base::TimeDelta::FromMicroseconds(timestamp));
return true;
}
-void FFmpegVideoDecoder::ReleaseFFmpegResources() {
- if (codec_context_) {
- av_free(codec_context_->extradata);
- avcodec_close(codec_context_);
- av_free(codec_context_);
- codec_context_ = NULL;
- }
- if (av_frame_) {
- av_free(av_frame_);
- av_frame_ = NULL;
- }
-}
-
-bool FFmpegVideoDecoder::ConfigureDecoder() {
- const VideoDecoderConfig& config = demuxer_stream_->video_decoder_config();
-
- if (!config.IsValidConfig()) {
- DLOG(ERROR) << "Invalid video stream - " << config.AsHumanReadableString();
- return false;
- }
-
- if (config.is_encrypted()) {
- DLOG(ERROR) << "Encrypted video stream not supported.";
- return false;
- }
+void VpxVideoDecoder::DoReset() {
+ DCHECK(read_cb_.is_null());
- // Release existing decoder resources if necessary.
- ReleaseFFmpegResources();
-
- // Initialize AVCodecContext structure.
- codec_context_ = avcodec_alloc_context3(NULL);
- VideoDecoderConfigToAVCodecContext(config, codec_context_);
-
- // Enable motion vector search (potentially slow), strong deblocking filter
- // for damaged macroblocks, and set our error detection sensitivity.
- codec_context_->error_concealment = FF_EC_GUESS_MVS | FF_EC_DEBLOCK;
- codec_context_->thread_count = GetThreadCount(codec_context_->codec_id);
- codec_context_->opaque = this;
- codec_context_->flags |= CODEC_FLAG_EMU_EDGE;
- codec_context_->get_buffer = GetVideoBufferImpl;
- codec_context_->release_buffer = ReleaseVideoBufferImpl;
-
- AVCodec* codec = avcodec_find_decoder(codec_context_->codec_id);
- if (!codec || avcodec_open2(codec_context_, codec, NULL) < 0) {
- ReleaseFFmpegResources();
- return false;
- }
+ state_ = kNormal;
+ reset_cb_.Run();
+ reset_cb_.Reset();
+}
- av_frame_ = avcodec_alloc_frame();
- return true;
+void VpxVideoDecoder::CopyVpxImageTo(
+ const vpx_image* vpx_image,
+ scoped_refptr<VideoFrame>* video_frame) {
+ CHECK(vpx_image);
+ CHECK_EQ(vpx_image->d_w % 2, 0U);
+ CHECK_EQ(vpx_image->d_h % 2, 0U);
+ CHECK(vpx_image->fmt == VPX_IMG_FMT_I420 ||
+ vpx_image->fmt == VPX_IMG_FMT_YV12);
+
+ gfx::Size size(vpx_image->d_w, vpx_image->d_h);
+ gfx::Size natural_size =
+ demuxer_stream_->video_decoder_config().natural_size();
+
+ *video_frame = VideoFrame::CreateFrame(VideoFrame::YV12,
+ size,
+ gfx::Rect(size),
+ natural_size,
+ kNoTimestamp());
+ CopyYPlane(vpx_image->planes[VPX_PLANE_Y],
+ vpx_image->stride[VPX_PLANE_Y],
+ vpx_image->d_h,
+ *video_frame);
+ CopyUPlane(vpx_image->planes[VPX_PLANE_U],
+ vpx_image->stride[VPX_PLANE_U],
+ vpx_image->d_h / 2,
+ *video_frame);
+ CopyVPlane(vpx_image->planes[VPX_PLANE_V],
+ vpx_image->stride[VPX_PLANE_V],
+ vpx_image->d_h / 2,
+ *video_frame);
}
} // namespace media
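For reference, the libvpx calls the new class relies on (vpx_codec_dec_init() with vpx_codec_vp9_dx(), vpx_codec_decode(), vpx_codec_get_frame(), vpx_codec_destroy()) follow the standard libvpx decode loop. Below is a minimal standalone sketch of that loop against the upstream libvpx headers, kept outside Chromium's media types; the patch itself includes "third_party/libvpx/libvpx.h" instead, and the input here is assumed to be one raw compressed VP9 frame already in memory.

// Minimal sketch of the libvpx decode loop used above, written against the
// upstream vpx headers rather than Chromium's wrapper header.
#define VPX_CODEC_DISABLE_COMPAT 1
extern "C" {
#include "vpx/vpx_decoder.h"
#include "vpx/vp8dx.h"  // vpx_codec_vp9_dx()
}
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

// |data|/|size| hold one compressed VP9 frame; returns true if the decoder
// produced at least one picture.
static bool DecodeOneVp9Frame(const uint8_t* data, size_t size) {
  vpx_codec_ctx_t ctx;
  vpx_codec_dec_cfg_t cfg = {0};
  cfg.threads = 2;  // mirrors kDecodeThreads in the patch
  if (vpx_codec_dec_init(&ctx, vpx_codec_vp9_dx(), &cfg, 0) != VPX_CODEC_OK)
    return false;

  bool got_picture = false;
  if (vpx_codec_decode(&ctx, data, static_cast<unsigned int>(size),
                       NULL, 0) == VPX_CODEC_OK) {
    vpx_codec_iter_t iter = NULL;
    const vpx_image_t* img;
    // Drain the pictures produced by this call, as VpxVideoDecoder::Decode()
    // does for the single frame it expects per input buffer.
    while ((img = vpx_codec_get_frame(&ctx, &iter)) != NULL) {
      got_picture = true;
      printf("decoded %ux%u, Y stride %d\n",
             img->d_w, img->d_h, img->stride[VPX_PLANE_Y]);
    }
  }
  vpx_codec_destroy(&ctx);
  return got_picture;
}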