Chromium Code Reviews

Unified Diff: media/video/ffmpeg_video_decode_engine.cc

Issue 8417019: Simplify VideoDecodeEngine interface by making everything synchronous. (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: recycling: vanquished | Created 9 years, 2 months ago
Index: media/video/ffmpeg_video_decode_engine.cc
diff --git a/media/video/ffmpeg_video_decode_engine.cc b/media/video/ffmpeg_video_decode_engine.cc
index dad9fc16b27e20870a2ba4c0c00fa416b311e2fa..52a62f6aaf205547f8da5334a64915616772320a 100644
--- a/media/video/ffmpeg_video_decode_engine.cc
+++ b/media/video/ffmpeg_video_decode_engine.cc
@@ -5,12 +5,11 @@
#include "media/video/ffmpeg_video_decode_engine.h"
#include "base/command_line.h"
+#include "base/logging.h"
#include "base/string_number_conversions.h"
-#include "base/task.h"
#include "media/base/buffers.h"
-#include "media/base/limits.h"
#include "media/base/media_switches.h"
-#include "media/base/pipeline.h"
+#include "media/base/video_decoder_config.h"
#include "media/base/video_util.h"
#include "media/ffmpeg/ffmpeg_common.h"
@@ -18,26 +17,16 @@ namespace media {
FFmpegVideoDecodeEngine::FFmpegVideoDecodeEngine()
: codec_context_(NULL),
- event_handler_(NULL),
+ av_frame_(NULL),
frame_rate_numerator_(0),
- frame_rate_denominator_(0),
- pending_input_buffers_(0),
- pending_output_buffers_(0),
- output_eos_reached_(false),
- flush_pending_(false) {
+ frame_rate_denominator_(0) {
}
FFmpegVideoDecodeEngine::~FFmpegVideoDecodeEngine() {
- if (codec_context_) {
- av_free(codec_context_->extradata);
- avcodec_close(codec_context_);
- av_free(codec_context_);
- }
+ Uninitialize();
}
-void FFmpegVideoDecodeEngine::Initialize(
- VideoDecodeEngine::EventHandler* event_handler,
- const VideoDecoderConfig& config) {
+bool FFmpegVideoDecodeEngine::Initialize(const VideoDecoderConfig& config) {
frame_rate_numerator_ = config.frame_rate_numerator();
frame_rate_denominator_ = config.frame_rate_denominator();
@@ -83,64 +72,30 @@ void FFmpegVideoDecodeEngine::Initialize(
// We don't allocate AVFrame on the stack since different versions of FFmpeg
// may change the size of AVFrame, causing stack corruption. The solution is
// to let FFmpeg allocate the structure via avcodec_alloc_frame().
- av_frame_.reset(avcodec_alloc_frame());
-
- // If we do not have enough buffers, we will report error too.
- frame_queue_available_.clear();
-
- // Create output buffer pool when direct rendering is not used.
- for (size_t i = 0; i < Limits::kMaxVideoFrames; ++i) {
- VideoFrame::Format format =
- PixelFormatToVideoFormat(codec_context_->pix_fmt);
-
- scoped_refptr<VideoFrame> video_frame =
- VideoFrame::CreateFrame(format,
- config.visible_rect().width(),
- config.visible_rect().height(),
- kNoTimestamp,
- kNoTimestamp);
- frame_queue_available_.push_back(video_frame);
- }
+ av_frame_ = avcodec_alloc_frame();
// Open the codec!
- bool success = codec && avcodec_open(codec_context_, codec) >= 0;
- event_handler_ = event_handler;
- event_handler_->OnInitializeComplete(success);
+ return codec && avcodec_open(codec_context_, codec) >= 0;
}
-void FFmpegVideoDecodeEngine::ConsumeVideoSample(
- scoped_refptr<Buffer> buffer) {
- pending_input_buffers_--;
- if (flush_pending_) {
- TryToFinishPendingFlush();
- } else {
- // Otherwise try to decode this buffer.
- DecodeFrame(buffer);
+void FFmpegVideoDecodeEngine::Uninitialize() {
+ if (codec_context_) {
+ av_free(codec_context_->extradata);
+ avcodec_close(codec_context_);
+ av_free(codec_context_);
+ codec_context_ = NULL;
}
-}
-
-void FFmpegVideoDecodeEngine::ProduceVideoFrame(
- scoped_refptr<VideoFrame> frame) {
- // We should never receive NULL frame or EOS frame.
- DCHECK(frame.get() && !frame->IsEndOfStream());
-
- // Increment pending output buffer count.
- pending_output_buffers_++;
-
- // Return this frame to available pool after display.
- frame_queue_available_.push_back(frame);
-
- if (flush_pending_) {
- TryToFinishPendingFlush();
- } else if (!output_eos_reached_) {
- // If we already deliver EOS to renderer, we stop reading new input.
- ReadInput();
+ if (av_frame_) {
+ av_free(av_frame_);
+ av_frame_ = NULL;
}
+ frame_rate_numerator_ = 0;
+ frame_rate_denominator_ = 0;
}
-// Try to decode frame when both input and output are ready.
-void FFmpegVideoDecodeEngine::DecodeFrame(scoped_refptr<Buffer> buffer) {
- scoped_refptr<VideoFrame> video_frame;
+bool FFmpegVideoDecodeEngine::Decode(scoped_refptr<Buffer> buffer,
+ scoped_refptr<VideoFrame>* video_frame) {
+ DCHECK(video_frame);
// Create a packet for input data.
// Due to FFmpeg API changes we no longer have const read-only pointers.
@@ -149,9 +104,6 @@ void FFmpegVideoDecodeEngine::DecodeFrame(scoped_refptr<Buffer> buffer) {
packet.data = const_cast<uint8*>(buffer->GetData());
packet.size = buffer->GetDataSize();
- PipelineStatistics statistics;
- statistics.video_bytes_decoded = buffer->GetDataSize();
-
// Let FFmpeg handle presentation timestamp reordering.
codec_context_->reordered_opaque = buffer->GetTimestamp().InMicroseconds();
@@ -161,7 +113,7 @@ void FFmpegVideoDecodeEngine::DecodeFrame(scoped_refptr<Buffer> buffer) {
int frame_decoded = 0;
int result = avcodec_decode_video2(codec_context_,
- av_frame_.get(),
+ av_frame_,
&frame_decoded,
&packet);
// Log the problem if we can't decode a video frame and exit early.
@@ -170,23 +122,17 @@ void FFmpegVideoDecodeEngine::DecodeFrame(scoped_refptr<Buffer> buffer) {
<< buffer->GetTimestamp().InMicroseconds() << " us, duration: "
<< buffer->GetDuration().InMicroseconds() << " us, packet size: "
<< buffer->GetDataSize() << " bytes";
- event_handler_->OnError();
- return;
+ *video_frame = NULL;
+ return false;
}
- // If frame_decoded == 0, then no frame was produced.
- // In this case, if we already begin to flush codec with empty
- // input packet at the end of input stream, the first time we
- // encounter frame_decoded == 0 signal output frame had been
- // drained, we mark the flag. Otherwise we read from demuxer again.
+ // If no frame was produced then signal that more data is required to
+ // produce more frames. This can happen under two circumstances:
+ // 1) Decoder was recently initialized/flushed
+ // 2) End of stream was reached and all internal frames have been output
Ami GONE FROM CHROMIUM 2011/11/01 22:17:40 I worry that these are things a client needs to kn
scherkus (not reviewing) 2011/11/03 04:55:59 I thought I had that covered with the current docu
Ami GONE FROM CHROMIUM 2011/11/03 16:40:00 Before I was thinking you'd move this comment to t
if (frame_decoded == 0) {
- if (buffer->IsEndOfStream()) { // We had started flushing.
- event_handler_->ConsumeVideoFrame(video_frame, statistics);
- output_eos_reached_ = true;
- } else {
- ReadInput();
- }
- return;
+ *video_frame = NULL;
+ return true;
}
// TODO(fbarchard): Work around for FFmpeg http://crbug.com/27675
@@ -195,10 +141,13 @@ void FFmpegVideoDecodeEngine::DecodeFrame(scoped_refptr<Buffer> buffer) {
if (!av_frame_->data[VideoFrame::kYPlane] ||
!av_frame_->data[VideoFrame::kUPlane] ||
!av_frame_->data[VideoFrame::kVPlane]) {
- event_handler_->OnError();
- return;
+ LOG(ERROR) << "Video frame was produced yet has invalid frame data.";
Ami GONE FROM CHROMIUM 2011/11/01 22:17:40 *video_frame=NULL; ?
+ return false;
}
+ // We've got a frame! Make sure we have a place to store it.
+ ReallocateVideoFrameIfNecessary(video_frame);
Ami GONE FROM CHROMIUM 2011/11/01 22:17:40 If you take my suggestion about INVALID below, mak
+
// Determine timestamp and calculate the duration based on the repeat picture
// count. According to FFmpeg docs, the total duration can be calculated as
// follows:
@@ -212,83 +161,47 @@ void FFmpegVideoDecodeEngine::DecodeFrame(scoped_refptr<Buffer> buffer) {
doubled_time_base.num = frame_rate_denominator_;
doubled_time_base.den = frame_rate_numerator_ * 2;
- base::TimeDelta timestamp =
- base::TimeDelta::FromMicroseconds(av_frame_->reordered_opaque);
- base::TimeDelta duration =
- ConvertFromTimeBase(doubled_time_base, 2 + av_frame_->repeat_pict);
-
- // Available frame is guaranteed, because we issue as much reads as
- // available frame, except the case of |frame_decoded| == 0, which
- // implies decoder order delay, and force us to read more inputs.
- DCHECK(frame_queue_available_.size());
- video_frame = frame_queue_available_.front();
- frame_queue_available_.pop_front();
+ (*video_frame)->SetTimestamp(
+ base::TimeDelta::FromMicroseconds(av_frame_->reordered_opaque));
+ (*video_frame)->SetDuration(
+ ConvertFromTimeBase(doubled_time_base, 2 + av_frame_->repeat_pict));
// Copy the frame data since FFmpeg reuses internal buffers for AVFrame
// output, meaning the data is only valid until the next
// avcodec_decode_video() call.
- //
- // TODO(scherkus): use VideoFrame dimensions instead and re-allocate
- // VideoFrame if dimensions changes, but for now adjust size locally.
int y_rows = codec_context_->height;
int uv_rows = codec_context_->height;
if (codec_context_->pix_fmt == PIX_FMT_YUV420P) {
uv_rows /= 2;
}
- CopyYPlane(av_frame_->data[0], av_frame_->linesize[0], y_rows, video_frame);
- CopyUPlane(av_frame_->data[1], av_frame_->linesize[1], uv_rows, video_frame);
- CopyVPlane(av_frame_->data[2], av_frame_->linesize[2], uv_rows, video_frame);
+ CopyYPlane(av_frame_->data[0], av_frame_->linesize[0], y_rows, *video_frame);
+ CopyUPlane(av_frame_->data[1], av_frame_->linesize[1], uv_rows, *video_frame);
+ CopyVPlane(av_frame_->data[2], av_frame_->linesize[2], uv_rows, *video_frame);
- video_frame->SetTimestamp(timestamp);
- video_frame->SetDuration(duration);
-
- pending_output_buffers_--;
- event_handler_->ConsumeVideoFrame(video_frame, statistics);
-}
-
-void FFmpegVideoDecodeEngine::Uninitialize() {
- event_handler_->OnUninitializeComplete();
+ return true;
}
void FFmpegVideoDecodeEngine::Flush() {
avcodec_flush_buffers(codec_context_);
- flush_pending_ = true;
- TryToFinishPendingFlush();
}
-void FFmpegVideoDecodeEngine::TryToFinishPendingFlush() {
- DCHECK(flush_pending_);
-
- // We consider ourself flushed when there is no pending input buffers
- // and output buffers, which implies that all buffers had been returned
- // to its owner.
- if (!pending_input_buffers_ && !pending_output_buffers_) {
- // Try to finish flushing and notify pipeline.
- flush_pending_ = false;
- event_handler_->OnFlushComplete();
+void FFmpegVideoDecodeEngine::ReallocateVideoFrameIfNecessary(
+ scoped_refptr<VideoFrame>* video_frame) {
+ VideoFrame::Format format = PixelFormatToVideoFormat(codec_context_->pix_fmt);
Ami GONE FROM CHROMIUM 2011/11/01 22:17:40 If you checked for VideoFrame::INVALID here and re
+ size_t width = codec_context_->width;
+ size_t height = codec_context_->height;
+
+ // Check for format and visible dimension match.
+ if (video_frame && (*video_frame) &&
+ (*video_frame)->format() == format &&
+ (*video_frame)->width() == width &&
+ (*video_frame)->height() == height) {
+ return;
}
-}
-
-void FFmpegVideoDecodeEngine::Seek() {
- // After a seek, output stream no longer considered as EOS.
- output_eos_reached_ = false;
- // The buffer provider is assumed to perform pre-roll operation.
- for (unsigned int i = 0; i < Limits::kMaxVideoFrames; ++i)
- ReadInput();
-
- event_handler_->OnSeekComplete();
-}
-
-void FFmpegVideoDecodeEngine::ReadInput() {
- DCHECK_EQ(output_eos_reached_, false);
- pending_input_buffers_++;
- event_handler_->ProduceVideoSample(NULL);
+ *video_frame = VideoFrame::CreateFrame(format, width, height,
+ kNoTimestamp, kNoTimestamp);
}
} // namespace media
-
-// Disable refcounting for this object because this object only lives
-// on the video decoder thread and there's no need to refcount it.
-DISABLE_RUNNABLE_METHOD_REFCOUNT(media::FFmpegVideoDecodeEngine);
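
A note on the duration math in the timestamp/duration hunk above (the numbers here are illustrative, not part of the change): the doubled time base has num = frame_rate_denominator and den = 2 * frame_rate_numerator, so one tick is half a frame period and a decoded frame lasts (2 + repeat_pict) * frame_rate_denominator / (2 * frame_rate_numerator) seconds. For a 30000/1001 (~29.97 fps) stream with repeat_pict == 0 this is 2 * 1001 / 60000 s, about 33.4 ms; with repeat_pict == 1 it becomes 3 * 1001 / 60000 s, about 50.1 ms, matching FFmpeg's rule that each repeat_pict count adds half a frame period.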

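The patch turns Initialize(), Decode(), Flush() and Uninitialize() into plain synchronous calls, so a caller no longer needs an EventHandler. Below is a minimal sketch of the resulting calling convention, assuming a hypothetical GetNextBuffer() helper that supplies demuxed buffers and finishes with end-of-stream buffers; it is illustrative only and not part of this change.

#include "media/base/buffers.h"
#include "media/base/video_decoder_config.h"
#include "media/video/ffmpeg_video_decode_engine.h"

// Hypothetical source of demuxed buffers; returns end-of-stream buffers once
// the input is exhausted.
scoped_refptr<media::Buffer> GetNextBuffer();

void DecodeAll(const media::VideoDecoderConfig& config) {
  media::FFmpegVideoDecodeEngine engine;
  if (!engine.Initialize(config))
    return;  // Unsupported codec or the codec failed to open.

  for (;;) {
    scoped_refptr<media::Buffer> buffer = GetNextBuffer();
    scoped_refptr<media::VideoFrame> frame;
    if (!engine.Decode(buffer, &frame))
      break;  // Hard decode error.
    if (!frame) {
      // No frame produced: the decoder needs more input, or (for an
      // end-of-stream buffer) all internal frames have been drained.
      if (buffer->IsEndOfStream())
        break;
      continue;
    }
    // |frame| has its timestamp and duration set and is safe to hand off;
    // the engine copies plane data out of FFmpeg's internal AVFrame.
  }
  engine.Uninitialize();  // Also invoked by the destructor.
}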