Index: media/filters/ffmpeg_video_decoder.cc
diff --git a/media/filters/ffmpeg_video_decoder.cc b/media/filters/ffmpeg_video_decoder.cc
index 0254f6352149110c9c49ad388df92ce9587a69e4..39d9e3987841bf3f680915932d84c7d2bb014d44 100644
--- a/media/filters/ffmpeg_video_decoder.cc
+++ b/media/filters/ffmpeg_video_decoder.cc
@@ -5,24 +5,63 @@
 #include "media/filters/ffmpeg_video_decoder.h"
 #include "base/bind.h"
+#include "base/command_line.h"
 #include "base/message_loop.h"
+#include "base/string_number_conversions.h"
 #include "media/base/demuxer_stream.h"
 #include "media/base/filter_host.h"
 #include "media/base/limits.h"
+#include "media/base/media_switches.h"
 #include "media/base/video_decoder_config.h"
 #include "media/base/video_frame.h"
+#include "media/base/video_util.h"
 #include "media/ffmpeg/ffmpeg_common.h"
-#include "media/video/ffmpeg_video_decode_engine.h"
 namespace media {
+// Always try to use two threads for video decoding. There is little reason
+// not to since current-day CPUs tend to be multi-core and we measured
+// performance benefits on older machines such as P4s with hyperthreading.
+//
+// Handling decoding on separate threads also frees up the pipeline thread to
+// continue processing. Although it'd be nice to have the option of a single
+// decoding thread, FFmpeg treats having one thread the same as having zero
+// threads (i.e., avcodec_decode_video() will execute on the calling thread).
+// Yet another reason for having two threads :)
+static const int kDecodeThreads = 2;
+static const int kMaxDecodeThreads = 16;
+
+// Returns the number of threads given the FFmpeg CodecID. Also inspects the
+// command line for a valid --video-threads flag.
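+// For example, --video-threads=4 requests four decoding threads; parsed
+// values are clamped to the range [0, kMaxDecodeThreads].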
+static int GetThreadCount(CodecID codec_id) {
+  // TODO(scherkus): As of 07/21/2011 we still can't enable Theora multithreaded
+  // decoding due to bugs in FFmpeg. Dig in and send fixes upstream!
+  //
+  // Refer to http://crbug.com/93932 for tsan suppressions on decoding.
+  int decode_threads = (codec_id == CODEC_ID_THEORA ? 1 : kDecodeThreads);
+
+  const CommandLine* cmd_line = CommandLine::ForCurrentProcess();
+  std::string threads(cmd_line->GetSwitchValueASCII(switches::kVideoThreads));
+  if (threads.empty() || !base::StringToInt(threads, &decode_threads))
+    return decode_threads;
+
+  decode_threads = std::max(decode_threads, 0);
+  decode_threads = std::min(decode_threads, kMaxDecodeThreads);
+  return decode_threads;
+}
+ |
FFmpegVideoDecoder::FFmpegVideoDecoder(MessageLoop* message_loop) |
: message_loop_(message_loop), |
state_(kUninitialized), |
- decode_engine_(new FFmpegVideoDecodeEngine()) { |
+ codec_context_(NULL), |
+ av_frame_(NULL), |
+ frame_rate_numerator_(0), |
+ frame_rate_denominator_(0) { |
} |
-FFmpegVideoDecoder::~FFmpegVideoDecoder() {} |
+FFmpegVideoDecoder::~FFmpegVideoDecoder() { |
+ ReleaseFFmpegResources(); |
+} |
 void FFmpegVideoDecoder::Initialize(DemuxerStream* demuxer_stream,
                                     const base::Closure& callback,
@@ -71,16 +110,36 @@ void FFmpegVideoDecoder::Initialize(DemuxerStream* demuxer_stream,
     return;
   }
-  pts_stream_.Initialize(GetFrameDuration(config));
-  natural_size_ = config.natural_size();
+  // Initialize AVCodecContext structure.
+  codec_context_ = avcodec_alloc_context();
+  VideoDecoderConfigToAVCodecContext(config, codec_context_);
-  if (!decode_engine_->Initialize(config)) {
+  // Enable motion vector search (potentially slow), strong deblocking filter
+  // for damaged macroblocks, and set our error detection sensitivity.
+  codec_context_->error_concealment = FF_EC_GUESS_MVS | FF_EC_DEBLOCK;
+  codec_context_->error_recognition = FF_ER_CAREFUL;
+  codec_context_->thread_count = GetThreadCount(codec_context_->codec_id);
+
+  AVCodec* codec = avcodec_find_decoder(codec_context_->codec_id);
+  if (!codec) {
     host()->SetError(PIPELINE_ERROR_DECODE);
     callback.Run();
     return;
   }
+  if (avcodec_open(codec_context_, codec) < 0) {
+    host()->SetError(PIPELINE_ERROR_DECODE);
+    callback.Run();
+    return;
+  }
+
+  // Success!
   state_ = kNormal;
+  av_frame_ = avcodec_alloc_frame();
+  pts_stream_.Initialize(GetFrameDuration(config));
+  natural_size_ = config.natural_size();
+  frame_rate_numerator_ = config.frame_rate_numerator();
+  frame_rate_denominator_ = config.frame_rate_denominator();
   callback.Run();
 }
@@ -91,7 +150,7 @@ void FFmpegVideoDecoder::Stop(const base::Closure& callback) {
     return;
   }
-  decode_engine_->Uninitialize();
+  ReleaseFFmpegResources();
   state_ = kUninitialized;
   callback.Run();
 }
@@ -124,7 +183,7 @@ void FFmpegVideoDecoder::Flush(const base::Closure& callback) {
     return;
   }
-  decode_engine_->Flush();
+  avcodec_flush_buffers(codec_context_);
   pts_stream_.Flush();
   state_ = kNormal;
   callback.Run();
@@ -221,7 +280,7 @@ void FFmpegVideoDecoder::DoDecodeBuffer(const scoped_refptr<Buffer>& buffer) {
   }
   scoped_refptr<VideoFrame> video_frame;
-  if (!decode_engine_->Decode(buffer, &video_frame)) {
+  if (!Decode(buffer, &video_frame)) {
     state_ = kDecodeFinished;
     DeliverFrame(VideoFrame::CreateEmptyFrame());
     host()->SetError(PIPELINE_ERROR_DECODE);
@@ -256,6 +315,101 @@ void FFmpegVideoDecoder::DoDecodeBuffer(const scoped_refptr<Buffer>& buffer) {
   DeliverFrame(video_frame);
 }
+bool FFmpegVideoDecoder::Decode(
+    const scoped_refptr<Buffer>& buffer,
+    scoped_refptr<VideoFrame>* video_frame) {
+  DCHECK(video_frame);
+
+  // Create a packet for input data.
+  // Due to FFmpeg API changes we no longer have const read-only pointers.
+  AVPacket packet;
+  av_init_packet(&packet);
+  packet.data = const_cast<uint8*>(buffer->GetData());
+  packet.size = buffer->GetDataSize();
+
+  // Let FFmpeg handle presentation timestamp reordering.
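+  // The decoder copies |reordered_opaque| into each AVFrame it returns, so
+  // the input timestamp emerges alongside the matching output frame even
+  // when frames are reordered internally.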
+  codec_context_->reordered_opaque = buffer->GetTimestamp().InMicroseconds();
+
+  // This is for codecs not using get_buffer() to initialize
+  // |av_frame_->reordered_opaque|.
+  av_frame_->reordered_opaque = codec_context_->reordered_opaque;
+
+  int frame_decoded = 0;
+  int result = avcodec_decode_video2(codec_context_,
+                                     av_frame_,
+                                     &frame_decoded,
+                                     &packet);
+  // If decoding failed, log the problem and exit early.
+  if (result < 0) {
+    LOG(ERROR) << "Error decoding a video frame with timestamp: "
+               << buffer->GetTimestamp().InMicroseconds() << " us, duration: "
+               << buffer->GetDuration().InMicroseconds() << " us, packet size: "
+               << buffer->GetDataSize() << " bytes";
+    *video_frame = NULL;
+    return false;
+  }
+
+  // If no frame was produced then signal that more data is required to
+  // produce another frame. This can happen under two circumstances:
+  //   1) The decoder was recently initialized/flushed.
+  //   2) End of stream was reached and all internal frames have been output.
+  if (frame_decoded == 0) {
+    *video_frame = NULL;
+    return true;
+  }
+
+  // TODO(fbarchard): Workaround for FFmpeg http://crbug.com/27675
+  // The decoder is in a bad state and not decoding correctly.
+  // Checking for NULL avoids a crash in CopyPlane().
+  if (!av_frame_->data[VideoFrame::kYPlane] ||
+      !av_frame_->data[VideoFrame::kUPlane] ||
+      !av_frame_->data[VideoFrame::kVPlane]) {
+    LOG(ERROR) << "Video frame was produced yet has invalid frame data.";
+    *video_frame = NULL;
+    return false;
+  }
+
+  // We've got a frame! Make sure we have a place to store it.
+  *video_frame = AllocateVideoFrame();
+  if (!(*video_frame)) {
+    LOG(ERROR) << "Failed to allocate video frame";
+    return false;
+  }
+
+  // Determine the timestamp and calculate the duration based on the repeat
+  // picture count. According to FFmpeg docs, the total duration can be
+  // calculated as follows:
+  //   fps = 1 / time_base
+  //
+  //   duration = (1 / fps) + (repeat_pict) / (2 * fps)
+  //            = (2 + repeat_pict) / (2 * fps)
+  //            = (2 + repeat_pict) / (2 * (1 / time_base))
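+  //
+  // For example, at 30 fps (time_base = 1/30) a frame with repeat_pict == 1
+  // lasts (2 + 1) / (2 * 30) = 1/20 of a second, i.e. 50 milliseconds.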
+  DCHECK_LE(av_frame_->repeat_pict, 2);  // Sanity check.
+  AVRational doubled_time_base;
+  doubled_time_base.num = frame_rate_denominator_;
+  doubled_time_base.den = frame_rate_numerator_ * 2;
+
+  (*video_frame)->SetTimestamp(
+      base::TimeDelta::FromMicroseconds(av_frame_->reordered_opaque));
+  (*video_frame)->SetDuration(
+      ConvertFromTimeBase(doubled_time_base, 2 + av_frame_->repeat_pict));
+
+  // Copy the frame data since FFmpeg reuses internal buffers for AVFrame
+  // output, meaning the data is only valid until the next
+  // avcodec_decode_video2() call.
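+  //
+  // The chroma planes of PIX_FMT_YUV420P are subsampled 2x vertically, so
+  // only half as many UV rows need to be copied.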
+  int y_rows = codec_context_->height;
+  int uv_rows = codec_context_->height;
+  if (codec_context_->pix_fmt == PIX_FMT_YUV420P) {
+    uv_rows /= 2;
+  }
+
+  CopyYPlane(av_frame_->data[0], av_frame_->linesize[0], y_rows, *video_frame);
+  CopyUPlane(av_frame_->data[1], av_frame_->linesize[1], uv_rows, *video_frame);
+  CopyVPlane(av_frame_->data[2], av_frame_->linesize[2], uv_rows, *video_frame);
+
+  return true;
+}
+
 void FFmpegVideoDecoder::DeliverFrame(
     const scoped_refptr<VideoFrame>& video_frame) {
   // Reset the callback before running to protect against reentrancy.
@@ -264,4 +418,26 @@ void FFmpegVideoDecoder::DeliverFrame(
   read_cb.Run(video_frame);
 }
+void FFmpegVideoDecoder::ReleaseFFmpegResources() {
+  if (codec_context_) {
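+    // |extradata| was allocated on our behalf when the VideoDecoderConfig was
+    // converted to an AVCodecContext, so it is ours to free.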
+    av_free(codec_context_->extradata);
+    avcodec_close(codec_context_);
+    av_free(codec_context_);
+    codec_context_ = NULL;
+  }
+  if (av_frame_) {
+    av_free(av_frame_);
+    av_frame_ = NULL;
+  }
+}
+
+scoped_refptr<VideoFrame> FFmpegVideoDecoder::AllocateVideoFrame() {
+  VideoFrame::Format format = PixelFormatToVideoFormat(codec_context_->pix_fmt);
+  size_t width = codec_context_->width;
+  size_t height = codec_context_->height;
+
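+  // kNoTimestamp is a placeholder; Decode() fills in the real timestamp and
+  // duration once decoding succeeds.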
+  return VideoFrame::CreateFrame(format, width, height,
+                                 kNoTimestamp, kNoTimestamp);
+}
+
 }  // namespace media