Chromium Code Reviews

Side by Side Diff: media/filters/ffmpeg_video_decoder.cc

Issue 8772069: Collapse FFmpegVideoDecodeEngine into FFmpegVideoDecoder and remove VideoDecodeEngine. (Closed)
Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: fixes (created 9 years ago)
1 // Copyright (c) 2011 The Chromium Authors. All rights reserved. 1 // Copyright (c) 2011 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "media/filters/ffmpeg_video_decoder.h" 5 #include "media/filters/ffmpeg_video_decoder.h"
6 6
7 #include "base/bind.h" 7 #include "base/bind.h"
8 #include "base/command_line.h"
8 #include "base/message_loop.h" 9 #include "base/message_loop.h"
10 #include "base/string_number_conversions.h"
9 #include "media/base/demuxer_stream.h" 11 #include "media/base/demuxer_stream.h"
10 #include "media/base/filter_host.h" 12 #include "media/base/filter_host.h"
11 #include "media/base/limits.h" 13 #include "media/base/limits.h"
14 #include "media/base/media_switches.h"
12 #include "media/base/video_decoder_config.h" 15 #include "media/base/video_decoder_config.h"
13 #include "media/base/video_frame.h" 16 #include "media/base/video_frame.h"
17 #include "media/base/video_util.h"
14 #include "media/ffmpeg/ffmpeg_common.h" 18 #include "media/ffmpeg/ffmpeg_common.h"
15 #include "media/video/ffmpeg_video_decode_engine.h"
16 19
17 namespace media { 20 namespace media {
18 21
22 // Always try to use two threads for video decoding. There is little reason
23 // not to, since current-day CPUs tend to be multi-core and we measured
24 // performance benefits on older machines such as P4s with hyperthreading.
25 //
26 // Handling decoding on separate threads also frees up the pipeline thread to
27 // continue processing. Although it'd be nice to have the option of a single
28 // decoding thread, FFmpeg treats having one thread the same as having zero
29 // threads (i.e., avcodec_decode_video() will execute on the calling thread).
30 // Yet another reason for having two threads :)
31 static const int kDecodeThreads = 2;
32 static const int kMaxDecodeThreads = 16;
33
34 // Returns the number of threads given the FFmpeg CodecID. Also inspects the
35 // command line for a valid --video-threads flag.
36 static int GetThreadCount(CodecID codec_id) {
37 // TODO(scherkus): As of 07/21/2011 we still can't enable Theora multithreaded
38 // decoding due to bugs in FFmpeg. Dig in and send fixes upstream!
39 //
40 // Refer to http://crbug.com/93932 for tsan suppressions on decoding.
41 int decode_threads = (codec_id == CODEC_ID_THEORA ? 1 : kDecodeThreads);
42
43 const CommandLine* cmd_line = CommandLine::ForCurrentProcess();
44 std::string threads(cmd_line->GetSwitchValueASCII(switches::kVideoThreads));
45 if (threads.empty() || !base::StringToInt(threads, &decode_threads))
46 return decode_threads;
47
48 decode_threads = std::max(decode_threads, 0);
49 decode_threads = std::min(decode_threads, kMaxDecodeThreads);
50 return decode_threads;
51 }
52
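For illustration, the clamping that GetThreadCount() applies to an explicit --video-threads value can be exercised in isolation. The following is a minimal standalone sketch, not Chromium code: it reuses the patch's defaults but substitutes std::strtol for base::StringToInt, and ClampThreadCount() and its arguments are hypothetical helpers introduced only for this example.

// Standalone sketch of the --video-threads clamping performed by
// GetThreadCount() above. kDecodeThreads/kMaxDecodeThreads mirror the patch;
// ClampThreadCount() and its arguments are illustrative only.
#include <algorithm>
#include <cstdlib>
#include <iostream>
#include <string>

static const int kDecodeThreads = 2;
static const int kMaxDecodeThreads = 16;

// |flag_value| stands in for the --video-threads switch value; |is_theora|
// models the Theora special case that forces a single decoding thread.
static int ClampThreadCount(const std::string& flag_value, bool is_theora) {
  int decode_threads = is_theora ? 1 : kDecodeThreads;
  if (flag_value.empty())
    return decode_threads;

  char* end = NULL;
  long parsed = std::strtol(flag_value.c_str(), &end, 10);
  if (end == flag_value.c_str() || *end != '\0')
    return decode_threads;  // Unparsable value: keep the default.

  decode_threads = std::max(static_cast<int>(parsed), 0);
  return std::min(decode_threads, kMaxDecodeThreads);
}

int main() {
  std::cout << ClampThreadCount("", false) << "\n";    // 2  (default)
  std::cout << ClampThreadCount("64", false) << "\n";  // 16 (clamped to max)
  std::cout << ClampThreadCount("-3", false) << "\n";  // 0  (floored at zero)
  std::cout << ClampThreadCount("abc", true) << "\n";  // 1  (Theora default)
  return 0;
}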
19 FFmpegVideoDecoder::FFmpegVideoDecoder(MessageLoop* message_loop) 53 FFmpegVideoDecoder::FFmpegVideoDecoder(MessageLoop* message_loop)
20 : message_loop_(message_loop), 54 : message_loop_(message_loop),
21 state_(kUninitialized), 55 state_(kUninitialized),
22 decode_engine_(new FFmpegVideoDecodeEngine()) { 56 codec_context_(NULL),
57 av_frame_(NULL),
58 frame_rate_numerator_(0),
59 frame_rate_denominator_(0) {
23 } 60 }
24 61
25 FFmpegVideoDecoder::~FFmpegVideoDecoder() {} 62 FFmpegVideoDecoder::~FFmpegVideoDecoder() {
63 ReleaseFFmpegResources();
64 }
26 65
27 void FFmpegVideoDecoder::Initialize(DemuxerStream* demuxer_stream, 66 void FFmpegVideoDecoder::Initialize(DemuxerStream* demuxer_stream,
28 const base::Closure& callback, 67 const base::Closure& callback,
29 const StatisticsCallback& stats_callback) { 68 const StatisticsCallback& stats_callback) {
30 if (MessageLoop::current() != message_loop_) { 69 if (MessageLoop::current() != message_loop_) {
31 message_loop_->PostTask(FROM_HERE, base::Bind( 70 message_loop_->PostTask(FROM_HERE, base::Bind(
32 &FFmpegVideoDecoder::Initialize, this, 71 &FFmpegVideoDecoder::Initialize, this,
33 make_scoped_refptr(demuxer_stream), callback, stats_callback)); 72 make_scoped_refptr(demuxer_stream), callback, stats_callback));
34 return; 73 return;
35 } 74 }
(...skipping 28 matching lines...)
64 << " frame rate: " << config.frame_rate_numerator() 103 << " frame rate: " << config.frame_rate_numerator()
65 << "/" << config.frame_rate_denominator() 104 << "/" << config.frame_rate_denominator()
66 << " aspect ratio: " << config.aspect_ratio_numerator() 105 << " aspect ratio: " << config.aspect_ratio_numerator()
67 << "/" << config.aspect_ratio_denominator(); 106 << "/" << config.aspect_ratio_denominator();
68 107
69 host()->SetError(PIPELINE_ERROR_DECODE); 108 host()->SetError(PIPELINE_ERROR_DECODE);
70 callback.Run(); 109 callback.Run();
71 return; 110 return;
72 } 111 }
73 112
74 pts_stream_.Initialize(GetFrameDuration(config)); 113 // Initialize AVCodecContext structure.
75 natural_size_ = config.natural_size(); 114 codec_context_ = avcodec_alloc_context();
115 VideoDecoderConfigToAVCodecContext(config, codec_context_);
76 116
77 if (!decode_engine_->Initialize(config)) { 117 // Enable motion vector search (potentially slow), strong deblocking filter
118 // for damaged macroblocks, and set our error detection sensitivity.
119 codec_context_->error_concealment = FF_EC_GUESS_MVS | FF_EC_DEBLOCK;
120 codec_context_->error_recognition = FF_ER_CAREFUL;
121 codec_context_->thread_count = GetThreadCount(codec_context_->codec_id);
122
123 AVCodec* codec = avcodec_find_decoder(codec_context_->codec_id);
124 if (!codec) {
78 host()->SetError(PIPELINE_ERROR_DECODE); 125 host()->SetError(PIPELINE_ERROR_DECODE);
79 callback.Run(); 126 callback.Run();
80 return; 127 return;
81 } 128 }
82 129
130 if (avcodec_open(codec_context_, codec) < 0) {
131 host()->SetError(PIPELINE_ERROR_DECODE);
132 callback.Run();
133 return;
134 }
135
136 // Success!
83 state_ = kNormal; 137 state_ = kNormal;
138 av_frame_ = avcodec_alloc_frame();
139 pts_stream_.Initialize(GetFrameDuration(config));
140 natural_size_ = config.natural_size();
141 frame_rate_numerator_ = config.frame_rate_numerator();
142 frame_rate_denominator_ = config.frame_rate_denominator();
84 callback.Run(); 143 callback.Run();
85 } 144 }
86 145
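For a condensed view of what the new Initialize() and ReleaseFFmpegResources() do with libavcodec, here is a rough sketch under the 2011-era FFmpeg API this patch targets; avcodec_alloc_context(), avcodec_open() and avcodec_alloc_frame() have since been superseded in newer FFmpeg releases. OpenDecoder() and CloseDecoder() are illustrative helpers, not part of the patch, and in the real code the context is populated by VideoDecoderConfigToAVCodecContext() rather than the hand-set fields shown here.

// Illustrative only: the codec open/teardown lifecycle that this patch splits
// across Initialize() and ReleaseFFmpegResources(), using the same 2011-era
// libavcodec calls. Newer FFmpeg versions replace several of these entry
// points.
extern "C" {
#include <libavcodec/avcodec.h>
}

bool OpenDecoder(CodecID codec_id, int thread_count,
                 AVCodecContext** out_context, AVFrame** out_frame) {
  avcodec_register_all();  // Once per process with this API generation.

  AVCodecContext* context = avcodec_alloc_context();
  context->codec_id = codec_id;  // Normally filled in from the decoder config.
  context->error_concealment = FF_EC_GUESS_MVS | FF_EC_DEBLOCK;
  context->error_recognition = FF_ER_CAREFUL;
  context->thread_count = thread_count;

  AVCodec* codec = avcodec_find_decoder(codec_id);
  if (!codec || avcodec_open(context, codec) < 0) {
    av_free(context);
    return false;  // The patch maps this case onto PIPELINE_ERROR_DECODE.
  }

  *out_context = context;
  *out_frame = avcodec_alloc_frame();
  return true;
}

void CloseDecoder(AVCodecContext* context, AVFrame* frame) {
  if (context) {
    av_free(context->extradata);
    avcodec_close(context);
    av_free(context);
  }
  av_free(frame);  // av_free(NULL) is a no-op.
}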
87 void FFmpegVideoDecoder::Stop(const base::Closure& callback) { 146 void FFmpegVideoDecoder::Stop(const base::Closure& callback) {
88 if (MessageLoop::current() != message_loop_) { 147 if (MessageLoop::current() != message_loop_) {
89 message_loop_->PostTask(FROM_HERE, base::Bind( 148 message_loop_->PostTask(FROM_HERE, base::Bind(
90 &FFmpegVideoDecoder::Stop, this, callback)); 149 &FFmpegVideoDecoder::Stop, this, callback));
91 return; 150 return;
92 } 151 }
93 152
94 decode_engine_->Uninitialize(); 153 ReleaseFFmpegResources();
95 state_ = kUninitialized; 154 state_ = kUninitialized;
96 callback.Run(); 155 callback.Run();
97 } 156 }
98 157
99 void FFmpegVideoDecoder::Seek(base::TimeDelta time, const FilterStatusCB& cb) { 158 void FFmpegVideoDecoder::Seek(base::TimeDelta time, const FilterStatusCB& cb) {
100 if (MessageLoop::current() != message_loop_) { 159 if (MessageLoop::current() != message_loop_) {
101 message_loop_->PostTask(FROM_HERE, base::Bind( 160 message_loop_->PostTask(FROM_HERE, base::Bind(
102 &FFmpegVideoDecoder::Seek, this, time, cb)); 161 &FFmpegVideoDecoder::Seek, this, time, cb));
103 return; 162 return;
104 } 163 }
(...skipping 12 matching lines...)
117 callback.Run(); 176 callback.Run();
118 } 177 }
119 178
120 void FFmpegVideoDecoder::Flush(const base::Closure& callback) { 179 void FFmpegVideoDecoder::Flush(const base::Closure& callback) {
121 if (MessageLoop::current() != message_loop_) { 180 if (MessageLoop::current() != message_loop_) {
122 message_loop_->PostTask(FROM_HERE, base::Bind( 181 message_loop_->PostTask(FROM_HERE, base::Bind(
123 &FFmpegVideoDecoder::Flush, this, callback)); 182 &FFmpegVideoDecoder::Flush, this, callback));
124 return; 183 return;
125 } 184 }
126 185
127 decode_engine_->Flush(); 186 avcodec_flush_buffers(codec_context_);
128 pts_stream_.Flush(); 187 pts_stream_.Flush();
129 state_ = kNormal; 188 state_ = kNormal;
130 callback.Run(); 189 callback.Run();
131 } 190 }
132 191
133 void FFmpegVideoDecoder::Read(const ReadCB& callback) { 192 void FFmpegVideoDecoder::Read(const ReadCB& callback) {
134 // TODO(scherkus): forced task post since VideoRendererBase::FrameReady() will 193 // TODO(scherkus): forced task post since VideoRendererBase::FrameReady() will
135 // call Read() on FFmpegVideoDecoder's thread as we execute |read_cb_|. 194 // call Read() on FFmpegVideoDecoder's thread as we execute |read_cb_|.
136 message_loop_->PostTask(FROM_HERE, base::Bind( 195 message_loop_->PostTask(FROM_HERE, base::Bind(
137 &FFmpegVideoDecoder::DoRead, this, callback)); 196 &FFmpegVideoDecoder::DoRead, this, callback));
(...skipping 76 matching lines...)
214 } 273 }
215 274
216 // Push all incoming timestamps into the priority queue as long as we have 275 // Push all incoming timestamps into the priority queue as long as we have
217 // not yet received an end of stream buffer. It is important that this line 276 // not yet received an end of stream buffer. It is important that this line
218 // stay below the state transition into kFlushCodec done above. 277 // stay below the state transition into kFlushCodec done above.
219 if (state_ == kNormal) { 278 if (state_ == kNormal) {
220 pts_stream_.EnqueuePts(buffer.get()); 279 pts_stream_.EnqueuePts(buffer.get());
221 } 280 }
222 281
223 scoped_refptr<VideoFrame> video_frame; 282 scoped_refptr<VideoFrame> video_frame;
224 if (!decode_engine_->Decode(buffer, &video_frame)) { 283 if (!Decode(buffer, &video_frame)) {
225 state_ = kDecodeFinished; 284 state_ = kDecodeFinished;
226 DeliverFrame(VideoFrame::CreateEmptyFrame()); 285 DeliverFrame(VideoFrame::CreateEmptyFrame());
227 host()->SetError(PIPELINE_ERROR_DECODE); 286 host()->SetError(PIPELINE_ERROR_DECODE);
228 return; 287 return;
229 } 288 }
230 289
231 // Any successful decode counts! 290 // Any successful decode counts!
232 if (buffer->GetDataSize()) { 291 if (buffer->GetDataSize()) {
233 PipelineStatistics statistics; 292 PipelineStatistics statistics;
234 statistics.video_bytes_decoded = buffer->GetDataSize(); 293 statistics.video_bytes_decoded = buffer->GetDataSize();
(...skipping 14 matching lines...)
249 } 308 }
250 309
251 // If we got a frame make sure its timestamp is correct before sending it off. 310 // If we got a frame make sure its timestamp is correct before sending it off.
252 pts_stream_.UpdatePtsAndDuration(video_frame.get()); 311 pts_stream_.UpdatePtsAndDuration(video_frame.get());
253 video_frame->SetTimestamp(pts_stream_.current_pts()); 312 video_frame->SetTimestamp(pts_stream_.current_pts());
254 video_frame->SetDuration(pts_stream_.current_duration()); 313 video_frame->SetDuration(pts_stream_.current_duration());
255 314
256 DeliverFrame(video_frame); 315 DeliverFrame(video_frame);
257 } 316 }
258 317
318 bool FFmpegVideoDecoder::Decode(
319 const scoped_refptr<Buffer>& buffer,
320 scoped_refptr<VideoFrame>* video_frame) {
321 DCHECK(video_frame);
322
323 // Create a packet for input data.
324 // Due to FFmpeg API changes we no longer have const read-only pointers.
325 AVPacket packet;
326 av_init_packet(&packet);
327 packet.data = const_cast<uint8*>(buffer->GetData());
328 packet.size = buffer->GetDataSize();
329
330 // Let FFmpeg handle presentation timestamp reordering.
331 codec_context_->reordered_opaque = buffer->GetTimestamp().InMicroseconds();
332
333 // This is for codecs that do not use get_buffer() to initialize
334 // |av_frame_->reordered_opaque|.
335 av_frame_->reordered_opaque = codec_context_->reordered_opaque;
336
337 int frame_decoded = 0;
338 int result = avcodec_decode_video2(codec_context_,
339 av_frame_,
340 &frame_decoded,
341 &packet);
342 // Log the problem if we can't decode a video frame and exit early.
343 if (result < 0) {
344 LOG(ERROR) << "Error decoding a video frame with timestamp: "
345 << buffer->GetTimestamp().InMicroseconds() << " us, duration: "
346 << buffer->GetDuration().InMicroseconds() << " us, packet size: "
347 << buffer->GetDataSize() << " bytes";
348 *video_frame = NULL;
349 return false;
350 }
351
352 // If no frame was produced then signal that more data is required to
353 // produce more frames. This can happen under two circumstances:
354 // 1) Decoder was recently initialized/flushed
355 // 2) End of stream was reached and all internal frames have been output
356 if (frame_decoded == 0) {
357 *video_frame = NULL;
358 return true;
359 }
360
361 // TODO(fbarchard): Work around for FFmpeg http://crbug.com/27675
362 // The decoder is in a bad state and not decoding correctly.
363 // Checking for NULL avoids a crash in CopyPlane().
364 if (!av_frame_->data[VideoFrame::kYPlane] ||
365 !av_frame_->data[VideoFrame::kUPlane] ||
366 !av_frame_->data[VideoFrame::kVPlane]) {
367 LOG(ERROR) << "Video frame was produced yet has invalid frame data.";
368 *video_frame = NULL;
369 return false;
370 }
371
372 // We've got a frame! Make sure we have a place to store it.
373 *video_frame = AllocateVideoFrame();
374 if (!(*video_frame)) {
375 LOG(ERROR) << "Failed to allocate video frame";
376 return false;
377 }
378
379 // Determine timestamp and calculate the duration based on the repeat picture
380 // count. According to FFmpeg docs, the total duration can be calculated as
381 // follows:
382 // fps = 1 / time_base
383 //
384 // duration = (1 / fps) + (repeat_pict) / (2 * fps)
385 // = (2 + repeat_pict) / (2 * fps)
386 // = (2 + repeat_pict) / (2 * (1 / time_base))
387 DCHECK_LE(av_frame_->repeat_pict, 2); // Sanity check.
388 AVRational doubled_time_base;
389 doubled_time_base.num = frame_rate_denominator_;
390 doubled_time_base.den = frame_rate_numerator_ * 2;
391
392 (*video_frame)->SetTimestamp(
393 base::TimeDelta::FromMicroseconds(av_frame_->reordered_opaque));
394 (*video_frame)->SetDuration(
395 ConvertFromTimeBase(doubled_time_base, 2 + av_frame_->repeat_pict));
396
397 // Copy the frame data since FFmpeg reuses internal buffers for AVFrame
398 // output, meaning the data is only valid until the next
399 // avcodec_decode_video() call.
400 int y_rows = codec_context_->height;
401 int uv_rows = codec_context_->height;
402 if (codec_context_->pix_fmt == PIX_FMT_YUV420P) {
403 uv_rows /= 2;
404 }
405
406 CopyYPlane(av_frame_->data[0], av_frame_->linesize[0], y_rows, *video_frame);
407 CopyUPlane(av_frame_->data[1], av_frame_->linesize[1], uv_rows, *video_frame);
408 CopyVPlane(av_frame_->data[2], av_frame_->linesize[2], uv_rows, *video_frame);
409
410 return true;
411 }
412
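To make the doubled time base arithmetic in Decode() concrete, here is a small standalone check, not Chromium code, assuming a hypothetical 30000/1001 (29.97 fps) stream; ConvertFromTimeBase() in the real code performs the equivalent ticks * num / den scaling into a base::TimeDelta.

// Standalone arithmetic check (not Chromium code) of the duration formula in
// Decode() above, assuming a hypothetical 30000/1001 (29.97 fps) stream.
#include <iostream>

long long DurationInMicroseconds(int frame_rate_num, int frame_rate_den,
                                 int repeat_pict) {
  // doubled_time_base = time_base / 2, exactly as constructed in Decode().
  const long long num = frame_rate_den;
  const long long den = 2LL * frame_rate_num;
  const long long ticks = 2 + repeat_pict;
  return ticks * num * 1000000LL / den;
}

int main() {
  // repeat_pict == 0: one frame period, 2 * 1001 / 60000 s, roughly 33366 us.
  std::cout << DurationInMicroseconds(30000, 1001, 0) << " us\n";
  // repeat_pict == 1: frame displayed for 1.5 periods, 50050 us.
  std::cout << DurationInMicroseconds(30000, 1001, 1) << " us\n";
  return 0;
}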
259 void FFmpegVideoDecoder::DeliverFrame( 413 void FFmpegVideoDecoder::DeliverFrame(
260 const scoped_refptr<VideoFrame>& video_frame) { 414 const scoped_refptr<VideoFrame>& video_frame) {
261 // Reset the callback before running to protect against reentrancy. 415 // Reset the callback before running to protect against reentrancy.
262 ReadCB read_cb = read_cb_; 416 ReadCB read_cb = read_cb_;
263 read_cb_.Reset(); 417 read_cb_.Reset();
264 read_cb.Run(video_frame); 418 read_cb.Run(video_frame);
265 } 419 }
266 420
421 void FFmpegVideoDecoder::ReleaseFFmpegResources() {
422 if (codec_context_) {
423 av_free(codec_context_->extradata);
424 avcodec_close(codec_context_);
425 av_free(codec_context_);
426 codec_context_ = NULL;
427 }
428 if (av_frame_) {
429 av_free(av_frame_);
430 av_frame_ = NULL;
431 }
432 }
433
434 scoped_refptr<VideoFrame> FFmpegVideoDecoder::AllocateVideoFrame() {
435 VideoFrame::Format format = PixelFormatToVideoFormat(codec_context_->pix_fmt);
436 size_t width = codec_context_->width;
437 size_t height = codec_context_->height;
438
439 return VideoFrame::CreateFrame(format, width, height,
440 kNoTimestamp, kNoTimestamp);
441 }
442
267 } // namespace media 443 } // namespace media