| OLD | NEW | 
|---|
| 1 // Copyright (c) 2009 The Chromium Authors. All rights reserved.  Use of this | 1 // Copyright (c) 2009 The Chromium Authors. All rights reserved.  Use of this | 
| 2 // source code is governed by a BSD-style license that can be found in the | 2 // source code is governed by a BSD-style license that can be found in the | 
| 3 // LICENSE file. | 3 // LICENSE file. | 
| 4 | 4 | 
|  | 5 #include "media/filters/ffmpeg_video_decoder.h" | 
|  | 6 | 
|  | 7 #include "base/task.h" | 
|  | 8 #include "base/waitable_event.h" | 
|  | 9 #include "media/base/callback.h" | 
| 5 #include "media/base/limits.h" | 10 #include "media/base/limits.h" | 
| 6 #include "media/base/video_frame_impl.h" | 11 #include "media/base/video_frame_impl.h" | 
|  | 12 #include "media/ffmpeg/ffmpeg_util.h" | 
| 7 #include "media/filters/ffmpeg_common.h" | 13 #include "media/filters/ffmpeg_common.h" | 
| 8 #include "media/filters/ffmpeg_demuxer.h" | 14 #include "media/filters/ffmpeg_demuxer.h" | 
| 9 #include "media/filters/ffmpeg_video_decoder.h" |  | 
| 10 |  | 
| 11 namespace { |  | 
| 12 |  | 
| 13 const AVRational kMicrosBase = { 1, base::Time::kMicrosecondsPerSecond }; |  | 
| 14 |  | 
| 15 // TODO(ajwong): Move this into a utility function file and dedup with |  | 
| 16 // FFmpegDemuxer ConvertTimestamp. |  | 
| 17 base::TimeDelta ConvertTimestamp(const AVRational& time_base, int64 timestamp) { |  | 
| 18   int64 microseconds = av_rescale_q(timestamp, time_base, kMicrosBase); |  | 
| 19   return base::TimeDelta::FromMicroseconds(microseconds); |  | 
| 20 } |  | 
| 21 |  | 
| 22 }  // namespace |  | 
| 23 | 15 | 
| 24 namespace media { | 16 namespace media { | 
| 25 | 17 | 
| 26 // Always try to use two threads for video decoding.  There is little reason | 18 FFmpegVideoDecodeEngine::FFmpegVideoDecodeEngine() | 
| 27 // not to since current day CPUs tend to be multi-core and we measured | 19     : codec_context_(NULL), | 
| 28 // performance benefits on older machines such as P4s with hyperthreading. | 20       state_(kCreated) { | 
| 29 // | 21 } | 
| 30 // Handling decoding on separate threads also frees up the pipeline thread to |  | 
| 31 // continue processing. Although it'd be nice to have the option of a single |  | 
| 32 // decoding thread, FFmpeg treats having one thread the same as having zero |  | 
| 33 // threads (i.e., avcodec_decode_video() will execute on the calling thread). |  | 
| 34 // Yet another reason for having two threads :) |  | 
| 35 // |  | 
| 36 // TODO(scherkus): some video codecs might not like avcodec_thread_init() being |  | 
| 37 // called on them... should attempt to find out which ones those are! |  | 
| 38 static const int kDecodeThreads = 2; |  | 
| 39 | 22 | 
| 40 FFmpegVideoDecoder::FFmpegVideoDecoder() | 23 FFmpegVideoDecodeEngine::~FFmpegVideoDecodeEngine() { | 
|  | 24 } | 
|  | 25 | 
|  | 26 void FFmpegVideoDecodeEngine::Initialize(AVStream* stream, Task* done_cb) { | 
|  | 27   AutoTaskRunner done_runner(done_cb); | 
|  | 28 | 
|  | 29   // Always try to use two threads for video decoding.  There is little reason | 
|  | 30   // not to since current day CPUs tend to be multi-core and we measured | 
|  | 31   // performance benefits on older machines such as P4s with hyperthreading. | 
|  | 32   // | 
|  | 33   // Handling decoding on separate threads also frees up the pipeline thread to | 
|  | 34   // continue processing. Although it'd be nice to have the option of a single | 
|  | 35   // decoding thread, FFmpeg treats having one thread the same as having zero | 
|  | 36   // threads (i.e., avcodec_decode_video() will execute on the calling thread). | 
|  | 37   // Yet another reason for having two threads :) | 
|  | 38   // | 
|  | 39   // TODO(scherkus): some video codecs might not like avcodec_thread_init() | 
|  | 40   // being called on them... should attempt to find out which ones those are! | 
|  | 41   static const int kDecodeThreads = 2; | 
|  | 42 | 
|  | 43   CHECK(state_ == kCreated); | 
|  | 44 | 
|  | 45   codec_context_ = stream->codec; | 
|  | 46   codec_context_->flags2 |= CODEC_FLAG2_FAST;  // Enable faster H264 decode. | 
|  | 47   // Enable motion vector search (potentially slow), strong deblocking filter | 
|  | 48   // for damaged macroblocks, and set our error detection sensitivity. | 
|  | 49   codec_context_->error_concealment = FF_EC_GUESS_MVS | FF_EC_DEBLOCK; | 
|  | 50   codec_context_->error_recognition = FF_ER_CAREFUL; | 
|  | 51 | 
|  | 52   // Serialize calls to avcodec_open(). | 
|  | 53   AVCodec* codec = avcodec_find_decoder(codec_context_->codec_id); | 
|  | 54   { | 
|  | 55     AutoLock auto_lock(FFmpegLock::get()->lock()); | 
|  | 56     if (codec && | 
|  | 57         avcodec_thread_init(codec_context_, kDecodeThreads) >= 0 && | 
|  | 58         avcodec_open(codec_context_, codec) >= 0) { | 
|  | 59       state_ = kNormal; | 
|  | 60     } else { | 
|  | 61       state_ = kError; | 
|  | 62     } | 
|  | 63   } | 
|  | 64 } | 
|  | 65 | 
|  | 66 // Decodes one frame of video with the given buffer. | 
|  | 67 void FFmpegVideoDecodeEngine::DecodeFrame(const Buffer& buffer, | 
|  | 68                                           AVFrame* yuv_frame, | 
|  | 69                                           bool* got_frame, | 
|  | 70                                           Task* done_cb) { | 
|  | 71   AutoTaskRunner done_runner(done_cb); | 
|  | 72 | 
|  | 73   // Create a packet for input data. | 
|  | 74   // Due to FFmpeg API changes we no longer have const read-only pointers. | 
|  | 75   // | 
|  | 76   // TODO(ajwong): This is dangerous since AVPacket may change size with | 
|  | 77   // different ffmpeg versions.  Use the alloca version. | 
|  | 78   AVPacket packet; | 
|  | 79   av_init_packet(&packet); | 
|  | 80   packet.data = const_cast<uint8*>(buffer.GetData()); | 
|  | 81   packet.size = buffer.GetDataSize(); | 
|  | 82 | 
|  | 83   // We don't allocate AVFrame on the stack since different versions of FFmpeg | 
|  | 84   // may change the size of AVFrame, causing stack corruption.  The solution is | 
|  | 85   // to let FFmpeg allocate the structure via avcodec_alloc_frame(). | 
|  | 86   int frame_decoded = 0; | 
|  | 87   int result = | 
|  | 88       avcodec_decode_video2(codec_context_, yuv_frame, &frame_decoded, &packet); | 
|  | 89 | 
|  | 90   // Log the problem if we can't decode a video frame and exit early. | 
|  | 91   if (result < 0) { | 
|  | 92     LOG(INFO) << "Error decoding a video frame with timestamp: " | 
|  | 93               << buffer.GetTimestamp().InMicroseconds() << " us" | 
|  | 94               << " , duration: " | 
|  | 95               << buffer.GetDuration().InMicroseconds() << " us" | 
|  | 96               << " , packet size: " | 
|  | 97               << buffer.GetDataSize() << " bytes"; | 
|  | 98     *got_frame = false; | 
|  | 99   } else { | 
|  | 100     // If frame_decoded == 0, then no frame was produced. | 
|  | 101     *got_frame = frame_decoded != 0; | 
|  | 102   } | 
|  | 103 } | 
|  | 104 | 
|  | 105 void FFmpegVideoDecodeEngine::Flush(Task* done_cb) { | 
|  | 106   AutoTaskRunner done_runner(done_cb); | 
|  | 107 | 
|  | 108   avcodec_flush_buffers(codec_context_); | 
|  | 109 } | 
|  | 110 | 
|  | 111 VideoSurface::Format FFmpegVideoDecodeEngine::GetSurfaceFormat() const { | 
|  | 112   // J (Motion JPEG) versions of YUV are full range 0..255. | 
|  | 113   // Regular (MPEG) YUV is 16..240. | 
|  | 114   // For now we will ignore the distinction and treat them the same. | 
|  | 115   switch (codec_context_->pix_fmt) { | 
|  | 116     case PIX_FMT_YUV420P: | 
|  | 117     case PIX_FMT_YUVJ420P: | 
|  | 118       return VideoSurface::YV12; | 
|  | 119       break; | 
|  | 120     case PIX_FMT_YUV422P: | 
|  | 121     case PIX_FMT_YUVJ422P: | 
|  | 122       return VideoSurface::YV16; | 
|  | 123       break; | 
|  | 124     default: | 
|  | 125       // TODO(scherkus): More formats here? | 
|  | 126       return VideoSurface::INVALID; | 
|  | 127   } | 
|  | 128 } | 
|  | 129 | 
|  | 130 // static | 
|  | 131 FilterFactory* FFmpegVideoDecoder::CreateFactory() { | 
|  | 132   return new FilterFactoryImpl1<FFmpegVideoDecoder, VideoDecodeEngine*>( | 
|  | 133       new FFmpegVideoDecodeEngine()); | 
|  | 134 } | 
|  | 135 | 
|  | 136 FFmpegVideoDecoder::FFmpegVideoDecoder(VideoDecodeEngine* engine) | 
| 41     : width_(0), | 137     : width_(0), | 
| 42       height_(0), | 138       height_(0), | 
| 43       time_base_(new AVRational()), | 139       time_base_(new AVRational()), | 
| 44       state_(kNormal), | 140       state_(kNormal), | 
| 45       codec_context_(NULL) { | 141       decode_engine_(engine) { | 
| 46 } | 142 } | 
| 47 | 143 | 
| 48 FFmpegVideoDecoder::~FFmpegVideoDecoder() { | 144 FFmpegVideoDecoder::~FFmpegVideoDecoder() { | 
| 49 } | 145 } | 
| 50 | 146 | 
| 51 // static | 147 // static | 
| 52 bool FFmpegVideoDecoder::IsMediaFormatSupported(const MediaFormat& format) { | 148 bool FFmpegVideoDecoder::IsMediaFormatSupported(const MediaFormat& format) { | 
| 53   std::string mime_type; | 149   std::string mime_type; | 
| 54   return format.GetAsString(MediaFormat::kMimeType, &mime_type) && | 150   return format.GetAsString(MediaFormat::kMimeType, &mime_type) && | 
| 55       mime_type::kFFmpegVideo == mime_type; | 151       mime_type::kFFmpegVideo == mime_type; | 
| 56 } | 152 } | 
| 57 | 153 | 
| 58 bool FFmpegVideoDecoder::OnInitialize(DemuxerStream* demuxer_stream) { | 154 void FFmpegVideoDecoder::DoInitialize(DemuxerStream* demuxer_stream, | 
|  | 155                                       bool* success, | 
|  | 156                                       Task* done_cb) { | 
|  | 157   AutoTaskRunner done_runner(done_cb); | 
|  | 158   *success = false; | 
|  | 159 | 
| 59   // Get the AVStream by querying for the provider interface. | 160   // Get the AVStream by querying for the provider interface. | 
| 60   AVStreamProvider* av_stream_provider; | 161   AVStreamProvider* av_stream_provider; | 
| 61   if (!demuxer_stream->QueryInterface(&av_stream_provider)) { | 162   if (!demuxer_stream->QueryInterface(&av_stream_provider)) { | 
| 62     return false; | 163     return; | 
| 63   } | 164   } | 
| 64   AVStream* av_stream = av_stream_provider->GetAVStream(); | 165   AVStream* av_stream = av_stream_provider->GetAVStream(); | 
| 65 | 166 | 
|  | 167   *time_base_ = av_stream->time_base; | 
|  | 168 | 
|  | 169   // TODO(ajwong): We don't need these extra variables if |media_format_| has | 
|  | 170   // them.  Remove. | 
| 66   width_ = av_stream->codec->width; | 171   width_ = av_stream->codec->width; | 
| 67   height_ = av_stream->codec->height; | 172   height_ = av_stream->codec->height; | 
| 68   *time_base_ = av_stream->time_base; |  | 
| 69   if (width_ > Limits::kMaxDimension || height_ > Limits::kMaxDimension || | 173   if (width_ > Limits::kMaxDimension || height_ > Limits::kMaxDimension || | 
| 70       width_ * height_ > Limits::kMaxCanvas) | 174       width_ * height_ > Limits::kMaxCanvas) | 
| 71       return false; | 175       return; | 
| 72 | 176 | 
| 73   media_format_.SetAsString(MediaFormat::kMimeType, | 177   media_format_.SetAsString(MediaFormat::kMimeType, | 
| 74                             mime_type::kUncompressedVideo); | 178                             mime_type::kUncompressedVideo); | 
| 75   media_format_.SetAsInteger(MediaFormat::kWidth, width_); | 179   media_format_.SetAsInteger(MediaFormat::kWidth, width_); | 
| 76   media_format_.SetAsInteger(MediaFormat::kHeight, height_); | 180   media_format_.SetAsInteger(MediaFormat::kHeight, height_); | 
| 77 | 181 | 
| 78   codec_context_ = av_stream->codec; | 182   decode_engine_->Initialize( | 
| 79   codec_context_->flags2 |= CODEC_FLAG2_FAST;  // Enable faster H264 decode. | 183       av_stream, | 
| 80   // Enable motion vector search (potentially slow), strong deblocking filter | 184       NewRunnableMethod(this, | 
| 81   // for damaged macroblocks, and set our error detection sensitivity. | 185                         &FFmpegVideoDecoder::OnInitializeComplete, | 
| 82   codec_context_->error_concealment = FF_EC_GUESS_MVS | FF_EC_DEBLOCK; | 186                         success, | 
| 83   codec_context_->error_recognition = FF_ER_CAREFUL; | 187                         done_runner.release())); | 
| 84 |  | 
| 85   // Serialize calls to avcodec_open(). |  | 
| 86   AVCodec* codec = avcodec_find_decoder(codec_context_->codec_id); |  | 
| 87   { |  | 
| 88     AutoLock auto_lock(FFmpegLock::get()->lock()); |  | 
| 89     if (!codec || |  | 
| 90         avcodec_thread_init(codec_context_, kDecodeThreads) < 0 || |  | 
| 91         avcodec_open(codec_context_, codec) < 0) { |  | 
| 92       return false; |  | 
| 93     } |  | 
| 94   } |  | 
| 95   return true; |  | 
| 96 } | 188 } | 
| 97 | 189 | 
| 98 void FFmpegVideoDecoder::OnSeek(base::TimeDelta time) { | 190 void FFmpegVideoDecoder::OnInitializeComplete(bool* success, Task* done_cb) { | 
|  | 191   AutoTaskRunner done_runner(done_cb); | 
|  | 192 | 
|  | 193   *success = decode_engine_->state() == FFmpegVideoDecodeEngine::kNormal; | 
|  | 194 } | 
|  | 195 | 
|  | 196 void FFmpegVideoDecoder::DoSeek(base::TimeDelta time, Task* done_cb) { | 
| 99   // Everything in the presentation time queue is invalid, clear the queue. | 197   // Everything in the presentation time queue is invalid, clear the queue. | 
| 100   while (!pts_heap_.IsEmpty()) | 198   while (!pts_heap_.IsEmpty()) | 
| 101     pts_heap_.Pop(); | 199     pts_heap_.Pop(); | 
| 102 | 200 | 
| 103   // We're back where we started.  It should be completely safe to flush here | 201   // We're back where we started.  It should be completely safe to flush here | 
| 104   // since DecoderBase uses |expecting_discontinuous_| to verify that the next | 202   // since DecoderBase uses |expecting_discontinuous_| to verify that the next | 
| 105   // time OnDecode() is called we will have a discontinuous buffer. | 203   // time DoDecode() is called we will have a discontinuous buffer. | 
|  | 204   // | 
|  | 205   // TODO(ajwong): Should we put a guard here to prevent leaving kError? | 
| 106   state_ = kNormal; | 206   state_ = kNormal; | 
| 107   avcodec_flush_buffers(codec_context_); | 207 | 
|  | 208   decode_engine_->Flush(done_cb); | 
| 108 } | 209 } | 
| 109 | 210 | 
| 110 void FFmpegVideoDecoder::OnDecode(Buffer* buffer) { | 211 void FFmpegVideoDecoder::DoDecode(Buffer* buffer, Task* done_cb) { | 
|  | 212   AutoTaskRunner done_runner(done_cb); | 
|  | 213 | 
|  | 214   // TODO(ajwong): This DoDecode and OnDecodeComplete set of functions is too | 
|  | 215   // complicated to easily unittest.  The test becomes fragile.  Try to find a | 
|  | 216   // way to reorganize into smaller units for testing. | 
|  | 217 | 
| 111   // During decode, because reads are issued asynchronously, it is possible to | 218   // During decode, because reads are issued asynchronously, it is possible to | 
| 112   // receive multiple end of stream buffers since each read is acked. When the | 219   // receive multiple end of stream buffers since each read is acked. When the | 
| 113   // first end of stream buffer is read, FFmpeg may still have frames queued | 220   // first end of stream buffer is read, FFmpeg may still have frames queued | 
| 114   // up in the decoder so we need to go through the decode loop until it stops | 221   // up in the decoder so we need to go through the decode loop until it stops | 
| 115   // giving sensible data.  After that, the decoder should output empty | 222   // giving sensible data.  After that, the decoder should output empty | 
| 116   // frames.  There are three states the decoder can be in: | 223   // frames.  There are three states the decoder can be in: | 
| 117   // | 224   // | 
| 118   //   kNormal: This is the starting state. Buffers are decoded. Decode errors | 225   //   kNormal: This is the starting state. Buffers are decoded. Decode errors | 
| 119   //            are discarded. | 226   //            are discarded. | 
| 120   //   kFlushCodec: There isn't any more input data. Call avcodec_decode_video2 | 227   //   kFlushCodec: There isn't any more input data. Call avcodec_decode_video2 | 
| (...skipping 27 matching lines...) Expand all  Loading... | 
| 148   // not yet received an end of stream buffer.  It is important that this line | 255   // not yet received an end of stream buffer.  It is important that this line | 
| 149   // stay below the state transition into kFlushCodec done above. | 256   // stay below the state transition into kFlushCodec done above. | 
| 150   // | 257   // | 
| 151   // TODO(ajwong): This push logic, along with the pop logic below needs to | 258   // TODO(ajwong): This push logic, along with the pop logic below needs to | 
| 152   // be reevaluated to correctly handle decode errors. | 259   // be reevaluated to correctly handle decode errors. | 
| 153   if (state_ == kNormal) { | 260   if (state_ == kNormal) { | 
| 154     pts_heap_.Push(buffer->GetTimestamp()); | 261     pts_heap_.Push(buffer->GetTimestamp()); | 
| 155   } | 262   } | 
| 156 | 263 | 
| 157   // Otherwise, attempt to decode a single frame. | 264   // Otherwise, attempt to decode a single frame. | 
| 158   scoped_ptr_malloc<AVFrame, ScopedPtrAVFree> yuv_frame(avcodec_alloc_frame()); | 265   AVFrame* yuv_frame = avcodec_alloc_frame(); | 
| 159   if (DecodeFrame(*buffer, codec_context_, yuv_frame.get())) { | 266   bool* got_frame = new bool; | 
| 160     last_pts_ = FindPtsAndDuration(*time_base_, | 267   decode_engine_->DecodeFrame( | 
| 161                                    pts_heap_, | 268       *buffer, | 
| 162                                    last_pts_, | 269       yuv_frame, | 
| 163                                    yuv_frame.get()); | 270       got_frame, | 
|  | 271       NewRunnableMethod(this, | 
|  | 272                         &FFmpegVideoDecoder::OnDecodeComplete, | 
|  | 273                         yuv_frame, | 
|  | 274                         got_frame, | 
|  | 275                         done_runner.release())); | 
|  | 276 } | 
|  | 277 | 
|  | 278 void FFmpegVideoDecoder::OnDecodeComplete(AVFrame* yuv_frame, bool* got_frame, | 
|  | 279                                           Task* done_cb) { | 
|  | 280   // Note: The |done_runner| must be declared *last* to ensure proper | 
|  | 281   // destruction order. | 
|  | 282   scoped_ptr_malloc<AVFrame, ScopedPtrAVFree> yuv_frame_deleter(yuv_frame); | 
|  | 283   scoped_ptr<bool> got_frame_deleter(got_frame); | 
|  | 284   AutoTaskRunner done_runner(done_cb); | 
|  | 285 | 
|  | 286   // If we actually got data back, enqueue a frame. | 
|  | 287   if (*got_frame) { | 
|  | 288     last_pts_ = FindPtsAndDuration(*time_base_, pts_heap_, last_pts_, | 
|  | 289                                    yuv_frame); | 
| 164 | 290 | 
| 165     // Pop off a pts on a successful decode since we are "using up" one | 291     // Pop off a pts on a successful decode since we are "using up" one | 
| 166     // timestamp. | 292     // timestamp. | 
| 167     // | 293     // | 
| 168     // TODO(ajwong): Do we need to pop off a pts when avcodec_decode_video2() | 294     // TODO(ajwong): Do we need to pop off a pts when avcodec_decode_video2() | 
| 169     // returns < 0?  The rationale is that when get_picture_ptr == 0, we skip | 295     // returns < 0?  The rationale is that when get_picture_ptr == 0, we skip | 
| 170     // popping a pts because no frame was produced.  However, when | 296     // popping a pts because no frame was produced.  However, when | 
| 171     // avcodec_decode_video2() returns false, it is a decode error, which | 297     // avcodec_decode_video2() returns false, it is a decode error, which | 
| 172     // if it means a frame is dropped, may require us to pop one more time. | 298     // if it means a frame is dropped, may require us to pop one more time. | 
| 173     if (!pts_heap_.IsEmpty()) { | 299     if (!pts_heap_.IsEmpty()) { | 
| 174       pts_heap_.Pop(); | 300       pts_heap_.Pop(); | 
| 175     } else { | 301     } else { | 
| 176       NOTREACHED() << "Attempting to decode more frames than were input."; | 302       NOTREACHED() << "Attempting to decode more frames than were input."; | 
| 177     } | 303     } | 
| 178 | 304 | 
| 179     if (!EnqueueVideoFrame( | 305     if (!EnqueueVideoFrame( | 
| 180             GetSurfaceFormat(*codec_context_), last_pts_, yuv_frame.get())) { | 306             decode_engine_->GetSurfaceFormat(), last_pts_, yuv_frame)) { | 
| 181       // On an EnqueueEmptyFrame error, error out the whole pipeline and | 307       // On an EnqueueEmptyFrame error, error out the whole pipeline and | 
| 182       // set the state to kDecodeFinished. | 308       // set the state to kDecodeFinished. | 
| 183       SignalPipelineError(); | 309       SignalPipelineError(); | 
| 184     } | 310     } | 
| 185   } else { | 311   } else { | 
| 186     // When in kFlushCodec, any errored decode, or a 0-lengthed frame, | 312     // When in kFlushCodec, any errored decode, or a 0-lengthed frame, | 
| 187     // is taken as a signal to stop decoding. | 313     // is taken as a signal to stop decoding. | 
| 188     if (state_ == kFlushCodec) { | 314     if (state_ == kFlushCodec) { | 
| 189       state_ = kDecodeFinished; | 315       state_ = kDecodeFinished; | 
| 190       EnqueueEmptyFrame(); | 316       EnqueueEmptyFrame(); | 
| (...skipping 60 matching lines...) Expand 10 before | Expand all | Expand 10 after  Loading... | 
| 251     dest += dest_stride; | 377     dest += dest_stride; | 
| 252   } | 378   } | 
| 253 } | 379 } | 
| 254 | 380 | 
| 255 void FFmpegVideoDecoder::EnqueueEmptyFrame() { | 381 void FFmpegVideoDecoder::EnqueueEmptyFrame() { | 
| 256   scoped_refptr<VideoFrame> video_frame; | 382   scoped_refptr<VideoFrame> video_frame; | 
| 257   VideoFrameImpl::CreateEmptyFrame(&video_frame); | 383   VideoFrameImpl::CreateEmptyFrame(&video_frame); | 
| 258   EnqueueResult(video_frame); | 384   EnqueueResult(video_frame); | 
| 259 } | 385 } | 
| 260 | 386 | 
| 261 bool FFmpegVideoDecoder::DecodeFrame(const Buffer& buffer, |  | 
| 262                                      AVCodecContext* codec_context, |  | 
| 263                                      AVFrame* yuv_frame) { |  | 
| 264   // Create a packet for input data. |  | 
| 265   // Due to FFmpeg API changes we no longer have const read-only pointers. |  | 
| 266   AVPacket packet; |  | 
| 267   av_init_packet(&packet); |  | 
| 268   packet.data = const_cast<uint8*>(buffer.GetData()); |  | 
| 269   packet.size = buffer.GetDataSize(); |  | 
| 270 |  | 
| 271   // We don't allocate AVFrame on the stack since different versions of FFmpeg |  | 
| 272   // may change the size of AVFrame, causing stack corruption.  The solution is |  | 
| 273   // to let FFmpeg allocate the structure via avcodec_alloc_frame(). |  | 
| 274   int frame_decoded = 0; |  | 
| 275   int result = |  | 
| 276       avcodec_decode_video2(codec_context, yuv_frame, &frame_decoded, &packet); |  | 
| 277 |  | 
| 278   // Log the problem if we can't decode a video frame and exit early. |  | 
| 279   if (result < 0) { |  | 
| 280     LOG(INFO) << "Error decoding a video frame with timestamp: " |  | 
| 281               << buffer.GetTimestamp().InMicroseconds() << " us" |  | 
| 282               << " , duration: " |  | 
| 283               << buffer.GetDuration().InMicroseconds() << " us" |  | 
| 284               << " , packet size: " |  | 
| 285               << buffer.GetDataSize() << " bytes"; |  | 
| 286     return false; |  | 
| 287   } |  | 
| 288 |  | 
| 289   // If frame_decoded == 0, then no frame was produced. |  | 
| 290   return frame_decoded != 0; |  | 
| 291 } |  | 
| 292 |  | 
| 293 FFmpegVideoDecoder::TimeTuple FFmpegVideoDecoder::FindPtsAndDuration( | 387 FFmpegVideoDecoder::TimeTuple FFmpegVideoDecoder::FindPtsAndDuration( | 
| 294     const AVRational& time_base, | 388     const AVRational& time_base, | 
| 295     const PtsHeap& pts_heap, | 389     const PtsHeap& pts_heap, | 
| 296     const TimeTuple& last_pts, | 390     const TimeTuple& last_pts, | 
| 297     const AVFrame* frame) { | 391     const AVFrame* frame) { | 
| 298   TimeTuple pts; | 392   TimeTuple pts; | 
| 299 | 393 | 
| 300   // Default repeat_pict to 0 because if there is no frame information, | 394   // Default repeat_pict to 0 because if there is no frame information, | 
| 301   // we just assume the frame only plays for one time_base. | 395   // we just assume the frame only plays for one time_base. | 
| 302   int repeat_pict = 0; | 396   int repeat_pict = 0; | 
| (...skipping 23 matching lines...) Expand all  Loading... | 
| 326   } | 420   } | 
| 327 | 421 | 
| 328   // Fill in the duration while accounting for repeated frames. | 422   // Fill in the duration while accounting for repeated frames. | 
| 329   // | 423   // | 
| 330   // TODO(ajwong): Make sure this formula is correct. | 424   // TODO(ajwong): Make sure this formula is correct. | 
| 331   pts.duration = ConvertTimestamp(time_base, 1 + repeat_pict); | 425   pts.duration = ConvertTimestamp(time_base, 1 + repeat_pict); | 
| 332 | 426 | 
| 333   return pts; | 427   return pts; | 
| 334 } | 428 } | 
| 335 | 429 | 
| 336 VideoSurface::Format FFmpegVideoDecoder::GetSurfaceFormat( |  | 
| 337     const AVCodecContext& codec_context) { |  | 
| 338   // J (Motion JPEG) versions of YUV are full range 0..255. |  | 
| 339   // Regular (MPEG) YUV is 16..240. |  | 
| 340   // For now we will ignore the distinction and treat them the same. |  | 
| 341   switch (codec_context.pix_fmt) { |  | 
| 342     case PIX_FMT_YUV420P: |  | 
| 343     case PIX_FMT_YUVJ420P: |  | 
| 344       return VideoSurface::YV12; |  | 
| 345       break; |  | 
| 346     case PIX_FMT_YUV422P: |  | 
| 347     case PIX_FMT_YUVJ422P: |  | 
| 348       return VideoSurface::YV16; |  | 
| 349       break; |  | 
| 350     default: |  | 
| 351       // TODO(scherkus): More formats here? |  | 
| 352       return VideoSurface::INVALID; |  | 
| 353   } |  | 
| 354 } |  | 
| 355 |  | 
| 356 void FFmpegVideoDecoder::SignalPipelineError() { | 430 void FFmpegVideoDecoder::SignalPipelineError() { | 
| 357   host()->SetError(PIPELINE_ERROR_DECODE); | 431   host()->SetError(PIPELINE_ERROR_DECODE); | 
| 358   state_ = kDecodeFinished; | 432   state_ = kDecodeFinished; | 
| 359 } | 433 } | 
| 360 | 434 | 
| 361 }  // namespace | 435 void FFmpegVideoDecoder::SetVideoDecodeEngineForTest( | 
|  | 436     VideoDecodeEngine* engine) { | 
|  | 437   decode_engine_.reset(engine); | 
|  | 438 } | 
|  | 439 | 
|  | 440 }  // namespace media | 
| OLD | NEW | 
|---|