| Index: media/filters/ffmpeg_video_decoder.cc
|
| diff --git a/media/filters/ffmpeg_video_decoder.cc b/media/filters/ffmpeg_video_decoder.cc
|
| index 93dcee2157a4bfc12f25759eb55dc6abc4431607..b00f9ddbc4c173b2e24826fb476f1d33826d39e3 100644
|
| --- a/media/filters/ffmpeg_video_decoder.cc
|
| +++ b/media/filters/ffmpeg_video_decoder.cc
|
| @@ -24,6 +24,8 @@
|
| #include "media/ffmpeg/ffmpeg_common.h"
|
| #include "media/filters/ffmpeg_glue.h"
|
|
|
| +#include "third_party/ffmpeg/libavutil/intreadwrite.h"
|
| +
|
| namespace media {
|
|
|
| // Always try to use three threads for video decoding. There is little reason
|
| @@ -38,6 +40,84 @@ namespace media {
|
| static const int kDecodeThreads = 2;
|
| static const int kMaxDecodeThreads = 16;
|
|
|
| +// Splits a VP9 "superframe" (several frames plus a trailing index packed
|
| +// into a single packet) into individual frames.  The logic mirrors the
|
| +// superframe parser in FFmpeg's ffvp9 decoder.
|
| +//
|
| +// Each call returns one frame via |*out_data|/|*out_size|.  The return
|
| +// value is the number of bytes of |data| consumed; the caller subtracts
|
| +// it from its remaining-byte count.  The final frame of a superframe
|
| +// also consumes the trailing index bytes.
|
| +int FFmpegVideoDecoder::ParseVP9Packet(uint8_t** out_data,
|
| +                                       int* out_size,
|
| +                                       uint8_t* data,
|
| +                                       int size) {
|
| +  VP9ParseContext* context = &parse_context_;
|
| +
|
| +  if (size <= 0) {
|
| +    *out_size = 0;
|
| +    *out_data = data;
|
| +    return 0;
|
| +  }
|
| +
|
| +  // Frames queued up by a previous call; hand out the next one.
|
| +  if (context->n_frames > 0) {
|
| +    *out_data = data;
|
| +    *out_size = context->size[--context->n_frames];
|
| +    // The last queued frame also consumes the superframe index bytes.
|
| +    return context->n_frames > 0 ? *out_size : size;
|
| +  }
|
| +
|
| +  // A superframe ends with a marker byte of the form 110SSNNN: SS is the
|
| +  // per-frame size field width minus one and NNN is the frame count
|
| +  // minus one.
|
| +  const int marker = data[size - 1];
|
| +  if ((marker & 0xe0) == 0xc0) {
|
| +    const int bytes_per_size = 1 + ((marker >> 3) & 0x3);
|
| +    int frame_count = 1 + (marker & 0x7);
|
| +    const int index_size = 2 + frame_count * bytes_per_size;
|
| +
|
| +    // A valid index is bracketed by two copies of the marker byte.
|
| +    if (size >= index_size && data[size - index_size] == marker) {
|
| +      const uint8_t* index = data + size + 1 - index_size;
|
| +      bool first = true;
|
| +
|
| +      while (frame_count--) {
|
| +        // Per-frame sizes are stored little-endian.
|
| +        unsigned frame_size = 0;
|
| +        for (int i = 0; i < bytes_per_size; ++i)
|
| +          frame_size |= static_cast<unsigned>(index[i]) << (8 * i);
|
| +        index += bytes_per_size;
|
| +
|
| +        if (frame_size > static_cast<unsigned>(size)) {
|
| +          context->n_frames = 0;
|
| +          LOG(ERROR) << "parsing failed, superframe too large";
|
| +          return size;
|
| +        }
|
| +
|
| +        if (first) {
|
| +          // Return the first frame now; queue the rest (stored in reverse
|
| +          // order) for subsequent calls.
|
| +          first = false;
|
| +          *out_data = data;
|
| +          *out_size = frame_size;
|
| +          context->n_frames = frame_count;
|
| +        } else {
|
| +          context->size[frame_count] = frame_size;
|
| +        }
|
| +        data += frame_size;
|
| +        size -= frame_size;
|
| +      }
|
| +      return *out_size;
|
| +    }
|
| +  }
|
| +
|
| +  // Not a superframe; the packet holds a single frame.
|
| +  *out_data = data;
|
| +  *out_size = size;
|
| +  return size;
|
| +}
|
| +
|
| // Returns the number of threads given the FFmpeg CodecID. Also inspects the
|
| // command line for a valid --video-threads flag.
|
| static int GetThreadCount(AVCodecID codec_id) {
|
| @@ -75,7 +144,10 @@ static size_t RoundUp(size_t value, size_t alignment) {
|
| FFmpegVideoDecoder::FFmpegVideoDecoder(
|
| const scoped_refptr<base::SingleThreadTaskRunner>& task_runner)
|
| : task_runner_(task_runner), state_(kUninitialized),
|
| - decode_nalus_(false) {}
|
| +      decode_nalus_(false),
|
| +      // Zero-initialize the VP9 superframe parser state carried across
|
| +      // ParseVP9Packet() calls.
|
| +      parse_context_() {}
|
|
|
| int FFmpegVideoDecoder::GetVideoBuffer(struct AVCodecContext* codec_context,
|
| AVFrame* frame,
|
| @@ -257,64 +326,82 @@ bool FFmpegVideoDecoder::FFmpegDecode(
|
| bool* has_produced_frame) {
|
| DCHECK(!*has_produced_frame);
|
|
|
| + size_t remaining_size = buffer->end_of_stream() ? 0 : buffer->data_size();
|
| +
|
| // Create a packet for input data.
|
| // Due to FFmpeg API changes we no longer have const read-only pointers.
|
| AVPacket packet;
|
| - av_init_packet(&packet);
|
| - if (buffer->end_of_stream()) {
|
| - packet.data = NULL;
|
| - packet.size = 0;
|
| - } else {
|
| - packet.data = const_cast<uint8*>(buffer->data());
|
| - packet.size = buffer->data_size();
|
| +  // Use do/while rather than while: for an end-of-stream buffer
|
| +  // |remaining_size| is 0, yet we must still send one flush packet
|
| +  // (data == NULL) so the decoder drains its internal frames.
|
| +  do {
|
| + av_init_packet(&packet);
|
| + if (buffer->end_of_stream()) {
|
| + packet.data = NULL;
|
| + packet.size = 0;
|
| + } else {
|
| + // TODO(dalecurtis): ffvp9 can't handle superframes, which are multiple
|
| + // frames inside a single packet. Normally its demuxer handles this
|
| + // parsing, but when ChunkDemuxer is used, the packet isn't split right.
|
| + if (codec_context_->codec_id == AV_CODEC_ID_VP9) {
|
| + int len = ParseVP9Packet(
|
| + &packet.data, &packet.size,
|
| + buffer->writable_data() + (buffer->data_size() - remaining_size),
|
| + remaining_size);
|
| +      // ParseVP9Packet() never consumes more than |remaining_size| bytes,
|
| +      // so this size_t subtraction cannot underflow.
|
| + remaining_size -= len;
|
| + } else {
|
| + packet.data = const_cast<uint8*>(buffer->data());
|
| + packet.size = buffer->data_size();
|
| + remaining_size = 0;
|
| + }
|
| +
|
| + // Let FFmpeg handle presentation timestamp reordering.
|
| + codec_context_->reordered_opaque = buffer->timestamp().InMicroseconds();
|
| + }
|
|
|
| - // Let FFmpeg handle presentation timestamp reordering.
|
| - codec_context_->reordered_opaque = buffer->timestamp().InMicroseconds();
|
| - }
|
| + int frame_decoded = 0;
|
| + const int result = avcodec_decode_video2(
|
| + codec_context_.get(), av_frame_.get(), &frame_decoded, &packet);
|
| + // Log the problem if we can't decode a video frame and exit early.
|
| + if (result < 0) {
|
| + LOG(ERROR) << "Error decoding video: " << buffer->AsHumanReadableString();
|
| + return false;
|
| + }
|
|
|
| - int frame_decoded = 0;
|
| - int result = avcodec_decode_video2(codec_context_.get(),
|
| - av_frame_.get(),
|
| - &frame_decoded,
|
| - &packet);
|
| - // Log the problem if we can't decode a video frame and exit early.
|
| - if (result < 0) {
|
| - LOG(ERROR) << "Error decoding video: " << buffer->AsHumanReadableString();
|
| - return false;
|
| - }
|
| + // FFmpeg says some codecs might have multiple frames per packet. Previous
|
| + // discussions with rbultje@ indicate this shouldn't be true for the codecs
|
| + // we use.
|
| + DCHECK_EQ(result, packet.size);
|
| +
|
| + // If no frame was produced then signal that more data is required to
|
| + // produce more frames. This can happen under two circumstances:
|
| + // 1) Decoder was recently initialized/flushed
|
| + // 2) End of stream was reached and all internal frames have been output
|
| + if (frame_decoded == 0)
|
| + continue;
|
| +
|
| + // TODO(fbarchard): Work around for FFmpeg http://crbug.com/27675
|
| + // The decoder is in a bad state and not decoding correctly.
|
| + // Checking for NULL avoids a crash in CopyPlane().
|
| + if (!av_frame_->data[VideoFrame::kYPlane] ||
|
| + !av_frame_->data[VideoFrame::kUPlane] ||
|
| + !av_frame_->data[VideoFrame::kVPlane]) {
|
| + LOG(ERROR) << "Video frame was produced yet has invalid frame data.";
|
| + av_frame_unref(av_frame_.get());
|
| + return false;
|
| + }
|
|
|
| - // FFmpeg says some codecs might have multiple frames per packet. Previous
|
| - discussions with rbultje@ indicate this shouldn't be true for the codecs
|
| - we use.
|
| - DCHECK_EQ(result, packet.size);
|
| -
|
| - // If no frame was produced then signal that more data is required to
|
| - // produce more frames. This can happen under two circumstances:
|
| - // 1) Decoder was recently initialized/flushed
|
| - // 2) End of stream was reached and all internal frames have been output
|
| - if (frame_decoded == 0) {
|
| - return true;
|
| - }
|
| + scoped_refptr<VideoFrame> frame =
|
| + reinterpret_cast<VideoFrame*>(av_buffer_get_opaque(av_frame_->buf[0]));
|
| + frame->set_timestamp(
|
| + base::TimeDelta::FromMicroseconds(av_frame_->reordered_opaque));
|
| + *has_produced_frame = true;
|
| + output_cb_.Run(frame);
|
|
|
| - // TODO(fbarchard): Work around for FFmpeg http://crbug.com/27675
|
| - // The decoder is in a bad state and not decoding correctly.
|
| - // Checking for NULL avoids a crash in CopyPlane().
|
| - if (!av_frame_->data[VideoFrame::kYPlane] ||
|
| - !av_frame_->data[VideoFrame::kUPlane] ||
|
| - !av_frame_->data[VideoFrame::kVPlane]) {
|
| - LOG(ERROR) << "Video frame was produced yet has invalid frame data.";
|
| av_frame_unref(av_frame_.get());
|
| - return false;
|
| +  } while (remaining_size > 0);
|
| -
|
| - scoped_refptr<VideoFrame> frame =
|
| - reinterpret_cast<VideoFrame*>(av_buffer_get_opaque(av_frame_->buf[0]));
|
| - frame->set_timestamp(
|
| - base::TimeDelta::FromMicroseconds(av_frame_->reordered_opaque));
|
| - *has_produced_frame = true;
|
| - output_cb_.Run(frame);
|
| -
|
| - av_frame_unref(av_frame_.get());
|
| return true;
|
| }
|
|
|
|
|