| OLD | NEW |
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "media/filters/ffmpeg_video_decoder.h" | 5 #include "media/filters/ffmpeg_video_decoder.h" |
| 6 | 6 |
| 7 #include <algorithm> | 7 #include <algorithm> |
| 8 #include <string> | 8 #include <string> |
| 9 | 9 |
| 10 #include "base/bind.h" | 10 #include "base/bind.h" |
| 11 #include "base/callback_helpers.h" | 11 #include "base/callback_helpers.h" |
| 12 #include "base/command_line.h" | 12 #include "base/command_line.h" |
| 13 #include "base/location.h" | 13 #include "base/location.h" |
| 14 #include "base/single_thread_task_runner.h" | 14 #include "base/single_thread_task_runner.h" |
| 15 #include "base/strings/string_number_conversions.h" | 15 #include "base/strings/string_number_conversions.h" |
| 16 #include "media/base/bind_to_current_loop.h" | 16 #include "media/base/bind_to_current_loop.h" |
| 17 #include "media/base/decoder_buffer.h" | 17 #include "media/base/decoder_buffer.h" |
| 18 #include "media/base/limits.h" | 18 #include "media/base/limits.h" |
| 19 #include "media/base/media_switches.h" | 19 #include "media/base/media_switches.h" |
| 20 #include "media/base/pipeline.h" | 20 #include "media/base/pipeline.h" |
| 21 #include "media/base/video_decoder_config.h" | 21 #include "media/base/video_decoder_config.h" |
| 22 #include "media/base/video_frame.h" | 22 #include "media/base/video_frame.h" |
| 23 #include "media/base/video_util.h" | 23 #include "media/base/video_util.h" |
| 24 #include "media/ffmpeg/ffmpeg_common.h" | 24 #include "media/ffmpeg/ffmpeg_common.h" |
| 25 #include "media/filters/ffmpeg_glue.h" | 25 #include "media/filters/ffmpeg_glue.h" |
| 26 | 26 |
| 27 #include "third_party/ffmpeg/libavutil/intreadwrite.h" |
| 28 |
| 27 namespace media { | 29 namespace media { |
| 28 | 30 |
| 29 // Always try to use two threads for video decoding. There is little reason | 31 // Always try to use two threads for video decoding. There is little reason |
| 30 // not to since current day CPUs tend to be multi-core and we measured | 32 // not to since current day CPUs tend to be multi-core and we measured |
| 31 // performance benefits on older machines such as P4s with hyperthreading. | 33 // performance benefits on older machines such as P4s with hyperthreading. |
| 32 // | 34 // |
| 33 // Handling decoding on separate threads also frees up the pipeline thread to | 35 // Handling decoding on separate threads also frees up the pipeline thread to |
| 34 // continue processing. Although it'd be nice to have the option of a single | 36 // continue processing. Although it'd be nice to have the option of a single |
| 35 // decoding thread, FFmpeg treats having one thread the same as having zero | 37 // decoding thread, FFmpeg treats having one thread the same as having zero |
| 36 // threads (i.e., avcodec_decode_video() will execute on the calling thread). | 38 // threads (i.e., avcodec_decode_video() will execute on the calling thread). |
| 37 // Yet another reason for having two threads :) | 39 // Yet another reason for having two threads :) |
| 38 static const int kDecodeThreads = 2; | 40 static const int kDecodeThreads = 2; |
| 39 static const int kMaxDecodeThreads = 16; | 41 static const int kMaxDecodeThreads = 16; |
| 40 | 42 |
| 43 // TODO(dalecurtis): This needs to be rewritten in Chrome style if we're going |
| 44 // to use it. Right now it is pulled directly from the ffvp9 parser. |
| 45 int FFmpegVideoDecoder::ParseVP9Packet(uint8_t** out_data, |
| 46 int* out_size, |
| 47 uint8_t* data, |
| 48 int size) { |
| 49 VP9ParseContext* s = &parse_context_; |
| 50 int marker = 0; |
| 51 |
| 52 if (size <= 0) { |
| 53 *out_size = 0; |
| 54 *out_data = data; |
| 55 return 0; |
| 56 } |
| 57 |
| 58 if (s->n_frames > 0) { |
| 59 *out_data = data; |
| 60 *out_size = s->size[--s->n_frames]; |
| 61 return s->n_frames > 0 ? *out_size : size /* i.e. include idx tail */; |
| 62 } |
| 63 |
| 64 marker = data[size - 1]; |
| 65 if ((marker & 0xe0) == 0xc0) { |
| 66 int nbytes = 1 + ((marker >> 3) & 0x3); |
| 67 int n_frames = 1 + (marker & 0x7), idx_sz = 2 + n_frames * nbytes; |
| 68 |
| 69 if (size >= idx_sz && data[size - idx_sz] == marker) { |
| 70 const uint8_t* idx = data + size + 1 - idx_sz; |
| 71 int first = 1; |
| 72 |
| 73 switch (nbytes) { |
| 74 #define case_n(a, rd) \ |
| 75 case a: \ |
| 76 while (n_frames--) { \ |
| 77 unsigned sz = rd; \ |
| 78 idx += a; \ |
| 79 if (sz > static_cast<unsigned>(size)) { \ |
| 80 s->n_frames = 0; \ |
| 81 LOG(ERROR) << "parsing failed, superframe too large"; \ |
| 82 return size; \ |
| 83 } \ |
| 84 if (first) { \ |
| 85 first = 0; \ |
| 86 *out_data = data; \ |
| 87 *out_size = sz; \ |
| 88 s->n_frames = n_frames; \ |
| 89 } else { \ |
| 90 s->size[n_frames] = sz; \ |
| 91 } \ |
| 92 data += sz; \ |
| 93 size -= sz; \ |
| 94 } \ |
| 95 return *out_size |
| 96 |
| 97 case_n(1u, *idx); |
| 98 case_n(2u, AV_RL16(idx)); |
| 99 case_n(3u, AV_RL24(idx)); |
| 100 case_n(4u, AV_RL32(idx)); |
| 101 } |
| 102 } |
| 103 } |
| 104 |
| 105 *out_data = data; |
| 106 *out_size = size; |
| 107 return size; |
| 108 } |
| 109 |
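For orientation (an illustration added here, not part of the CL): the superframe index that ParseVP9Packet() walks is a trailer appended after the concatenated frames. It begins and ends with a marker byte whose top three bits are 110, whose next two bits encode the byte width of each size field minus one, and whose low three bits encode the frame count minus one; between the two markers sit the little-endian per-frame sizes. The hypothetical helper below (all names are made up for illustration) builds a two-frame superframe in that layout:

```cpp
// Hypothetical helper (illustrative only): concatenates two VP9 frames and
// appends the superframe index in the layout ParseVP9Packet() expects.
#include <stdint.h>
#include <vector>

std::vector<uint8_t> BuildTwoFrameSuperframe(const std::vector<uint8_t>& a,
                                             const std::vector<uint8_t>& b) {
  std::vector<uint8_t> out(a);
  out.insert(out.end(), b.begin(), b.end());

  const int nbytes = 2;  // Each frame size is stored as two bytes.
  const uint8_t marker = 0xc0 |                 // 0b110xxxxx superframe marker.
                         ((nbytes - 1) << 3) |  // Size-field width minus one.
                         (2 - 1);               // Frame count minus one.
  out.push_back(marker);
  const std::vector<uint8_t>* frames[] = {&a, &b};
  for (const std::vector<uint8_t>* frame : frames) {
    out.push_back(static_cast<uint8_t>(frame->size() & 0xff));         // Low byte.
    out.push_back(static_cast<uint8_t>((frame->size() >> 8) & 0xff));  // High byte.
  }
  out.push_back(marker);  // The index is bracketed by the marker byte.
  return out;
}
```

Feeding the result through ParseVP9Packet() should yield frame a on the first call and frame b on the second, with the second call's return value also covering the six trailing index bytes.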
| 41 // Returns the number of threads given the FFmpeg CodecID. Also inspects the | 110 // Returns the number of threads given the FFmpeg CodecID. Also inspects the |
| 42 // command line for a valid --video-threads flag. | 111 // command line for a valid --video-threads flag. |
| 43 static int GetThreadCount(AVCodecID codec_id) { | 112 static int GetThreadCount(AVCodecID codec_id) { |
| 44 // Refer to http://crbug.com/93932 for tsan suppressions on decoding. | 113 // Refer to http://crbug.com/93932 for tsan suppressions on decoding. |
| 45 int decode_threads = kDecodeThreads; | 114 int decode_threads = kDecodeThreads; |
| 46 | 115 |
| 47 const CommandLine* cmd_line = CommandLine::ForCurrentProcess(); | 116 const CommandLine* cmd_line = CommandLine::ForCurrentProcess(); |
| 48 std::string threads(cmd_line->GetSwitchValueASCII(switches::kVideoThreads)); | 117 std::string threads(cmd_line->GetSwitchValueASCII(switches::kVideoThreads)); |
| 49 if (threads.empty() || !base::StringToInt(threads, &decode_threads)) | 118 if (threads.empty() || !base::StringToInt(threads, &decode_threads)) |
| 50 return decode_threads; | 119 return decode_threads; |
| (...skipping 17 matching lines...) |
| 68 | 137 |
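As an aside (a sketch under assumptions, not quoted from this file): the count computed here is normally applied to the AVCodecContext before the codec is opened. ConfigureDecoder(), where that wiring lives, is largely elided from this diff, so the plumbing below is illustrative; only thread_count, avcodec_find_decoder() and avcodec_open2() are real FFmpeg API.

```cpp
// Illustrative sketch: applying a decode thread count when opening a decoder.
extern "C" {
#include <libavcodec/avcodec.h>
}

static bool OpenWithThreads(AVCodecContext* context, int thread_count) {
  context->thread_count = thread_count;  // e.g. the value from GetThreadCount().
  const AVCodec* codec = avcodec_find_decoder(context->codec_id);
  return codec && avcodec_open2(context, codec, NULL) >= 0;
}
```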
| 69 static size_t RoundUp(size_t value, size_t alignment) { | 138 static size_t RoundUp(size_t value, size_t alignment) { |
| 70 // Check that |alignment| is a power of 2. | 139 // Check that |alignment| is a power of 2. |
| 71 DCHECK((alignment + (alignment - 1)) == (alignment | (alignment - 1))); | 140 DCHECK((alignment + (alignment - 1)) == (alignment | (alignment - 1))); |
| 72 return ((value + (alignment - 1)) & ~(alignment - 1)); | 141 return ((value + (alignment - 1)) & ~(alignment - 1)); |
| 73 } | 142 } |
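The DCHECK in RoundUp() relies on a small identity: for a power of two a, both a + (a - 1) and a | (a - 1) equal 2a - 1, while for any other positive a the addition carries past what the OR can reach, since a + b == (a | b) + (a & b) and a & (a - 1) is nonzero. A standalone check, purely illustrative:

```cpp
// Illustrative only: the power-of-two identity behind RoundUp()'s DCHECK.
#include <assert.h>
#include <stddef.h>

static bool IsPowerOfTwo(size_t a) {
  return (a + (a - 1)) == (a | (a - 1));
}

int main() {
  assert(IsPowerOfTwo(16));        // 16 + 15 == 31 == 16 | 15.
  assert(!IsPowerOfTwo(12));       // 12 + 11 == 23, but 12 | 11 == 15.
  assert(((13 + 15) & ~15) == 16); // RoundUp(13, 16) == 16.
  return 0;
}
```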
| 74 | 143 |
| 75 FFmpegVideoDecoder::FFmpegVideoDecoder( | 144 FFmpegVideoDecoder::FFmpegVideoDecoder( |
| 76 const scoped_refptr<base::SingleThreadTaskRunner>& task_runner) | 145 const scoped_refptr<base::SingleThreadTaskRunner>& task_runner) |
| 77 : task_runner_(task_runner), state_(kUninitialized), | 146 : task_runner_(task_runner), state_(kUninitialized), |
| 78 decode_nalus_(false) {} | 147 decode_nalus_(false), parse_context_() {} |
| 79 | 148 |
| 80 int FFmpegVideoDecoder::GetVideoBuffer(struct AVCodecContext* codec_context, | 149 int FFmpegVideoDecoder::GetVideoBuffer(struct AVCodecContext* codec_context, |
| 81 AVFrame* frame, | 150 AVFrame* frame, |
| 82 int flags) { | 151 int flags) { |
| 83 // Don't use |codec_context_| here! With threaded decoding, | 152 // Don't use |codec_context_| here! With threaded decoding, |
| 84 // it will contain unsynchronized width/height/pix_fmt values, | 153 // it will contain unsynchronized width/height/pix_fmt values, |
| 85 // whereas |codec_context| contains the current thread's | 154 // whereas |codec_context| contains the current thread's |
| 86 // updated width/height/pix_fmt, which can change for adaptive | 155 // updated width/height/pix_fmt, which can change for adaptive |
| 87 // content. | 156 // content. |
| 88 VideoFrame::Format format = PixelFormatToVideoFormat(codec_context->pix_fmt); | 157 VideoFrame::Format format = PixelFormatToVideoFormat(codec_context->pix_fmt); |
| (...skipping 161 matching lines...) |
| 250 | 319 |
| 251 if (state_ != kUninitialized) | 320 if (state_ != kUninitialized) |
| 252 ReleaseFFmpegResources(); | 321 ReleaseFFmpegResources(); |
| 253 } | 322 } |
| 254 | 323 |
| 255 bool FFmpegVideoDecoder::FFmpegDecode( | 324 bool FFmpegVideoDecoder::FFmpegDecode( |
| 256 const scoped_refptr<DecoderBuffer>& buffer, | 325 const scoped_refptr<DecoderBuffer>& buffer, |
| 257 bool* has_produced_frame) { | 326 bool* has_produced_frame) { |
| 258 DCHECK(!*has_produced_frame); | 327 DCHECK(!*has_produced_frame); |
| 259 | 328 |
| 329 size_t remaining_size = buffer->end_of_stream() ? 0 : buffer->data_size(); |
| 330 |
| 260 // Create a packet for input data. | 331 // Create a packet for input data. |
| 261 // Due to FFmpeg API changes we no longer have const read-only pointers. | 332 // Due to FFmpeg API changes we no longer have const read-only pointers. |
| 262 AVPacket packet; | 333 AVPacket packet; |
| 263 av_init_packet(&packet); | 334 do { // One pass even at end of stream so the decoder still gets flushed. |
| 264 if (buffer->end_of_stream()) { | 335 av_init_packet(&packet); |
| 265 packet.data = NULL; | 336 if (buffer->end_of_stream()) { |
| 266 packet.size = 0; | 337 packet.data = NULL; |
| 267 } else { | 338 packet.size = 0; |
| 268 packet.data = const_cast<uint8*>(buffer->data()); | 339 } else { |
| 269 packet.size = buffer->data_size(); | 340 // TODO(dalecurtis): ffvp9 can't handle superframes, which are multiple |
| 341 // frames inside a single packet. Normally FFmpeg's demuxer handles this |
| 342 // parsing, but when ChunkDemuxer is used, the packet isn't split correctly. |
| 343 if (codec_context_->codec_id == AV_CODEC_ID_VP9) { |
| 344 int len = ParseVP9Packet( |
| 345 &packet.data, &packet.size, |
| 346 buffer->writable_data() + (buffer->data_size() - remaining_size), |
| 347 remaining_size); |
| 348 remaining_size -= len; |
| 349 } else { |
| 350 packet.data = const_cast<uint8*>(buffer->data()); |
| 351 packet.size = buffer->data_size(); |
| 352 remaining_size = 0; |
| 353 } |
| 270 | 354 |
| 271 // Let FFmpeg handle presentation timestamp reordering. | 355 // Let FFmpeg handle presentation timestamp reordering. |
| 272 codec_context_->reordered_opaque = buffer->timestamp().InMicroseconds(); | 356 codec_context_->reordered_opaque = buffer->timestamp().InMicroseconds(); |
| 357 // buffer->set_timestamp(buffer->timestamp() + buffer->duration()); |
| 358 } |
| 359 |
| 360 int frame_decoded = 0; |
| 361 const int result = avcodec_decode_video2( |
| 362 codec_context_.get(), av_frame_.get(), &frame_decoded, &packet); |
| 363 // Log the problem if we can't decode a video frame and exit early. |
| 364 if (result < 0) { |
| 365 LOG(ERROR) << "Error decoding video: " << buffer->AsHumanReadableString(); |
| 366 return false; |
| 367 } |
| 368 |
| 369 // FFmpeg says some codecs might have multiple frames per packet. Previous |
| 370 // discussions with rbultje@ indicate this shouldn't be true for the codecs |
| 371 // we use. |
| 372 DCHECK_EQ(result, packet.size); |
| 373 |
| 374 // If no frame was produced then signal that more data is required to |
| 375 // produce more frames. This can happen under two circumstances: |
| 376 // 1) Decoder was recently initialized/flushed |
| 377 // 2) End of stream was reached and all internal frames have been output |
| 378 if (frame_decoded == 0) |
| 379 continue; |
| 380 |
| 381 // TODO(fbarchard): Workaround for FFmpeg http://crbug.com/27675 |
| 382 // The decoder is in a bad state and not decoding correctly. |
| 383 // Checking for NULL avoids a crash in CopyPlane(). |
| 384 if (!av_frame_->data[VideoFrame::kYPlane] || |
| 385 !av_frame_->data[VideoFrame::kUPlane] || |
| 386 !av_frame_->data[VideoFrame::kVPlane]) { |
| 387 LOG(ERROR) << "Video frame was produced yet has invalid frame data."; |
| 388 av_frame_unref(av_frame_.get()); |
| 389 return false; |
| 390 } |
| 391 |
| 392 scoped_refptr<VideoFrame> frame = |
| 393 reinterpret_cast<VideoFrame*>(av_buffer_get_opaque(av_frame_->buf[0])); |
| 394 frame->set_timestamp( |
| 395 base::TimeDelta::FromMicroseconds(av_frame_->reordered_opaque)); |
| 396 *has_produced_frame = true; |
| 397 output_cb_.Run(frame); |
| 398 |
| 399 av_frame_unref(av_frame_.get()); |
| 273 } | 400 } while (remaining_size > 0); |
| 274 | |
| 275 int frame_decoded = 0; | |
| 276 int result = avcodec_decode_video2(codec_context_.get(), | |
| 277 av_frame_.get(), | |
| 278 &frame_decoded, | |
| 279 &packet); | |
| 280 // Log the problem if we can't decode a video frame and exit early. | |
| 281 if (result < 0) { | |
| 282 LOG(ERROR) << "Error decoding video: " << buffer->AsHumanReadableString(); | |
| 283 return false; | |
| 284 } | |
| 285 | |
| 286 // FFmpeg says some codecs might have multiple frames per packet. Previous | |
| 287 // discussions with rbultje@ indicate this shouldn't be true for the codecs | |
| 288 // we use. | |
| 289 DCHECK_EQ(result, packet.size); | |
| 290 | |
| 291 // If no frame was produced then signal that more data is required to | |
| 292 // produce more frames. This can happen under two circumstances: | |
| 293 // 1) Decoder was recently initialized/flushed | |
| 294 // 2) End of stream was reached and all internal frames have been output | |
| 295 if (frame_decoded == 0) { | |
| 296 return true; | |
| 297 } | |
| 298 | |
| 299 // TODO(fbarchard): Workaround for FFmpeg http://crbug.com/27675 | |
| 300 // The decoder is in a bad state and not decoding correctly. | |
| 301 // Checking for NULL avoids a crash in CopyPlane(). | |
| 302 if (!av_frame_->data[VideoFrame::kYPlane] || | |
| 303 !av_frame_->data[VideoFrame::kUPlane] || | |
| 304 !av_frame_->data[VideoFrame::kVPlane]) { | |
| 305 LOG(ERROR) << "Video frame was produced yet has invalid frame data."; | |
| 306 av_frame_unref(av_frame_.get()); | |
| 307 return false; | |
| 308 } | |
| 309 | |
| 310 scoped_refptr<VideoFrame> frame = | |
| 311 reinterpret_cast<VideoFrame*>(av_buffer_get_opaque(av_frame_->buf[0])); | |
| 312 frame->set_timestamp( | |
| 313 base::TimeDelta::FromMicroseconds(av_frame_->reordered_opaque)); | |
| 314 *has_produced_frame = true; | |
| 315 output_cb_.Run(frame); | |
| 316 | |
| 317 av_frame_unref(av_frame_.get()); | |
| 318 return true; | 401 return true; |
| 319 } | 402 } |
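For reference (an illustrative sketch, not part of the change): the end-of-stream branch above leans on the standard avcodec_decode_video2() draining behaviour, where an empty packet asks a codec with delayed output to hand back whatever frames it is still buffering. In isolation that contract looks roughly like this:

```cpp
// Illustrative only: draining buffered frames from an FFmpeg video decoder
// at end of stream by feeding empty packets until no frame is produced.
extern "C" {
#include <libavcodec/avcodec.h>
}

static void DrainDecoder(AVCodecContext* context) {
  AVFrame* frame = av_frame_alloc();
  AVPacket packet;
  av_init_packet(&packet);
  packet.data = NULL;  // An empty packet asks a delayed codec to flush.
  packet.size = 0;

  int got_frame = 0;
  do {
    if (avcodec_decode_video2(context, frame, &got_frame, &packet) < 0)
      break;
    if (got_frame) {
      // A real caller would hand the frame to its output callback here.
      av_frame_unref(frame);
    }
  } while (got_frame);
  av_frame_free(&frame);
}
```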
| 320 | 403 |
| 321 void FFmpegVideoDecoder::ReleaseFFmpegResources() { | 404 void FFmpegVideoDecoder::ReleaseFFmpegResources() { |
| 322 codec_context_.reset(); | 405 codec_context_.reset(); |
| 323 av_frame_.reset(); | 406 av_frame_.reset(); |
| 324 } | 407 } |
| 325 | 408 |
| 326 bool FFmpegVideoDecoder::ConfigureDecoder(bool low_delay) { | 409 bool FFmpegVideoDecoder::ConfigureDecoder(bool low_delay) { |
| 327 // Release existing decoder resources if necessary. | 410 // Release existing decoder resources if necessary. |
| (...skipping 17 matching lines...) |
| 345 if (!codec || avcodec_open2(codec_context_.get(), codec, NULL) < 0) { | 428 if (!codec || avcodec_open2(codec_context_.get(), codec, NULL) < 0) { |
| 346 ReleaseFFmpegResources(); | 429 ReleaseFFmpegResources(); |
| 347 return false; | 430 return false; |
| 348 } | 431 } |
| 349 | 432 |
| 350 av_frame_.reset(av_frame_alloc()); | 433 av_frame_.reset(av_frame_alloc()); |
| 351 return true; | 434 return true; |
| 352 } | 435 } |
| 353 | 436 |
| 354 } // namespace media | 437 } // namespace media |