Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "media/filters/ffmpeg_video_decoder.h" | 5 #include "media/filters/ffmpeg_video_decoder.h" |
| 6 | 6 |
| 7 #include <algorithm> | 7 #include <algorithm> |
| 8 #include <string> | 8 #include <string> |
| 9 | 9 |
| 10 #include "base/bind.h" | 10 #include "base/bind.h" |
| (...skipping 36 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 47 const CommandLine* cmd_line = CommandLine::ForCurrentProcess(); | 47 const CommandLine* cmd_line = CommandLine::ForCurrentProcess(); |
| 48 std::string threads(cmd_line->GetSwitchValueASCII(switches::kVideoThreads)); | 48 std::string threads(cmd_line->GetSwitchValueASCII(switches::kVideoThreads)); |
| 49 if (threads.empty() || !base::StringToInt(threads, &decode_threads)) | 49 if (threads.empty() || !base::StringToInt(threads, &decode_threads)) |
| 50 return decode_threads; | 50 return decode_threads; |
| 51 | 51 |
| 52 decode_threads = std::max(decode_threads, 0); | 52 decode_threads = std::max(decode_threads, 0); |
| 53 decode_threads = std::min(decode_threads, kMaxDecodeThreads); | 53 decode_threads = std::min(decode_threads, kMaxDecodeThreads); |
| 54 return decode_threads; | 54 return decode_threads; |
| 55 } | 55 } |
| 56 | 56 |
// Trampoline for AVCodecContext::get_buffer2. FFmpeg invokes this static
// callback during decoding; it recovers the FFmpegVideoDecoder instance
// stashed in |s->opaque| (set up in ConfigureDecoder()) and forwards to the
// member function, which allocates the buffer from the decoder's frame pool.
static int GetVideoBufferImpl(struct AVCodecContext* s,
                              AVFrame* frame,
                              int flags) {
  FFmpegVideoDecoder* decoder = static_cast<FFmpegVideoDecoder*>(s->opaque);
  return decoder->GetVideoBuffer(s, frame, flags);
}
| 63 | |
// Release callback handed to av_buffer_create() in GetVideoBuffer().
// |opaque| carries a leaked VideoFrame reference; swapping it into a local
// scoped_refptr re-adopts that reference, and the local's destruction drops
// it when this function returns. |data| is unused: the VideoFrame owns the
// underlying memory.
static void ReleaseVideoBufferImpl(void* opaque, uint8* data) {
  scoped_refptr<VideoFrame> video_frame;
  video_frame.swap(reinterpret_cast<VideoFrame**>(&opaque));
}
| 68 | |
// Lightweight constructor: records the task runner that later calls are
// expected to arrive on (checked via BelongsToCurrentThread() elsewhere) and
// starts in the uninitialized state. All heavy setup is deferred to
// Initialize().
FFmpegVideoDecoder::FFmpegVideoDecoder(
    const scoped_refptr<base::SingleThreadTaskRunner>& task_runner)
    : task_runner_(task_runner), state_(kUninitialized) {}
| 60 | 72 |
| 61 int FFmpegVideoDecoder::GetVideoBuffer(AVCodecContext* codec_context, | 73 int FFmpegVideoDecoder::GetVideoBuffer(struct AVCodecContext* codec_context, |
| 62 AVFrame* frame) { | 74 AVFrame* frame, |
| 75 int flags) { | |
| 63 // Don't use |codec_context_| here! With threaded decoding, | 76 // Don't use |codec_context_| here! With threaded decoding, |
| 64 // it will contain unsynchronized width/height/pix_fmt values, | 77 // it will contain unsynchronized width/height/pix_fmt values, |
| 65 // whereas |codec_context| contains the current threads's | 78 // whereas |codec_context| contains the current threads's |
| 66 // updated width/height/pix_fmt, which can change for adaptive | 79 // updated width/height/pix_fmt, which can change for adaptive |
| 67 // content. | 80 // content. |
| 68 VideoFrame::Format format = PixelFormatToVideoFormat(codec_context->pix_fmt); | 81 VideoFrame::Format format = PixelFormatToVideoFormat(codec_context->pix_fmt); |
| 69 if (format == VideoFrame::UNKNOWN) | 82 if (format == VideoFrame::UNKNOWN) |
| 70 return AVERROR(EINVAL); | 83 return AVERROR(EINVAL); |
| 71 DCHECK(format == VideoFrame::YV12 || format == VideoFrame::YV16 || | 84 DCHECK(format == VideoFrame::YV12 || format == VideoFrame::YV16 || |
| 72 format == VideoFrame::YV12J); | 85 format == VideoFrame::YV12J); |
| (...skipping 28 matching lines...) Expand all Loading... | |
| 101 | 114 |
| 102 scoped_refptr<VideoFrame> video_frame = frame_pool_.CreateFrame( | 115 scoped_refptr<VideoFrame> video_frame = frame_pool_.CreateFrame( |
| 103 format, coded_size, gfx::Rect(size), natural_size, kNoTimestamp()); | 116 format, coded_size, gfx::Rect(size), natural_size, kNoTimestamp()); |
| 104 | 117 |
| 105 for (int i = 0; i < 3; i++) { | 118 for (int i = 0; i < 3; i++) { |
| 106 frame->base[i] = video_frame->data(i); | 119 frame->base[i] = video_frame->data(i); |
| 107 frame->data[i] = video_frame->data(i); | 120 frame->data[i] = video_frame->data(i); |
| 108 frame->linesize[i] = video_frame->stride(i); | 121 frame->linesize[i] = video_frame->stride(i); |
| 109 } | 122 } |
| 110 | 123 |
| 111 frame->opaque = NULL; | |
| 112 video_frame.swap(reinterpret_cast<VideoFrame**>(&frame->opaque)); | |
| 113 frame->type = FF_BUFFER_TYPE_USER; | |
| 114 frame->width = coded_size.width(); | 124 frame->width = coded_size.width(); |
| 115 frame->height = coded_size.height(); | 125 frame->height = coded_size.height(); |
| 116 frame->format = codec_context->pix_fmt; | 126 frame->format = codec_context->pix_fmt; |
| 117 | 127 |
| 128 // Now create an AVBufferRef for the data just allocated. It will own the | |
| 129 // reference to the VideoFrame object. | |
| 130 void* opaque = NULL; | |
| 131 video_frame.swap(reinterpret_cast<VideoFrame**>(&opaque)); | |
| 132 frame->type = FF_BUFFER_TYPE_USER; | |
| 133 frame->buf[0] = | |
| 134 av_buffer_create(frame->data[0], | |
| 135 VideoFrame::AllocationSize(format, coded_size), | |
| 136 ReleaseVideoBufferImpl, | |
| 137 opaque, | |
| 138 0); | |
| 118 return 0; | 139 return 0; |
| 119 } | 140 } |
| 120 | 141 |
| 121 static int GetVideoBufferImpl(AVCodecContext* s, AVFrame* frame) { | |
| 122 FFmpegVideoDecoder* decoder = static_cast<FFmpegVideoDecoder*>(s->opaque); | |
| 123 return decoder->GetVideoBuffer(s, frame); | |
| 124 } | |
| 125 | |
| 126 static void ReleaseVideoBufferImpl(AVCodecContext* s, AVFrame* frame) { | |
| 127 scoped_refptr<VideoFrame> video_frame; | |
| 128 video_frame.swap(reinterpret_cast<VideoFrame**>(&frame->opaque)); | |
| 129 | |
| 130 // The FFmpeg API expects us to zero the data pointers in | |
| 131 // this callback | |
| 132 memset(frame->data, 0, sizeof(frame->data)); | |
| 133 frame->opaque = NULL; | |
| 134 } | |
| 135 | |
| 136 void FFmpegVideoDecoder::Initialize(const VideoDecoderConfig& config, | 142 void FFmpegVideoDecoder::Initialize(const VideoDecoderConfig& config, |
| 137 bool low_delay, | 143 bool low_delay, |
| 138 const PipelineStatusCB& status_cb) { | 144 const PipelineStatusCB& status_cb) { |
| 139 DCHECK(task_runner_->BelongsToCurrentThread()); | 145 DCHECK(task_runner_->BelongsToCurrentThread()); |
| 140 DCHECK(decode_cb_.is_null()); | 146 DCHECK(decode_cb_.is_null()); |
| 141 DCHECK(!config.is_encrypted()); | 147 DCHECK(!config.is_encrypted()); |
| 142 | 148 |
| 143 FFmpegGlue::InitializeFFmpeg(); | 149 FFmpegGlue::InitializeFFmpeg(); |
| 144 | 150 |
| 145 config_ = config; | 151 config_ = config; |
| (...skipping 119 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 265 } | 271 } |
| 266 | 272 |
| 267 base::ResetAndReturn(&decode_cb_).Run(kOk, video_frame); | 273 base::ResetAndReturn(&decode_cb_).Run(kOk, video_frame); |
| 268 } | 274 } |
| 269 | 275 |
| 270 bool FFmpegVideoDecoder::FFmpegDecode( | 276 bool FFmpegVideoDecoder::FFmpegDecode( |
| 271 const scoped_refptr<DecoderBuffer>& buffer, | 277 const scoped_refptr<DecoderBuffer>& buffer, |
| 272 scoped_refptr<VideoFrame>* video_frame) { | 278 scoped_refptr<VideoFrame>* video_frame) { |
| 273 DCHECK(video_frame); | 279 DCHECK(video_frame); |
| 274 | 280 |
| 275 // Reset frame to default values. | |
| 276 avcodec_get_frame_defaults(av_frame_.get()); | |
| 277 | |
| 278 // Create a packet for input data. | 281 // Create a packet for input data. |
| 279 // Due to FFmpeg API changes we no longer have const read-only pointers. | 282 // Due to FFmpeg API changes we no longer have const read-only pointers. |
| 280 AVPacket packet; | 283 AVPacket packet; |
| 281 av_init_packet(&packet); | 284 av_init_packet(&packet); |
| 282 if (buffer->end_of_stream()) { | 285 if (buffer->end_of_stream()) { |
| 283 packet.data = NULL; | 286 packet.data = NULL; |
| 284 packet.size = 0; | 287 packet.size = 0; |
| 285 } else { | 288 } else { |
| 286 packet.data = const_cast<uint8*>(buffer->data()); | 289 packet.data = const_cast<uint8*>(buffer->data()); |
| 287 packet.size = buffer->data_size(); | 290 packet.size = buffer->data_size(); |
| 288 | 291 |
| 289 // Let FFmpeg handle presentation timestamp reordering. | 292 // Let FFmpeg handle presentation timestamp reordering. |
| 290 codec_context_->reordered_opaque = buffer->timestamp().InMicroseconds(); | 293 codec_context_->reordered_opaque = buffer->timestamp().InMicroseconds(); |
| 291 | 294 |
| 292 // This is for codecs not using get_buffer to initialize | 295 // This is for codecs not using get_buffer to initialize |
| 293 // |av_frame_->reordered_opaque| | 296 // |av_frame_->reordered_opaque| |
| 294 av_frame_->reordered_opaque = codec_context_->reordered_opaque; | 297 av_frame_->reordered_opaque = codec_context_->reordered_opaque; |
| 295 } | 298 } |
| 296 | 299 |
| 297 int frame_decoded = 0; | 300 int frame_decoded = 0; |
| 298 int result = avcodec_decode_video2(codec_context_.get(), | 301 int result = avcodec_decode_video2(codec_context_.get(), |
|
wolenetz
2014/05/16 19:45:58
ditto: iterate through multiple packets in buffer?
| |
| 299 av_frame_.get(), | 302 av_frame_.get(), |
| 300 &frame_decoded, | 303 &frame_decoded, |
|
wolenetz
2014/05/16 19:45:58
ditto: protect against MPEG overreads?
| |
| 301 &packet); | 304 &packet); |
| 302 // Log the problem if we can't decode a video frame and exit early. | 305 // Log the problem if we can't decode a video frame and exit early. |
| 303 if (result < 0) { | 306 if (result < 0) { |
| 304 LOG(ERROR) << "Error decoding video: " << buffer->AsHumanReadableString(); | 307 LOG(ERROR) << "Error decoding video: " << buffer->AsHumanReadableString(); |
| 305 *video_frame = NULL; | 308 *video_frame = NULL; |
| 306 return false; | 309 return false; |
| 307 } | 310 } |
| 308 | 311 |
| 309 // If no frame was produced then signal that more data is required to | 312 // If no frame was produced then signal that more data is required to |
| 310 // produce more frames. This can happen under two circumstances: | 313 // produce more frames. This can happen under two circumstances: |
| 311 // 1) Decoder was recently initialized/flushed | 314 // 1) Decoder was recently initialized/flushed |
| 312 // 2) End of stream was reached and all internal frames have been output | 315 // 2) End of stream was reached and all internal frames have been output |
| 313 if (frame_decoded == 0) { | 316 if (frame_decoded == 0) { |
| 314 *video_frame = NULL; | 317 *video_frame = NULL; |
| 315 return true; | 318 return true; |
| 316 } | 319 } |
| 317 | 320 |
| 318 // TODO(fbarchard): Work around for FFmpeg http://crbug.com/27675 | 321 // TODO(fbarchard): Work around for FFmpeg http://crbug.com/27675 |
| 319 // The decoder is in a bad state and not decoding correctly. | 322 // The decoder is in a bad state and not decoding correctly. |
| 320 // Checking for NULL avoids a crash in CopyPlane(). | 323 // Checking for NULL avoids a crash in CopyPlane(). |
| 321 if (!av_frame_->data[VideoFrame::kYPlane] || | 324 if (!av_frame_->data[VideoFrame::kYPlane] || |
| 322 !av_frame_->data[VideoFrame::kUPlane] || | 325 !av_frame_->data[VideoFrame::kUPlane] || |
| 323 !av_frame_->data[VideoFrame::kVPlane]) { | 326 !av_frame_->data[VideoFrame::kVPlane]) { |
| 324 LOG(ERROR) << "Video frame was produced yet has invalid frame data."; | 327 LOG(ERROR) << "Video frame was produced yet has invalid frame data."; |
| 325 *video_frame = NULL; | 328 *video_frame = NULL; |
| 329 av_frame_unref(av_frame_.get()); | |
| 326 return false; | 330 return false; |
| 327 } | 331 } |
| 328 | 332 |
| 329 if (!av_frame_->opaque) { | 333 *video_frame = |
| 330 LOG(ERROR) << "VideoFrame object associated with frame data not set."; | 334 reinterpret_cast<VideoFrame*>(av_buffer_get_opaque(av_frame_->buf[0])); |
| 331 return false; | |
| 332 } | |
| 333 *video_frame = static_cast<VideoFrame*>(av_frame_->opaque); | |
| 334 | 335 |
| 335 (*video_frame)->set_timestamp( | 336 (*video_frame)->set_timestamp( |
| 336 base::TimeDelta::FromMicroseconds(av_frame_->reordered_opaque)); | 337 base::TimeDelta::FromMicroseconds(av_frame_->reordered_opaque)); |
| 337 | 338 |
| 339 av_frame_unref(av_frame_.get()); | |
| 338 return true; | 340 return true; |
| 339 } | 341 } |
| 340 | 342 |
// Destroys the AVCodecContext and the scratch AVFrame. Called before
// (re)configuring the decoder and on configuration failure; resetting the
// scoped pointers is sufficient to free the FFmpeg state they own.
void FFmpegVideoDecoder::ReleaseFFmpegResources() {
  codec_context_.reset();
  av_frame_.reset();
}
| 345 | 347 |
| 346 bool FFmpegVideoDecoder::ConfigureDecoder(bool low_delay) { | 348 bool FFmpegVideoDecoder::ConfigureDecoder(bool low_delay) { |
| 347 // Release existing decoder resources if necessary. | 349 // Release existing decoder resources if necessary. |
| 348 ReleaseFFmpegResources(); | 350 ReleaseFFmpegResources(); |
| 349 | 351 |
| 350 // Initialize AVCodecContext structure. | 352 // Initialize AVCodecContext structure. |
| 351 codec_context_.reset(avcodec_alloc_context3(NULL)); | 353 codec_context_.reset(avcodec_alloc_context3(NULL)); |
| 352 VideoDecoderConfigToAVCodecContext(config_, codec_context_.get()); | 354 VideoDecoderConfigToAVCodecContext(config_, codec_context_.get()); |
| 353 | 355 |
| 354 // Enable motion vector search (potentially slow), strong deblocking filter | |
| 355 // for damaged macroblocks, and set our error detection sensitivity. | |
| 356 codec_context_->error_concealment = FF_EC_GUESS_MVS | FF_EC_DEBLOCK; | |
| 357 codec_context_->thread_count = GetThreadCount(codec_context_->codec_id); | 356 codec_context_->thread_count = GetThreadCount(codec_context_->codec_id); |
| 358 codec_context_->thread_type = low_delay ? FF_THREAD_SLICE : FF_THREAD_FRAME; | 357 codec_context_->thread_type = low_delay ? FF_THREAD_SLICE : FF_THREAD_FRAME; |
| 359 codec_context_->opaque = this; | 358 codec_context_->opaque = this; |
| 360 codec_context_->flags |= CODEC_FLAG_EMU_EDGE; | 359 codec_context_->flags |= CODEC_FLAG_EMU_EDGE; |
| 361 codec_context_->get_buffer = GetVideoBufferImpl; | 360 codec_context_->get_buffer2 = GetVideoBufferImpl; |
| 362 codec_context_->release_buffer = ReleaseVideoBufferImpl; | 361 codec_context_->refcounted_frames = 1; |
| 363 | 362 |
| 364 AVCodec* codec = avcodec_find_decoder(codec_context_->codec_id); | 363 AVCodec* codec = avcodec_find_decoder(codec_context_->codec_id); |
| 365 if (!codec || avcodec_open2(codec_context_.get(), codec, NULL) < 0) { | 364 if (!codec || avcodec_open2(codec_context_.get(), codec, NULL) < 0) { |
| 366 ReleaseFFmpegResources(); | 365 ReleaseFFmpegResources(); |
| 367 return false; | 366 return false; |
| 368 } | 367 } |
| 369 | 368 |
| 370 av_frame_.reset(av_frame_alloc()); | 369 av_frame_.reset(av_frame_alloc()); |
| 371 return true; | 370 return true; |
| 372 } | 371 } |
| 373 | 372 |
| 374 } // namespace media | 373 } // namespace media |
| OLD | NEW |