Chromium Code Reviews

Unified Diff: media/filters/ffmpeg_video_decoder.cc

Issue 286953005: Roll FFmpeg for M37. (Closed) Base URL: https://chromium.googlesource.com/chromium/src
Patch Set: Roll DEPS for ChromiumOS fixes. Created 6 years, 7 months ago
 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 #include "media/filters/ffmpeg_video_decoder.h"
 
 #include <algorithm>
 #include <string>
 
 #include "base/bind.h"
(...skipping 36 matching lines...)
   const CommandLine* cmd_line = CommandLine::ForCurrentProcess();
   std::string threads(cmd_line->GetSwitchValueASCII(switches::kVideoThreads));
   if (threads.empty() || !base::StringToInt(threads, &decode_threads))
     return decode_threads;
 
   decode_threads = std::max(decode_threads, 0);
   decode_threads = std::min(decode_threads, kMaxDecodeThreads);
   return decode_threads;
 }
 
+static int GetVideoBufferImpl(struct AVCodecContext* s,
+                              AVFrame* frame,
+                              int flags) {
+  FFmpegVideoDecoder* decoder = static_cast<FFmpegVideoDecoder*>(s->opaque);
+  return decoder->GetVideoBuffer(s, frame, flags);
+}
+
+static void ReleaseVideoBufferImpl(void* opaque, uint8* data) {
+  scoped_refptr<VideoFrame> video_frame;
+  video_frame.swap(reinterpret_cast<VideoFrame**>(&opaque));
+}
+
 FFmpegVideoDecoder::FFmpegVideoDecoder(
     const scoped_refptr<base::SingleThreadTaskRunner>& task_runner)
     : task_runner_(task_runner), state_(kUninitialized) {}
 
-int FFmpegVideoDecoder::GetVideoBuffer(AVCodecContext* codec_context,
-                                       AVFrame* frame) {
+int FFmpegVideoDecoder::GetVideoBuffer(struct AVCodecContext* codec_context,
+                                       AVFrame* frame,
+                                       int flags) {
   // Don't use |codec_context_| here! With threaded decoding,
   // it will contain unsynchronized width/height/pix_fmt values,
   // whereas |codec_context| contains the current thread's
   // updated width/height/pix_fmt, which can change for adaptive
   // content.
   VideoFrame::Format format = PixelFormatToVideoFormat(codec_context->pix_fmt);
   if (format == VideoFrame::UNKNOWN)
     return AVERROR(EINVAL);
   DCHECK(format == VideoFrame::YV12 || format == VideoFrame::YV16 ||
          format == VideoFrame::YV12J);
(...skipping 23 matching lines...)
                        std::max(size.height(), codec_context->coded_height));
 
   if (!VideoFrame::IsValidConfig(
           format, coded_size, gfx::Rect(size), natural_size))
     return AVERROR(EINVAL);
 
   scoped_refptr<VideoFrame> video_frame = frame_pool_.CreateFrame(
       format, coded_size, gfx::Rect(size), natural_size, kNoTimestamp());
 
   for (int i = 0; i < 3; i++) {
-    frame->base[i] = video_frame->data(i);
     frame->data[i] = video_frame->data(i);
     frame->linesize[i] = video_frame->stride(i);
   }
 
-  frame->opaque = NULL;
-  video_frame.swap(reinterpret_cast<VideoFrame**>(&frame->opaque));
-  frame->type = FF_BUFFER_TYPE_USER;
   frame->width = coded_size.width();
   frame->height = coded_size.height();
   frame->format = codec_context->pix_fmt;
+  frame->reordered_opaque = codec_context->reordered_opaque;
 
+  // Now create an AVBufferRef for the data just allocated. It will own the
+  // reference to the VideoFrame object.
+  void* opaque = NULL;
+  video_frame.swap(reinterpret_cast<VideoFrame**>(&opaque));
+  frame->buf[0] =
+      av_buffer_create(frame->data[0],
+                       VideoFrame::AllocationSize(format, coded_size),
+                       ReleaseVideoBufferImpl,
+                       opaque,
+                       0);
   return 0;
 }
 
-static int GetVideoBufferImpl(AVCodecContext* s, AVFrame* frame) {
-  FFmpegVideoDecoder* decoder = static_cast<FFmpegVideoDecoder*>(s->opaque);
-  return decoder->GetVideoBuffer(s, frame);
-}
-
-static void ReleaseVideoBufferImpl(AVCodecContext* s, AVFrame* frame) {
-  scoped_refptr<VideoFrame> video_frame;
-  video_frame.swap(reinterpret_cast<VideoFrame**>(&frame->opaque));
-
-  // The FFmpeg API expects us to zero the data pointers in
-  // this callback
-  memset(frame->data, 0, sizeof(frame->data));
-  frame->opaque = NULL;
-}
-
 void FFmpegVideoDecoder::Initialize(const VideoDecoderConfig& config,
                                     bool low_delay,
                                     const PipelineStatusCB& status_cb) {
   DCHECK(task_runner_->BelongsToCurrentThread());
   DCHECK(decode_cb_.is_null());
   DCHECK(!config.is_encrypted());
 
   FFmpegGlue::InitializeFFmpeg();
 
   config_ = config;
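
[Reviewer note] The new GetVideoBuffer() above hands the pooled VideoFrame's memory to FFmpeg by wrapping it in an AVBufferRef whose free callback drops the last reference; that is what lets the old release_buffer callback and the manual zeroing of frame->data go away. Below is a minimal, self-contained sketch of that ownership pattern, not Chromium code: PooledBuffer, AttachPooledBuffer, and ReleasePooledBuffer are hypothetical stand-ins for media::VideoFrame and FFmpegVideoDecoder::GetVideoBuffer; only the libavutil calls are real.

// A sketch of the AVBufferRef ownership pattern used above.
extern "C" {
#include <libavutil/buffer.h>
#include <libavutil/error.h>
#include <libavutil/frame.h>
}

#include <cstdint>
#include <memory>

struct PooledBuffer {  // Hypothetical stand-in for a pooled VideoFrame.
  std::unique_ptr<uint8_t[]> data;
  size_t size;
};

// Matches the free callback signature expected by av_buffer_create(). FFmpeg
// invokes it once the last AVBufferRef referencing the data is unreferenced.
static void ReleasePooledBuffer(void* opaque, uint8_t* /*data*/) {
  delete static_cast<PooledBuffer*>(opaque);
}

// Attaches externally owned pixel memory to |frame| and transfers ownership to
// the AVBufferRef, so the codec controls the buffer's lifetime from here on.
// A real decoder would also fill data[1..], linesize, width, height and
// format, as GetVideoBuffer() does above.
static int AttachPooledBuffer(AVFrame* frame, size_t size) {
  auto buffer = std::make_unique<PooledBuffer>();
  buffer->data = std::make_unique<uint8_t[]>(size);
  buffer->size = size;

  frame->data[0] = buffer->data.get();
  frame->buf[0] = av_buffer_create(frame->data[0], static_cast<int>(size),
                                   ReleasePooledBuffer, buffer.get(), 0);
  if (!frame->buf[0])
    return AVERROR(ENOMEM);
  buffer.release();  // Now owned by frame->buf[0].
  return 0;
}
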
(...skipping 119 matching lines...)
   }
 
   base::ResetAndReturn(&decode_cb_).Run(kOk, video_frame);
 }
 
 bool FFmpegVideoDecoder::FFmpegDecode(
     const scoped_refptr<DecoderBuffer>& buffer,
     scoped_refptr<VideoFrame>* video_frame) {
   DCHECK(video_frame);
 
-  // Reset frame to default values.
-  avcodec_get_frame_defaults(av_frame_.get());
-
   // Create a packet for input data.
   // Due to FFmpeg API changes we no longer have const read-only pointers.
   AVPacket packet;
   av_init_packet(&packet);
   if (buffer->end_of_stream()) {
     packet.data = NULL;
     packet.size = 0;
   } else {
     packet.data = const_cast<uint8*>(buffer->data());
     packet.size = buffer->data_size();
 
     // Let FFmpeg handle presentation timestamp reordering.
     codec_context_->reordered_opaque = buffer->timestamp().InMicroseconds();
-
-    // This is for codecs not using get_buffer to initialize
-    // |av_frame_->reordered_opaque|
-    av_frame_->reordered_opaque = codec_context_->reordered_opaque;
   }
 
   int frame_decoded = 0;
   int result = avcodec_decode_video2(codec_context_.get(),
                                      av_frame_.get(),
                                      &frame_decoded,
                                      &packet);
   // Log the problem if we can't decode a video frame and exit early.
   if (result < 0) {
     LOG(ERROR) << "Error decoding video: " << buffer->AsHumanReadableString();
     *video_frame = NULL;
     return false;
   }
 
+  // FFmpeg says some codecs might have multiple frames per packet. Previous
+  // discussions with rbultje@ indicate this shouldn't be true for the codecs
+  // we use.
+  DCHECK_EQ(result, packet.size);
+
   // If no frame was produced then signal that more data is required to
   // produce more frames. This can happen under two circumstances:
   //   1) Decoder was recently initialized/flushed
   //   2) End of stream was reached and all internal frames have been output
   if (frame_decoded == 0) {
     *video_frame = NULL;
     return true;
   }
 
   // TODO(fbarchard): Workaround for FFmpeg http://crbug.com/27675
   // The decoder is in a bad state and not decoding correctly.
   // Checking for NULL avoids a crash in CopyPlane().
   if (!av_frame_->data[VideoFrame::kYPlane] ||
       !av_frame_->data[VideoFrame::kUPlane] ||
       !av_frame_->data[VideoFrame::kVPlane]) {
     LOG(ERROR) << "Video frame was produced yet has invalid frame data.";
     *video_frame = NULL;
+    av_frame_unref(av_frame_.get());
     return false;
   }
 
-  if (!av_frame_->opaque) {
-    LOG(ERROR) << "VideoFrame object associated with frame data not set.";
-    return false;
-  }
-  *video_frame = static_cast<VideoFrame*>(av_frame_->opaque);
+  *video_frame =
+      reinterpret_cast<VideoFrame*>(av_buffer_get_opaque(av_frame_->buf[0]));
 
   (*video_frame)->set_timestamp(
       base::TimeDelta::FromMicroseconds(av_frame_->reordered_opaque));
 
+  av_frame_unref(av_frame_.get());
   return true;
 }
 
 void FFmpegVideoDecoder::ReleaseFFmpegResources() {
   codec_context_.reset();
   av_frame_.reset();
 }
 
 bool FFmpegVideoDecoder::ConfigureDecoder(bool low_delay) {
   // Release existing decoder resources if necessary.
   ReleaseFFmpegResources();
 
   // Initialize AVCodecContext structure.
   codec_context_.reset(avcodec_alloc_context3(NULL));
   VideoDecoderConfigToAVCodecContext(config_, codec_context_.get());
 
-  // Enable motion vector search (potentially slow), strong deblocking filter
-  // for damaged macroblocks, and set our error detection sensitivity.
-  codec_context_->error_concealment = FF_EC_GUESS_MVS | FF_EC_DEBLOCK;
   codec_context_->thread_count = GetThreadCount(codec_context_->codec_id);
   codec_context_->thread_type = low_delay ? FF_THREAD_SLICE : FF_THREAD_FRAME;
   codec_context_->opaque = this;
   codec_context_->flags |= CODEC_FLAG_EMU_EDGE;
-  codec_context_->get_buffer = GetVideoBufferImpl;
-  codec_context_->release_buffer = ReleaseVideoBufferImpl;
+  codec_context_->get_buffer2 = GetVideoBufferImpl;
+  codec_context_->refcounted_frames = 1;
 
   AVCodec* codec = avcodec_find_decoder(codec_context_->codec_id);
   if (!codec || avcodec_open2(codec_context_.get(), codec, NULL) < 0) {
     ReleaseFFmpegResources();
     return false;
   }
 
   av_frame_.reset(av_frame_alloc());
   return true;
 }
 
 }  // namespace media
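
[Reviewer note] For context on the callback wiring in ConfigureDecoder() and the unref calls in FFmpegDecode(), here is a minimal sketch of the same pattern against plain FFmpeg. It assumes a libavcodec from roughly the FFmpeg 2.3–4.x range, where get_buffer2, refcounted_frames, and avcodec_decode_video2 coexist; newer releases drop the last two in favor of avcodec_send_packet()/avcodec_receive_frame() and always-refcounted frames. GetBuffer2, OpenDecoder, and DecodeOnePacket are illustrative names, not part of this patch.

extern "C" {
#include <libavcodec/avcodec.h>
#include <libavutil/frame.h>
}

// Custom allocator hook; a real decoder hands out pooled memory here (as
// FFmpegVideoDecoder::GetVideoBuffer does). Falling back to FFmpeg's default
// allocator keeps the sketch self-contained.
static int GetBuffer2(AVCodecContext* ctx, AVFrame* frame, int flags) {
  return avcodec_default_get_buffer2(ctx, frame, flags);
}

static AVCodecContext* OpenDecoder(AVCodecID codec_id) {
  AVCodec* codec = avcodec_find_decoder(codec_id);
  if (!codec)
    return nullptr;
  AVCodecContext* ctx = avcodec_alloc_context3(codec);
  ctx->get_buffer2 = GetBuffer2;   // Replaces get_buffer/release_buffer.
  ctx->refcounted_frames = 1;      // Decoded frames own AVBufferRef references.
  if (avcodec_open2(ctx, codec, nullptr) < 0) {
    avcodec_free_context(&ctx);
    return nullptr;
  }
  return ctx;
}

// With refcounted frames the decoded output stays valid until av_frame_unref()
// is called, so the caller unrefs after consuming the frame instead of relying
// on a release callback.
static bool DecodeOnePacket(AVCodecContext* ctx, AVPacket* pkt, AVFrame* frame) {
  int got_frame = 0;
  if (avcodec_decode_video2(ctx, frame, &got_frame, pkt) < 0)
    return false;
  if (got_frame) {
    // ... consume frame->data / frame->buf[0] here ...
    av_frame_unref(frame);
  }
  return true;
}
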
