OLD | NEW |
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "media/filters/ffmpeg_video_decoder.h" | 5 #include "media/filters/ffmpeg_video_decoder.h" |
6 | 6 |
7 #include <stddef.h> | 7 #include <stddef.h> |
8 #include <stdint.h> | 8 #include <stdint.h> |
9 | 9 |
10 #include <algorithm> | 10 #include <algorithm> |
11 #include <string> | 11 #include <string> |
12 | 12 |
13 #include "base/bind.h" | 13 #include "base/bind.h" |
14 #include "base/callback_helpers.h" | 14 #include "base/callback_helpers.h" |
15 #include "base/command_line.h" | 15 #include "base/command_line.h" |
16 #include "base/location.h" | 16 #include "base/location.h" |
17 #include "base/single_thread_task_runner.h" | 17 #include "base/single_thread_task_runner.h" |
18 #include "base/strings/string_number_conversions.h" | 18 #include "base/strings/string_number_conversions.h" |
19 #include "media/base/bind_to_current_loop.h" | 19 #include "media/base/bind_to_current_loop.h" |
20 #include "media/base/decoder_buffer.h" | 20 #include "media/base/decoder_buffer.h" |
21 #include "media/base/limits.h" | 21 #include "media/base/limits.h" |
22 #include "media/base/media_switches.h" | 22 #include "media/base/media_switches.h" |
23 #include "media/base/timestamp_constants.h" | 23 #include "media/base/timestamp_constants.h" |
24 #include "media/base/video_frame.h" | 24 #include "media/base/video_frame.h" |
25 #include "media/base/video_util.h" | 25 #include "media/base/video_util.h" |
26 #include "media/ffmpeg/ffmpeg_common.h" | 26 #include "media/ffmpeg/ffmpeg_common.h" |
27 #include "media/filters/ffmpeg_glue.h" | 27 #include "media/filters/ffmpeg_glue.h" |
| 28 #include "media/video/hybrid_video_frame_pool.h" |
28 | 29 |
29 namespace media { | 30 namespace media { |
30 | 31 |
31 // Always try to use three threads for video decoding. There is little reason | 32 // Always try to use three threads for video decoding. There is little reason |
32 // not to since current-day CPUs tend to be multi-core and we measured | 33 // not to since current-day CPUs tend to be multi-core and we measured |
33 // performance benefits on older machines such as P4s with hyperthreading. | 34 // performance benefits on older machines such as P4s with hyperthreading. |
34 // | 35 // |
35 // Handling decoding on separate threads also frees up the pipeline thread to | 36 // Handling decoding on separate threads also frees up the pipeline thread to |
36 // continue processing. Although it'd be nice to have the option of a single | 37 // continue processing. Although it'd be nice to have the option of a single |
37 // decoding thread, FFmpeg treats having one thread the same as having zero | 38 // decoding thread, FFmpeg treats having one thread the same as having zero |
(...skipping 19 matching lines...) |
57 } | 58 } |
58 | 59 |
59 static int GetVideoBufferImpl(struct AVCodecContext* s, | 60 static int GetVideoBufferImpl(struct AVCodecContext* s, |
60 AVFrame* frame, | 61 AVFrame* frame, |
61 int flags) { | 62 int flags) { |
62 FFmpegVideoDecoder* decoder = static_cast<FFmpegVideoDecoder*>(s->opaque); | 63 FFmpegVideoDecoder* decoder = static_cast<FFmpegVideoDecoder*>(s->opaque); |
63 return decoder->GetVideoBuffer(s, frame, flags); | 64 return decoder->GetVideoBuffer(s, frame, flags); |
64 } | 65 } |
65 | 66 |
66 static void ReleaseVideoBufferImpl(void* opaque, uint8_t* data) { | 67 static void ReleaseVideoBufferImpl(void* opaque, uint8_t* data) { |
67 scoped_refptr<VideoFrame> video_frame; | 68 std::unique_ptr<VideoFrameFuture> video_frame_future( |
68 video_frame.swap(reinterpret_cast<VideoFrame**>(&opaque)); | 69 static_cast<VideoFrameFuture*>(opaque)); |
69 } | 70 } |
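The release callback above is one half of an AVBufferRef ownership handoff: FFmpeg invokes the free callback registered with av_buffer_create() once the last reference to the buffer is dropped, handing back the opaque pointer supplied at creation, and the callback reclaims whatever object was parked there (a VideoFrame in the old code, a VideoFrameFuture in the new). A minimal sketch of the pattern in isolation follows; FrameHandle, FreeFrameHandle, and WrapHandle are hypothetical names used only for illustration, not part of this patch or of Chromium.

// Sketch only: ties the lifetime of a heap-allocated handle to an AVBufferRef
// via the opaque pointer, mirroring ReleaseVideoBufferImpl() above.
extern "C" {
#include <libavutil/buffer.h>
}
#include <memory>

struct FrameHandle { /* owns the pixel memory handed to the codec */ };

static void FreeFrameHandle(void* opaque, uint8_t* /* data */) {
  // FFmpeg calls this once the last AVBufferRef to the buffer is unreferenced.
  delete static_cast<FrameHandle*>(opaque);
}

AVBufferRef* WrapHandle(std::unique_ptr<FrameHandle> handle) {
  // A null data pointer and zero size are acceptable here because the buffer
  // exists only to carry ownership; the plane pointers are set on the AVFrame
  // separately, as GetVideoBuffer() does below.
  return av_buffer_create(nullptr, 0, FreeFrameHandle, handle.release(), 0);
}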
70 | 71 |
71 // static | 72 // static |
72 bool FFmpegVideoDecoder::IsCodecSupported(VideoCodec codec) { | 73 bool FFmpegVideoDecoder::IsCodecSupported(VideoCodec codec) { |
73 FFmpegGlue::InitializeFFmpeg(); | 74 FFmpegGlue::InitializeFFmpeg(); |
74 return avcodec_find_decoder(VideoCodecToCodecID(codec)) != nullptr; | 75 return avcodec_find_decoder(VideoCodecToCodecID(codec)) != nullptr; |
75 } | 76 } |
76 | 77 |
77 FFmpegVideoDecoder::FFmpegVideoDecoder() | 78 FFmpegVideoDecoder::FFmpegVideoDecoder() |
| 79 : FFmpegVideoDecoder(std::unique_ptr<GpuMemoryBufferVideoFramePool>()) {} |
| 80 |
| 81 FFmpegVideoDecoder::FFmpegVideoDecoder( |
| 82 std::unique_ptr<GpuMemoryBufferVideoFramePool> gpu_video_frame_pool) |
78 : state_(kUninitialized), decode_nalus_(false) { | 83 : state_(kUninitialized), decode_nalus_(false) { |
79 thread_checker_.DetachFromThread(); | 84 thread_checker_.DetachFromThread(); |
| 85 if (gpu_video_frame_pool) gpu_video_frame_pool->SetUsage(GpuMemoryBufferVideoFramePool::Usage::FFMPEG); |
| 86 hybrid_frame_pool_.reset( |
| 87 new HybridVideoFramePool(std::move(gpu_video_frame_pool))); |
80 } | 88 } |
81 | 89 |
82 int FFmpegVideoDecoder::GetVideoBuffer(struct AVCodecContext* codec_context, | 90 int FFmpegVideoDecoder::GetVideoBuffer(struct AVCodecContext* codec_context, |
83 AVFrame* frame, | 91 AVFrame* frame, |
84 int flags) { | 92 int flags) { |
85 // Don't use |codec_context_| here! With threaded decoding, | 93 // Don't use |codec_context_| here! With threaded decoding, |
86 // it will contain unsynchronized width/height/pix_fmt values, | 94 // it will contain unsynchronized width/height/pix_fmt values, |
87 // whereas |codec_context| contains the current thread's | 95 // whereas |codec_context| contains the current thread's |
88 // updated width/height/pix_fmt, which can change for adaptive | 96 // updated width/height/pix_fmt, which can change for adaptive |
89 // content. | 97 // content. |
(...skipping 32 matching lines...) |
122 gfx::Size coded_size(std::max(size.width(), codec_context->coded_width), | 130 gfx::Size coded_size(std::max(size.width(), codec_context->coded_width), |
123 std::max(size.height(), codec_context->coded_height)); | 131 std::max(size.height(), codec_context->coded_height)); |
124 | 132 |
125 if (!VideoFrame::IsValidConfig(format, VideoFrame::STORAGE_UNKNOWN, | 133 if (!VideoFrame::IsValidConfig(format, VideoFrame::STORAGE_UNKNOWN, |
126 coded_size, gfx::Rect(size), natural_size)) { | 134 coded_size, gfx::Rect(size), natural_size)) { |
127 return AVERROR(EINVAL); | 135 return AVERROR(EINVAL); |
128 } | 136 } |
129 | 137 |
130 // FFmpeg expects the initial allocation to be zero-initialized. Failure | 138 // FFmpeg expects the initial allocation to be zero-initialized. Failure |
131 // to do so can lead to uninitialized value usage. See http://crbug.com/390941 | 139 // to do so can lead to uninitialized value usage. See http://crbug.com/390941 |
132 scoped_refptr<VideoFrame> video_frame = frame_pool_.CreateFrame( | 140 std::unique_ptr<VideoFrameFuture> video_frame_future = |
133 format, coded_size, gfx::Rect(size), natural_size, kNoTimestamp()); | 141 hybrid_frame_pool_->CreateFrame(format, coded_size, gfx::Rect(size), |
| 142 natural_size, kNoTimestamp()); |
| 143 DCHECK(video_frame_future); |
134 | 144 |
135 // Prefer the color space from the codec context. If it's not specified (or is | 145 for (size_t i = 0; i < VideoFrame::NumPlanes(format); i++) { |
136 // set to an unsupported value), fall back on the value from the config. | 146 frame->data[i] = video_frame_future->data(i); |
137 ColorSpace color_space = AVColorSpaceToColorSpace(codec_context->colorspace, | 147 frame->linesize[i] = video_frame_future->stride(i); |
138 codec_context->color_range); | |
139 if (color_space == COLOR_SPACE_UNSPECIFIED) | |
140 color_space = config_.color_space(); | |
141 video_frame->metadata()->SetInteger(VideoFrameMetadata::COLOR_SPACE, | |
142 color_space); | |
143 | |
144 for (size_t i = 0; i < VideoFrame::NumPlanes(video_frame->format()); i++) { | |
145 frame->data[i] = video_frame->data(i); | |
146 frame->linesize[i] = video_frame->stride(i); | |
147 } | 148 } |
148 | 149 |
149 frame->width = coded_size.width(); | 150 frame->width = coded_size.width(); |
150 frame->height = coded_size.height(); | 151 frame->height = coded_size.height(); |
151 frame->format = codec_context->pix_fmt; | 152 frame->format = codec_context->pix_fmt; |
152 frame->reordered_opaque = codec_context->reordered_opaque; | 153 frame->reordered_opaque = codec_context->reordered_opaque; |
153 | 154 |
154 // Now create an AVBufferRef for the data just allocated. It will own the | 155 // Now create an AVBufferRef for the data just allocated. It will own the |
155 // reference to the VideoFrame object. | 156 // reference to the VideoFrame object. |
156 void* opaque = NULL; | 157 void* opaque = video_frame_future.release(); |
157 video_frame.swap(reinterpret_cast<VideoFrame**>(&opaque)); | |
158 frame->buf[0] = | 158 frame->buf[0] = |
159 av_buffer_create(frame->data[0], | 159 av_buffer_create(nullptr, 0, ReleaseVideoBufferImpl, opaque, 0); |
160 VideoFrame::AllocationSize(format, coded_size), | |
161 ReleaseVideoBufferImpl, | |
162 opaque, | |
163 0); | |
164 return 0; | 160 return 0; |
165 } | 161 } |
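For context on how GetVideoBuffer() gets invoked at all: the decoder registers itself on the codec context by stashing |this| in AVCodecContext::opaque and pointing AVCodecContext::get_buffer2 at the static trampoline GetVideoBufferImpl() near the top of the file; that registration lives in ConfigureDecoder(), which is largely elided from this diff. A rough sketch of the wiring, with MyDecoder, GetBufferTrampoline, and InstallAllocator as hypothetical illustration-only names:

// Sketch only: hooking a custom get_buffer2 allocator into an AVCodecContext.
extern "C" {
#include <libavcodec/avcodec.h>
}

struct MyDecoder {
  int AllocateBuffer(AVCodecContext* /* ctx */, AVFrame* /* frame */,
                     int /* flags */) {
    // A real implementation would allocate the planes and attach an
    // AVBufferRef, as GetVideoBuffer() does above; placeholder only.
    return AVERROR(EINVAL);
  }
};

static int GetBufferTrampoline(AVCodecContext* ctx, AVFrame* frame, int flags) {
  // May run on an FFmpeg decoding thread; forward to the owning decoder.
  return static_cast<MyDecoder*>(ctx->opaque)->AllocateBuffer(ctx, frame, flags);
}

void InstallAllocator(AVCodecContext* ctx, MyDecoder* decoder) {
  ctx->opaque = decoder;
  ctx->get_buffer2 = GetBufferTrampoline;
  // Older FFmpeg releases also required thread_safe_callbacks = 1 when pairing
  // a custom allocator with threaded decoding; newer releases removed it.
}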
166 | 162 |
167 std::string FFmpegVideoDecoder::GetDisplayName() const { | 163 std::string FFmpegVideoDecoder::GetDisplayName() const { |
168 return "FFmpegVideoDecoder"; | 164 return "FFmpegVideoDecoder"; |
169 } | 165 } |
170 | 166 |
171 void FFmpegVideoDecoder::Initialize(const VideoDecoderConfig& config, | 167 void FFmpegVideoDecoder::Initialize(const VideoDecoderConfig& config, |
172 bool low_delay, | 168 bool low_delay, |
173 CdmContext* /* cdm_context */, | 169 CdmContext* /* cdm_context */, |
(...skipping 152 matching lines...) |
326 // The decoder is in a bad state and not decoding correctly. | 322 // The decoder is in a bad state and not decoding correctly. |
327 // Checking for NULL avoids a crash in CopyPlane(). | 323 // Checking for NULL avoids a crash in CopyPlane(). |
328 if (!av_frame_->data[VideoFrame::kYPlane] || | 324 if (!av_frame_->data[VideoFrame::kYPlane] || |
329 !av_frame_->data[VideoFrame::kUPlane] || | 325 !av_frame_->data[VideoFrame::kUPlane] || |
330 !av_frame_->data[VideoFrame::kVPlane]) { | 326 !av_frame_->data[VideoFrame::kVPlane]) { |
331 LOG(ERROR) << "Video frame was produced yet has invalid frame data."; | 327 LOG(ERROR) << "Video frame was produced yet has invalid frame data."; |
332 av_frame_unref(av_frame_.get()); | 328 av_frame_unref(av_frame_.get()); |
333 return false; | 329 return false; |
334 } | 330 } |
335 | 331 |
336 scoped_refptr<VideoFrame> frame = | 332 VideoFrameFuture* frame_future = |
337 reinterpret_cast<VideoFrame*>(av_buffer_get_opaque(av_frame_->buf[0])); | 333 static_cast<VideoFrameFuture*>(av_buffer_get_opaque(av_frame_->buf[0])); |
| 334 scoped_refptr<VideoFrame> frame = frame_future->Release(); |
| 335 DCHECK(frame); |
| 336 |
| 337 // Prefer the color space from the codec context. If it's not specified (or is |
| 338 // set to an unsupported value), fall back on the value from the config. |
| 339 ColorSpace color_space = AVColorSpaceToColorSpace( |
| 340 codec_context_->colorspace, codec_context_->color_range); |
| 341 if (color_space == COLOR_SPACE_UNSPECIFIED) |
| 342 color_space = config_.color_space(); |
| 343 frame->metadata()->SetInteger(VideoFrameMetadata::COLOR_SPACE, color_space); |
338 frame->set_timestamp( | 344 frame->set_timestamp( |
339 base::TimeDelta::FromMicroseconds(av_frame_->reordered_opaque)); | 345 base::TimeDelta::FromMicroseconds(av_frame_->reordered_opaque)); |
340 *has_produced_frame = true; | 346 *has_produced_frame = true; |
341 output_cb_.Run(frame); | 347 output_cb_.Run(frame); |
342 | 348 |
343 av_frame_unref(av_frame_.get()); | 349 av_frame_unref(av_frame_.get()); |
344 return true; | 350 return true; |
345 } | 351 } |
346 | 352 |
347 void FFmpegVideoDecoder::ReleaseFFmpegResources() { | 353 void FFmpegVideoDecoder::ReleaseFFmpegResources() { |
(...skipping 26 matching lines...) |
374 if (!codec || avcodec_open2(codec_context_.get(), codec, NULL) < 0) { | 380 if (!codec || avcodec_open2(codec_context_.get(), codec, NULL) < 0) { |
375 ReleaseFFmpegResources(); | 381 ReleaseFFmpegResources(); |
376 return false; | 382 return false; |
377 } | 383 } |
378 | 384 |
379 av_frame_.reset(av_frame_alloc()); | 385 av_frame_.reset(av_frame_alloc()); |
380 return true; | 386 return true; |
381 } | 387 } |
382 | 388 |
383 } // namespace media | 389 } // namespace media |