OLD | NEW |
---|---|
1 // Copyright (c) 2011 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2011 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "media/video/ffmpeg_video_decode_engine.h" | 5 #include "media/video/ffmpeg_video_decode_engine.h" |
6 | 6 |
7 #include "base/command_line.h" | 7 #include "base/command_line.h" |
8 #include "base/logging.h" | |
8 #include "base/string_number_conversions.h" | 9 #include "base/string_number_conversions.h" |
9 #include "base/task.h" | |
10 #include "media/base/buffers.h" | 10 #include "media/base/buffers.h" |
11 #include "media/base/limits.h" | |
12 #include "media/base/media_switches.h" | 11 #include "media/base/media_switches.h" |
13 #include "media/base/pipeline.h" | 12 #include "media/base/video_decoder_config.h" |
14 #include "media/base/video_util.h" | 13 #include "media/base/video_util.h" |
15 #include "media/ffmpeg/ffmpeg_common.h" | 14 #include "media/ffmpeg/ffmpeg_common.h" |
16 | 15 |
17 namespace media { | 16 namespace media { |
18 | 17 |
19 FFmpegVideoDecodeEngine::FFmpegVideoDecodeEngine() | 18 FFmpegVideoDecodeEngine::FFmpegVideoDecodeEngine() |
20 : codec_context_(NULL), | 19 : codec_context_(NULL), |
21 event_handler_(NULL), | 20 av_frame_(NULL), |
22 frame_rate_numerator_(0), | 21 frame_rate_numerator_(0), |
23 frame_rate_denominator_(0), | 22 frame_rate_denominator_(0) { |
24 pending_input_buffers_(0), | |
25 pending_output_buffers_(0), | |
26 output_eos_reached_(false), | |
27 flush_pending_(false) { | |
28 } | 23 } |
29 | 24 |
30 FFmpegVideoDecodeEngine::~FFmpegVideoDecodeEngine() { | 25 FFmpegVideoDecodeEngine::~FFmpegVideoDecodeEngine() { |
31 if (codec_context_) { | 26 Uninitialize(); |
32 av_free(codec_context_->extradata); | |
33 avcodec_close(codec_context_); | |
34 av_free(codec_context_); | |
35 } | |
36 } | 27 } |
37 | 28 |
38 void FFmpegVideoDecodeEngine::Initialize( | 29 bool FFmpegVideoDecodeEngine::Initialize(const VideoDecoderConfig& config) { |
39 VideoDecodeEngine::EventHandler* event_handler, | |
40 const VideoDecoderConfig& config) { | |
41 frame_rate_numerator_ = config.frame_rate_numerator(); | 30 frame_rate_numerator_ = config.frame_rate_numerator(); |
42 frame_rate_denominator_ = config.frame_rate_denominator(); | 31 frame_rate_denominator_ = config.frame_rate_denominator(); |
43 | 32 |
44 // Always try to use three threads for video decoding. There is little reason | 33 // Always try to use three threads for video decoding. There is little reason |
45 // not to since current day CPUs tend to be multi-core and we measured | 34 // not to since current day CPUs tend to be multi-core and we measured |
46 // performance benefits on older machines such as P4s with hyperthreading. | 35 // performance benefits on older machines such as P4s with hyperthreading. |
47 // | 36 // |
48 // Handling decoding on separate threads also frees up the pipeline thread to | 37 // Handling decoding on separate threads also frees up the pipeline thread to |
49 // continue processing. Although it'd be nice to have the option of a single | 38 // continue processing. Although it'd be nice to have the option of a single |
50 // decoding thread, FFmpeg treats having one thread the same as having zero | 39 // decoding thread, FFmpeg treats having one thread the same as having zero |
(...skipping 22 matching lines...) | |
73 const CommandLine* cmd_line = CommandLine::ForCurrentProcess(); | 62 const CommandLine* cmd_line = CommandLine::ForCurrentProcess(); |
74 std::string threads(cmd_line->GetSwitchValueASCII(switches::kVideoThreads)); | 63 std::string threads(cmd_line->GetSwitchValueASCII(switches::kVideoThreads)); |
75 if ((!threads.empty() && | 64 if ((!threads.empty() && |
76 !base::StringToInt(threads, &decode_threads)) || | 65 !base::StringToInt(threads, &decode_threads)) || |
77 decode_threads < 0 || decode_threads > kMaxDecodeThreads) { | 66 decode_threads < 0 || decode_threads > kMaxDecodeThreads) { |
78 decode_threads = kDecodeThreads; | 67 decode_threads = kDecodeThreads; |
79 } | 68 } |
80 | 69 |
81 codec_context_->thread_count = decode_threads; | 70 codec_context_->thread_count = decode_threads; |
82 | 71 |
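For reference, how the switch parsing above behaves for a few inputs. This is an illustrative summary, not part of the change; kDecodeThreads and kMaxDecodeThreads are defined in the lines elided above, and decode_threads starts out at the built-in default.

```cpp
// Illustrative --video-threads behavior (hypothetical invocations):
//   --video-threads=8     -> decode_threads = 8
//   --video-threads=-1    -> out of range, reset to kDecodeThreads
//   --video-threads=junk  -> base::StringToInt() fails, reset to kDecodeThreads
//   (switch absent)       -> threads is empty, the default value is kept
```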
83 // We don't allocate AVFrame on the stack since different versions of FFmpeg | 72 av_frame_ = avcodec_alloc_frame(); |
84 // may change the size of AVFrame, causing stack corruption. The solution is | |
85 // to let FFmpeg allocate the structure via avcodec_alloc_frame(). | |
86 av_frame_.reset(avcodec_alloc_frame()); | |
87 | 73 |
88 // If we do not have enough buffers, we will report error too. | 74 // Open the codec! |
89 frame_queue_available_.clear(); | 75 return codec && avcodec_open(codec_context_, codec) >= 0; |
90 | |
91 // Convert the pixel format to video format and ensure we support it. | |
92 VideoFrame::Format format = | |
93 PixelFormatToVideoFormat(codec_context_->pix_fmt); | |
94 | |
95 bool success = false; | |
96 if (format != VideoFrame::INVALID) { | |
97 // Create output buffer pool when direct rendering is not used. | |
98 for (size_t i = 0; i < Limits::kMaxVideoFrames; ++i) { | |
99 scoped_refptr<VideoFrame> video_frame = | |
100 VideoFrame::CreateFrame(format, | |
101 config.visible_rect().width(), | |
102 config.visible_rect().height(), | |
103 kNoTimestamp, | |
104 kNoTimestamp); | |
105 frame_queue_available_.push_back(video_frame); | |
106 } | |
107 | |
108 // Open the codec! | |
109 success = codec && avcodec_open(codec_context_, codec) >= 0; | |
110 } | |
111 | |
112 event_handler_ = event_handler; | |
113 event_handler_->OnInitializeComplete(success); | |
114 } | 76 } |
115 | 77 |
116 void FFmpegVideoDecodeEngine::ConsumeVideoSample( | 78 void FFmpegVideoDecodeEngine::Uninitialize() { |
117 scoped_refptr<Buffer> buffer) { | 79 if (codec_context_) { |
118 pending_input_buffers_--; | 80 av_free(codec_context_->extradata); |
119 if (flush_pending_) { | 81 avcodec_close(codec_context_); |
120 TryToFinishPendingFlush(); | 82 av_free(codec_context_); |
121 } else { | 83 codec_context_ = NULL; |
122 // Otherwise try to decode this buffer. | |
123 DecodeFrame(buffer); | |
124 } | 84 } |
| 85 if (av_frame_) { |
| 86 av_free(av_frame_); |
| 87 av_frame_ = NULL; |
| 88 } |
| 89 frame_rate_numerator_ = 0; |
| 90 frame_rate_denominator_ = 0; |
125 } | 91 } |
126 | 92 |
127 void FFmpegVideoDecodeEngine::ProduceVideoFrame( | 93 bool FFmpegVideoDecodeEngine::Decode(const scoped_refptr<Buffer>& buffer, |
128 scoped_refptr<VideoFrame> frame) { | 94 scoped_refptr<VideoFrame>* video_frame) { |
129 // We should never receive NULL frame or EOS frame. | 95 DCHECK(video_frame); |
130 DCHECK(frame.get() && !frame->IsEndOfStream()); | |
131 | |
132 // Increment pending output buffer count. | |
133 pending_output_buffers_++; | |
134 | |
135 // Return this frame to available pool after display. | |
136 frame_queue_available_.push_back(frame); | |
137 | |
138 if (flush_pending_) { | |
139 TryToFinishPendingFlush(); | |
140 } else if (!output_eos_reached_) { | |
141 // If we already deliver EOS to renderer, we stop reading new input. | |
142 ReadInput(); | |
143 } | |
144 } | |
145 | |
146 // Try to decode frame when both input and output are ready. | |
147 void FFmpegVideoDecodeEngine::DecodeFrame(scoped_refptr<Buffer> buffer) { | |
148 scoped_refptr<VideoFrame> video_frame; | |
149 | 96 |
150 // Create a packet for input data. | 97 // Create a packet for input data. |
151 // Due to FFmpeg API changes we no longer have const read-only pointers. | 98 // Due to FFmpeg API changes we no longer have const read-only pointers. |
152 AVPacket packet; | 99 AVPacket packet; |
153 av_init_packet(&packet); | 100 av_init_packet(&packet); |
154 packet.data = const_cast<uint8*>(buffer->GetData()); | 101 packet.data = const_cast<uint8*>(buffer->GetData()); |
155 packet.size = buffer->GetDataSize(); | 102 packet.size = buffer->GetDataSize(); |
156 | 103 |
157 PipelineStatistics statistics; | |
158 statistics.video_bytes_decoded = buffer->GetDataSize(); | |
159 | |
160 // Let FFmpeg handle presentation timestamp reordering. | 104 // Let FFmpeg handle presentation timestamp reordering. |
161 codec_context_->reordered_opaque = buffer->GetTimestamp().InMicroseconds(); | 105 codec_context_->reordered_opaque = buffer->GetTimestamp().InMicroseconds(); |
162 | 106 |
163 // This is for codecs not using get_buffer to initialize | 107 // This is for codecs not using get_buffer to initialize |
164 // |av_frame_->reordered_opaque| | 108 // |av_frame_->reordered_opaque| |
165 av_frame_->reordered_opaque = codec_context_->reordered_opaque; | 109 av_frame_->reordered_opaque = codec_context_->reordered_opaque; |
166 | 110 |
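The reordered_opaque hand-off is the subtle part here: FFmpeg copies the codec context's value into whichever AVFrame starts decoding from that packet, so the timestamp rides along through B-frame reordering. An illustrative timeline, not taken from the source:

```cpp
// Packets arrive in decode order but frames come out in presentation order:
//
//   input (decode) order:   I(pts=0)  P(pts=100)  B(pts=33)  B(pts=66)
//   output (display) order: I(0)      B(33)       B(66)      P(100)
//
// Stashing each input PTS in codec_context_->reordered_opaque before the
// avcodec_decode_video2() call means av_frame_->reordered_opaque holds the
// PTS of the frame that comes out, not of the packet that just went in.
```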
167 int frame_decoded = 0; | 111 int frame_decoded = 0; |
168 int result = avcodec_decode_video2(codec_context_, | 112 int result = avcodec_decode_video2(codec_context_, |
169 av_frame_.get(), | 113 av_frame_, |
170 &frame_decoded, | 114 &frame_decoded, |
171 &packet); | 115 &packet); |
172 // Log the problem if we can't decode a video frame and exit early. | 116 // Log the problem if we can't decode a video frame and exit early. |
173 if (result < 0) { | 117 if (result < 0) { |
174 LOG(ERROR) << "Error decoding a video frame with timestamp: " | 118 LOG(ERROR) << "Error decoding a video frame with timestamp: " |
175 << buffer->GetTimestamp().InMicroseconds() << " us, duration: " | 119 << buffer->GetTimestamp().InMicroseconds() << " us, duration: " |
176 << buffer->GetDuration().InMicroseconds() << " us, packet size: " | 120 << buffer->GetDuration().InMicroseconds() << " us, packet size: " |
177 << buffer->GetDataSize() << " bytes"; | 121 << buffer->GetDataSize() << " bytes"; |
178 event_handler_->OnError(); | 122 *video_frame = NULL; |
179 return; | 123 return false; |
180 } | 124 } |
181 | 125 |
182 // If frame_decoded == 0, then no frame was produced. | 126 // If no frame was produced then signal that more data is required to |
183 // In this case, if we already begin to flush codec with empty | 127 // produce more frames. This can happen under two circumstances: |
184 // input packet at the end of input stream, the first time we | 128 // 1) Decoder was recently initialized/flushed |
185 // encounter frame_decoded == 0 signal output frame had been | 129 // 2) End of stream was reached and all internal frames have been output |
186 // drained, we mark the flag. Otherwise we read from demuxer again. | |
187 if (frame_decoded == 0) { | 130 if (frame_decoded == 0) { |
188 if (buffer->IsEndOfStream()) { // We had started flushing. | 131 *video_frame = NULL; |
189 event_handler_->ConsumeVideoFrame(video_frame, statistics); | 132 return true; |
190 output_eos_reached_ = true; | |
191 } else { | |
192 ReadInput(); | |
193 } | |
194 return; | |
195 } | 133 } |
196 | 134 |
197 // TODO(fbarchard): Work around for FFmpeg http://crbug.com/27675 | 135 // TODO(fbarchard): Work around for FFmpeg http://crbug.com/27675 |
198 // The decoder is in a bad state and not decoding correctly. | 136 // The decoder is in a bad state and not decoding correctly. |
199 // Checking for NULL avoids a crash in CopyPlane(). | 137 // Checking for NULL avoids a crash in CopyPlane(). |
200 if (!av_frame_->data[VideoFrame::kYPlane] || | 138 if (!av_frame_->data[VideoFrame::kYPlane] || |
201 !av_frame_->data[VideoFrame::kUPlane] || | 139 !av_frame_->data[VideoFrame::kUPlane] || |
202 !av_frame_->data[VideoFrame::kVPlane]) { | 140 !av_frame_->data[VideoFrame::kVPlane]) { |
203 event_handler_->OnError(); | 141 LOG(ERROR) << "Video frame was produced yet has invalid frame data."; |
204 return; | 142 *video_frame = NULL; |
| 143 return false; |
205 } | 144 } |
206 | 145 |
| 146 // We've got a frame! Make sure we have a place to store it. |
| 147 *video_frame = AllocateVideoFrame(); |
Ami GONE FROM CHROMIUM 2011/11/03 16:40:00: return false if NULL?
scherkus (not reviewing) 2011/11/03 20:34:38: Done.
| 148 |
207 // Determine timestamp and calculate the duration based on the repeat picture | 149 // Determine timestamp and calculate the duration based on the repeat picture |
208 // count. According to FFmpeg docs, the total duration can be calculated as | 150 // count. According to FFmpeg docs, the total duration can be calculated as |
209 // follows: | 151 // follows: |
210 // fps = 1 / time_base | 152 // fps = 1 / time_base |
211 // | 153 // |
212 // duration = (1 / fps) + (repeat_pict) / (2 * fps) | 154 // duration = (1 / fps) + (repeat_pict) / (2 * fps) |
213 // = (2 + repeat_pict) / (2 * fps) | 155 // = (2 + repeat_pict) / (2 * fps) |
214 // = (2 + repeat_pict) / (2 * (1 / time_base)) | 156 // = (2 + repeat_pict) / (2 * (1 / time_base)) |
215 DCHECK_LE(av_frame_->repeat_pict, 2); // Sanity check. | 157 DCHECK_LE(av_frame_->repeat_pict, 2); // Sanity check. |
216 AVRational doubled_time_base; | 158 AVRational doubled_time_base; |
217 doubled_time_base.num = frame_rate_denominator_; | 159 doubled_time_base.num = frame_rate_denominator_; |
218 doubled_time_base.den = frame_rate_numerator_ * 2; | 160 doubled_time_base.den = frame_rate_numerator_ * 2; |
219 | 161 |
220 base::TimeDelta timestamp = | 162 (*video_frame)->SetTimestamp( |
221 base::TimeDelta::FromMicroseconds(av_frame_->reordered_opaque); | 163 base::TimeDelta::FromMicroseconds(av_frame_->reordered_opaque)); |
222 base::TimeDelta duration = | 164 (*video_frame)->SetDuration( |
223 ConvertFromTimeBase(doubled_time_base, 2 + av_frame_->repeat_pict); | 165 ConvertFromTimeBase(doubled_time_base, 2 + av_frame_->repeat_pict)); |
224 | |
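To make the arithmetic concrete, a minimal standalone sketch of the duration formula. The 30000/1001 (29.97 fps) rate is an illustrative NTSC-style value, not taken from this change:

```cpp
#include <cstdint>
#include <cstdio>

// duration = (2 + repeat_pict) / (2 * fps), with fps = num / den.
int64_t DurationInMicroseconds(int num, int den, int repeat_pict) {
  return static_cast<int64_t>(2 + repeat_pict) * den * 1000000 / (2 * num);
}

int main() {
  // repeat_pict == 0: one full frame period, ~33.4 ms.
  std::printf("%lld us\n",
              static_cast<long long>(DurationInMicroseconds(30000, 1001, 0)));
  // repeat_pict == 1: an extra half frame period, exactly 50050 us.
  std::printf("%lld us\n",
              static_cast<long long>(DurationInMicroseconds(30000, 1001, 1)));
  return 0;
}
```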
225 // Available frame is guaranteed, because we issue as much reads as | |
226 // available frame, except the case of |frame_decoded| == 0, which | |
227 // implies decoder order delay, and force us to read more inputs. | |
228 DCHECK(frame_queue_available_.size()); | |
229 video_frame = frame_queue_available_.front(); | |
230 frame_queue_available_.pop_front(); | |
231 | 166 |
232 // Copy the frame data since FFmpeg reuses internal buffers for AVFrame | 167 // Copy the frame data since FFmpeg reuses internal buffers for AVFrame |
233 // output, meaning the data is only valid until the next | 168 // output, meaning the data is only valid until the next |
234 // avcodec_decode_video() call. | 169 // avcodec_decode_video() call. |
235 // | |
236 // TODO(scherkus): use VideoFrame dimensions instead and re-allocate | |
237 // VideoFrame if dimensions changes, but for now adjust size locally. | |
238 int y_rows = codec_context_->height; | 170 int y_rows = codec_context_->height; |
239 int uv_rows = codec_context_->height; | 171 int uv_rows = codec_context_->height; |
240 if (codec_context_->pix_fmt == PIX_FMT_YUV420P) { | 172 if (codec_context_->pix_fmt == PIX_FMT_YUV420P) { |
241 uv_rows /= 2; | 173 uv_rows /= 2; |
242 } | 174 } |
243 | 175 |
244 CopyYPlane(av_frame_->data[0], av_frame_->linesize[0], y_rows, video_frame); | 176 CopyYPlane(av_frame_->data[0], av_frame_->linesize[0], y_rows, *video_frame); |
245 CopyUPlane(av_frame_->data[1], av_frame_->linesize[1], uv_rows, video_frame); | 177 CopyUPlane(av_frame_->data[1], av_frame_->linesize[1], uv_rows, *video_frame); |
246 CopyVPlane(av_frame_->data[2], av_frame_->linesize[2], uv_rows, video_frame); | 178 CopyVPlane(av_frame_->data[2], av_frame_->linesize[2], uv_rows, *video_frame); |
247 | 179 |
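The row-by-row copy matters because FFmpeg's linesize (stride) is normally padded past the visible width for alignment, and for YUV420P the chroma planes carry half as many rows as the luma plane, hence uv_rows /= 2 above. A generic sketch of the pattern the CopyYPlane()/CopyUPlane()/CopyVPlane() helpers follow; parameter names are illustrative:

```cpp
#include <cstdint>
#include <cstring>

// Copy one plane row by row. Both strides may be wider than bytes_per_row,
// so a single memcpy of the whole plane would drag padding bytes along.
void CopyPlaneRows(const uint8_t* src, int src_stride,
                   uint8_t* dst, int dst_stride,
                   int bytes_per_row, int rows) {
  for (int i = 0; i < rows; ++i) {
    std::memcpy(dst, src, bytes_per_row);
    src += src_stride;
    dst += dst_stride;
  }
}
```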
248 video_frame->SetTimestamp(timestamp); | 180 return true; |
249 video_frame->SetDuration(duration); | |
250 | |
251 pending_output_buffers_--; | |
252 event_handler_->ConsumeVideoFrame(video_frame, statistics); | |
253 } | |
254 | |
255 void FFmpegVideoDecodeEngine::Uninitialize() { | |
256 event_handler_->OnUninitializeComplete(); | |
257 } | 181 } |
258 | 182 |
259 void FFmpegVideoDecodeEngine::Flush() { | 183 void FFmpegVideoDecodeEngine::Flush() { |
260 avcodec_flush_buffers(codec_context_); | 184 avcodec_flush_buffers(codec_context_); |
261 flush_pending_ = true; | |
262 TryToFinishPendingFlush(); | |
263 } | 185 } |
264 | 186 |
265 void FFmpegVideoDecodeEngine::TryToFinishPendingFlush() { | 187 scoped_refptr<VideoFrame> FFmpegVideoDecodeEngine::AllocateVideoFrame() { |
266 DCHECK(flush_pending_); | 188 VideoFrame::Format format = PixelFormatToVideoFormat(codec_context_->pix_fmt); |
| 189 size_t width = codec_context_->width; |
| 190 size_t height = codec_context_->height; |
267 | 191 |
268 // We consider ourself flushed when there is no pending input buffers | 192 return VideoFrame::CreateFrame(format, width, height, |
269 // and output buffers, which implies that all buffers had been returned | 193 kNoTimestamp, kNoTimestamp); |
270 // to its owner. | |
271 if (!pending_input_buffers_ && !pending_output_buffers_) { | |
272 // Try to finish flushing and notify pipeline. | |
273 flush_pending_ = false; | |
274 event_handler_->OnFlushComplete(); | |
275 } | |
276 } | |
277 | |
278 void FFmpegVideoDecodeEngine::Seek() { | |
279 // After a seek, output stream no longer considered as EOS. | |
280 output_eos_reached_ = false; | |
281 | |
282 // The buffer provider is assumed to perform pre-roll operation. | |
283 for (unsigned int i = 0; i < Limits::kMaxVideoFrames; ++i) | |
284 ReadInput(); | |
285 | |
286 event_handler_->OnSeekComplete(); | |
287 } | |
288 | |
289 void FFmpegVideoDecodeEngine::ReadInput() { | |
290 DCHECK_EQ(output_eos_reached_, false); | |
291 pending_input_buffers_++; | |
292 event_handler_->ProduceVideoSample(NULL); | |
293 } | 194 } |
294 | 195 |
295 } // namespace media | 196 } // namespace media |
296 | |
297 // Disable refcounting for this object because this object only lives | |
298 // on the video decoder thread and there's no need to refcount it. | |
299 DISABLE_RUNNABLE_METHOD_REFCOUNT(media::FFmpegVideoDecodeEngine); | |
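For context, a hypothetical driver loop showing how the refactored synchronous interface fits together. ReadEncodedBuffer() and Render() are stand-ins for the demuxer and renderer sides, not Chromium APIs:

```cpp
media::FFmpegVideoDecodeEngine engine;
if (!engine.Initialize(config))   // false: bad codec or avcodec_open() failed.
  return;

scoped_refptr<media::VideoFrame> frame;
while (scoped_refptr<media::Buffer> buffer = ReadEncodedBuffer()) {
  if (!engine.Decode(buffer, &frame))
    break;           // Hard decode error; give up on the stream.
  if (frame)
    Render(frame);   // A NULL frame just means "feed me more input".
}
// On seek the caller would call engine.Flush() to drop codec state;
// Uninitialize() releases the codec context and scratch frame.
engine.Uninitialize();
```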