OLD | NEW |
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "media/filters/ffmpeg_video_decoder.h" | 5 #include "media/filters/ffmpeg_video_decoder.h" |
6 | 6 |
7 #include <algorithm> | 7 #include <algorithm> |
8 #include <string> | 8 #include <string> |
9 | 9 |
10 #include "base/bind.h" | 10 #include "base/bind.h" |
(...skipping 133 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
144 av_buffer_create(frame->data[0], | 144 av_buffer_create(frame->data[0], |
145 VideoFrame::AllocationSize(format, coded_size), | 145 VideoFrame::AllocationSize(format, coded_size), |
146 ReleaseVideoBufferImpl, | 146 ReleaseVideoBufferImpl, |
147 opaque, | 147 opaque, |
148 0); | 148 0); |
149 return 0; | 149 return 0; |
150 } | 150 } |
151 | 151 |
152 void FFmpegVideoDecoder::Initialize(const VideoDecoderConfig& config, | 152 void FFmpegVideoDecoder::Initialize(const VideoDecoderConfig& config, |
153 bool low_delay, | 153 bool low_delay, |
154 const PipelineStatusCB& status_cb) { | 154 const PipelineStatusCB& status_cb, |
| 155 const OutputCB& output_cb) { |
155 DCHECK(task_runner_->BelongsToCurrentThread()); | 156 DCHECK(task_runner_->BelongsToCurrentThread()); |
156 DCHECK(decode_cb_.is_null()); | |
157 DCHECK(!config.is_encrypted()); | 157 DCHECK(!config.is_encrypted()); |
| 158 DCHECK(!output_cb.is_null()); |
158 | 159 |
159 FFmpegGlue::InitializeFFmpeg(); | 160 FFmpegGlue::InitializeFFmpeg(); |
160 | 161 |
161 config_ = config; | 162 config_ = config; |
162 PipelineStatusCB initialize_cb = BindToCurrentLoop(status_cb); | 163 PipelineStatusCB initialize_cb = BindToCurrentLoop(status_cb); |
163 | 164 |
164 if (!config.IsValidConfig() || !ConfigureDecoder(low_delay)) { | 165 if (!config.IsValidConfig() || !ConfigureDecoder(low_delay)) { |
165 initialize_cb.Run(DECODER_ERROR_NOT_SUPPORTED); | 166 initialize_cb.Run(DECODER_ERROR_NOT_SUPPORTED); |
166 return; | 167 return; |
167 } | 168 } |
168 | 169 |
| 170 output_cb_ = BindToCurrentLoop(output_cb); |
| 171 |
169 // Success! | 172 // Success! |
170 state_ = kNormal; | 173 state_ = kNormal; |
171 initialize_cb.Run(PIPELINE_OK); | 174 initialize_cb.Run(PIPELINE_OK); |
172 } | 175 } |
173 | 176 |
174 void FFmpegVideoDecoder::Decode(const scoped_refptr<DecoderBuffer>& buffer, | 177 void FFmpegVideoDecoder::Decode(const scoped_refptr<DecoderBuffer>& buffer, |
175 const DecodeCB& decode_cb) { | 178 const DecodeCB& decode_cb) { |
176 DCHECK(task_runner_->BelongsToCurrentThread()); | 179 DCHECK(task_runner_->BelongsToCurrentThread()); |
| 180 DCHECK(buffer); |
177 DCHECK(!decode_cb.is_null()); | 181 DCHECK(!decode_cb.is_null()); |
178 CHECK_NE(state_, kUninitialized); | 182 CHECK_NE(state_, kUninitialized); |
179 CHECK(decode_cb_.is_null()) << "Overlapping decodes are not supported."; | 183 |
180 decode_cb_ = BindToCurrentLoop(decode_cb); | 184 DecodeCB decode_cb_bound = BindToCurrentLoop(decode_cb); |
181 | 185 |
182 if (state_ == kError) { | 186 if (state_ == kError) { |
183 base::ResetAndReturn(&decode_cb_).Run(kDecodeError, NULL); | 187 decode_cb_bound.Run(kDecodeError); |
184 return; | 188 return; |
185 } | 189 } |
186 | 190 |
187 // Return empty frames if decoding has finished. | |
188 if (state_ == kDecodeFinished) { | 191 if (state_ == kDecodeFinished) { |
189 base::ResetAndReturn(&decode_cb_).Run(kOk, VideoFrame::CreateEOSFrame()); | 192 output_cb_.Run(VideoFrame::CreateEOSFrame()); |
| 193 decode_cb_bound.Run(kOk); |
190 return; | 194 return; |
191 } | 195 } |
192 | 196 |
193 DecodeBuffer(buffer); | 197 DCHECK_EQ(state_, kNormal); |
194 } | |
195 | |
196 void FFmpegVideoDecoder::Reset(const base::Closure& closure) { | |
197 DCHECK(task_runner_->BelongsToCurrentThread()); | |
198 DCHECK(decode_cb_.is_null()); | |
199 | |
200 avcodec_flush_buffers(codec_context_.get()); | |
201 state_ = kNormal; | |
202 task_runner_->PostTask(FROM_HERE, closure); | |
203 } | |
204 | |
205 void FFmpegVideoDecoder::Stop() { | |
206 DCHECK(task_runner_->BelongsToCurrentThread()); | |
207 | |
208 if (state_ == kUninitialized) | |
209 return; | |
210 | |
211 ReleaseFFmpegResources(); | |
212 state_ = kUninitialized; | |
213 } | |
214 | |
215 FFmpegVideoDecoder::~FFmpegVideoDecoder() { | |
216 DCHECK_EQ(kUninitialized, state_); | |
217 DCHECK(!codec_context_); | |
218 DCHECK(!av_frame_); | |
219 } | |
220 | |
221 void FFmpegVideoDecoder::DecodeBuffer( | |
222 const scoped_refptr<DecoderBuffer>& buffer) { | |
223 DCHECK(task_runner_->BelongsToCurrentThread()); | |
224 DCHECK_NE(state_, kUninitialized); | |
225 DCHECK_NE(state_, kDecodeFinished); | |
226 DCHECK_NE(state_, kError); | |
227 DCHECK(!decode_cb_.is_null()); | |
228 DCHECK(buffer); | |
229 | 198 |
230 // During decode, because reads are issued asynchronously, it is possible to | 199 // During decode, because reads are issued asynchronously, it is possible to |
231 // receive multiple end of stream buffers since each decode is acked. When the | 200 // receive multiple end of stream buffers since each decode is acked. When the |
232 // first end of stream buffer is read, FFmpeg may still have frames queued | 201 // first end of stream buffer is read, FFmpeg may still have frames queued |
233 // up in the decoder so we need to go through the decode loop until it stops | 202 // up in the decoder so we need to go through the decode loop until it stops |
234 // giving sensible data. After that, the decoder should output empty | 203 // giving sensible data. After that, the decoder should output empty |
235 // frames. There are three states the decoder can be in: | 204 // frames. There are three states the decoder can be in: |
236 // | 205 // |
237 // kNormal: This is the starting state. Buffers are decoded. Decode errors | 206 // kNormal: This is the starting state. Buffers are decoded. Decode errors |
238 // are discarded. | 207 // are discarded. |
239 // kFlushCodec: There isn't any more input data. Call avcodec_decode_video2 | |
240 // until no more data is returned to flush out remaining | |
241 // frames. The input buffer is ignored at this point. | |
242 // kDecodeFinished: All calls return empty frames. | 208 // kDecodeFinished: All calls return empty frames. |
243 // kError: Unexpected error happened. | 209 // kError: Unexpected error happened. |
244 // | 210 // |
245 // These are the possible state transitions. | 211 // These are the possible state transitions. |
246 // | 212 // |
247 // kNormal -> kFlushCodec: | 213 // kNormal -> kDecodeFinished: |
248 // When buffer->end_of_stream() is first true. | 214 // When EOS buffer is received and the codec has been flushed. |
249 // kNormal -> kError: | 215 // kNormal -> kError: |
250 // A decoding error occurs and decoding needs to stop. | 216 // A decoding error occurs and decoding needs to stop. |
251 // kFlushCodec -> kDecodeFinished: | |
252 // When avcodec_decode_video2() returns 0 data. | |
253 // kFlushCodec -> kError: | |
254 // When avcodec_decode_video2() errors out. | |
255 // (any state) -> kNormal: | 217 // (any state) -> kNormal: |
256 // Any time Reset() is called. | 218 // Any time Reset() is called. |
257 | 219 |
258 // Transition to kFlushCodec on the first end of stream buffer. | 220 bool has_produced_frame; |
259 if (state_ == kNormal && buffer->end_of_stream()) { | 221 do { |
260 state_ = kFlushCodec; | 222 has_produced_frame = false; |
| 223 if (!FFmpegDecode(buffer, &has_produced_frame)) { |
| 224 state_ = kError; |
| 225 decode_cb_bound.Run(kDecodeError); |
| 226 return; |
| 227 } |
| 228 // Repeat to flush the decoder after receiving EOS buffer. |
| 229 } while (buffer->end_of_stream() && has_produced_frame); |
| 230 |
| 231 if (buffer->end_of_stream()) { |
| 232 output_cb_.Run(VideoFrame::CreateEOSFrame()); |
| 233 state_ = kDecodeFinished; |
261 } | 234 } |
262 | 235 |
263 scoped_refptr<VideoFrame> video_frame; | 236 decode_cb_bound.Run(kOk); |
264 if (!FFmpegDecode(buffer, &video_frame)) { | 237 } |
265 state_ = kError; | 238 |
// Discards any queued output and returns the decoder to a decodable state.
// |closure| is invoked asynchronously on |task_runner_| once the reset is
// complete.
void FFmpegVideoDecoder::Reset(const base::Closure& closure) {
  DCHECK(task_runner_->BelongsToCurrentThread());

  // Drop frames buffered inside FFmpeg so stale output cannot leak into
  // whatever is decoded after the reset.
  avcodec_flush_buffers(codec_context_.get());
  // Clear kDecodeFinished/kError so decoding can proceed again.
  state_ = kNormal;
  // Post rather than run directly so completion is always asynchronous.
  task_runner_->PostTask(FROM_HERE, closure);
}
| 246 |
| 247 void FFmpegVideoDecoder::Stop() { |
| 248 DCHECK(task_runner_->BelongsToCurrentThread()); |
| 249 |
| 250 if (state_ == kUninitialized) |
267 return; | 251 return; |
268 } | |
269 | 252 |
270 if (!video_frame.get()) { | 253 ReleaseFFmpegResources(); |
271 if (state_ == kFlushCodec) { | 254 state_ = kUninitialized; |
272 DCHECK(buffer->end_of_stream()); | 255 } |
273 state_ = kDecodeFinished; | |
274 base::ResetAndReturn(&decode_cb_) | |
275 .Run(kOk, VideoFrame::CreateEOSFrame()); | |
276 return; | |
277 } | |
278 | 256 |
FFmpegVideoDecoder::~FFmpegVideoDecoder() {
  // Stop() must have run (or Initialize() never succeeded): the decoder
  // must hold no FFmpeg state by destruction time.
  DCHECK_EQ(kUninitialized, state_);
  DCHECK(!codec_context_);
  DCHECK(!av_frame_);
}
285 | 262 |
286 bool FFmpegVideoDecoder::FFmpegDecode( | 263 bool FFmpegVideoDecoder::FFmpegDecode( |
287 const scoped_refptr<DecoderBuffer>& buffer, | 264 const scoped_refptr<DecoderBuffer>& buffer, |
288 scoped_refptr<VideoFrame>* video_frame) { | 265 bool* has_produced_frame) { |
289 DCHECK(video_frame); | 266 DCHECK(!*has_produced_frame); |
290 | 267 |
291 // Create a packet for input data. | 268 // Create a packet for input data. |
292 // Due to FFmpeg API changes we no longer have const read-only pointers. | 269 // Due to FFmpeg API changes we no longer have const read-only pointers. |
293 AVPacket packet; | 270 AVPacket packet; |
294 av_init_packet(&packet); | 271 av_init_packet(&packet); |
295 if (buffer->end_of_stream()) { | 272 if (buffer->end_of_stream()) { |
296 packet.data = NULL; | 273 packet.data = NULL; |
297 packet.size = 0; | 274 packet.size = 0; |
298 } else { | 275 } else { |
299 packet.data = const_cast<uint8*>(buffer->data()); | 276 packet.data = const_cast<uint8*>(buffer->data()); |
300 packet.size = buffer->data_size(); | 277 packet.size = buffer->data_size(); |
301 | 278 |
302 // Let FFmpeg handle presentation timestamp reordering. | 279 // Let FFmpeg handle presentation timestamp reordering. |
303 codec_context_->reordered_opaque = buffer->timestamp().InMicroseconds(); | 280 codec_context_->reordered_opaque = buffer->timestamp().InMicroseconds(); |
304 } | 281 } |
305 | 282 |
306 int frame_decoded = 0; | 283 int frame_decoded = 0; |
307 int result = avcodec_decode_video2(codec_context_.get(), | 284 int result = avcodec_decode_video2(codec_context_.get(), |
308 av_frame_.get(), | 285 av_frame_.get(), |
309 &frame_decoded, | 286 &frame_decoded, |
310 &packet); | 287 &packet); |
311 // Log the problem if we can't decode a video frame and exit early. | 288 // Log the problem if we can't decode a video frame and exit early. |
312 if (result < 0) { | 289 if (result < 0) { |
313 LOG(ERROR) << "Error decoding video: " << buffer->AsHumanReadableString(); | 290 LOG(ERROR) << "Error decoding video: " << buffer->AsHumanReadableString(); |
314 *video_frame = NULL; | |
315 return false; | 291 return false; |
316 } | 292 } |
317 | 293 |
318 // FFmpeg says some codecs might have multiple frames per packet. Previous | 294 // FFmpeg says some codecs might have multiple frames per packet. Previous |
319 // discussions with rbultje@ indicate this shouldn't be true for the codecs | 295 // discussions with rbultje@ indicate this shouldn't be true for the codecs |
320 // we use. | 296 // we use. |
321 DCHECK_EQ(result, packet.size); | 297 DCHECK_EQ(result, packet.size); |
322 | 298 |
323 // If no frame was produced then signal that more data is required to | 299 // If no frame was produced then signal that more data is required to |
324 // produce more frames. This can happen under two circumstances: | 300 // produce more frames. This can happen under two circumstances: |
325 // 1) Decoder was recently initialized/flushed | 301 // 1) Decoder was recently initialized/flushed |
326 // 2) End of stream was reached and all internal frames have been output | 302 // 2) End of stream was reached and all internal frames have been output |
327 if (frame_decoded == 0) { | 303 if (frame_decoded == 0) { |
328 *video_frame = NULL; | |
329 return true; | 304 return true; |
330 } | 305 } |
331 | 306 |
332 // TODO(fbarchard): Work around for FFmpeg http://crbug.com/27675 | 307 // TODO(fbarchard): Work around for FFmpeg http://crbug.com/27675 |
333 // The decoder is in a bad state and not decoding correctly. | 308 // The decoder is in a bad state and not decoding correctly. |
334 // Checking for NULL avoids a crash in CopyPlane(). | 309 // Checking for NULL avoids a crash in CopyPlane(). |
335 if (!av_frame_->data[VideoFrame::kYPlane] || | 310 if (!av_frame_->data[VideoFrame::kYPlane] || |
336 !av_frame_->data[VideoFrame::kUPlane] || | 311 !av_frame_->data[VideoFrame::kUPlane] || |
337 !av_frame_->data[VideoFrame::kVPlane]) { | 312 !av_frame_->data[VideoFrame::kVPlane]) { |
338 LOG(ERROR) << "Video frame was produced yet has invalid frame data."; | 313 LOG(ERROR) << "Video frame was produced yet has invalid frame data."; |
339 *video_frame = NULL; | |
340 av_frame_unref(av_frame_.get()); | 314 av_frame_unref(av_frame_.get()); |
341 return false; | 315 return false; |
342 } | 316 } |
343 | 317 |
344 *video_frame = | 318 scoped_refptr<VideoFrame> frame = |
345 reinterpret_cast<VideoFrame*>(av_buffer_get_opaque(av_frame_->buf[0])); | 319 reinterpret_cast<VideoFrame*>(av_buffer_get_opaque(av_frame_->buf[0])); |
346 | 320 frame->set_timestamp( |
347 (*video_frame)->set_timestamp( | |
348 base::TimeDelta::FromMicroseconds(av_frame_->reordered_opaque)); | 321 base::TimeDelta::FromMicroseconds(av_frame_->reordered_opaque)); |
| 322 *has_produced_frame = true; |
| 323 output_cb_.Run(frame); |
349 | 324 |
350 av_frame_unref(av_frame_.get()); | 325 av_frame_unref(av_frame_.get()); |
351 return true; | 326 return true; |
352 } | 327 } |
353 | 328 |
// Drops the FFmpeg codec context and the reusable decode frame. Called from
// Stop() and when decoder configuration fails.
void FFmpegVideoDecoder::ReleaseFFmpegResources() {
  codec_context_.reset();
  av_frame_.reset();
}
358 | 333 |
(...skipping 19 matching lines...) Expand all Loading... |
378 if (!codec || avcodec_open2(codec_context_.get(), codec, NULL) < 0) { | 353 if (!codec || avcodec_open2(codec_context_.get(), codec, NULL) < 0) { |
379 ReleaseFFmpegResources(); | 354 ReleaseFFmpegResources(); |
380 return false; | 355 return false; |
381 } | 356 } |
382 | 357 |
383 av_frame_.reset(av_frame_alloc()); | 358 av_frame_.reset(av_frame_alloc()); |
384 return true; | 359 return true; |
385 } | 360 } |
386 | 361 |
387 } // namespace media | 362 } // namespace media |
OLD | NEW |