OLD | NEW |
1 // Copyright (c) 2010 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2010 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "media/filters/ffmpeg_video_decoder.h" | 5 #include "media/filters/ffmpeg_video_decoder.h" |
6 | 6 |
7 #include <deque> | 7 #include <deque> |
8 | 8 |
9 #include "base/task.h" | 9 #include "base/task.h" |
| 10 #include "media/base/callback.h" |
10 #include "media/base/filters.h" | 11 #include "media/base/filters.h" |
| 12 #include "media/base/filter_host.h" |
11 #include "media/base/limits.h" | 13 #include "media/base/limits.h" |
12 #include "media/base/media_format.h" | 14 #include "media/base/media_format.h" |
13 #include "media/base/video_frame.h" | 15 #include "media/base/video_frame.h" |
14 #include "media/ffmpeg/ffmpeg_common.h" | 16 #include "media/ffmpeg/ffmpeg_common.h" |
15 #include "media/ffmpeg/ffmpeg_util.h" | 17 #include "media/ffmpeg/ffmpeg_util.h" |
16 #include "media/filters/ffmpeg_interfaces.h" | 18 #include "media/filters/ffmpeg_interfaces.h" |
17 #include "media/filters/ffmpeg_video_decode_engine.h" | 19 #include "media/filters/ffmpeg_video_decode_engine.h" |
18 #include "media/filters/video_decode_engine.h" | 20 #include "media/filters/video_decode_engine.h" |
19 | 21 |
20 namespace media { | 22 namespace media { |
21 | 23 |
22 FFmpegVideoDecoder::FFmpegVideoDecoder(VideoDecodeEngine* engine) | 24 FFmpegVideoDecoder::FFmpegVideoDecoder(VideoDecodeEngine* engine) |
23 : width_(0), | 25 : width_(0), |
24 height_(0), | 26 height_(0), |
25 time_base_(new AVRational()), | 27 time_base_(new AVRational()), |
26 state_(kNormal), | 28 state_(kUnInitialized), |
27 decode_engine_(engine) { | 29 decode_engine_(engine), |
| 30 pending_reads_(0), |
| 31 pending_requests_(0) { |
28 } | 32 } |
29 | 33 |
30 FFmpegVideoDecoder::~FFmpegVideoDecoder() { | 34 FFmpegVideoDecoder::~FFmpegVideoDecoder() { |
31 } | 35 } |
32 | 36 |
33 void FFmpegVideoDecoder::DoInitialize(DemuxerStream* demuxer_stream, | 37 void FFmpegVideoDecoder::Initialize(DemuxerStream* demuxer_stream, |
34 bool* success, | 38 FilterCallback* callback) { |
35 Task* done_cb) { | 39 if (MessageLoop::current() != message_loop()) { |
36 AutoTaskRunner done_runner(done_cb); | 40 message_loop()->PostTask(FROM_HERE, |
37 *success = false; | 41 NewRunnableMethod(this, |
| 42 &FFmpegVideoDecoder::Initialize, |
| 43 demuxer_stream, |
| 44 callback)); |
| 45 return; |
| 46 } |
| 47 |
| 48 DCHECK_EQ(MessageLoop::current(), message_loop()); |
| 49 DCHECK(!demuxer_stream_); |
| 50 |
| 51 demuxer_stream_ = demuxer_stream; |
38 | 52 |
39 // Get the AVStream by querying for the provider interface. | 53 // Get the AVStream by querying for the provider interface. |
40 AVStreamProvider* av_stream_provider; | 54 AVStreamProvider* av_stream_provider; |
41 if (!demuxer_stream->QueryInterface(&av_stream_provider)) { | 55 if (!demuxer_stream->QueryInterface(&av_stream_provider)) { |
| 56 FFmpegVideoDecoder::OnInitializeComplete(callback); |
42 return; | 57 return; |
43 } | 58 } |
44 AVStream* av_stream = av_stream_provider->GetAVStream(); | 59 AVStream* av_stream = av_stream_provider->GetAVStream(); |
45 | 60 |
46 time_base_->den = av_stream->r_frame_rate.num; | 61 time_base_->den = av_stream->r_frame_rate.num; |
47 time_base_->num = av_stream->r_frame_rate.den; | 62 time_base_->num = av_stream->r_frame_rate.den; |
48 | 63 |
49 // TODO(ajwong): We don't need these extra variables if |media_format_| has | 64 // TODO(ajwong): We don't need these extra variables if |media_format_| has |
50 // them. Remove. | 65 // them. Remove. |
51 width_ = av_stream->codec->width; | 66 width_ = av_stream->codec->width; |
52 height_ = av_stream->codec->height; | 67 height_ = av_stream->codec->height; |
53 if (width_ > Limits::kMaxDimension || | 68 if (width_ > Limits::kMaxDimension || |
54 height_ > Limits::kMaxDimension || | 69 height_ > Limits::kMaxDimension || |
55 (width_ * height_) > Limits::kMaxCanvas) { | 70 (width_ * height_) > Limits::kMaxCanvas) { |
| 71 FFmpegVideoDecoder::OnInitializeComplete(callback); |
56 return; | 72 return; |
57 } | 73 } |
58 | 74 |
59 decode_engine_->Initialize( | 75 decode_engine_->Initialize( |
60 message_loop(), | 76 message_loop(), |
61 av_stream, | 77 av_stream, |
62 NewCallback(this, &FFmpegVideoDecoder::OnEmptyBufferDone), | 78 NewCallback(this, &FFmpegVideoDecoder::OnEngineEmptyBufferDone), |
63 NewCallback(this, &FFmpegVideoDecoder::OnDecodeComplete), | 79 NewCallback(this, &FFmpegVideoDecoder::OnEngineFillBufferDone), |
64 NewRunnableMethod(this, | 80 NewRunnableMethod(this, |
65 &FFmpegVideoDecoder::OnInitializeComplete, | 81 &FFmpegVideoDecoder::OnInitializeComplete, |
66 success, | 82 callback)); |
67 done_runner.release())); | |
68 } | 83 } |
69 | 84 |
70 void FFmpegVideoDecoder::OnInitializeComplete(bool* success, Task* done_cb) { | 85 void FFmpegVideoDecoder::OnInitializeComplete(FilterCallback* callback) { |
71 AutoTaskRunner done_runner(done_cb); | 86 CHECK_EQ(MessageLoop::current(), message_loop()); |
72 | 87 |
73 *success = decode_engine_->state() == VideoDecodeEngine::kNormal; | 88 AutoCallbackRunner done_runner(callback); |
74 if (*success) { | 89 |
| 90 bool success = decode_engine_->state() == VideoDecodeEngine::kNormal; |
| 91 if (success) { |
75 media_format_.SetAsString(MediaFormat::kMimeType, | 92 media_format_.SetAsString(MediaFormat::kMimeType, |
76 mime_type::kUncompressedVideo); | 93 mime_type::kUncompressedVideo); |
77 media_format_.SetAsInteger(MediaFormat::kWidth, width_); | 94 media_format_.SetAsInteger(MediaFormat::kWidth, width_); |
78 media_format_.SetAsInteger(MediaFormat::kHeight, height_); | 95 media_format_.SetAsInteger(MediaFormat::kHeight, height_); |
79 media_format_.SetAsInteger( | 96 media_format_.SetAsInteger( |
80 MediaFormat::kSurfaceType, | 97 MediaFormat::kSurfaceType, |
81 static_cast<int>(VideoFrame::TYPE_SYSTEM_MEMORY)); | 98 static_cast<int>(VideoFrame::TYPE_SYSTEM_MEMORY)); |
82 media_format_.SetAsInteger( | 99 media_format_.SetAsInteger( |
83 MediaFormat::kSurfaceFormat, | 100 MediaFormat::kSurfaceFormat, |
84 static_cast<int>(decode_engine_->GetSurfaceFormat())); | 101 static_cast<int>(decode_engine_->GetSurfaceFormat())); |
| 102 state_ = kNormal; |
| 103 } else { |
| 104 host()->SetError(PIPELINE_ERROR_DECODE); |
85 } | 105 } |
86 } | 106 } |
87 | 107 |
88 void FFmpegVideoDecoder::DoStop(Task* done_cb) { | 108 void FFmpegVideoDecoder::Stop(FilterCallback* callback) { |
89 decode_engine_->Stop(done_cb); | 109 if (MessageLoop::current() != message_loop()) { |
| 110 message_loop()->PostTask(FROM_HERE, |
| 111 NewRunnableMethod(this, |
| 112 &FFmpegVideoDecoder::Stop, |
| 113 callback)); |
| 114 return; |
| 115 } |
| 116 |
| 117 DCHECK_EQ(MessageLoop::current(), message_loop()); |
| 118 |
| 119 decode_engine_->Stop( |
| 120 NewRunnableMethod(this, &FFmpegVideoDecoder::OnStopComplete, callback)); |
90 } | 121 } |
91 | 122 |
92 void FFmpegVideoDecoder::DoSeek(base::TimeDelta time, Task* done_cb) { | 123 void FFmpegVideoDecoder::OnStopComplete(FilterCallback* callback) { |
| 124 DCHECK_EQ(MessageLoop::current(), message_loop()); |
| 125 |
| 126 AutoCallbackRunner done_runner(callback); |
| 127 state_ = kStopped; |
| 128 } |
| 129 |
| 130 void FFmpegVideoDecoder::Flush(FilterCallback* callback) { |
| 131 if (MessageLoop::current() != message_loop()) { |
| 132 message_loop()->PostTask(FROM_HERE, |
| 133 NewRunnableMethod(this, |
| 134 &FFmpegVideoDecoder::Flush, |
| 135 callback)); |
| 136 return; |
| 137 } |
| 138 |
| 139 DCHECK_EQ(MessageLoop::current(), message_loop()); |
| 140 |
93 // Everything in the presentation time queue is invalid, clear the queue. | 141 // Everything in the presentation time queue is invalid, clear the queue. |
94 while (!pts_heap_.IsEmpty()) | 142 while (!pts_heap_.IsEmpty()) |
95 pts_heap_.Pop(); | 143 pts_heap_.Pop(); |
96 | 144 |
97 // We're back where we started. It should be completely safe to flush here | 145 decode_engine_->Flush( |
98 // since DecoderBase uses |expecting_discontinuous_| to verify that the next | 146 NewRunnableMethod(this, &FFmpegVideoDecoder::OnFlushComplete, callback)); |
99 // time DoDecode() is called we will have a discontinuous buffer. | |
100 // | |
101 // TODO(ajwong): Should we put a guard here to prevent leaving kError. | |
102 state_ = kNormal; | |
103 | |
104 decode_engine_->Flush(done_cb); | |
105 } | 147 } |
106 | 148 |
107 void FFmpegVideoDecoder::DoDecode(Buffer* buffer) { | 149 void FFmpegVideoDecoder::OnFlushComplete(FilterCallback* callback) { |
108 // TODO(ajwong): This DoDecode() and OnDecodeComplete() set of functions is | 150 DCHECK_EQ(MessageLoop::current(), message_loop()); |
109 // too complicated to easily unittest. The test becomes fragile. Try to | 151 |
110 // find a way to reorganize into smaller units for testing. | 152 AutoCallbackRunner done_runner(callback); |
| 153 |
| 154 // Since we are sending Flush() in reverse order of the filters (i.e. |
| 155 // flushing the renderer before the decoder), we can guarantee the following invariant. |
| 156 // TODO(jiesun): when we move to parallel Flush, we should remove this. |
| 157 DCHECK_EQ(0u, pending_reads_) << "Pending reads should have completed"; |
| 158 DCHECK_EQ(0u, pending_requests_) << "Pending requests should be empty"; |
| 159 |
| 160 } |
| 161 |
| 162 void FFmpegVideoDecoder::Seek(base::TimeDelta time, |
| 163 FilterCallback* callback) { |
| 164 if (MessageLoop::current() != message_loop()) { |
| 165 message_loop()->PostTask(FROM_HERE, |
| 166 NewRunnableMethod(this, |
| 167 &FFmpegVideoDecoder::Seek, |
| 168 time, |
| 169 callback)); |
| 170 return; |
| 171 } |
| 172 |
| 173 DCHECK_EQ(MessageLoop::current(), message_loop()); |
| 174 |
| 175 decode_engine_->Seek( |
| 176 NewRunnableMethod(this, &FFmpegVideoDecoder::OnSeekComplete, callback)); |
| 177 } |
| 178 |
| 179 void FFmpegVideoDecoder::OnSeekComplete(FilterCallback* callback) { |
| 180 DCHECK_EQ(MessageLoop::current(), message_loop()); |
| 181 |
| 182 AutoCallbackRunner done_runner(callback); |
| 183 state_ = kNormal; |
| 184 } |
| 185 |
| 186 void FFmpegVideoDecoder::OnReadComplete(Buffer* buffer_in) { |
| 187 scoped_refptr<Buffer> buffer = buffer_in; |
| 188 message_loop()->PostTask( |
| 189 FROM_HERE, |
| 190 NewRunnableMethod(this, |
| 191 &FFmpegVideoDecoder::OnReadCompleteTask, |
| 192 buffer)); |
| 193 } |
| 194 |
| 195 void FFmpegVideoDecoder::OnReadCompleteTask(scoped_refptr<Buffer> buffer) { |
| 196 DCHECK_EQ(MessageLoop::current(), message_loop()); |
| 197 DCHECK_GT(pending_reads_, 0u); |
| 198 |
| 199 --pending_reads_; |
111 | 200 |
112 // During decode, because reads are issued asynchronously, it is possible to | 201 // During decode, because reads are issued asynchronously, it is possible to |
113 // receive multiple end of stream buffers since each read is acked. When the | 202 // receive multiple end of stream buffers since each read is acked. When the |
114 // first end of stream buffer is read, FFmpeg may still have frames queued | 203 // first end of stream buffer is read, FFmpeg may still have frames queued |
115 // up in the decoder so we need to go through the decode loop until it stops | 204 // up in the decoder so we need to go through the decode loop until it stops |
116 // giving sensible data. After that, the decoder should output empty | 205 // giving sensible data. After that, the decoder should output empty |
117 // frames. There are three states the decoder can be in: | 206 // frames. There are three states the decoder can be in: |
118 // | 207 // |
119 // kNormal: This is the starting state. Buffers are decoded. Decode errors | 208 // kNormal: This is the starting state. Buffers are decoded. Decode errors |
120 // are discarded. | 209 // are discarded. |
121 // kFlushCodec: There isn't any more input data. Call avcodec_decode_video2 | 210 // kFlushCodec: There isn't any more input data. Call avcodec_decode_video2 |
122 // until no more data is returned to flush out remaining | 211 // until no more data is returned to flush out remaining |
123 // frames. The input buffer is ignored at this point. | 212 // frames. The input buffer is ignored at this point. |
124 // kDecodeFinished: All calls return empty frames. | 213 // kDecodeFinished: All calls return empty frames. |
125 // | 214 // |
126 // These are the possible state transitions. | 215 // These are the possible state transitions. |
127 // | 216 // |
128 // kNormal -> kFlushCodec: | 217 // kNormal -> kFlushCodec: |
129 // When buffer->IsEndOfStream() is first true. | 218 // When buffer->IsEndOfStream() is first true. |
130 // kNormal -> kDecodeFinished: | 219 // kNormal -> kDecodeFinished: |
131 // A catastrophic failure occurs, and decoding needs to stop. | 220 // A catastrophic failure occurs, and decoding needs to stop. |
132 // kFlushCodec -> kDecodeFinished: | 221 // kFlushCodec -> kDecodeFinished: |
133 // When avcodec_decode_video2() returns 0 data or errors out. | 222 // When avcodec_decode_video2() returns 0 data or errors out. |
134 // (any state) -> kNormal: | 223 // (any state) -> kNormal: |
135 // Any time buffer->IsDiscontinuous() is true. | 224 // Any time buffer->IsDiscontinuous() is true. |
136 // | 225 // |
137 // If the decoding is finished, we just always return empty frames. | 226 // If the decoding is finished, we just always return empty frames. |
138 if (state_ == kDecodeFinished) { | 227 if (state_ == kDecodeFinished || state_ == kStopped) { |
139 EnqueueEmptyFrame(); | 228 DCHECK(buffer->IsEndOfStream()); |
140 OnEmptyBufferDone(NULL); | 229 |
| 230 --pending_requests_; |
| 231 // Signal the end-of-stream event to the VideoRenderer. |
| 232 scoped_refptr<VideoFrame> video_frame; |
| 233 VideoFrame::CreateEmptyFrame(&video_frame); |
| 234 fill_buffer_done_callback()->Run(video_frame); |
141 return; | 235 return; |
142 } | 236 } |
143 | 237 |
144 // Transition to kFlushCodec on the first end of stream buffer. | 238 // Transition to kFlushCodec on the first end of stream buffer. |
145 if (state_ == kNormal && buffer->IsEndOfStream()) { | 239 if (state_ == kNormal && buffer->IsEndOfStream()) { |
146 state_ = kFlushCodec; | 240 state_ = kFlushCodec; |
147 } | 241 } |
148 | 242 |
149 // Push all incoming timestamps into the priority queue as long as we have | 243 // Push all incoming timestamps into the priority queue as long as we have |
150 // not yet received an end of stream buffer. It is important that this line | 244 // not yet received an end of stream buffer. It is important that this line |
151 // stay below the state transition into kFlushCodec done above. | 245 // stay below the state transition into kFlushCodec done above. |
152 // | 246 // |
153 // TODO(ajwong): This push logic, along with the pop logic below needs to | 247 // TODO(ajwong): This push logic, along with the pop logic below needs to |
154 // be reevaluated to correctly handle decode errors. | 248 // be reevaluated to correctly handle decode errors. |
155 if (state_ == kNormal && | 249 if (state_ == kNormal && !buffer->IsEndOfStream() && |
156 buffer->GetTimestamp() != StreamSample::kInvalidTimestamp) { | 250 buffer->GetTimestamp() != StreamSample::kInvalidTimestamp) { |
157 pts_heap_.Push(buffer->GetTimestamp()); | 251 pts_heap_.Push(buffer->GetTimestamp()); |
158 } | 252 } |
159 | 253 |
160 // Otherwise, attempt to decode a single frame. | 254 // Otherwise, attempt to decode a single frame. |
161 decode_engine_->EmptyThisBuffer(buffer); | 255 decode_engine_->EmptyThisBuffer(buffer); |
162 } | 256 } |
163 | 257 |
164 void FFmpegVideoDecoder::OnDecodeComplete( | 258 void FFmpegVideoDecoder::FillThisBuffer( |
165 scoped_refptr<VideoFrame> video_frame) { | 259 scoped_refptr<VideoFrame> video_frame) { |
| 260 if (MessageLoop::current() != message_loop()) { |
| 261 message_loop()->PostTask( |
| 262 FROM_HERE, |
| 263 NewRunnableMethod(this, |
| 264 &FFmpegVideoDecoder::FillThisBuffer, |
| 265 video_frame)); |
| 266 return; |
| 267 } |
| 268 |
| 269 DCHECK_EQ(MessageLoop::current(), message_loop()); |
| 270 |
| 271 // Synchronized flushing before stop should prevent this. |
| 272 if (state_ == kStopped) |
| 273 return; // Discard the video frame. |
| 274 |
| 275 // Notify the decode engine that a new frame is available. |
| 276 ++pending_requests_; |
| 277 decode_engine_->FillThisBuffer(video_frame); |
| 278 } |
| 279 |
| 280 void FFmpegVideoDecoder::OnEngineFillBufferDone( |
| 281 scoped_refptr<VideoFrame> video_frame) { |
| 282 DCHECK_EQ(MessageLoop::current(), message_loop()); |
| 283 |
| 284 // TODO(jiesun): Flush before stop will prevent this from happening. |
| 285 if (state_ == kStopped) |
| 286 return; // Discard the video frame. |
| 287 |
166 if (video_frame.get()) { | 288 if (video_frame.get()) { |
167 // If we actually got data back, enqueue a frame. | 289 // If we actually got data back, enqueue a frame. |
168 last_pts_ = FindPtsAndDuration(*time_base_, &pts_heap_, last_pts_, | 290 last_pts_ = FindPtsAndDuration(*time_base_, &pts_heap_, last_pts_, |
169 video_frame.get()); | 291 video_frame.get()); |
170 | 292 |
171 video_frame->SetTimestamp(last_pts_.timestamp); | 293 video_frame->SetTimestamp(last_pts_.timestamp); |
172 video_frame->SetDuration(last_pts_.duration); | 294 video_frame->SetDuration(last_pts_.duration); |
173 EnqueueVideoFrame(video_frame); | 295 |
| 296 // Deliver this frame to VideoRenderer. |
| 297 --pending_requests_; |
| 298 fill_buffer_done_callback()->Run(video_frame); |
174 } else { | 299 } else { |
175 // When in kFlushCodec, any errored decode, or a 0-lengthed frame, | 300 // When in kFlushCodec, any errored decode, or a 0-lengthed frame, |
176 // is taken as a signal to stop decoding. | 301 // is taken as a signal to stop decoding. |
177 if (state_ == kFlushCodec) { | 302 if (state_ == kFlushCodec) { |
178 state_ = kDecodeFinished; | 303 state_ = kDecodeFinished; |
179 EnqueueEmptyFrame(); | 304 |
| 305 --pending_requests_; |
| 306 // Signal the end-of-stream event to the VideoRenderer. |
| 307 scoped_refptr<VideoFrame> video_frame; |
| 308 VideoFrame::CreateEmptyFrame(&video_frame); |
| 309 fill_buffer_done_callback()->Run(video_frame); |
180 } | 310 } |
181 } | 311 } |
182 | |
183 OnEmptyBufferDone(NULL); | |
184 } | 312 } |
185 | 313 |
186 void FFmpegVideoDecoder::OnEmptyBufferDone(scoped_refptr<Buffer> buffer) { | 314 void FFmpegVideoDecoder::OnEngineEmptyBufferDone( |
187 // Currently we just ignore the returned buffer. | 315 scoped_refptr<Buffer> buffer) { |
188 DecoderBase<VideoDecoder, VideoFrame>::OnDecodeComplete(); | 316 DCHECK_EQ(MessageLoop::current(), message_loop()); |
189 } | 317 DCHECK_LE(pending_reads_, pending_requests_); |
190 | 318 |
191 void FFmpegVideoDecoder::FillThisBuffer(scoped_refptr<VideoFrame> frame) { | 319 if (state_ != kDecodeFinished) { |
192 DecoderBase<VideoDecoder, VideoFrame>::FillThisBuffer(frame); | 320 demuxer_stream_->Read( |
193 // Notify decode engine the available of new frame. | 321 NewCallback(this, &FFmpegVideoDecoder::OnReadComplete)); |
194 decode_engine_->FillThisBuffer(frame); | 322 ++pending_reads_; |
195 } | 323 } |
196 | |
197 void FFmpegVideoDecoder::EnqueueVideoFrame( | |
198 const scoped_refptr<VideoFrame>& video_frame) { | |
199 EnqueueResult(video_frame); | |
200 } | |
201 | |
202 void FFmpegVideoDecoder::EnqueueEmptyFrame() { | |
203 scoped_refptr<VideoFrame> video_frame; | |
204 VideoFrame::CreateEmptyFrame(&video_frame); | |
205 EnqueueResult(video_frame); | |
206 } | 324 } |
207 | 325 |
208 FFmpegVideoDecoder::TimeTuple FFmpegVideoDecoder::FindPtsAndDuration( | 326 FFmpegVideoDecoder::TimeTuple FFmpegVideoDecoder::FindPtsAndDuration( |
209 const AVRational& time_base, | 327 const AVRational& time_base, |
210 PtsHeap* pts_heap, | 328 PtsHeap* pts_heap, |
211 const TimeTuple& last_pts, | 329 const TimeTuple& last_pts, |
212 const VideoFrame* frame) { | 330 const VideoFrame* frame) { |
213 TimeTuple pts; | 331 TimeTuple pts; |
214 | 332 |
215 // First search the VideoFrame for the pts. This is the most authoritative. | 333 // First search the VideoFrame for the pts. This is the most authoritative. |
(...skipping 31 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
247 duration.ToInternalValue() != 0) { | 365 duration.ToInternalValue() != 0) { |
248 pts.duration = duration; | 366 pts.duration = duration; |
249 } else { | 367 } else { |
250 // Otherwise assume a normal frame duration. | 368 // Otherwise assume a normal frame duration. |
251 pts.duration = ConvertTimestamp(time_base, 1); | 369 pts.duration = ConvertTimestamp(time_base, 1); |
252 } | 370 } |
253 | 371 |
254 return pts; | 372 return pts; |
255 } | 373 } |
256 | 374 |
257 void FFmpegVideoDecoder::SignalPipelineError() { | 375 bool FFmpegVideoDecoder::ProvidesBuffer() { |
258 host()->SetError(PIPELINE_ERROR_DECODE); | 376 if (!decode_engine_.get()) return false; |
259 state_ = kDecodeFinished; | 377 return decode_engine_->ProvidesBuffer(); |
260 } | 378 } |
261 | 379 |
262 void FFmpegVideoDecoder::SetVideoDecodeEngineForTest( | 380 void FFmpegVideoDecoder::SetVideoDecodeEngineForTest( |
263 VideoDecodeEngine* engine) { | 381 VideoDecodeEngine* engine) { |
264 decode_engine_.reset(engine); | 382 decode_engine_.reset(engine); |
265 } | 383 } |
266 | 384 |
267 // static | 385 // static |
268 FilterFactory* FFmpegVideoDecoder::CreateFactory() { | 386 FilterFactory* FFmpegVideoDecoder::CreateFactory() { |
269 return new FilterFactoryImpl1<FFmpegVideoDecoder, FFmpegVideoDecodeEngine*>( | 387 return new FilterFactoryImpl1<FFmpegVideoDecoder, FFmpegVideoDecodeEngine*>( |
270 new FFmpegVideoDecodeEngine()); | 388 new FFmpegVideoDecodeEngine()); |
271 } | 389 } |
272 | 390 |
273 // static | 391 // static |
274 bool FFmpegVideoDecoder::IsMediaFormatSupported(const MediaFormat& format) { | 392 bool FFmpegVideoDecoder::IsMediaFormatSupported(const MediaFormat& format) { |
275 std::string mime_type; | 393 std::string mime_type; |
276 return format.GetAsString(MediaFormat::kMimeType, &mime_type) && | 394 return format.GetAsString(MediaFormat::kMimeType, &mime_type) && |
277 mime_type::kFFmpegVideo == mime_type; | 395 mime_type::kFFmpegVideo == mime_type; |
278 } | 396 } |
279 | 397 |
280 } // namespace media | 398 } // namespace media |
OLD | NEW |