OLD | NEW |
| (Empty) |
1 // Copyright 2014 The Chromium Authors. All rights reserved. | |
2 // Use of this source code is governed by a BSD-style license that can be | |
3 // found in the LICENSE file. | |
4 | |
5 #include "content/common/gpu/media/v4l2_video_decode_accelerator.h" | |
6 | |
7 #include <dlfcn.h> | |
8 #include <errno.h> | |
9 #include <fcntl.h> | |
10 #include <linux/videodev2.h> | |
11 #include <poll.h> | |
12 #include <string.h> | |
13 #include <sys/eventfd.h> | |
14 #include <sys/ioctl.h> | |
15 #include <sys/mman.h> | |
16 | |
17 #include "base/bind.h" | |
18 #include "base/command_line.h" | |
19 #include "base/message_loop/message_loop.h" | |
20 #include "base/numerics/safe_conversions.h" | |
21 #include "base/thread_task_runner_handle.h" | |
22 #include "base/trace_event/trace_event.h" | |
23 #include "build/build_config.h" | |
24 #include "content/common/gpu/media/shared_memory_region.h" | |
25 #include "media/base/bind_to_current_loop.h" | |
26 #include "media/base/media_switches.h" | |
27 #include "media/filters/h264_parser.h" | |
28 #include "ui/gfx/geometry/rect.h" | |
29 #include "ui/gl/gl_context.h" | |
30 #include "ui/gl/scoped_binders.h" | |
31 | |
// Logs the error and transitions the decoder into the error state via
// SetErrorState(). |x| is a media::VideoDecodeAccelerator::Error value.
#define NOTIFY_ERROR(x) \
  do { \
    LOG(ERROR) << "Setting error state:" << x; \
    SetErrorState(x); \
  } while (0)

// Issues device_->Ioctl(type, arg). On failure: logs the error (PLOG includes
// errno), notifies PLATFORM_FAILURE, and returns |value| from the enclosing
// function. |type_str| is the stringified ioctl name used in the log message.
#define IOCTL_OR_ERROR_RETURN_VALUE(type, arg, value, type_str) \
  do { \
    if (device_->Ioctl(type, arg) != 0) { \
      PLOG(ERROR) << __func__ << "(): ioctl() failed: " << type_str; \
      NOTIFY_ERROR(PLATFORM_FAILURE); \
      return value; \
    } \
  } while (0)

// Variant of the above for functions returning void.
#define IOCTL_OR_ERROR_RETURN(type, arg) \
  IOCTL_OR_ERROR_RETURN_VALUE(type, arg, ((void)0), #type)

// Variant of the above for functions returning bool (returns false).
#define IOCTL_OR_ERROR_RETURN_FALSE(type, arg) \
  IOCTL_OR_ERROR_RETURN_VALUE(type, arg, false, #type)

// Issues the ioctl and only logs on failure; does not change decoder state
// and does not return early.
#define IOCTL_OR_LOG_ERROR(type, arg) \
  do { \
    if (device_->Ioctl(type, arg) != 0) \
      PLOG(ERROR) << __func__ << "(): ioctl() failed: " << #type; \
  } while (0)
58 | |
59 namespace content { | |
60 | |
// static
// V4L2 pixel formats accepted on the input (bitstream) side: H.264, VP8 and
// VP9 elementary streams. Checked against the device in Initialize() and
// GetSupportedProfiles().
const uint32_t V4L2VideoDecodeAccelerator::supported_input_fourccs_[] = {
    V4L2_PIX_FMT_H264, V4L2_PIX_FMT_VP8, V4L2_PIX_FMT_VP9,
};
65 | |
// Tracks one client-provided bitstream buffer for the lifetime of its
// decoding. The destructor notifies the client that the buffer has been
// consumed (see ~BitstreamBufferRef below).
struct V4L2VideoDecodeAccelerator::BitstreamBufferRef {
  BitstreamBufferRef(
      base::WeakPtr<Client>& client,
      scoped_refptr<base::SingleThreadTaskRunner>& client_task_runner,
      std::unique_ptr<SharedMemoryRegion> shm,
      int32_t input_id);
  ~BitstreamBufferRef();
  const base::WeakPtr<Client> client;
  const scoped_refptr<base::SingleThreadTaskRunner> client_task_runner;
  // Shared memory backing the bitstream data; null for internally generated
  // (e.g. flush) buffers.
  const std::unique_ptr<SharedMemoryRegion> shm;
  // Number of bytes of |shm| already consumed by the decoder.
  size_t bytes_used;
  // Client-assigned id; negative ids are internal and never reported back.
  const int32_t input_id;
};
79 | |
// Owns an EGLSyncKHR fence; the destructor destroys any remaining sync
// object (see ~EGLSyncKHRRef below).
struct V4L2VideoDecodeAccelerator::EGLSyncKHRRef {
  EGLSyncKHRRef(EGLDisplay egl_display, EGLSyncKHR egl_sync);
  ~EGLSyncKHRRef();
  EGLDisplay const egl_display;
  EGLSyncKHR egl_sync;
};
86 | |
// A decoded picture paired with its texture-cleared state, queued until it
// can be sent to the client.
struct V4L2VideoDecodeAccelerator::PictureRecord {
  PictureRecord(bool cleared, const media::Picture& picture);
  ~PictureRecord();
  bool cleared;  // Whether the texture is cleared and safe to render from.
  media::Picture picture;  // The decoded picture.
};
93 | |
V4L2VideoDecodeAccelerator::BitstreamBufferRef::BitstreamBufferRef(
    base::WeakPtr<Client>& client,
    scoped_refptr<base::SingleThreadTaskRunner>& client_task_runner,
    std::unique_ptr<SharedMemoryRegion> shm,
    int32_t input_id)
    : client(client),
      client_task_runner(client_task_runner),
      shm(std::move(shm)),
      bytes_used(0),
      input_id(input_id) {}

V4L2VideoDecodeAccelerator::BitstreamBufferRef::~BitstreamBufferRef() {
  // Notify the client that this bitstream buffer has been consumed, but only
  // for real client buffers (internal buffers use negative ids).
  if (input_id >= 0) {
    client_task_runner->PostTask(
        FROM_HERE,
        base::Bind(&Client::NotifyEndOfBitstreamBuffer, client, input_id));
  }
}
112 | |
V4L2VideoDecodeAccelerator::EGLSyncKHRRef::EGLSyncKHRRef(
    EGLDisplay egl_display, EGLSyncKHR egl_sync)
    : egl_display(egl_display),
      egl_sync(egl_sync) {
}

V4L2VideoDecodeAccelerator::EGLSyncKHRRef::~EGLSyncKHRRef() {
  // We don't check for eglDestroySyncKHR failures, because if we get here
  // with a valid sync object, something went wrong and we are getting
  // destroyed anyway.
  if (egl_sync != EGL_NO_SYNC_KHR)
    eglDestroySyncKHR(egl_display, egl_sync);
}
126 | |
127 V4L2VideoDecodeAccelerator::InputRecord::InputRecord() | |
128 : at_device(false), | |
129 address(NULL), | |
130 length(0), | |
131 bytes_used(0), | |
132 input_id(-1) { | |
133 } | |
134 | |
135 V4L2VideoDecodeAccelerator::InputRecord::~InputRecord() { | |
136 } | |
137 | |
// Initializes an output (picture) buffer record: free, with no EGL image,
// no pending sync fence, and no assigned picture buffer id.
V4L2VideoDecodeAccelerator::OutputRecord::OutputRecord()
    : state(kFree),
      egl_image(EGL_NO_IMAGE_KHR),
      egl_sync(EGL_NO_SYNC_KHR),
      picture_id(-1),
      cleared(false) {
}

V4L2VideoDecodeAccelerator::OutputRecord::~OutputRecord() {}
147 | |
V4L2VideoDecodeAccelerator::PictureRecord::PictureRecord(
    bool cleared,
    const media::Picture& picture)
    : cleared(cleared), picture(picture) {}

V4L2VideoDecodeAccelerator::PictureRecord::~PictureRecord() {}
154 | |
// Constructs the accelerator on the child (GPU main) thread. All members are
// initialized to their idle defaults; real setup happens in Initialize().
// The initializer list order must match the member declaration order.
V4L2VideoDecodeAccelerator::V4L2VideoDecodeAccelerator(
    EGLDisplay egl_display,
    const GetGLContextCallback& get_gl_context_cb,
    const MakeGLContextCurrentCallback& make_context_current_cb,
    const scoped_refptr<V4L2Device>& device)
    : child_task_runner_(base::ThreadTaskRunnerHandle::Get()),
      decoder_thread_("V4L2DecoderThread"),
      decoder_state_(kUninitialized),
      device_(device),
      decoder_delay_bitstream_buffer_id_(-1),
      decoder_current_input_buffer_(-1),
      decoder_decode_buffer_tasks_scheduled_(0),
      decoder_frames_at_client_(0),
      decoder_flushing_(false),
      resolution_change_reset_pending_(false),
      decoder_partial_frame_pending_(false),
      input_streamon_(false),
      input_buffer_queued_count_(0),
      output_streamon_(false),
      output_buffer_queued_count_(0),
      output_dpb_size_(0),
      output_planes_count_(0),
      picture_clearing_count_(0),
      pictures_assigned_(false, false),
      device_poll_thread_("V4L2DevicePollThread"),
      egl_display_(egl_display),
      get_gl_context_cb_(get_gl_context_cb),
      make_context_current_cb_(make_context_current_cb),
      video_profile_(media::VIDEO_CODEC_PROFILE_UNKNOWN),
      output_format_fourcc_(0),
      egl_image_format_fourcc_(0),
      egl_image_planes_count_(0),
      weak_this_factory_(this) {
  // Cache a weak pointer to hand out to other threads.
  weak_this_ = weak_this_factory_.GetWeakPtr();
}
190 | |
// Destructor. Both worker threads must already be stopped (Destroy() takes
// care of that), so it is safe to tear down the buffer maps here.
V4L2VideoDecodeAccelerator::~V4L2VideoDecodeAccelerator() {
  DCHECK(!decoder_thread_.IsRunning());
  DCHECK(!device_poll_thread_.IsRunning());

  DestroyInputBuffers();
  DestroyOutputBuffers();

  // These maps have members that should be manually destroyed, e.g. file
  // descriptors, mmap() segments, etc.
  DCHECK(input_buffer_map_.empty());
  DCHECK(output_buffer_map_.empty());
}
203 | |
// Initializes the decoder on the child thread: validates the config, checks
// device capabilities, negotiates formats, allocates input buffers, and
// starts the decoder thread and device polling. Returns false on any
// failure (some paths also NOTIFY_ERROR via the IOCTL macros).
bool V4L2VideoDecodeAccelerator::Initialize(const Config& config,
                                            Client* client) {
  DVLOG(3) << "Initialize()";
  DCHECK(child_task_runner_->BelongsToCurrentThread());
  DCHECK_EQ(decoder_state_, kUninitialized);

  // Reject profiles the V4L2 device cannot decode for our supported fourccs.
  if (!device_->SupportsDecodeProfileForV4L2PixelFormats(
          config.profile, arraysize(supported_input_fourccs_),
          supported_input_fourccs_)) {
    DVLOG(1) << "Initialize(): unsupported profile=" << config.profile;
    return false;
  }

  if (config.is_encrypted) {
    NOTREACHED() << "Encrypted streams are not supported for this VDA";
    return false;
  }

  if (config.output_mode != Config::OutputMode::ALLOCATE) {
    NOTREACHED() << "Only ALLOCATE OutputMode is supported by this VDA";
    return false;
  }

  if (get_gl_context_cb_.is_null() || make_context_current_cb_.is_null()) {
    NOTREACHED() << "GL callbacks are required for this VDA";
    return false;
  }

  client_ptr_factory_.reset(new base::WeakPtrFactory<Client>(client));
  client_ = client_ptr_factory_->GetWeakPtr();
  // If we haven't been set up to decode on separate thread via
  // TryToSetupDecodeOnSeparateThread(), use the main thread/client for
  // decode tasks.
  if (!decode_task_runner_) {
    decode_task_runner_ = child_task_runner_;
    DCHECK(!decode_client_);
    decode_client_ = client_;
  }

  video_profile_ = config.profile;

  if (egl_display_ == EGL_NO_DISPLAY) {
    LOG(ERROR) << "Initialize(): could not get EGLDisplay";
    return false;
  }

  // We need the context to be initialized to query extensions.
  if (!make_context_current_cb_.Run()) {
    LOG(ERROR) << "Initialize(): could not make context current";
    return false;
  }

  // TODO(posciak): crbug.com/450898.
#if defined(ARCH_CPU_ARMEL)
  // ReusePictureBuffer() relies on EGL fence syncs on ARM devices.
  if (!gfx::g_driver_egl.ext.b_EGL_KHR_fence_sync) {
    LOG(ERROR) << "Initialize(): context does not have EGL_KHR_fence_sync";
    return false;
  }
#endif

  // Capabilities check: the device must be a multi-planar mem2mem device
  // supporting streaming I/O.
  struct v4l2_capability caps;
  const __u32 kCapsRequired = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING;
  IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_QUERYCAP, &caps);
  if ((caps.capabilities & kCapsRequired) != kCapsRequired) {
    LOG(ERROR) << "Initialize(): ioctl() failed: VIDIOC_QUERYCAP"
        ", caps check failed: 0x" << std::hex << caps.capabilities;
    return false;
  }

  if (!SetupFormats())
    return false;

  // Subscribe to the resolution change event.
  struct v4l2_event_subscription sub;
  memset(&sub, 0, sizeof(sub));
  sub.type = V4L2_EVENT_SOURCE_CHANGE;
  IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_SUBSCRIBE_EVENT, &sub);

  // H.264 input needs frame-boundary parsing (see AdvanceFrameFragment).
  if (video_profile_ >= media::H264PROFILE_MIN &&
      video_profile_ <= media::H264PROFILE_MAX) {
    decoder_h264_parser_.reset(new media::H264Parser());
  }

  if (!CreateInputBuffers())
    return false;

  if (!decoder_thread_.Start()) {
    LOG(ERROR) << "Initialize(): decoder thread failed to start";
    return false;
  }

  decoder_state_ = kInitialized;

  // StartDevicePoll will NOTIFY_ERROR on failure, so IgnoreResult is fine here.
  decoder_thread_.message_loop()->PostTask(
      FROM_HERE,
      base::Bind(
          base::IgnoreResult(&V4L2VideoDecodeAccelerator::StartDevicePoll),
          base::Unretained(this)));

  return true;
}
307 | |
// Client entry point: accepts a bitstream buffer and forwards it to the
// decoder thread. Runs on the decode task runner (child thread by default).
void V4L2VideoDecodeAccelerator::Decode(
    const media::BitstreamBuffer& bitstream_buffer) {
  DVLOG(1) << "Decode(): input_id=" << bitstream_buffer.id()
           << ", size=" << bitstream_buffer.size();
  DCHECK(decode_task_runner_->BelongsToCurrentThread());

  if (bitstream_buffer.id() < 0) {
    LOG(ERROR) << "Invalid bitstream_buffer, id: " << bitstream_buffer.id();
    // Close the handle here since it won't be wrapped in a
    // BitstreamBufferRef that would otherwise own it.
    if (base::SharedMemory::IsHandleValid(bitstream_buffer.handle()))
      base::SharedMemory::CloseHandle(bitstream_buffer.handle());
    NOTIFY_ERROR(INVALID_ARGUMENT);
    return;
  }

  // DecodeTask() will take care of running a DecodeBufferTask().
  decoder_thread_.message_loop()->PostTask(FROM_HERE, base::Bind(
      &V4L2VideoDecodeAccelerator::DecodeTask, base::Unretained(this),
      bitstream_buffer));
}
327 | |
// Client entry point: receives the picture buffers requested earlier,
// allocates matching V4L2 CAPTURE buffers, wires up the optional image
// processor, creates one EGLImage per buffer, and finally wakes the decoder
// thread (which is blocked on |pictures_assigned_|). Runs on the child
// thread because EGLImage creation needs the GL context.
void V4L2VideoDecodeAccelerator::AssignPictureBuffers(
    const std::vector<media::PictureBuffer>& buffers) {
  DVLOG(3) << "AssignPictureBuffers(): buffer_count=" << buffers.size();
  DCHECK(child_task_runner_->BelongsToCurrentThread());

  // We need the DPB size plus headroom for in-flight/displayed pictures.
  const uint32_t req_buffer_count =
      output_dpb_size_ + kDpbOutputBufferExtraCount;

  if (buffers.size() < req_buffer_count) {
    LOG(ERROR) << "AssignPictureBuffers(): Failed to provide requested picture"
                  " buffers. (Got " << buffers.size()
               << ", requested " << req_buffer_count << ")";
    NOTIFY_ERROR(INVALID_ARGUMENT);
    return;
  }

  gfx::GLContext* gl_context = get_gl_context_cb_.Run();
  if (!gl_context || !make_context_current_cb_.Run()) {
    LOG(ERROR) << "AssignPictureBuffers(): could not make context current";
    NOTIFY_ERROR(PLATFORM_FAILURE);
    return;
  }

  gfx::ScopedTextureBinder bind_restore(GL_TEXTURE_EXTERNAL_OES, 0);

  // It's safe to manipulate all the buffer state here, because the decoder
  // thread is waiting on pictures_assigned_.

  // Allocate the output buffers.
  struct v4l2_requestbuffers reqbufs;
  memset(&reqbufs, 0, sizeof(reqbufs));
  reqbufs.count = buffers.size();
  reqbufs.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
  reqbufs.memory = V4L2_MEMORY_MMAP;
  IOCTL_OR_ERROR_RETURN(VIDIOC_REQBUFS, &reqbufs);

  // The driver may grant fewer buffers than requested; that is fatal here.
  if (reqbufs.count != buffers.size()) {
    DLOG(ERROR) << "Could not allocate enough output buffers";
    NOTIFY_ERROR(PLATFORM_FAILURE);
    return;
  }

  // If decoder output must be converted before display, set up the image
  // processor between the decoder's output format and the EGLImage format.
  if (image_processor_device_) {
    DCHECK(!image_processor_);
    image_processor_.reset(new V4L2ImageProcessor(image_processor_device_));
    // Unretained is safe because |this| owns image processor and there will be
    // no callbacks after processor destroys.
    if (!image_processor_->Initialize(
            V4L2Device::V4L2PixFmtToVideoPixelFormat(output_format_fourcc_),
            V4L2Device::V4L2PixFmtToVideoPixelFormat(egl_image_format_fourcc_),
            V4L2_MEMORY_DMABUF, visible_size_, coded_size_, visible_size_,
            visible_size_, buffers.size(),
            base::Bind(&V4L2VideoDecodeAccelerator::ImageProcessorError,
                       base::Unretained(this)))) {
      LOG(ERROR) << "Initialize image processor failed";
      NOTIFY_ERROR(PLATFORM_FAILURE);
      return;
    }
    DCHECK(image_processor_->output_allocated_size() == egl_image_size_);
    if (image_processor_->input_allocated_size() != coded_size_) {
      LOG(ERROR) << "Image processor should be able to take the output coded "
                 << "size of decoder " << coded_size_.ToString()
                 << " without adjusting to "
                 << image_processor_->input_allocated_size().ToString();
      NOTIFY_ERROR(PLATFORM_FAILURE);
      return;
    }
  }

  output_buffer_map_.resize(buffers.size());

  DCHECK(free_output_buffers_.empty());
  for (size_t i = 0; i < output_buffer_map_.size(); ++i) {
    DCHECK(buffers[i].size() == egl_image_size_);

    OutputRecord& output_record = output_buffer_map_[i];
    DCHECK_EQ(output_record.state, kFree);
    DCHECK_EQ(output_record.egl_image, EGL_NO_IMAGE_KHR);
    DCHECK_EQ(output_record.egl_sync, EGL_NO_SYNC_KHR);
    DCHECK_EQ(output_record.picture_id, -1);
    DCHECK_EQ(output_record.cleared, false);
    DCHECK_LE(1u, buffers[i].texture_ids().size());

    gfx::Size egl_image_size;
    // With an image processor, the decoder's own dmabufs feed the processor;
    // keep them in the output record.
    if (image_processor_device_) {
      std::vector<base::ScopedFD> fds = device_->GetDmabufsForV4L2Buffer(
          i, output_planes_count_, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
      if (fds.empty()) {
        LOG(ERROR) << "Failed to get DMABUFs of decoder.";
        NOTIFY_ERROR(PLATFORM_FAILURE);
        return;
      }
      output_record.fds = std::move(fds);
    }

    // The EGLImage is backed by the egl_image_device_'s dmabufs (which is the
    // image processor's output when one is present).
    std::vector<base::ScopedFD> dmabuf_fds;
    dmabuf_fds = egl_image_device_->GetDmabufsForV4L2Buffer(
        i, egl_image_planes_count_, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
    if (dmabuf_fds.empty()) {
      LOG(ERROR) << "Failed to get DMABUFs for EGLImage.";
      NOTIFY_ERROR(PLATFORM_FAILURE);
      return;
    }

    EGLImageKHR egl_image = egl_image_device_->CreateEGLImage(
        egl_display_, gl_context->GetHandle(), buffers[i].texture_ids()[0],
        egl_image_size_, i, egl_image_format_fourcc_, dmabuf_fds);
    if (egl_image == EGL_NO_IMAGE_KHR) {
      LOG(ERROR) << "AssignPictureBuffers(): could not create EGLImageKHR";
      // Ownership of EGLImages allocated in previous iterations of this loop
      // has been transferred to output_buffer_map_. After we error-out here
      // the destructor will handle their cleanup.
      NOTIFY_ERROR(PLATFORM_FAILURE);
      return;
    }

    output_record.egl_image = egl_image;
    output_record.picture_id = buffers[i].id();
    free_output_buffers_.push(i);
    DVLOG(3) << "AssignPictureBuffers(): buffer[" << i
             << "]: picture_id=" << output_record.picture_id;
  }

  // Unblock the decoder thread, which waits for the buffers in
  // AssignPictureBuffersTask/resolution-change handling.
  pictures_assigned_.Signal();
}
453 | |
// Client entry point: the client is done displaying |picture_buffer_id|.
// Inserts an EGL fence (on ARM) so the decoder does not reuse the buffer
// before the GPU has finished reading it, then hands off to the decoder
// thread.
void V4L2VideoDecodeAccelerator::ReusePictureBuffer(int32_t picture_buffer_id) {
  DVLOG(3) << "ReusePictureBuffer(): picture_buffer_id=" << picture_buffer_id;
  // Must be run on child thread, as we'll insert a sync in the EGL context.
  DCHECK(child_task_runner_->BelongsToCurrentThread());

  if (!make_context_current_cb_.Run()) {
    LOG(ERROR) << "ReusePictureBuffer(): could not make context current";
    NOTIFY_ERROR(PLATFORM_FAILURE);
    return;
  }

  EGLSyncKHR egl_sync = EGL_NO_SYNC_KHR;
// TODO(posciak): crbug.com/450898.
#if defined(ARCH_CPU_ARMEL)
  egl_sync = eglCreateSyncKHR(egl_display_, EGL_SYNC_FENCE_KHR, NULL);
  if (egl_sync == EGL_NO_SYNC_KHR) {
    LOG(ERROR) << "ReusePictureBuffer(): eglCreateSyncKHR() failed";
    NOTIFY_ERROR(PLATFORM_FAILURE);
    return;
  }
#endif

  // The ref owns the sync; ReusePictureBufferTask waits on / destroys it.
  std::unique_ptr<EGLSyncKHRRef> egl_sync_ref(
      new EGLSyncKHRRef(egl_display_, egl_sync));
  decoder_thread_.message_loop()->PostTask(FROM_HERE, base::Bind(
      &V4L2VideoDecodeAccelerator::ReusePictureBufferTask,
      base::Unretained(this), picture_buffer_id, base::Passed(&egl_sync_ref)));
}
482 | |
483 void V4L2VideoDecodeAccelerator::Flush() { | |
484 DVLOG(3) << "Flush()"; | |
485 DCHECK(child_task_runner_->BelongsToCurrentThread()); | |
486 decoder_thread_.message_loop()->PostTask(FROM_HERE, base::Bind( | |
487 &V4L2VideoDecodeAccelerator::FlushTask, base::Unretained(this))); | |
488 } | |
489 | |
490 void V4L2VideoDecodeAccelerator::Reset() { | |
491 DVLOG(3) << "Reset()"; | |
492 DCHECK(child_task_runner_->BelongsToCurrentThread()); | |
493 decoder_thread_.message_loop()->PostTask(FROM_HERE, base::Bind( | |
494 &V4L2VideoDecodeAccelerator::ResetTask, base::Unretained(this))); | |
495 } | |
496 | |
// Client entry point: tears everything down and deletes |this|. After this
// returns, no callbacks will reach the client.
void V4L2VideoDecodeAccelerator::Destroy() {
  DVLOG(3) << "Destroy()";
  DCHECK(child_task_runner_->BelongsToCurrentThread());

  // We're destroying; cancel all callbacks.
  client_ptr_factory_.reset();
  weak_this_factory_.InvalidateWeakPtrs();

  // If the decoder thread is running, destroy using posted task.
  if (decoder_thread_.IsRunning()) {
    decoder_thread_.message_loop()->PostTask(FROM_HERE, base::Bind(
        &V4L2VideoDecodeAccelerator::DestroyTask, base::Unretained(this)));
    // Wake the decoder thread in case it is blocked waiting for picture
    // buffers, so DestroyTask() can run.
    pictures_assigned_.Signal();
    // DestroyTask() will cause the decoder_thread_ to flush all tasks.
    decoder_thread_.Stop();
  } else {
    // Otherwise, call the destroy task directly.
    DestroyTask();
  }

  delete this;
}
519 | |
520 bool V4L2VideoDecodeAccelerator::TryToSetupDecodeOnSeparateThread( | |
521 const base::WeakPtr<Client>& decode_client, | |
522 const scoped_refptr<base::SingleThreadTaskRunner>& decode_task_runner) { | |
523 decode_client_ = decode_client_; | |
524 decode_task_runner_ = decode_task_runner; | |
525 return true; | |
526 } | |
527 | |
// Returns the pixel format of the pictures handed to the client, i.e. the
// format backing the EGLImages (not necessarily the decoder's native output
// format when an image processor is in use).
media::VideoPixelFormat V4L2VideoDecodeAccelerator::GetOutputFormat() const {
  return V4L2Device::V4L2PixFmtToVideoPixelFormat(egl_image_format_fourcc_);
}
531 | |
// static
// Enumerates the decode profiles the V4L2 decoder device supports for our
// input fourccs. Returns an empty list when no decoder device is present.
media::VideoDecodeAccelerator::SupportedProfiles
V4L2VideoDecodeAccelerator::GetSupportedProfiles() {
  scoped_refptr<V4L2Device> device = V4L2Device::Create(V4L2Device::kDecoder);
  if (!device)
    return SupportedProfiles();

  return device->GetSupportedDecodeProfiles(arraysize(supported_input_fourccs_),
                                            supported_input_fourccs_);
}
542 | |
// Decoder-thread half of Decode(): wraps the bitstream buffer, maps it, and
// queues it for decoding (possibly delayed if a reset/flush is in progress).
void V4L2VideoDecodeAccelerator::DecodeTask(
    const media::BitstreamBuffer& bitstream_buffer) {
  DVLOG(3) << "DecodeTask(): input_id=" << bitstream_buffer.id();
  DCHECK_EQ(decoder_thread_.message_loop(), base::MessageLoop::current());
  DCHECK_NE(decoder_state_, kUninitialized);
  TRACE_EVENT1("Video Decoder", "V4L2VDA::DecodeTask", "input_id",
               bitstream_buffer.id());

  // BitstreamBufferRef's destructor notifies the client the buffer was
  // consumed, so even early returns below report back correctly.
  std::unique_ptr<BitstreamBufferRef> bitstream_record(new BitstreamBufferRef(
      decode_client_, decode_task_runner_,
      std::unique_ptr<SharedMemoryRegion>(
          new SharedMemoryRegion(bitstream_buffer, true)),
      bitstream_buffer.id()));

  // Skip empty buffer.
  if (bitstream_buffer.size() == 0)
    return;

  if (!bitstream_record->shm->Map()) {
    LOG(ERROR) << "Decode(): could not map bitstream_buffer";
    NOTIFY_ERROR(UNREADABLE_INPUT);
    return;
  }
  DVLOG(3) << "DecodeTask(): mapped at=" << bitstream_record->shm->memory();

  if (decoder_state_ == kResetting || decoder_flushing_) {
    // In the case that we're resetting or flushing, we need to delay decoding
    // the BitstreamBuffers that come after the Reset() or Flush() call. When
    // we're here, we know that this DecodeTask() was scheduled by a Decode()
    // call that came after (in the client thread) the Reset() or Flush() call;
    // thus set up the delay if necessary.
    if (decoder_delay_bitstream_buffer_id_ == -1)
      decoder_delay_bitstream_buffer_id_ = bitstream_record->input_id;
  } else if (decoder_state_ == kError) {
    DVLOG(2) << "DecodeTask(): early out: kError state";
    return;
  }

  decoder_input_queue_.push(
      linked_ptr<BitstreamBufferRef>(bitstream_record.release()));
  decoder_decode_buffer_tasks_scheduled_++;
  DecodeBufferTask();
}
586 | |
// Decoder-thread workhorse: pops the next bitstream buffer (if none is in
// progress), decodes as much of it as the pipeline allows, and reschedules
// itself while work remains. Flush is driven through here via a dummy
// (null-shm) buffer with input_id == kFlushBufferId.
void V4L2VideoDecodeAccelerator::DecodeBufferTask() {
  DVLOG(3) << "DecodeBufferTask()";
  DCHECK_EQ(decoder_thread_.message_loop(), base::MessageLoop::current());
  DCHECK_NE(decoder_state_, kUninitialized);
  TRACE_EVENT0("Video Decoder", "V4L2VDA::DecodeBufferTask");

  decoder_decode_buffer_tasks_scheduled_--;

  if (decoder_state_ == kResetting) {
    DVLOG(2) << "DecodeBufferTask(): early out: kResetting state";
    return;
  } else if (decoder_state_ == kError) {
    DVLOG(2) << "DecodeBufferTask(): early out: kError state";
    return;
  } else if (decoder_state_ == kChangingResolution) {
    DVLOG(2) << "DecodeBufferTask(): early out: resolution change pending";
    return;
  }

  if (decoder_current_bitstream_buffer_ == NULL) {
    if (decoder_input_queue_.empty()) {
      // We're waiting for a new buffer -- exit without scheduling a new task.
      return;
    }
    linked_ptr<BitstreamBufferRef>& buffer_ref = decoder_input_queue_.front();
    if (decoder_delay_bitstream_buffer_id_ == buffer_ref->input_id) {
      // We're asked to delay decoding on this and subsequent buffers.
      return;
    }

    // Setup to use the next buffer.
    decoder_current_bitstream_buffer_.reset(buffer_ref.release());
    decoder_input_queue_.pop();
    const auto& shm = decoder_current_bitstream_buffer_->shm;
    if (shm) {
      DVLOG(3) << "DecodeBufferTask(): reading input_id="
               << decoder_current_bitstream_buffer_->input_id
               << ", addr=" << shm->memory() << ", size=" << shm->size();
    } else {
      // Null shm marks the internal flush marker buffer.
      DCHECK_EQ(decoder_current_bitstream_buffer_->input_id, kFlushBufferId);
      DVLOG(3) << "DecodeBufferTask(): reading input_id=kFlushBufferId";
    }
  }
  bool schedule_task = false;
  size_t decoded_size = 0;
  const auto& shm = decoder_current_bitstream_buffer_->shm;
  if (!shm) {
    // This is a dummy buffer, queued to flush the pipe. Flush.
    DCHECK_EQ(decoder_current_bitstream_buffer_->input_id, kFlushBufferId);
    // Enqueue a buffer guaranteed to be empty. To do that, we flush the
    // current input, enqueue no data to the next frame, then flush that down.
    schedule_task = true;
    if (decoder_current_input_buffer_ != -1 &&
        input_buffer_map_[decoder_current_input_buffer_].input_id !=
            kFlushBufferId)
      schedule_task = FlushInputFrame();

    if (schedule_task && AppendToInputFrame(NULL, 0) && FlushInputFrame()) {
      DVLOG(2) << "DecodeBufferTask(): enqueued flush buffer";
      decoder_partial_frame_pending_ = false;
      schedule_task = true;
    } else {
      // If we failed to enqueue the empty buffer (due to pipeline
      // backpressure), don't advance the bitstream buffer queue, and don't
      // schedule the next task.  This bitstream buffer queue entry will get
      // reprocessed when the pipeline frees up.
      schedule_task = false;
    }
  } else if (shm->size() == 0) {
    // This is a buffer queued from the client that has zero size.  Skip.
    schedule_task = true;
  } else {
    // This is a buffer queued from the client, with actual contents.  Decode.
    // Resume from where the previous partial decode left off (bytes_used).
    const uint8_t* const data =
        reinterpret_cast<const uint8_t*>(shm->memory()) +
        decoder_current_bitstream_buffer_->bytes_used;
    const size_t data_size =
        shm->size() - decoder_current_bitstream_buffer_->bytes_used;
    if (!AdvanceFrameFragment(data, data_size, &decoded_size)) {
      NOTIFY_ERROR(UNREADABLE_INPUT);
      return;
    }
    // AdvanceFrameFragment should not return a size larger than the buffer
    // size, even on invalid data.
    CHECK_LE(decoded_size, data_size);

    switch (decoder_state_) {
      case kInitialized:
      case kAfterReset:
        schedule_task = DecodeBufferInitial(data, decoded_size, &decoded_size);
        break;
      case kDecoding:
        schedule_task = DecodeBufferContinue(data, decoded_size);
        break;
      default:
        NOTIFY_ERROR(ILLEGAL_STATE);
        return;
    }
  }
  if (decoder_state_ == kError) {
    // Failed during decode.
    return;
  }

  if (schedule_task) {
    decoder_current_bitstream_buffer_->bytes_used += decoded_size;
    if ((shm ? shm->size() : 0) ==
        decoder_current_bitstream_buffer_->bytes_used) {
      // Our current bitstream buffer is done; return it.
      int32_t input_id = decoder_current_bitstream_buffer_->input_id;
      DVLOG(3) << "DecodeBufferTask(): finished input_id=" << input_id;
      // BitstreamBufferRef destructor calls NotifyEndOfBitstreamBuffer().
      decoder_current_bitstream_buffer_.reset();
    }
    ScheduleDecodeBufferTaskIfNeeded();
  }
}
704 | |
// Determines how much of |data| (up to |size| bytes) to submit to the device
// as the current frame, writing the cut point to |*endpos|. For H.264 this
// parses NAL units to find a frame boundary and may leave a partial frame
// pending; for VP8/VP9 each buffer is exactly one frame. Returns false on
// unparseable input.
bool V4L2VideoDecodeAccelerator::AdvanceFrameFragment(const uint8_t* data,
                                                      size_t size,
                                                      size_t* endpos) {
  if (video_profile_ >= media::H264PROFILE_MIN &&
      video_profile_ <= media::H264PROFILE_MAX) {
    // For H264, we need to feed HW one frame at a time.  This is going to take
    // some parsing of our input stream.
    decoder_h264_parser_->SetStream(data, size);
    media::H264NALU nalu;
    media::H264Parser::Result result;
    *endpos = 0;

    // Keep on peeking the next NALs while they don't indicate a frame
    // boundary.
    for (;;) {
      bool end_of_frame = false;
      result = decoder_h264_parser_->AdvanceToNextNALU(&nalu);
      if (result == media::H264Parser::kInvalidStream ||
          result == media::H264Parser::kUnsupportedStream)
        return false;
      if (result == media::H264Parser::kEOStream) {
        // We've reached the end of the buffer before finding a frame boundary.
        decoder_partial_frame_pending_ = true;
        return true;
      }
      switch (nalu.nal_unit_type) {
        case media::H264NALU::kNonIDRSlice:
        case media::H264NALU::kIDRSlice:
          // NOTE(review): the guard checks size < 1 but data[1] is read below,
          // which needs size >= 2 — presumably the parser never emits a
          // one-byte slice NALU; confirm against H264Parser.
          if (nalu.size < 1)
            return false;
          // For these two, if the "first_mb_in_slice" field is zero, start a
          // new frame and return.  This field is Exp-Golomb coded starting on
          // the eighth data bit of the NAL; a zero value is encoded with a
          // leading '1' bit in the byte, which we can detect as the byte being
          // (unsigned) greater than or equal to 0x80.
          if (nalu.data[1] >= 0x80) {
            end_of_frame = true;
            break;
          }
          break;
        case media::H264NALU::kSEIMessage:
        case media::H264NALU::kSPS:
        case media::H264NALU::kPPS:
        case media::H264NALU::kAUD:
        case media::H264NALU::kEOSeq:
        case media::H264NALU::kEOStream:
        case media::H264NALU::kReserved14:
        case media::H264NALU::kReserved15:
        case media::H264NALU::kReserved16:
        case media::H264NALU::kReserved17:
        case media::H264NALU::kReserved18:
          // These unconditionally signal a frame boundary.
          end_of_frame = true;
          break;
        default:
          // For all others, keep going.
          break;
      }
      if (end_of_frame) {
        if (!decoder_partial_frame_pending_ && *endpos == 0) {
          // The frame was previously restarted, and we haven't filled the
          // current frame with any contents yet.  Start the new frame here and
          // continue parsing NALs.
        } else {
          // The frame wasn't previously restarted and/or we have contents for
          // the current frame; signal the start of a new frame here: we don't
          // have a partial frame anymore.
          decoder_partial_frame_pending_ = false;
          return true;
        }
      }
      // Include this NALU in the current frame and keep scanning.
      *endpos = (nalu.data + nalu.size) - data;
    }
    NOTREACHED();
    return false;
  } else {
    DCHECK_GE(video_profile_, media::VP8PROFILE_MIN);
    DCHECK_LE(video_profile_, media::VP9PROFILE_MAX);
    // For VP8/9, we can just dump the entire buffer.  No fragmentation needed,
    // and we never return a partial frame.
    *endpos = size;
    decoder_partial_frame_pending_ = false;
    return true;
  }
}
790 | |
// Posts another DecodeBufferTask() if there are more buffers (queued or
// in progress) than tasks already scheduled to handle them.
void V4L2VideoDecodeAccelerator::ScheduleDecodeBufferTaskIfNeeded() {
  DCHECK_EQ(decoder_thread_.message_loop(), base::MessageLoop::current());

  // If we're behind on tasks, schedule another one.
  int buffers_to_decode = decoder_input_queue_.size();
  if (decoder_current_bitstream_buffer_ != NULL)
    buffers_to_decode++;
  if (decoder_decode_buffer_tasks_scheduled_ < buffers_to_decode) {
    decoder_decode_buffer_tasks_scheduled_++;
    decoder_thread_.message_loop()->PostTask(FROM_HERE, base::Bind(
        &V4L2VideoDecodeAccelerator::DecodeBufferTask,
        base::Unretained(this)));
  }
}
805 | |
// Feeds |data| to the device while the coded-stream format is not yet
// known (states other than kDecoding). Appends/flushes the input,
// recycles device buffers, and probes the driver for format info; once
// the format is available on first startup, allocates output buffers and
// transitions to kDecoding. On success *endpos is set to |size| (the
// whole input is considered consumed). Returns false on error.
bool V4L2VideoDecodeAccelerator::DecodeBufferInitial(
    const void* data, size_t size, size_t* endpos) {
  DVLOG(3) << "DecodeBufferInitial(): data=" << data << ", size=" << size;
  DCHECK_EQ(decoder_thread_.message_loop(), base::MessageLoop::current());
  DCHECK_NE(decoder_state_, kUninitialized);
  DCHECK_NE(decoder_state_, kDecoding);
  // Initial decode. We haven't been able to get output stream format info yet.
  // Get it, and start decoding.

  // Copy in and send to HW.
  if (!AppendToInputFrame(data, size))
    return false;

  // If we only have a partial frame, don't flush and process yet.
  if (decoder_partial_frame_pending_)
    return true;

  if (!FlushInputFrame())
    return false;

  // Recycle buffers.
  Dequeue();

  // Check and see if we have format info yet.
  struct v4l2_format format;
  gfx::Size visible_size;
  bool again = false;
  if (!GetFormatInfo(&format, &visible_size, &again))
    return false;

  // Input was consumed either way; report that to the caller.
  *endpos = size;

  if (again) {
    // Need more stream to decode format, return true and schedule next buffer.
    return true;
  }

  // Run this initialization only on first startup.
  if (decoder_state_ == kInitialized) {
    DVLOG(3) << "DecodeBufferInitial(): running initialization";
    // Success! Setup our parameters.
    if (!CreateBuffersForFormat(format, visible_size))
      return false;
  }

  // Format known (and, after a reset, buffers already exist): start decoding.
  decoder_state_ = kDecoding;
  ScheduleDecodeBufferTaskIfNeeded();
  return true;
}
855 | |
856 bool V4L2VideoDecodeAccelerator::DecodeBufferContinue( | |
857 const void* data, size_t size) { | |
858 DVLOG(3) << "DecodeBufferContinue(): data=" << data << ", size=" << size; | |
859 DCHECK_EQ(decoder_thread_.message_loop(), base::MessageLoop::current()); | |
860 DCHECK_EQ(decoder_state_, kDecoding); | |
861 | |
862 // Both of these calls will set kError state if they fail. | |
863 // Only flush the frame if it's complete. | |
864 return (AppendToInputFrame(data, size) && | |
865 (decoder_partial_frame_pending_ || FlushInputFrame())); | |
866 } | |
867 | |
// Copies |data| into the input buffer currently being filled, first
// acquiring a fresh buffer from |free_input_buffers_| if none is active
// (and flushing the active one if |size| would overflow it). Returns
// false on error, or when stalled waiting for a free input buffer — the
// stall case does not set the error state.
bool V4L2VideoDecodeAccelerator::AppendToInputFrame(
    const void* data, size_t size) {
  DVLOG(3) << "AppendToInputFrame()";
  DCHECK_EQ(decoder_thread_.message_loop(), base::MessageLoop::current());
  DCHECK_NE(decoder_state_, kUninitialized);
  DCHECK_NE(decoder_state_, kResetting);
  DCHECK_NE(decoder_state_, kError);
  // This routine can handle data == NULL and size == 0, which occurs when
  // we queue an empty buffer for the purposes of flushing the pipe.

  // Flush if we're too big
  if (decoder_current_input_buffer_ != -1) {
    InputRecord& input_record =
        input_buffer_map_[decoder_current_input_buffer_];
    if (input_record.bytes_used + size > input_record.length) {
      if (!FlushInputFrame())
        return false;
      // NOTE: FlushInputFrame() already clears this on success; kept for
      // clarity/safety.
      decoder_current_input_buffer_ = -1;
    }
  }

  // Try to get an available input buffer
  if (decoder_current_input_buffer_ == -1) {
    if (free_input_buffers_.empty()) {
      // See if we can get more free buffers from HW
      Dequeue();
      if (free_input_buffers_.empty()) {
        // Nope!
        DVLOG(2) << "AppendToInputFrame(): stalled for input buffers";
        return false;
      }
    }
    decoder_current_input_buffer_ = free_input_buffers_.back();
    free_input_buffers_.pop_back();
    InputRecord& input_record =
        input_buffer_map_[decoder_current_input_buffer_];
    DCHECK_EQ(input_record.bytes_used, 0);
    DCHECK_EQ(input_record.input_id, -1);
    DCHECK(decoder_current_bitstream_buffer_ != NULL);
    // Tag the buffer with the bitstream id so the output can be matched
    // back to the client's Decode() call.
    input_record.input_id = decoder_current_bitstream_buffer_->input_id;
  }

  DCHECK(data != NULL || size == 0);
  if (size == 0) {
    // If we asked for an empty buffer, return now. We return only after
    // getting the next input buffer, since we might actually want an empty
    // input buffer for flushing purposes.
    return true;
  }

  // Copy in to the buffer.
  InputRecord& input_record =
      input_buffer_map_[decoder_current_input_buffer_];
  if (size > input_record.length - input_record.bytes_used) {
    // A single fragment larger than an entire input buffer cannot be
    // handled; treat the stream as unreadable.
    LOG(ERROR) << "AppendToInputFrame(): over-size frame, erroring";
    NOTIFY_ERROR(UNREADABLE_INPUT);
    return false;
  }
  memcpy(reinterpret_cast<uint8_t*>(input_record.address) +
             input_record.bytes_used,
         data, size);
  input_record.bytes_used += size;

  return true;
}
933 | |
// Finishes the input buffer currently being filled and hands it to
// Enqueue() via |input_ready_queue_|. An empty buffer is submitted only
// when it carries the flush sentinel (kFlushBufferId); an empty buffer
// from a regular client bitstream is simply recycled. Returns false only
// if the decoder entered the error state.
bool V4L2VideoDecodeAccelerator::FlushInputFrame() {
  DVLOG(3) << "FlushInputFrame()";
  DCHECK_EQ(decoder_thread_.message_loop(), base::MessageLoop::current());
  DCHECK_NE(decoder_state_, kUninitialized);
  DCHECK_NE(decoder_state_, kResetting);
  DCHECK_NE(decoder_state_, kError);

  // Nothing is being filled: trivially done.
  if (decoder_current_input_buffer_ == -1)
    return true;

  InputRecord& input_record =
      input_buffer_map_[decoder_current_input_buffer_];
  DCHECK_NE(input_record.input_id, -1);
  DCHECK(input_record.input_id != kFlushBufferId ||
         input_record.bytes_used == 0);
  // * if input_id >= 0, this input buffer was prompted by a bitstream buffer we
  //   got from the client. We can skip it if it is empty.
  // * if input_id < 0 (should be kFlushBufferId in this case), this input
  //   buffer was prompted by a flush buffer, and should be queued even when
  //   empty.
  if (input_record.input_id >= 0 && input_record.bytes_used == 0) {
    input_record.input_id = -1;
    free_input_buffers_.push_back(decoder_current_input_buffer_);
    decoder_current_input_buffer_ = -1;
    return true;
  }

  // Queue it.
  input_ready_queue_.push(decoder_current_input_buffer_);
  decoder_current_input_buffer_ = -1;
  DVLOG(3) << "FlushInputFrame(): submitting input_id="
           << input_record.input_id;
  // Enqueue once since there's new available input for it.
  Enqueue();

  // Enqueue() may have failed and set kError; report that to the caller.
  return (decoder_state_ != kError);
}
971 | |
// Runs on the decoder thread after DevicePollTask() wakes up: dequeues
// completed buffers, enqueues new work, and re-arms the poll task on
// |device_poll_thread_|. |event_pending| indicates the poll saw a pending
// V4L2 event, in which case we check for a resolution change. Early-outs
// (without re-arming the poll) when resetting, in error, or mid
// resolution change.
void V4L2VideoDecodeAccelerator::ServiceDeviceTask(bool event_pending) {
  DVLOG(3) << "ServiceDeviceTask()";
  DCHECK_EQ(decoder_thread_.message_loop(), base::MessageLoop::current());
  DCHECK_NE(decoder_state_, kUninitialized);
  TRACE_EVENT0("Video Decoder", "V4L2VDA::ServiceDeviceTask");

  if (decoder_state_ == kResetting) {
    DVLOG(2) << "ServiceDeviceTask(): early out: kResetting state";
    return;
  } else if (decoder_state_ == kError) {
    DVLOG(2) << "ServiceDeviceTask(): early out: kError state";
    return;
  } else if (decoder_state_ == kChangingResolution) {
    DVLOG(2) << "ServiceDeviceTask(): early out: kChangingResolution state";
    return;
  }

  bool resolution_change_pending = false;
  if (event_pending)
    resolution_change_pending = DequeueResolutionChangeEvent();
  Dequeue();
  Enqueue();

  // Clear the interrupt fd.
  if (!device_->ClearDevicePollInterrupt()) {
    NOTIFY_ERROR(PLATFORM_FAILURE);
    return;
  }

  bool poll_device = false;
  // Add fd, if we should poll on it.
  // Can be polled as soon as either input or output buffers are queued.
  if (input_buffer_queued_count_ + output_buffer_queued_count_ > 0)
    poll_device = true;

  // ServiceDeviceTask() should only ever be scheduled from DevicePollTask(),
  // so either:
  // * device_poll_thread_ is running normally
  // * device_poll_thread_ scheduled us, but then a ResetTask() or DestroyTask()
  //   shut it down, in which case we're either in kResetting or kError states
  //   respectively, and we should have early-outed already.
  DCHECK(device_poll_thread_.message_loop());
  // Queue the DevicePollTask() now.
  device_poll_thread_.message_loop()->PostTask(
      FROM_HERE,
      base::Bind(&V4L2VideoDecodeAccelerator::DevicePollTask,
                 base::Unretained(this),
                 poll_device));

  // Snapshot of where buffers currently live across the pipeline, for
  // debugging stalls.
  DVLOG(1) << "ServiceDeviceTask(): buffer counts: DEC["
           << decoder_input_queue_.size() << "->"
           << input_ready_queue_.size() << "] => DEVICE["
           << free_input_buffers_.size() << "+"
           << input_buffer_queued_count_ << "/"
           << input_buffer_map_.size() << "->"
           << free_output_buffers_.size() << "+"
           << output_buffer_queued_count_ << "/"
           << output_buffer_map_.size() << "] => PROCESSOR["
           << image_processor_bitstream_buffer_ids_.size() << "] => CLIENT["
           << decoder_frames_at_client_ << "]";

  ScheduleDecodeBufferTaskIfNeeded();
  // Handle the resolution change last, after the dequeues/enqueues above.
  if (resolution_change_pending)
    StartResolutionChange();
}
1037 | |
// Queues as many ready input buffers and free output buffers to the
// device as possible. On each queue's empty->non-empty transition we set
// the device poll interrupt (so the poll thread starts watching the
// device fd) and issue VIDIOC_STREAMON the first time.
void V4L2VideoDecodeAccelerator::Enqueue() {
  DVLOG(3) << "Enqueue()";
  DCHECK_EQ(decoder_thread_.message_loop(), base::MessageLoop::current());
  DCHECK_NE(decoder_state_, kUninitialized);
  TRACE_EVENT0("Video Decoder", "V4L2VDA::Enqueue");

  // Drain the pipe of completed decode buffers.
  const int old_inputs_queued = input_buffer_queued_count_;
  while (!input_ready_queue_.empty()) {
    // EnqueueInputRecord() pops from input_ready_queue_ on success and
    // returns false (setting kError) on failure.
    if (!EnqueueInputRecord())
      return;
  }
  if (old_inputs_queued == 0 && input_buffer_queued_count_ != 0) {
    // We just started up a previously empty queue.
    // Queue state changed; signal interrupt.
    if (!device_->SetDevicePollInterrupt()) {
      PLOG(ERROR) << "SetDevicePollInterrupt(): failed";
      NOTIFY_ERROR(PLATFORM_FAILURE);
      return;
    }
    // Start VIDIOC_STREAMON if we haven't yet.
    if (!input_streamon_) {
      __u32 type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
      IOCTL_OR_ERROR_RETURN(VIDIOC_STREAMON, &type);
      input_streamon_ = true;
    }
  }

  // Enqueue all the outputs we can.
  const int old_outputs_queued = output_buffer_queued_count_;
  while (!free_output_buffers_.empty()) {
    if (!EnqueueOutputRecord())
      return;
  }
  if (old_outputs_queued == 0 && output_buffer_queued_count_ != 0) {
    // We just started up a previously empty queue.
    // Queue state changed; signal interrupt.
    if (!device_->SetDevicePollInterrupt()) {
      PLOG(ERROR) << "SetDevicePollInterrupt(): failed";
      NOTIFY_ERROR(PLATFORM_FAILURE);
      return;
    }
    // Start VIDIOC_STREAMON if we haven't yet.
    if (!output_streamon_) {
      __u32 type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
      IOCTL_OR_ERROR_RETURN(VIDIOC_STREAMON, &type);
      output_streamon_ = true;
    }
  }
}
1088 | |
1089 bool V4L2VideoDecodeAccelerator::DequeueResolutionChangeEvent() { | |
1090 DCHECK_EQ(decoder_thread_.message_loop(), base::MessageLoop::current()); | |
1091 DCHECK_NE(decoder_state_, kUninitialized); | |
1092 DVLOG(3) << "DequeueResolutionChangeEvent()"; | |
1093 | |
1094 struct v4l2_event ev; | |
1095 memset(&ev, 0, sizeof(ev)); | |
1096 | |
1097 while (device_->Ioctl(VIDIOC_DQEVENT, &ev) == 0) { | |
1098 if (ev.type == V4L2_EVENT_SOURCE_CHANGE) { | |
1099 if (ev.u.src_change.changes & V4L2_EVENT_SRC_CH_RESOLUTION) { | |
1100 DVLOG(3) | |
1101 << "DequeueResolutionChangeEvent(): got resolution change event."; | |
1102 return true; | |
1103 } | |
1104 } else { | |
1105 LOG(ERROR) << "DequeueResolutionChangeEvent(): got an event (" << ev.type | |
1106 << ") we haven't subscribed to."; | |
1107 } | |
1108 } | |
1109 return false; | |
1110 } | |
1111 | |
// Reclaims buffers the device has finished with. Completed input
// (VIDEO_OUTPUT) buffers are recycled to |free_input_buffers_|; decoded
// output (VIDEO_CAPTURE) buffers are forwarded either to the image
// processor or straight to the client as Pictures. Finally checks
// whether this drained a pending flush.
void V4L2VideoDecodeAccelerator::Dequeue() {
  DVLOG(3) << "Dequeue()";
  DCHECK_EQ(decoder_thread_.message_loop(), base::MessageLoop::current());
  DCHECK_NE(decoder_state_, kUninitialized);
  TRACE_EVENT0("Video Decoder", "V4L2VDA::Dequeue");

  // Dequeue completed input (VIDEO_OUTPUT) buffers, and recycle to the free
  // list.
  while (input_buffer_queued_count_ > 0) {
    DCHECK(input_streamon_);
    struct v4l2_buffer dqbuf;
    struct v4l2_plane planes[1];
    memset(&dqbuf, 0, sizeof(dqbuf));
    memset(planes, 0, sizeof(planes));
    dqbuf.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
    dqbuf.memory = V4L2_MEMORY_MMAP;
    dqbuf.m.planes = planes;
    dqbuf.length = 1;
    if (device_->Ioctl(VIDIOC_DQBUF, &dqbuf) != 0) {
      if (errno == EAGAIN) {
        // EAGAIN if we're just out of buffers to dequeue.
        break;
      }
      PLOG(ERROR) << "Dequeue(): ioctl() failed: VIDIOC_DQBUF";
      NOTIFY_ERROR(PLATFORM_FAILURE);
      return;
    }
    // Reset the record so it looks freshly allocated to AppendToInputFrame().
    InputRecord& input_record = input_buffer_map_[dqbuf.index];
    DCHECK(input_record.at_device);
    free_input_buffers_.push_back(dqbuf.index);
    input_record.at_device = false;
    input_record.bytes_used = 0;
    input_record.input_id = -1;
    input_buffer_queued_count_--;
  }

  // Dequeue completed output (VIDEO_CAPTURE) buffers, and queue to the
  // completed queue.
  while (output_buffer_queued_count_ > 0) {
    DCHECK(output_streamon_);
    struct v4l2_buffer dqbuf;
    std::unique_ptr<struct v4l2_plane[]> planes(
        new v4l2_plane[output_planes_count_]);
    memset(&dqbuf, 0, sizeof(dqbuf));
    memset(planes.get(), 0, sizeof(struct v4l2_plane) * output_planes_count_);
    dqbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
    dqbuf.memory = V4L2_MEMORY_MMAP;
    dqbuf.m.planes = planes.get();
    dqbuf.length = output_planes_count_;
    if (device_->Ioctl(VIDIOC_DQBUF, &dqbuf) != 0) {
      if (errno == EAGAIN) {
        // EAGAIN if we're just out of buffers to dequeue.
        break;
      }
      PLOG(ERROR) << "Dequeue(): ioctl() failed: VIDIOC_DQBUF";
      NOTIFY_ERROR(PLATFORM_FAILURE);
      return;
    }
    OutputRecord& output_record = output_buffer_map_[dqbuf.index];
    DCHECK_EQ(output_record.state, kAtDevice);
    DCHECK_NE(output_record.egl_image, EGL_NO_IMAGE_KHR);
    DCHECK_NE(output_record.picture_id, -1);
    output_buffer_queued_count_--;
    if (dqbuf.m.planes[0].bytesused == 0) {
      // This is an empty output buffer returned as part of a flush.
      output_record.state = kFree;
      free_output_buffers_.push(dqbuf.index);
    } else {
      // EnqueueInputRecord() stashed the bitstream id in the timestamp
      // field; the driver is expected to propagate it from the matching
      // input buffer.
      int32_t bitstream_buffer_id = dqbuf.timestamp.tv_sec;
      DCHECK_GE(bitstream_buffer_id, 0);
      DVLOG(3) << "Dequeue output buffer: dqbuf index=" << dqbuf.index
               << " bitstream input_id=" << bitstream_buffer_id;
      if (image_processor_device_) {
        // Hand the decoded frame to the image processor as dmabufs; the
        // result comes back via FrameProcessed() on this thread.
        output_record.state = kAtProcessor;
        image_processor_bitstream_buffer_ids_.push(bitstream_buffer_id);
        std::vector<int> fds;
        for (auto& fd : output_record.fds) {
          fds.push_back(fd.get());
        }
        scoped_refptr<media::VideoFrame> frame =
            media::VideoFrame::WrapExternalDmabufs(
                V4L2Device::V4L2PixFmtToVideoPixelFormat(output_format_fourcc_),
                coded_size_, gfx::Rect(visible_size_), visible_size_, fds,
                base::TimeDelta());
        // Unretained is safe because |this| owns image processor and there will
        // be no callbacks after processor destroys. Also, this class ensures it
        // is safe to post a task from child thread to decoder thread using
        // Unretained.
        image_processor_->Process(
            frame, dqbuf.index,
            media::BindToCurrentLoop(
                base::Bind(&V4L2VideoDecodeAccelerator::FrameProcessed,
                           base::Unretained(this), bitstream_buffer_id)));
      } else {
        // Deliver the picture directly to the client. |cleared| reports
        // whether this buffer's contents have been output before.
        output_record.state = kAtClient;
        decoder_frames_at_client_++;
        const media::Picture picture(output_record.picture_id,
                                     bitstream_buffer_id,
                                     gfx::Rect(visible_size_), false);
        pending_picture_ready_.push(
            PictureRecord(output_record.cleared, picture));
        SendPictureReady();
        output_record.cleared = true;
      }
    }
  }

  NotifyFlushDoneIfNeeded();
}
1221 | |
1222 bool V4L2VideoDecodeAccelerator::EnqueueInputRecord() { | |
1223 DVLOG(3) << "EnqueueInputRecord()"; | |
1224 DCHECK(!input_ready_queue_.empty()); | |
1225 | |
1226 // Enqueue an input (VIDEO_OUTPUT) buffer. | |
1227 const int buffer = input_ready_queue_.front(); | |
1228 InputRecord& input_record = input_buffer_map_[buffer]; | |
1229 DCHECK(!input_record.at_device); | |
1230 struct v4l2_buffer qbuf; | |
1231 struct v4l2_plane qbuf_plane; | |
1232 memset(&qbuf, 0, sizeof(qbuf)); | |
1233 memset(&qbuf_plane, 0, sizeof(qbuf_plane)); | |
1234 qbuf.index = buffer; | |
1235 qbuf.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE; | |
1236 qbuf.timestamp.tv_sec = input_record.input_id; | |
1237 qbuf.memory = V4L2_MEMORY_MMAP; | |
1238 qbuf.m.planes = &qbuf_plane; | |
1239 qbuf.m.planes[0].bytesused = input_record.bytes_used; | |
1240 qbuf.length = 1; | |
1241 IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_QBUF, &qbuf); | |
1242 input_ready_queue_.pop(); | |
1243 input_record.at_device = true; | |
1244 input_buffer_queued_count_++; | |
1245 DVLOG(3) << "EnqueueInputRecord(): enqueued input_id=" | |
1246 << input_record.input_id << " size=" << input_record.bytes_used; | |
1247 return true; | |
1248 } | |
1249 | |
// Queues the frontmost free output (VIDEO_CAPTURE) buffer back to the
// device. If the client attached an EGL sync when returning the buffer,
// we wait on it first (so the GPU is done reading the EGLImage) and then
// destroy it. Returns false on failure.
bool V4L2VideoDecodeAccelerator::EnqueueOutputRecord() {
  DVLOG(3) << "EnqueueOutputRecord()";
  DCHECK(!free_output_buffers_.empty());

  // Enqueue an output (VIDEO_CAPTURE) buffer.
  const int buffer = free_output_buffers_.front();
  OutputRecord& output_record = output_buffer_map_[buffer];
  DCHECK_EQ(output_record.state, kFree);
  DCHECK_NE(output_record.egl_image, EGL_NO_IMAGE_KHR);
  DCHECK_NE(output_record.picture_id, -1);
  if (output_record.egl_sync != EGL_NO_SYNC_KHR) {
    TRACE_EVENT0("Video Decoder",
                 "V4L2VDA::EnqueueOutputRecord: eglClientWaitSyncKHR");
    // If we have to wait for completion, wait.  Note that
    // free_output_buffers_ is a FIFO queue, so we always wait on the
    // buffer that has been in the queue the longest.
    if (eglClientWaitSyncKHR(egl_display_, output_record.egl_sync, 0,
                             EGL_FOREVER_KHR) == EGL_FALSE) {
      // This will cause tearing, but is safe otherwise.
      DLOG(WARNING) << __func__ << " eglClientWaitSyncKHR failed!";
    }
    if (eglDestroySyncKHR(egl_display_, output_record.egl_sync) != EGL_TRUE) {
      LOG(ERROR) << __func__ << " eglDestroySyncKHR failed!";
      NOTIFY_ERROR(PLATFORM_FAILURE);
      return false;
    }
    output_record.egl_sync = EGL_NO_SYNC_KHR;
  }
  struct v4l2_buffer qbuf;
  std::unique_ptr<struct v4l2_plane[]> qbuf_planes(
      new v4l2_plane[output_planes_count_]);
  memset(&qbuf, 0, sizeof(qbuf));
  memset(
      qbuf_planes.get(), 0, sizeof(struct v4l2_plane) * output_planes_count_);
  qbuf.index = buffer;
  qbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
  qbuf.memory = V4L2_MEMORY_MMAP;
  qbuf.m.planes = qbuf_planes.get();
  qbuf.length = output_planes_count_;
  IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_QBUF, &qbuf);
  free_output_buffers_.pop();
  output_record.state = kAtDevice;
  output_buffer_queued_count_++;
  return true;
}
1295 | |
// Runs on the decoder thread when the client returns a picture buffer.
// Finds the output record for |picture_buffer_id|, takes ownership of
// the client's EGL sync (waited on later in EnqueueOutputRecord()),
// marks the record free and tries to requeue it to the device. Ids not
// found in the map are ignored (already-dismissed buffers); in every
// early-out path |egl_sync_ref| keeps ownership and destroys the sync.
void V4L2VideoDecodeAccelerator::ReusePictureBufferTask(
    int32_t picture_buffer_id,
    std::unique_ptr<EGLSyncKHRRef> egl_sync_ref) {
  DVLOG(3) << "ReusePictureBufferTask(): picture_buffer_id="
           << picture_buffer_id;
  DCHECK_EQ(decoder_thread_.message_loop(), base::MessageLoop::current());
  TRACE_EVENT0("Video Decoder", "V4L2VDA::ReusePictureBufferTask");

  // We run ReusePictureBufferTask even if we're in kResetting.
  if (decoder_state_ == kError) {
    DVLOG(2) << "ReusePictureBufferTask(): early out: kError state";
    return;
  }

  if (decoder_state_ == kChangingResolution) {
    DVLOG(2) << "ReusePictureBufferTask(): early out: kChangingResolution";
    return;
  }

  // Linear search: picture ids are not indexes into the map.
  size_t index;
  for (index = 0; index < output_buffer_map_.size(); ++index)
    if (output_buffer_map_[index].picture_id == picture_buffer_id)
      break;

  if (index >= output_buffer_map_.size()) {
    // It's possible that we've already posted a DismissPictureBuffer for this
    // picture, but it has not yet executed when this ReusePictureBuffer was
    // posted to us by the client. In that case just ignore this (we've already
    // dismissed it and accounted for that) and let the sync object get
    // destroyed.
    DVLOG(4) << "ReusePictureBufferTask(): got picture id= "
             << picture_buffer_id << " not in use (anymore?).";
    return;
  }

  OutputRecord& output_record = output_buffer_map_[index];
  if (output_record.state != kAtClient) {
    // The client returned a buffer it doesn't currently own.
    LOG(ERROR) << "ReusePictureBufferTask(): picture_buffer_id not reusable";
    NOTIFY_ERROR(INVALID_ARGUMENT);
    return;
  }

  DCHECK_EQ(output_record.egl_sync, EGL_NO_SYNC_KHR);
  output_record.egl_sync = egl_sync_ref->egl_sync;
  output_record.state = kFree;
  free_output_buffers_.push(index);
  decoder_frames_at_client_--;
  // Take ownership of the EGLSync.
  egl_sync_ref->egl_sync = EGL_NO_SYNC_KHR;
  // We got a buffer back, so enqueue it back.
  Enqueue();
}
1348 | |
// Handles a client Flush() on the decoder thread. If nothing has entered
// the pipeline yet (kInitialized/kAfterReset) completion is reported
// immediately; otherwise a sentinel empty buffer (kFlushBufferId) is
// queued, and NotifyFlushDoneIfNeeded() completes the flush once it
// drains through the device.
void V4L2VideoDecodeAccelerator::FlushTask() {
  DVLOG(3) << "FlushTask()";
  DCHECK_EQ(decoder_thread_.message_loop(), base::MessageLoop::current());
  TRACE_EVENT0("Video Decoder", "V4L2VDA::FlushTask");

  // Flush outstanding buffers.
  if (decoder_state_ == kInitialized || decoder_state_ == kAfterReset) {
    // There's nothing in the pipe, so return done immediately.
    DVLOG(3) << "FlushTask(): returning flush";
    child_task_runner_->PostTask(FROM_HERE,
                                 base::Bind(&Client::NotifyFlushDone, client_));
    return;
  } else if (decoder_state_ == kError) {
    DVLOG(2) << "FlushTask(): early out: kError state";
    return;
  }

  // We don't support stacked flushing.
  DCHECK(!decoder_flushing_);

  // Queue up an empty buffer -- this triggers the flush.
  decoder_input_queue_.push(
      linked_ptr<BitstreamBufferRef>(new BitstreamBufferRef(
          decode_client_, decode_task_runner_, nullptr, kFlushBufferId)));
  decoder_flushing_ = true;
  SendPictureReady();  // Send all pending PictureReady.

  ScheduleDecodeBufferTaskIfNeeded();
}
1378 | |
1379 void V4L2VideoDecodeAccelerator::NotifyFlushDoneIfNeeded() { | |
1380 DCHECK_EQ(decoder_thread_.message_loop(), base::MessageLoop::current()); | |
1381 if (!decoder_flushing_) | |
1382 return; | |
1383 | |
1384 // Pipeline is empty when: | |
1385 // * Decoder input queue is empty of non-delayed buffers. | |
1386 // * There is no currently filling input buffer. | |
1387 // * Input holding queue is empty. | |
1388 // * All input (VIDEO_OUTPUT) buffers are returned. | |
1389 // * All image processor buffers are returned. | |
1390 if (!decoder_input_queue_.empty()) { | |
1391 if (decoder_input_queue_.front()->input_id != | |
1392 decoder_delay_bitstream_buffer_id_) | |
1393 return; | |
1394 } | |
1395 if (decoder_current_input_buffer_ != -1) | |
1396 return; | |
1397 if ((input_ready_queue_.size() + input_buffer_queued_count_) != 0) | |
1398 return; | |
1399 if (image_processor_bitstream_buffer_ids_.size() != 0) | |
1400 return; | |
1401 | |
1402 // TODO(posciak): crbug.com/270039. Exynos requires a streamoff-streamon | |
1403 // sequence after flush to continue, even if we are not resetting. This would | |
1404 // make sense, because we don't really want to resume from a non-resume point | |
1405 // (e.g. not from an IDR) if we are flushed. | |
1406 // MSE player however triggers a Flush() on chunk end, but never Reset(). One | |
1407 // could argue either way, or even say that Flush() is not needed/harmful when | |
1408 // transitioning to next chunk. | |
1409 // For now, do the streamoff-streamon cycle to satisfy Exynos and not freeze | |
1410 // when doing MSE. This should be harmless otherwise. | |
1411 if (!(StopDevicePoll() && StopOutputStream() && StopInputStream())) | |
1412 return; | |
1413 | |
1414 if (!StartDevicePoll()) | |
1415 return; | |
1416 | |
1417 decoder_delay_bitstream_buffer_id_ = -1; | |
1418 decoder_flushing_ = false; | |
1419 DVLOG(3) << "NotifyFlushDoneIfNeeded(): returning flush"; | |
1420 child_task_runner_->PostTask(FROM_HERE, | |
1421 base::Bind(&Client::NotifyFlushDone, client_)); | |
1422 | |
1423 // While we were flushing, we early-outed DecodeBufferTask()s. | |
1424 ScheduleDecodeBufferTaskIfNeeded(); | |
1425 } | |
1426 | |
// Handles a client Reset() on the decoder thread: stops the device
// streams, drops all queued input and image-processor output, then moves
// to kResetting and posts ResetDoneTask(). If a resolution change is in
// progress (or one is discovered while stopping the output stream) the
// reset is deferred until the change completes.
void V4L2VideoDecodeAccelerator::ResetTask() {
  DVLOG(3) << "ResetTask()";
  DCHECK_EQ(decoder_thread_.message_loop(), base::MessageLoop::current());
  TRACE_EVENT0("Video Decoder", "V4L2VDA::ResetTask");

  if (decoder_state_ == kError) {
    DVLOG(2) << "ResetTask(): early out: kError state";
    return;
  }

  // If we are in the middle of switching resolutions, postpone reset until
  // it's done. We don't have to worry about timing of this wrt to decoding,
  // because output pipe is already stopped if we are changing resolution.
  // We will come back here after we are done with the resolution change.
  DCHECK(!resolution_change_reset_pending_);
  if (decoder_state_ == kChangingResolution) {
    resolution_change_reset_pending_ = true;
    return;
  }

  // After the output stream is stopped, the codec should not post any
  // resolution change events. So we dequeue the resolution change event
  // afterwards. The event could be posted before or while stopping the output
  // stream. The codec will expect the buffer of new size after the seek, so
  // we need to handle the resolution change event first.
  if (!(StopDevicePoll() && StopOutputStream()))
    return;

  if (DequeueResolutionChangeEvent()) {
    resolution_change_reset_pending_ = true;
    StartResolutionChange();
    return;
  }

  if (!StopInputStream())
    return;

  // Drop all pending and in-progress input.
  decoder_current_bitstream_buffer_.reset();
  while (!decoder_input_queue_.empty())
    decoder_input_queue_.pop();

  decoder_current_input_buffer_ = -1;

  // Drop all buffers in image processor.
  while (!image_processor_bitstream_buffer_ids_.empty())
    image_processor_bitstream_buffer_ids_.pop();

  // If we were flushing, we'll never return any more BitstreamBuffers or
  // PictureBuffers; they have all been dropped and returned by now.
  NotifyFlushDoneIfNeeded();

  // Mark that we're resetting, then enqueue a ResetDoneTask().  All intervening
  // jobs will early-out in the kResetting state.
  decoder_state_ = kResetting;
  SendPictureReady();  // Send all pending PictureReady.
  decoder_thread_.message_loop()->PostTask(FROM_HERE, base::Bind(
      &V4L2VideoDecodeAccelerator::ResetDoneTask, base::Unretained(this)));
}
1485 | |
// Completes a Reset(): restarts the device poll, resets codec-specific
// parser state, and transitions from kResetting to kAfterReset (or back
// to kInitialized if output buffers were never allocated). Notifies the
// client on the child thread and resumes decode scheduling.
void V4L2VideoDecodeAccelerator::ResetDoneTask() {
  DVLOG(3) << "ResetDoneTask()";
  DCHECK_EQ(decoder_thread_.message_loop(), base::MessageLoop::current());
  TRACE_EVENT0("Video Decoder", "V4L2VDA::ResetDoneTask");

  if (decoder_state_ == kError) {
    DVLOG(2) << "ResetDoneTask(): early out: kError state";
    return;
  }

  if (!StartDevicePoll())
    return;

  // Reset format-specific bits.
  if (video_profile_ >= media::H264PROFILE_MIN &&
      video_profile_ <= media::H264PROFILE_MAX) {
    // A fresh parser drops any partially-parsed NALU state from before
    // the reset.
    decoder_h264_parser_.reset(new media::H264Parser());
  }

  // Jobs drained, we're finished resetting.
  DCHECK_EQ(decoder_state_, kResetting);
  if (output_buffer_map_.empty()) {
    // We must have gotten Reset() before we had a chance to request buffers
    // from the client.
    decoder_state_ = kInitialized;
  } else {
    decoder_state_ = kAfterReset;
  }

  decoder_partial_frame_pending_ = false;
  decoder_delay_bitstream_buffer_id_ = -1;
  child_task_runner_->PostTask(FROM_HERE,
                               base::Bind(&Client::NotifyResetDone, client_));

  // While we were resetting, we early-outed DecodeBufferTask()s.
  ScheduleDecodeBufferTaskIfNeeded();
}
1523 | |
// Tears down decoding on the decoder thread. Runs regardless of
// |decoder_state_|: stops the poll thread and both device streams,
// drops all queued input, and parks the decoder in kError so any
// straggling tasks early-out.
void V4L2VideoDecodeAccelerator::DestroyTask() {
  DVLOG(3) << "DestroyTask()";
  TRACE_EVENT0("Video Decoder", "V4L2VDA::DestroyTask");

  // DestroyTask() should run regardless of decoder_state_.

  // Poll thread must stop before the streams are torn down.
  StopDevicePoll();
  StopOutputStream();
  StopInputStream();

  decoder_current_bitstream_buffer_.reset();
  decoder_current_input_buffer_ = -1;
  decoder_decode_buffer_tasks_scheduled_ = 0;
  decoder_frames_at_client_ = 0;
  while (!decoder_input_queue_.empty())
    decoder_input_queue_.pop();
  decoder_flushing_ = false;

  // Set our state to kError.  Just in case.
  decoder_state_ = kError;
}
1545 | |
1546 bool V4L2VideoDecodeAccelerator::StartDevicePoll() { | |
1547 DVLOG(3) << "StartDevicePoll()"; | |
1548 DCHECK(!device_poll_thread_.IsRunning()); | |
1549 DCHECK_EQ(decoder_thread_.message_loop(), base::MessageLoop::current()); | |
1550 | |
1551 // Start up the device poll thread and schedule its first DevicePollTask(). | |
1552 if (!device_poll_thread_.Start()) { | |
1553 LOG(ERROR) << "StartDevicePoll(): Device thread failed to start"; | |
1554 NOTIFY_ERROR(PLATFORM_FAILURE); | |
1555 return false; | |
1556 } | |
1557 device_poll_thread_.message_loop()->PostTask(FROM_HERE, base::Bind( | |
1558 &V4L2VideoDecodeAccelerator::DevicePollTask, | |
1559 base::Unretained(this), | |
1560 0)); | |
1561 | |
1562 return true; | |
1563 } | |
1564 | |
// Stops |device_poll_thread_|, using the device poll interrupt to wake
// any in-flight poll() so the thread can join promptly, then clears the
// interrupt again for the next start. Safe (and a no-op) when the thread
// is not running. Returns false on failure.
bool V4L2VideoDecodeAccelerator::StopDevicePoll() {
  DVLOG(3) << "StopDevicePoll()";

  if (!device_poll_thread_.IsRunning())
    return true;

  // Also reachable during destruction after decoder_thread_ has stopped.
  if (decoder_thread_.IsRunning())
    DCHECK_EQ(decoder_thread_.message_loop(), base::MessageLoop::current());

  // Signal the DevicePollTask() to stop, and stop the device poll thread.
  if (!device_->SetDevicePollInterrupt()) {
    PLOG(ERROR) << "SetDevicePollInterrupt(): failed";
    NOTIFY_ERROR(PLATFORM_FAILURE);
    return false;
  }
  device_poll_thread_.Stop();
  // Clear the interrupt now, to be sure.
  if (!device_->ClearDevicePollInterrupt()) {
    NOTIFY_ERROR(PLATFORM_FAILURE);
    return false;
  }
  DVLOG(3) << "StopDevicePoll(): device poll stopped";
  return true;
}
1589 | |
// Issues VIDIOC_STREAMOFF on the CAPTURE (output) queue and reclaims every
// buffer the device owned. Buffers held by the client (kAtClient) or the
// image processor are left alone. Returns false on ioctl failure.
bool V4L2VideoDecodeAccelerator::StopOutputStream() {
  DVLOG(3) << "StopOutputStream()";
  if (!output_streamon_)
    return true;

  __u32 type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
  IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_STREAMOFF, &type);
  output_streamon_ = false;

  for (size_t i = 0; i < output_buffer_map_.size(); ++i) {
    // After streamoff, the device drops ownership of all buffers, even if we
    // don't dequeue them explicitly. Some of them may still be owned by the
    // client however. Reuse only those that aren't.
    OutputRecord& output_record = output_buffer_map_[i];
    if (output_record.state == kAtDevice) {
      output_record.state = kFree;
      free_output_buffers_.push(i);
      // A buffer at the device must not have a pending EGL sync object.
      DCHECK_EQ(output_record.egl_sync, EGL_NO_SYNC_KHR);
    }
  }
  output_buffer_queued_count_ = 0;
  return true;
}
1613 | |
// Issues VIDIOC_STREAMOFF on the OUTPUT (input) queue and resets all input
// buffer accounting: every input buffer becomes free and empty again.
// Returns false on ioctl failure.
bool V4L2VideoDecodeAccelerator::StopInputStream() {
  DVLOG(3) << "StopInputStream()";
  if (!input_streamon_)
    return true;

  __u32 type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
  IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_STREAMOFF, &type);
  input_streamon_ = false;

  // Reset accounting info for input.
  while (!input_ready_queue_.empty())
    input_ready_queue_.pop();
  // After streamoff the device owns no input buffers; mark them all free.
  free_input_buffers_.clear();
  for (size_t i = 0; i < input_buffer_map_.size(); ++i) {
    free_input_buffers_.push_back(i);
    input_buffer_map_[i].at_device = false;
    input_buffer_map_[i].bytes_used = 0;
    input_buffer_map_[i].input_id = -1;
  }
  input_buffer_queued_count_ = 0;

  return true;
}
1637 | |
// Begins a resolution change: stops polling and the output stream, enters
// kChangingResolution, and (once the image processor is drained) posts
// buffer destruction to the child thread. FinishResolutionChange() resumes
// decoding after the buffers are reallocated.
void V4L2VideoDecodeAccelerator::StartResolutionChange() {
  DCHECK_EQ(decoder_thread_.message_loop(), base::MessageLoop::current());
  DCHECK_NE(decoder_state_, kUninitialized);
  DCHECK_NE(decoder_state_, kResetting);

  DVLOG(3) << "Initiate resolution change";

  // On failure these notify PLATFORM_FAILURE themselves; just bail out.
  if (!(StopDevicePoll() && StopOutputStream()))
    return;

  decoder_state_ = kChangingResolution;

  // Output buffers still at the image processor must be returned first;
  // FrameProcessed() re-enters StartResolutionChange() when the last one is
  // done (it checks for kChangingResolution).
  if (!image_processor_bitstream_buffer_ids_.empty()) {
    DVLOG(3) << "Wait image processor to finish before destroying buffers.";
    return;
  }

  // Post a task to clean up buffers on child thread. This will also ensure
  // that we won't accept ReusePictureBuffer() anymore after that.
  child_task_runner_->PostTask(
      FROM_HERE,
      base::Bind(&V4L2VideoDecodeAccelerator::ResolutionChangeDestroyBuffers,
                 weak_this_));
}
1662 | |
// Completes a resolution change after the old output buffers have been
// destroyed: re-reads the new format from the driver, allocates new buffers,
// then resumes decoding (or services a Reset() that arrived mid-change).
void V4L2VideoDecodeAccelerator::FinishResolutionChange() {
  DCHECK_EQ(decoder_thread_.message_loop(), base::MessageLoop::current());
  DCHECK_EQ(decoder_state_, kChangingResolution);
  DVLOG(3) << "FinishResolutionChange()";

  // NOTE(review): with the DCHECK_EQ above, this early-out can only trigger
  // in release builds (where DCHECKs compile out) if an error was set while
  // this task was queued.
  if (decoder_state_ == kError) {
    DVLOG(2) << "FinishResolutionChange(): early out: kError state";
    return;
  }

  struct v4l2_format format;
  bool again;
  gfx::Size visible_size;
  bool ret = GetFormatInfo(&format, &visible_size, &again);
  // |again| means the driver still hasn't parsed enough stream for a format;
  // that should not happen at this point, so treat it as a platform failure.
  if (!ret || again) {
    LOG(ERROR) << "Couldn't get format information after resolution change";
    NOTIFY_ERROR(PLATFORM_FAILURE);
    return;
  }

  if (!CreateBuffersForFormat(format, visible_size)) {
    LOG(ERROR) << "Couldn't reallocate buffers after resolution change";
    NOTIFY_ERROR(PLATFORM_FAILURE);
    return;
  }

  decoder_state_ = kDecoding;

  // A Reset() received during the resolution change was deferred; run it now
  // instead of resuming decode.
  if (resolution_change_reset_pending_) {
    resolution_change_reset_pending_ = false;
    ResetTask();
    return;
  }

  if (!StartDevicePoll())
    return;

  Enqueue();
  ScheduleDecodeBufferTaskIfNeeded();
}
1703 | |
// Runs on the device poll thread: blocks in V4L2Device::Poll() until the
// device is ready (or the poll interrupt fires), then bounces the result to
// ServiceDeviceTask() on the decoder thread. |poll_device| selects whether
// the device fd itself is polled in addition to the interrupt.
void V4L2VideoDecodeAccelerator::DevicePollTask(bool poll_device) {
  DVLOG(3) << "DevicePollTask()";
  DCHECK_EQ(device_poll_thread_.message_loop(), base::MessageLoop::current());
  TRACE_EVENT0("Video Decoder", "V4L2VDA::DevicePollTask");

  bool event_pending = false;

  if (!device_->Poll(poll_device, &event_pending)) {
    NOTIFY_ERROR(PLATFORM_FAILURE);
    return;
  }

  // All processing should happen on ServiceDeviceTask(), since we shouldn't
  // touch decoder state from this thread.
  decoder_thread_.message_loop()->PostTask(FROM_HERE, base::Bind(
      &V4L2VideoDecodeAccelerator::ServiceDeviceTask,
      base::Unretained(this), event_pending));
}
1722 | |
// Reports |error| to the client. May be called from any thread; trampolines
// itself onto the child thread, where the client pointer is valid. After
// notifying, the client weak-pointer factory is reset so no further client
// callbacks are made.
void V4L2VideoDecodeAccelerator::NotifyError(Error error) {
  DVLOG(2) << "NotifyError()";

  if (!child_task_runner_->BelongsToCurrentThread()) {
    // Bound through |weak_this_|: dropped silently if we are destroyed first.
    child_task_runner_->PostTask(
        FROM_HERE, base::Bind(&V4L2VideoDecodeAccelerator::NotifyError,
                              weak_this_, error));
    return;
  }

  if (client_) {
    client_->NotifyError(error);
    client_ptr_factory_.reset();
  }
}
1738 | |
// Moves the decoder to kError and notifies the client (via NotifyError) if
// we are past initialization. Safe to call from any thread: re-posts itself
// to the decoder thread when that thread is running, since decoder_state_
// may only be touched there.
void V4L2VideoDecodeAccelerator::SetErrorState(Error error) {
  // We can touch decoder_state_ only if this is the decoder thread or the
  // decoder thread isn't running.
  if (decoder_thread_.message_loop() != NULL &&
      decoder_thread_.message_loop() != base::MessageLoop::current()) {
    decoder_thread_.message_loop()->PostTask(FROM_HERE, base::Bind(
        &V4L2VideoDecodeAccelerator::SetErrorState,
        base::Unretained(this), error));
    return;
  }

  // Post NotifyError only if we are already initialized, as the API does
  // not allow doing so before that.
  if (decoder_state_ != kError && decoder_state_ != kUninitialized)
    NotifyError(error);

  decoder_state_ = kError;
}
1757 | |
// Queries the driver (VIDIOC_G_FMT on the CAPTURE queue) for the current
// output format. On success fills |format| and, if non-null, |visible_size|.
// Sets |*again| = true (and returns true) when the driver has not yet seen
// enough stream to know the format; returns false on real errors.
bool V4L2VideoDecodeAccelerator::GetFormatInfo(struct v4l2_format* format,
                                               gfx::Size* visible_size,
                                               bool* again) {
  DCHECK_EQ(decoder_thread_.message_loop(), base::MessageLoop::current());

  *again = false;
  memset(format, 0, sizeof(*format));
  format->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
  // Raw Ioctl() (not the macro) so we can distinguish EINVAL from other
  // failures via errno.
  if (device_->Ioctl(VIDIOC_G_FMT, format) != 0) {
    if (errno == EINVAL) {
      // EINVAL means we haven't seen sufficient stream to decode the format.
      *again = true;
      return true;
    } else {
      PLOG(ERROR) << __func__ << "(): ioctl() failed: VIDIOC_G_FMT";
      NOTIFY_ERROR(PLATFORM_FAILURE);
      return false;
    }
  }

  // Make sure we are still getting the format we set on initialization.
  if (format->fmt.pix_mp.pixelformat != output_format_fourcc_) {
    LOG(ERROR) << "Unexpected format from G_FMT on output";
    return false;
  }

  gfx::Size coded_size(format->fmt.pix_mp.width, format->fmt.pix_mp.height);
  if (visible_size != nullptr)
    *visible_size = GetVisibleSize(coded_size);

  return true;
}
1790 | |
// Records the new coded/visible sizes from |format| and computes the EGLImage
// size and plane count: taken from the image processor's output format when
// one is in use, otherwise directly from the decoder output. Finishes by
// allocating the output buffers. Returns false on failure.
bool V4L2VideoDecodeAccelerator::CreateBuffersForFormat(
    const struct v4l2_format& format,
    const gfx::Size& visible_size) {
  DCHECK_EQ(decoder_thread_.message_loop(), base::MessageLoop::current());
  output_planes_count_ = format.fmt.pix_mp.num_planes;
  coded_size_.SetSize(format.fmt.pix_mp.width, format.fmt.pix_mp.height);
  visible_size_ = visible_size;
  if (image_processor_device_) {
    // A temporary processor instance is used only to negotiate the output
    // geometry; TryOutputFormat() may adjust egl_image_size_ in place.
    V4L2ImageProcessor processor(image_processor_device_);
    egl_image_size_ = visible_size_;
    egl_image_planes_count_ = 0;
    if (!processor.TryOutputFormat(egl_image_format_fourcc_, &egl_image_size_,
                                   &egl_image_planes_count_)) {
      LOG(ERROR) << "Fail to get output size and plane count of processor";
      return false;
    }
  } else {
    egl_image_size_ = coded_size_;
    egl_image_planes_count_ = output_planes_count_;
  }
  DVLOG(3) << "CreateBuffersForFormat(): new resolution: "
           << coded_size_.ToString()
           << ", visible size: " << visible_size_.ToString()
           << ", EGLImage size: " << egl_image_size_.ToString();

  return CreateOutputBuffers();
}
1818 | |
// Returns the visible (crop) size reported by the driver via VIDIOC_G_CROP,
// falling back to |coded_size| whenever the crop rectangle is unavailable,
// out of bounds, empty, or not anchored at the origin.
gfx::Size V4L2VideoDecodeAccelerator::GetVisibleSize(
    const gfx::Size& coded_size) {
  DCHECK_EQ(decoder_thread_.message_loop(), base::MessageLoop::current());

  struct v4l2_crop crop_arg;
  memset(&crop_arg, 0, sizeof(crop_arg));
  crop_arg.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;

  // Not fatal: a driver without crop support simply yields the coded size.
  if (device_->Ioctl(VIDIOC_G_CROP, &crop_arg) != 0) {
    PLOG(ERROR) << "GetVisibleSize(): ioctl() VIDIOC_G_CROP failed";
    return coded_size;
  }

  gfx::Rect rect(crop_arg.c.left, crop_arg.c.top, crop_arg.c.width,
                 crop_arg.c.height);
  DVLOG(3) << "visible rectangle is " << rect.ToString();
  if (!gfx::Rect(coded_size).Contains(rect)) {
    DLOG(ERROR) << "visible rectangle " << rect.ToString()
                << " is not inside coded size " << coded_size.ToString();
    return coded_size;
  }
  if (rect.IsEmpty()) {
    DLOG(ERROR) << "visible size is empty";
    return coded_size;
  }

  // Chrome assumes the picture frame is coded at (0, 0).
  if (!rect.origin().IsOrigin()) {
    DLOG(ERROR) << "Unexpected visible rectangle " << rect.ToString()
                << ", top-left is not origin";
    return coded_size;
  }

  return rect.size();
}
1854 | |
// Requests kInputBufferCount MMAP input buffers from the driver, maps each
// one into our address space, and records address/length per buffer. Called
// once during initialization. Returns false on ioctl or mmap failure (any
// already-mapped buffers are cleaned up later by DestroyInputBuffers()).
bool V4L2VideoDecodeAccelerator::CreateInputBuffers() {
  DVLOG(3) << "CreateInputBuffers()";
  // We always run this as we prepare to initialize.
  DCHECK_EQ(decoder_state_, kUninitialized);
  DCHECK(!input_streamon_);
  DCHECK(input_buffer_map_.empty());

  struct v4l2_requestbuffers reqbufs;
  memset(&reqbufs, 0, sizeof(reqbufs));
  reqbufs.count = kInputBufferCount;
  reqbufs.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
  reqbufs.memory = V4L2_MEMORY_MMAP;
  IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_REQBUFS, &reqbufs);
  // The driver may grant a different count than requested; honor reqbufs.count.
  input_buffer_map_.resize(reqbufs.count);
  for (size_t i = 0; i < input_buffer_map_.size(); ++i) {
    free_input_buffers_.push_back(i);

    // Query for the MEMORY_MMAP pointer.
    struct v4l2_plane planes[1];
    struct v4l2_buffer buffer;
    memset(&buffer, 0, sizeof(buffer));
    memset(planes, 0, sizeof(planes));
    buffer.index = i;
    buffer.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
    buffer.memory = V4L2_MEMORY_MMAP;
    buffer.m.planes = planes;
    buffer.length = 1;  // Single-plane input (set up in SetupFormats()).
    IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_QUERYBUF, &buffer);
    void* address = device_->Mmap(NULL,
                                  buffer.m.planes[0].length,
                                  PROT_READ | PROT_WRITE,
                                  MAP_SHARED,
                                  buffer.m.planes[0].m.mem_offset);
    if (address == MAP_FAILED) {
      PLOG(ERROR) << "CreateInputBuffers(): mmap() failed";
      return false;
    }
    input_buffer_map_[i].address = address;
    input_buffer_map_[i].length = buffer.m.planes[0].length;
  }

  return true;
}
1898 | |
// Negotiates input and output pixel formats with the driver during
// initialization: verifies the coded (input) format is supported and sets it
// via S_FMT, then picks an output format the GPU can import as an EGLImage —
// falling back to routing through a V4L2 image processor when the decoder
// cannot produce such a format directly. Returns false if no working format
// combination exists.
bool V4L2VideoDecodeAccelerator::SetupFormats() {
  // We always run this as we prepare to initialize.
  DCHECK(child_task_runner_->BelongsToCurrentThread());
  DCHECK_EQ(decoder_state_, kUninitialized);
  DCHECK(!input_streamon_);
  DCHECK(!output_streamon_);

  __u32 input_format_fourcc =
      V4L2Device::VideoCodecProfileToV4L2PixFmt(video_profile_, false);
  if (!input_format_fourcc) {
    NOTREACHED();
    return false;
  }

  // Size the coded-data buffer by the device's maximum resolution: 4k streams
  // need a larger worst-case compressed-frame buffer than 1080p.
  size_t input_size;
  gfx::Size max_resolution, min_resolution;
  device_->GetSupportedResolution(input_format_fourcc, &min_resolution,
                                  &max_resolution);
  if (max_resolution.width() > 1920 && max_resolution.height() > 1088)
    input_size = kInputBufferMaxSizeFor4k;
  else
    input_size = kInputBufferMaxSizeFor1080p;

  // Enumerate the driver's coded formats and confirm ours is among them.
  struct v4l2_fmtdesc fmtdesc;
  memset(&fmtdesc, 0, sizeof(fmtdesc));
  fmtdesc.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
  bool is_format_supported = false;
  while (device_->Ioctl(VIDIOC_ENUM_FMT, &fmtdesc) == 0) {
    if (fmtdesc.pixelformat == input_format_fourcc) {
      is_format_supported = true;
      break;
    }
    ++fmtdesc.index;
  }

  if (!is_format_supported) {
    DVLOG(1) << "Input fourcc " << input_format_fourcc
             << " not supported by device.";
    return false;
  }

  struct v4l2_format format;
  memset(&format, 0, sizeof(format));
  format.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
  format.fmt.pix_mp.pixelformat = input_format_fourcc;
  format.fmt.pix_mp.plane_fmt[0].sizeimage = input_size;
  format.fmt.pix_mp.num_planes = 1;
  IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_S_FMT, &format);

  // We have to set up the format for output, because the driver may not allow
  // changing it once we start streaming; whether it can support our chosen
  // output format or not may depend on the input format.
  memset(&fmtdesc, 0, sizeof(fmtdesc));
  fmtdesc.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
  while (device_->Ioctl(VIDIOC_ENUM_FMT, &fmtdesc) == 0) {
    if (device_->CanCreateEGLImageFrom(fmtdesc.pixelformat)) {
      output_format_fourcc_ = fmtdesc.pixelformat;
      break;
    }
    ++fmtdesc.index;
  }

  if (output_format_fourcc_ == 0) {
    // No directly-importable format: insert an image processor between the
    // decoder and the GPU, and pick formats across both stages.
    DVLOG(1) << "Could not find a usable output format. Try image processor";
    image_processor_device_ = V4L2Device::Create(V4L2Device::kImageProcessor);
    if (!image_processor_device_) {
      DVLOG(1) << "No image processor device.";
      return false;
    }
    output_format_fourcc_ = FindImageProcessorInputFormat();
    if (output_format_fourcc_ == 0) {
      LOG(ERROR) << "Couldn't find a usable input format from image processor";
      return false;
    }
    egl_image_format_fourcc_ = FindImageProcessorOutputFormat();
    if (egl_image_format_fourcc_ == 0) {
      LOG(ERROR) << "Couldn't find a usable output format from image processor";
      return false;
    }
    egl_image_device_ = image_processor_device_;
  } else {
    egl_image_format_fourcc_ = output_format_fourcc_;
    egl_image_device_ = device_;
  }
  DVLOG(2) << __func__ << ": Output format=" << output_format_fourcc_;

  // Just set the fourcc for output; resolution, etc., will come from the
  // driver once it extracts it from the stream.
  memset(&format, 0, sizeof(format));
  format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
  format.fmt.pix_mp.pixelformat = output_format_fourcc_;
  IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_S_FMT, &format);

  return true;
}
1994 | |
1995 uint32_t V4L2VideoDecodeAccelerator::FindImageProcessorInputFormat() { | |
1996 V4L2ImageProcessor image_processor(image_processor_device_); | |
1997 std::vector<uint32_t> processor_input_formats = | |
1998 image_processor.GetSupportedInputFormats(); | |
1999 struct v4l2_fmtdesc fmtdesc; | |
2000 memset(&fmtdesc, 0, sizeof(fmtdesc)); | |
2001 fmtdesc.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; | |
2002 while (device_->Ioctl(VIDIOC_ENUM_FMT, &fmtdesc) == 0) { | |
2003 if (std::find(processor_input_formats.begin(), | |
2004 processor_input_formats.end(), | |
2005 fmtdesc.pixelformat) != processor_input_formats.end()) { | |
2006 DVLOG(1) << "Image processor input format=" << fmtdesc.pixelformat; | |
2007 return fmtdesc.pixelformat; | |
2008 } | |
2009 ++fmtdesc.index; | |
2010 } | |
2011 return 0; | |
2012 } | |
2013 | |
2014 uint32_t V4L2VideoDecodeAccelerator::FindImageProcessorOutputFormat() { | |
2015 V4L2ImageProcessor image_processor(image_processor_device_); | |
2016 std::vector<uint32_t> processor_output_formats = | |
2017 image_processor.GetSupportedOutputFormats(); | |
2018 for (uint32_t processor_output_format : processor_output_formats) { | |
2019 if (device_->CanCreateEGLImageFrom(processor_output_format)) { | |
2020 DVLOG(1) << "Image processor output format=" << processor_output_format; | |
2021 return processor_output_format; | |
2022 } | |
2023 } | |
2024 | |
2025 return 0; | |
2026 } | |
2027 | |
// Determines how many output buffers are needed (driver DPB size plus extra
// headroom), asks the client for picture buffers on the child thread, and
// blocks until AssignPictureBuffers() signals completion. Returns false on
// ioctl failure.
bool V4L2VideoDecodeAccelerator::CreateOutputBuffers() {
  DVLOG(3) << "CreateOutputBuffers()";
  DCHECK(decoder_state_ == kInitialized ||
         decoder_state_ == kChangingResolution);
  DCHECK(!output_streamon_);
  DCHECK(output_buffer_map_.empty());

  // Number of output buffers we need.
  struct v4l2_control ctrl;
  memset(&ctrl, 0, sizeof(ctrl));
  ctrl.id = V4L2_CID_MIN_BUFFERS_FOR_CAPTURE;
  IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_G_CTRL, &ctrl);
  output_dpb_size_ = ctrl.value;

  // Output format setup in Initialize().

  const uint32_t buffer_count = output_dpb_size_ + kDpbOutputBufferExtraCount;
  DVLOG(3) << "CreateOutputBuffers(): ProvidePictureBuffers(): "
           << "buffer_count=" << buffer_count
           << ", coded_size=" << egl_image_size_.ToString();
  child_task_runner_->PostTask(
      FROM_HERE,
      base::Bind(&Client::ProvidePictureBuffers, client_, buffer_count, 1,
                 egl_image_size_, device_->GetTextureTarget()));

  // Wait for the client to call AssignPictureBuffers() on the Child thread.
  // We do this, because if we continue decoding without finishing buffer
  // allocation, we may end up Resetting before AssignPictureBuffers arrives,
  // resulting in unnecessary complications and subtle bugs.
  // For example, if the client calls Decode(Input1), Reset(), Decode(Input2)
  // in a sequence, and Decode(Input1) results in us getting here and exiting
  // without waiting, we might end up running Reset{,Done}Task() before
  // AssignPictureBuffers is scheduled, thus cleaning up and pushing buffers
  // to the free_output_buffers_ map twice. If we somehow marked buffers as
  // not ready, we'd need special handling for restarting the second Decode
  // task and delaying it anyway.
  // Waiting here is not very costly and makes reasoning about different
  // situations much simpler.
  pictures_assigned_.Wait();

  // Buffers are ready; queue as many as possible to the device.
  Enqueue();
  return true;
}
2071 | |
// Unmaps every mapped input buffer and releases them back to the driver
// with a zero-count VIDIOC_REQBUFS. Runs on the child thread; the input
// stream must already be stopped. Errors are logged but not fatal.
void V4L2VideoDecodeAccelerator::DestroyInputBuffers() {
  DVLOG(3) << "DestroyInputBuffers()";
  DCHECK(child_task_runner_->BelongsToCurrentThread());
  DCHECK(!input_streamon_);

  for (size_t i = 0; i < input_buffer_map_.size(); ++i) {
    // Only unmap buffers that were successfully mapped; address is NULL for
    // entries CreateInputBuffers() never got to.
    if (input_buffer_map_[i].address != NULL) {
      device_->Munmap(input_buffer_map_[i].address,
                      input_buffer_map_[i].length);
    }
  }

  // count == 0 tells the driver to free all previously requested buffers.
  struct v4l2_requestbuffers reqbufs;
  memset(&reqbufs, 0, sizeof(reqbufs));
  reqbufs.count = 0;
  reqbufs.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
  reqbufs.memory = V4L2_MEMORY_MMAP;
  IOCTL_OR_LOG_ERROR(VIDIOC_REQBUFS, &reqbufs);

  input_buffer_map_.clear();
  free_input_buffers_.clear();
}
2094 | |
// Destroys all output-side resources: EGLImages and EGLSyncs per buffer,
// the image processor (if any), and the driver's buffer allocation. Also
// dismisses every picture buffer at the client. Runs on the child thread
// with the output stream stopped. Returns false if any step failed, but
// always attempts every step.
bool V4L2VideoDecodeAccelerator::DestroyOutputBuffers() {
  DVLOG(3) << "DestroyOutputBuffers()";
  DCHECK(child_task_runner_->BelongsToCurrentThread());
  DCHECK(!output_streamon_);
  bool success = true;

  for (size_t i = 0; i < output_buffer_map_.size(); ++i) {
    OutputRecord& output_record = output_buffer_map_[i];

    if (output_record.egl_image != EGL_NO_IMAGE_KHR) {
      if (egl_image_device_->DestroyEGLImage(
              egl_display_, output_record.egl_image) != EGL_TRUE) {
        DVLOG(1) << __func__ << " DestroyEGLImage failed.";
        success = false;
      }
    }

    if (output_record.egl_sync != EGL_NO_SYNC_KHR) {
      if (eglDestroySyncKHR(egl_display_, output_record.egl_sync) != EGL_TRUE) {
        DVLOG(1) << __func__ << " eglDestroySyncKHR failed.";
        success = false;
      }
    }

    DVLOG(1) << "DestroyOutputBuffers(): dismissing PictureBuffer id="
             << output_record.picture_id;
    child_task_runner_->PostTask(
        FROM_HERE, base::Bind(&Client::DismissPictureBuffer, client_,
                              output_record.picture_id));
  }

  // The processor destroys itself asynchronously; release ownership first.
  if (image_processor_)
    image_processor_.release()->Destroy();

  // count == 0 frees all driver-side output buffers. Raw Ioctl() here so a
  // failure degrades to success = false instead of NOTIFY_ERROR.
  struct v4l2_requestbuffers reqbufs;
  memset(&reqbufs, 0, sizeof(reqbufs));
  reqbufs.count = 0;
  reqbufs.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
  reqbufs.memory = V4L2_MEMORY_MMAP;
  if (device_->Ioctl(VIDIOC_REQBUFS, &reqbufs) != 0) {
    PLOG(ERROR) << "DestroyOutputBuffers() ioctl() failed: VIDIOC_REQBUFS";
    success = false;
  }

  output_buffer_map_.clear();
  while (!free_output_buffers_.empty())
    free_output_buffers_.pop();
  output_buffer_queued_count_ = 0;
  // The client may still hold some buffers. The texture holds a reference to
  // the buffer. It is OK to free the buffer and destroy EGLImage here.
  decoder_frames_at_client_ = 0;

  return success;
}
2149 | |
// Child-thread half of a resolution change: destroys the old output buffers,
// then hands control back to the decoder thread via FinishResolutionChange().
void V4L2VideoDecodeAccelerator::ResolutionChangeDestroyBuffers() {
  DCHECK(child_task_runner_->BelongsToCurrentThread());
  DVLOG(3) << "ResolutionChangeDestroyBuffers()";

  if (!DestroyOutputBuffers()) {
    LOG(ERROR) << __func__ << " Failed destroying output buffers.";
    NOTIFY_ERROR(PLATFORM_FAILURE);
    return;
  }

  // Finish resolution change on decoder thread.
  decoder_thread_.message_loop()->PostTask(FROM_HERE, base::Bind(
      &V4L2VideoDecodeAccelerator::FinishResolutionChange,
      base::Unretained(this)));
}
2165 | |
2166 void V4L2VideoDecodeAccelerator::SendPictureReady() { | |
2167 DVLOG(3) << "SendPictureReady()"; | |
2168 DCHECK_EQ(decoder_thread_.message_loop(), base::MessageLoop::current()); | |
2169 bool resetting_or_flushing = | |
2170 (decoder_state_ == kResetting || decoder_flushing_); | |
2171 while (pending_picture_ready_.size() > 0) { | |
2172 bool cleared = pending_picture_ready_.front().cleared; | |
2173 const media::Picture& picture = pending_picture_ready_.front().picture; | |
2174 if (cleared && picture_clearing_count_ == 0) { | |
2175 // This picture is cleared. It can be posted to a thread different than | |
2176 // the main GPU thread to reduce latency. This should be the case after | |
2177 // all pictures are cleared at the beginning. | |
2178 decode_task_runner_->PostTask( | |
2179 FROM_HERE, | |
2180 base::Bind(&Client::PictureReady, decode_client_, picture)); | |
2181 pending_picture_ready_.pop(); | |
2182 } else if (!cleared || resetting_or_flushing) { | |
2183 DVLOG(3) << "SendPictureReady()" | |
2184 << ". cleared=" << pending_picture_ready_.front().cleared | |
2185 << ", decoder_state_=" << decoder_state_ | |
2186 << ", decoder_flushing_=" << decoder_flushing_ | |
2187 << ", picture_clearing_count_=" << picture_clearing_count_; | |
2188 // If the picture is not cleared, post it to the child thread because it | |
2189 // has to be cleared in the child thread. A picture only needs to be | |
2190 // cleared once. If the decoder is resetting or flushing, send all | |
2191 // pictures to ensure PictureReady arrive before reset or flush done. | |
2192 child_task_runner_->PostTaskAndReply( | |
2193 FROM_HERE, base::Bind(&Client::PictureReady, client_, picture), | |
2194 // Unretained is safe. If Client::PictureReady gets to run, |this| is | |
2195 // alive. Destroy() will wait the decode thread to finish. | |
2196 base::Bind(&V4L2VideoDecodeAccelerator::PictureCleared, | |
2197 base::Unretained(this))); | |
2198 picture_clearing_count_++; | |
2199 pending_picture_ready_.pop(); | |
2200 } else { | |
2201 // This picture is cleared. But some pictures are about to be cleared on | |
2202 // the child thread. To preserve the order, do not send this until those | |
2203 // pictures are cleared. | |
2204 break; | |
2205 } | |
2206 } | |
2207 } | |
2208 | |
// Reply callback on the decoder thread after the child thread cleared one
// picture: decrements the in-flight clear count and retries delivery of any
// pictures that were held back to preserve ordering.
void V4L2VideoDecodeAccelerator::PictureCleared() {
  DVLOG(3) << "PictureCleared(). clearing count=" << picture_clearing_count_;
  DCHECK_EQ(decoder_thread_.message_loop(), base::MessageLoop::current());
  DCHECK_GT(picture_clearing_count_, 0);
  picture_clearing_count_--;
  SendPictureReady();
}
2216 | |
// Called (on the decoder thread) when the image processor finishes one frame.
// If |bitstream_buffer_id| still heads the expected queue, the processed
// frame is sent to the client as a Picture; otherwise a Reset dropped it and
// the buffer is recycled. When the processor drains completely, any pending
// flush or resolution change is allowed to proceed.
void V4L2VideoDecodeAccelerator::FrameProcessed(int32_t bitstream_buffer_id,
                                                int output_buffer_index) {
  DVLOG(3) << __func__ << ": output_buffer_index=" << output_buffer_index
           << ", bitstream_buffer_id=" << bitstream_buffer_id;
  DCHECK_EQ(decoder_thread_.message_loop(), base::MessageLoop::current());
  DCHECK_GE(output_buffer_index, 0);
  DCHECK_LT(output_buffer_index, static_cast<int>(output_buffer_map_.size()));

  OutputRecord& output_record = output_buffer_map_[output_buffer_index];
  DCHECK_EQ(output_record.state, kAtProcessor);
  if (!image_processor_bitstream_buffer_ids_.empty() &&
      image_processor_bitstream_buffer_ids_.front() == bitstream_buffer_id) {
    DVLOG(3) << __func__ << ": picture_id=" << output_record.picture_id;
    DCHECK_NE(output_record.egl_image, EGL_NO_IMAGE_KHR);
    DCHECK_NE(output_record.picture_id, -1);
    // Send the processed frame to render.
    output_record.state = kAtClient;
    decoder_frames_at_client_++;
    image_processor_bitstream_buffer_ids_.pop();
    const media::Picture picture(output_record.picture_id, bitstream_buffer_id,
                                 gfx::Rect(visible_size_), false);
    // The pre-update |cleared| flag rides with the picture; mark the record
    // cleared afterwards so only the first delivery needs a child-thread
    // clear.
    pending_picture_ready_.push(PictureRecord(output_record.cleared, picture));
    SendPictureReady();
    output_record.cleared = true;
    // Flush or resolution change may be waiting image processor to finish.
    if (image_processor_bitstream_buffer_ids_.empty()) {
      NotifyFlushDoneIfNeeded();
      if (decoder_state_ == kChangingResolution)
        StartResolutionChange();
    }
  } else {
    // A Reset() cleared the expected-id queue while this frame was being
    // processed; the output is stale, so recycle the buffer.
    DVLOG(2) << "Bitstream buffer id " << bitstream_buffer_id << " not found "
             << "because of Reset. Drop the buffer";
    output_record.state = kFree;
    free_output_buffers_.push(output_buffer_index);
    Enqueue();
  }
}
2255 | |
// Error callback from the V4L2 image processor: escalates to a decoder-wide
// PLATFORM_FAILURE.
void V4L2VideoDecodeAccelerator::ImageProcessorError() {
  LOG(ERROR) << "Image processor error";
  NOTIFY_ERROR(PLATFORM_FAILURE);
}
2260 | |
2261 } // namespace content | |
OLD | NEW |