OLD | NEW |
| (Empty) |
1 // Copyright 2015 The Chromium Authors. All rights reserved. | |
2 // Use of this source code is governed by a BSD-style license that can be | |
3 // found in the LICENSE file. | |
4 | |
5 #include "content/common/gpu/media/v4l2_slice_video_decode_accelerator.h" | |
6 | |
7 #include <errno.h> | |
8 #include <fcntl.h> | |
9 #include <linux/videodev2.h> | |
10 #include <poll.h> | |
11 #include <string.h> | |
12 #include <sys/eventfd.h> | |
13 #include <sys/ioctl.h> | |
14 #include <sys/mman.h> | |
15 | |
16 #include <memory> | |
17 | |
18 #include "base/bind.h" | |
19 #include "base/bind_helpers.h" | |
20 #include "base/callback.h" | |
21 #include "base/callback_helpers.h" | |
22 #include "base/command_line.h" | |
23 #include "base/macros.h" | |
24 #include "base/memory/ptr_util.h" | |
25 #include "base/numerics/safe_conversions.h" | |
26 #include "base/strings/stringprintf.h" | |
27 #include "content/common/gpu/media/shared_memory_region.h" | |
28 #include "media/base/bind_to_current_loop.h" | |
29 #include "media/base/media_switches.h" | |
30 #include "ui/gl/gl_context.h" | |
31 #include "ui/gl/scoped_binders.h" | |
32 | |
33 #define LOGF(level) LOG(level) << __FUNCTION__ << "(): " | |
34 #define DVLOGF(level) DVLOG(level) << __FUNCTION__ << "(): " | |
35 | |
36 #define NOTIFY_ERROR(x) \ | |
37 do { \ | |
38 LOG(ERROR) << "Setting error state: " << x; \ | |
39 SetErrorState(x); \ | |
40 } while (0) | |
41 | |
42 #define IOCTL_OR_ERROR_RETURN_VALUE(type, arg, value, type_str) \ | |
43 do { \ | |
44 if (device_->Ioctl(type, arg) != 0) { \ | |
45 PLOG(ERROR) << __FUNCTION__ << "(): ioctl() failed: " << type_str; \ | |
46 return value; \ | |
47 } \ | |
48 } while (0) | |
49 | |
50 #define IOCTL_OR_ERROR_RETURN(type, arg) \ | |
51 IOCTL_OR_ERROR_RETURN_VALUE(type, arg, ((void)0), #type) | |
52 | |
53 #define IOCTL_OR_ERROR_RETURN_FALSE(type, arg) \ | |
54 IOCTL_OR_ERROR_RETURN_VALUE(type, arg, false, #type) | |
55 | |
56 #define IOCTL_OR_LOG_ERROR(type, arg) \ | |
57 do { \ | |
58 if (device_->Ioctl(type, arg) != 0) \ | |
59 PLOG(ERROR) << __FUNCTION__ << "(): ioctl() failed: " << #type; \ | |
60 } while (0) | |
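The do { ... } while (0) wrapper above makes each macro expand to a single statement, so it composes safely with unbraced if/else, and the PLOG variants append strerror(errno) from the failed call. As a rough illustration (not part of this file, and using a raw ioctl() on a file descriptor rather than V4L2Device::Ioctl()), IOCTL_OR_ERROR_RETURN_FALSE boils down to the following pattern:

#include <linux/videodev2.h>
#include <sys/ioctl.h>
#include <cstdio>

// Sketch of what IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_QUERYCAP, &caps) does,
// with perror() standing in for PLOG(ERROR).
static bool QueryCapsOrFail(int device_fd, struct v4l2_capability* caps) {
  do {
    if (ioctl(device_fd, VIDIOC_QUERYCAP, caps) != 0) {
      perror("VIDIOC_QUERYCAP");  // Logs the message plus errno.
      return false;
    }
  } while (0);
  return true;
}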
61 | |
62 namespace content { | |
63 | |
64 // static | |
65 const uint32_t V4L2SliceVideoDecodeAccelerator::supported_input_fourccs_[] = { | |
66 V4L2_PIX_FMT_H264_SLICE, V4L2_PIX_FMT_VP8_FRAME, | |
67 }; | |
68 | |
69 class V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface | |
70 : public base::RefCounted<V4L2DecodeSurface> { | |
71 public: | |
72 using ReleaseCB = base::Callback<void(int)>; | |
73 | |
74 V4L2DecodeSurface(int32_t bitstream_id, | |
75 int input_record, | |
76 int output_record, | |
77 const ReleaseCB& release_cb); | |
78 | |
79 // Mark the surface as decoded. This will also release all references, as | |
80 // they are not needed anymore. | |
81 void SetDecoded(); | |
82 bool decoded() const { return decoded_; } | |
83 | |
84 int32_t bitstream_id() const { return bitstream_id_; } | |
85 int input_record() const { return input_record_; } | |
86 int output_record() const { return output_record_; } | |
87 uint32_t config_store() const { return config_store_; } | |
88 | |
89 // Take references to each reference surface and keep them until the | |
90 // target surface is decoded. | |
91 void SetReferenceSurfaces( | |
92 const std::vector<scoped_refptr<V4L2DecodeSurface>>& ref_surfaces); | |
93 | |
94 std::string ToString() const; | |
95 | |
96 private: | |
97 friend class base::RefCounted<V4L2DecodeSurface>; | |
98 ~V4L2DecodeSurface(); | |
99 | |
100 int32_t bitstream_id_; | |
101 int input_record_; | |
102 int output_record_; | |
103 uint32_t config_store_; | |
104 | |
105 bool decoded_; | |
106 ReleaseCB release_cb_; | |
107 | |
108 std::vector<scoped_refptr<V4L2DecodeSurface>> reference_surfaces_; | |
109 | |
110 DISALLOW_COPY_AND_ASSIGN(V4L2DecodeSurface); | |
111 }; | |
112 | |
113 V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface::V4L2DecodeSurface( | |
114 int32_t bitstream_id, | |
115 int input_record, | |
116 int output_record, | |
117 const ReleaseCB& release_cb) | |
118 : bitstream_id_(bitstream_id), | |
119 input_record_(input_record), | |
120 output_record_(output_record), | |
121 config_store_(input_record + 1), | |
122 decoded_(false), | |
123 release_cb_(release_cb) {} | |
124 | |
125 V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface::~V4L2DecodeSurface() { | |
126 DVLOGF(5) << "Releasing output record id=" << output_record_; | |
127 release_cb_.Run(output_record_); | |
128 } | |
129 | |
130 void V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface::SetReferenceSurfaces( | |
131 const std::vector<scoped_refptr<V4L2DecodeSurface>>& ref_surfaces) { | |
132 DCHECK(reference_surfaces_.empty()); | |
133 reference_surfaces_ = ref_surfaces; | |
134 } | |
135 | |
136 void V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface::SetDecoded() { | |
137 DCHECK(!decoded_); | |
138 decoded_ = true; | |
139 | |
140 // We can now drop references to all reference surfaces for this surface | |
141 // as we are done with decoding. | |
142 reference_surfaces_.clear(); | |
143 } | |
144 | |
145 std::string V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface::ToString() | |
146 const { | |
147 std::string out; | |
148 base::StringAppendF(&out, "Buffer %d -> %d. ", input_record_, output_record_); | |
149 base::StringAppendF(&out, "Reference surfaces:"); | |
150 for (const auto& ref : reference_surfaces_) { | |
151 DCHECK_NE(ref->output_record(), output_record_); | |
152 base::StringAppendF(&out, " %d", ref->output_record()); | |
153 } | |
154 return out; | |
155 } | |
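To summarize the ownership scheme above: a surface holds scoped_refptr references to the surfaces it depends on only until SetDecoded() is called, and runs release_cb_ with its output record index once the last reference to it goes away. The sketch below models that with a simplified stand-alone type (std::shared_ptr and std::function standing in for base::RefCounted and base::Callback; the names are illustrative, not the real class):

#include <functional>
#include <iostream>
#include <memory>
#include <utility>
#include <vector>

// Simplified model of V4L2DecodeSurface's reference handling.
struct Surface {
  Surface(int output_record, std::function<void(int)> release_cb)
      : output_record(output_record), release_cb(std::move(release_cb)) {}
  ~Surface() { release_cb(output_record); }  // Return the buffer on last ref.

  void SetReferenceSurfaces(std::vector<std::shared_ptr<Surface>> refs) {
    reference_surfaces = std::move(refs);
  }
  void SetDecoded() { reference_surfaces.clear(); }  // Done; drop references.

  int output_record;
  std::function<void(int)> release_cb;
  std::vector<std::shared_ptr<Surface>> reference_surfaces;
};

int main() {
  auto release = [](int index) { std::cout << "free output " << index << "\n"; };
  auto reference = std::make_shared<Surface>(0, release);
  auto target = std::make_shared<Surface>(1, release);
  target->SetReferenceSurfaces({reference});
  reference.reset();     // Not freed yet: |target| still holds a reference.
  target->SetDecoded();  // Prints "free output 0".
  target.reset();        // Prints "free output 1".
}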
156 | |
157 V4L2SliceVideoDecodeAccelerator::InputRecord::InputRecord() | |
158 : input_id(-1), | |
159 address(nullptr), | |
160 length(0), | |
161 bytes_used(0), | |
162 at_device(false) { | |
163 } | |
164 | |
165 V4L2SliceVideoDecodeAccelerator::OutputRecord::OutputRecord() | |
166 : at_device(false), | |
167 at_client(false), | |
168 picture_id(-1), | |
169 texture_id(0), | |
170 egl_image(EGL_NO_IMAGE_KHR), | |
171 egl_sync(EGL_NO_SYNC_KHR), | |
172 cleared(false) {} | |
173 | |
174 struct V4L2SliceVideoDecodeAccelerator::BitstreamBufferRef { | |
175 BitstreamBufferRef( | |
176 base::WeakPtr<VideoDecodeAccelerator::Client>& client, | |
177 const scoped_refptr<base::SingleThreadTaskRunner>& client_task_runner, | |
178 SharedMemoryRegion* shm, | |
179 int32_t input_id); | |
180 ~BitstreamBufferRef(); | |
181 const base::WeakPtr<VideoDecodeAccelerator::Client> client; | |
182 const scoped_refptr<base::SingleThreadTaskRunner> client_task_runner; | |
183 const std::unique_ptr<SharedMemoryRegion> shm; | |
184 off_t bytes_used; | |
185 const int32_t input_id; | |
186 }; | |
187 | |
188 V4L2SliceVideoDecodeAccelerator::BitstreamBufferRef::BitstreamBufferRef( | |
189 base::WeakPtr<VideoDecodeAccelerator::Client>& client, | |
190 const scoped_refptr<base::SingleThreadTaskRunner>& client_task_runner, | |
191 SharedMemoryRegion* shm, | |
192 int32_t input_id) | |
193 : client(client), | |
194 client_task_runner(client_task_runner), | |
195 shm(shm), | |
196 bytes_used(0), | |
197 input_id(input_id) {} | |
198 | |
199 V4L2SliceVideoDecodeAccelerator::BitstreamBufferRef::~BitstreamBufferRef() { | |
200 if (input_id >= 0) { | |
201 DVLOGF(5) << "returning input_id: " << input_id; | |
202 client_task_runner->PostTask( | |
203 FROM_HERE, | |
204 base::Bind(&VideoDecodeAccelerator::Client::NotifyEndOfBitstreamBuffer, | |
205 client, input_id)); | |
206 } | |
207 } | |
208 | |
209 struct V4L2SliceVideoDecodeAccelerator::EGLSyncKHRRef { | |
210 EGLSyncKHRRef(EGLDisplay egl_display, EGLSyncKHR egl_sync); | |
211 ~EGLSyncKHRRef(); | |
212 EGLDisplay const egl_display; | |
213 EGLSyncKHR egl_sync; | |
214 }; | |
215 | |
216 V4L2SliceVideoDecodeAccelerator::EGLSyncKHRRef::EGLSyncKHRRef( | |
217 EGLDisplay egl_display, | |
218 EGLSyncKHR egl_sync) | |
219 : egl_display(egl_display), egl_sync(egl_sync) { | |
220 } | |
221 | |
222 V4L2SliceVideoDecodeAccelerator::EGLSyncKHRRef::~EGLSyncKHRRef() { | |
223 // We don't check for eglDestroySyncKHR failures, because if we get here | |
224 // with a valid sync object, something went wrong and we are getting | |
225 // destroyed anyway. | |
226 if (egl_sync != EGL_NO_SYNC_KHR) | |
227 eglDestroySyncKHR(egl_display, egl_sync); | |
228 } | |
229 | |
230 struct V4L2SliceVideoDecodeAccelerator::PictureRecord { | |
231 PictureRecord(bool cleared, const media::Picture& picture); | |
232 ~PictureRecord(); | |
233 bool cleared; // Whether the texture is cleared and safe to render from. | |
234 media::Picture picture; // The decoded picture. | |
235 }; | |
236 | |
237 V4L2SliceVideoDecodeAccelerator::PictureRecord::PictureRecord( | |
238 bool cleared, | |
239 const media::Picture& picture) | |
240 : cleared(cleared), picture(picture) { | |
241 } | |
242 | |
243 V4L2SliceVideoDecodeAccelerator::PictureRecord::~PictureRecord() { | |
244 } | |
245 | |
246 class V4L2SliceVideoDecodeAccelerator::V4L2H264Accelerator | |
247 : public H264Decoder::H264Accelerator { | |
248 public: | |
249 V4L2H264Accelerator(V4L2SliceVideoDecodeAccelerator* v4l2_dec); | |
250 ~V4L2H264Accelerator() override; | |
251 | |
252 // H264Decoder::H264Accelerator implementation. | |
253 scoped_refptr<H264Picture> CreateH264Picture() override; | |
254 | |
255 bool SubmitFrameMetadata(const media::H264SPS* sps, | |
256 const media::H264PPS* pps, | |
257 const H264DPB& dpb, | |
258 const H264Picture::Vector& ref_pic_listp0, | |
259 const H264Picture::Vector& ref_pic_listb0, | |
260 const H264Picture::Vector& ref_pic_listb1, | |
261 const scoped_refptr<H264Picture>& pic) override; | |
262 | |
263 bool SubmitSlice(const media::H264PPS* pps, | |
264 const media::H264SliceHeader* slice_hdr, | |
265 const H264Picture::Vector& ref_pic_list0, | |
266 const H264Picture::Vector& ref_pic_list1, | |
267 const scoped_refptr<H264Picture>& pic, | |
268 const uint8_t* data, | |
269 size_t size) override; | |
270 | |
271 bool SubmitDecode(const scoped_refptr<H264Picture>& pic) override; | |
272 bool OutputPicture(const scoped_refptr<H264Picture>& pic) override; | |
273 | |
274 void Reset() override; | |
275 | |
276 private: | |
277 // Max size of reference list. | |
278 static const size_t kDPBIndicesListSize = 32; | |
279 void H264PictureListToDPBIndicesList(const H264Picture::Vector& src_pic_list, | |
280 uint8_t dst_list[kDPBIndicesListSize]); | |
281 | |
282 void H264DPBToV4L2DPB( | |
283 const H264DPB& dpb, | |
284 std::vector<scoped_refptr<V4L2DecodeSurface>>* ref_surfaces); | |
285 | |
286 scoped_refptr<V4L2DecodeSurface> H264PictureToV4L2DecodeSurface( | |
287 const scoped_refptr<H264Picture>& pic); | |
288 | |
289 size_t num_slices_; | |
290 V4L2SliceVideoDecodeAccelerator* v4l2_dec_; | |
291 | |
292 // TODO(posciak): This should be queried from hardware once supported. | |
293 static const size_t kMaxSlices = 16; | |
294 struct v4l2_ctrl_h264_slice_param v4l2_slice_params_[kMaxSlices]; | |
295 struct v4l2_ctrl_h264_decode_param v4l2_decode_param_; | |
296 | |
297 DISALLOW_COPY_AND_ASSIGN(V4L2H264Accelerator); | |
298 }; | |
299 | |
300 class V4L2SliceVideoDecodeAccelerator::V4L2VP8Accelerator | |
301 : public VP8Decoder::VP8Accelerator { | |
302 public: | |
303 V4L2VP8Accelerator(V4L2SliceVideoDecodeAccelerator* v4l2_dec); | |
304 ~V4L2VP8Accelerator() override; | |
305 | |
306 // VP8Decoder::VP8Accelerator implementation. | |
307 scoped_refptr<VP8Picture> CreateVP8Picture() override; | |
308 | |
309 bool SubmitDecode(const scoped_refptr<VP8Picture>& pic, | |
310 const media::Vp8FrameHeader* frame_hdr, | |
311 const scoped_refptr<VP8Picture>& last_frame, | |
312 const scoped_refptr<VP8Picture>& golden_frame, | |
313 const scoped_refptr<VP8Picture>& alt_frame) override; | |
314 | |
315 bool OutputPicture(const scoped_refptr<VP8Picture>& pic) override; | |
316 | |
317 private: | |
318 scoped_refptr<V4L2DecodeSurface> VP8PictureToV4L2DecodeSurface( | |
319 const scoped_refptr<VP8Picture>& pic); | |
320 | |
321 V4L2SliceVideoDecodeAccelerator* v4l2_dec_; | |
322 | |
323 DISALLOW_COPY_AND_ASSIGN(V4L2VP8Accelerator); | |
324 }; | |
325 | |
326 // Codec-specific subclasses of software decoder picture classes. | |
327 // This allows us to keep decoders oblivious of our implementation details. | |
328 class V4L2H264Picture : public H264Picture { | |
329 public: | |
330 V4L2H264Picture(const scoped_refptr< | |
331 V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface>& dec_surface); | |
332 | |
333 V4L2H264Picture* AsV4L2H264Picture() override { return this; } | |
334 scoped_refptr<V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface> | |
335 dec_surface() { | |
336 return dec_surface_; | |
337 } | |
338 | |
339 private: | |
340 ~V4L2H264Picture() override; | |
341 | |
342 scoped_refptr<V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface> | |
343 dec_surface_; | |
344 | |
345 DISALLOW_COPY_AND_ASSIGN(V4L2H264Picture); | |
346 }; | |
347 | |
348 V4L2H264Picture::V4L2H264Picture(const scoped_refptr< | |
349 V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface>& dec_surface) | |
350 : dec_surface_(dec_surface) { | |
351 } | |
352 | |
353 V4L2H264Picture::~V4L2H264Picture() { | |
354 } | |
355 | |
356 class V4L2VP8Picture : public VP8Picture { | |
357 public: | |
358 V4L2VP8Picture(const scoped_refptr< | |
359 V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface>& dec_surface); | |
360 | |
361 V4L2VP8Picture* AsV4L2VP8Picture() override { return this; } | |
362 scoped_refptr<V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface> | |
363 dec_surface() { | |
364 return dec_surface_; | |
365 } | |
366 | |
367 private: | |
368 ~V4L2VP8Picture() override; | |
369 | |
370 scoped_refptr<V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface> | |
371 dec_surface_; | |
372 | |
373 DISALLOW_COPY_AND_ASSIGN(V4L2VP8Picture); | |
374 }; | |
375 | |
376 V4L2VP8Picture::V4L2VP8Picture(const scoped_refptr< | |
377 V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface>& dec_surface) | |
378 : dec_surface_(dec_surface) { | |
379 } | |
380 | |
381 V4L2VP8Picture::~V4L2VP8Picture() { | |
382 } | |
383 | |
384 V4L2SliceVideoDecodeAccelerator::V4L2SliceVideoDecodeAccelerator( | |
385 const scoped_refptr<V4L2Device>& device, | |
386 EGLDisplay egl_display, | |
387 const GetGLContextCallback& get_gl_context_cb, | |
388 const MakeGLContextCurrentCallback& make_context_current_cb) | |
389 : input_planes_count_(0), | |
390 output_planes_count_(0), | |
391 child_task_runner_(base::ThreadTaskRunnerHandle::Get()), | |
392 device_(device), | |
393 decoder_thread_("V4L2SliceVideoDecodeAcceleratorThread"), | |
394 device_poll_thread_("V4L2SliceVideoDecodeAcceleratorDevicePollThread"), | |
395 input_streamon_(false), | |
396 input_buffer_queued_count_(0), | |
397 output_streamon_(false), | |
398 output_buffer_queued_count_(0), | |
399 video_profile_(media::VIDEO_CODEC_PROFILE_UNKNOWN), | |
400 output_format_fourcc_(0), | |
401 state_(kUninitialized), | |
402 output_mode_(Config::OutputMode::ALLOCATE), | |
403 decoder_flushing_(false), | |
404 decoder_resetting_(false), | |
405 surface_set_change_pending_(false), | |
406 picture_clearing_count_(0), | |
407 egl_display_(egl_display), | |
408 get_gl_context_cb_(get_gl_context_cb), | |
409 make_context_current_cb_(make_context_current_cb), | |
410 weak_this_factory_(this) { | |
411 weak_this_ = weak_this_factory_.GetWeakPtr(); | |
412 } | |
413 | |
414 V4L2SliceVideoDecodeAccelerator::~V4L2SliceVideoDecodeAccelerator() { | |
415 DVLOGF(2); | |
416 | |
417 DCHECK(child_task_runner_->BelongsToCurrentThread()); | |
418 DCHECK(!decoder_thread_.IsRunning()); | |
419 DCHECK(!device_poll_thread_.IsRunning()); | |
420 | |
421 DCHECK(input_buffer_map_.empty()); | |
422 DCHECK(output_buffer_map_.empty()); | |
423 } | |
424 | |
425 void V4L2SliceVideoDecodeAccelerator::NotifyError(Error error) { | |
426 if (!child_task_runner_->BelongsToCurrentThread()) { | |
427 child_task_runner_->PostTask( | |
428 FROM_HERE, base::Bind(&V4L2SliceVideoDecodeAccelerator::NotifyError, | |
429 weak_this_, error)); | |
430 return; | |
431 } | |
432 | |
433 if (client_) { | |
434 client_->NotifyError(error); | |
435 client_ptr_factory_.reset(); | |
436 } | |
437 } | |
438 | |
439 bool V4L2SliceVideoDecodeAccelerator::Initialize(const Config& config, | |
440 Client* client) { | |
441 DVLOGF(3) << "profile: " << config.profile; | |
442 DCHECK(child_task_runner_->BelongsToCurrentThread()); | |
443 DCHECK_EQ(state_, kUninitialized); | |
444 | |
445 if (!device_->SupportsDecodeProfileForV4L2PixelFormats( | |
446 config.profile, arraysize(supported_input_fourccs_), | |
447 supported_input_fourccs_)) { | |
448 DVLOGF(1) << "unsupported profile " << config.profile; | |
449 return false; | |
450 } | |
451 | |
452 if (config.is_encrypted) { | |
453 NOTREACHED() << "Encrypted streams are not supported for this VDA"; | |
454 return false; | |
455 } | |
456 | |
457 if (config.output_mode != Config::OutputMode::ALLOCATE && | |
458 config.output_mode != Config::OutputMode::IMPORT) { | |
459 NOTREACHED() << "Only ALLOCATE and IMPORT OutputModes are supported"; | |
460 return false; | |
461 } | |
462 | |
463 client_ptr_factory_.reset( | |
464 new base::WeakPtrFactory<VideoDecodeAccelerator::Client>(client)); | |
465 client_ = client_ptr_factory_->GetWeakPtr(); | |
466 // If we haven't been set up to decode on a separate thread via | |
467 // TryToSetupDecodeOnSeparateThread(), use the main thread/client for | |
468 // decode tasks. | |
469 if (!decode_task_runner_) { | |
470 decode_task_runner_ = child_task_runner_; | |
471 DCHECK(!decode_client_); | |
472 decode_client_ = client_; | |
473 } | |
474 | |
475 video_profile_ = config.profile; | |
476 | |
477 if (video_profile_ >= media::H264PROFILE_MIN && | |
478 video_profile_ <= media::H264PROFILE_MAX) { | |
479 h264_accelerator_.reset(new V4L2H264Accelerator(this)); | |
480 decoder_.reset(new H264Decoder(h264_accelerator_.get())); | |
481 } else if (video_profile_ >= media::VP8PROFILE_MIN && | |
482 video_profile_ <= media::VP8PROFILE_MAX) { | |
483 vp8_accelerator_.reset(new V4L2VP8Accelerator(this)); | |
484 decoder_.reset(new VP8Decoder(vp8_accelerator_.get())); | |
485 } else { | |
486 NOTREACHED() << "Unsupported profile " << video_profile_; | |
487 return false; | |
488 } | |
489 | |
490 // TODO(posciak): This needs to be queried once supported. | |
491 input_planes_count_ = 1; | |
492 output_planes_count_ = 1; | |
493 | |
494 if (egl_display_ == EGL_NO_DISPLAY) { | |
495 LOG(ERROR) << "Initialize(): could not get EGLDisplay"; | |
496 return false; | |
497 } | |
498 | |
499 // We need the context to be initialized to query extensions. | |
500 if (!make_context_current_cb_.is_null()) { | |
501 if (!make_context_current_cb_.Run()) { | |
502 LOG(ERROR) << "Initialize(): could not make context current"; | |
503 return false; | |
504 } | |
505 | |
506 if (!gfx::g_driver_egl.ext.b_EGL_KHR_fence_sync) { | |
507 LOG(ERROR) << "Initialize(): context does not have EGL_KHR_fence_sync"; | |
508 return false; | |
509 } | |
510 } else { | |
511 DVLOG(1) << "No GL callbacks provided, initializing without GL support"; | |
512 } | |
513 | |
514 // Capabilities check. | |
515 struct v4l2_capability caps; | |
516 const __u32 kCapsRequired = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING; | |
517 IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_QUERYCAP, &caps); | |
518 if ((caps.capabilities & kCapsRequired) != kCapsRequired) { | |
519 LOG(ERROR) << "Initialize(): ioctl() failed: VIDIOC_QUERYCAP" | |
520 ", caps check failed: 0x" << std::hex << caps.capabilities; | |
521 return false; | |
522 } | |
523 | |
524 if (!SetupFormats()) | |
525 return false; | |
526 | |
527 if (!decoder_thread_.Start()) { | |
528 DLOG(ERROR) << "Initialize(): decoder thread failed to start"; | |
529 return false; | |
530 } | |
531 decoder_thread_task_runner_ = decoder_thread_.task_runner(); | |
532 | |
533 state_ = kInitialized; | |
534 output_mode_ = config.output_mode; | |
535 | |
536 // InitializeTask will NOTIFY_ERROR on failure. | |
537 decoder_thread_task_runner_->PostTask( | |
538 FROM_HERE, base::Bind(&V4L2SliceVideoDecodeAccelerator::InitializeTask, | |
539 base::Unretained(this))); | |
540 | |
541 DVLOGF(1) << "V4L2SliceVideoDecodeAccelerator initialized"; | |
542 return true; | |
543 } | |
544 | |
545 void V4L2SliceVideoDecodeAccelerator::InitializeTask() { | |
546 DVLOGF(3); | |
547 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread()); | |
548 DCHECK_EQ(state_, kInitialized); | |
549 | |
550 if (!CreateInputBuffers()) | |
551 NOTIFY_ERROR(PLATFORM_FAILURE); | |
552 | |
553 // Output buffers will be created once decoder gives us information | |
554 // about their size and required count. | |
555 state_ = kDecoding; | |
556 } | |
557 | |
558 void V4L2SliceVideoDecodeAccelerator::Destroy() { | |
559 DVLOGF(3); | |
560 DCHECK(child_task_runner_->BelongsToCurrentThread()); | |
561 | |
562 if (decoder_thread_.IsRunning()) { | |
563 decoder_thread_task_runner_->PostTask( | |
564 FROM_HERE, base::Bind(&V4L2SliceVideoDecodeAccelerator::DestroyTask, | |
565 base::Unretained(this))); | |
566 | |
567 // Wait for tasks to finish/early-exit. | |
568 decoder_thread_.Stop(); | |
569 } | |
570 | |
571 delete this; | |
572 DVLOGF(3) << "Destroyed"; | |
573 } | |
574 | |
575 void V4L2SliceVideoDecodeAccelerator::DestroyTask() { | |
576 DVLOGF(3); | |
577 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread()); | |
578 | |
579 state_ = kError; | |
580 | |
581 decoder_->Reset(); | |
582 | |
583 decoder_current_bitstream_buffer_.reset(); | |
584 while (!decoder_input_queue_.empty()) | |
585 decoder_input_queue_.pop(); | |
586 | |
587 // Stop streaming and the device_poll_thread_. | |
588 StopDevicePoll(false); | |
589 | |
590 DestroyInputBuffers(); | |
591 DestroyOutputs(false); | |
592 | |
593 DCHECK(surfaces_at_device_.empty()); | |
594 DCHECK(surfaces_at_display_.empty()); | |
595 DCHECK(decoder_display_queue_.empty()); | |
596 } | |
597 | |
598 bool V4L2SliceVideoDecodeAccelerator::SetupFormats() { | |
599 DCHECK_EQ(state_, kUninitialized); | |
600 | |
601 __u32 input_format_fourcc = | |
602 V4L2Device::VideoCodecProfileToV4L2PixFmt(video_profile_, true); | |
603 if (!input_format_fourcc) { | |
604 NOTREACHED(); | |
605 return false; | |
606 } | |
607 | |
608 size_t input_size; | |
609 gfx::Size max_resolution, min_resolution; | |
610 device_->GetSupportedResolution(input_format_fourcc, &min_resolution, | |
611 &max_resolution); | |
612 if (max_resolution.width() > 1920 && max_resolution.height() > 1088) | |
613 input_size = kInputBufferMaxSizeFor4k; | |
614 else | |
615 input_size = kInputBufferMaxSizeFor1080p; | |
616 | |
617 struct v4l2_fmtdesc fmtdesc; | |
618 memset(&fmtdesc, 0, sizeof(fmtdesc)); | |
619 fmtdesc.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE; | |
620 bool is_format_supported = false; | |
621 while (device_->Ioctl(VIDIOC_ENUM_FMT, &fmtdesc) == 0) { | |
622 if (fmtdesc.pixelformat == input_format_fourcc) { | |
623 is_format_supported = true; | |
624 break; | |
625 } | |
626 ++fmtdesc.index; | |
627 } | |
628 | |
629 if (!is_format_supported) { | |
630 DVLOG(1) << "Input fourcc " << input_format_fourcc | |
631 << " not supported by device."; | |
632 return false; | |
633 } | |
634 | |
635 struct v4l2_format format; | |
636 memset(&format, 0, sizeof(format)); | |
637 format.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE; | |
638 format.fmt.pix_mp.pixelformat = input_format_fourcc; | |
639 format.fmt.pix_mp.plane_fmt[0].sizeimage = input_size; | |
640 format.fmt.pix_mp.num_planes = input_planes_count_; | |
641 IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_S_FMT, &format); | |
642 | |
643 // We have to set up the format for output, because the driver may not allow | |
644 // changing it once we start streaming; whether it can support our chosen | |
645 // output format or not may depend on the input format. | |
646 memset(&fmtdesc, 0, sizeof(fmtdesc)); | |
647 fmtdesc.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; | |
648 output_format_fourcc_ = 0; | |
649 while (device_->Ioctl(VIDIOC_ENUM_FMT, &fmtdesc) == 0) { | |
650 if (device_->CanCreateEGLImageFrom(fmtdesc.pixelformat)) { | |
651 output_format_fourcc_ = fmtdesc.pixelformat; | |
652 break; | |
653 } | |
654 ++fmtdesc.index; | |
655 } | |
656 | |
657 if (output_format_fourcc_ == 0) { | |
658 LOG(ERROR) << "Could not find a usable output format"; | |
659 return false; | |
660 } | |
661 | |
662 // Only set fourcc for output; resolution, etc., will come from the | |
663 // driver once it extracts it from the stream. | |
664 memset(&format, 0, sizeof(format)); | |
665 format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; | |
666 format.fmt.pix_mp.pixelformat = output_format_fourcc_; | |
667 format.fmt.pix_mp.num_planes = output_planes_count_; | |
668 IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_S_FMT, &format); | |
669 | |
670 return true; | |
671 } | |
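The two loops above are the standard V4L2 format negotiation idiom: walk VIDIOC_ENUM_FMT with an increasing index until the driver runs out of formats, then commit the chosen fourcc with VIDIOC_S_FMT. A stand-alone sketch of that enumeration against a raw file descriptor (illustrative only; the real code goes through V4L2Device::Ioctl() and uses CanCreateEGLImageFrom() as the predicate):

#include <linux/videodev2.h>
#include <sys/ioctl.h>
#include <cstring>
#include <functional>

// Return the first CAPTURE_MPLANE pixel format on |fd| accepted by
// |is_usable|, or 0 if none matches.
static __u32 PickCaptureFormat(int fd,
                               const std::function<bool(__u32)>& is_usable) {
  struct v4l2_fmtdesc fmtdesc;
  memset(&fmtdesc, 0, sizeof(fmtdesc));
  fmtdesc.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
  while (ioctl(fd, VIDIOC_ENUM_FMT, &fmtdesc) == 0) {
    if (is_usable(fmtdesc.pixelformat))
      return fmtdesc.pixelformat;
    ++fmtdesc.index;  // ENUM_FMT fails with EINVAL once we run past the end.
  }
  return 0;
}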
672 | |
673 bool V4L2SliceVideoDecodeAccelerator::CreateInputBuffers() { | |
674 DVLOGF(3); | |
675 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread()); | |
676 DCHECK(!input_streamon_); | |
677 DCHECK(input_buffer_map_.empty()); | |
678 | |
679 struct v4l2_requestbuffers reqbufs; | |
680 memset(&reqbufs, 0, sizeof(reqbufs)); | |
681 reqbufs.count = kNumInputBuffers; | |
682 reqbufs.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE; | |
683 reqbufs.memory = V4L2_MEMORY_MMAP; | |
684 IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_REQBUFS, &reqbufs); | |
685 if (reqbufs.count < kNumInputBuffers) { | |
686 PLOG(ERROR) << "Could not allocate enough input buffers"; | |
687 return false; | |
688 } | |
689 input_buffer_map_.resize(reqbufs.count); | |
690 for (size_t i = 0; i < input_buffer_map_.size(); ++i) { | |
691 free_input_buffers_.push_back(i); | |
692 | |
693 // Query for the MEMORY_MMAP pointer. | |
694 struct v4l2_plane planes[VIDEO_MAX_PLANES]; | |
695 struct v4l2_buffer buffer; | |
696 memset(&buffer, 0, sizeof(buffer)); | |
697 memset(planes, 0, sizeof(planes)); | |
698 buffer.index = i; | |
699 buffer.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE; | |
700 buffer.memory = V4L2_MEMORY_MMAP; | |
701 buffer.m.planes = planes; | |
702 buffer.length = input_planes_count_; | |
703 IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_QUERYBUF, &buffer); | |
704 void* address = device_->Mmap(nullptr, | |
705 buffer.m.planes[0].length, | |
706 PROT_READ | PROT_WRITE, | |
707 MAP_SHARED, | |
708 buffer.m.planes[0].m.mem_offset); | |
709 if (address == MAP_FAILED) { | |
710 PLOG(ERROR) << "CreateInputBuffers(): mmap() failed"; | |
711 return false; | |
712 } | |
713 input_buffer_map_[i].address = address; | |
714 input_buffer_map_[i].length = buffer.m.planes[0].length; | |
715 } | |
716 | |
717 return true; | |
718 } | |
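CreateInputBuffers() follows the canonical V4L2 MMAP allocation sequence: VIDIOC_REQBUFS asks the driver to allocate buffers, VIDIOC_QUERYBUF reports each plane's length and mem_offset, and mmap() on the device fd maps the plane into the process. A condensed sketch of that sequence for a single buffer on a raw fd (illustrative only; the real code uses the V4L2Device wrappers and fills input_buffer_map_):

#include <linux/videodev2.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <cstddef>
#include <cstring>

// Allocate one MMAP buffer on the OUTPUT (input-to-codec) queue of |fd| and
// map its first plane. Returns nullptr on failure.
static void* AllocAndMapOneInputBuffer(int fd, size_t* out_length) {
  struct v4l2_requestbuffers reqbufs;
  memset(&reqbufs, 0, sizeof(reqbufs));
  reqbufs.count = 1;
  reqbufs.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
  reqbufs.memory = V4L2_MEMORY_MMAP;
  if (ioctl(fd, VIDIOC_REQBUFS, &reqbufs) != 0 || reqbufs.count < 1)
    return nullptr;

  struct v4l2_plane planes[VIDEO_MAX_PLANES];
  struct v4l2_buffer buffer;
  memset(&buffer, 0, sizeof(buffer));
  memset(planes, 0, sizeof(planes));
  buffer.index = 0;
  buffer.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
  buffer.memory = V4L2_MEMORY_MMAP;
  buffer.m.planes = planes;
  buffer.length = 1;  // Single input plane, as in the code above.
  if (ioctl(fd, VIDIOC_QUERYBUF, &buffer) != 0)
    return nullptr;

  *out_length = buffer.m.planes[0].length;
  void* address = mmap(nullptr, buffer.m.planes[0].length,
                       PROT_READ | PROT_WRITE, MAP_SHARED, fd,
                       buffer.m.planes[0].m.mem_offset);
  return address == MAP_FAILED ? nullptr : address;
}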
719 | |
720 bool V4L2SliceVideoDecodeAccelerator::CreateOutputBuffers() { | |
721 DVLOGF(3); | |
722 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread()); | |
723 DCHECK(!output_streamon_); | |
724 DCHECK(output_buffer_map_.empty()); | |
725 DCHECK(surfaces_at_display_.empty()); | |
726 DCHECK(surfaces_at_device_.empty()); | |
727 | |
728 visible_size_ = decoder_->GetPicSize(); | |
729 size_t num_pictures = decoder_->GetRequiredNumOfPictures(); | |
730 | |
731 DCHECK_GT(num_pictures, 0u); | |
732 DCHECK(!visible_size_.IsEmpty()); | |
733 | |
734 struct v4l2_format format; | |
735 memset(&format, 0, sizeof(format)); | |
736 format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; | |
737 format.fmt.pix_mp.pixelformat = output_format_fourcc_; | |
738 format.fmt.pix_mp.width = visible_size_.width(); | |
739 format.fmt.pix_mp.height = visible_size_.height(); | |
740 format.fmt.pix_mp.num_planes = output_planes_count_; | |
741 | |
742 if (device_->Ioctl(VIDIOC_S_FMT, &format) != 0) { | |
743 PLOG(ERROR) << "Failed setting format to: " << output_format_fourcc_; | |
744 NOTIFY_ERROR(PLATFORM_FAILURE); | |
745 return false; | |
746 } | |
747 | |
748 coded_size_.SetSize(base::checked_cast<int>(format.fmt.pix_mp.width), | |
749 base::checked_cast<int>(format.fmt.pix_mp.height)); | |
750 DCHECK_EQ(coded_size_.width() % 16, 0); | |
751 DCHECK_EQ(coded_size_.height() % 16, 0); | |
752 | |
753 if (!gfx::Rect(coded_size_).Contains(gfx::Rect(visible_size_))) { | |
754 LOG(ERROR) << "Got invalid adjusted coded size: " << coded_size_.ToString(); | |
755 return false; | |
756 } | |
757 | |
758 DVLOGF(3) << "buffer_count=" << num_pictures | |
759 << ", visible size=" << visible_size_.ToString() | |
760 << ", coded size=" << coded_size_.ToString(); | |
761 | |
762 child_task_runner_->PostTask( | |
763 FROM_HERE, | |
764 base::Bind(&VideoDecodeAccelerator::Client::ProvidePictureBuffers, | |
765 client_, num_pictures, 1, coded_size_, | |
766 device_->GetTextureTarget())); | |
767 | |
768 // Go into kAwaitingPictureBuffers to prevent us from doing any more decoding | |
769 // or event handling while we are waiting for AssignPictureBuffers(). Not | |
770 // having Pictures available would not have entirely prevented us from making | |
771 // decoding progress, e.g. in the case of H.264, where we could still decode | |
772 // non-slice NALUs and could even get another resolution change before we were | |
773 // done with this one. After we get the buffers, we'll go back into kIdle and | |
774 // kick off further event processing, and eventually go back into kDecoding | |
775 // once no more events are pending (if any). | |
776 state_ = kAwaitingPictureBuffers; | |
777 return true; | |
778 } | |
779 | |
780 void V4L2SliceVideoDecodeAccelerator::DestroyInputBuffers() { | |
781 DVLOGF(3); | |
782 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread() || | |
783 !decoder_thread_.IsRunning()); | |
784 DCHECK(!input_streamon_); | |
785 | |
786 for (auto& input_record : input_buffer_map_) { | |
787 if (input_record.address != nullptr) | |
788 device_->Munmap(input_record.address, input_record.length); | |
789 } | |
790 | |
791 struct v4l2_requestbuffers reqbufs; | |
792 memset(&reqbufs, 0, sizeof(reqbufs)); | |
793 reqbufs.count = 0; | |
794 reqbufs.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE; | |
795 reqbufs.memory = V4L2_MEMORY_MMAP; | |
796 IOCTL_OR_LOG_ERROR(VIDIOC_REQBUFS, &reqbufs); | |
797 | |
798 input_buffer_map_.clear(); | |
799 free_input_buffers_.clear(); | |
800 } | |
801 | |
802 void V4L2SliceVideoDecodeAccelerator::DismissPictures( | |
803 const std::vector<int32_t>& picture_buffer_ids, | |
804 base::WaitableEvent* done) { | |
805 DVLOGF(3); | |
806 DCHECK(child_task_runner_->BelongsToCurrentThread()); | |
807 | |
808 for (auto picture_buffer_id : picture_buffer_ids) { | |
809 DVLOGF(1) << "dismissing PictureBuffer id=" << picture_buffer_id; | |
810 client_->DismissPictureBuffer(picture_buffer_id); | |
811 } | |
812 | |
813 done->Signal(); | |
814 } | |
815 | |
816 void V4L2SliceVideoDecodeAccelerator::DevicePollTask(bool poll_device) { | |
817 DVLOGF(4); | |
818 DCHECK_EQ(device_poll_thread_.message_loop(), base::MessageLoop::current()); | |
819 | |
820 bool event_pending; | |
821 if (!device_->Poll(poll_device, &event_pending)) { | |
822 NOTIFY_ERROR(PLATFORM_FAILURE); | |
823 return; | |
824 } | |
825 | |
826 // All processing should happen on ServiceDeviceTask(), since we shouldn't | |
827 // touch decoder state from this thread. | |
828 decoder_thread_task_runner_->PostTask( | |
829 FROM_HERE, base::Bind(&V4L2SliceVideoDecodeAccelerator::ServiceDeviceTask, | |
830 base::Unretained(this))); | |
831 } | |
832 | |
833 void V4L2SliceVideoDecodeAccelerator::ServiceDeviceTask() { | |
834 DVLOGF(4); | |
835 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread()); | |
836 | |
837 // ServiceDeviceTask() should only ever be scheduled from DevicePollTask(). | |
838 | |
839 Dequeue(); | |
840 SchedulePollIfNeeded(); | |
841 } | |
842 | |
843 void V4L2SliceVideoDecodeAccelerator::SchedulePollIfNeeded() { | |
844 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread()); | |
845 | |
846 if (!device_poll_thread_.IsRunning()) { | |
847 DVLOGF(2) << "Device poll thread stopped, will not schedule poll"; | |
848 return; | |
849 } | |
850 | |
851 DCHECK(input_streamon_ || output_streamon_); | |
852 | |
853 if (input_buffer_queued_count_ + output_buffer_queued_count_ == 0) { | |
854 DVLOGF(4) << "No buffers queued, will not schedule poll"; | |
855 return; | |
856 } | |
857 | |
858 DVLOGF(4) << "Scheduling device poll task"; | |
859 | |
860 device_poll_thread_.message_loop()->PostTask( | |
861 FROM_HERE, base::Bind(&V4L2SliceVideoDecodeAccelerator::DevicePollTask, | |
862 base::Unretained(this), true)); | |
863 | |
864 DVLOGF(2) << "buffer counts: " | |
865 << "INPUT[" << decoder_input_queue_.size() << "]" | |
866 << " => DEVICE[" | |
867 << free_input_buffers_.size() << "+" | |
868 << input_buffer_queued_count_ << "/" | |
869 << input_buffer_map_.size() << "]->[" | |
870 << free_output_buffers_.size() << "+" | |
871 << output_buffer_queued_count_ << "/" | |
872 << output_buffer_map_.size() << "]" | |
873 << " => DISPLAYQ[" << decoder_display_queue_.size() << "]" | |
874 << " => CLIENT[" << surfaces_at_display_.size() << "]"; | |
875 } | |
876 | |
877 void V4L2SliceVideoDecodeAccelerator::Enqueue( | |
878 const scoped_refptr<V4L2DecodeSurface>& dec_surface) { | |
879 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread()); | |
880 | |
881 const int old_inputs_queued = input_buffer_queued_count_; | |
882 const int old_outputs_queued = output_buffer_queued_count_; | |
883 | |
884 if (!EnqueueInputRecord(dec_surface->input_record(), | |
885 dec_surface->config_store())) { | |
886 DVLOGF(1) << "Failed queueing an input buffer"; | |
887 NOTIFY_ERROR(PLATFORM_FAILURE); | |
888 return; | |
889 } | |
890 | |
891 if (!EnqueueOutputRecord(dec_surface->output_record())) { | |
892 DVLOGF(1) << "Failed queueing an output buffer"; | |
893 NOTIFY_ERROR(PLATFORM_FAILURE); | |
894 return; | |
895 } | |
896 | |
897 bool inserted = | |
898 surfaces_at_device_.insert(std::make_pair(dec_surface->output_record(), | |
899 dec_surface)).second; | |
900 DCHECK(inserted); | |
901 | |
902 if (old_inputs_queued == 0 && old_outputs_queued == 0) | |
903 SchedulePollIfNeeded(); | |
904 } | |
905 | |
906 void V4L2SliceVideoDecodeAccelerator::Dequeue() { | |
907 DVLOGF(3); | |
908 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread()); | |
909 | |
910 struct v4l2_buffer dqbuf; | |
911 struct v4l2_plane planes[VIDEO_MAX_PLANES]; | |
912 while (input_buffer_queued_count_ > 0) { | |
913 DCHECK(input_streamon_); | |
914 memset(&dqbuf, 0, sizeof(dqbuf)); | |
915 memset(&planes, 0, sizeof(planes)); | |
916 dqbuf.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE; | |
917 dqbuf.memory = V4L2_MEMORY_MMAP; | |
918 dqbuf.m.planes = planes; | |
919 dqbuf.length = input_planes_count_; | |
920 if (device_->Ioctl(VIDIOC_DQBUF, &dqbuf) != 0) { | |
921 if (errno == EAGAIN) { | |
922 // EAGAIN if we're just out of buffers to dequeue. | |
923 break; | |
924 } | |
925 PLOG(ERROR) << "ioctl() failed: VIDIOC_DQBUF"; | |
926 NOTIFY_ERROR(PLATFORM_FAILURE); | |
927 return; | |
928 } | |
929 InputRecord& input_record = input_buffer_map_[dqbuf.index]; | |
930 DCHECK(input_record.at_device); | |
931 input_record.at_device = false; | |
932 ReuseInputBuffer(dqbuf.index); | |
933 input_buffer_queued_count_--; | |
934 DVLOGF(4) << "Dequeued input=" << dqbuf.index | |
935 << " count: " << input_buffer_queued_count_; | |
936 } | |
937 | |
938 while (output_buffer_queued_count_ > 0) { | |
939 DCHECK(output_streamon_); | |
940 memset(&dqbuf, 0, sizeof(dqbuf)); | |
941 memset(&planes, 0, sizeof(planes)); | |
942 dqbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; | |
943 dqbuf.memory = | |
944 (output_mode_ == Config::OutputMode::ALLOCATE ? V4L2_MEMORY_MMAP | |
945 : V4L2_MEMORY_DMABUF); | |
946 dqbuf.m.planes = planes; | |
947 dqbuf.length = output_planes_count_; | |
948 if (device_->Ioctl(VIDIOC_DQBUF, &dqbuf) != 0) { | |
949 if (errno == EAGAIN) { | |
950 // EAGAIN if we're just out of buffers to dequeue. | |
951 break; | |
952 } | |
953 PLOG(ERROR) << "ioctl() failed: VIDIOC_DQBUF"; | |
954 NOTIFY_ERROR(PLATFORM_FAILURE); | |
955 return; | |
956 } | |
957 OutputRecord& output_record = output_buffer_map_[dqbuf.index]; | |
958 DCHECK(output_record.at_device); | |
959 output_record.at_device = false; | |
960 output_buffer_queued_count_--; | |
961 DVLOGF(3) << "Dequeued output=" << dqbuf.index | |
962 << " count " << output_buffer_queued_count_; | |
963 | |
964 V4L2DecodeSurfaceByOutputId::iterator it = | |
965 surfaces_at_device_.find(dqbuf.index); | |
966 if (it == surfaces_at_device_.end()) { | |
967 DLOG(ERROR) << "Got invalid surface from device."; | |
968 NOTIFY_ERROR(PLATFORM_FAILURE); | |
return; | |
969 } | |
970 | |
971 it->second->SetDecoded(); | |
972 surfaces_at_device_.erase(it); | |
973 } | |
974 | |
975 // A frame was decoded, see if we can output it. | |
976 TryOutputSurfaces(); | |
977 | |
978 ProcessPendingEventsIfNeeded(); | |
979 } | |
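Both loops above drain their queue until VIDIOC_DQBUF fails with EAGAIN, which is how a non-blocking V4L2 fd reports "nothing left to dequeue" as opposed to a real error. A minimal stand-alone version of that idiom for the CAPTURE queue (illustrative only; the real code additionally updates record bookkeeping and marks the matching surface as decoded):

#include <linux/videodev2.h>
#include <sys/ioctl.h>
#include <cerrno>
#include <cstring>

// Dequeue CAPTURE_MPLANE buffers from |fd| until EAGAIN. Returns the number
// of buffers dequeued, or -1 on a genuine error.
static int DrainCaptureQueue(int fd, __u32 num_planes) {
  int dequeued = 0;
  while (true) {
    struct v4l2_plane planes[VIDEO_MAX_PLANES];
    struct v4l2_buffer dqbuf;
    memset(&dqbuf, 0, sizeof(dqbuf));
    memset(planes, 0, sizeof(planes));
    dqbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
    dqbuf.memory = V4L2_MEMORY_MMAP;
    dqbuf.m.planes = planes;
    dqbuf.length = num_planes;
    if (ioctl(fd, VIDIOC_DQBUF, &dqbuf) != 0) {
      if (errno == EAGAIN)
        return dequeued;  // Queue is empty for now; not an error.
      return -1;
    }
    ++dequeued;  // dqbuf.index identifies which buffer came back.
  }
}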
980 | |
981 void V4L2SliceVideoDecodeAccelerator::NewEventPending() { | |
982 // Switch to event processing mode if we are decoding. Otherwise we are either | |
983 // already in it, or we will potentially switch to it later, after finishing | |
984 // other tasks. | |
985 if (state_ == kDecoding) | |
986 state_ = kIdle; | |
987 | |
988 ProcessPendingEventsIfNeeded(); | |
989 } | |
990 | |
991 bool V4L2SliceVideoDecodeAccelerator::FinishEventProcessing() { | |
992 DCHECK_EQ(state_, kIdle); | |
993 | |
994 state_ = kDecoding; | |
995 ScheduleDecodeBufferTaskIfNeeded(); | |
996 | |
997 return true; | |
998 } | |
999 | |
1000 void V4L2SliceVideoDecodeAccelerator::ProcessPendingEventsIfNeeded() { | |
1001 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread()); | |
1002 | |
1003 // Process pending events, if any, in the correct order. | |
1004 // We always first process the surface set change, as it is an internal | |
1005 // event from the decoder and interleaving it with external requests would | |
1006 // put the decoder in an undefined state. | |
1007 using ProcessFunc = bool (V4L2SliceVideoDecodeAccelerator::*)(); | |
1008 const ProcessFunc process_functions[] = { | |
1009 &V4L2SliceVideoDecodeAccelerator::FinishSurfaceSetChange, | |
1010 &V4L2SliceVideoDecodeAccelerator::FinishFlush, | |
1011 &V4L2SliceVideoDecodeAccelerator::FinishReset, | |
1012 &V4L2SliceVideoDecodeAccelerator::FinishEventProcessing, | |
1013 }; | |
1014 | |
1015 for (const auto& fn : process_functions) { | |
1016 if (state_ != kIdle) | |
1017 return; | |
1018 | |
1019 if (!(this->*fn)()) | |
1020 return; | |
1021 } | |
1022 } | |
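ProcessPendingEventsIfNeeded() walks an ordered array of pointers to member functions and stops as soon as the state leaves kIdle or a handler reports it could not finish. For readers who do not see the (this->*fn)() syntax often, here is a self-contained sketch of that dispatch pattern (the class and step names are made up for illustration):

#include <iostream>

class Pipeline {
 public:
  void RunAll() {
    using Step = bool (Pipeline::*)();
    const Step steps[] = {&Pipeline::StepA, &Pipeline::StepB, &Pipeline::StepC};
    for (Step step : steps) {
      if (!(this->*step)())  // Invoke the member function on |this|.
        return;              // Stop at the first step that cannot finish yet.
    }
  }

 private:
  bool StepA() { std::cout << "A\n"; return true; }
  bool StepB() { std::cout << "B\n"; return false; }  // Blocks further steps.
  bool StepC() { std::cout << "C\n"; return true; }
};

int main() {
  Pipeline().RunAll();  // Prints "A" then "B"; StepC never runs.
}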
1023 | |
1024 void V4L2SliceVideoDecodeAccelerator::ReuseInputBuffer(int index) { | |
1025 DVLOGF(4) << "Reusing input buffer, index=" << index; | |
1026 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread()); | |
1027 | |
1028 DCHECK_LT(index, static_cast<int>(input_buffer_map_.size())); | |
1029 InputRecord& input_record = input_buffer_map_[index]; | |
1030 | |
1031 DCHECK(!input_record.at_device); | |
1032 input_record.input_id = -1; | |
1033 input_record.bytes_used = 0; | |
1034 | |
1035 DCHECK_EQ(std::count(free_input_buffers_.begin(), free_input_buffers_.end(), | |
1036 index), 0); | |
1037 free_input_buffers_.push_back(index); | |
1038 } | |
1039 | |
1040 void V4L2SliceVideoDecodeAccelerator::ReuseOutputBuffer(int index) { | |
1041 DVLOGF(4) << "Reusing output buffer, index=" << index; | |
1042 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread()); | |
1043 | |
1044 DCHECK_LT(index, static_cast<int>(output_buffer_map_.size())); | |
1045 OutputRecord& output_record = output_buffer_map_[index]; | |
1046 DCHECK(!output_record.at_device); | |
1047 DCHECK(!output_record.at_client); | |
1048 | |
1049 DCHECK_EQ(std::count(free_output_buffers_.begin(), free_output_buffers_.end(), | |
1050 index), 0); | |
1051 free_output_buffers_.push_back(index); | |
1052 | |
1053 ScheduleDecodeBufferTaskIfNeeded(); | |
1054 } | |
1055 | |
1056 bool V4L2SliceVideoDecodeAccelerator::EnqueueInputRecord( | |
1057 int index, | |
1058 uint32_t config_store) { | |
1059 DVLOGF(3); | |
1060 DCHECK_LT(index, static_cast<int>(input_buffer_map_.size())); | |
1061 DCHECK_GT(config_store, 0u); | |
1062 | |
1063 // Enqueue an input (VIDEO_OUTPUT) buffer for an input video frame. | |
1064 InputRecord& input_record = input_buffer_map_[index]; | |
1065 DCHECK(!input_record.at_device); | |
1066 struct v4l2_buffer qbuf; | |
1067 struct v4l2_plane qbuf_planes[VIDEO_MAX_PLANES]; | |
1068 memset(&qbuf, 0, sizeof(qbuf)); | |
1069 memset(qbuf_planes, 0, sizeof(qbuf_planes)); | |
1070 qbuf.index = index; | |
1071 qbuf.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE; | |
1072 qbuf.memory = V4L2_MEMORY_MMAP; | |
1073 qbuf.m.planes = qbuf_planes; | |
1074 qbuf.m.planes[0].bytesused = input_record.bytes_used; | |
1075 qbuf.length = input_planes_count_; | |
1076 qbuf.config_store = config_store; | |
1077 IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_QBUF, &qbuf); | |
1078 input_record.at_device = true; | |
1079 input_buffer_queued_count_++; | |
1080 DVLOGF(4) << "Enqueued input=" << qbuf.index | |
1081 << " count: " << input_buffer_queued_count_; | |
1082 | |
1083 return true; | |
1084 } | |
1085 | |
1086 bool V4L2SliceVideoDecodeAccelerator::EnqueueOutputRecord(int index) { | |
1087 DVLOGF(3); | |
1088 DCHECK_LT(index, static_cast<int>(output_buffer_map_.size())); | |
1089 | |
1090 // Enqueue an output (VIDEO_CAPTURE) buffer. | |
1091 OutputRecord& output_record = output_buffer_map_[index]; | |
1092 DCHECK(!output_record.at_device); | |
1093 DCHECK(!output_record.at_client); | |
1094 DCHECK_NE(output_record.picture_id, -1); | |
1095 | |
1096 if (output_record.egl_sync != EGL_NO_SYNC_KHR) { | |
1097 // If we have to wait for completion, wait. Note that | |
1098 // free_output_buffers_ is a FIFO queue, so we always wait on the | |
1099 // buffer that has been in the queue the longest. | |
1100 if (eglClientWaitSyncKHR(egl_display_, output_record.egl_sync, 0, | |
1101 EGL_FOREVER_KHR) == EGL_FALSE) { | |
1102 // This will cause tearing, but is safe otherwise. | |
1103 DVLOGF(1) << "eglClientWaitSyncKHR failed!"; | |
1104 } | |
1105 if (eglDestroySyncKHR(egl_display_, output_record.egl_sync) != EGL_TRUE) { | |
1106 LOGF(ERROR) << "eglDestroySyncKHR failed!"; | |
1107 NOTIFY_ERROR(PLATFORM_FAILURE); | |
1108 return false; | |
1109 } | |
1110 output_record.egl_sync = EGL_NO_SYNC_KHR; | |
1111 } | |
1112 | |
1113 struct v4l2_buffer qbuf; | |
1114 struct v4l2_plane qbuf_planes[VIDEO_MAX_PLANES]; | |
1115 memset(&qbuf, 0, sizeof(qbuf)); | |
1116 memset(qbuf_planes, 0, sizeof(qbuf_planes)); | |
1117 qbuf.index = index; | |
1118 qbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; | |
1119 if (output_mode_ == Config::OutputMode::ALLOCATE) { | |
1120 qbuf.memory = V4L2_MEMORY_MMAP; | |
1121 } else { | |
1122 qbuf.memory = V4L2_MEMORY_DMABUF; | |
1123 DCHECK_EQ(output_planes_count_, output_record.dmabuf_fds.size()); | |
1124 for (size_t i = 0; i < output_record.dmabuf_fds.size(); ++i) { | |
1125 DCHECK(output_record.dmabuf_fds[i].is_valid()); | |
1126 qbuf_planes[i].m.fd = output_record.dmabuf_fds[i].get(); | |
1127 } | |
1128 } | |
1129 qbuf.m.planes = qbuf_planes; | |
1130 qbuf.length = output_planes_count_; | |
1131 IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_QBUF, &qbuf); | |
1132 output_record.at_device = true; | |
1133 output_buffer_queued_count_++; | |
1134 DVLOGF(4) << "Enqueued output=" << qbuf.index | |
1135 << " count: " << output_buffer_queued_count_; | |
1136 | |
1137 return true; | |
1138 } | |
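EnqueueOutputRecord() waits on an EGLSyncKHR fence before re-queuing a buffer, so the decoder never overwrites a texture the GPU might still be reading; the fence itself is created elsewhere, when the client returns the picture (not shown in this hunk). A hedged sketch of the wait-then-destroy half of that lifetime, assuming EGL_KHR_fence_sync is available and its entry points are already resolved (Chromium gets them through its GL bindings; a raw EGL client may need eglGetProcAddress()):

#include <EGL/egl.h>
#include <EGL/eglext.h>

// Block until the GPU has passed |*sync|, then destroy it and mark it unused.
// On a failed wait we proceed anyway, which can tear but is otherwise safe,
// matching the policy in the code above.
static void WaitForPreviousGpuUse(EGLDisplay display, EGLSyncKHR* sync) {
  if (*sync == EGL_NO_SYNC_KHR)
    return;  // Nothing outstanding; the buffer is safe to reuse.
  eglClientWaitSyncKHR(display, *sync, 0, EGL_FOREVER_KHR);
  eglDestroySyncKHR(display, *sync);
  *sync = EGL_NO_SYNC_KHR;
}

// The matching creation side, issued once the client is done with the texture:
//   EGLSyncKHR sync = eglCreateSyncKHR(display, EGL_SYNC_FENCE_KHR, nullptr);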
1139 | |
1140 bool V4L2SliceVideoDecodeAccelerator::StartDevicePoll() { | |
1141 DVLOGF(3) << "Starting device poll"; | |
1142 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread()); | |
1143 DCHECK(!device_poll_thread_.IsRunning()); | |
1144 | |
1145 // Start up the device poll thread and schedule its first DevicePollTask(). | |
1146 if (!device_poll_thread_.Start()) { | |
1147 DLOG(ERROR) << "StartDevicePoll(): Device poll thread failed to start"; | |
1148 NOTIFY_ERROR(PLATFORM_FAILURE); | |
1149 return false; | |
1150 } | |
1151 if (!input_streamon_) { | |
1152 __u32 type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE; | |
1153 IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_STREAMON, &type); | |
1154 input_streamon_ = true; | |
1155 } | |
1156 | |
1157 if (!output_streamon_) { | |
1158 __u32 type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; | |
1159 IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_STREAMON, &type); | |
1160 output_streamon_ = true; | |
1161 } | |
1162 | |
1163 device_poll_thread_.message_loop()->PostTask( | |
1164 FROM_HERE, base::Bind(&V4L2SliceVideoDecodeAccelerator::DevicePollTask, | |
1165 base::Unretained(this), true)); | |
1166 | |
1167 return true; | |
1168 } | |
1169 | |
1170 bool V4L2SliceVideoDecodeAccelerator::StopDevicePoll(bool keep_input_state) { | |
1171 DVLOGF(3) << "Stopping device poll"; | |
1172 if (decoder_thread_.IsRunning()) | |
1173 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread()); | |
1174 | |
1175 // Signal the DevicePollTask() to stop, and stop the device poll thread. | |
1176 if (!device_->SetDevicePollInterrupt()) { | |
1177 PLOG(ERROR) << "SetDevicePollInterrupt(): failed"; | |
1178 NOTIFY_ERROR(PLATFORM_FAILURE); | |
1179 return false; | |
1180 } | |
1181 device_poll_thread_.Stop(); | |
1182 DVLOGF(3) << "Device poll thread stopped"; | |
1183 | |
1184 // Clear the interrupt now, to be sure. | |
1185 if (!device_->ClearDevicePollInterrupt()) { | |
1186 NOTIFY_ERROR(PLATFORM_FAILURE); | |
1187 return false; | |
1188 } | |
1189 | |
1190 if (!keep_input_state) { | |
1191 if (input_streamon_) { | |
1192 __u32 type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE; | |
1193 IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_STREAMOFF, &type); | |
1194 } | |
1195 input_streamon_ = false; | |
1196 } | |
1197 | |
1198 if (output_streamon_) { | |
1199 __u32 type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; | |
1200 IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_STREAMOFF, &type); | |
1201 } | |
1202 output_streamon_ = false; | |
1203 | |
1204 if (!keep_input_state) { | |
1205 for (size_t i = 0; i < input_buffer_map_.size(); ++i) { | |
1206 InputRecord& input_record = input_buffer_map_[i]; | |
1207 if (input_record.at_device) { | |
1208 input_record.at_device = false; | |
1209 ReuseInputBuffer(i); | |
1210 input_buffer_queued_count_--; | |
1211 } | |
1212 } | |
1213 DCHECK_EQ(input_buffer_queued_count_, 0); | |
1214 } | |
1215 | |
1216 // STREAMOFF makes the driver drop all buffers without decoding and DQBUFing, | |
1217 // so we mark them all as at_device = false and clear surfaces_at_device_. | |
1218 for (size_t i = 0; i < output_buffer_map_.size(); ++i) { | |
1219 OutputRecord& output_record = output_buffer_map_[i]; | |
1220 if (output_record.at_device) { | |
1221 output_record.at_device = false; | |
1222 output_buffer_queued_count_--; | |
1223 } | |
1224 } | |
1225 surfaces_at_device_.clear(); | |
1226 DCHECK_EQ(output_buffer_queued_count_, 0); | |
1227 | |
1228 // Drop all surfaces that were awaiting decode before being displayed, | |
1229 // since we've just cancelled all outstanding decodes. | |
1230 while (!decoder_display_queue_.empty()) | |
1231 decoder_display_queue_.pop(); | |
1232 | |
1233 DVLOGF(3) << "Device poll stopped"; | |
1234 return true; | |
1235 } | |
1236 | |
1237 void V4L2SliceVideoDecodeAccelerator::Decode( | |
1238 const media::BitstreamBuffer& bitstream_buffer) { | |
1239 DVLOGF(3) << "input_id=" << bitstream_buffer.id() | |
1240 << ", size=" << bitstream_buffer.size(); | |
1241 DCHECK(decode_task_runner_->BelongsToCurrentThread()); | |
1242 | |
1243 if (bitstream_buffer.id() < 0) { | |
1244 LOG(ERROR) << "Invalid bitstream_buffer, id: " << bitstream_buffer.id(); | |
1245 if (base::SharedMemory::IsHandleValid(bitstream_buffer.handle())) | |
1246 base::SharedMemory::CloseHandle(bitstream_buffer.handle()); | |
1247 NOTIFY_ERROR(INVALID_ARGUMENT); | |
1248 return; | |
1249 } | |
1250 | |
1251 decoder_thread_task_runner_->PostTask( | |
1252 FROM_HERE, base::Bind(&V4L2SliceVideoDecodeAccelerator::DecodeTask, | |
1253 base::Unretained(this), bitstream_buffer)); | |
1254 } | |
1255 | |
1256 void V4L2SliceVideoDecodeAccelerator::DecodeTask( | |
1257 const media::BitstreamBuffer& bitstream_buffer) { | |
1258 DVLOGF(3) << "input_id=" << bitstream_buffer.id() | |
1259 << " size=" << bitstream_buffer.size(); | |
1260 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread()); | |
1261 | |
1262 std::unique_ptr<BitstreamBufferRef> bitstream_record(new BitstreamBufferRef( | |
1263 decode_client_, decode_task_runner_, | |
1264 new SharedMemoryRegion(bitstream_buffer, true), bitstream_buffer.id())); | |
1265 | |
1266 // Skip empty buffer. | |
1267 if (bitstream_buffer.size() == 0) | |
1268 return; | |
1269 | |
1270 if (!bitstream_record->shm->Map()) { | |
1271 LOGF(ERROR) << "Could not map bitstream_buffer"; | |
1272 NOTIFY_ERROR(UNREADABLE_INPUT); | |
1273 return; | |
1274 } | |
1275 DVLOGF(3) << "mapped at=" << bitstream_record->shm->memory(); | |
1276 | |
1277 decoder_input_queue_.push( | |
1278 linked_ptr<BitstreamBufferRef>(bitstream_record.release())); | |
1279 | |
1280 ScheduleDecodeBufferTaskIfNeeded(); | |
1281 } | |
1282 | |
1283 bool V4L2SliceVideoDecodeAccelerator::TrySetNewBistreamBuffer() { | |
1284 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread()); | |
1285 DCHECK(!decoder_current_bitstream_buffer_); | |
1286 | |
1287 if (decoder_input_queue_.empty()) | |
1288 return false; | |
1289 | |
1290 decoder_current_bitstream_buffer_.reset( | |
1291 decoder_input_queue_.front().release()); | |
1292 decoder_input_queue_.pop(); | |
1293 | |
1294 if (decoder_current_bitstream_buffer_->input_id == kFlushBufferId) { | |
1295 // This is a buffer we queued for ourselves to trigger flush at this time. | |
1296 InitiateFlush(); | |
1297 return false; | |
1298 } | |
1299 | |
1300 const uint8_t* const data = reinterpret_cast<const uint8_t*>( | |
1301 decoder_current_bitstream_buffer_->shm->memory()); | |
1302 const size_t data_size = decoder_current_bitstream_buffer_->shm->size(); | |
1303 decoder_->SetStream(data, data_size); | |
1304 | |
1305 return true; | |
1306 } | |
1307 | |
1308 void V4L2SliceVideoDecodeAccelerator::ScheduleDecodeBufferTaskIfNeeded() { | |
1309 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread()); | |
1310 if (state_ == kDecoding) { | |
1311 decoder_thread_task_runner_->PostTask( | |
1312 FROM_HERE, | |
1313 base::Bind(&V4L2SliceVideoDecodeAccelerator::DecodeBufferTask, | |
1314 base::Unretained(this))); | |
1315 } | |
1316 } | |
1317 | |
1318 void V4L2SliceVideoDecodeAccelerator::DecodeBufferTask() { | |
1319 DVLOGF(3); | |
1320 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread()); | |
1321 | |
1322 if (state_ != kDecoding) { | |
1323 DVLOGF(3) << "Early exit, not in kDecoding"; | |
1324 return; | |
1325 } | |
1326 | |
1327 while (true) { | |
1328 AcceleratedVideoDecoder::DecodeResult res; | |
1329 res = decoder_->Decode(); | |
1330 switch (res) { | |
1331 case AcceleratedVideoDecoder::kAllocateNewSurfaces: | |
1332 DVLOGF(2) << "Decoder requesting a new set of surfaces"; | |
1333 InitiateSurfaceSetChange(); | |
1334 return; | |
1335 | |
1336 case AcceleratedVideoDecoder::kRanOutOfStreamData: | |
1337 decoder_current_bitstream_buffer_.reset(); | |
1338 if (!TrySetNewBistreamBuffer()) | |
1339 return; | |
1340 | |
1341 break; | |
1342 | |
1343 case AcceleratedVideoDecoder::kRanOutOfSurfaces: | |
1344 // No more surfaces for the decoder, we'll come back once we have more. | |
1345 DVLOGF(4) << "Ran out of surfaces"; | |
1346 return; | |
1347 | |
1348 case AcceleratedVideoDecoder::kDecodeError: | |
1349 DVLOGF(1) << "Error decoding stream"; | |
1350 NOTIFY_ERROR(PLATFORM_FAILURE); | |
1351 return; | |
1352 } | |
1353 } | |
1354 } | |
1355 | |
1356 void V4L2SliceVideoDecodeAccelerator::InitiateSurfaceSetChange() { | |
1357 DVLOGF(2); | |
1358 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread()); | |
1359 DCHECK_EQ(state_, kDecoding); | |
1360 | |
1361 DCHECK(!surface_set_change_pending_); | |
1362 surface_set_change_pending_ = true; | |
1363 NewEventPending(); | |
1364 } | |
1365 | |
1366 bool V4L2SliceVideoDecodeAccelerator::FinishSurfaceSetChange() { | |
1367 DVLOGF(2); | |
1368 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread()); | |
1369 | |
1370 if (!surface_set_change_pending_) | |
1371 return true; | |
1372 | |
1373 if (!surfaces_at_device_.empty()) | |
1374 return false; | |
1375 | |
1376 DCHECK_EQ(state_, kIdle); | |
1377 DCHECK(decoder_display_queue_.empty()); | |
1378 // All output buffers should've been returned from decoder and device by now. | |
1379 // The only remaining owner of surfaces may be display (client), and we will | |
1380 // dismiss them when destroying output buffers below. | |
1381 DCHECK_EQ(free_output_buffers_.size() + surfaces_at_display_.size(), | |
1382 output_buffer_map_.size()); | |
1383 | |
1384 // Keep input queue running while we switch outputs. | |
1385 if (!StopDevicePoll(true)) { | |
1386 NOTIFY_ERROR(PLATFORM_FAILURE); | |
1387 return false; | |
1388 } | |
1389 | |
1390 // This will return only once all buffers are dismissed and destroyed. | |
1391 // This does not wait until they are displayed however, as display retains | |
1392 // references to the buffers bound to textures and will release them | |
1393 // after displaying. | |
1394 if (!DestroyOutputs(true)) { | |
1395 NOTIFY_ERROR(PLATFORM_FAILURE); | |
1396 return false; | |
1397 } | |
1398 | |
1399 if (!CreateOutputBuffers()) { | |
1400 NOTIFY_ERROR(PLATFORM_FAILURE); | |
1401 return false; | |
1402 } | |
1403 | |
1404 surface_set_change_pending_ = false; | |
1405 DVLOG(3) << "Surface set change finished"; | |
1406 return true; | |
1407 } | |
1408 | |
1409 bool V4L2SliceVideoDecodeAccelerator::DestroyOutputs(bool dismiss) { | |
1410 DVLOGF(3); | |
1411 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread()); | |
1412 std::vector<int32_t> picture_buffers_to_dismiss; | |
1413 | |
1414 if (output_buffer_map_.empty()) | |
1415 return true; | |
1416 | |
1417 for (const auto& output_record : output_buffer_map_) { | |
1418 DCHECK(!output_record.at_device); | |
1419 | |
1420 if (output_record.egl_sync != EGL_NO_SYNC_KHR) { | |
1421 if (eglDestroySyncKHR(egl_display_, output_record.egl_sync) != EGL_TRUE) | |
1422 DVLOGF(1) << "eglDestroySyncKHR failed."; | |
1423 } | |
1424 | |
1425 if (output_record.egl_image != EGL_NO_IMAGE_KHR) { | |
1426 child_task_runner_->PostTask( | |
1427 FROM_HERE, | |
1428 base::Bind(base::IgnoreResult(&V4L2Device::DestroyEGLImage), device_, | |
1429 egl_display_, output_record.egl_image)); | |
1430 } | |
1431 | |
1432 picture_buffers_to_dismiss.push_back(output_record.picture_id); | |
1433 } | |
1434 | |
1435 if (dismiss) { | |
1436 DVLOGF(2) << "Scheduling picture dismissal"; | |
1437 base::WaitableEvent done(false, false); | |
1438 child_task_runner_->PostTask( | |
1439 FROM_HERE, base::Bind(&V4L2SliceVideoDecodeAccelerator::DismissPictures, | |
1440 weak_this_, picture_buffers_to_dismiss, &done)); | |
1441 done.Wait(); | |
1442 } | |
1443 | |
1444 // At this point client can't call ReusePictureBuffer on any of the pictures | |
1445 // anymore, so it's safe to destroy. | |
1446 return DestroyOutputBuffers(); | |
1447 } | |
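DestroyOutputs() has to hand picture dismissal over to the child (GPU main) thread and block until it has actually happened, which is what the base::WaitableEvent passed to DismissPictures() is for. A stand-alone sketch of that block-until-the-other-thread-finishes handshake using only the standard library (std::promise standing in for base::WaitableEvent, std::thread for the task runner):

#include <functional>
#include <future>
#include <iostream>
#include <thread>

// Run |work| on another thread and block the caller until it completes,
// mirroring the PostTask-plus-WaitableEvent pattern above.
static void RunOnOtherThreadAndWait(const std::function<void()>& work) {
  std::promise<void> done;
  std::thread worker([&] {
    work();
    done.set_value();  // Equivalent of done->Signal().
  });
  done.get_future().wait();  // Equivalent of done.Wait().
  worker.join();
}

int main() {
  RunOnOtherThreadAndWait([] { std::cout << "dismissing pictures\n"; });
}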
1448 | |
1449 bool V4L2SliceVideoDecodeAccelerator::DestroyOutputBuffers() { | |
1450 DVLOGF(3); | |
1451 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread() || | |
1452 !decoder_thread_.IsRunning()); | |
1453 DCHECK(!output_streamon_); | |
1454 DCHECK(surfaces_at_device_.empty()); | |
1455 DCHECK(decoder_display_queue_.empty()); | |
1456 DCHECK_EQ(surfaces_at_display_.size() + free_output_buffers_.size(), | |
1457 output_buffer_map_.size()); | |
1458 | |
1459 if (output_buffer_map_.empty()) | |
1460 return true; | |
1461 | |
1462 // It's ok to do this, client will retain references to textures, but we are | |
1463 // not interested in reusing the surfaces anymore. | |
1464 // This will prevent us from reusing old surfaces in case we have some | |
1465 // ReusePictureBuffer() pending on ChildThread already. It's ok to ignore | |
1466 // them, because we have already dismissed them (in DestroyOutputs()). | |
1467 for (const auto& surface_at_display : surfaces_at_display_) { | |
1468 size_t index = surface_at_display.second->output_record(); | |
1469 DCHECK_LT(index, output_buffer_map_.size()); | |
1470 OutputRecord& output_record = output_buffer_map_[index]; | |
1471 DCHECK(output_record.at_client); | |
1472 output_record.at_client = false; | |
1473 } | |
1474 surfaces_at_display_.clear(); | |
1475 DCHECK_EQ(free_output_buffers_.size(), output_buffer_map_.size()); | |
1476 | |
1477 free_output_buffers_.clear(); | |
1478 output_buffer_map_.clear(); | |
1479 | |
1480 struct v4l2_requestbuffers reqbufs; | |
1481 memset(&reqbufs, 0, sizeof(reqbufs)); | |
1482 reqbufs.count = 0; | |
1483 reqbufs.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; | |
1484 reqbufs.memory = V4L2_MEMORY_MMAP; | |
1485 IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_REQBUFS, &reqbufs); | |
1486 | |
1487 return true; | |
1488 } | |
1489 | |
1490 void V4L2SliceVideoDecodeAccelerator::AssignPictureBuffers( | |
1491 const std::vector<media::PictureBuffer>& buffers) { | |
1492 DVLOGF(3); | |
1493 DCHECK(child_task_runner_->BelongsToCurrentThread()); | |
1494 | |
1495 decoder_thread_task_runner_->PostTask( | |
1496 FROM_HERE, | |
1497 base::Bind(&V4L2SliceVideoDecodeAccelerator::AssignPictureBuffersTask, | |
1498 base::Unretained(this), buffers)); | |
1499 } | |
1500 | |
1501 void V4L2SliceVideoDecodeAccelerator::AssignPictureBuffersTask( | |
1502 const std::vector<media::PictureBuffer>& buffers) { | |
1503 DVLOGF(3); | |
1504 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread()); | |
1505 DCHECK_EQ(state_, kAwaitingPictureBuffers); | |
1506 | |
1507 const uint32_t req_buffer_count = decoder_->GetRequiredNumOfPictures(); | |
1508 | |
1509 if (buffers.size() < req_buffer_count) { | |
1510 DLOG(ERROR) << "Failed to provide requested picture buffers. " | |
1511 << "(Got " << buffers.size() | |
1512 << ", requested " << req_buffer_count << ")"; | |
1513 NOTIFY_ERROR(INVALID_ARGUMENT); | |
1514 return; | |
1515 } | |
1516 | |
1517 // Allocate the output buffers. | |
1518 struct v4l2_requestbuffers reqbufs; | |
1519 memset(&reqbufs, 0, sizeof(reqbufs)); | |
1520 reqbufs.count = buffers.size(); | |
1521 reqbufs.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; | |
1522 reqbufs.memory = | |
1523 (output_mode_ == Config::OutputMode::ALLOCATE ? V4L2_MEMORY_MMAP | |
1524 : V4L2_MEMORY_DMABUF); | |
1525 IOCTL_OR_ERROR_RETURN(VIDIOC_REQBUFS, &reqbufs); | |
1526 | |
1527 if (reqbufs.count != buffers.size()) { | |
1528 DLOG(ERROR) << "Could not allocate enough output buffers"; | |
1529 NOTIFY_ERROR(PLATFORM_FAILURE); | |
1530 return; | |
1531 } | |
1532 | |
1533 DCHECK(free_output_buffers_.empty()); | |
1534 DCHECK(output_buffer_map_.empty()); | |
1535 output_buffer_map_.resize(buffers.size()); | |
1536 for (size_t i = 0; i < output_buffer_map_.size(); ++i) { | |
1537 DCHECK(buffers[i].size() == coded_size_); | |
1538 DCHECK_EQ(1u, buffers[i].texture_ids().size()); | |
1539 | |
1540 OutputRecord& output_record = output_buffer_map_[i]; | |
1541 DCHECK(!output_record.at_device); | |
1542 DCHECK(!output_record.at_client); | |
1543 DCHECK_EQ(output_record.egl_image, EGL_NO_IMAGE_KHR); | |
1544 DCHECK_EQ(output_record.egl_sync, EGL_NO_SYNC_KHR); | |
1545 DCHECK_EQ(output_record.picture_id, -1); | |
1546 DCHECK(output_record.dmabuf_fds.empty()); | |
1547 DCHECK_EQ(output_record.cleared, false); | |
1548 | |
1549 output_record.picture_id = buffers[i].id(); | |
1550 output_record.texture_id = buffers[i].texture_ids()[0]; | |
1551 // This will remain true until ImportBufferForPicture is called, either by | |
1552 // the client, or by ourselves, if we are allocating. | |
1553 output_record.at_client = true; | |
1554 if (output_mode_ == Config::OutputMode::ALLOCATE) { | |
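// In ALLOCATE mode, export the freshly allocated V4L2 buffer as dmabuf FDs | |
// and import it right back below, so both output modes share the same path. | |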
1555 std::vector<base::ScopedFD> dmabuf_fds = | |
1556 device_->GetDmabufsForV4L2Buffer( | |
1557 i, output_planes_count_, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE); | |
1558 if (dmabuf_fds.empty()) { | |
1559 NOTIFY_ERROR(PLATFORM_FAILURE); | |
1560 return; | |
1561 } | |
1562 | |
1563 auto passed_dmabuf_fds(base::WrapUnique( | |
1564 new std::vector<base::ScopedFD>(std::move(dmabuf_fds)))); | |
1565 ImportBufferForPictureTask(output_record.picture_id, | |
1566 std::move(passed_dmabuf_fds)); | |
1567 } // else we'll get triggered via ImportBufferForPicture() from client. | |
1568 DVLOGF(3) << "buffer[" << i << "]: picture_id=" << output_record.picture_id; | |
1569 } | |
1570 | |
1571 if (!StartDevicePoll()) { | |
1572 NOTIFY_ERROR(PLATFORM_FAILURE); | |
1573 return; | |
1574 } | |
1575 | |
1576 // Put us in kIdle to allow further event processing. | |
1577 // ProcessPendingEventsIfNeeded() will put us back into kDecoding after all | |
1578 // other pending events are processed successfully. | |
1579 state_ = kIdle; | |
1580 ProcessPendingEventsIfNeeded(); | |
1581 } | |
1582 | |
1583 void V4L2SliceVideoDecodeAccelerator::CreateEGLImageFor( | |
1584 size_t buffer_index, | |
1585 int32_t picture_buffer_id, | |
1586 std::unique_ptr<std::vector<base::ScopedFD>> passed_dmabuf_fds, | |
1587 GLuint texture_id, | |
1588 const gfx::Size& size, | |
1589 uint32_t fourcc) { | |
1590 DVLOGF(3) << "index=" << buffer_index; | |
1591 DCHECK(child_task_runner_->BelongsToCurrentThread()); | |
1592 | |
1593 if (get_gl_context_cb_.is_null() || make_context_current_cb_.is_null()) { | |
1594 DLOG(ERROR) << "GL callbacks required for binding to EGLImages"; | |
1595 NOTIFY_ERROR(INVALID_ARGUMENT); | |
1596 return; | |
1597 } | |
1598 | |
1599 gfx::GLContext* gl_context = get_gl_context_cb_.Run(); | |
1600 if (!gl_context || !make_context_current_cb_.Run()) { | |
1601 DLOG(ERROR) << "No GL context"; | |
1602 NOTIFY_ERROR(PLATFORM_FAILURE); | |
1603 return; | |
1604 } | |
1605 | |
1606 gfx::ScopedTextureBinder bind_restore(GL_TEXTURE_EXTERNAL_OES, 0); | |
1607 | |
1608 EGLImageKHR egl_image = | |
1609 device_->CreateEGLImage(egl_display_, gl_context->GetHandle(), texture_id, | |
1610 size, buffer_index, fourcc, *passed_dmabuf_fds); | |
1611 if (egl_image == EGL_NO_IMAGE_KHR) { | |
1612 LOGF(ERROR) << "Could not create EGLImageKHR," | |
1613 << " index=" << buffer_index << " texture_id=" << texture_id; | |
1614 NOTIFY_ERROR(PLATFORM_FAILURE); | |
1615 return; | |
1616 } | |
1617 | |
1618 decoder_thread_task_runner_->PostTask( | |
1619 FROM_HERE, | |
1620 base::Bind(&V4L2SliceVideoDecodeAccelerator::AssignEGLImage, | |
1621 base::Unretained(this), buffer_index, picture_buffer_id, | |
1622 egl_image, base::Passed(&passed_dmabuf_fds))); | |
1623 } | |
1624 | |
1625 void V4L2SliceVideoDecodeAccelerator::AssignEGLImage( | |
1626 size_t buffer_index, | |
1627 int32_t picture_buffer_id, | |
1628 EGLImageKHR egl_image, | |
1629 std::unique_ptr<std::vector<base::ScopedFD>> passed_dmabuf_fds) { | |
1630 DVLOGF(3) << "index=" << buffer_index; | |
1631 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread()); | |
1632 | |
1633 // It's possible that while waiting for the EGLImages to be allocated and | |
1634 // assigned, we have already decoded more of the stream and saw another | |
1635 // resolution change. This is a normal situation; in such a case either there | |
1636 // is no output record with this index awaiting an EGLImage to be assigned to | |
1637 // it, or the record is already updated to use a newer PictureBuffer and is | |
1638 // awaiting an EGLImage associated with a different picture_buffer_id. If so, | |
1639 // just discard this image, we will get the one we are waiting for later. | |
1640 if (buffer_index >= output_buffer_map_.size() || | |
1641 output_buffer_map_[buffer_index].picture_id != picture_buffer_id) { | |
1642 DVLOGF(3) << "Picture set already changed, dropping EGLImage"; | |
1643 child_task_runner_->PostTask( | |
1644 FROM_HERE, base::Bind(base::IgnoreResult(&V4L2Device::DestroyEGLImage), | |
1645 device_, egl_display_, egl_image)); | |
1646 return; | |
1647 } | |
1648 | |
1649 OutputRecord& output_record = output_buffer_map_[buffer_index]; | |
1650 DCHECK_EQ(output_record.egl_image, EGL_NO_IMAGE_KHR); | |
1651 DCHECK_EQ(output_record.egl_sync, EGL_NO_SYNC_KHR); | |
1652 DCHECK(!output_record.at_client); | |
1653 DCHECK(!output_record.at_device); | |
1654 | |
1655 output_record.egl_image = egl_image; | |
1656 if (output_mode_ == Config::OutputMode::IMPORT) { | |
1657 DCHECK(output_record.dmabuf_fds.empty()); | |
1658 output_record.dmabuf_fds = std::move(*passed_dmabuf_fds); | |
1659 } | |
1660 | |
1661 DCHECK_EQ(std::count(free_output_buffers_.begin(), free_output_buffers_.end(), | |
1662 buffer_index), | |
1663 0); | |
1664 free_output_buffers_.push_back(buffer_index); | |
1665 ScheduleDecodeBufferTaskIfNeeded(); | |
1666 } | |
1667 | |
1668 void V4L2SliceVideoDecodeAccelerator::ImportBufferForPicture( | |
1669 int32_t picture_buffer_id, | |
1670 const std::vector<gfx::GpuMemoryBufferHandle>& gpu_memory_buffer_handles) { | |
1671 DVLOGF(3) << "picture_buffer_id=" << picture_buffer_id; | |
1672 DCHECK(child_task_runner_->BelongsToCurrentThread()); | |
1673 | |
1674 auto passed_dmabuf_fds(base::WrapUnique(new std::vector<base::ScopedFD>())); | |
1675 #if defined(USE_OZONE) | |
1676 for (const auto& handle : gpu_memory_buffer_handles) { | |
1677 int fd = -1; | |
1678 fd = handle.native_pixmap_handle.fd.fd; | |
1679 DCHECK_NE(fd, -1); | |
1680 passed_dmabuf_fds->push_back(base::ScopedFD(fd)); | |
1681 } | |
1682 #endif | |
1683 | |
1684 if (output_mode_ != Config::OutputMode::IMPORT) { | |
1685 LOGF(ERROR) << "Cannot import in non-import mode"; | |
1686 NOTIFY_ERROR(INVALID_ARGUMENT); | |
1687 return; | |
1688 } | |
1689 | |
1690 decoder_thread_task_runner_->PostTask( | |
1691 FROM_HERE, | |
1692 base::Bind(&V4L2SliceVideoDecodeAccelerator::ImportBufferForPictureTask, | |
1693 base::Unretained(this), picture_buffer_id, | |
1694 base::Passed(&passed_dmabuf_fds))); | |
1695 } | |
1696 | |
1697 void V4L2SliceVideoDecodeAccelerator::ImportBufferForPictureTask( | |
1698 int32_t picture_buffer_id, | |
1699 std::unique_ptr<std::vector<base::ScopedFD>> passed_dmabuf_fds) { | |
1700 DVLOGF(3) << "picture_buffer_id=" << picture_buffer_id; | |
1701 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread()); | |
1702 | |
1703 const auto iter = | |
1704 std::find_if(output_buffer_map_.begin(), output_buffer_map_.end(), | |
1705 [picture_buffer_id](const OutputRecord& output_record) { | |
1706 return output_record.picture_id == picture_buffer_id; | |
1707 }); | |
1708 if (iter == output_buffer_map_.end()) { | |
1709 LOGF(ERROR) << "Invalid picture_buffer_id=" << picture_buffer_id; | |
1710 NOTIFY_ERROR(INVALID_ARGUMENT); | |
1711 return; | |
1712 } | |
1713 | |
1714 if (!iter->at_client) { | |
1715 LOGF(ERROR) << "Cannot import buffer that not owned by client"; | |
1716 NOTIFY_ERROR(INVALID_ARGUMENT); | |
1717 return; | |
1718 } | |
1719 | |
1720 size_t index = iter - output_buffer_map_.begin(); | |
1721 DCHECK_EQ(std::count(free_output_buffers_.begin(), free_output_buffers_.end(), | |
1722 index), | |
1723 0); | |
1724 | |
1725 DCHECK(!iter->at_device); | |
1726 iter->at_client = false; | |
1727 if (iter->texture_id != 0) { | |
1728 if (iter->egl_image != EGL_NO_IMAGE_KHR) { | |
1729 child_task_runner_->PostTask( | |
1730 FROM_HERE, | |
1731 base::Bind(base::IgnoreResult(&V4L2Device::DestroyEGLImage), device_, | |
1732 egl_display_, iter->egl_image)); | |
1733 } | |
1734 | |
1735 child_task_runner_->PostTask( | |
1736 FROM_HERE, | |
1737 base::Bind(&V4L2SliceVideoDecodeAccelerator::CreateEGLImageFor, | |
1738 weak_this_, index, picture_buffer_id, | |
1739 base::Passed(&passed_dmabuf_fds), iter->texture_id, | |
1740 coded_size_, output_format_fourcc_)); | |
1741 } else { | |
1742 // No need for an EGLImage, start using this buffer now. | |
1743 DCHECK_EQ(output_planes_count_, passed_dmabuf_fds->size()); | |
1744 iter->dmabuf_fds.swap(*passed_dmabuf_fds); | |
1745 free_output_buffers_.push_back(index); | |
1746 ScheduleDecodeBufferTaskIfNeeded(); | |
1747 } | |
1748 } | |
1749 | |
1750 void V4L2SliceVideoDecodeAccelerator::ReusePictureBuffer( | |
1751 int32_t picture_buffer_id) { | |
1752 DCHECK(child_task_runner_->BelongsToCurrentThread()); | |
1753 DVLOGF(4) << "picture_buffer_id=" << picture_buffer_id; | |
1754 | |
1755 std::unique_ptr<EGLSyncKHRRef> egl_sync_ref; | |
1756 | |
1757 if (!make_context_current_cb_.is_null()) { | |
1758 if (!make_context_current_cb_.Run()) { | |
1759 LOGF(ERROR) << "could not make context current"; | |
1760 NOTIFY_ERROR(PLATFORM_FAILURE); | |
1761 return; | |
1762 } | |
1763 | |
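// Create a fence in the client's GL command stream to mark the point after | |
// which the texture backing this picture is no longer being read. | |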
1764 EGLSyncKHR egl_sync = | |
1765 eglCreateSyncKHR(egl_display_, EGL_SYNC_FENCE_KHR, NULL); | |
1766 if (egl_sync == EGL_NO_SYNC_KHR) { | |
1767 LOGF(ERROR) << "eglCreateSyncKHR() failed"; | |
1768 NOTIFY_ERROR(PLATFORM_FAILURE); | |
1769 return; | |
1770 } | |
1771 | |
1772 egl_sync_ref.reset(new EGLSyncKHRRef(egl_display_, egl_sync)); | |
1773 } | |
1774 | |
1775 decoder_thread_task_runner_->PostTask( | |
1776 FROM_HERE, | |
1777 base::Bind(&V4L2SliceVideoDecodeAccelerator::ReusePictureBufferTask, | |
1778 base::Unretained(this), picture_buffer_id, | |
1779 base::Passed(&egl_sync_ref))); | |
1780 } | |
1781 | |
1782 void V4L2SliceVideoDecodeAccelerator::ReusePictureBufferTask( | |
1783 int32_t picture_buffer_id, | |
1784 std::unique_ptr<EGLSyncKHRRef> egl_sync_ref) { | |
1785 DVLOGF(3) << "picture_buffer_id=" << picture_buffer_id; | |
1786 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread()); | |
1787 | |
1788 V4L2DecodeSurfaceByPictureBufferId::iterator it = | |
1789 surfaces_at_display_.find(picture_buffer_id); | |
1790 if (it == surfaces_at_display_.end()) { | |
1791 // It's possible that we've already posted a DismissPictureBuffer for this | |
1792 // picture, but it has not yet executed when this ReusePictureBuffer was | |
1793 // posted to us by the client. In that case just ignore this (we've already | |
1794 // dismissed it and accounted for that) and let the sync object get | |
1795 // destroyed. | |
1796 DVLOGF(3) << "got picture id=" << picture_buffer_id | |
1797 << " not in use (anymore?)."; | |
1798 return; | |
1799 } | |
1800 | |
1801 OutputRecord& output_record = output_buffer_map_[it->second->output_record()]; | |
1802 if (output_record.at_device || !output_record.at_client) { | |
1803 DVLOGF(1) << "picture_buffer_id not reusable"; | |
1804 NOTIFY_ERROR(INVALID_ARGUMENT); | |
1805 return; | |
1806 } | |
1807 | |
1808 DCHECK_EQ(output_record.egl_sync, EGL_NO_SYNC_KHR); | |
1809 DCHECK(!output_record.at_device); | |
1810 output_record.at_client = false; | |
1811 if (egl_sync_ref) { | |
1812 output_record.egl_sync = egl_sync_ref->egl_sync; | |
1813 // Take ownership of the EGLSync. | |
1814 egl_sync_ref->egl_sync = EGL_NO_SYNC_KHR; | |
1815 } | |
1816 | |
1817 surfaces_at_display_.erase(it); | |
1818 } | |
1819 | |
1820 void V4L2SliceVideoDecodeAccelerator::Flush() { | |
1821 DVLOGF(3); | |
1822 DCHECK(child_task_runner_->BelongsToCurrentThread()); | |
1823 | |
1824 decoder_thread_task_runner_->PostTask( | |
1825 FROM_HERE, base::Bind(&V4L2SliceVideoDecodeAccelerator::FlushTask, | |
1826 base::Unretained(this))); | |
1827 } | |
1828 | |
1829 void V4L2SliceVideoDecodeAccelerator::FlushTask() { | |
1830 DVLOGF(3); | |
1831 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread()); | |
1832 | |
1833 if (!decoder_input_queue_.empty()) { | |
1834 // We are not done with pending inputs, so queue an empty buffer, | |
1835 // which, when reached, will trigger the flush sequence. | |
1836 decoder_input_queue_.push( | |
1837 linked_ptr<BitstreamBufferRef>(new BitstreamBufferRef( | |
1838 decode_client_, decode_task_runner_, nullptr, kFlushBufferId))); | |
1839 return; | |
1840 } | |
1841 | |
1842 // No more inputs pending, so just finish flushing here. | |
1843 InitiateFlush(); | |
1844 } | |
1845 | |
1846 void V4L2SliceVideoDecodeAccelerator::InitiateFlush() { | |
1847 DVLOGF(3); | |
1848 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread()); | |
1849 | |
1850 // This will trigger output for all remaining surfaces in the decoder. | |
1851 // However, not all of them may be decoded yet (they would be queued | |
1852 // in hardware then). | |
1853 if (!decoder_->Flush()) { | |
1854 DVLOGF(1) << "Failed flushing the decoder."; | |
1855 NOTIFY_ERROR(PLATFORM_FAILURE); | |
1856 return; | |
1857 } | |
1858 | |
1859 // Put the decoder in an idle state, ready to resume. | |
1860 decoder_->Reset(); | |
1861 | |
1862 DCHECK(!decoder_flushing_); | |
1863 decoder_flushing_ = true; | |
1864 NewEventPending(); | |
1865 } | |
1866 | |
1867 bool V4L2SliceVideoDecodeAccelerator::FinishFlush() { | |
1868 DVLOGF(3); | |
1869 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread()); | |
1870 | |
1871 if (!decoder_flushing_) | |
1872 return true; | |
1873 | |
1874 if (!surfaces_at_device_.empty()) | |
1875 return false; | |
1876 | |
1877 DCHECK_EQ(state_, kIdle); | |
1878 | |
1879 // At this point, all remaining surfaces are decoded and dequeued, and since | |
1880 // we have already scheduled output for them in InitiateFlush(), their | |
1881 // respective PictureReady calls have been posted (or they have been queued on | |
1882 // pending_picture_ready_). So at this time, once we SendPictureReady(), | |
1883 // we will have all remaining PictureReady() posted to the client and we | |
1884 // can post NotifyFlushDone(). | |
1885 DCHECK(decoder_display_queue_.empty()); | |
1886 | |
1887 // Decoder should have already returned all surfaces and all surfaces are | |
1888 // out of hardware. There can be no other owners of input buffers. | |
1889 DCHECK_EQ(free_input_buffers_.size(), input_buffer_map_.size()); | |
1890 | |
1891 SendPictureReady(); | |
1892 | |
1893 decoder_flushing_ = false; | |
1894 DVLOGF(3) << "Flush finished"; | |
1895 | |
1896 child_task_runner_->PostTask(FROM_HERE, | |
1897 base::Bind(&Client::NotifyFlushDone, client_)); | |
1898 | |
1899 return true; | |
1900 } | |
1901 | |
1902 void V4L2SliceVideoDecodeAccelerator::Reset() { | |
1903 DVLOGF(3); | |
1904 DCHECK(child_task_runner_->BelongsToCurrentThread()); | |
1905 | |
1906 decoder_thread_task_runner_->PostTask( | |
1907 FROM_HERE, base::Bind(&V4L2SliceVideoDecodeAccelerator::ResetTask, | |
1908 base::Unretained(this))); | |
1909 } | |
1910 | |
1911 void V4L2SliceVideoDecodeAccelerator::ResetTask() { | |
1912 DVLOGF(3); | |
1913 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread()); | |
1914 | |
1915 if (decoder_resetting_) { | |
1916 // This is a bug in the client, multiple Reset()s before NotifyResetDone() | |
1917 // are not allowed. | |
1918 NOTREACHED() << "Client should not be requesting multiple Reset()s"; | |
1919 return; | |
1920 } | |
1921 | |
1922 // Put the decoder in an idle state, ready to resume. | |
1923 decoder_->Reset(); | |
1924 | |
1925 // Drop all remaining inputs. | |
1926 decoder_current_bitstream_buffer_.reset(); | |
1927 while (!decoder_input_queue_.empty()) | |
1928 decoder_input_queue_.pop(); | |
1929 | |
1930 decoder_resetting_ = true; | |
1931 NewEventPending(); | |
1932 } | |
1933 | |
1934 bool V4L2SliceVideoDecodeAccelerator::FinishReset() { | |
1935 DVLOGF(3); | |
1936 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread()); | |
1937 | |
1938 if (!decoder_resetting_) | |
1939 return true; | |
1940 | |
1941 if (!surfaces_at_device_.empty()) | |
1942 return false; | |
1943 | |
1944 DCHECK_EQ(state_, kIdle); | |
1945 DCHECK(!decoder_flushing_); | |
1946 SendPictureReady(); | |
1947 | |
1948 // Drop any pending outputs. | |
1949 while (!decoder_display_queue_.empty()) | |
1950 decoder_display_queue_.pop(); | |
1951 | |
1952 // At this point there can be no input buffers in the decoder: we Reset() it | |
1953 // in ResetTask() and, having been in kIdle since, have not scheduled any | |
1954 // new Decode()s. We don't have any surfaces in the HW either - | |
1955 // we just checked that surfaces_at_device_.empty(), and inputs are tied | |
1956 // to surfaces. Since there can be no other owners of input buffers, we can | |
1957 // simply mark them all as available. | |
1958 DCHECK_EQ(input_buffer_queued_count_, 0); | |
1959 free_input_buffers_.clear(); | |
1960 for (size_t i = 0; i < input_buffer_map_.size(); ++i) { | |
1961 DCHECK(!input_buffer_map_[i].at_device); | |
1962 ReuseInputBuffer(i); | |
1963 } | |
1964 | |
1965 decoder_resetting_ = false; | |
1966 DVLOGF(3) << "Reset finished"; | |
1967 | |
1968 child_task_runner_->PostTask(FROM_HERE, | |
1969 base::Bind(&Client::NotifyResetDone, client_)); | |
1970 | |
1971 return true; | |
1972 } | |
1973 | |
1974 void V4L2SliceVideoDecodeAccelerator::SetErrorState(Error error) { | |
1975 // We can touch decoder_state_ only if this is the decoder thread or the | |
1976 // decoder thread isn't running. | |
1977 if (decoder_thread_.IsRunning() && | |
1978 !decoder_thread_task_runner_->BelongsToCurrentThread()) { | |
1979 decoder_thread_task_runner_->PostTask( | |
1980 FROM_HERE, base::Bind(&V4L2SliceVideoDecodeAccelerator::SetErrorState, | |
1981 base::Unretained(this), error)); | |
1982 return; | |
1983 } | |
1984 | |
1985 // Post NotifyError only if we are already initialized, as the API does | |
1986 // not allow doing so before that. | |
1987 if (state_ != kError && state_ != kUninitialized) | |
1988 NotifyError(error); | |
1989 | |
1990 state_ = kError; | |
1991 } | |
1992 | |
1993 V4L2SliceVideoDecodeAccelerator::V4L2H264Accelerator::V4L2H264Accelerator( | |
1994 V4L2SliceVideoDecodeAccelerator* v4l2_dec) | |
1995 : num_slices_(0), v4l2_dec_(v4l2_dec) { | |
1996 DCHECK(v4l2_dec_); | |
1997 } | |
1998 | |
1999 V4L2SliceVideoDecodeAccelerator::V4L2H264Accelerator::~V4L2H264Accelerator() { | |
2000 } | |
2001 | |
2002 scoped_refptr<H264Picture> | |
2003 V4L2SliceVideoDecodeAccelerator::V4L2H264Accelerator::CreateH264Picture() { | |
2004 scoped_refptr<V4L2DecodeSurface> dec_surface = v4l2_dec_->CreateSurface(); | |
2005 if (!dec_surface) | |
2006 return nullptr; | |
2007 | |
2008 return new V4L2H264Picture(dec_surface); | |
2009 } | |
2010 | |
2011 void V4L2SliceVideoDecodeAccelerator::V4L2H264Accelerator:: | |
2012 H264PictureListToDPBIndicesList(const H264Picture::Vector& src_pic_list, | |
2013 uint8_t dst_list[kDPBIndicesListSize]) { | |
2014 size_t i; | |
2015 for (i = 0; i < src_pic_list.size() && i < kDPBIndicesListSize; ++i) { | |
2016 const scoped_refptr<H264Picture>& pic = src_pic_list[i]; | |
2017 dst_list[i] = pic ? pic->dpb_position : VIDEO_MAX_FRAME; | |
2018 } | |
2019 | |
2020 while (i < kDPBIndicesListSize) | |
2021 dst_list[i++] = VIDEO_MAX_FRAME; | |
2022 } | |
2023 | |
2024 void V4L2SliceVideoDecodeAccelerator::V4L2H264Accelerator::H264DPBToV4L2DPB( | |
2025 const H264DPB& dpb, | |
2026 std::vector<scoped_refptr<V4L2DecodeSurface>>* ref_surfaces) { | |
2027 memset(v4l2_decode_param_.dpb, 0, sizeof(v4l2_decode_param_.dpb)); | |
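// Translate each DPB entry into a v4l2_h264_dpb_entry referring to the output | |
// buffer index of the corresponding decode surface. | |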
2028 size_t i = 0; | |
2029 for (const auto& pic : dpb) { | |
2030 if (i >= arraysize(v4l2_decode_param_.dpb)) { | |
2031 DVLOG(1) << "Invalid DPB size"; | |
2032 break; | |
2033 } | |
2034 | |
2035 int index = VIDEO_MAX_FRAME; | |
2036 if (!pic->nonexisting) { | |
2037 scoped_refptr<V4L2DecodeSurface> dec_surface = | |
2038 H264PictureToV4L2DecodeSurface(pic); | |
2039 index = dec_surface->output_record(); | |
2040 ref_surfaces->push_back(dec_surface); | |
2041 } | |
2042 | |
2043 struct v4l2_h264_dpb_entry& entry = v4l2_decode_param_.dpb[i++]; | |
2044 entry.buf_index = index; | |
2045 entry.frame_num = pic->frame_num; | |
2046 entry.pic_num = pic->pic_num; | |
2047 entry.top_field_order_cnt = pic->top_field_order_cnt; | |
2048 entry.bottom_field_order_cnt = pic->bottom_field_order_cnt; | |
2049 entry.flags = (pic->ref ? V4L2_H264_DPB_ENTRY_FLAG_ACTIVE : 0) | | |
2050 (pic->long_term ? V4L2_H264_DPB_ENTRY_FLAG_LONG_TERM : 0); | |
2051 } | |
2052 } | |
2053 | |
2054 bool V4L2SliceVideoDecodeAccelerator::V4L2H264Accelerator::SubmitFrameMetadata( | |
2055 const media::H264SPS* sps, | |
2056 const media::H264PPS* pps, | |
2057 const H264DPB& dpb, | |
2058 const H264Picture::Vector& ref_pic_listp0, | |
2059 const H264Picture::Vector& ref_pic_listb0, | |
2060 const H264Picture::Vector& ref_pic_listb1, | |
2061 const scoped_refptr<H264Picture>& pic) { | |
2062 struct v4l2_ext_control ctrl; | |
2063 std::vector<struct v4l2_ext_control> ctrls; | |
2064 | |
2065 struct v4l2_ctrl_h264_sps v4l2_sps; | |
2066 memset(&v4l2_sps, 0, sizeof(v4l2_sps)); | |
2067 v4l2_sps.constraint_set_flags = | |
2068 (sps->constraint_set0_flag ? V4L2_H264_SPS_CONSTRAINT_SET0_FLAG : 0) | | |
2069 (sps->constraint_set1_flag ? V4L2_H264_SPS_CONSTRAINT_SET1_FLAG : 0) | | |
2070 (sps->constraint_set2_flag ? V4L2_H264_SPS_CONSTRAINT_SET2_FLAG : 0) | | |
2071 (sps->constraint_set3_flag ? V4L2_H264_SPS_CONSTRAINT_SET3_FLAG : 0) | | |
2072 (sps->constraint_set4_flag ? V4L2_H264_SPS_CONSTRAINT_SET4_FLAG : 0) | | |
2073 (sps->constraint_set5_flag ? V4L2_H264_SPS_CONSTRAINT_SET5_FLAG : 0); | |
2074 #define SPS_TO_V4L2SPS(a) v4l2_sps.a = sps->a | |
2075 SPS_TO_V4L2SPS(profile_idc); | |
2076 SPS_TO_V4L2SPS(level_idc); | |
2077 SPS_TO_V4L2SPS(seq_parameter_set_id); | |
2078 SPS_TO_V4L2SPS(chroma_format_idc); | |
2079 SPS_TO_V4L2SPS(bit_depth_luma_minus8); | |
2080 SPS_TO_V4L2SPS(bit_depth_chroma_minus8); | |
2081 SPS_TO_V4L2SPS(log2_max_frame_num_minus4); | |
2082 SPS_TO_V4L2SPS(pic_order_cnt_type); | |
2083 SPS_TO_V4L2SPS(log2_max_pic_order_cnt_lsb_minus4); | |
2084 SPS_TO_V4L2SPS(offset_for_non_ref_pic); | |
2085 SPS_TO_V4L2SPS(offset_for_top_to_bottom_field); | |
2086 SPS_TO_V4L2SPS(num_ref_frames_in_pic_order_cnt_cycle); | |
2087 | |
2088 static_assert(arraysize(v4l2_sps.offset_for_ref_frame) == | |
2089 arraysize(sps->offset_for_ref_frame), | |
2090 "offset_for_ref_frame arrays must be same size"); | |
2091 for (size_t i = 0; i < arraysize(v4l2_sps.offset_for_ref_frame); ++i) | |
2092 v4l2_sps.offset_for_ref_frame[i] = sps->offset_for_ref_frame[i]; | |
2093 SPS_TO_V4L2SPS(max_num_ref_frames); | |
2094 SPS_TO_V4L2SPS(pic_width_in_mbs_minus1); | |
2095 SPS_TO_V4L2SPS(pic_height_in_map_units_minus1); | |
2096 #undef SPS_TO_V4L2SPS | |
2097 | |
2098 #define SET_V4L2_SPS_FLAG_IF(cond, flag) \ | |
2099 v4l2_sps.flags |= ((sps->cond) ? (flag) : 0) | |
2100 SET_V4L2_SPS_FLAG_IF(separate_colour_plane_flag, | |
2101 V4L2_H264_SPS_FLAG_SEPARATE_COLOUR_PLANE); | |
2102 SET_V4L2_SPS_FLAG_IF(qpprime_y_zero_transform_bypass_flag, | |
2103 V4L2_H264_SPS_FLAG_QPPRIME_Y_ZERO_TRANSFORM_BYPASS); | |
2104 SET_V4L2_SPS_FLAG_IF(delta_pic_order_always_zero_flag, | |
2105 V4L2_H264_SPS_FLAG_DELTA_PIC_ORDER_ALWAYS_ZERO); | |
2106 SET_V4L2_SPS_FLAG_IF(gaps_in_frame_num_value_allowed_flag, | |
2107 V4L2_H264_SPS_FLAG_GAPS_IN_FRAME_NUM_VALUE_ALLOWED); | |
2108 SET_V4L2_SPS_FLAG_IF(frame_mbs_only_flag, V4L2_H264_SPS_FLAG_FRAME_MBS_ONLY); | |
2109 SET_V4L2_SPS_FLAG_IF(mb_adaptive_frame_field_flag, | |
2110 V4L2_H264_SPS_FLAG_MB_ADAPTIVE_FRAME_FIELD); | |
2111 SET_V4L2_SPS_FLAG_IF(direct_8x8_inference_flag, | |
2112 V4L2_H264_SPS_FLAG_DIRECT_8X8_INFERENCE); | |
2113 #undef SET_V4L2_SPS_FLAG_IF | |
2114 memset(&ctrl, 0, sizeof(ctrl)); | |
2115 ctrl.id = V4L2_CID_MPEG_VIDEO_H264_SPS; | |
2116 ctrl.size = sizeof(v4l2_sps); | |
2117 ctrl.p_h264_sps = &v4l2_sps; | |
2118 ctrls.push_back(ctrl); | |
2119 | |
2120 struct v4l2_ctrl_h264_pps v4l2_pps; | |
2121 memset(&v4l2_pps, 0, sizeof(v4l2_pps)); | |
2122 #define PPS_TO_V4L2PPS(a) v4l2_pps.a = pps->a | |
2123 PPS_TO_V4L2PPS(pic_parameter_set_id); | |
2124 PPS_TO_V4L2PPS(seq_parameter_set_id); | |
2125 PPS_TO_V4L2PPS(num_slice_groups_minus1); | |
2126 PPS_TO_V4L2PPS(num_ref_idx_l0_default_active_minus1); | |
2127 PPS_TO_V4L2PPS(num_ref_idx_l1_default_active_minus1); | |
2128 PPS_TO_V4L2PPS(weighted_bipred_idc); | |
2129 PPS_TO_V4L2PPS(pic_init_qp_minus26); | |
2130 PPS_TO_V4L2PPS(pic_init_qs_minus26); | |
2131 PPS_TO_V4L2PPS(chroma_qp_index_offset); | |
2132 PPS_TO_V4L2PPS(second_chroma_qp_index_offset); | |
2133 #undef PPS_TO_V4L2PPS | |
2134 | |
2135 #define SET_V4L2_PPS_FLAG_IF(cond, flag) \ | |
2136 v4l2_pps.flags |= ((pps->cond) ? (flag) : 0) | |
2137 SET_V4L2_PPS_FLAG_IF(entropy_coding_mode_flag, | |
2138 V4L2_H264_PPS_FLAG_ENTROPY_CODING_MODE); | |
2139 SET_V4L2_PPS_FLAG_IF( | |
2140 bottom_field_pic_order_in_frame_present_flag, | |
2141 V4L2_H264_PPS_FLAG_BOTTOM_FIELD_PIC_ORDER_IN_FRAME_PRESENT); | |
2142 SET_V4L2_PPS_FLAG_IF(weighted_pred_flag, V4L2_H264_PPS_FLAG_WEIGHTED_PRED); | |
2143 SET_V4L2_PPS_FLAG_IF(deblocking_filter_control_present_flag, | |
2144 V4L2_H264_PPS_FLAG_DEBLOCKING_FILTER_CONTROL_PRESENT); | |
2145 SET_V4L2_PPS_FLAG_IF(constrained_intra_pred_flag, | |
2146 V4L2_H264_PPS_FLAG_CONSTRAINED_INTRA_PRED); | |
2147 SET_V4L2_PPS_FLAG_IF(redundant_pic_cnt_present_flag, | |
2148 V4L2_H264_PPS_FLAG_REDUNDANT_PIC_CNT_PRESENT); | |
2149 SET_V4L2_PPS_FLAG_IF(transform_8x8_mode_flag, | |
2150 V4L2_H264_PPS_FLAG_TRANSFORM_8X8_MODE); | |
2151 SET_V4L2_PPS_FLAG_IF(pic_scaling_matrix_present_flag, | |
2152 V4L2_H264_PPS_FLAG_PIC_SCALING_MATRIX_PRESENT); | |
2153 #undef SET_V4L2_PPS_FLAG_IF | |
2154 memset(&ctrl, 0, sizeof(ctrl)); | |
2155 ctrl.id = V4L2_CID_MPEG_VIDEO_H264_PPS; | |
2156 ctrl.size = sizeof(v4l2_pps); | |
2157 ctrl.p_h264_pps = &v4l2_pps; | |
2158 ctrls.push_back(ctrl); | |
2159 | |
2160 struct v4l2_ctrl_h264_scaling_matrix v4l2_scaling_matrix; | |
2161 memset(&v4l2_scaling_matrix, 0, sizeof(v4l2_scaling_matrix)); | |
2162 static_assert(arraysize(v4l2_scaling_matrix.scaling_list_4x4) <= | |
2163 arraysize(pps->scaling_list4x4) && | |
2164 arraysize(v4l2_scaling_matrix.scaling_list_4x4[0]) <= | |
2165 arraysize(pps->scaling_list4x4[0]) && | |
2166 arraysize(v4l2_scaling_matrix.scaling_list_8x8) <= | |
2167 arraysize(pps->scaling_list8x8) && | |
2168 arraysize(v4l2_scaling_matrix.scaling_list_8x8[0]) <= | |
2169 arraysize(pps->scaling_list8x8[0]), | |
2170 "scaling_lists must be of correct size"); | |
2171 for (size_t i = 0; i < arraysize(v4l2_scaling_matrix.scaling_list_4x4); ++i) { | |
2172 for (size_t j = 0; j < arraysize(v4l2_scaling_matrix.scaling_list_4x4[i]); | |
2173 ++j) { | |
2174 v4l2_scaling_matrix.scaling_list_4x4[i][j] = pps->scaling_list4x4[i][j]; | |
2175 } | |
2176 } | |
2177 for (size_t i = 0; i < arraysize(v4l2_scaling_matrix.scaling_list_8x8); ++i) { | |
2178 for (size_t j = 0; j < arraysize(v4l2_scaling_matrix.scaling_list_8x8[i]); | |
2179 ++j) { | |
2180 v4l2_scaling_matrix.scaling_list_8x8[i][j] = pps->scaling_list8x8[i][j]; | |
2181 } | |
2182 } | |
2183 memset(&ctrl, 0, sizeof(ctrl)); | |
2184 ctrl.id = V4L2_CID_MPEG_VIDEO_H264_SCALING_MATRIX; | |
2185 ctrl.size = sizeof(v4l2_scaling_matrix); | |
2186 ctrl.p_h264_scal_mtrx = &v4l2_scaling_matrix; | |
2187 ctrls.push_back(ctrl); | |
2188 | |
2189 scoped_refptr<V4L2DecodeSurface> dec_surface = | |
2190 H264PictureToV4L2DecodeSurface(pic); | |
2191 | |
2192 struct v4l2_ext_controls ext_ctrls; | |
2193 memset(&ext_ctrls, 0, sizeof(ext_ctrls)); | |
2194 ext_ctrls.count = ctrls.size(); | |
2195 ext_ctrls.controls = &ctrls[0]; | |
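// The config store associates these controls with this surface's buffers, so | |
// the driver applies them when decoding this frame (a ChromeOS-specific V4L2 | |
// extension). | |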
2196 ext_ctrls.config_store = dec_surface->config_store(); | |
2197 if (!v4l2_dec_->SubmitExtControls(&ext_ctrls)) return false; | |
2198 | |
2199 H264PictureListToDPBIndicesList(ref_pic_listp0, | |
2200 v4l2_decode_param_.ref_pic_list_p0); | |
2201 H264PictureListToDPBIndicesList(ref_pic_listb0, | |
2202 v4l2_decode_param_.ref_pic_list_b0); | |
2203 H264PictureListToDPBIndicesList(ref_pic_listb1, | |
2204 v4l2_decode_param_.ref_pic_list_b1); | |
2205 | |
2206 std::vector<scoped_refptr<V4L2DecodeSurface>> ref_surfaces; | |
2207 H264DPBToV4L2DPB(dpb, &ref_surfaces); | |
2208 dec_surface->SetReferenceSurfaces(ref_surfaces); | |
2209 | |
2210 return true; | |
2211 } | |
2212 | |
2213 bool V4L2SliceVideoDecodeAccelerator::V4L2H264Accelerator::SubmitSlice( | |
2214 const media::H264PPS* pps, | |
2215 const media::H264SliceHeader* slice_hdr, | |
2216 const H264Picture::Vector& ref_pic_list0, | |
2217 const H264Picture::Vector& ref_pic_list1, | |
2218 const scoped_refptr<H264Picture>& pic, | |
2219 const uint8_t* data, | |
2220 size_t size) { | |
2221 if (num_slices_ == kMaxSlices) { | |
2222 LOGF(ERROR) << "Over limit of supported slices per frame"; | |
2223 return false; | |
2224 } | |
2225 | |
2226 struct v4l2_ctrl_h264_slice_param& v4l2_slice_param = | |
2227 v4l2_slice_params_[num_slices_++]; | |
2228 memset(&v4l2_slice_param, 0, sizeof(v4l2_slice_param)); | |
2229 | |
2230 v4l2_slice_param.size = size; | |
2231 #define SHDR_TO_V4L2SPARM(a) v4l2_slice_param.a = slice_hdr->a | |
2232 SHDR_TO_V4L2SPARM(header_bit_size); | |
2233 SHDR_TO_V4L2SPARM(first_mb_in_slice); | |
2234 SHDR_TO_V4L2SPARM(slice_type); | |
2235 SHDR_TO_V4L2SPARM(pic_parameter_set_id); | |
2236 SHDR_TO_V4L2SPARM(colour_plane_id); | |
2237 SHDR_TO_V4L2SPARM(frame_num); | |
2238 SHDR_TO_V4L2SPARM(idr_pic_id); | |
2239 SHDR_TO_V4L2SPARM(pic_order_cnt_lsb); | |
2240 SHDR_TO_V4L2SPARM(delta_pic_order_cnt_bottom); | |
2241 SHDR_TO_V4L2SPARM(delta_pic_order_cnt0); | |
2242 SHDR_TO_V4L2SPARM(delta_pic_order_cnt1); | |
2243 SHDR_TO_V4L2SPARM(redundant_pic_cnt); | |
2244 SHDR_TO_V4L2SPARM(dec_ref_pic_marking_bit_size); | |
2245 SHDR_TO_V4L2SPARM(cabac_init_idc); | |
2246 SHDR_TO_V4L2SPARM(slice_qp_delta); | |
2247 SHDR_TO_V4L2SPARM(slice_qs_delta); | |
2248 SHDR_TO_V4L2SPARM(disable_deblocking_filter_idc); | |
2249 SHDR_TO_V4L2SPARM(slice_alpha_c0_offset_div2); | |
2250 SHDR_TO_V4L2SPARM(slice_beta_offset_div2); | |
2251 SHDR_TO_V4L2SPARM(num_ref_idx_l0_active_minus1); | |
2252 SHDR_TO_V4L2SPARM(num_ref_idx_l1_active_minus1); | |
2253 SHDR_TO_V4L2SPARM(pic_order_cnt_bit_size); | |
2254 #undef SHDR_TO_V4L2SPARM | |
2255 | |
2256 #define SET_V4L2_SPARM_FLAG_IF(cond, flag) \ | |
2257 v4l2_slice_param.flags |= ((slice_hdr->cond) ? (flag) : 0) | |
2258 SET_V4L2_SPARM_FLAG_IF(field_pic_flag, V4L2_SLICE_FLAG_FIELD_PIC); | |
2259 SET_V4L2_SPARM_FLAG_IF(bottom_field_flag, V4L2_SLICE_FLAG_BOTTOM_FIELD); | |
2260 SET_V4L2_SPARM_FLAG_IF(direct_spatial_mv_pred_flag, | |
2261 V4L2_SLICE_FLAG_DIRECT_SPATIAL_MV_PRED); | |
2262 SET_V4L2_SPARM_FLAG_IF(sp_for_switch_flag, V4L2_SLICE_FLAG_SP_FOR_SWITCH); | |
2263 #undef SET_V4L2_SPARM_FLAG_IF | |
2264 | |
2265 struct v4l2_h264_pred_weight_table* pred_weight_table = | |
2266 &v4l2_slice_param.pred_weight_table; | |
2267 | |
2268 if (((slice_hdr->IsPSlice() || slice_hdr->IsSPSlice()) && | |
2269 pps->weighted_pred_flag) || | |
2270 (slice_hdr->IsBSlice() && pps->weighted_bipred_idc == 1)) { | |
2271 pred_weight_table->luma_log2_weight_denom = | |
2272 slice_hdr->luma_log2_weight_denom; | |
2273 pred_weight_table->chroma_log2_weight_denom = | |
2274 slice_hdr->chroma_log2_weight_denom; | |
2275 | |
2276 struct v4l2_h264_weight_factors* factorsl0 = | |
2277 &pred_weight_table->weight_factors[0]; | |
2278 | |
2279 for (int i = 0; i < 32; ++i) { | |
2280 factorsl0->luma_weight[i] = | |
2281 slice_hdr->pred_weight_table_l0.luma_weight[i]; | |
2282 factorsl0->luma_offset[i] = | |
2283 slice_hdr->pred_weight_table_l0.luma_offset[i]; | |
2284 | |
2285 for (int j = 0; j < 2; ++j) { | |
2286 factorsl0->chroma_weight[i][j] = | |
2287 slice_hdr->pred_weight_table_l0.chroma_weight[i][j]; | |
2288 factorsl0->chroma_offset[i][j] = | |
2289 slice_hdr->pred_weight_table_l0.chroma_offset[i][j]; | |
2290 } | |
2291 } | |
2292 | |
2293 if (slice_hdr->IsBSlice()) { | |
2294 struct v4l2_h264_weight_factors* factorsl1 = | |
2295 &pred_weight_table->weight_factors[1]; | |
2296 | |
2297 for (int i = 0; i < 32; ++i) { | |
2298 factorsl1->luma_weight[i] = | |
2299 slice_hdr->pred_weight_table_l1.luma_weight[i]; | |
2300 factorsl1->luma_offset[i] = | |
2301 slice_hdr->pred_weight_table_l1.luma_offset[i]; | |
2302 | |
2303 for (int j = 0; j < 2; ++j) { | |
2304 factorsl1->chroma_weight[i][j] = | |
2305 slice_hdr->pred_weight_table_l1.chroma_weight[i][j]; | |
2306 factorsl1->chroma_offset[i][j] = | |
2307 slice_hdr->pred_weight_table_l1.chroma_offset[i][j]; | |
2308 } | |
2309 } | |
2310 } | |
2311 } | |
2312 | |
2313 H264PictureListToDPBIndicesList(ref_pic_list0, | |
2314 v4l2_slice_param.ref_pic_list0); | |
2315 H264PictureListToDPBIndicesList(ref_pic_list1, | |
2316 v4l2_slice_param.ref_pic_list1); | |
2317 | |
2318 scoped_refptr<V4L2DecodeSurface> dec_surface = | |
2319 H264PictureToV4L2DecodeSurface(pic); | |
2320 | |
2321 v4l2_decode_param_.nal_ref_idc = slice_hdr->nal_ref_idc; | |
2322 | |
2323 // TODO(posciak): Don't add start code back here, but have it passed from | |
2324 // the parser. | |
2325 size_t data_copy_size = size + 3; | |
2326 std::unique_ptr<uint8_t[]> data_copy(new uint8_t[data_copy_size]); | |
2327 memset(data_copy.get(), 0, data_copy_size); | |
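// The first three bytes form the 00 00 01 Annex B start code; the slice NAL | |
// unit data is copied in right after it. | |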
2328 data_copy[2] = 0x01; | |
2329 memcpy(data_copy.get() + 3, data, size); | |
2330 return v4l2_dec_->SubmitSlice(dec_surface->input_record(), data_copy.get(), | |
2331 data_copy_size); | |
2332 } | |
2333 | |
2334 bool V4L2SliceVideoDecodeAccelerator::SubmitSlice(int index, | |
2335 const uint8_t* data, | |
2336 size_t size) { | |
2337 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread()); | |
2338 | |
2339 InputRecord& input_record = input_buffer_map_[index]; | |
2340 | |
2341 if (input_record.bytes_used + size > input_record.length) { | |
2342 DVLOGF(1) << "Input buffer too small"; | |
2343 return false; | |
2344 } | |
2345 | |
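// Append this slice to the input (OUTPUT queue) buffer; successive slices of | |
// the same frame accumulate in a single buffer. | |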
2346 memcpy(static_cast<uint8_t*>(input_record.address) + input_record.bytes_used, | |
2347 data, size); | |
2348 input_record.bytes_used += size; | |
2349 | |
2350 return true; | |
2351 } | |
2352 | |
2353 bool V4L2SliceVideoDecodeAccelerator::SubmitExtControls( | |
2354 struct v4l2_ext_controls* ext_ctrls) { | |
2355 DCHECK_GT(ext_ctrls->config_store, 0u); | |
2356 IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_S_EXT_CTRLS, ext_ctrls); | |
2357 return true; | |
2358 } | |
2359 | |
2360 bool V4L2SliceVideoDecodeAccelerator::V4L2H264Accelerator::SubmitDecode( | |
2361 const scoped_refptr<H264Picture>& pic) { | |
2362 scoped_refptr<V4L2DecodeSurface> dec_surface = | |
2363 H264PictureToV4L2DecodeSurface(pic); | |
2364 | |
2365 v4l2_decode_param_.num_slices = num_slices_; | |
2366 v4l2_decode_param_.idr_pic_flag = pic->idr; | |
2367 v4l2_decode_param_.top_field_order_cnt = pic->top_field_order_cnt; | |
2368 v4l2_decode_param_.bottom_field_order_cnt = pic->bottom_field_order_cnt; | |
2369 | |
2370 struct v4l2_ext_control ctrl; | |
2371 std::vector<struct v4l2_ext_control> ctrls; | |
2372 | |
2373 memset(&ctrl, 0, sizeof(ctrl)); | |
2374 ctrl.id = V4L2_CID_MPEG_VIDEO_H264_SLICE_PARAM; | |
2375 ctrl.size = sizeof(v4l2_slice_params_); | |
2376 ctrl.p_h264_slice_param = v4l2_slice_params_; | |
2377 ctrls.push_back(ctrl); | |
2378 | |
2379 memset(&ctrl, 0, sizeof(ctrl)); | |
2380 ctrl.id = V4L2_CID_MPEG_VIDEO_H264_DECODE_PARAM; | |
2381 ctrl.size = sizeof(v4l2_decode_param_); | |
2382 ctrl.p_h264_decode_param = &v4l2_decode_param_; | |
2383 ctrls.push_back(ctrl); | |
2384 | |
2385 struct v4l2_ext_controls ext_ctrls; | |
2386 memset(&ext_ctrls, 0, sizeof(ext_ctrls)); | |
2387 ext_ctrls.count = ctrls.size(); | |
2388 ext_ctrls.controls = &ctrls[0]; | |
2389 ext_ctrls.config_store = dec_surface->config_store(); | |
2390 if (!v4l2_dec_->SubmitExtControls(&ext_ctrls)) return false; | |
2391 | |
2392 Reset(); | |
2393 | |
2394 v4l2_dec_->DecodeSurface(dec_surface); | |
2395 return true; | |
2396 } | |
2397 | |
2398 bool V4L2SliceVideoDecodeAccelerator::V4L2H264Accelerator::OutputPicture( | |
2399 const scoped_refptr<H264Picture>& pic) { | |
2400 scoped_refptr<V4L2DecodeSurface> dec_surface = | |
2401 H264PictureToV4L2DecodeSurface(pic); | |
2402 v4l2_dec_->SurfaceReady(dec_surface); | |
2403 return true; | |
2404 } | |
2405 | |
2406 void V4L2SliceVideoDecodeAccelerator::V4L2H264Accelerator::Reset() { | |
2407 num_slices_ = 0; | |
2408 memset(&v4l2_decode_param_, 0, sizeof(v4l2_decode_param_)); | |
2409 memset(&v4l2_slice_params_, 0, sizeof(v4l2_slice_params_)); | |
2410 } | |
2411 | |
2412 scoped_refptr<V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface> | |
2413 V4L2SliceVideoDecodeAccelerator::V4L2H264Accelerator:: | |
2414 H264PictureToV4L2DecodeSurface(const scoped_refptr<H264Picture>& pic) { | |
2415 V4L2H264Picture* v4l2_pic = pic->AsV4L2H264Picture(); | |
2416 CHECK(v4l2_pic); | |
2417 return v4l2_pic->dec_surface(); | |
2418 } | |
2419 | |
2420 V4L2SliceVideoDecodeAccelerator::V4L2VP8Accelerator::V4L2VP8Accelerator( | |
2421 V4L2SliceVideoDecodeAccelerator* v4l2_dec) | |
2422 : v4l2_dec_(v4l2_dec) { | |
2423 DCHECK(v4l2_dec_); | |
2424 } | |
2425 | |
2426 V4L2SliceVideoDecodeAccelerator::V4L2VP8Accelerator::~V4L2VP8Accelerator() { | |
2427 } | |
2428 | |
2429 scoped_refptr<VP8Picture> | |
2430 V4L2SliceVideoDecodeAccelerator::V4L2VP8Accelerator::CreateVP8Picture() { | |
2431 scoped_refptr<V4L2DecodeSurface> dec_surface = v4l2_dec_->CreateSurface(); | |
2432 if (!dec_surface) | |
2433 return nullptr; | |
2434 | |
2435 return new V4L2VP8Picture(dec_surface); | |
2436 } | |
2437 | |
2438 #define ARRAY_MEMCPY_CHECKED(to, from) \ | |
2439 do { \ | |
2440 static_assert(sizeof(to) == sizeof(from), \ | |
2441 #from " and " #to " arrays must be of same size"); \ | |
2442 memcpy(to, from, sizeof(to)); \ | |
2443 } while (0) | |
2444 | |
2445 static void FillV4L2SegmentationHeader( | |
2446 const media::Vp8SegmentationHeader& vp8_sgmnt_hdr, | |
2447 struct v4l2_vp8_sgmnt_hdr* v4l2_sgmnt_hdr) { | |
2448 #define SET_V4L2_SGMNT_HDR_FLAG_IF(cond, flag) \ | |
2449 v4l2_sgmnt_hdr->flags |= ((vp8_sgmnt_hdr.cond) ? (flag) : 0) | |
2450 SET_V4L2_SGMNT_HDR_FLAG_IF(segmentation_enabled, | |
2451 V4L2_VP8_SEGMNT_HDR_FLAG_ENABLED); | |
2452 SET_V4L2_SGMNT_HDR_FLAG_IF(update_mb_segmentation_map, | |
2453 V4L2_VP8_SEGMNT_HDR_FLAG_UPDATE_MAP); | |
2454 SET_V4L2_SGMNT_HDR_FLAG_IF(update_segment_feature_data, | |
2455 V4L2_VP8_SEGMNT_HDR_FLAG_UPDATE_FEATURE_DATA); | |
2456 #undef SET_V4L2_SGMNT_HDR_FLAG_IF | |
2457 v4l2_sgmnt_hdr->segment_feature_mode = vp8_sgmnt_hdr.segment_feature_mode; | |
2458 | |
2459 ARRAY_MEMCPY_CHECKED(v4l2_sgmnt_hdr->quant_update, | |
2460 vp8_sgmnt_hdr.quantizer_update_value); | |
2461 ARRAY_MEMCPY_CHECKED(v4l2_sgmnt_hdr->lf_update, | |
2462 vp8_sgmnt_hdr.lf_update_value); | |
2463 ARRAY_MEMCPY_CHECKED(v4l2_sgmnt_hdr->segment_probs, | |
2464 vp8_sgmnt_hdr.segment_prob); | |
2465 } | |
2466 | |
2467 static void FillV4L2LoopfilterHeader( | |
2468 const media::Vp8LoopFilterHeader& vp8_loopfilter_hdr, | |
2469 struct v4l2_vp8_loopfilter_hdr* v4l2_lf_hdr) { | |
2470 #define SET_V4L2_LF_HDR_FLAG_IF(cond, flag) \ | |
2471 v4l2_lf_hdr->flags |= ((vp8_loopfilter_hdr.cond) ? (flag) : 0) | |
2472 SET_V4L2_LF_HDR_FLAG_IF(loop_filter_adj_enable, V4L2_VP8_LF_HDR_ADJ_ENABLE); | |
2473 SET_V4L2_LF_HDR_FLAG_IF(mode_ref_lf_delta_update, | |
2474 V4L2_VP8_LF_HDR_DELTA_UPDATE); | |
2475 #undef SET_V4L2_LF_HDR_FLAG_IF | |
2476 | |
2477 #define LF_HDR_TO_V4L2_LF_HDR(a) v4l2_lf_hdr->a = vp8_loopfilter_hdr.a; | |
2478 LF_HDR_TO_V4L2_LF_HDR(type); | |
2479 LF_HDR_TO_V4L2_LF_HDR(level); | |
2480 LF_HDR_TO_V4L2_LF_HDR(sharpness_level); | |
2481 #undef LF_HDR_TO_V4L2_LF_HDR | |
2482 | |
2483 ARRAY_MEMCPY_CHECKED(v4l2_lf_hdr->ref_frm_delta_magnitude, | |
2484 vp8_loopfilter_hdr.ref_frame_delta); | |
2485 ARRAY_MEMCPY_CHECKED(v4l2_lf_hdr->mb_mode_delta_magnitude, | |
2486 vp8_loopfilter_hdr.mb_mode_delta); | |
2487 } | |
2488 | |
2489 static void FillV4L2QuantizationHeader( | |
2490 const media::Vp8QuantizationHeader& vp8_quant_hdr, | |
2491 struct v4l2_vp8_quantization_hdr* v4l2_quant_hdr) { | |
2492 v4l2_quant_hdr->y_ac_qi = vp8_quant_hdr.y_ac_qi; | |
2493 v4l2_quant_hdr->y_dc_delta = vp8_quant_hdr.y_dc_delta; | |
2494 v4l2_quant_hdr->y2_dc_delta = vp8_quant_hdr.y2_dc_delta; | |
2495 v4l2_quant_hdr->y2_ac_delta = vp8_quant_hdr.y2_ac_delta; | |
2496 v4l2_quant_hdr->uv_dc_delta = vp8_quant_hdr.uv_dc_delta; | |
2497 v4l2_quant_hdr->uv_ac_delta = vp8_quant_hdr.uv_ac_delta; | |
2498 } | |
2499 | |
2500 static void FillV4L2EntropyHeader( | |
2501 const media::Vp8EntropyHeader& vp8_entropy_hdr, | |
2502 struct v4l2_vp8_entropy_hdr* v4l2_entropy_hdr) { | |
2503 ARRAY_MEMCPY_CHECKED(v4l2_entropy_hdr->coeff_probs, | |
2504 vp8_entropy_hdr.coeff_probs); | |
2505 ARRAY_MEMCPY_CHECKED(v4l2_entropy_hdr->y_mode_probs, | |
2506 vp8_entropy_hdr.y_mode_probs); | |
2507 ARRAY_MEMCPY_CHECKED(v4l2_entropy_hdr->uv_mode_probs, | |
2508 vp8_entropy_hdr.uv_mode_probs); | |
2509 ARRAY_MEMCPY_CHECKED(v4l2_entropy_hdr->mv_probs, | |
2510 vp8_entropy_hdr.mv_probs); | |
2511 } | |
2512 | |
2513 bool V4L2SliceVideoDecodeAccelerator::V4L2VP8Accelerator::SubmitDecode( | |
2514 const scoped_refptr<VP8Picture>& pic, | |
2515 const media::Vp8FrameHeader* frame_hdr, | |
2516 const scoped_refptr<VP8Picture>& last_frame, | |
2517 const scoped_refptr<VP8Picture>& golden_frame, | |
2518 const scoped_refptr<VP8Picture>& alt_frame) { | |
2519 struct v4l2_ctrl_vp8_frame_hdr v4l2_frame_hdr; | |
2520 memset(&v4l2_frame_hdr, 0, sizeof(v4l2_frame_hdr)); | |
2521 | |
2522 #define FHDR_TO_V4L2_FHDR(a) v4l2_frame_hdr.a = frame_hdr->a | |
2523 FHDR_TO_V4L2_FHDR(key_frame); | |
2524 FHDR_TO_V4L2_FHDR(version); | |
2525 FHDR_TO_V4L2_FHDR(width); | |
2526 FHDR_TO_V4L2_FHDR(horizontal_scale); | |
2527 FHDR_TO_V4L2_FHDR(height); | |
2528 FHDR_TO_V4L2_FHDR(vertical_scale); | |
2529 FHDR_TO_V4L2_FHDR(sign_bias_golden); | |
2530 FHDR_TO_V4L2_FHDR(sign_bias_alternate); | |
2531 FHDR_TO_V4L2_FHDR(prob_skip_false); | |
2532 FHDR_TO_V4L2_FHDR(prob_intra); | |
2533 FHDR_TO_V4L2_FHDR(prob_last); | |
2534 FHDR_TO_V4L2_FHDR(prob_gf); | |
2535 FHDR_TO_V4L2_FHDR(bool_dec_range); | |
2536 FHDR_TO_V4L2_FHDR(bool_dec_value); | |
2537 FHDR_TO_V4L2_FHDR(bool_dec_count); | |
2538 #undef FHDR_TO_V4L2_FHDR | |
2539 | |
2540 #define SET_V4L2_FRM_HDR_FLAG_IF(cond, flag) \ | |
2541 v4l2_frame_hdr.flags |= ((frame_hdr->cond) ? (flag) : 0) | |
2542 SET_V4L2_FRM_HDR_FLAG_IF(is_experimental, | |
2543 V4L2_VP8_FRAME_HDR_FLAG_EXPERIMENTAL); | |
2544 SET_V4L2_FRM_HDR_FLAG_IF(show_frame, V4L2_VP8_FRAME_HDR_FLAG_SHOW_FRAME); | |
2545 SET_V4L2_FRM_HDR_FLAG_IF(mb_no_skip_coeff, | |
2546 V4L2_VP8_FRAME_HDR_FLAG_MB_NO_SKIP_COEFF); | |
2547 #undef SET_V4L2_FRM_HDR_FLAG_IF | |
2548 | |
2549 FillV4L2SegmentationHeader(frame_hdr->segmentation_hdr, | |
2550 &v4l2_frame_hdr.sgmnt_hdr); | |
2551 | |
2552 FillV4L2LoopfilterHeader(frame_hdr->loopfilter_hdr, &v4l2_frame_hdr.lf_hdr); | |
2553 | |
2554 FillV4L2QuantizationHeader(frame_hdr->quantization_hdr, | |
2555 &v4l2_frame_hdr.quant_hdr); | |
2556 | |
2557 FillV4L2EntropyHeader(frame_hdr->entropy_hdr, &v4l2_frame_hdr.entropy_hdr); | |
2558 | |
2559 v4l2_frame_hdr.first_part_size = | |
2560 base::checked_cast<__u32>(frame_hdr->first_part_size); | |
2561 v4l2_frame_hdr.first_part_offset = | |
2562 base::checked_cast<__u32>(frame_hdr->first_part_offset); | |
2563 v4l2_frame_hdr.macroblock_bit_offset = | |
2564 base::checked_cast<__u32>(frame_hdr->macroblock_bit_offset); | |
2565 v4l2_frame_hdr.num_dct_parts = frame_hdr->num_of_dct_partitions; | |
2566 | |
2567 static_assert(arraysize(v4l2_frame_hdr.dct_part_sizes) == | |
2568 arraysize(frame_hdr->dct_partition_sizes), | |
2569 "DCT partition size arrays must have equal number of elements"); | |
2570 for (size_t i = 0; i < frame_hdr->num_of_dct_partitions && | |
2571 i < arraysize(v4l2_frame_hdr.dct_part_sizes); ++i) | |
2572 v4l2_frame_hdr.dct_part_sizes[i] = frame_hdr->dct_partition_sizes[i]; | |
2573 | |
2574 scoped_refptr<V4L2DecodeSurface> dec_surface = | |
2575 VP8PictureToV4L2DecodeSurface(pic); | |
2576 std::vector<scoped_refptr<V4L2DecodeSurface>> ref_surfaces; | |
2577 | |
2578 if (last_frame) { | |
2579 scoped_refptr<V4L2DecodeSurface> last_frame_surface = | |
2580 VP8PictureToV4L2DecodeSurface(last_frame); | |
2581 v4l2_frame_hdr.last_frame = last_frame_surface->output_record(); | |
2582 ref_surfaces.push_back(last_frame_surface); | |
2583 } else { | |
2584 v4l2_frame_hdr.last_frame = VIDEO_MAX_FRAME; | |
2585 } | |
2586 | |
2587 if (golden_frame) { | |
2588 scoped_refptr<V4L2DecodeSurface> golden_frame_surface = | |
2589 VP8PictureToV4L2DecodeSurface(golden_frame); | |
2590 v4l2_frame_hdr.golden_frame = golden_frame_surface->output_record(); | |
2591 ref_surfaces.push_back(golden_frame_surface); | |
2592 } else { | |
2593 v4l2_frame_hdr.golden_frame = VIDEO_MAX_FRAME; | |
2594 } | |
2595 | |
2596 if (alt_frame) { | |
2597 scoped_refptr<V4L2DecodeSurface> alt_frame_surface = | |
2598 VP8PictureToV4L2DecodeSurface(alt_frame); | |
2599 v4l2_frame_hdr.alt_frame = alt_frame_surface->output_record(); | |
2600 ref_surfaces.push_back(alt_frame_surface); | |
2601 } else { | |
2602 v4l2_frame_hdr.alt_frame = VIDEO_MAX_FRAME; | |
2603 } | |
2604 | |
2605 struct v4l2_ext_control ctrl; | |
2606 memset(&ctrl, 0, sizeof(ctrl)); | |
2607 ctrl.id = V4L2_CID_MPEG_VIDEO_VP8_FRAME_HDR; | |
2608 ctrl.size = sizeof(v4l2_frame_hdr); | |
2609 ctrl.p_vp8_frame_hdr = &v4l2_frame_hdr; | |
2610 | |
2611 struct v4l2_ext_controls ext_ctrls; | |
2612 memset(&ext_ctrls, 0, sizeof(ext_ctrls)); | |
2613 ext_ctrls.count = 1; | |
2614 ext_ctrls.controls = &ctrl; | |
2615 ext_ctrls.config_store = dec_surface->config_store(); | |
2616 | |
2617 if (!v4l2_dec_->SubmitExtControls(&ext_ctrls)) | |
2618 return false; | |
2619 | |
2620 dec_surface->SetReferenceSurfaces(ref_surfaces); | |
2621 | |
2622 if (!v4l2_dec_->SubmitSlice(dec_surface->input_record(), frame_hdr->data, | |
2623 frame_hdr->frame_size)) | |
2624 return false; | |
2625 | |
2626 v4l2_dec_->DecodeSurface(dec_surface); | |
2627 return true; | |
2628 } | |
2629 | |
2630 bool V4L2SliceVideoDecodeAccelerator::V4L2VP8Accelerator::OutputPicture( | |
2631 const scoped_refptr<VP8Picture>& pic) { | |
2632 scoped_refptr<V4L2DecodeSurface> dec_surface = | |
2633 VP8PictureToV4L2DecodeSurface(pic); | |
2634 | |
2635 v4l2_dec_->SurfaceReady(dec_surface); | |
2636 return true; | |
2637 } | |
2638 | |
2639 scoped_refptr<V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface> | |
2640 V4L2SliceVideoDecodeAccelerator::V4L2VP8Accelerator:: | |
2641 VP8PictureToV4L2DecodeSurface(const scoped_refptr<VP8Picture>& pic) { | |
2642 V4L2VP8Picture* v4l2_pic = pic->AsV4L2VP8Picture(); | |
2643 CHECK(v4l2_pic); | |
2644 return v4l2_pic->dec_surface(); | |
2645 } | |
2646 | |
2647 void V4L2SliceVideoDecodeAccelerator::DecodeSurface( | |
2648 const scoped_refptr<V4L2DecodeSurface>& dec_surface) { | |
2649 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread()); | |
2650 | |
2651 DVLOGF(3) << "Submitting decode for surface: " << dec_surface->ToString(); | |
2652 Enqueue(dec_surface); | |
2653 } | |
2654 | |
2655 void V4L2SliceVideoDecodeAccelerator::SurfaceReady( | |
2656 const scoped_refptr<V4L2DecodeSurface>& dec_surface) { | |
2657 DVLOGF(3); | |
2658 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread()); | |
2659 | |
2660 decoder_display_queue_.push(dec_surface); | |
2661 TryOutputSurfaces(); | |
2662 } | |
2663 | |
2664 void V4L2SliceVideoDecodeAccelerator::TryOutputSurfaces() { | |
2665 while (!decoder_display_queue_.empty()) { | |
2666 scoped_refptr<V4L2DecodeSurface> dec_surface = | |
2667 decoder_display_queue_.front(); | |
2668 | |
2669 if (!dec_surface->decoded()) | |
2670 break; | |
2671 | |
2672 decoder_display_queue_.pop(); | |
2673 OutputSurface(dec_surface); | |
2674 } | |
2675 } | |
2676 | |
2677 void V4L2SliceVideoDecodeAccelerator::OutputSurface( | |
2678 const scoped_refptr<V4L2DecodeSurface>& dec_surface) { | |
2679 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread()); | |
2680 | |
2681 OutputRecord& output_record = | |
2682 output_buffer_map_[dec_surface->output_record()]; | |
2683 | |
2684 bool inserted = | |
2685 surfaces_at_display_.insert(std::make_pair(output_record.picture_id, | |
2686 dec_surface)).second; | |
2687 DCHECK(inserted); | |
2688 | |
2689 DCHECK(!output_record.at_client); | |
2690 DCHECK(!output_record.at_device); | |
2691 DCHECK_NE(output_record.picture_id, -1); | |
2692 output_record.at_client = true; | |
2693 | |
2694 // TODO(posciak): Use visible size from decoder here instead | |
2695 // (crbug.com/402760). Passing (0, 0) results in the client using the | |
2696 // visible size extracted from the container instead. | |
2697 media::Picture picture(output_record.picture_id, dec_surface->bitstream_id(), | |
2698 gfx::Rect(0, 0), false); | |
2699 DVLOGF(3) << dec_surface->ToString() | |
2700 << ", bitstream_id: " << picture.bitstream_buffer_id() | |
2701 << ", picture_id: " << picture.picture_buffer_id(); | |
2702 pending_picture_ready_.push(PictureRecord(output_record.cleared, picture)); | |
2703 SendPictureReady(); | |
2704 output_record.cleared = true; | |
2705 } | |
2706 | |
2707 scoped_refptr<V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface> | |
2708 V4L2SliceVideoDecodeAccelerator::CreateSurface() { | |
2709 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread()); | |
2710 DCHECK_EQ(state_, kDecoding); | |
2711 | |
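// A new surface needs one free input (OUTPUT queue) buffer and one free | |
// output (CAPTURE queue) buffer. | |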
2712 if (free_input_buffers_.empty() || free_output_buffers_.empty()) | |
2713 return nullptr; | |
2714 | |
2715 int input = free_input_buffers_.front(); | |
2716 free_input_buffers_.pop_front(); | |
2717 int output = free_output_buffers_.front(); | |
2718 free_output_buffers_.pop_front(); | |
2719 | |
2720 InputRecord& input_record = input_buffer_map_[input]; | |
2721 DCHECK_EQ(input_record.bytes_used, 0u); | |
2722 DCHECK_EQ(input_record.input_id, -1); | |
2723 DCHECK(decoder_current_bitstream_buffer_ != nullptr); | |
2724 input_record.input_id = decoder_current_bitstream_buffer_->input_id; | |
2725 | |
2726 scoped_refptr<V4L2DecodeSurface> dec_surface = new V4L2DecodeSurface( | |
2727 decoder_current_bitstream_buffer_->input_id, input, output, | |
2728 base::Bind(&V4L2SliceVideoDecodeAccelerator::ReuseOutputBuffer, | |
2729 base::Unretained(this))); | |
2730 | |
2731 DVLOGF(4) << "Created surface " << input << " -> " << output; | |
2732 return dec_surface; | |
2733 } | |
2734 | |
2735 void V4L2SliceVideoDecodeAccelerator::SendPictureReady() { | |
2736 DVLOGF(3); | |
2737 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread()); | |
2738 bool resetting_or_flushing = (decoder_resetting_ || decoder_flushing_); | |
2739 while (!pending_picture_ready_.empty()) { | |
2740 bool cleared = pending_picture_ready_.front().cleared; | |
2741 const media::Picture& picture = pending_picture_ready_.front().picture; | |
2742 if (cleared && picture_clearing_count_ == 0) { | |
2743 DVLOGF(4) << "Posting picture ready to decode task runner for: " | |
2744 << picture.picture_buffer_id(); | |
2745 // This picture is cleared. It can be posted to a thread different than | |
2746 // the main GPU thread to reduce latency. This should be the case after | |
2747 // all pictures are cleared at the beginning. | |
2748 decode_task_runner_->PostTask( | |
2749 FROM_HERE, | |
2750 base::Bind(&Client::PictureReady, decode_client_, picture)); | |
2751 pending_picture_ready_.pop(); | |
2752 } else if (!cleared || resetting_or_flushing) { | |
2753 DVLOGF(3) << "cleared=" << pending_picture_ready_.front().cleared | |
2754 << ", decoder_resetting_=" << decoder_resetting_ | |
2755 << ", decoder_flushing_=" << decoder_flushing_ | |
2756 << ", picture_clearing_count_=" << picture_clearing_count_; | |
2757 DVLOGF(4) << "Posting picture ready to GPU for: " | |
2758 << picture.picture_buffer_id(); | |
2759 // If the picture is not cleared, post it to the child thread because it | |
2760 // has to be cleared in the child thread. A picture only needs to be | |
2761 // cleared once. If the decoder is resetting or flushing, send all | |
2762 // pictures to ensure PictureReady arrives before the reset or flush is done. | |
2763 child_task_runner_->PostTaskAndReply( | |
2764 FROM_HERE, base::Bind(&Client::PictureReady, client_, picture), | |
2765 // Unretained is safe. If Client::PictureReady gets to run, |this| is | |
2766 // alive. Destroy() waits for the decoder thread to finish. | |
2767 base::Bind(&V4L2SliceVideoDecodeAccelerator::PictureCleared, | |
2768 base::Unretained(this))); | |
2769 picture_clearing_count_++; | |
2770 pending_picture_ready_.pop(); | |
2771 } else { | |
2772 // This picture is cleared. But some pictures are about to be cleared on | |
2773 // the child thread. To preserve the order, do not send this until those | |
2774 // pictures are cleared. | |
2775 break; | |
2776 } | |
2777 } | |
2778 } | |
2779 | |
2780 void V4L2SliceVideoDecodeAccelerator::PictureCleared() { | |
2781 DVLOGF(3) << "clearing count=" << picture_clearing_count_; | |
2782 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread()); | |
2783 DCHECK_GT(picture_clearing_count_, 0); | |
2784 picture_clearing_count_--; | |
2785 SendPictureReady(); | |
2786 } | |
2787 | |
2788 bool V4L2SliceVideoDecodeAccelerator::TryToSetupDecodeOnSeparateThread( | |
2789 const base::WeakPtr<Client>& decode_client, | |
2790 const scoped_refptr<base::SingleThreadTaskRunner>& decode_task_runner) { | |
2791 decode_client_ = decode_client; | |
2792 decode_task_runner_ = decode_task_runner; | |
2793 return true; | |
2794 } | |
2795 | |
2796 media::VideoPixelFormat V4L2SliceVideoDecodeAccelerator::GetOutputFormat() | |
2797 const { | |
2798 return V4L2Device::V4L2PixFmtToVideoPixelFormat(output_format_fourcc_); | |
2799 } | |
2800 | |
2801 // static | |
2802 media::VideoDecodeAccelerator::SupportedProfiles | |
2803 V4L2SliceVideoDecodeAccelerator::GetSupportedProfiles() { | |
2804 scoped_refptr<V4L2Device> device = V4L2Device::Create(V4L2Device::kDecoder); | |
2805 if (!device) | |
2806 return SupportedProfiles(); | |
2807 | |
2808 return device->GetSupportedDecodeProfiles(arraysize(supported_input_fourccs_), | |
2809 supported_input_fourccs_); | |
2810 } | |
2811 | |
2812 } // namespace content | |