Chromium Code Reviews

Side by Side Diff: content/common/gpu/media/v4l2_slice_video_decode_accelerator.cc

Issue 1882373004: Migrate content/common/gpu/media code to media/gpu (Closed)
Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Fix several more bot-identified build issues (created 4 years, 8 months ago)
1 // Copyright 2015 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include <errno.h>
6 #include <fcntl.h>
7 #include <linux/videodev2.h>
8 #include <poll.h>
9 #include <string.h>
10 #include <sys/eventfd.h>
11 #include <sys/ioctl.h>
12 #include <sys/mman.h>
13
14 #include "base/bind.h"
15 #include "base/bind_helpers.h"
16 #include "base/callback.h"
17 #include "base/callback_helpers.h"
18 #include "base/command_line.h"
19 #include "base/macros.h"
20 #include "base/numerics/safe_conversions.h"
21 #include "base/strings/stringprintf.h"
22 #include "content/common/gpu/media/shared_memory_region.h"
23 #include "content/common/gpu/media/v4l2_slice_video_decode_accelerator.h"
24 #include "media/base/bind_to_current_loop.h"
25 #include "media/base/media_switches.h"
26 #include "ui/gl/gl_context.h"
27 #include "ui/gl/scoped_binders.h"
28
29 #define LOGF(level) LOG(level) << __FUNCTION__ << "(): "
30 #define DVLOGF(level) DVLOG(level) << __FUNCTION__ << "(): "
31
32 #define NOTIFY_ERROR(x) \
33 do { \
34 LOG(ERROR) << "Setting error state: " << x; \
35 SetErrorState(x); \
36 } while (0)
37
38 #define IOCTL_OR_ERROR_RETURN_VALUE(type, arg, value, type_str) \
39 do { \
40 if (device_->Ioctl(type, arg) != 0) { \
41 PLOG(ERROR) << __FUNCTION__ << "(): ioctl() failed: " << type_str; \
42 return value; \
43 } \
44 } while (0)
45
46 #define IOCTL_OR_ERROR_RETURN(type, arg) \
47 IOCTL_OR_ERROR_RETURN_VALUE(type, arg, ((void)0), #type)
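// Note: passing ((void)0) as the return value lets the same helper macro be
// used from functions that return void.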
48
49 #define IOCTL_OR_ERROR_RETURN_FALSE(type, arg) \
50 IOCTL_OR_ERROR_RETURN_VALUE(type, arg, false, #type)
51
52 #define IOCTL_OR_LOG_ERROR(type, arg) \
53 do { \
54 if (device_->Ioctl(type, arg) != 0) \
55 PLOG(ERROR) << __FUNCTION__ << "(): ioctl() failed: " << #type; \
56 } while (0)
57
58 namespace content {
59
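// V4L2 pixel formats accepted on the input (OUTPUT) queue. These are the
// slice-based formats of the ChromeOS V4L2 stateless decoding interface: the
// client parses the bitstream and passes per-frame/per-slice parameters to
// the driver via controls, while the buffers carry only slice/frame data.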
60 // static
61 const uint32_t V4L2SliceVideoDecodeAccelerator::supported_input_fourccs_[] = {
62 V4L2_PIX_FMT_H264_SLICE, V4L2_PIX_FMT_VP8_FRAME,
63 };
64
65 class V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface
66 : public base::RefCounted<V4L2DecodeSurface> {
67 public:
68 using ReleaseCB = base::Callback<void(int)>;
69
70 V4L2DecodeSurface(int32_t bitstream_id,
71 int input_record,
72 int output_record,
73 const ReleaseCB& release_cb);
74
75 // Mark the surface as decoded. This will also release all references, as
76 // they are not needed anymore.
77 void SetDecoded();
78 bool decoded() const { return decoded_; }
79
80 int32_t bitstream_id() const { return bitstream_id_; }
81 int input_record() const { return input_record_; }
82 int output_record() const { return output_record_; }
83 uint32_t config_store() const { return config_store_; }
84
85 // Take references to each reference surface and keep them until the
86 // target surface is decoded.
87 void SetReferenceSurfaces(
88 const std::vector<scoped_refptr<V4L2DecodeSurface>>& ref_surfaces);
89
90 std::string ToString() const;
91
92 private:
93 friend class base::RefCounted<V4L2DecodeSurface>;
94 ~V4L2DecodeSurface();
95
96 int32_t bitstream_id_;
97 int input_record_;
98 int output_record_;
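// ID of the V4L2 config store associated with this surface (see
// EnqueueInputRecord()). Config store IDs appear to be 1-based, with 0
// reserved to mean "no config store", hence input_record + 1 in the
// constructor.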
99 uint32_t config_store_;
100
101 bool decoded_;
102 ReleaseCB release_cb_;
103
104 std::vector<scoped_refptr<V4L2DecodeSurface>> reference_surfaces_;
105
106 DISALLOW_COPY_AND_ASSIGN(V4L2DecodeSurface);
107 };
108
109 V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface::V4L2DecodeSurface(
110 int32_t bitstream_id,
111 int input_record,
112 int output_record,
113 const ReleaseCB& release_cb)
114 : bitstream_id_(bitstream_id),
115 input_record_(input_record),
116 output_record_(output_record),
117 config_store_(input_record + 1),
118 decoded_(false),
119 release_cb_(release_cb) {}
120
121 V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface::~V4L2DecodeSurface() {
122 DVLOGF(5) << "Releasing output record id=" << output_record_;
123 release_cb_.Run(output_record_);
124 }
125
126 void V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface::SetReferenceSurfaces(
127 const std::vector<scoped_refptr<V4L2DecodeSurface>>& ref_surfaces) {
128 DCHECK(reference_surfaces_.empty());
129 reference_surfaces_ = ref_surfaces;
130 }
131
132 void V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface::SetDecoded() {
133 DCHECK(!decoded_);
134 decoded_ = true;
135
136 // We can now drop references to all reference surfaces for this surface
137 // as we are done with decoding.
138 reference_surfaces_.clear();
139 }
140
141 std::string V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface::ToString()
142 const {
143 std::string out;
144 base::StringAppendF(&out, "Buffer %d -> %d. ", input_record_, output_record_);
145 base::StringAppendF(&out, "Reference surfaces:");
146 for (const auto& ref : reference_surfaces_) {
147 DCHECK_NE(ref->output_record(), output_record_);
148 base::StringAppendF(&out, " %d", ref->output_record());
149 }
150 return out;
151 }
152
153 V4L2SliceVideoDecodeAccelerator::InputRecord::InputRecord()
154 : input_id(-1),
155 address(nullptr),
156 length(0),
157 bytes_used(0),
158 at_device(false) {
159 }
160
161 V4L2SliceVideoDecodeAccelerator::OutputRecord::OutputRecord()
162 : at_device(false),
163 at_client(false),
164 picture_id(-1),
165 egl_image(EGL_NO_IMAGE_KHR),
166 egl_sync(EGL_NO_SYNC_KHR),
167 cleared(false) {
168 }
169
170 struct V4L2SliceVideoDecodeAccelerator::BitstreamBufferRef {
171 BitstreamBufferRef(
172 base::WeakPtr<VideoDecodeAccelerator::Client>& client,
173 const scoped_refptr<base::SingleThreadTaskRunner>& client_task_runner,
174 SharedMemoryRegion* shm,
175 int32_t input_id);
176 ~BitstreamBufferRef();
177 const base::WeakPtr<VideoDecodeAccelerator::Client> client;
178 const scoped_refptr<base::SingleThreadTaskRunner> client_task_runner;
179 const std::unique_ptr<SharedMemoryRegion> shm;
180 off_t bytes_used;
181 const int32_t input_id;
182 };
183
184 V4L2SliceVideoDecodeAccelerator::BitstreamBufferRef::BitstreamBufferRef(
185 base::WeakPtr<VideoDecodeAccelerator::Client>& client,
186 const scoped_refptr<base::SingleThreadTaskRunner>& client_task_runner,
187 SharedMemoryRegion* shm,
188 int32_t input_id)
189 : client(client),
190 client_task_runner(client_task_runner),
191 shm(shm),
192 bytes_used(0),
193 input_id(input_id) {}
194
195 V4L2SliceVideoDecodeAccelerator::BitstreamBufferRef::~BitstreamBufferRef() {
196 if (input_id >= 0) {
197 DVLOGF(5) << "returning input_id: " << input_id;
198 client_task_runner->PostTask(
199 FROM_HERE,
200 base::Bind(&VideoDecodeAccelerator::Client::NotifyEndOfBitstreamBuffer,
201 client, input_id));
202 }
203 }
204
205 struct V4L2SliceVideoDecodeAccelerator::EGLSyncKHRRef {
206 EGLSyncKHRRef(EGLDisplay egl_display, EGLSyncKHR egl_sync);
207 ~EGLSyncKHRRef();
208 EGLDisplay const egl_display;
209 EGLSyncKHR egl_sync;
210 };
211
212 V4L2SliceVideoDecodeAccelerator::EGLSyncKHRRef::EGLSyncKHRRef(
213 EGLDisplay egl_display,
214 EGLSyncKHR egl_sync)
215 : egl_display(egl_display), egl_sync(egl_sync) {
216 }
217
218 V4L2SliceVideoDecodeAccelerator::EGLSyncKHRRef::~EGLSyncKHRRef() {
219 // We don't check for eglDestroySyncKHR failures, because if we get here
220 // with a valid sync object, something went wrong and we are getting
221 // destroyed anyway.
222 if (egl_sync != EGL_NO_SYNC_KHR)
223 eglDestroySyncKHR(egl_display, egl_sync);
224 }
225
226 struct V4L2SliceVideoDecodeAccelerator::PictureRecord {
227 PictureRecord(bool cleared, const media::Picture& picture);
228 ~PictureRecord();
229 bool cleared; // Whether the texture is cleared and safe to render from.
230 media::Picture picture; // The decoded picture.
231 };
232
233 V4L2SliceVideoDecodeAccelerator::PictureRecord::PictureRecord(
234 bool cleared,
235 const media::Picture& picture)
236 : cleared(cleared), picture(picture) {
237 }
238
239 V4L2SliceVideoDecodeAccelerator::PictureRecord::~PictureRecord() {
240 }
241
242 class V4L2SliceVideoDecodeAccelerator::V4L2H264Accelerator
243 : public H264Decoder::H264Accelerator {
244 public:
245 V4L2H264Accelerator(V4L2SliceVideoDecodeAccelerator* v4l2_dec);
246 ~V4L2H264Accelerator() override;
247
248 // H264Decoder::H264Accelerator implementation.
249 scoped_refptr<H264Picture> CreateH264Picture() override;
250
251 bool SubmitFrameMetadata(const media::H264SPS* sps,
252 const media::H264PPS* pps,
253 const H264DPB& dpb,
254 const H264Picture::Vector& ref_pic_listp0,
255 const H264Picture::Vector& ref_pic_listb0,
256 const H264Picture::Vector& ref_pic_listb1,
257 const scoped_refptr<H264Picture>& pic) override;
258
259 bool SubmitSlice(const media::H264PPS* pps,
260 const media::H264SliceHeader* slice_hdr,
261 const H264Picture::Vector& ref_pic_list0,
262 const H264Picture::Vector& ref_pic_list1,
263 const scoped_refptr<H264Picture>& pic,
264 const uint8_t* data,
265 size_t size) override;
266
267 bool SubmitDecode(const scoped_refptr<H264Picture>& pic) override;
268 bool OutputPicture(const scoped_refptr<H264Picture>& pic) override;
269
270 void Reset() override;
271
272 private:
273 // Max size of reference list.
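// (32 presumably matches the size of the reference picture list arrays in
// the kernel's v4l2_ctrl_h264_decode_param: up to 16 reference frames,
// counting both fields of each.)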
274 static const size_t kDPBIndicesListSize = 32;
275 void H264PictureListToDPBIndicesList(const H264Picture::Vector& src_pic_list,
276 uint8_t dst_list[kDPBIndicesListSize]);
277
278 void H264DPBToV4L2DPB(
279 const H264DPB& dpb,
280 std::vector<scoped_refptr<V4L2DecodeSurface>>* ref_surfaces);
281
282 scoped_refptr<V4L2DecodeSurface> H264PictureToV4L2DecodeSurface(
283 const scoped_refptr<H264Picture>& pic);
284
285 size_t num_slices_;
286 V4L2SliceVideoDecodeAccelerator* v4l2_dec_;
287
288 // TODO(posciak): This should be queried from hardware once supported.
289 static const size_t kMaxSlices = 16;
290 struct v4l2_ctrl_h264_slice_param v4l2_slice_params_[kMaxSlices];
291 struct v4l2_ctrl_h264_decode_param v4l2_decode_param_;
292
293 DISALLOW_COPY_AND_ASSIGN(V4L2H264Accelerator);
294 };
295
296 class V4L2SliceVideoDecodeAccelerator::V4L2VP8Accelerator
297 : public VP8Decoder::VP8Accelerator {
298 public:
299 V4L2VP8Accelerator(V4L2SliceVideoDecodeAccelerator* v4l2_dec);
300 ~V4L2VP8Accelerator() override;
301
302 // VP8Decoder::VP8Accelerator implementation.
303 scoped_refptr<VP8Picture> CreateVP8Picture() override;
304
305 bool SubmitDecode(const scoped_refptr<VP8Picture>& pic,
306 const media::Vp8FrameHeader* frame_hdr,
307 const scoped_refptr<VP8Picture>& last_frame,
308 const scoped_refptr<VP8Picture>& golden_frame,
309 const scoped_refptr<VP8Picture>& alt_frame) override;
310
311 bool OutputPicture(const scoped_refptr<VP8Picture>& pic) override;
312
313 private:
314 scoped_refptr<V4L2DecodeSurface> VP8PictureToV4L2DecodeSurface(
315 const scoped_refptr<VP8Picture>& pic);
316
317 V4L2SliceVideoDecodeAccelerator* v4l2_dec_;
318
319 DISALLOW_COPY_AND_ASSIGN(V4L2VP8Accelerator);
320 };
321
322 // Codec-specific subclasses of software decoder picture classes.
323 // This allows us to keep decoders oblivious of our implementation details.
324 class V4L2H264Picture : public H264Picture {
325 public:
326 V4L2H264Picture(const scoped_refptr<
327 V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface>& dec_surface);
328
329 V4L2H264Picture* AsV4L2H264Picture() override { return this; }
330 scoped_refptr<V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface>
331 dec_surface() {
332 return dec_surface_;
333 }
334
335 private:
336 ~V4L2H264Picture() override;
337
338 scoped_refptr<V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface>
339 dec_surface_;
340
341 DISALLOW_COPY_AND_ASSIGN(V4L2H264Picture);
342 };
343
344 V4L2H264Picture::V4L2H264Picture(const scoped_refptr<
345 V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface>& dec_surface)
346 : dec_surface_(dec_surface) {
347 }
348
349 V4L2H264Picture::~V4L2H264Picture() {
350 }
351
352 class V4L2VP8Picture : public VP8Picture {
353 public:
354 V4L2VP8Picture(const scoped_refptr<
355 V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface>& dec_surface);
356
357 V4L2VP8Picture* AsV4L2VP8Picture() override { return this; }
358 scoped_refptr<V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface>
359 dec_surface() {
360 return dec_surface_;
361 }
362
363 private:
364 ~V4L2VP8Picture() override;
365
366 scoped_refptr<V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface>
367 dec_surface_;
368
369 DISALLOW_COPY_AND_ASSIGN(V4L2VP8Picture);
370 };
371
372 V4L2VP8Picture::V4L2VP8Picture(const scoped_refptr<
373 V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface>& dec_surface)
374 : dec_surface_(dec_surface) {
375 }
376
377 V4L2VP8Picture::~V4L2VP8Picture() {
378 }
379
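// Threading model: client-facing entry points (Initialize(),
// AssignPictureBuffers(), ReusePictureBuffer(), Destroy(), ...) run on the
// child (GPU) thread via child_task_runner_, with Decode() optionally on a
// separate decode_task_runner_ if one was set up; the decoding work itself
// runs on decoder_thread_; device_poll_thread_ only blocks in the device
// poll and bounces all processing back to the decoder thread via
// ServiceDeviceTask().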
380 V4L2SliceVideoDecodeAccelerator::V4L2SliceVideoDecodeAccelerator(
381 const scoped_refptr<V4L2Device>& device,
382 EGLDisplay egl_display,
383 const GetGLContextCallback& get_gl_context_cb,
384 const MakeGLContextCurrentCallback& make_context_current_cb)
385 : input_planes_count_(0),
386 output_planes_count_(0),
387 child_task_runner_(base::ThreadTaskRunnerHandle::Get()),
388 device_(device),
389 decoder_thread_("V4L2SliceVideoDecodeAcceleratorThread"),
390 device_poll_thread_("V4L2SliceVideoDecodeAcceleratorDevicePollThread"),
391 input_streamon_(false),
392 input_buffer_queued_count_(0),
393 output_streamon_(false),
394 output_buffer_queued_count_(0),
395 video_profile_(media::VIDEO_CODEC_PROFILE_UNKNOWN),
396 output_format_fourcc_(0),
397 state_(kUninitialized),
398 decoder_flushing_(false),
399 decoder_resetting_(false),
400 surface_set_change_pending_(false),
401 picture_clearing_count_(0),
402 pictures_assigned_(false, false),
403 egl_display_(egl_display),
404 get_gl_context_cb_(get_gl_context_cb),
405 make_context_current_cb_(make_context_current_cb),
406 weak_this_factory_(this) {
407 weak_this_ = weak_this_factory_.GetWeakPtr();
408 }
409
410 V4L2SliceVideoDecodeAccelerator::~V4L2SliceVideoDecodeAccelerator() {
411 DVLOGF(2);
412
413 DCHECK(child_task_runner_->BelongsToCurrentThread());
414 DCHECK(!decoder_thread_.IsRunning());
415 DCHECK(!device_poll_thread_.IsRunning());
416
417 DCHECK(input_buffer_map_.empty());
418 DCHECK(output_buffer_map_.empty());
419 }
420
421 void V4L2SliceVideoDecodeAccelerator::NotifyError(Error error) {
422 if (!child_task_runner_->BelongsToCurrentThread()) {
423 child_task_runner_->PostTask(
424 FROM_HERE, base::Bind(&V4L2SliceVideoDecodeAccelerator::NotifyError,
425 weak_this_, error));
426 return;
427 }
428
429 if (client_) {
430 client_->NotifyError(error);
431 client_ptr_factory_.reset();
432 }
433 }
434
435 bool V4L2SliceVideoDecodeAccelerator::Initialize(const Config& config,
436 Client* client) {
437 DVLOGF(3) << "profile: " << config.profile;
438 DCHECK(child_task_runner_->BelongsToCurrentThread());
439 DCHECK_EQ(state_, kUninitialized);
440
441 if (get_gl_context_cb_.is_null() || make_context_current_cb_.is_null()) {
442 NOTREACHED() << "GL callbacks are required for this VDA";
443 return false;
444 }
445
446 if (config.is_encrypted) {
447 NOTREACHED() << "Encrypted streams are not supported for this VDA";
448 return false;
449 }
450
451 if (!device_->SupportsDecodeProfileForV4L2PixelFormats(
452 config.profile, arraysize(supported_input_fourccs_),
453 supported_input_fourccs_)) {
454 DVLOGF(1) << "unsupported profile " << config.profile;
455 return false;
456 }
457
458 client_ptr_factory_.reset(
459 new base::WeakPtrFactory<VideoDecodeAccelerator::Client>(client));
460 client_ = client_ptr_factory_->GetWeakPtr();
461 // If we haven't been set up to decode on a separate thread via
462 // TryToSetupDecodeOnSeparateThread(), use the main thread/client for
463 // decode tasks.
464 if (!decode_task_runner_) {
465 decode_task_runner_ = child_task_runner_;
466 DCHECK(!decode_client_);
467 decode_client_ = client_;
468 }
469
470 video_profile_ = config.profile;
471
472 if (video_profile_ >= media::H264PROFILE_MIN &&
473 video_profile_ <= media::H264PROFILE_MAX) {
474 h264_accelerator_.reset(new V4L2H264Accelerator(this));
475 decoder_.reset(new H264Decoder(h264_accelerator_.get()));
476 } else if (video_profile_ >= media::VP8PROFILE_MIN &&
477 video_profile_ <= media::VP8PROFILE_MAX) {
478 vp8_accelerator_.reset(new V4L2VP8Accelerator(this));
479 decoder_.reset(new VP8Decoder(vp8_accelerator_.get()));
480 } else {
481 NOTREACHED() << "Unsupported profile " << video_profile_;
482 return false;
483 }
484
485 // TODO(posciak): This needs to be queried once supported.
486 input_planes_count_ = 1;
487 output_planes_count_ = 1;
488
489 if (egl_display_ == EGL_NO_DISPLAY) {
490 LOG(ERROR) << "Initialize(): could not get EGLDisplay";
491 return false;
492 }
493
494 // We need the context to be initialized to query extensions.
495 if (!make_context_current_cb_.Run()) {
496 LOG(ERROR) << "Initialize(): could not make context current";
497 return false;
498 }
499
500 if (!gfx::g_driver_egl.ext.b_EGL_KHR_fence_sync) {
501 LOG(ERROR) << "Initialize(): context does not have EGL_KHR_fence_sync";
502 return false;
503 }
504
505 // Capabilities check.
506 struct v4l2_capability caps;
507 const __u32 kCapsRequired = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING;
508 IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_QUERYCAP, &caps);
509 if ((caps.capabilities & kCapsRequired) != kCapsRequired) {
510 LOG(ERROR) << "Initialize(): ioctl() failed: VIDIOC_QUERYCAP"
511 ", caps check failed: 0x" << std::hex << caps.capabilities;
512 return false;
513 }
514
515 if (!SetupFormats())
516 return false;
517
518 if (!decoder_thread_.Start()) {
519 DLOG(ERROR) << "Initialize(): device thread failed to start";
520 return false;
521 }
522 decoder_thread_task_runner_ = decoder_thread_.task_runner();
523
524 state_ = kInitialized;
525
526 // InitializeTask will NOTIFY_ERROR on failure.
527 decoder_thread_task_runner_->PostTask(
528 FROM_HERE, base::Bind(&V4L2SliceVideoDecodeAccelerator::InitializeTask,
529 base::Unretained(this)));
530
531 DVLOGF(1) << "V4L2SliceVideoDecodeAccelerator initialized";
532 return true;
533 }
534
535 void V4L2SliceVideoDecodeAccelerator::InitializeTask() {
536 DVLOGF(3);
537 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
538 DCHECK_EQ(state_, kInitialized);
539
540 if (!CreateInputBuffers())
541 NOTIFY_ERROR(PLATFORM_FAILURE);
542
543 // Output buffers will be created once the decoder gives us information
544 // about their size and required count.
545 state_ = kDecoding;
546 }
547
548 void V4L2SliceVideoDecodeAccelerator::Destroy() {
549 DVLOGF(3);
550 DCHECK(child_task_runner_->BelongsToCurrentThread());
551
552 if (decoder_thread_.IsRunning()) {
553 decoder_thread_task_runner_->PostTask(
554 FROM_HERE, base::Bind(&V4L2SliceVideoDecodeAccelerator::DestroyTask,
555 base::Unretained(this)));
556
557 // Wake up the decoder thread in case we are waiting in CreateOutputBuffers
558 // for the client to provide pictures. Since this is Destroy, we won't be
559 // getting them anymore (AssignPictureBuffers won't be called).
560 pictures_assigned_.Signal();
561
562 // Wait for tasks to finish/early-exit.
563 decoder_thread_.Stop();
564 }
565
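// Per the VideoDecodeAccelerator contract, Destroy() deletes the VDA itself
// once its threads have been stopped.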
566 delete this;
567 DVLOGF(3) << "Destroyed";
568 }
569
570 void V4L2SliceVideoDecodeAccelerator::DestroyTask() {
571 DVLOGF(3);
572 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
573
574 state_ = kError;
575
576 decoder_->Reset();
577
578 decoder_current_bitstream_buffer_.reset();
579 while (!decoder_input_queue_.empty())
580 decoder_input_queue_.pop();
581
582 // Stop streaming and the device_poll_thread_.
583 StopDevicePoll(false);
584
585 DestroyInputBuffers();
586 DestroyOutputs(false);
587
588 DCHECK(surfaces_at_device_.empty());
589 DCHECK(surfaces_at_display_.empty());
590 DCHECK(decoder_display_queue_.empty());
591 }
592
593 bool V4L2SliceVideoDecodeAccelerator::SetupFormats() {
594 DCHECK_EQ(state_, kUninitialized);
595
596 __u32 input_format_fourcc =
597 V4L2Device::VideoCodecProfileToV4L2PixFmt(video_profile_, true);
598 if (!input_format_fourcc) {
599 NOTREACHED();
600 return false;
601 }
602
603 size_t input_size;
604 gfx::Size max_resolution, min_resolution;
605 device_->GetSupportedResolution(input_format_fourcc, &min_resolution,
606 &max_resolution);
607 if (max_resolution.width() > 1920 && max_resolution.height() > 1088)
608 input_size = kInputBufferMaxSizeFor4k;
609 else
610 input_size = kInputBufferMaxSizeFor1080p;
611
612 struct v4l2_fmtdesc fmtdesc;
613 memset(&fmtdesc, 0, sizeof(fmtdesc));
614 fmtdesc.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
615 bool is_format_supported = false;
616 while (device_->Ioctl(VIDIOC_ENUM_FMT, &fmtdesc) == 0) {
617 if (fmtdesc.pixelformat == input_format_fourcc) {
618 is_format_supported = true;
619 break;
620 }
621 ++fmtdesc.index;
622 }
623
624 if (!is_format_supported) {
625 DVLOG(1) << "Input fourcc " << input_format_fourcc
626 << " not supported by device.";
627 return false;
628 }
629
630 struct v4l2_format format;
631 memset(&format, 0, sizeof(format));
632 format.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
633 format.fmt.pix_mp.pixelformat = input_format_fourcc;
634 format.fmt.pix_mp.plane_fmt[0].sizeimage = input_size;
635 format.fmt.pix_mp.num_planes = input_planes_count_;
636 IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_S_FMT, &format);
637
638 // We have to set up the format for output, because the driver may not allow
639 // changing it once we start streaming; whether it can support our chosen
640 // output format or not may depend on the input format.
641 memset(&fmtdesc, 0, sizeof(fmtdesc));
642 fmtdesc.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
643 output_format_fourcc_ = 0;
644 while (device_->Ioctl(VIDIOC_ENUM_FMT, &fmtdesc) == 0) {
645 if (device_->CanCreateEGLImageFrom(fmtdesc.pixelformat)) {
646 output_format_fourcc_ = fmtdesc.pixelformat;
647 break;
648 }
649 ++fmtdesc.index;
650 }
651
652 if (output_format_fourcc_ == 0) {
653 LOG(ERROR) << "Could not find a usable output format";
654 return false;
655 }
656
657 // Only set fourcc for output; resolution, etc., will come from the
658 // driver once it extracts it from the stream.
659 memset(&format, 0, sizeof(format));
660 format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
661 format.fmt.pix_mp.pixelformat = output_format_fourcc_;
662 format.fmt.pix_mp.num_planes = output_planes_count_;
663 IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_S_FMT, &format);
664
665 return true;
666 }
667
668 bool V4L2SliceVideoDecodeAccelerator::CreateInputBuffers() {
669 DVLOGF(3);
670 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
671 DCHECK(!input_streamon_);
672 DCHECK(input_buffer_map_.empty());
673
674 struct v4l2_requestbuffers reqbufs;
675 memset(&reqbufs, 0, sizeof(reqbufs));
676 reqbufs.count = kNumInputBuffers;
677 reqbufs.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
678 reqbufs.memory = V4L2_MEMORY_MMAP;
679 IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_REQBUFS, &reqbufs);
680 if (reqbufs.count < kNumInputBuffers) {
681 PLOG(ERROR) << "Could not allocate enough input buffers";
682 return false;
683 }
684 input_buffer_map_.resize(reqbufs.count);
685 for (size_t i = 0; i < input_buffer_map_.size(); ++i) {
686 free_input_buffers_.push_back(i);
687
688 // Query for the MEMORY_MMAP pointer.
689 struct v4l2_plane planes[VIDEO_MAX_PLANES];
690 struct v4l2_buffer buffer;
691 memset(&buffer, 0, sizeof(buffer));
692 memset(planes, 0, sizeof(planes));
693 buffer.index = i;
694 buffer.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
695 buffer.memory = V4L2_MEMORY_MMAP;
696 buffer.m.planes = planes;
697 buffer.length = input_planes_count_;
698 IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_QUERYBUF, &buffer);
699 void* address = device_->Mmap(nullptr,
700 buffer.m.planes[0].length,
701 PROT_READ | PROT_WRITE,
702 MAP_SHARED,
703 buffer.m.planes[0].m.mem_offset);
704 if (address == MAP_FAILED) {
705 PLOG(ERROR) << "CreateInputBuffers(): mmap() failed";
706 return false;
707 }
708 input_buffer_map_[i].address = address;
709 input_buffer_map_[i].length = buffer.m.planes[0].length;
710 }
711
712 return true;
713 }
714
715 bool V4L2SliceVideoDecodeAccelerator::CreateOutputBuffers() {
716 DVLOGF(3);
717 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
718 DCHECK(!output_streamon_);
719 DCHECK(output_buffer_map_.empty());
720 DCHECK(surfaces_at_display_.empty());
721 DCHECK(surfaces_at_device_.empty());
722
723 visible_size_ = decoder_->GetPicSize();
724 size_t num_pictures = decoder_->GetRequiredNumOfPictures();
725
726 DCHECK_GT(num_pictures, 0u);
727 DCHECK(!visible_size_.IsEmpty());
728
729 struct v4l2_format format;
730 memset(&format, 0, sizeof(format));
731 format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
732 format.fmt.pix_mp.pixelformat = output_format_fourcc_;
733 format.fmt.pix_mp.width = visible_size_.width();
734 format.fmt.pix_mp.height = visible_size_.height();
735 format.fmt.pix_mp.num_planes = output_planes_count_;
736
737 if (device_->Ioctl(VIDIOC_S_FMT, &format) != 0) {
738 PLOG(ERROR) << "Failed setting format to: " << output_format_fourcc_;
739 NOTIFY_ERROR(PLATFORM_FAILURE);
740 return false;
741 }
742
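// The driver may adjust the requested dimensions to satisfy its alignment
// requirements; whatever it returned becomes the coded (allocation) size,
// which must still fully contain the visible rect.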
743 coded_size_.SetSize(base::checked_cast<int>(format.fmt.pix_mp.width),
744 base::checked_cast<int>(format.fmt.pix_mp.height));
745 DCHECK_EQ(coded_size_.width() % 16, 0);
746 DCHECK_EQ(coded_size_.height() % 16, 0);
747
748 if (!gfx::Rect(coded_size_).Contains(gfx::Rect(visible_size_))) {
749 LOG(ERROR) << "Got invalid adjusted coded size: " << coded_size_.ToString();
750 return false;
751 }
752
753 DVLOGF(3) << "buffer_count=" << num_pictures
754 << ", visible size=" << visible_size_.ToString()
755 << ", coded size=" << coded_size_.ToString();
756
757 child_task_runner_->PostTask(
758 FROM_HERE,
759 base::Bind(&VideoDecodeAccelerator::Client::ProvidePictureBuffers,
760 client_, num_pictures, 1, coded_size_,
761 device_->GetTextureTarget()));
762
763 // Wait for the client to call AssignPictureBuffers() on the Child thread.
764 // We do this, because if we continue decoding without finishing buffer
765 // allocation, we may end up Resetting before AssignPictureBuffers arrives,
766 // resulting in unnecessary complications and subtle bugs.
767 pictures_assigned_.Wait();
768
769 return true;
770 }
771
772 void V4L2SliceVideoDecodeAccelerator::DestroyInputBuffers() {
773 DVLOGF(3);
774 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread() ||
775 !decoder_thread_.IsRunning());
776 DCHECK(!input_streamon_);
777
778 for (auto& input_record : input_buffer_map_) {
779 if (input_record.address != nullptr)
780 device_->Munmap(input_record.address, input_record.length);
781 }
782
783 struct v4l2_requestbuffers reqbufs;
784 memset(&reqbufs, 0, sizeof(reqbufs));
785 reqbufs.count = 0;
786 reqbufs.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
787 reqbufs.memory = V4L2_MEMORY_MMAP;
788 IOCTL_OR_LOG_ERROR(VIDIOC_REQBUFS, &reqbufs);
789
790 input_buffer_map_.clear();
791 free_input_buffers_.clear();
792 }
793
794 void V4L2SliceVideoDecodeAccelerator::DismissPictures(
795 std::vector<int32_t> picture_buffer_ids,
796 base::WaitableEvent* done) {
797 DVLOGF(3);
798 DCHECK(child_task_runner_->BelongsToCurrentThread());
799
800 for (auto picture_buffer_id : picture_buffer_ids) {
801 DVLOGF(1) << "dismissing PictureBuffer id=" << picture_buffer_id;
802 client_->DismissPictureBuffer(picture_buffer_id);
803 }
804
805 done->Signal();
806 }
807
808 void V4L2SliceVideoDecodeAccelerator::DevicePollTask(bool poll_device) {
809 DVLOGF(4);
810 DCHECK_EQ(device_poll_thread_.message_loop(), base::MessageLoop::current());
811
812 bool event_pending;
813 if (!device_->Poll(poll_device, &event_pending)) {
814 NOTIFY_ERROR(PLATFORM_FAILURE);
815 return;
816 }
817
818 // All processing should happen on ServiceDeviceTask(), since we shouldn't
819 // touch decoder state from this thread.
820 decoder_thread_task_runner_->PostTask(
821 FROM_HERE, base::Bind(&V4L2SliceVideoDecodeAccelerator::ServiceDeviceTask,
822 base::Unretained(this)));
823 }
824
825 void V4L2SliceVideoDecodeAccelerator::ServiceDeviceTask() {
826 DVLOGF(4);
827 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
828
829 // ServiceDeviceTask() should only ever be scheduled from DevicePollTask().
830
831 Dequeue();
832 SchedulePollIfNeeded();
833 }
834
835 void V4L2SliceVideoDecodeAccelerator::SchedulePollIfNeeded() {
836 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
837
838 if (!device_poll_thread_.IsRunning()) {
839 DVLOGF(2) << "Device poll thread stopped, will not schedule poll";
840 return;
841 }
842
843 DCHECK(input_streamon_ || output_streamon_);
844
845 if (input_buffer_queued_count_ + output_buffer_queued_count_ == 0) {
846 DVLOGF(4) << "No buffers queued, will not schedule poll";
847 return;
848 }
849
850 DVLOGF(4) << "Scheduling device poll task";
851
852 device_poll_thread_.message_loop()->PostTask(
853 FROM_HERE, base::Bind(&V4L2SliceVideoDecodeAccelerator::DevicePollTask,
854 base::Unretained(this), true));
855
856 DVLOGF(2) << "buffer counts: "
857 << "INPUT[" << decoder_input_queue_.size() << "]"
858 << " => DEVICE["
859 << free_input_buffers_.size() << "+"
860 << input_buffer_queued_count_ << "/"
861 << input_buffer_map_.size() << "]->["
862 << free_output_buffers_.size() << "+"
863 << output_buffer_queued_count_ << "/"
864 << output_buffer_map_.size() << "]"
865 << " => DISPLAYQ[" << decoder_display_queue_.size() << "]"
866 << " => CLIENT[" << surfaces_at_display_.size() << "]";
867 }
868
869 void V4L2SliceVideoDecodeAccelerator::Enqueue(
870 const scoped_refptr<V4L2DecodeSurface>& dec_surface) {
871 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
872
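// Each decode surface pairs one input (bitstream) buffer with one output
// (picture) buffer; both are queued together so the driver can decode into
// the output buffer using the parameters attached to the input buffer's
// config store.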
873 const int old_inputs_queued = input_buffer_queued_count_;
874 const int old_outputs_queued = output_buffer_queued_count_;
875
876 if (!EnqueueInputRecord(dec_surface->input_record(),
877 dec_surface->config_store())) {
878 DVLOGF(1) << "Failed queueing an input buffer";
879 NOTIFY_ERROR(PLATFORM_FAILURE);
880 return;
881 }
882
883 if (!EnqueueOutputRecord(dec_surface->output_record())) {
884 DVLOGF(1) << "Failed queueing an output buffer";
885 NOTIFY_ERROR(PLATFORM_FAILURE);
886 return;
887 }
888
889 bool inserted =
890 surfaces_at_device_.insert(std::make_pair(dec_surface->output_record(),
891 dec_surface)).second;
892 DCHECK(inserted);
893
894 if (old_inputs_queued == 0 && old_outputs_queued == 0)
895 SchedulePollIfNeeded();
896 }
897
898 void V4L2SliceVideoDecodeAccelerator::Dequeue() {
899 DVLOGF(3);
900 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
901
902 struct v4l2_buffer dqbuf;
903 struct v4l2_plane planes[VIDEO_MAX_PLANES];
904 while (input_buffer_queued_count_ > 0) {
905 DCHECK(input_streamon_);
906 memset(&dqbuf, 0, sizeof(dqbuf));
907 memset(&planes, 0, sizeof(planes));
908 dqbuf.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
909 dqbuf.memory = V4L2_MEMORY_MMAP;
910 dqbuf.m.planes = planes;
911 dqbuf.length = input_planes_count_;
912 if (device_->Ioctl(VIDIOC_DQBUF, &dqbuf) != 0) {
913 if (errno == EAGAIN) {
914 // EAGAIN if we're just out of buffers to dequeue.
915 break;
916 }
917 PLOG(ERROR) << "ioctl() failed: VIDIOC_DQBUF";
918 NOTIFY_ERROR(PLATFORM_FAILURE);
919 return;
920 }
921 InputRecord& input_record = input_buffer_map_[dqbuf.index];
922 DCHECK(input_record.at_device);
923 input_record.at_device = false;
924 ReuseInputBuffer(dqbuf.index);
925 input_buffer_queued_count_--;
926 DVLOGF(4) << "Dequeued input=" << dqbuf.index
927 << " count: " << input_buffer_queued_count_;
928 }
929
930 while (output_buffer_queued_count_ > 0) {
931 DCHECK(output_streamon_);
932 memset(&dqbuf, 0, sizeof(dqbuf));
933 memset(&planes, 0, sizeof(planes));
934 dqbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
935 dqbuf.memory = V4L2_MEMORY_MMAP;
936 dqbuf.m.planes = planes;
937 dqbuf.length = output_planes_count_;
938 if (device_->Ioctl(VIDIOC_DQBUF, &dqbuf) != 0) {
939 if (errno == EAGAIN) {
940 // EAGAIN if we're just out of buffers to dequeue.
941 break;
942 }
943 PLOG(ERROR) << "ioctl() failed: VIDIOC_DQBUF";
944 NOTIFY_ERROR(PLATFORM_FAILURE);
945 return;
946 }
947 OutputRecord& output_record = output_buffer_map_[dqbuf.index];
948 DCHECK(output_record.at_device);
949 output_record.at_device = false;
950 output_buffer_queued_count_--;
951 DVLOGF(3) << "Dequeued output=" << dqbuf.index
952 << " count " << output_buffer_queued_count_;
953
954 V4L2DecodeSurfaceByOutputId::iterator it =
955 surfaces_at_device_.find(dqbuf.index);
956 if (it == surfaces_at_device_.end()) {
957 DLOG(ERROR) << "Got invalid surface from device.";
958 NOTIFY_ERROR(PLATFORM_FAILURE);
return;
959 }
960
961 it->second->SetDecoded();
962 surfaces_at_device_.erase(it);
963 }
964
965 // A frame was decoded, see if we can output it.
966 TryOutputSurfaces();
967
968 ProcessPendingEventsIfNeeded();
969 }
970
971 void V4L2SliceVideoDecodeAccelerator::ProcessPendingEventsIfNeeded() {
972 // Process pending events, if any, in the correct order.
973 // We always first process the surface set change, as it is an internal
974 // event from the decoder and interleaving it with external requests would
975 // put the decoder in an undefined state.
976 FinishSurfaceSetChangeIfNeeded();
977
978 // Process external (client) requests.
979 FinishFlushIfNeeded();
980 FinishResetIfNeeded();
981 }
982
983 void V4L2SliceVideoDecodeAccelerator::ReuseInputBuffer(int index) {
984 DVLOGF(4) << "Reusing input buffer, index=" << index;
985 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
986
987 DCHECK_LT(index, static_cast<int>(input_buffer_map_.size()));
988 InputRecord& input_record = input_buffer_map_[index];
989
990 DCHECK(!input_record.at_device);
991 input_record.input_id = -1;
992 input_record.bytes_used = 0;
993
994 DCHECK_EQ(std::count(free_input_buffers_.begin(), free_input_buffers_.end(),
995 index), 0);
996 free_input_buffers_.push_back(index);
997 }
998
999 void V4L2SliceVideoDecodeAccelerator::ReuseOutputBuffer(int index) {
1000 DVLOGF(4) << "Reusing output buffer, index=" << index;
1001 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
1002
1003 DCHECK_LT(index, static_cast<int>(output_buffer_map_.size()));
1004 OutputRecord& output_record = output_buffer_map_[index];
1005 DCHECK(!output_record.at_device);
1006 DCHECK(!output_record.at_client);
1007
1008 DCHECK_EQ(std::count(free_output_buffers_.begin(), free_output_buffers_.end(),
1009 index), 0);
1010 free_output_buffers_.push_back(index);
1011
1012 ScheduleDecodeBufferTaskIfNeeded();
1013 }
1014
1015 bool V4L2SliceVideoDecodeAccelerator::EnqueueInputRecord(
1016 int index,
1017 uint32_t config_store) {
1018 DVLOGF(3);
1019 DCHECK_LT(index, static_cast<int>(input_buffer_map_.size()));
1020 DCHECK_GT(config_store, 0u);
1021
1022 // Enqueue an input (VIDEO_OUTPUT) buffer for an input video frame.
1023 InputRecord& input_record = input_buffer_map_[index];
1024 DCHECK(!input_record.at_device);
1025 struct v4l2_buffer qbuf;
1026 struct v4l2_plane qbuf_planes[VIDEO_MAX_PLANES];
1027 memset(&qbuf, 0, sizeof(qbuf));
1028 memset(qbuf_planes, 0, sizeof(qbuf_planes));
1029 qbuf.index = index;
1030 qbuf.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
1031 qbuf.memory = V4L2_MEMORY_MMAP;
1032 qbuf.m.planes = qbuf_planes;
1033 qbuf.m.planes[0].bytesused = input_record.bytes_used;
1034 qbuf.length = input_planes_count_;
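// Associate this buffer with the config store carrying the codec-specific
// controls for this frame (a ChromeOS-specific V4L2 extension).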
1035 qbuf.config_store = config_store;
1036 IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_QBUF, &qbuf);
1037 input_record.at_device = true;
1038 input_buffer_queued_count_++;
1039 DVLOGF(4) << "Enqueued input=" << qbuf.index
1040 << " count: " << input_buffer_queued_count_;
1041
1042 return true;
1043 }
1044
1045 bool V4L2SliceVideoDecodeAccelerator::EnqueueOutputRecord(int index) {
1046 DVLOGF(3);
1047 DCHECK_LT(index, static_cast<int>(output_buffer_map_.size()));
1048
1049 // Enqueue an output (VIDEO_CAPTURE) buffer.
1050 OutputRecord& output_record = output_buffer_map_[index];
1051 DCHECK(!output_record.at_device);
1052 DCHECK(!output_record.at_client);
1053 DCHECK_NE(output_record.egl_image, EGL_NO_IMAGE_KHR);
1054 DCHECK_NE(output_record.picture_id, -1);
1055
1056 if (output_record.egl_sync != EGL_NO_SYNC_KHR) {
1057 // If we have to wait for completion, wait. Note that
1058 // free_output_buffers_ is a FIFO queue, so we always wait on the
1059 // buffer that has been in the queue the longest.
1060 if (eglClientWaitSyncKHR(egl_display_, output_record.egl_sync, 0,
1061 EGL_FOREVER_KHR) == EGL_FALSE) {
1062 // This will cause tearing, but is safe otherwise.
1063 DVLOGF(1) << "eglClientWaitSyncKHR failed!";
1064 }
1065 if (eglDestroySyncKHR(egl_display_, output_record.egl_sync) != EGL_TRUE) {
1066 LOGF(ERROR) << "eglDestroySyncKHR failed!";
1067 NOTIFY_ERROR(PLATFORM_FAILURE);
1068 return false;
1069 }
1070 output_record.egl_sync = EGL_NO_SYNC_KHR;
1071 }
1072
1073 struct v4l2_buffer qbuf;
1074 struct v4l2_plane qbuf_planes[VIDEO_MAX_PLANES];
1075 memset(&qbuf, 0, sizeof(qbuf));
1076 memset(qbuf_planes, 0, sizeof(qbuf_planes));
1077 qbuf.index = index;
1078 qbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
1079 qbuf.memory = V4L2_MEMORY_MMAP;
1080 qbuf.m.planes = qbuf_planes;
1081 qbuf.length = output_planes_count_;
1082 IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_QBUF, &qbuf);
1083 output_record.at_device = true;
1084 output_buffer_queued_count_++;
1085 DVLOGF(4) << "Enqueued output=" << qbuf.index
1086 << " count: " << output_buffer_queued_count_;
1087
1088 return true;
1089 }
1090
1091 bool V4L2SliceVideoDecodeAccelerator::StartDevicePoll() {
1092 DVLOGF(3) << "Starting device poll";
1093 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
1094 DCHECK(!device_poll_thread_.IsRunning());
1095
1096 // Start up the device poll thread and schedule its first DevicePollTask().
1097 if (!device_poll_thread_.Start()) {
1098 DLOG(ERROR) << "StartDevicePoll(): Device thread failed to start";
1099 NOTIFY_ERROR(PLATFORM_FAILURE);
1100 return false;
1101 }
1102 if (!input_streamon_) {
1103 __u32 type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
1104 IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_STREAMON, &type);
1105 input_streamon_ = true;
1106 }
1107
1108 if (!output_streamon_) {
1109 __u32 type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
1110 IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_STREAMON, &type);
1111 output_streamon_ = true;
1112 }
1113
1114 device_poll_thread_.message_loop()->PostTask(
1115 FROM_HERE, base::Bind(&V4L2SliceVideoDecodeAccelerator::DevicePollTask,
1116 base::Unretained(this), true));
1117
1118 return true;
1119 }
1120
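// If |keep_input_state| is true, the input (OUTPUT) queue is left streaming
// and its queued buffers are kept, so that an output-only reconfiguration
// (see FinishSurfaceSetChangeIfNeeded()) does not drop pending bitstream
// buffers.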
1121 bool V4L2SliceVideoDecodeAccelerator::StopDevicePoll(bool keep_input_state) {
1122 DVLOGF(3) << "Stopping device poll";
1123 if (decoder_thread_.IsRunning())
1124 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
1125
1126 // Signal the DevicePollTask() to stop, and stop the device poll thread.
1127 if (!device_->SetDevicePollInterrupt()) {
1128 PLOG(ERROR) << "SetDevicePollInterrupt(): failed";
1129 NOTIFY_ERROR(PLATFORM_FAILURE);
1130 return false;
1131 }
1132 device_poll_thread_.Stop();
1133 DVLOGF(3) << "Device poll thread stopped";
1134
1135 // Clear the interrupt now, to be sure.
1136 if (!device_->ClearDevicePollInterrupt()) {
1137 NOTIFY_ERROR(PLATFORM_FAILURE);
1138 return false;
1139 }
1140
1141 if (!keep_input_state) {
1142 if (input_streamon_) {
1143 __u32 type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
1144 IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_STREAMOFF, &type);
1145 }
1146 input_streamon_ = false;
1147 }
1148
1149 if (output_streamon_) {
1150 __u32 type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
1151 IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_STREAMOFF, &type);
1152 }
1153 output_streamon_ = false;
1154
1155 if (!keep_input_state) {
1156 for (size_t i = 0; i < input_buffer_map_.size(); ++i) {
1157 InputRecord& input_record = input_buffer_map_[i];
1158 if (input_record.at_device) {
1159 input_record.at_device = false;
1160 ReuseInputBuffer(i);
1161 input_buffer_queued_count_--;
1162 }
1163 }
1164 DCHECK_EQ(input_buffer_queued_count_, 0);
1165 }
1166
1167 // STREAMOFF makes the driver drop all buffers without decoding and DQBUFing,
1168 // so we mark them all as at_device = false and clear surfaces_at_device_.
1169 for (size_t i = 0; i < output_buffer_map_.size(); ++i) {
1170 OutputRecord& output_record = output_buffer_map_[i];
1171 if (output_record.at_device) {
1172 output_record.at_device = false;
1173 output_buffer_queued_count_--;
1174 }
1175 }
1176 surfaces_at_device_.clear();
1177 DCHECK_EQ(output_buffer_queued_count_, 0);
1178
1179 // Drop all surfaces that were awaiting decode before being displayed,
1180 // since we've just cancelled all outstanding decodes.
1181 while (!decoder_display_queue_.empty())
1182 decoder_display_queue_.pop();
1183
1184 DVLOGF(3) << "Device poll stopped";
1185 return true;
1186 }
1187
1188 void V4L2SliceVideoDecodeAccelerator::Decode(
1189 const media::BitstreamBuffer& bitstream_buffer) {
1190 DVLOGF(3) << "input_id=" << bitstream_buffer.id()
1191 << ", size=" << bitstream_buffer.size();
1192 DCHECK(decode_task_runner_->BelongsToCurrentThread());
1193
1194 if (bitstream_buffer.id() < 0) {
1195 LOG(ERROR) << "Invalid bitstream_buffer, id: " << bitstream_buffer.id();
1196 if (base::SharedMemory::IsHandleValid(bitstream_buffer.handle()))
1197 base::SharedMemory::CloseHandle(bitstream_buffer.handle());
1198 NOTIFY_ERROR(INVALID_ARGUMENT);
1199 return;
1200 }
1201
1202 decoder_thread_task_runner_->PostTask(
1203 FROM_HERE, base::Bind(&V4L2SliceVideoDecodeAccelerator::DecodeTask,
1204 base::Unretained(this), bitstream_buffer));
1205 }
1206
1207 void V4L2SliceVideoDecodeAccelerator::DecodeTask(
1208 const media::BitstreamBuffer& bitstream_buffer) {
1209 DVLOGF(3) << "input_id=" << bitstream_buffer.id()
1210 << " size=" << bitstream_buffer.size();
1211 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
1212
1213 std::unique_ptr<BitstreamBufferRef> bitstream_record(new BitstreamBufferRef(
1214 decode_client_, decode_task_runner_,
1215 new SharedMemoryRegion(bitstream_buffer, true), bitstream_buffer.id()));
1216 if (!bitstream_record->shm->Map()) {
1217 LOGF(ERROR) << "Could not map bitstream_buffer";
1218 NOTIFY_ERROR(UNREADABLE_INPUT);
1219 return;
1220 }
1221 DVLOGF(3) << "mapped at=" << bitstream_record->shm->memory();
1222
1223 decoder_input_queue_.push(
1224 linked_ptr<BitstreamBufferRef>(bitstream_record.release()));
1225
1226 ScheduleDecodeBufferTaskIfNeeded();
1227 }
1228
1229 bool V4L2SliceVideoDecodeAccelerator::TrySetNewBistreamBuffer() {
1230 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
1231 DCHECK(!decoder_current_bitstream_buffer_);
1232
1233 if (decoder_input_queue_.empty())
1234 return false;
1235
1236 decoder_current_bitstream_buffer_.reset(
1237 decoder_input_queue_.front().release());
1238 decoder_input_queue_.pop();
1239
1240 if (decoder_current_bitstream_buffer_->input_id == kFlushBufferId) {
1241 // This is a buffer we queued for ourselves to trigger flush at this time.
1242 InitiateFlush();
1243 return false;
1244 }
1245
1246 const uint8_t* const data = reinterpret_cast<const uint8_t*>(
1247 decoder_current_bitstream_buffer_->shm->memory());
1248 const size_t data_size = decoder_current_bitstream_buffer_->shm->size();
1249 decoder_->SetStream(data, data_size);
1250
1251 return true;
1252 }
1253
1254 void V4L2SliceVideoDecodeAccelerator::ScheduleDecodeBufferTaskIfNeeded() {
1255 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
1256 if (state_ == kDecoding) {
1257 decoder_thread_task_runner_->PostTask(
1258 FROM_HERE,
1259 base::Bind(&V4L2SliceVideoDecodeAccelerator::DecodeBufferTask,
1260 base::Unretained(this)));
1261 }
1262 }
1263
1264 void V4L2SliceVideoDecodeAccelerator::DecodeBufferTask() {
1265 DVLOGF(3);
1266 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
1267
1268 if (state_ != kDecoding) {
1269 DVLOGF(3) << "Early exit, not in kDecoding";
1270 return;
1271 }
1272
1273 while (true) {
1274 AcceleratedVideoDecoder::DecodeResult res;
1275 res = decoder_->Decode();
1276 switch (res) {
1277 case AcceleratedVideoDecoder::kAllocateNewSurfaces:
1278 DVLOGF(2) << "Decoder requesting a new set of surfaces";
1279 InitiateSurfaceSetChange();
1280 return;
1281
1282 case AcceleratedVideoDecoder::kRanOutOfStreamData:
1283 decoder_current_bitstream_buffer_.reset();
1284 if (!TrySetNewBistreamBuffer())
1285 return;
1286
1287 break;
1288
1289 case AcceleratedVideoDecoder::kRanOutOfSurfaces:
1290 // No more surfaces for the decoder, we'll come back once we have more.
1291 DVLOGF(4) << "Ran out of surfaces";
1292 return;
1293
1294 case AcceleratedVideoDecoder::kDecodeError:
1295 DVLOGF(1) << "Error decoding stream";
1296 NOTIFY_ERROR(PLATFORM_FAILURE);
1297 return;
1298 }
1299 }
1300 }
1301
1302 void V4L2SliceVideoDecodeAccelerator::InitiateSurfaceSetChange() {
1303 DVLOGF(2);
1304 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
1305
1306 DCHECK_EQ(state_, kDecoding);
1307 state_ = kIdle;
1308
1309 DCHECK(!surface_set_change_pending_);
1310 surface_set_change_pending_ = true;
1311
1312 FinishSurfaceSetChangeIfNeeded();
1313 }
1314
1315 void V4L2SliceVideoDecodeAccelerator::FinishSurfaceSetChangeIfNeeded() {
1316 DVLOGF(2);
1317 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
1318
1319 if (!surface_set_change_pending_ || !surfaces_at_device_.empty())
1320 return;
1321
1322 DCHECK_EQ(state_, kIdle);
1323 DCHECK(decoder_display_queue_.empty());
1324 // All output buffers should've been returned from decoder and device by now.
1325 // The only remaining owner of surfaces may be display (client), and we will
1326 // dismiss them when destroying output buffers below.
1327 DCHECK_EQ(free_output_buffers_.size() + surfaces_at_display_.size(),
1328 output_buffer_map_.size());
1329
1330 // Keep input queue running while we switch outputs.
1331 if (!StopDevicePoll(true)) {
1332 NOTIFY_ERROR(PLATFORM_FAILURE);
1333 return;
1334 }
1335
1336 // This will return only once all buffers are dismissed and destroyed.
1337 // This does not wait until they are displayed however, as display retains
1338 // references to the buffers bound to textures and will release them
1339 // after displaying.
1340 if (!DestroyOutputs(true)) {
1341 NOTIFY_ERROR(PLATFORM_FAILURE);
1342 return;
1343 }
1344
1345 if (!CreateOutputBuffers()) {
1346 NOTIFY_ERROR(PLATFORM_FAILURE);
1347 return;
1348 }
1349
1350 if (!StartDevicePoll()) {
1351 NOTIFY_ERROR(PLATFORM_FAILURE);
1352 return;
1353 }
1354
1355 DVLOGF(3) << "Surface set change finished";
1356
1357 surface_set_change_pending_ = false;
1358 state_ = kDecoding;
1359 ScheduleDecodeBufferTaskIfNeeded();
1360 }
1361
1362 bool V4L2SliceVideoDecodeAccelerator::DestroyOutputs(bool dismiss) {
1363 DVLOGF(3);
1364 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
1365 std::vector<EGLImageKHR> egl_images_to_destroy;
1366 std::vector<int32_t> picture_buffers_to_dismiss;
1367
1368 if (output_buffer_map_.empty())
1369 return true;
1370
1371 for (auto output_record : output_buffer_map_) {
1372 DCHECK(!output_record.at_device);
1373
1374 if (output_record.egl_sync != EGL_NO_SYNC_KHR) {
1375 if (eglDestroySyncKHR(egl_display_, output_record.egl_sync) != EGL_TRUE)
1376 DVLOGF(1) << "eglDestroySyncKHR failed.";
1377 }
1378
1379 if (output_record.egl_image != EGL_NO_IMAGE_KHR) {
1380 child_task_runner_->PostTask(
1381 FROM_HERE,
1382 base::Bind(base::IgnoreResult(&V4L2Device::DestroyEGLImage), device_,
1383 egl_display_, output_record.egl_image));
1384 }
1385
1386 picture_buffers_to_dismiss.push_back(output_record.picture_id);
1387 }
1388
1389 if (dismiss) {
1390 DVLOGF(2) << "Scheduling picture dismissal";
1391 base::WaitableEvent done(false, false);
1392 child_task_runner_->PostTask(
1393 FROM_HERE, base::Bind(&V4L2SliceVideoDecodeAccelerator::DismissPictures,
1394 weak_this_, picture_buffers_to_dismiss, &done));
1395 done.Wait();
1396 }
1397
1398 // At this point the client can't call ReusePictureBuffer on any of the pictures
1399 // anymore, so it's safe to destroy.
1400 return DestroyOutputBuffers();
1401 }
1402
1403 bool V4L2SliceVideoDecodeAccelerator::DestroyOutputBuffers() {
1404 DVLOGF(3);
1405 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread() ||
1406 !decoder_thread_.IsRunning());
1407 DCHECK(!output_streamon_);
1408 DCHECK(surfaces_at_device_.empty());
1409 DCHECK(decoder_display_queue_.empty());
1410 DCHECK_EQ(surfaces_at_display_.size() + free_output_buffers_.size(),
1411 output_buffer_map_.size());
1412
1413 if (output_buffer_map_.empty())
1414 return true;
1415
1416 // It's OK to do this; the client will retain references to the textures, but
1417 // we are no longer interested in reusing the surfaces.
1418 // This will prevent us from reusing old surfaces in case we have some
1419 // ReusePictureBuffer() pending on ChildThread already. It's ok to ignore
1420 // them, because we have already dismissed them (in DestroyOutputs()).
1421 for (const auto& surface_at_display : surfaces_at_display_) {
1422 size_t index = surface_at_display.second->output_record();
1423 DCHECK_LT(index, output_buffer_map_.size());
1424 OutputRecord& output_record = output_buffer_map_[index];
1425 DCHECK(output_record.at_client);
1426 output_record.at_client = false;
1427 }
1428 surfaces_at_display_.clear();
1429 DCHECK_EQ(free_output_buffers_.size(), output_buffer_map_.size());
1430
1431 free_output_buffers_.clear();
1432 output_buffer_map_.clear();
1433
1434 struct v4l2_requestbuffers reqbufs;
1435 memset(&reqbufs, 0, sizeof(reqbufs));
1436 reqbufs.count = 0;
1437 reqbufs.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
1438 reqbufs.memory = V4L2_MEMORY_MMAP;
1439 IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_REQBUFS, &reqbufs);
1440
1441 return true;
1442 }
1443
1444 void V4L2SliceVideoDecodeAccelerator::AssignPictureBuffers(
1445 const std::vector<media::PictureBuffer>& buffers) {
1446 DVLOGF(3);
1447 DCHECK(child_task_runner_->BelongsToCurrentThread());
1448
1449 const uint32_t req_buffer_count = decoder_->GetRequiredNumOfPictures();
1450
1451 if (buffers.size() < req_buffer_count) {
1452 DLOG(ERROR) << "Failed to provide requested picture buffers. "
1453 << "(Got " << buffers.size()
1454 << ", requested " << req_buffer_count << ")";
1455 NOTIFY_ERROR(INVALID_ARGUMENT);
1456 return;
1457 }
1458
1459 gfx::GLContext* gl_context = get_gl_context_cb_.Run();
1460 if (!gl_context || !make_context_current_cb_.Run()) {
1461 DLOG(ERROR) << "No GL context";
1462 NOTIFY_ERROR(PLATFORM_FAILURE);
1463 return;
1464 }
1465
1466 gfx::ScopedTextureBinder bind_restore(GL_TEXTURE_EXTERNAL_OES, 0);
1467
1468 // It's safe to manipulate all the buffer state here, because the decoder
1469 // thread is waiting on pictures_assigned_.
1470
1471 // Allocate the output buffers.
1472 struct v4l2_requestbuffers reqbufs;
1473 memset(&reqbufs, 0, sizeof(reqbufs));
1474 reqbufs.count = buffers.size();
1475 reqbufs.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
1476 reqbufs.memory = V4L2_MEMORY_MMAP;
1477 IOCTL_OR_ERROR_RETURN(VIDIOC_REQBUFS, &reqbufs);
1478
1479 if (reqbufs.count != buffers.size()) {
1480 DLOG(ERROR) << "Could not allocate enough output buffers";
1481 NOTIFY_ERROR(PLATFORM_FAILURE);
1482 return;
1483 }
1484
1485 output_buffer_map_.resize(buffers.size());
1486
1487 DCHECK(free_output_buffers_.empty());
1488 for (size_t i = 0; i < output_buffer_map_.size(); ++i) {
1489 DCHECK(buffers[i].size() == coded_size_);
1490
1491 OutputRecord& output_record = output_buffer_map_[i];
1492 DCHECK(!output_record.at_device);
1493 DCHECK(!output_record.at_client);
1494 DCHECK_EQ(output_record.egl_image, EGL_NO_IMAGE_KHR);
1495 DCHECK_EQ(output_record.egl_sync, EGL_NO_SYNC_KHR);
1496 DCHECK_EQ(output_record.picture_id, -1);
1497 DCHECK_EQ(output_record.cleared, false);
1498
1499 DCHECK_LE(1u, buffers[i].texture_ids().size());
1500 EGLImageKHR egl_image = device_->CreateEGLImage(
1501 egl_display_, gl_context->GetHandle(), buffers[i].texture_ids()[0],
1502 buffers[i].size(), i, output_format_fourcc_, output_planes_count_);
1503 if (egl_image == EGL_NO_IMAGE_KHR) {
1504 LOGF(ERROR) << "Could not create EGLImageKHR";
1505 // Ownership of EGLImages allocated in previous iterations of this loop
1506 // has been transferred to output_buffer_map_. After we error-out here
1507 // the destructor will handle their cleanup.
1508 NOTIFY_ERROR(PLATFORM_FAILURE);
1509 return;
1510 }
1511
1512 output_record.egl_image = egl_image;
1513 output_record.picture_id = buffers[i].id();
1514 free_output_buffers_.push_back(i);
1515 DVLOGF(3) << "buffer[" << i << "]: picture_id=" << output_record.picture_id;
1516 }
1517
1518 pictures_assigned_.Signal();
1519 }
1520
1521 void V4L2SliceVideoDecodeAccelerator::ReusePictureBuffer(
1522 int32_t picture_buffer_id) {
1523 DCHECK(child_task_runner_->BelongsToCurrentThread());
1524 DVLOGF(4) << "picture_buffer_id=" << picture_buffer_id;
1525
1526 if (!make_context_current_cb_.Run()) {
1527 LOGF(ERROR) << "could not make context current";
1528 NOTIFY_ERROR(PLATFORM_FAILURE);
1529 return;
1530 }
1531
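// Insert a fence into the client's GL command stream. The decoder thread
// waits on it in EnqueueOutputRecord() before handing the buffer back to the
// device, ensuring the client has finished reading from the texture.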
1532 EGLSyncKHR egl_sync =
1533 eglCreateSyncKHR(egl_display_, EGL_SYNC_FENCE_KHR, NULL);
1534 if (egl_sync == EGL_NO_SYNC_KHR) {
1535 LOGF(ERROR) << "eglCreateSyncKHR() failed";
1536 NOTIFY_ERROR(PLATFORM_FAILURE);
1537 return;
1538 }
1539
1540 std::unique_ptr<EGLSyncKHRRef> egl_sync_ref(
1541 new EGLSyncKHRRef(egl_display_, egl_sync));
1542 decoder_thread_task_runner_->PostTask(
1543 FROM_HERE,
1544 base::Bind(&V4L2SliceVideoDecodeAccelerator::ReusePictureBufferTask,
1545 base::Unretained(this), picture_buffer_id,
1546 base::Passed(&egl_sync_ref)));
1547 }
1548
1549 void V4L2SliceVideoDecodeAccelerator::ReusePictureBufferTask(
1550 int32_t picture_buffer_id,
1551 std::unique_ptr<EGLSyncKHRRef> egl_sync_ref) {
1552 DVLOGF(3) << "picture_buffer_id=" << picture_buffer_id;
1553 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
1554
1555 V4L2DecodeSurfaceByPictureBufferId::iterator it =
1556 surfaces_at_display_.find(picture_buffer_id);
1557 if (it == surfaces_at_display_.end()) {
1558 // It's possible that we've already posted a DismissPictureBuffer for this
1559 // picture, but it has not yet executed when this ReusePictureBuffer was
1560 // posted to us by the client. In that case just ignore this (we've already
1561 // dismissed it and accounted for that) and let the sync object get
1562 // destroyed.
1563 DVLOGF(3) << "got picture id=" << picture_buffer_id
1564 << " not in use (anymore?).";
1565 return;
1566 }
1567
1568 OutputRecord& output_record = output_buffer_map_[it->second->output_record()];
1569 if (output_record.at_device || !output_record.at_client) {
1570 DVLOGF(1) << "picture_buffer_id not reusable";
1571 NOTIFY_ERROR(INVALID_ARGUMENT);
1572 return;
1573 }
1574
1575 DCHECK_EQ(output_record.egl_sync, EGL_NO_SYNC_KHR);
1576 DCHECK(!output_record.at_device);
1577 output_record.at_client = false;
1578 output_record.egl_sync = egl_sync_ref->egl_sync;
1579 // Take ownership of the EGLSync.
1580 egl_sync_ref->egl_sync = EGL_NO_SYNC_KHR;
1581 surfaces_at_display_.erase(it);
1582 }
1583
1584 void V4L2SliceVideoDecodeAccelerator::Flush() {
1585 DVLOGF(3);
1586 DCHECK(child_task_runner_->BelongsToCurrentThread());
1587
1588 decoder_thread_task_runner_->PostTask(
1589 FROM_HERE, base::Bind(&V4L2SliceVideoDecodeAccelerator::FlushTask,
1590 base::Unretained(this)));
1591 }
1592
1593 void V4L2SliceVideoDecodeAccelerator::FlushTask() {
1594 DVLOGF(3);
1595 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
1596
1597 if (!decoder_input_queue_.empty()) {
1598 // We are not done with pending inputs, so queue an empty buffer,
1599 // which, when reached, will trigger the flush sequence.
1600 decoder_input_queue_.push(
1601 linked_ptr<BitstreamBufferRef>(new BitstreamBufferRef(
1602 decode_client_, decode_task_runner_, nullptr, kFlushBufferId)));
1603 return;
1604 }
1605
1606 // No more inputs pending, so just finish flushing here.
1607 InitiateFlush();
1608 }
1609
1610 void V4L2SliceVideoDecodeAccelerator::InitiateFlush() {
1611 DVLOGF(3);
1612 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
1613
1614 DCHECK(!decoder_flushing_);
1615 DCHECK_EQ(state_, kDecoding);
1616 state_ = kIdle;
1617
1618 // This will trigger output for all remaining surfaces in the decoder.
1619 // However, not all of them may be decoded yet (they would be queued
1620 // in hardware then).
1621 if (!decoder_->Flush()) {
1622 DVLOGF(1) << "Failed flushing the decoder.";
1623 NOTIFY_ERROR(PLATFORM_FAILURE);
1624 return;
1625 }
1626
1627 // Put the decoder in an idle state, ready to resume.
1628 decoder_->Reset();
1629
1630 decoder_flushing_ = true;
1631
1632 decoder_thread_task_runner_->PostTask(
1633 FROM_HERE,
1634 base::Bind(&V4L2SliceVideoDecodeAccelerator::FinishFlushIfNeeded,
1635 base::Unretained(this)));
1636 }
1637
1638 void V4L2SliceVideoDecodeAccelerator::FinishFlushIfNeeded() {
1639 DVLOGF(3);
1640 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
1641
1642 if (!decoder_flushing_ || !surfaces_at_device_.empty())
1643 return;
1644
1645 DCHECK_EQ(state_, kIdle);
1646
1647 // At this point, all remaining surfaces are decoded and dequeued, and since
1648 // we have already scheduled output for them in InitiateFlush(), their
1649 // respective PictureReady calls have been posted (or they have been queued on
1650 // pending_picture_ready_). So if we SendPictureReady() now, all remaining
1651 // PictureReady() calls will have been posted to the client and we can
1652 // post NotifyFlushDone().
1653 DCHECK(decoder_display_queue_.empty());
1654
1655 // Decoder should have already returned all surfaces and all surfaces are
1656 // out of hardware. There can be no other owners of input buffers.
1657 DCHECK_EQ(free_input_buffers_.size(), input_buffer_map_.size());
1658
1659 SendPictureReady();
1660
1661 child_task_runner_->PostTask(FROM_HERE,
1662 base::Bind(&Client::NotifyFlushDone, client_));
1663
1664 decoder_flushing_ = false;
1665
1666 DVLOGF(3) << "Flush finished";
1667 state_ = kDecoding;
1668 ScheduleDecodeBufferTaskIfNeeded();
1669 }
1670
1671 void V4L2SliceVideoDecodeAccelerator::Reset() {
1672 DVLOGF(3);
1673 DCHECK(child_task_runner_->BelongsToCurrentThread());
1674
1675 decoder_thread_task_runner_->PostTask(
1676 FROM_HERE, base::Bind(&V4L2SliceVideoDecodeAccelerator::ResetTask,
1677 base::Unretained(this)));
1678 }
1679
1680 void V4L2SliceVideoDecodeAccelerator::ResetTask() {
1681 DVLOGF(3);
1682 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
1683
1684 if (decoder_resetting_) {
1685 // This is a bug in the client: multiple Reset()s before NotifyResetDone()
1686 // are not allowed.
1687 NOTREACHED() << "Client should not be requesting multiple Reset()s";
1688 return;
1689 }
1690
1691 DCHECK_EQ(state_, kDecoding);
1692 state_ = kIdle;
1693
1694 // Put the decoder in an idle state, ready to resume.
1695 decoder_->Reset();
1696
1697 decoder_resetting_ = true;
1698
1699 // Drop all remaining inputs.
1700 decoder_current_bitstream_buffer_.reset();
1701 while (!decoder_input_queue_.empty())
1702 decoder_input_queue_.pop();
1703
1704 FinishResetIfNeeded();
1705 }
1706
1707 void V4L2SliceVideoDecodeAccelerator::FinishResetIfNeeded() {
1708 DVLOGF(3);
1709 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
1710
1711 if (!decoder_resetting_ || !surfaces_at_device_.empty())
1712 return;
1713
1714 DCHECK_EQ(state_, kIdle);
1715 DCHECK(!decoder_flushing_);
1716 SendPictureReady();
1717
1718 // Drop any pending outputs.
1719 while (!decoder_display_queue_.empty())
1720 decoder_display_queue_.pop();
1721
1722 // At this point there can be no input buffers in the decoder: we Reset()
1723 // it in ResetTask() and, having been in kIdle since, have not scheduled
1724 // any new Decode()s. We don't have any surfaces in the HW either -
1725 // we just checked that surfaces_at_device_.empty(), and inputs are tied
1726 // to surfaces. Since there can be no other owners of input buffers, we can
1727 // simply mark them all as available.
1728 DCHECK_EQ(input_buffer_queued_count_, 0);
1729 free_input_buffers_.clear();
1730 for (size_t i = 0; i < input_buffer_map_.size(); ++i) {
1731 DCHECK(!input_buffer_map_[i].at_device);
1732 ReuseInputBuffer(i);
1733 }
1734
1735 decoder_resetting_ = false;
1736
1737 child_task_runner_->PostTask(FROM_HERE,
1738 base::Bind(&Client::NotifyResetDone, client_));
1739
1740 DVLOGF(3) << "Reset finished";
1741
1742 state_ = kDecoding;
1743 ScheduleDecodeBufferTaskIfNeeded();
1744 }
1745
1746 void V4L2SliceVideoDecodeAccelerator::SetErrorState(Error error) {
1747 // We can touch state_ only if this is the decoder thread or the
1748 // decoder thread isn't running.
1749 if (decoder_thread_.IsRunning() &&
1750 !decoder_thread_task_runner_->BelongsToCurrentThread()) {
1751 decoder_thread_task_runner_->PostTask(
1752 FROM_HERE, base::Bind(&V4L2SliceVideoDecodeAccelerator::SetErrorState,
1753 base::Unretained(this), error));
1754 return;
1755 }
1756
1757 // Post NotifyError only if we are already initialized, as the API does
1758 // not allow doing so before that.
1759 if (state_ != kError && state_ != kUninitialized)
1760 NotifyError(error);
1761
1762 state_ = kError;
1763 }
1764
1765 V4L2SliceVideoDecodeAccelerator::V4L2H264Accelerator::V4L2H264Accelerator(
1766 V4L2SliceVideoDecodeAccelerator* v4l2_dec)
1767 : num_slices_(0), v4l2_dec_(v4l2_dec) {
1768 DCHECK(v4l2_dec_);
1769 }
1770
1771 V4L2SliceVideoDecodeAccelerator::V4L2H264Accelerator::~V4L2H264Accelerator() {
1772 }
1773
1774 scoped_refptr<H264Picture>
1775 V4L2SliceVideoDecodeAccelerator::V4L2H264Accelerator::CreateH264Picture() {
1776 scoped_refptr<V4L2DecodeSurface> dec_surface = v4l2_dec_->CreateSurface();
1777 if (!dec_surface)
1778 return nullptr;
1779
1780 return new V4L2H264Picture(dec_surface);
1781 }
1782
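// Fill |dst_list| with the DPB positions of the pictures in |src_pic_list|,
// padding any remaining entries with VIDEO_MAX_FRAME to mark them as unused.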
1783 void V4L2SliceVideoDecodeAccelerator::V4L2H264Accelerator::
1784 H264PictureListToDPBIndicesList(const H264Picture::Vector& src_pic_list,
1785 uint8_t dst_list[kDPBIndicesListSize]) {
1786 size_t i;
1787 for (i = 0; i < src_pic_list.size() && i < kDPBIndicesListSize; ++i) {
1788 const scoped_refptr<H264Picture>& pic = src_pic_list[i];
1789 dst_list[i] = pic ? pic->dpb_position : VIDEO_MAX_FRAME;
1790 }
1791
1792 while (i < kDPBIndicesListSize)
1793 dst_list[i++] = VIDEO_MAX_FRAME;
1794 }
1795
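// Translate the decoder's DPB into v4l2_decode_param_.dpb entries and collect
// the V4L2 surfaces backing the (existing) reference pictures in
// |ref_surfaces|.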
1796 void V4L2SliceVideoDecodeAccelerator::V4L2H264Accelerator::H264DPBToV4L2DPB(
1797 const H264DPB& dpb,
1798 std::vector<scoped_refptr<V4L2DecodeSurface>>* ref_surfaces) {
1799 memset(v4l2_decode_param_.dpb, 0, sizeof(v4l2_decode_param_.dpb));
1800 size_t i = 0;
1801 for (const auto& pic : dpb) {
1802 if (i >= arraysize(v4l2_decode_param_.dpb)) {
1803 DVLOG(1) << "Invalid DPB size";
1804 break;
1805 }
1806
1807 int index = VIDEO_MAX_FRAME;
1808 if (!pic->nonexisting) {
1809 scoped_refptr<V4L2DecodeSurface> dec_surface =
1810 H264PictureToV4L2DecodeSurface(pic);
1811 index = dec_surface->output_record();
1812 ref_surfaces->push_back(dec_surface);
1813 }
1814
1815 struct v4l2_h264_dpb_entry& entry = v4l2_decode_param_.dpb[i++];
1816 entry.buf_index = index;
1817 entry.frame_num = pic->frame_num;
1818 entry.pic_num = pic->pic_num;
1819 entry.top_field_order_cnt = pic->top_field_order_cnt;
1820 entry.bottom_field_order_cnt = pic->bottom_field_order_cnt;
1821 entry.flags = (pic->ref ? V4L2_H264_DPB_ENTRY_FLAG_ACTIVE : 0) |
1822 (pic->long_term ? V4L2_H264_DPB_ENTRY_FLAG_LONG_TERM : 0);
1823 }
1824 }
1825
1826 bool V4L2SliceVideoDecodeAccelerator::V4L2H264Accelerator::SubmitFrameMetadata(
1827 const media::H264SPS* sps,
1828 const media::H264PPS* pps,
1829 const H264DPB& dpb,
1830 const H264Picture::Vector& ref_pic_listp0,
1831 const H264Picture::Vector& ref_pic_listb0,
1832 const H264Picture::Vector& ref_pic_listb1,
1833 const scoped_refptr<H264Picture>& pic) {
1834 struct v4l2_ext_control ctrl;
1835 std::vector<struct v4l2_ext_control> ctrls;
1836
1837 struct v4l2_ctrl_h264_sps v4l2_sps;
1838 memset(&v4l2_sps, 0, sizeof(v4l2_sps));
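// Note: the parentheses below are required; "?:" binds less tightly than
// "|", so without them the expression would not be the intended bitwise OR
// of the individual constraint-set flags.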
1839 v4l2_sps.constraint_set_flags =
1840 (sps->constraint_set0_flag ? V4L2_H264_SPS_CONSTRAINT_SET0_FLAG : 0) |
1841 (sps->constraint_set1_flag ? V4L2_H264_SPS_CONSTRAINT_SET1_FLAG : 0) |
1842 (sps->constraint_set2_flag ? V4L2_H264_SPS_CONSTRAINT_SET2_FLAG : 0) |
1843 (sps->constraint_set3_flag ? V4L2_H264_SPS_CONSTRAINT_SET3_FLAG : 0) |
1844 (sps->constraint_set4_flag ? V4L2_H264_SPS_CONSTRAINT_SET4_FLAG : 0) |
1845 (sps->constraint_set5_flag ? V4L2_H264_SPS_CONSTRAINT_SET5_FLAG : 0);
1846 #define SPS_TO_V4L2SPS(a) v4l2_sps.a = sps->a
1847 SPS_TO_V4L2SPS(profile_idc);
1848 SPS_TO_V4L2SPS(level_idc);
1849 SPS_TO_V4L2SPS(seq_parameter_set_id);
1850 SPS_TO_V4L2SPS(chroma_format_idc);
1851 SPS_TO_V4L2SPS(bit_depth_luma_minus8);
1852 SPS_TO_V4L2SPS(bit_depth_chroma_minus8);
1853 SPS_TO_V4L2SPS(log2_max_frame_num_minus4);
1854 SPS_TO_V4L2SPS(pic_order_cnt_type);
1855 SPS_TO_V4L2SPS(log2_max_pic_order_cnt_lsb_minus4);
1856 SPS_TO_V4L2SPS(offset_for_non_ref_pic);
1857 SPS_TO_V4L2SPS(offset_for_top_to_bottom_field);
1858 SPS_TO_V4L2SPS(num_ref_frames_in_pic_order_cnt_cycle);
1859
1860 static_assert(arraysize(v4l2_sps.offset_for_ref_frame) ==
1861 arraysize(sps->offset_for_ref_frame),
1862 "offset_for_ref_frame arrays must be same size");
1863 for (size_t i = 0; i < arraysize(v4l2_sps.offset_for_ref_frame); ++i)
1864 v4l2_sps.offset_for_ref_frame[i] = sps->offset_for_ref_frame[i];
1865 SPS_TO_V4L2SPS(max_num_ref_frames);
1866 SPS_TO_V4L2SPS(pic_width_in_mbs_minus1);
1867 SPS_TO_V4L2SPS(pic_height_in_map_units_minus1);
1868 #undef SPS_TO_V4L2SPS
1869
1870 #define SET_V4L2_SPS_FLAG_IF(cond, flag) \
1871 v4l2_sps.flags |= ((sps->cond) ? (flag) : 0)
1872 SET_V4L2_SPS_FLAG_IF(separate_colour_plane_flag,
1873 V4L2_H264_SPS_FLAG_SEPARATE_COLOUR_PLANE);
1874 SET_V4L2_SPS_FLAG_IF(qpprime_y_zero_transform_bypass_flag,
1875 V4L2_H264_SPS_FLAG_QPPRIME_Y_ZERO_TRANSFORM_BYPASS);
1876 SET_V4L2_SPS_FLAG_IF(delta_pic_order_always_zero_flag,
1877 V4L2_H264_SPS_FLAG_DELTA_PIC_ORDER_ALWAYS_ZERO);
1878 SET_V4L2_SPS_FLAG_IF(gaps_in_frame_num_value_allowed_flag,
1879 V4L2_H264_SPS_FLAG_GAPS_IN_FRAME_NUM_VALUE_ALLOWED);
1880 SET_V4L2_SPS_FLAG_IF(frame_mbs_only_flag, V4L2_H264_SPS_FLAG_FRAME_MBS_ONLY);
1881 SET_V4L2_SPS_FLAG_IF(mb_adaptive_frame_field_flag,
1882 V4L2_H264_SPS_FLAG_MB_ADAPTIVE_FRAME_FIELD);
1883 SET_V4L2_SPS_FLAG_IF(direct_8x8_inference_flag,
1884 V4L2_H264_SPS_FLAG_DIRECT_8X8_INFERENCE);
1885 #undef SET_V4L2_SPS_FLAG_IF
1886 memset(&ctrl, 0, sizeof(ctrl));
1887 ctrl.id = V4L2_CID_MPEG_VIDEO_H264_SPS;
1888 ctrl.size = sizeof(v4l2_sps);
1889 ctrl.p_h264_sps = &v4l2_sps;
1890 ctrls.push_back(ctrl);
1891
1892 struct v4l2_ctrl_h264_pps v4l2_pps;
1893 memset(&v4l2_pps, 0, sizeof(v4l2_pps));
1894 #define PPS_TO_V4L2PPS(a) v4l2_pps.a = pps->a
1895 PPS_TO_V4L2PPS(pic_parameter_set_id);
1896 PPS_TO_V4L2PPS(seq_parameter_set_id);
1897 PPS_TO_V4L2PPS(num_slice_groups_minus1);
1898 PPS_TO_V4L2PPS(num_ref_idx_l0_default_active_minus1);
1899 PPS_TO_V4L2PPS(num_ref_idx_l1_default_active_minus1);
1900 PPS_TO_V4L2PPS(weighted_bipred_idc);
1901 PPS_TO_V4L2PPS(pic_init_qp_minus26);
1902 PPS_TO_V4L2PPS(pic_init_qs_minus26);
1903 PPS_TO_V4L2PPS(chroma_qp_index_offset);
1904 PPS_TO_V4L2PPS(second_chroma_qp_index_offset);
1905 #undef PPS_TO_V4L2PPS
1906
1907 #define SET_V4L2_PPS_FLAG_IF(cond, flag) \
1908 v4l2_pps.flags |= ((pps->cond) ? (flag) : 0)
1909 SET_V4L2_PPS_FLAG_IF(entropy_coding_mode_flag,
1910 V4L2_H264_PPS_FLAG_ENTROPY_CODING_MODE);
1911 SET_V4L2_PPS_FLAG_IF(
1912 bottom_field_pic_order_in_frame_present_flag,
1913 V4L2_H264_PPS_FLAG_BOTTOM_FIELD_PIC_ORDER_IN_FRAME_PRESENT);
1914 SET_V4L2_PPS_FLAG_IF(weighted_pred_flag, V4L2_H264_PPS_FLAG_WEIGHTED_PRED);
1915 SET_V4L2_PPS_FLAG_IF(deblocking_filter_control_present_flag,
1916 V4L2_H264_PPS_FLAG_DEBLOCKING_FILTER_CONTROL_PRESENT);
1917 SET_V4L2_PPS_FLAG_IF(constrained_intra_pred_flag,
1918 V4L2_H264_PPS_FLAG_CONSTRAINED_INTRA_PRED);
1919 SET_V4L2_PPS_FLAG_IF(redundant_pic_cnt_present_flag,
1920 V4L2_H264_PPS_FLAG_REDUNDANT_PIC_CNT_PRESENT);
1921 SET_V4L2_PPS_FLAG_IF(transform_8x8_mode_flag,
1922 V4L2_H264_PPS_FLAG_TRANSFORM_8X8_MODE);
1923 SET_V4L2_PPS_FLAG_IF(pic_scaling_matrix_present_flag,
1924 V4L2_H264_PPS_FLAG_PIC_SCALING_MATRIX_PRESENT);
1925 #undef SET_V4L2_PPS_FLAG_IF
1926 memset(&ctrl, 0, sizeof(ctrl));
1927 ctrl.id = V4L2_CID_MPEG_VIDEO_H264_PPS;
1928 ctrl.size = sizeof(v4l2_pps);
1929 ctrl.p_h264_pps = &v4l2_pps;
1930 ctrls.push_back(ctrl);
1931
1932 struct v4l2_ctrl_h264_scaling_matrix v4l2_scaling_matrix;
1933 memset(&v4l2_scaling_matrix, 0, sizeof(v4l2_scaling_matrix));
1934 static_assert(arraysize(v4l2_scaling_matrix.scaling_list_4x4) <=
1935 arraysize(pps->scaling_list4x4) &&
1936 arraysize(v4l2_scaling_matrix.scaling_list_4x4[0]) <=
1937 arraysize(pps->scaling_list4x4[0]) &&
1938 arraysize(v4l2_scaling_matrix.scaling_list_8x8) <=
1939 arraysize(pps->scaling_list8x8) &&
1940 arraysize(v4l2_scaling_matrix.scaling_list_8x8[0]) <=
1941 arraysize(pps->scaling_list8x8[0]),
1942 "scaling_lists must be of correct size");
1943 for (size_t i = 0; i < arraysize(v4l2_scaling_matrix.scaling_list_4x4); ++i) {
1944 for (size_t j = 0; j < arraysize(v4l2_scaling_matrix.scaling_list_4x4[i]);
1945 ++j) {
1946 v4l2_scaling_matrix.scaling_list_4x4[i][j] = pps->scaling_list4x4[i][j];
1947 }
1948 }
1949 for (size_t i = 0; i < arraysize(v4l2_scaling_matrix.scaling_list_8x8); ++i) {
1950 for (size_t j = 0; j < arraysize(v4l2_scaling_matrix.scaling_list_8x8[i]);
1951 ++j) {
1952 v4l2_scaling_matrix.scaling_list_8x8[i][j] = pps->scaling_list8x8[i][j];
1953 }
1954 }
1955 memset(&ctrl, 0, sizeof(ctrl));
1956 ctrl.id = V4L2_CID_MPEG_VIDEO_H264_SCALING_MATRIX;
1957 ctrl.size = sizeof(v4l2_scaling_matrix);
1958 ctrl.p_h264_scal_mtrx = &v4l2_scaling_matrix;
1959 ctrls.push_back(ctrl);
1960
1961 scoped_refptr<V4L2DecodeSurface> dec_surface =
1962 H264PictureToV4L2DecodeSurface(pic);
1963
1964 struct v4l2_ext_controls ext_ctrls;
1965 memset(&ext_ctrls, 0, sizeof(ext_ctrls));
1966 ext_ctrls.count = ctrls.size();
1967 ext_ctrls.controls = &ctrls[0];
1968 ext_ctrls.config_store = dec_surface->config_store();
1969 v4l2_dec_->SubmitExtControls(&ext_ctrls);
1970
1971 H264PictureListToDPBIndicesList(ref_pic_listp0,
1972 v4l2_decode_param_.ref_pic_list_p0);
1973 H264PictureListToDPBIndicesList(ref_pic_listb0,
1974 v4l2_decode_param_.ref_pic_list_b0);
1975 H264PictureListToDPBIndicesList(ref_pic_listb1,
1976 v4l2_decode_param_.ref_pic_list_b1);
1977
1978 std::vector<scoped_refptr<V4L2DecodeSurface>> ref_surfaces;
1979 H264DPBToV4L2DPB(dpb, &ref_surfaces);
1980 dec_surface->SetReferenceSurfaces(ref_surfaces);
1981
1982 return true;
1983 }
1984
1985 bool V4L2SliceVideoDecodeAccelerator::V4L2H264Accelerator::SubmitSlice(
1986 const media::H264PPS* pps,
1987 const media::H264SliceHeader* slice_hdr,
1988 const H264Picture::Vector& ref_pic_list0,
1989 const H264Picture::Vector& ref_pic_list1,
1990 const scoped_refptr<H264Picture>& pic,
1991 const uint8_t* data,
1992 size_t size) {
1993 if (num_slices_ == kMaxSlices) {
1994 LOGF(ERROR) << "Over limit of supported slices per frame";
1995 return false;
1996 }
1997
1998 struct v4l2_ctrl_h264_slice_param& v4l2_slice_param =
1999 v4l2_slice_params_[num_slices_++];
2000 memset(&v4l2_slice_param, 0, sizeof(v4l2_slice_param));
2001
2002 v4l2_slice_param.size = size;
2003 #define SHDR_TO_V4L2SPARM(a) v4l2_slice_param.a = slice_hdr->a
2004 SHDR_TO_V4L2SPARM(header_bit_size);
2005 SHDR_TO_V4L2SPARM(first_mb_in_slice);
2006 SHDR_TO_V4L2SPARM(slice_type);
2007 SHDR_TO_V4L2SPARM(pic_parameter_set_id);
2008 SHDR_TO_V4L2SPARM(colour_plane_id);
2009 SHDR_TO_V4L2SPARM(frame_num);
2010 SHDR_TO_V4L2SPARM(idr_pic_id);
2011 SHDR_TO_V4L2SPARM(pic_order_cnt_lsb);
2012 SHDR_TO_V4L2SPARM(delta_pic_order_cnt_bottom);
2013 SHDR_TO_V4L2SPARM(delta_pic_order_cnt0);
2014 SHDR_TO_V4L2SPARM(delta_pic_order_cnt1);
2015 SHDR_TO_V4L2SPARM(redundant_pic_cnt);
2016 SHDR_TO_V4L2SPARM(dec_ref_pic_marking_bit_size);
2017 SHDR_TO_V4L2SPARM(cabac_init_idc);
2018 SHDR_TO_V4L2SPARM(slice_qp_delta);
2019 SHDR_TO_V4L2SPARM(slice_qs_delta);
2020 SHDR_TO_V4L2SPARM(disable_deblocking_filter_idc);
2021 SHDR_TO_V4L2SPARM(slice_alpha_c0_offset_div2);
2022 SHDR_TO_V4L2SPARM(slice_beta_offset_div2);
2023 SHDR_TO_V4L2SPARM(num_ref_idx_l0_active_minus1);
2024 SHDR_TO_V4L2SPARM(num_ref_idx_l1_active_minus1);
2025 SHDR_TO_V4L2SPARM(pic_order_cnt_bit_size);
2026 #undef SHDR_TO_V4L2SPARM
2027
2028 #define SET_V4L2_SPARM_FLAG_IF(cond, flag) \
2029 v4l2_slice_param.flags |= ((slice_hdr->cond) ? (flag) : 0)
2030 SET_V4L2_SPARM_FLAG_IF(field_pic_flag, V4L2_SLICE_FLAG_FIELD_PIC);
2031 SET_V4L2_SPARM_FLAG_IF(bottom_field_flag, V4L2_SLICE_FLAG_BOTTOM_FIELD);
2032 SET_V4L2_SPARM_FLAG_IF(direct_spatial_mv_pred_flag,
2033 V4L2_SLICE_FLAG_DIRECT_SPATIAL_MV_PRED);
2034 SET_V4L2_SPARM_FLAG_IF(sp_for_switch_flag, V4L2_SLICE_FLAG_SP_FOR_SWITCH);
2035 #undef SET_V4L2_SPARM_FLAG_IF
2036
2037 struct v4l2_h264_pred_weight_table* pred_weight_table =
2038 &v4l2_slice_param.pred_weight_table;
2039
2040 if (((slice_hdr->IsPSlice() || slice_hdr->IsSPSlice()) &&
2041 pps->weighted_pred_flag) ||
2042 (slice_hdr->IsBSlice() && pps->weighted_bipred_idc == 1)) {
2043 pred_weight_table->luma_log2_weight_denom =
2044 slice_hdr->luma_log2_weight_denom;
2045 pred_weight_table->chroma_log2_weight_denom =
2046 slice_hdr->chroma_log2_weight_denom;
2047
2048 struct v4l2_h264_weight_factors* factorsl0 =
2049 &pred_weight_table->weight_factors[0];
2050
2051 for (int i = 0; i < 32; ++i) {
2052 factorsl0->luma_weight[i] =
2053 slice_hdr->pred_weight_table_l0.luma_weight[i];
2054 factorsl0->luma_offset[i] =
2055 slice_hdr->pred_weight_table_l0.luma_offset[i];
2056
2057 for (int j = 0; j < 2; ++j) {
2058 factorsl0->chroma_weight[i][j] =
2059 slice_hdr->pred_weight_table_l0.chroma_weight[i][j];
2060 factorsl0->chroma_offset[i][j] =
2061 slice_hdr->pred_weight_table_l0.chroma_offset[i][j];
2062 }
2063 }
2064
2065 if (slice_hdr->IsBSlice()) {
2066 struct v4l2_h264_weight_factors* factorsl1 =
2067 &pred_weight_table->weight_factors[1];
2068
2069 for (int i = 0; i < 32; ++i) {
2070 factorsl1->luma_weight[i] =
2071 slice_hdr->pred_weight_table_l1.luma_weight[i];
2072 factorsl1->luma_offset[i] =
2073 slice_hdr->pred_weight_table_l1.luma_offset[i];
2074
2075 for (int j = 0; j < 2; ++j) {
2076 factorsl1->chroma_weight[i][j] =
2077 slice_hdr->pred_weight_table_l1.chroma_weight[i][j];
2078 factorsl1->chroma_offset[i][j] =
2079 slice_hdr->pred_weight_table_l1.chroma_offset[i][j];
2080 }
2081 }
2082 }
2083 }
2084
2085 H264PictureListToDPBIndicesList(ref_pic_list0,
2086 v4l2_slice_param.ref_pic_list0);
2087 H264PictureListToDPBIndicesList(ref_pic_list1,
2088 v4l2_slice_param.ref_pic_list1);
2089
2090 scoped_refptr<V4L2DecodeSurface> dec_surface =
2091 H264PictureToV4L2DecodeSurface(pic);
2092
2093 v4l2_decode_param_.nal_ref_idc = slice_hdr->nal_ref_idc;
2094
2095 // TODO(posciak): Don't add start code back here, but have it passed from
2096 // the parser.
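// Until then, prepend an Annex B start code (00 00 01) before the slice
// payload in the copy below.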
2097 size_t data_copy_size = size + 3;
2098 std::unique_ptr<uint8_t[]> data_copy(new uint8_t[data_copy_size]);
2099 memset(data_copy.get(), 0, data_copy_size);
2100 data_copy[2] = 0x01;
2101 memcpy(data_copy.get() + 3, data, size);
2102 return v4l2_dec_->SubmitSlice(dec_surface->input_record(), data_copy.get(),
2103 data_copy_size);
2104 }
2105
2106 bool V4L2SliceVideoDecodeAccelerator::SubmitSlice(int index,
2107 const uint8_t* data,
2108 size_t size) {
2109 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
2110
2111 InputRecord& input_record = input_buffer_map_[index];
2112
2113 if (input_record.bytes_used + size > input_record.length) {
2114 DVLOGF(1) << "Input buffer too small";
2115 return false;
2116 }
2117
2118 memcpy(static_cast<uint8_t*>(input_record.address) + input_record.bytes_used,
2119 data, size);
2120 input_record.bytes_used += size;
2121
2122 return true;
2123 }
2124
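// Submit codec-specific controls to the driver. A non-zero config_store is
// required; it ties the controls to the surface they were prepared for.
// Typical use (a sketch, mirroring the accelerator code in this file):
//   struct v4l2_ext_controls ext_ctrls;
//   memset(&ext_ctrls, 0, sizeof(ext_ctrls));
//   ext_ctrls.count = ctrls.size();
//   ext_ctrls.controls = &ctrls[0];
//   ext_ctrls.config_store = dec_surface->config_store();
//   SubmitExtControls(&ext_ctrls);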
2125 bool V4L2SliceVideoDecodeAccelerator::SubmitExtControls(
2126 struct v4l2_ext_controls* ext_ctrls) {
2127 DCHECK_GT(ext_ctrls->config_store, 0u);
2128 IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_S_EXT_CTRLS, ext_ctrls);
2129 return true;
2130 }
2131
2132 bool V4L2SliceVideoDecodeAccelerator::V4L2H264Accelerator::SubmitDecode(
2133 const scoped_refptr<H264Picture>& pic) {
2134 scoped_refptr<V4L2DecodeSurface> dec_surface =
2135 H264PictureToV4L2DecodeSurface(pic);
2136
2137 v4l2_decode_param_.num_slices = num_slices_;
2138 v4l2_decode_param_.idr_pic_flag = pic->idr;
2139 v4l2_decode_param_.top_field_order_cnt = pic->top_field_order_cnt;
2140 v4l2_decode_param_.bottom_field_order_cnt = pic->bottom_field_order_cnt;
2141
2142 struct v4l2_ext_control ctrl;
2143 std::vector<struct v4l2_ext_control> ctrls;
2144
2145 memset(&ctrl, 0, sizeof(ctrl));
2146 ctrl.id = V4L2_CID_MPEG_VIDEO_H264_SLICE_PARAM;
2147 ctrl.size = sizeof(v4l2_slice_params_);
2148 ctrl.p_h264_slice_param = v4l2_slice_params_;
2149 ctrls.push_back(ctrl);
2150
2151 memset(&ctrl, 0, sizeof(ctrl));
2152 ctrl.id = V4L2_CID_MPEG_VIDEO_H264_DECODE_PARAM;
2153 ctrl.size = sizeof(v4l2_decode_param_);
2154 ctrl.p_h264_decode_param = &v4l2_decode_param_;
2155 ctrls.push_back(ctrl);
2156
2157 struct v4l2_ext_controls ext_ctrls;
2158 memset(&ext_ctrls, 0, sizeof(ext_ctrls));
2159 ext_ctrls.count = ctrls.size();
2160 ext_ctrls.controls = &ctrls[0];
2161 ext_ctrls.config_store = dec_surface->config_store();
2162 v4l2_dec_->SubmitExtControls(&ext_ctrls);
2163
2164 Reset();
2165
2166 v4l2_dec_->DecodeSurface(dec_surface);
2167 return true;
2168 }
2169
2170 bool V4L2SliceVideoDecodeAccelerator::V4L2H264Accelerator::OutputPicture(
2171 const scoped_refptr<H264Picture>& pic) {
2172 scoped_refptr<V4L2DecodeSurface> dec_surface =
2173 H264PictureToV4L2DecodeSurface(pic);
2174 v4l2_dec_->SurfaceReady(dec_surface);
2175 return true;
2176 }
2177
2178 void V4L2SliceVideoDecodeAccelerator::V4L2H264Accelerator::Reset() {
2179 num_slices_ = 0;
2180 memset(&v4l2_decode_param_, 0, sizeof(v4l2_decode_param_));
2181 memset(&v4l2_slice_params_, 0, sizeof(v4l2_slice_params_));
2182 }
2183
2184 scoped_refptr<V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface>
2185 V4L2SliceVideoDecodeAccelerator::V4L2H264Accelerator::
2186 H264PictureToV4L2DecodeSurface(const scoped_refptr<H264Picture>& pic) {
2187 V4L2H264Picture* v4l2_pic = pic->AsV4L2H264Picture();
2188 CHECK(v4l2_pic);
2189 return v4l2_pic->dec_surface();
2190 }
2191
2192 V4L2SliceVideoDecodeAccelerator::V4L2VP8Accelerator::V4L2VP8Accelerator(
2193 V4L2SliceVideoDecodeAccelerator* v4l2_dec)
2194 : v4l2_dec_(v4l2_dec) {
2195 DCHECK(v4l2_dec_);
2196 }
2197
2198 V4L2SliceVideoDecodeAccelerator::V4L2VP8Accelerator::~V4L2VP8Accelerator() {
2199 }
2200
2201 scoped_refptr<VP8Picture>
2202 V4L2SliceVideoDecodeAccelerator::V4L2VP8Accelerator::CreateVP8Picture() {
2203 scoped_refptr<V4L2DecodeSurface> dec_surface = v4l2_dec_->CreateSurface();
2204 if (!dec_surface)
2205 return nullptr;
2206
2207 return new V4L2VP8Picture(dec_surface);
2208 }
2209
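// memcpy() between two arrays, with a compile-time check that both have the
// same size in bytes.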
2210 #define ARRAY_MEMCPY_CHECKED(to, from) \
2211 do { \
2212 static_assert(sizeof(to) == sizeof(from), \
2213 #from " and " #to " arrays must be of same size"); \
2214 memcpy(to, from, sizeof(to)); \
2215 } while (0)
2216
2217 static void FillV4L2SegmentationHeader(
2218 const media::Vp8SegmentationHeader& vp8_sgmnt_hdr,
2219 struct v4l2_vp8_sgmnt_hdr* v4l2_sgmnt_hdr) {
2220 #define SET_V4L2_SGMNT_HDR_FLAG_IF(cond, flag) \
2221 v4l2_sgmnt_hdr->flags |= ((vp8_sgmnt_hdr.cond) ? (flag) : 0)
2222 SET_V4L2_SGMNT_HDR_FLAG_IF(segmentation_enabled,
2223 V4L2_VP8_SEGMNT_HDR_FLAG_ENABLED);
2224 SET_V4L2_SGMNT_HDR_FLAG_IF(update_mb_segmentation_map,
2225 V4L2_VP8_SEGMNT_HDR_FLAG_UPDATE_MAP);
2226 SET_V4L2_SGMNT_HDR_FLAG_IF(update_segment_feature_data,
2227 V4L2_VP8_SEGMNT_HDR_FLAG_UPDATE_FEATURE_DATA);
2228 #undef SET_V4L2_SGMNT_HDR_FLAG_IF
2229 v4l2_sgmnt_hdr->segment_feature_mode = vp8_sgmnt_hdr.segment_feature_mode;
2230
2231 ARRAY_MEMCPY_CHECKED(v4l2_sgmnt_hdr->quant_update,
2232 vp8_sgmnt_hdr.quantizer_update_value);
2233 ARRAY_MEMCPY_CHECKED(v4l2_sgmnt_hdr->lf_update,
2234 vp8_sgmnt_hdr.lf_update_value);
2235 ARRAY_MEMCPY_CHECKED(v4l2_sgmnt_hdr->segment_probs,
2236 vp8_sgmnt_hdr.segment_prob);
2237 }
2238
2239 static void FillV4L2LoopfilterHeader(
2240 const media::Vp8LoopFilterHeader& vp8_loopfilter_hdr,
2241 struct v4l2_vp8_loopfilter_hdr* v4l2_lf_hdr) {
2242 #define SET_V4L2_LF_HDR_FLAG_IF(cond, flag) \
2243 v4l2_lf_hdr->flags |= ((vp8_loopfilter_hdr.cond) ? (flag) : 0)
2244 SET_V4L2_LF_HDR_FLAG_IF(loop_filter_adj_enable, V4L2_VP8_LF_HDR_ADJ_ENABLE);
2245 SET_V4L2_LF_HDR_FLAG_IF(mode_ref_lf_delta_update,
2246 V4L2_VP8_LF_HDR_DELTA_UPDATE);
2247 #undef SET_V4L2_LF_HDR_FLAG_IF
2248
2249 #define LF_HDR_TO_V4L2_LF_HDR(a) v4l2_lf_hdr->a = vp8_loopfilter_hdr.a;
2250 LF_HDR_TO_V4L2_LF_HDR(type);
2251 LF_HDR_TO_V4L2_LF_HDR(level);
2252 LF_HDR_TO_V4L2_LF_HDR(sharpness_level);
2253 #undef LF_HDR_TO_V4L2_LF_HDR
2254
2255 ARRAY_MEMCPY_CHECKED(v4l2_lf_hdr->ref_frm_delta_magnitude,
2256 vp8_loopfilter_hdr.ref_frame_delta);
2257 ARRAY_MEMCPY_CHECKED(v4l2_lf_hdr->mb_mode_delta_magnitude,
2258 vp8_loopfilter_hdr.mb_mode_delta);
2259 }
2260
2261 static void FillV4L2QuantizationHeader(
2262 const media::Vp8QuantizationHeader& vp8_quant_hdr,
2263 struct v4l2_vp8_quantization_hdr* v4l2_quant_hdr) {
2264 v4l2_quant_hdr->y_ac_qi = vp8_quant_hdr.y_ac_qi;
2265 v4l2_quant_hdr->y_dc_delta = vp8_quant_hdr.y_dc_delta;
2266 v4l2_quant_hdr->y2_dc_delta = vp8_quant_hdr.y2_dc_delta;
2267 v4l2_quant_hdr->y2_ac_delta = vp8_quant_hdr.y2_ac_delta;
2268 v4l2_quant_hdr->uv_dc_delta = vp8_quant_hdr.uv_dc_delta;
2269 v4l2_quant_hdr->uv_ac_delta = vp8_quant_hdr.uv_ac_delta;
2270 }
2271
2272 static void FillV4L2EntropyHeader(
2273 const media::Vp8EntropyHeader& vp8_entropy_hdr,
2274 struct v4l2_vp8_entropy_hdr* v4l2_entropy_hdr) {
2275 ARRAY_MEMCPY_CHECKED(v4l2_entropy_hdr->coeff_probs,
2276 vp8_entropy_hdr.coeff_probs);
2277 ARRAY_MEMCPY_CHECKED(v4l2_entropy_hdr->y_mode_probs,
2278 vp8_entropy_hdr.y_mode_probs);
2279 ARRAY_MEMCPY_CHECKED(v4l2_entropy_hdr->uv_mode_probs,
2280 vp8_entropy_hdr.uv_mode_probs);
2281 ARRAY_MEMCPY_CHECKED(v4l2_entropy_hdr->mv_probs,
2282 vp8_entropy_hdr.mv_probs);
2283 }
2284
2285 bool V4L2SliceVideoDecodeAccelerator::V4L2VP8Accelerator::SubmitDecode(
2286 const scoped_refptr<VP8Picture>& pic,
2287 const media::Vp8FrameHeader* frame_hdr,
2288 const scoped_refptr<VP8Picture>& last_frame,
2289 const scoped_refptr<VP8Picture>& golden_frame,
2290 const scoped_refptr<VP8Picture>& alt_frame) {
2291 struct v4l2_ctrl_vp8_frame_hdr v4l2_frame_hdr;
2292 memset(&v4l2_frame_hdr, 0, sizeof(v4l2_frame_hdr));
2293
2294 #define FHDR_TO_V4L2_FHDR(a) v4l2_frame_hdr.a = frame_hdr->a
2295 FHDR_TO_V4L2_FHDR(key_frame);
2296 FHDR_TO_V4L2_FHDR(version);
2297 FHDR_TO_V4L2_FHDR(width);
2298 FHDR_TO_V4L2_FHDR(horizontal_scale);
2299 FHDR_TO_V4L2_FHDR(height);
2300 FHDR_TO_V4L2_FHDR(vertical_scale);
2301 FHDR_TO_V4L2_FHDR(sign_bias_golden);
2302 FHDR_TO_V4L2_FHDR(sign_bias_alternate);
2303 FHDR_TO_V4L2_FHDR(prob_skip_false);
2304 FHDR_TO_V4L2_FHDR(prob_intra);
2305 FHDR_TO_V4L2_FHDR(prob_last);
2306 FHDR_TO_V4L2_FHDR(prob_gf);
2307 FHDR_TO_V4L2_FHDR(bool_dec_range);
2308 FHDR_TO_V4L2_FHDR(bool_dec_value);
2309 FHDR_TO_V4L2_FHDR(bool_dec_count);
2310 #undef FHDR_TO_V4L2_FHDR
2311
2312 #define SET_V4L2_FRM_HDR_FLAG_IF(cond, flag) \
2313 v4l2_frame_hdr.flags |= ((frame_hdr->cond) ? (flag) : 0)
2314 SET_V4L2_FRM_HDR_FLAG_IF(is_experimental,
2315 V4L2_VP8_FRAME_HDR_FLAG_EXPERIMENTAL);
2316 SET_V4L2_FRM_HDR_FLAG_IF(show_frame, V4L2_VP8_FRAME_HDR_FLAG_SHOW_FRAME);
2317 SET_V4L2_FRM_HDR_FLAG_IF(mb_no_skip_coeff,
2318 V4L2_VP8_FRAME_HDR_FLAG_MB_NO_SKIP_COEFF);
2319 #undef SET_V4L2_FRM_HDR_FLAG_IF
2320
2321 FillV4L2SegmentationHeader(frame_hdr->segmentation_hdr,
2322 &v4l2_frame_hdr.sgmnt_hdr);
2323
2324 FillV4L2LoopfilterHeader(frame_hdr->loopfilter_hdr, &v4l2_frame_hdr.lf_hdr);
2325
2326 FillV4L2QuantizationHeader(frame_hdr->quantization_hdr,
2327 &v4l2_frame_hdr.quant_hdr);
2328
2329 FillV4L2EntropyHeader(frame_hdr->entropy_hdr, &v4l2_frame_hdr.entropy_hdr);
2330
2331 v4l2_frame_hdr.first_part_size =
2332 base::checked_cast<__u32>(frame_hdr->first_part_size);
2333 v4l2_frame_hdr.first_part_offset =
2334 base::checked_cast<__u32>(frame_hdr->first_part_offset);
2335 v4l2_frame_hdr.macroblock_bit_offset =
2336 base::checked_cast<__u32>(frame_hdr->macroblock_bit_offset);
2337 v4l2_frame_hdr.num_dct_parts = frame_hdr->num_of_dct_partitions;
2338
2339 static_assert(arraysize(v4l2_frame_hdr.dct_part_sizes) ==
2340 arraysize(frame_hdr->dct_partition_sizes),
2341 "DCT partition size arrays must have equal number of elements");
2342 for (size_t i = 0; i < frame_hdr->num_of_dct_partitions &&
2343 i < arraysize(v4l2_frame_hdr.dct_part_sizes); ++i)
2344 v4l2_frame_hdr.dct_part_sizes[i] = frame_hdr->dct_partition_sizes[i];
2345
2346 scoped_refptr<V4L2DecodeSurface> dec_surface =
2347 VP8PictureToV4L2DecodeSurface(pic);
2348 std::vector<scoped_refptr<V4L2DecodeSurface>> ref_surfaces;
2349
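// For each of the last/golden/alt reference frames, record the output buffer
// index of its surface (or VIDEO_MAX_FRAME if that reference is absent) and
// keep the surface as a reference for this frame.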
2350 if (last_frame) {
2351 scoped_refptr<V4L2DecodeSurface> last_frame_surface =
2352 VP8PictureToV4L2DecodeSurface(last_frame);
2353 v4l2_frame_hdr.last_frame = last_frame_surface->output_record();
2354 ref_surfaces.push_back(last_frame_surface);
2355 } else {
2356 v4l2_frame_hdr.last_frame = VIDEO_MAX_FRAME;
2357 }
2358
2359 if (golden_frame) {
2360 scoped_refptr<V4L2DecodeSurface> golden_frame_surface =
2361 VP8PictureToV4L2DecodeSurface(golden_frame);
2362 v4l2_frame_hdr.golden_frame = golden_frame_surface->output_record();
2363 ref_surfaces.push_back(golden_frame_surface);
2364 } else {
2365 v4l2_frame_hdr.golden_frame = VIDEO_MAX_FRAME;
2366 }
2367
2368 if (alt_frame) {
2369 scoped_refptr<V4L2DecodeSurface> alt_frame_surface =
2370 VP8PictureToV4L2DecodeSurface(alt_frame);
2371 v4l2_frame_hdr.alt_frame = alt_frame_surface->output_record();
2372 ref_surfaces.push_back(alt_frame_surface);
2373 } else {
2374 v4l2_frame_hdr.alt_frame = VIDEO_MAX_FRAME;
2375 }
2376
2377 struct v4l2_ext_control ctrl;
2378 memset(&ctrl, 0, sizeof(ctrl));
2379 ctrl.id = V4L2_CID_MPEG_VIDEO_VP8_FRAME_HDR;
2380 ctrl.size = sizeof(v4l2_frame_hdr);
2381 ctrl.p_vp8_frame_hdr = &v4l2_frame_hdr;
2382
2383 struct v4l2_ext_controls ext_ctrls;
2384 memset(&ext_ctrls, 0, sizeof(ext_ctrls));
2385 ext_ctrls.count = 1;
2386 ext_ctrls.controls = &ctrl;
2387 ext_ctrls.config_store = dec_surface->config_store();
2388
2389 if (!v4l2_dec_->SubmitExtControls(&ext_ctrls))
2390 return false;
2391
2392 dec_surface->SetReferenceSurfaces(ref_surfaces);
2393
2394 if (!v4l2_dec_->SubmitSlice(dec_surface->input_record(), frame_hdr->data,
2395 frame_hdr->frame_size))
2396 return false;
2397
2398 v4l2_dec_->DecodeSurface(dec_surface);
2399 return true;
2400 }
2401
2402 bool V4L2SliceVideoDecodeAccelerator::V4L2VP8Accelerator::OutputPicture(
2403 const scoped_refptr<VP8Picture>& pic) {
2404 scoped_refptr<V4L2DecodeSurface> dec_surface =
2405 VP8PictureToV4L2DecodeSurface(pic);
2406
2407 v4l2_dec_->SurfaceReady(dec_surface);
2408 return true;
2409 }
2410
2411 scoped_refptr<V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface>
2412 V4L2SliceVideoDecodeAccelerator::V4L2VP8Accelerator::
2413 VP8PictureToV4L2DecodeSurface(const scoped_refptr<VP8Picture>& pic) {
2414 V4L2VP8Picture* v4l2_pic = pic->AsV4L2VP8Picture();
2415 CHECK(v4l2_pic);
2416 return v4l2_pic->dec_surface();
2417 }
2418
2419 void V4L2SliceVideoDecodeAccelerator::DecodeSurface(
2420 const scoped_refptr<V4L2DecodeSurface>& dec_surface) {
2421 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
2422
2423 DVLOGF(3) << "Submitting decode for surface: " << dec_surface->ToString();
2424 Enqueue(dec_surface);
2425 }
2426
2427 void V4L2SliceVideoDecodeAccelerator::SurfaceReady(
2428 const scoped_refptr<V4L2DecodeSurface>& dec_surface) {
2429 DVLOGF(3);
2430 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
2431
2432 decoder_display_queue_.push(dec_surface);
2433 TryOutputSurfaces();
2434 }
2435
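// Output surfaces from the display queue in order, stopping at the first one
// that has not finished decoding yet, to preserve display order.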
2436 void V4L2SliceVideoDecodeAccelerator::TryOutputSurfaces() {
2437 while (!decoder_display_queue_.empty()) {
2438 scoped_refptr<V4L2DecodeSurface> dec_surface =
2439 decoder_display_queue_.front();
2440
2441 if (!dec_surface->decoded())
2442 break;
2443
2444 decoder_display_queue_.pop();
2445 OutputSurface(dec_surface);
2446 }
2447 }
2448
2449 void V4L2SliceVideoDecodeAccelerator::OutputSurface(
2450 const scoped_refptr<V4L2DecodeSurface>& dec_surface) {
2451 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
2452
2453 OutputRecord& output_record =
2454 output_buffer_map_[dec_surface->output_record()];
2455
2456 bool inserted =
2457 surfaces_at_display_.insert(std::make_pair(output_record.picture_id,
2458 dec_surface)).second;
2459 DCHECK(inserted);
2460
2461 DCHECK(!output_record.at_client);
2462 DCHECK(!output_record.at_device);
2463 DCHECK_NE(output_record.egl_image, EGL_NO_IMAGE_KHR);
2464 DCHECK_NE(output_record.picture_id, -1);
2465 output_record.at_client = true;
2466
2467 // TODO(posciak): Use visible size from decoder here instead
2468 // (crbug.com/402760). Passing (0, 0) results in the client using the
2469 // visible size extracted from the container instead.
2470 media::Picture picture(output_record.picture_id, dec_surface->bitstream_id(),
2471 gfx::Rect(0, 0), false);
2472 DVLOGF(3) << dec_surface->ToString()
2473 << ", bitstream_id: " << picture.bitstream_buffer_id()
2474 << ", picture_id: " << picture.picture_buffer_id();
2475 pending_picture_ready_.push(PictureRecord(output_record.cleared, picture));
2476 SendPictureReady();
2477 output_record.cleared = true;
2478 }
2479
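// Pair a free input buffer with a free output buffer into a new decode
// surface for the current bitstream buffer, or return nullptr if either is
// not available.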
2480 scoped_refptr<V4L2SliceVideoDecodeAccelerator::V4L2DecodeSurface>
2481 V4L2SliceVideoDecodeAccelerator::CreateSurface() {
2482 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
2483 DCHECK_EQ(state_, kDecoding);
2484
2485 if (free_input_buffers_.empty() || free_output_buffers_.empty())
2486 return nullptr;
2487
2488 int input = free_input_buffers_.front();
2489 free_input_buffers_.pop_front();
2490 int output = free_output_buffers_.front();
2491 free_output_buffers_.pop_front();
2492
2493 InputRecord& input_record = input_buffer_map_[input];
2494 DCHECK_EQ(input_record.bytes_used, 0u);
2495 DCHECK_EQ(input_record.input_id, -1);
2496 DCHECK(decoder_current_bitstream_buffer_ != nullptr);
2497 input_record.input_id = decoder_current_bitstream_buffer_->input_id;
2498
2499 scoped_refptr<V4L2DecodeSurface> dec_surface = new V4L2DecodeSurface(
2500 decoder_current_bitstream_buffer_->input_id, input, output,
2501 base::Bind(&V4L2SliceVideoDecodeAccelerator::ReuseOutputBuffer,
2502 base::Unretained(this)));
2503
2504 DVLOGF(4) << "Created surface " << input << " -> " << output;
2505 return dec_surface;
2506 }
2507
2508 void V4L2SliceVideoDecodeAccelerator::SendPictureReady() {
2509 DVLOGF(3);
2510 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
2511 bool resetting_or_flushing = (decoder_resetting_ || decoder_flushing_);
2512 while (!pending_picture_ready_.empty()) {
2513 bool cleared = pending_picture_ready_.front().cleared;
2514 const media::Picture& picture = pending_picture_ready_.front().picture;
2515 if (cleared && picture_clearing_count_ == 0) {
2516 DVLOGF(4) << "Posting picture ready to decode task runner for: "
2517 << picture.picture_buffer_id();
2518 // This picture is cleared. It can be posted to a thread different than
2519 // the main GPU thread to reduce latency. This should be the case after
2520 // all pictures are cleared at the beginning.
2521 decode_task_runner_->PostTask(
2522 FROM_HERE,
2523 base::Bind(&Client::PictureReady, decode_client_, picture));
2524 pending_picture_ready_.pop();
2525 } else if (!cleared || resetting_or_flushing) {
2526 DVLOGF(3) << "cleared=" << pending_picture_ready_.front().cleared
2527 << ", decoder_resetting_=" << decoder_resetting_
2528 << ", decoder_flushing_=" << decoder_flushing_
2529 << ", picture_clearing_count_=" << picture_clearing_count_;
2530 DVLOGF(4) << "Posting picture ready to GPU for: "
2531 << picture.picture_buffer_id();
2532 // If the picture is not cleared, post it to the child thread because it
2533 // has to be cleared in the child thread. A picture only needs to be
2534 // cleared once. If the decoder is resetting or flushing, send all
2535 // pictures to ensure PictureReady arrive before reset or flush done.
2536 child_task_runner_->PostTaskAndReply(
2537 FROM_HERE, base::Bind(&Client::PictureReady, client_, picture),
2538 // Unretained is safe. If Client::PictureReady gets to run, |this| is
2539 // alive. Destroy() will wait for the decode thread to finish.
2540 base::Bind(&V4L2SliceVideoDecodeAccelerator::PictureCleared,
2541 base::Unretained(this)));
2542 picture_clearing_count_++;
2543 pending_picture_ready_.pop();
2544 } else {
2545 // This picture is cleared. But some pictures are about to be cleared on
2546 // the child thread. To preserve the order, do not send this until those
2547 // pictures are cleared.
2548 break;
2549 }
2550 }
2551 }
2552
2553 void V4L2SliceVideoDecodeAccelerator::PictureCleared() {
2554 DVLOGF(3) << "clearing count=" << picture_clearing_count_;
2555 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
2556 DCHECK_GT(picture_clearing_count_, 0);
2557 picture_clearing_count_--;
2558 SendPictureReady();
2559 }
2560
2561 bool V4L2SliceVideoDecodeAccelerator::TryToSetupDecodeOnSeparateThread(
2562 const base::WeakPtr<Client>& decode_client,
2563 const scoped_refptr<base::SingleThreadTaskRunner>& decode_task_runner) {
2564 decode_client_ = decode_client;
2565 decode_task_runner_ = decode_task_runner;
2566 return true;
2567 }
2568
2569 // static
2570 media::VideoDecodeAccelerator::SupportedProfiles
2571 V4L2SliceVideoDecodeAccelerator::GetSupportedProfiles() {
2572 scoped_refptr<V4L2Device> device = V4L2Device::Create(V4L2Device::kDecoder);
2573 if (!device)
2574 return SupportedProfiles();
2575
2576 return device->GetSupportedDecodeProfiles(arraysize(supported_input_fourccs_),
2577 supported_input_fourccs_);
2578 }
2579
2580 } // namespace content