Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(344)

Side by Side Diff: content/common/gpu/media/vaapi_video_decode_accelerator.cc

Issue 1882373004: Migrate content/common/gpu/media code to media/gpu (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Squash and rebase Created 4 years, 7 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
(Empty)
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "content/common/gpu/media/vaapi_video_decode_accelerator.h"
6
7 #include <string.h>
8
9 #include <memory>
10
11 #include "base/bind.h"
12 #include "base/logging.h"
13 #include "base/macros.h"
14 #include "base/metrics/histogram.h"
15 #include "base/stl_util.h"
16 #include "base/strings/string_util.h"
17 #include "base/synchronization/waitable_event.h"
18 #include "base/trace_event/trace_event.h"
19 #include "content/common/gpu/media/accelerated_video_decoder.h"
20 #include "content/common/gpu/media/h264_decoder.h"
21 #include "content/common/gpu/media/vaapi_picture.h"
22 #include "content/common/gpu/media/vp8_decoder.h"
23 #include "content/common/gpu/media/vp9_decoder.h"
24 #include "gpu/ipc/service/gpu_channel.h"
25 #include "media/base/bind_to_current_loop.h"
26 #include "media/video/picture.h"
27 #include "third_party/libva/va/va_dec_vp8.h"
28 #include "ui/gl/gl_bindings.h"
29 #include "ui/gl/gl_image.h"
30
31 namespace content {
32
namespace {
// UMA errors that the VaapiVideoDecodeAccelerator class reports.
enum VAVDADecoderFailure {
  VAAPI_ERROR = 0,
  // UMA requires that max must be greater than 1.
  VAVDA_DECODER_FAILURES_MAX = 2,
};
}  // namespace
41
// Records |failure| into the "Media.VAVDA.DecoderFailure" UMA enumeration
// histogram.
static void ReportToUMA(VAVDADecoderFailure failure) {
  UMA_HISTOGRAM_ENUMERATION("Media.VAVDA.DecoderFailure", failure,
                            VAVDA_DECODER_FAILURES_MAX);
}
46
// If |result| is false: logs |log|, notifies the client of |error_code| via
// NotifyError() (which also triggers Cleanup()), and returns |ret| from the
// enclosing function. |ret| may be left empty for functions returning void.
#define RETURN_AND_NOTIFY_ON_FAILURE(result, log, error_code, ret) \
  do {                                                             \
    if (!(result)) {                                               \
      LOG(ERROR) << log;                                           \
      NotifyError(error_code);                                     \
      return ret;                                                  \
    }                                                              \
  } while (0)
55
// Pairs an output VASurface with the id of the bitstream buffer being
// decoded into it. Thread-safe ref-counting because these objects are
// shared between the decoder thread and the owning message loop thread.
class VaapiVideoDecodeAccelerator::VaapiDecodeSurface
    : public base::RefCountedThreadSafe<VaapiDecodeSurface> {
 public:
  VaapiDecodeSurface(int32_t bitstream_id,
                     const scoped_refptr<VASurface>& va_surface);

  // Id of the bitstream buffer whose decode produces this surface.
  int32_t bitstream_id() const { return bitstream_id_; }
  // The underlying VA surface the hardware decodes into.
  scoped_refptr<VASurface> va_surface() { return va_surface_; }

 private:
  friend class base::RefCountedThreadSafe<VaapiDecodeSurface>;
  ~VaapiDecodeSurface();

  int32_t bitstream_id_;
  scoped_refptr<VASurface> va_surface_;
};
72
// Binds |va_surface| to |bitstream_id| for the lifetime of this object.
VaapiVideoDecodeAccelerator::VaapiDecodeSurface::VaapiDecodeSurface(
    int32_t bitstream_id,
    const scoped_refptr<VASurface>& va_surface)
    : bitstream_id_(bitstream_id), va_surface_(va_surface) {}

VaapiVideoDecodeAccelerator::VaapiDecodeSurface::~VaapiDecodeSurface() {
}
80
// H264Picture backed by a VaapiDecodeSurface, letting the shared H264Decoder
// carry VAAPI-specific state through its generic picture handles.
class VaapiH264Picture : public H264Picture {
 public:
  // NOTE(review): single-argument constructor; could be marked explicit —
  // left unchanged to preserve the existing interface.
  VaapiH264Picture(const scoped_refptr<
      VaapiVideoDecodeAccelerator::VaapiDecodeSurface>& dec_surface);

  VaapiH264Picture* AsVaapiH264Picture() override { return this; }
  scoped_refptr<VaapiVideoDecodeAccelerator::VaapiDecodeSurface> dec_surface() {
    return dec_surface_;
  }

 private:
  ~VaapiH264Picture() override;

  scoped_refptr<VaapiVideoDecodeAccelerator::VaapiDecodeSurface> dec_surface_;

  DISALLOW_COPY_AND_ASSIGN(VaapiH264Picture);
};
98
// Holds a reference to |dec_surface| for the picture's lifetime.
VaapiH264Picture::VaapiH264Picture(const scoped_refptr<
    VaapiVideoDecodeAccelerator::VaapiDecodeSurface>& dec_surface)
    : dec_surface_(dec_surface) {
}

VaapiH264Picture::~VaapiH264Picture() {
}
106
// VAAPI backend for the shared H264Decoder: translates parsed H.264 stream
// structures (SPS/PPS/slices/DPB) into libva buffers and submits them via
// |vaapi_wrapper_|. Does not own |vaapi_dec_| or |vaapi_wrapper_|.
class VaapiVideoDecodeAccelerator::VaapiH264Accelerator
    : public H264Decoder::H264Accelerator {
 public:
  VaapiH264Accelerator(VaapiVideoDecodeAccelerator* vaapi_dec,
                       VaapiWrapper* vaapi_wrapper);
  ~VaapiH264Accelerator() override;

  // H264Decoder::H264Accelerator implementation.
  scoped_refptr<H264Picture> CreateH264Picture() override;

  bool SubmitFrameMetadata(const media::H264SPS* sps,
                           const media::H264PPS* pps,
                           const H264DPB& dpb,
                           const H264Picture::Vector& ref_pic_listp0,
                           const H264Picture::Vector& ref_pic_listb0,
                           const H264Picture::Vector& ref_pic_listb1,
                           const scoped_refptr<H264Picture>& pic) override;

  bool SubmitSlice(const media::H264PPS* pps,
                   const media::H264SliceHeader* slice_hdr,
                   const H264Picture::Vector& ref_pic_list0,
                   const H264Picture::Vector& ref_pic_list1,
                   const scoped_refptr<H264Picture>& pic,
                   const uint8_t* data,
                   size_t size) override;

  bool SubmitDecode(const scoped_refptr<H264Picture>& pic) override;
  bool OutputPicture(const scoped_refptr<H264Picture>& pic) override;

  void Reset() override;

 private:
  // Extracts the VaapiDecodeSurface stored in a VaapiH264Picture.
  scoped_refptr<VaapiDecodeSurface> H264PictureToVaapiDecodeSurface(
      const scoped_refptr<H264Picture>& pic);

  // Helpers filling libva VAPictureH264 structures from decoder state.
  void FillVAPicture(VAPictureH264* va_pic, scoped_refptr<H264Picture> pic);
  int FillVARefFramesFromDPB(const H264DPB& dpb,
                             VAPictureH264* va_pics,
                             int num_pics);

  VaapiWrapper* vaapi_wrapper_;
  VaapiVideoDecodeAccelerator* vaapi_dec_;

  DISALLOW_COPY_AND_ASSIGN(VaapiH264Accelerator);
};
152
// VP8Picture backed by a VaapiDecodeSurface; see VaapiH264Picture for the
// same pattern used for H.264.
class VaapiVP8Picture : public VP8Picture {
 public:
  // NOTE(review): single-argument constructor; could be marked explicit —
  // left unchanged to preserve the existing interface.
  VaapiVP8Picture(const scoped_refptr<
      VaapiVideoDecodeAccelerator::VaapiDecodeSurface>& dec_surface);

  VaapiVP8Picture* AsVaapiVP8Picture() override { return this; }
  scoped_refptr<VaapiVideoDecodeAccelerator::VaapiDecodeSurface> dec_surface() {
    return dec_surface_;
  }

 private:
  ~VaapiVP8Picture() override;

  scoped_refptr<VaapiVideoDecodeAccelerator::VaapiDecodeSurface> dec_surface_;

  DISALLOW_COPY_AND_ASSIGN(VaapiVP8Picture);
};
170
// Holds a reference to |dec_surface| for the picture's lifetime.
VaapiVP8Picture::VaapiVP8Picture(const scoped_refptr<
    VaapiVideoDecodeAccelerator::VaapiDecodeSurface>& dec_surface)
    : dec_surface_(dec_surface) {
}

VaapiVP8Picture::~VaapiVP8Picture() {
}
178
// VAAPI backend for the shared VP8Decoder: submits VP8 frame headers and
// reference frames to the hardware via |vaapi_wrapper_|. Does not own
// |vaapi_dec_| or |vaapi_wrapper_|.
class VaapiVideoDecodeAccelerator::VaapiVP8Accelerator
    : public VP8Decoder::VP8Accelerator {
 public:
  VaapiVP8Accelerator(VaapiVideoDecodeAccelerator* vaapi_dec,
                      VaapiWrapper* vaapi_wrapper);
  ~VaapiVP8Accelerator() override;

  // VP8Decoder::VP8Accelerator implementation.
  scoped_refptr<VP8Picture> CreateVP8Picture() override;

  bool SubmitDecode(const scoped_refptr<VP8Picture>& pic,
                    const media::Vp8FrameHeader* frame_hdr,
                    const scoped_refptr<VP8Picture>& last_frame,
                    const scoped_refptr<VP8Picture>& golden_frame,
                    const scoped_refptr<VP8Picture>& alt_frame) override;

  bool OutputPicture(const scoped_refptr<VP8Picture>& pic) override;

 private:
  // Extracts the VaapiDecodeSurface stored in a VaapiVP8Picture.
  scoped_refptr<VaapiDecodeSurface> VP8PictureToVaapiDecodeSurface(
      const scoped_refptr<VP8Picture>& pic);

  VaapiWrapper* vaapi_wrapper_;
  VaapiVideoDecodeAccelerator* vaapi_dec_;

  DISALLOW_COPY_AND_ASSIGN(VaapiVP8Accelerator);
};
206
// VP9Picture backed by a VaapiDecodeSurface; same pattern as the H.264 and
// VP8 picture wrappers above.
class VaapiVP9Picture : public VP9Picture {
 public:
  // NOTE(review): single-argument constructor; could be marked explicit —
  // left unchanged to preserve the existing interface.
  VaapiVP9Picture(
      const scoped_refptr<VaapiVideoDecodeAccelerator::VaapiDecodeSurface>&
          dec_surface);

  VaapiVP9Picture* AsVaapiVP9Picture() override { return this; }
  scoped_refptr<VaapiVideoDecodeAccelerator::VaapiDecodeSurface> dec_surface() {
    return dec_surface_;
  }

 private:
  ~VaapiVP9Picture() override;

  scoped_refptr<VaapiVideoDecodeAccelerator::VaapiDecodeSurface> dec_surface_;

  DISALLOW_COPY_AND_ASSIGN(VaapiVP9Picture);
};
225
// Holds a reference to |dec_surface| for the picture's lifetime.
VaapiVP9Picture::VaapiVP9Picture(
    const scoped_refptr<VaapiVideoDecodeAccelerator::VaapiDecodeSurface>&
        dec_surface)
    : dec_surface_(dec_surface) {}

VaapiVP9Picture::~VaapiVP9Picture() {}
232
// VAAPI backend for the shared VP9Decoder: submits VP9 frame parameters,
// segmentation/loop-filter state and reference pictures to the hardware via
// |vaapi_wrapper_|. Does not own |vaapi_dec_| or |vaapi_wrapper_|.
class VaapiVideoDecodeAccelerator::VaapiVP9Accelerator
    : public VP9Decoder::VP9Accelerator {
 public:
  VaapiVP9Accelerator(VaapiVideoDecodeAccelerator* vaapi_dec,
                      VaapiWrapper* vaapi_wrapper);
  ~VaapiVP9Accelerator() override;

  // VP9Decoder::VP9Accelerator implementation.
  scoped_refptr<VP9Picture> CreateVP9Picture() override;

  bool SubmitDecode(
      const scoped_refptr<VP9Picture>& pic,
      const media::Vp9Segmentation& seg,
      const media::Vp9LoopFilter& lf,
      const std::vector<scoped_refptr<VP9Picture>>& ref_pictures) override;

  bool OutputPicture(const scoped_refptr<VP9Picture>& pic) override;

 private:
  // Extracts the VaapiDecodeSurface stored in a VaapiVP9Picture.
  scoped_refptr<VaapiDecodeSurface> VP9PictureToVaapiDecodeSurface(
      const scoped_refptr<VP9Picture>& pic);

  VaapiWrapper* vaapi_wrapper_;
  VaapiVideoDecodeAccelerator* vaapi_dec_;

  DISALLOW_COPY_AND_ASSIGN(VaapiVP9Accelerator);
};
260
// InputBuffer pairs a mapped shared-memory region with its bitstream id;
// id 0 until assigned in MapAndQueueNewInputBuffer().
VaapiVideoDecodeAccelerator::InputBuffer::InputBuffer() : id(0) {}

VaapiVideoDecodeAccelerator::InputBuffer::~InputBuffer() {
}
265
// Reports |error| to the client and schedules teardown. Callable from the
// decoder thread, in which case it re-posts itself to |message_loop_| so
// that client notification and Cleanup() happen on the owning thread.
void VaapiVideoDecodeAccelerator::NotifyError(Error error) {
  if (message_loop_ != base::MessageLoop::current()) {
    // Called on the decoder thread; bounce over to the owning loop.
    DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
    message_loop_->PostTask(FROM_HERE, base::Bind(
        &VaapiVideoDecodeAccelerator::NotifyError, weak_this_, error));
    return;
  }

  // Post Cleanup() as a task so we don't recursively acquire lock_.
  message_loop_->PostTask(FROM_HERE, base::Bind(
      &VaapiVideoDecodeAccelerator::Cleanup, weak_this_));

  LOG(ERROR) << "Notifying of error " << error;
  if (client_) {
    client_->NotifyError(error);
    // Drop client weak pointers so no further callbacks reach it.
    client_ptr_factory_.reset();
  }
}
284
285 VaapiPicture* VaapiVideoDecodeAccelerator::PictureById(
286 int32_t picture_buffer_id) {
287 Pictures::iterator it = pictures_.find(picture_buffer_id);
288 if (it == pictures_.end()) {
289 LOG(ERROR) << "Picture id " << picture_buffer_id << " does not exist";
290 return NULL;
291 }
292
293 return it->second.get();
294 }
295
// Constructs the VDA in kUninitialized state on the current message loop.
// GL interaction happens exclusively through the two injected callbacks.
VaapiVideoDecodeAccelerator::VaapiVideoDecodeAccelerator(
    const MakeGLContextCurrentCallback& make_context_current_cb,
    const BindGLImageCallback& bind_image_cb)
    : state_(kUninitialized),
      input_ready_(&lock_),
      surfaces_available_(&lock_),
      message_loop_(base::MessageLoop::current()),
      decoder_thread_("VaapiDecoderThread"),
      num_frames_at_client_(0),
      num_stream_bufs_at_decoder_(0),
      finish_flush_pending_(false),
      awaiting_va_surfaces_recycle_(false),
      requested_num_pics_(0),
      make_context_current_cb_(make_context_current_cb),
      bind_image_cb_(bind_image_cb),
      weak_this_factory_(this) {
  weak_this_ = weak_this_factory_.GetWeakPtr();
  // BindToCurrentLoop ensures surfaces are recycled on this (owning) loop
  // regardless of which thread releases the last reference.
  va_surface_release_cb_ = media::BindToCurrentLoop(
      base::Bind(&VaapiVideoDecodeAccelerator::RecycleVASurfaceID, weak_this_));
}
316
// Destruction must happen on the same loop the object was created on.
VaapiVideoDecodeAccelerator::~VaapiVideoDecodeAccelerator() {
  DCHECK_EQ(message_loop_, base::MessageLoop::current());
}
320
// media::VideoDecodeAccelerator entry point. Validates |config|, creates
// the VaapiWrapper and the codec-specific accelerator/decoder pair for
// config.profile, then starts the decoder thread. Returns false on any
// unsupported configuration or VAAPI initialization failure (the client is
// not notified via NotifyError for Initialize failures).
bool VaapiVideoDecodeAccelerator::Initialize(const Config& config,
                                             Client* client) {
  DCHECK_EQ(message_loop_, base::MessageLoop::current());

  if (make_context_current_cb_.is_null() || bind_image_cb_.is_null()) {
    NOTREACHED() << "GL callbacks are required for this VDA";
    return false;
  }

  if (config.is_encrypted) {
    NOTREACHED() << "Encrypted streams are not supported for this VDA";
    return false;
  }

  if (config.output_mode != Config::OutputMode::ALLOCATE) {
    NOTREACHED() << "Only ALLOCATE OutputMode is supported by this VDA";
    return false;
  }

  client_ptr_factory_.reset(new base::WeakPtrFactory<Client>(client));
  client_ = client_ptr_factory_->GetWeakPtr();

  media::VideoCodecProfile profile = config.profile;

  base::AutoLock auto_lock(lock_);
  DCHECK_EQ(state_, kUninitialized);
  DVLOG(2) << "Initializing VAVDA, profile: " << profile;

// Output requires DesktopGL (GLX) on X11 and EGL/GLES2 on Ozone; refuse to
// initialize under any other GL implementation.
#if defined(USE_X11)
  if (gfx::GetGLImplementation() != gfx::kGLImplementationDesktopGL) {
    DVLOG(1) << "HW video decode acceleration not available without "
                "DesktopGL (GLX).";
    return false;
  }
#elif defined(USE_OZONE)
  if (gfx::GetGLImplementation() != gfx::kGLImplementationEGLGLES2) {
    DVLOG(1) << "HW video decode acceleration not available without "
             << "EGLGLES2.";
    return false;
  }
#endif  // USE_X11

  vaapi_wrapper_ = VaapiWrapper::CreateForVideoCodec(
      VaapiWrapper::kDecode, profile, base::Bind(&ReportToUMA, VAAPI_ERROR));

  if (!vaapi_wrapper_.get()) {
    DVLOG(1) << "Failed initializing VAAPI for profile " << profile;
    return false;
  }

  // Select the codec-specific accelerator/decoder pair by profile range.
  if (profile >= media::H264PROFILE_MIN && profile <= media::H264PROFILE_MAX) {
    h264_accelerator_.reset(
        new VaapiH264Accelerator(this, vaapi_wrapper_.get()));
    decoder_.reset(new H264Decoder(h264_accelerator_.get()));
  } else if (profile >= media::VP8PROFILE_MIN &&
             profile <= media::VP8PROFILE_MAX) {
    vp8_accelerator_.reset(new VaapiVP8Accelerator(this, vaapi_wrapper_.get()));
    decoder_.reset(new VP8Decoder(vp8_accelerator_.get()));
  } else if (profile >= media::VP9PROFILE_MIN &&
             profile <= media::VP9PROFILE_MAX) {
    vp9_accelerator_.reset(new VaapiVP9Accelerator(this, vaapi_wrapper_.get()));
    decoder_.reset(new VP9Decoder(vp9_accelerator_.get()));
  } else {
    DLOG(ERROR) << "Unsupported profile " << profile;
    return false;
  }

  CHECK(decoder_thread_.Start());
  decoder_thread_task_runner_ = decoder_thread_.task_runner();

  state_ = kIdle;
  return true;
}
394
// Downloads decoded |va_surface| contents into |picture|'s output storage
// and notifies the client that the corresponding picture buffer is ready.
// Runs on the owning message loop.
void VaapiVideoDecodeAccelerator::OutputPicture(
    const scoped_refptr<VASurface>& va_surface,
    int32_t input_id,
    VaapiPicture* picture) {
  DCHECK_EQ(message_loop_, base::MessageLoop::current());

  int32_t output_id = picture->picture_buffer_id();

  TRACE_EVENT2("Video Decoder", "VAVDA::OutputSurface",
               "input_id", input_id,
               "output_id", output_id);

  DVLOG(3) << "Outputting VASurface " << va_surface->id()
           << " into pixmap bound to picture buffer id " << output_id;

  RETURN_AND_NOTIFY_ON_FAILURE(picture->DownloadFromSurface(va_surface),
                               "Failed putting surface into pixmap",
                               PLATFORM_FAILURE, );

  // Notify the client a picture is ready to be displayed.
  ++num_frames_at_client_;
  TRACE_COUNTER1("Video Decoder", "Textures at client", num_frames_at_client_);
  DVLOG(4) << "Notifying output picture id " << output_id
           << " for input "<< input_id << " is ready";
  // TODO(posciak): Use visible size from decoder here instead
  // (crbug.com/402760). Passing (0, 0) results in the client using the
  // visible size extracted from the container instead.
  if (client_)
    client_->PictureReady(media::Picture(output_id, input_id,
                                         gfx::Rect(0, 0),
                                         picture->AllowOverlay()));
}
427
// Pairs the oldest pending output callback with the oldest free picture
// buffer, if both exist, and runs it. Completes a pending flush once the
// last pending output has been issued.
void VaapiVideoDecodeAccelerator::TryOutputSurface() {
  DCHECK_EQ(message_loop_, base::MessageLoop::current());

  // Handle Destroy() arriving while pictures are queued for output.
  if (!client_)
    return;

  if (pending_output_cbs_.empty() || output_buffers_.empty())
    return;

  OutputCB output_cb = pending_output_cbs_.front();
  pending_output_cbs_.pop();

  VaapiPicture* picture = PictureById(output_buffers_.front());
  DCHECK(picture);
  output_buffers_.pop();

  output_cb.Run(picture);

  if (finish_flush_pending_ && pending_output_cbs_.empty())
    FinishFlush();
}
450
451 void VaapiVideoDecodeAccelerator::MapAndQueueNewInputBuffer(
452 const media::BitstreamBuffer& bitstream_buffer) {
453 DCHECK_EQ(message_loop_, base::MessageLoop::current());
454 TRACE_EVENT1("Video Decoder", "MapAndQueueNewInputBuffer", "input_id",
455 bitstream_buffer.id());
456
457 DVLOG(4) << "Mapping new input buffer id: " << bitstream_buffer.id()
458 << " size: " << (int)bitstream_buffer.size();
459
460 std::unique_ptr<SharedMemoryRegion> shm(
461 new SharedMemoryRegion(bitstream_buffer, true));
462
463 // Skip empty buffers.
464 if (bitstream_buffer.size() == 0) {
465 if (client_)
466 client_->NotifyEndOfBitstreamBuffer(bitstream_buffer.id());
467 return;
468 }
469
470 RETURN_AND_NOTIFY_ON_FAILURE(shm->Map(), "Failed to map input buffer",
471 UNREADABLE_INPUT, );
472
473 base::AutoLock auto_lock(lock_);
474
475 // Set up a new input buffer and queue it for later.
476 linked_ptr<InputBuffer> input_buffer(new InputBuffer());
477 input_buffer->shm.reset(shm.release());
478 input_buffer->id = bitstream_buffer.id();
479
480 ++num_stream_bufs_at_decoder_;
481 TRACE_COUNTER1("Video Decoder", "Stream buffers at decoder",
482 num_stream_bufs_at_decoder_);
483
484 input_buffers_.push(input_buffer);
485 input_ready_.Signal();
486 }
487
// Makes the next input buffer current for the decoder, blocking on
// |input_ready_| while more input may still arrive. Returns true with
// |curr_input_buffer_| set (and the decoder's stream pointer updated), or
// false when decoding should stop (flush drained, reset/destroy). Must be
// called on the decoder thread with |lock_| held.
bool VaapiVideoDecodeAccelerator::GetInputBuffer_Locked() {
  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
  lock_.AssertAcquired();

  if (curr_input_buffer_.get())
    return true;

  // Will only wait if it is expected that in current state new buffers will
  // be queued from the client via Decode(). The state can change during wait.
  while (input_buffers_.empty() && (state_ == kDecoding || state_ == kIdle)) {
    input_ready_.Wait();
  }

  // We could have got woken up in a different state or never got to sleep
  // due to current state; check for that.
  switch (state_) {
    case kFlushing:
      // Here we are only interested in finishing up decoding buffers that are
      // already queued up. Otherwise will stop decoding.
      if (input_buffers_.empty())
        return false;
      // else fallthrough
    case kDecoding:
    case kIdle:
      DCHECK(!input_buffers_.empty());

      curr_input_buffer_ = input_buffers_.front();
      input_buffers_.pop();

      DVLOG(4) << "New current bitstream buffer, id: " << curr_input_buffer_->id
               << " size: " << curr_input_buffer_->shm->size();

      // Point the codec-specific decoder at the newly mapped stream data.
      decoder_->SetStream(
          static_cast<uint8_t*>(curr_input_buffer_->shm->memory()),
          curr_input_buffer_->shm->size());
      return true;

    default:
      // We got woken up due to being destroyed/reset, ignore any already
      // queued inputs.
      return false;
  }
}
531
// Releases the current input buffer and notifies the client (on the owning
// loop) that it has been consumed. Must be called on the decoder thread
// with |lock_| held and a current buffer set.
void VaapiVideoDecodeAccelerator::ReturnCurrInputBuffer_Locked() {
  lock_.AssertAcquired();
  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
  DCHECK(curr_input_buffer_.get());

  int32_t id = curr_input_buffer_->id;
  curr_input_buffer_.reset();
  DVLOG(4) << "End of input buffer " << id;
  message_loop_->PostTask(FROM_HERE, base::Bind(
      &Client::NotifyEndOfBitstreamBuffer, client_, id));

  --num_stream_bufs_at_decoder_;
  TRACE_COUNTER1("Video Decoder", "Stream buffers at decoder",
                 num_stream_bufs_at_decoder_);
}
547
548 // TODO(posciak): refactor the whole class to remove sleeping in wait for
549 // surfaces, and reschedule DecodeTask instead.
550 bool VaapiVideoDecodeAccelerator::WaitForSurfaces_Locked() {
551 lock_.AssertAcquired();
552 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
553
554 while (available_va_surfaces_.empty() &&
555 (state_ == kDecoding || state_ == kFlushing || state_ == kIdle)) {
556 surfaces_available_.Wait();
557 }
558
559 if (state_ != kDecoding && state_ != kFlushing && state_ != kIdle)
560 return false;
561
562 return true;
563 }
564
// Main decoder-thread loop: feeds queued input buffers to the codec
// decoder until input is exhausted, a new surface set is required, or an
// error/reset occurs. Reschedules itself implicitly via Decode() /
// AssignPictureBuffers() posting new DecodeTasks.
void VaapiVideoDecodeAccelerator::DecodeTask() {
  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
  TRACE_EVENT0("Video Decoder", "VAVDA::DecodeTask");
  base::AutoLock auto_lock(lock_);

  // A Reset()/Destroy() may have raced with this task; exit quietly.
  if (state_ != kDecoding)
    return;

  // Main decode task.
  DVLOG(4) << "Decode task";

  // Try to decode what stream data is (still) in the decoder until we run out
  // of it.
  while (GetInputBuffer_Locked()) {
    DCHECK(curr_input_buffer_.get());

    AcceleratedVideoDecoder::DecodeResult res;
    {
      // We are OK releasing the lock here, as decoder never calls our methods
      // directly and we will reacquire the lock before looking at state again.
      // This is the main decode function of the decoder and while keeping
      // the lock for its duration would be fine, it would defeat the purpose
      // of having a separate decoder thread.
      base::AutoUnlock auto_unlock(lock_);
      res = decoder_->Decode();
    }

    switch (res) {
      case AcceleratedVideoDecoder::kAllocateNewSurfaces:
        DVLOG(1) << "Decoder requesting a new set of surfaces";
        message_loop_->PostTask(FROM_HERE, base::Bind(
            &VaapiVideoDecodeAccelerator::InitiateSurfaceSetChange, weak_this_,
            decoder_->GetRequiredNumOfPictures(),
            decoder_->GetPicSize()));
        // We'll get rescheduled once ProvidePictureBuffers() finishes.
        return;

      case AcceleratedVideoDecoder::kRanOutOfStreamData:
        ReturnCurrInputBuffer_Locked();
        break;

      case AcceleratedVideoDecoder::kRanOutOfSurfaces:
        // No more output buffers in the decoder, try getting more or go to
        // sleep waiting for them.
        if (!WaitForSurfaces_Locked())
          return;

        break;

      case AcceleratedVideoDecoder::kDecodeError:
        RETURN_AND_NOTIFY_ON_FAILURE(false, "Error decoding stream",
                                     PLATFORM_FAILURE, );
        return;
    }
  }
}
621
// Begins switching to a new surface set of |num_pics| pictures of |size|
// (e.g. on a stream resolution change). Runs on the owning loop; the
// actual switch completes asynchronously in TryFinishSurfaceSetChange().
void VaapiVideoDecodeAccelerator::InitiateSurfaceSetChange(size_t num_pics,
                                                           gfx::Size size) {
  DCHECK_EQ(message_loop_, base::MessageLoop::current());
  DCHECK(!awaiting_va_surfaces_recycle_);

  // At this point decoder has stopped running and has already posted onto our
  // loop any remaining output request callbacks, which executed before we got
  // here. Some of them might have been pended though, because we might not
  // have had enough TFPictures to output surfaces to. Initiate a wait cycle,
  // which will wait for client to return enough PictureBuffers to us, so that
  // we can finish all pending output callbacks, releasing associated surfaces.
  DVLOG(1) << "Initiating surface set change";
  awaiting_va_surfaces_recycle_ = true;

  requested_num_pics_ = num_pics;
  requested_pic_size_ = size;

  TryFinishSurfaceSetChange();
}
641
// Completes a surface set change started by InitiateSurfaceSetChange() once
// every surface has been returned: destroys the old surfaces, dismisses all
// PictureBuffers, and requests a fresh set from the client. Re-posts itself
// while outputs/surfaces are still outstanding.
void VaapiVideoDecodeAccelerator::TryFinishSurfaceSetChange() {
  DCHECK_EQ(message_loop_, base::MessageLoop::current());

  if (!awaiting_va_surfaces_recycle_)
    return;

  if (!pending_output_cbs_.empty() ||
      pictures_.size() != available_va_surfaces_.size()) {
    // Either:
    // 1. Not all pending pending output callbacks have been executed yet.
    // Wait for the client to return enough pictures and retry later.
    // 2. The above happened and all surface release callbacks have been posted
    // as the result, but not all have executed yet. Post ourselves after them
    // to let them release surfaces.
    DVLOG(2) << "Awaiting pending output/surface release callbacks to finish";
    message_loop_->PostTask(FROM_HERE, base::Bind(
        &VaapiVideoDecodeAccelerator::TryFinishSurfaceSetChange, weak_this_));
    return;
  }

  // All surfaces released, destroy them and dismiss all PictureBuffers.
  awaiting_va_surfaces_recycle_ = false;
  available_va_surfaces_.clear();
  vaapi_wrapper_->DestroySurfaces();

  for (Pictures::iterator iter = pictures_.begin(); iter != pictures_.end();
       ++iter) {
    DVLOG(2) << "Dismissing picture id: " << iter->first;
    if (client_)
      client_->DismissPictureBuffer(iter->first);
  }
  pictures_.clear();

  // And ask for a new set as requested.
  DVLOG(1) << "Requesting " << requested_num_pics_ << " pictures of size: "
           << requested_pic_size_.ToString();

  message_loop_->PostTask(
      FROM_HERE,
      base::Bind(&Client::ProvidePictureBuffers, client_, requested_num_pics_,
                 1, requested_pic_size_, VaapiPicture::GetGLTextureTarget()));
}
684
// media::VideoDecodeAccelerator entry point. Validates, maps and queues
// |bitstream_buffer|, then kicks the decoder thread if it is idle. Buffers
// may also accumulate while resetting so after-seek data isn't lost.
void VaapiVideoDecodeAccelerator::Decode(
    const media::BitstreamBuffer& bitstream_buffer) {
  DCHECK_EQ(message_loop_, base::MessageLoop::current());

  TRACE_EVENT1("Video Decoder", "VAVDA::Decode", "Buffer id",
               bitstream_buffer.id());

  if (bitstream_buffer.id() < 0) {
    // Close the handle ourselves since the buffer is never adopted.
    if (base::SharedMemory::IsHandleValid(bitstream_buffer.handle()))
      base::SharedMemory::CloseHandle(bitstream_buffer.handle());
    LOG(ERROR) << "Invalid bitstream_buffer, id: " << bitstream_buffer.id();
    NotifyError(INVALID_ARGUMENT);
    return;
  }

  // We got a new input buffer from the client, map it and queue for later use.
  MapAndQueueNewInputBuffer(bitstream_buffer);

  base::AutoLock auto_lock(lock_);
  switch (state_) {
    case kIdle:
      state_ = kDecoding;
      decoder_thread_task_runner_->PostTask(
          FROM_HERE, base::Bind(&VaapiVideoDecodeAccelerator::DecodeTask,
                                base::Unretained(this)));
      break;

    case kDecoding:
      // Decoder already running, fallthrough.
    case kResetting:
      // When resetting, allow accumulating bitstream buffers, so that
      // the client can queue after-seek-buffers while we are finishing with
      // the before-seek one.
      break;

    default:
      RETURN_AND_NOTIFY_ON_FAILURE(false,
          "Decode request from client in invalid state: " << state_,
          PLATFORM_FAILURE, );
      break;
  }
}
727
// Returns |va_surface_id| to the free pool and wakes any decoder-thread
// waiter blocked in WaitForSurfaces_Locked(). Bound (via
// |va_surface_release_cb_|) to run on the owning loop.
void VaapiVideoDecodeAccelerator::RecycleVASurfaceID(
    VASurfaceID va_surface_id) {
  DCHECK_EQ(message_loop_, base::MessageLoop::current());
  base::AutoLock auto_lock(lock_);

  available_va_surfaces_.push_back(va_surface_id);
  surfaces_available_.Signal();
}
736
737 void VaapiVideoDecodeAccelerator::AssignPictureBuffers(
738 const std::vector<media::PictureBuffer>& buffers) {
739 DCHECK_EQ(message_loop_, base::MessageLoop::current());
740
741 base::AutoLock auto_lock(lock_);
742 DCHECK(pictures_.empty());
743
744 while (!output_buffers_.empty())
745 output_buffers_.pop();
746
747 RETURN_AND_NOTIFY_ON_FAILURE(
748 buffers.size() >= requested_num_pics_,
749 "Got an invalid number of picture buffers. (Got " << buffers.size()
750 << ", requested " << requested_num_pics_ << ")", INVALID_ARGUMENT, );
751 DCHECK(requested_pic_size_ == buffers[0].size());
752
753 std::vector<VASurfaceID> va_surface_ids;
754 RETURN_AND_NOTIFY_ON_FAILURE(
755 vaapi_wrapper_->CreateSurfaces(VA_RT_FORMAT_YUV420, requested_pic_size_,
756 buffers.size(), &va_surface_ids),
757 "Failed creating VA Surfaces", PLATFORM_FAILURE, );
758 DCHECK_EQ(va_surface_ids.size(), buffers.size());
759
760 for (size_t i = 0; i < buffers.size(); ++i) {
761 DCHECK_LE(1u, buffers[i].texture_ids().size());
762 DVLOG(2) << "Assigning picture id: " << buffers[i].id()
763 << " to texture id: " << buffers[i].texture_ids()[0]
764 << " VASurfaceID: " << va_surface_ids[i];
765
766 linked_ptr<VaapiPicture> picture(VaapiPicture::CreatePicture(
767 vaapi_wrapper_, make_context_current_cb_, buffers[i].id(),
768 buffers[i].texture_ids()[0], requested_pic_size_));
769
770 scoped_refptr<gl::GLImage> image = picture->GetImageToBind();
771 if (image && buffers[i].internal_texture_ids().size() > 0) {
772 RETURN_AND_NOTIFY_ON_FAILURE(
773 bind_image_cb_.Run(buffers[i].internal_texture_ids()[0],
774 VaapiPicture::GetGLTextureTarget(), image, true),
775 "Failed to bind image", PLATFORM_FAILURE, );
776 }
777
778 RETURN_AND_NOTIFY_ON_FAILURE(
779 picture.get(), "Failed assigning picture buffer to a texture.",
780 PLATFORM_FAILURE, );
781
782 bool inserted =
783 pictures_.insert(std::make_pair(buffers[i].id(), picture)).second;
784 DCHECK(inserted);
785
786 output_buffers_.push(buffers[i].id());
787 available_va_surfaces_.push_back(va_surface_ids[i]);
788 surfaces_available_.Signal();
789 }
790
791 state_ = kDecoding;
792 decoder_thread_task_runner_->PostTask(
793 FROM_HERE, base::Bind(&VaapiVideoDecodeAccelerator::DecodeTask,
794 base::Unretained(this)));
795 }
796
// Client returns |picture_buffer_id| after displaying it; put it back on
// the free output queue and try to satisfy any pending output.
void VaapiVideoDecodeAccelerator::ReusePictureBuffer(
    int32_t picture_buffer_id) {
  DCHECK_EQ(message_loop_, base::MessageLoop::current());
  TRACE_EVENT1("Video Decoder", "VAVDA::ReusePictureBuffer", "Picture id",
               picture_buffer_id);

  --num_frames_at_client_;
  TRACE_COUNTER1("Video Decoder", "Textures at client", num_frames_at_client_);

  output_buffers_.push(picture_buffer_id);
  TryOutputSurface();
}
809
// Decoder-thread part of Flush(): drains remaining pictures from the codec
// decoder, resets it to idle, then hands completion off to FinishFlush()
// on the owning loop.
void VaapiVideoDecodeAccelerator::FlushTask() {
  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
  DVLOG(1) << "Flush task";

  // First flush all the pictures that haven't been outputted, notifying the
  // client to output them.
  bool res = decoder_->Flush();
  RETURN_AND_NOTIFY_ON_FAILURE(res, "Failed flushing the decoder.",
                               PLATFORM_FAILURE, );

  // Put the decoder in idle state, ready to resume.
  decoder_->Reset();

  message_loop_->PostTask(FROM_HERE, base::Bind(
      &VaapiVideoDecodeAccelerator::FinishFlush, weak_this_));
}
826
// media::VideoDecodeAccelerator entry point. Moves to kFlushing and posts
// FlushTask() behind all queued decode work; signals both condition
// variables so blocked decoder-thread waiters re-check the state.
void VaapiVideoDecodeAccelerator::Flush() {
  DCHECK_EQ(message_loop_, base::MessageLoop::current());
  DVLOG(1) << "Got flush request";

  base::AutoLock auto_lock(lock_);
  state_ = kFlushing;
  // Queue a flush task after all existing decoding tasks to clean up.
  decoder_thread_task_runner_->PostTask(
      FROM_HERE, base::Bind(&VaapiVideoDecodeAccelerator::FlushTask,
                            base::Unretained(this)));

  input_ready_.Signal();
  surfaces_available_.Signal();
}
841
// Completes a flush on the owning loop once all pending outputs have been
// delivered; otherwise arms |finish_flush_pending_| so TryOutputSurface()
// retries after the last pending output. Notifies NotifyFlushDone().
void VaapiVideoDecodeAccelerator::FinishFlush() {
  DCHECK_EQ(message_loop_, base::MessageLoop::current());

  finish_flush_pending_ = false;

  base::AutoLock auto_lock(lock_);
  if (state_ != kFlushing) {
    DCHECK_EQ(state_, kDestroying);
    return;  // We could've gotten destroyed already.
  }

  // Still waiting for textures from client to finish outputting all pending
  // frames. Try again later.
  if (!pending_output_cbs_.empty()) {
    finish_flush_pending_ = true;
    return;
  }

  state_ = kIdle;

  message_loop_->PostTask(FROM_HERE, base::Bind(
      &Client::NotifyFlushDone, client_));

  DVLOG(1) << "Flush finished";
}
867
// Decoder-thread part of Reset(): resets the codec decoder, returns any
// in-flight input buffer, and hands completion off to FinishReset() on the
// owning loop.
void VaapiVideoDecodeAccelerator::ResetTask() {
  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
  DVLOG(1) << "ResetTask";

  // All the decoding tasks from before the reset request from client are done
  // by now, as this task was scheduled after them and client is expected not
  // to call Decode() after Reset() and before NotifyResetDone.
  decoder_->Reset();

  base::AutoLock auto_lock(lock_);

  // Return current input buffer, if present.
  if (curr_input_buffer_.get())
    ReturnCurrInputBuffer_Locked();

  // And let client know that we are done with reset.
  message_loop_->PostTask(FROM_HERE, base::Bind(
      &VaapiVideoDecodeAccelerator::FinishReset, weak_this_));
}
887
// media::VideoDecodeAccelerator entry point. Moves to kResetting, returns
// all queued-but-undelivered input buffers to the client, then posts
// ResetTask() to the decoder thread and wakes any blocked waiters.
void VaapiVideoDecodeAccelerator::Reset() {
  DCHECK_EQ(message_loop_, base::MessageLoop::current());
  DVLOG(1) << "Got reset request";

  // This will make any new decode tasks exit early.
  base::AutoLock auto_lock(lock_);
  state_ = kResetting;
  finish_flush_pending_ = false;

  // Drop all remaining input buffers, if present.
  while (!input_buffers_.empty()) {
    message_loop_->PostTask(FROM_HERE, base::Bind(
        &Client::NotifyEndOfBitstreamBuffer, client_,
        input_buffers_.front()->id));
    input_buffers_.pop();
  }

  decoder_thread_task_runner_->PostTask(
      FROM_HERE, base::Bind(&VaapiVideoDecodeAccelerator::ResetTask,
                            base::Unretained(this)));

  input_ready_.Signal();
  surfaces_available_.Signal();
}
912
// Completes a reset on the owning loop: drops pending outputs, defers if a
// surface set change is still in flight, then notifies NotifyResetDone()
// and restarts decoding if input arrived during the reset.
void VaapiVideoDecodeAccelerator::FinishReset() {
  DCHECK_EQ(message_loop_, base::MessageLoop::current());
  DVLOG(1) << "FinishReset";
  base::AutoLock auto_lock(lock_);

  if (state_ != kResetting) {
    DCHECK(state_ == kDestroying || state_ == kUninitialized) << state_;
    return;  // We could've gotten destroyed already.
  }

  // Drop pending outputs.
  while (!pending_output_cbs_.empty())
    pending_output_cbs_.pop();

  if (awaiting_va_surfaces_recycle_) {
    // Decoder requested a new surface set while we were waiting for it to
    // finish the last DecodeTask, running at the time of Reset().
    // Let the surface set change finish first before resetting.
    message_loop_->PostTask(FROM_HERE, base::Bind(
        &VaapiVideoDecodeAccelerator::FinishReset, weak_this_));
    return;
  }

  num_stream_bufs_at_decoder_ = 0;
  state_ = kIdle;

  message_loop_->PostTask(FROM_HERE, base::Bind(
      &Client::NotifyResetDone, client_));

  // The client might have given us new buffers via Decode() while we were
  // resetting and might be waiting for our move, and not call Decode() anymore
  // until we return something. Post a DecodeTask() so that we won't
  // sleep forever waiting for Decode() in that case. Having two of them
  // in the pipe is harmless, the additional one will return as soon as it sees
  // that we are back in kDecoding state.
  if (!input_buffers_.empty()) {
    state_ = kDecoding;
    decoder_thread_task_runner_->PostTask(
        FROM_HERE, base::Bind(&VaapiVideoDecodeAccelerator::DecodeTask,
                              base::Unretained(this)));
  }

  DVLOG(1) << "Reset finished";
}
957
// Tears down the decoder state and stops the decoder thread. Must run on
// the main message loop; idempotent (no-op if already uninitialized or
// mid-destruction).
void VaapiVideoDecodeAccelerator::Cleanup() {
  DCHECK_EQ(message_loop_, base::MessageLoop::current());

  base::AutoLock auto_lock(lock_);
  if (state_ == kUninitialized || state_ == kDestroying)
    return;

  DVLOG(1) << "Destroying VAVDA";
  state_ = kDestroying;

  // Stop issuing client callbacks and invalidate outstanding weak pointers
  // to |this| before the threads are torn down.
  client_ptr_factory_.reset();
  weak_this_factory_.InvalidateWeakPtrs();

  // Signal all potential waiters on the decoder_thread_, let them early-exit,
  // as we've just moved to the kDestroying state, and wait for all tasks
  // to finish.
  input_ready_.Signal();
  surfaces_available_.Signal();
  {
    // Release |lock_| while joining the decoder thread, so its remaining
    // tasks can take the lock, observe kDestroying and exit.
    base::AutoUnlock auto_unlock(lock_);
    decoder_thread_.Stop();
  }

  state_ = kUninitialized;
}
983
// media::VideoDecodeAccelerator implementation: tears everything down via
// Cleanup() and then deletes |this|. Callers must not use the object after
// this returns.
void VaapiVideoDecodeAccelerator::Destroy() {
  DCHECK_EQ(message_loop_, base::MessageLoop::current());
  Cleanup();
  delete this;
}
989
// media::VideoDecodeAccelerator implementation. Decoding from a separate
// client thread is not supported by this accelerator, so this always
// returns false and the client must call Decode() on the same thread as
// the other VDA entry points.
bool VaapiVideoDecodeAccelerator::TryToSetupDecodeOnSeparateThread(
    const base::WeakPtr<Client>& decode_client,
    const scoped_refptr<base::SingleThreadTaskRunner>& decode_task_runner) {
  return false;
}
995
996 bool VaapiVideoDecodeAccelerator::DecodeSurface(
997 const scoped_refptr<VaapiDecodeSurface>& dec_surface) {
998 if (!vaapi_wrapper_->ExecuteAndDestroyPendingBuffers(
999 dec_surface->va_surface()->id())) {
1000 DVLOG(1) << "Failed decoding picture";
1001 return false;
1002 }
1003
1004 return true;
1005 }
1006
// Called by the codec-specific accelerators when |dec_surface| has been
// decoded and should be output, in output order. May be invoked on the
// decoder thread, in which case it re-posts itself to the main loop.
void VaapiVideoDecodeAccelerator::SurfaceReady(
    const scoped_refptr<VaapiDecodeSurface>& dec_surface) {
  if (message_loop_ != base::MessageLoop::current()) {
    message_loop_->PostTask(
        FROM_HERE, base::Bind(&VaapiVideoDecodeAccelerator::SurfaceReady,
                              weak_this_, dec_surface));
    return;
  }

  DCHECK(!awaiting_va_surfaces_recycle_);

  {
    base::AutoLock auto_lock(lock_);
    // Drop any requests to output if we are resetting or being destroyed.
    if (state_ == kResetting || state_ == kDestroying)
      return;
  }

  // Queue an output callback; it will run once the client provides a free
  // picture buffer (driven by TryOutputSurface()).
  pending_output_cbs_.push(
      base::Bind(&VaapiVideoDecodeAccelerator::OutputPicture, weak_this_,
                 dec_surface->va_surface(), dec_surface->bitstream_id()));

  TryOutputSurface();
}
1031
// Allocates a decode surface backed by a free VA surface ID, associating it
// with the bitstream buffer currently being decoded. Runs on the decoder
// thread. Returns nullptr if no VA surface is currently available; the
// caller is expected to retry later.
scoped_refptr<VaapiVideoDecodeAccelerator::VaapiDecodeSurface>
VaapiVideoDecodeAccelerator::CreateSurface() {
  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
  base::AutoLock auto_lock(lock_);

  if (available_va_surfaces_.empty())
    return nullptr;

  DCHECK(!awaiting_va_surfaces_recycle_);
  // NOTE(review): |va_surface_release_cb_| presumably recycles the surface ID
  // back into |available_va_surfaces_| when the VASurface is released —
  // confirm against the callback's definition.
  scoped_refptr<VASurface> va_surface(new VASurface(
      available_va_surfaces_.front(), requested_pic_size_,
      vaapi_wrapper_->va_surface_format(), va_surface_release_cb_));
  available_va_surfaces_.pop_front();

  scoped_refptr<VaapiDecodeSurface> dec_surface =
      new VaapiDecodeSurface(curr_input_buffer_->id, va_surface);

  return dec_surface;
}
1051
// H.264 accelerator delegate. Both |vaapi_dec| and |vaapi_wrapper| are
// non-owned and must outlive this object.
VaapiVideoDecodeAccelerator::VaapiH264Accelerator::VaapiH264Accelerator(
    VaapiVideoDecodeAccelerator* vaapi_dec,
    VaapiWrapper* vaapi_wrapper)
    : vaapi_wrapper_(vaapi_wrapper), vaapi_dec_(vaapi_dec) {
  DCHECK(vaapi_wrapper_);
  DCHECK(vaapi_dec_);
}
1059
1060 VaapiVideoDecodeAccelerator::VaapiH264Accelerator::~VaapiH264Accelerator() {
1061 }
1062
1063 scoped_refptr<H264Picture>
1064 VaapiVideoDecodeAccelerator::VaapiH264Accelerator::CreateH264Picture() {
1065 scoped_refptr<VaapiDecodeSurface> va_surface = vaapi_dec_->CreateSurface();
1066 if (!va_surface)
1067 return nullptr;
1068
1069 return new VaapiH264Picture(va_surface);
1070 }
1071
// Fill |va_pic| with default/neutral values: zero everything (memset also
// clears struct padding before handing the struct to the driver), then mark
// the entry as invalid/unused as far as libva is concerned.
static void InitVAPicture(VAPictureH264* va_pic) {
  memset(va_pic, 0, sizeof(*va_pic));
  va_pic->picture_id = VA_INVALID_ID;
  va_pic->flags = VA_PICTURE_H264_INVALID;
}
1078
// H264Decoder::H264Accelerator implementation. Translates the parsed SPS/PPS
// and DPB state into a VAPictureParameterBufferH264 and a
// VAIQMatrixBufferH264 and submits both to libva for the upcoming frame.
// Returns false if any buffer submission fails.
bool VaapiVideoDecodeAccelerator::VaapiH264Accelerator::SubmitFrameMetadata(
    const media::H264SPS* sps,
    const media::H264PPS* pps,
    const H264DPB& dpb,
    const H264Picture::Vector& ref_pic_listp0,
    const H264Picture::Vector& ref_pic_listb0,
    const H264Picture::Vector& ref_pic_listb1,
    const scoped_refptr<H264Picture>& pic) {
  VAPictureParameterBufferH264 pic_param;
  memset(&pic_param, 0, sizeof(pic_param));

// Copy same-named (or renamed, for the 2-arg variant) SPS fields into
// |pic_param|.
#define FROM_SPS_TO_PP(a) pic_param.a = sps->a
#define FROM_SPS_TO_PP2(a, b) pic_param.b = sps->a
  FROM_SPS_TO_PP2(pic_width_in_mbs_minus1, picture_width_in_mbs_minus1);
  // This assumes non-interlaced video
  FROM_SPS_TO_PP2(pic_height_in_map_units_minus1, picture_height_in_mbs_minus1);
  FROM_SPS_TO_PP(bit_depth_luma_minus8);
  FROM_SPS_TO_PP(bit_depth_chroma_minus8);
#undef FROM_SPS_TO_PP
#undef FROM_SPS_TO_PP2

// Same, for the seq_fields bitfield.
#define FROM_SPS_TO_PP_SF(a) pic_param.seq_fields.bits.a = sps->a
#define FROM_SPS_TO_PP_SF2(a, b) pic_param.seq_fields.bits.b = sps->a
  FROM_SPS_TO_PP_SF(chroma_format_idc);
  FROM_SPS_TO_PP_SF2(separate_colour_plane_flag,
                     residual_colour_transform_flag);
  FROM_SPS_TO_PP_SF(gaps_in_frame_num_value_allowed_flag);
  FROM_SPS_TO_PP_SF(frame_mbs_only_flag);
  FROM_SPS_TO_PP_SF(mb_adaptive_frame_field_flag);
  FROM_SPS_TO_PP_SF(direct_8x8_inference_flag);
  pic_param.seq_fields.bits.MinLumaBiPredSize8x8 = (sps->level_idc >= 31);
  FROM_SPS_TO_PP_SF(log2_max_frame_num_minus4);
  FROM_SPS_TO_PP_SF(pic_order_cnt_type);
  FROM_SPS_TO_PP_SF(log2_max_pic_order_cnt_lsb_minus4);
  FROM_SPS_TO_PP_SF(delta_pic_order_always_zero_flag);
#undef FROM_SPS_TO_PP_SF
#undef FROM_SPS_TO_PP_SF2

// PPS fields, directly and into the pic_fields bitfield below.
#define FROM_PPS_TO_PP(a) pic_param.a = pps->a
  FROM_PPS_TO_PP(num_slice_groups_minus1);
  pic_param.slice_group_map_type = 0;
  pic_param.slice_group_change_rate_minus1 = 0;
  FROM_PPS_TO_PP(pic_init_qp_minus26);
  FROM_PPS_TO_PP(pic_init_qs_minus26);
  FROM_PPS_TO_PP(chroma_qp_index_offset);
  FROM_PPS_TO_PP(second_chroma_qp_index_offset);
#undef FROM_PPS_TO_PP

#define FROM_PPS_TO_PP_PF(a) pic_param.pic_fields.bits.a = pps->a
#define FROM_PPS_TO_PP_PF2(a, b) pic_param.pic_fields.bits.b = pps->a
  FROM_PPS_TO_PP_PF(entropy_coding_mode_flag);
  FROM_PPS_TO_PP_PF(weighted_pred_flag);
  FROM_PPS_TO_PP_PF(weighted_bipred_idc);
  FROM_PPS_TO_PP_PF(transform_8x8_mode_flag);

  // Non-interlaced only: field decoding is not supported.
  pic_param.pic_fields.bits.field_pic_flag = 0;
  FROM_PPS_TO_PP_PF(constrained_intra_pred_flag);
  FROM_PPS_TO_PP_PF2(bottom_field_pic_order_in_frame_present_flag,
                     pic_order_present_flag);
  FROM_PPS_TO_PP_PF(deblocking_filter_control_present_flag);
  FROM_PPS_TO_PP_PF(redundant_pic_cnt_present_flag);
  pic_param.pic_fields.bits.reference_pic_flag = pic->ref;
#undef FROM_PPS_TO_PP_PF
#undef FROM_PPS_TO_PP_PF2

  pic_param.frame_num = pic->frame_num;

  InitVAPicture(&pic_param.CurrPic);
  FillVAPicture(&pic_param.CurrPic, pic);

  // Init reference pictures' array.
  for (int i = 0; i < 16; ++i)
    InitVAPicture(&pic_param.ReferenceFrames[i]);

  // And fill it with picture info from DPB.
  FillVARefFramesFromDPB(dpb, pic_param.ReferenceFrames,
                         arraysize(pic_param.ReferenceFrames));

  pic_param.num_ref_frames = sps->max_num_ref_frames;

  if (!vaapi_wrapper_->SubmitBuffer(VAPictureParameterBufferType,
                                    sizeof(pic_param),
                                    &pic_param))
    return false;

  // Scaling lists come from the PPS when it overrides them, from the SPS
  // otherwise.
  VAIQMatrixBufferH264 iq_matrix_buf;
  memset(&iq_matrix_buf, 0, sizeof(iq_matrix_buf));

  if (pps->pic_scaling_matrix_present_flag) {
    for (int i = 0; i < 6; ++i) {
      for (int j = 0; j < 16; ++j)
        iq_matrix_buf.ScalingList4x4[i][j] = pps->scaling_list4x4[i][j];
    }

    for (int i = 0; i < 2; ++i) {
      for (int j = 0; j < 64; ++j)
        iq_matrix_buf.ScalingList8x8[i][j] = pps->scaling_list8x8[i][j];
    }
  } else {
    for (int i = 0; i < 6; ++i) {
      for (int j = 0; j < 16; ++j)
        iq_matrix_buf.ScalingList4x4[i][j] = sps->scaling_list4x4[i][j];
    }

    for (int i = 0; i < 2; ++i) {
      for (int j = 0; j < 64; ++j)
        iq_matrix_buf.ScalingList8x8[i][j] = sps->scaling_list8x8[i][j];
    }
  }

  return vaapi_wrapper_->SubmitBuffer(VAIQMatrixBufferType,
                                      sizeof(iq_matrix_buf),
                                      &iq_matrix_buf);
}
1193
// H264Decoder::H264Accelerator implementation. Builds a
// VASliceParameterBufferH264 from the parsed slice header and reference
// picture lists, then submits it together with the raw slice data
// (|data|/|size|) to libva. Returns false on submission failure.
bool VaapiVideoDecodeAccelerator::VaapiH264Accelerator::SubmitSlice(
    const media::H264PPS* pps,
    const media::H264SliceHeader* slice_hdr,
    const H264Picture::Vector& ref_pic_list0,
    const H264Picture::Vector& ref_pic_list1,
    const scoped_refptr<H264Picture>& pic,
    const uint8_t* data,
    size_t size) {
  VASliceParameterBufferH264 slice_param;
  memset(&slice_param, 0, sizeof(slice_param));

  slice_param.slice_data_size = slice_hdr->nalu_size;
  slice_param.slice_data_offset = 0;
  slice_param.slice_data_flag = VA_SLICE_DATA_FLAG_ALL;
  slice_param.slice_data_bit_offset = slice_hdr->header_bit_size;

// Copy same-named slice-header fields into |slice_param|.
#define SHDRToSP(a) slice_param.a = slice_hdr->a
  SHDRToSP(first_mb_in_slice);
  slice_param.slice_type = slice_hdr->slice_type % 5;
  SHDRToSP(direct_spatial_mv_pred_flag);

  // TODO posciak: make sure parser sets those even when override flags
  // in slice header is off.
  SHDRToSP(num_ref_idx_l0_active_minus1);
  SHDRToSP(num_ref_idx_l1_active_minus1);
  SHDRToSP(cabac_init_idc);
  SHDRToSP(slice_qp_delta);
  SHDRToSP(disable_deblocking_filter_idc);
  SHDRToSP(slice_alpha_c0_offset_div2);
  SHDRToSP(slice_beta_offset_div2);

  // Prediction weight tables apply to weighted P/SP slices and to B slices
  // with explicit weighted bi-prediction.
  if (((slice_hdr->IsPSlice() || slice_hdr->IsSPSlice()) &&
       pps->weighted_pred_flag) ||
      (slice_hdr->IsBSlice() && pps->weighted_bipred_idc == 1)) {
    SHDRToSP(luma_log2_weight_denom);
    SHDRToSP(chroma_log2_weight_denom);

    SHDRToSP(luma_weight_l0_flag);
    SHDRToSP(luma_weight_l1_flag);

    SHDRToSP(chroma_weight_l0_flag);
    SHDRToSP(chroma_weight_l1_flag);

    for (int i = 0; i <= slice_param.num_ref_idx_l0_active_minus1; ++i) {
      slice_param.luma_weight_l0[i] =
          slice_hdr->pred_weight_table_l0.luma_weight[i];
      slice_param.luma_offset_l0[i] =
          slice_hdr->pred_weight_table_l0.luma_offset[i];

      for (int j = 0; j < 2; ++j) {
        slice_param.chroma_weight_l0[i][j] =
            slice_hdr->pred_weight_table_l0.chroma_weight[i][j];
        slice_param.chroma_offset_l0[i][j] =
            slice_hdr->pred_weight_table_l0.chroma_offset[i][j];
      }
    }

    if (slice_hdr->IsBSlice()) {
      for (int i = 0; i <= slice_param.num_ref_idx_l1_active_minus1; ++i) {
        slice_param.luma_weight_l1[i] =
            slice_hdr->pred_weight_table_l1.luma_weight[i];
        slice_param.luma_offset_l1[i] =
            slice_hdr->pred_weight_table_l1.luma_offset[i];

        for (int j = 0; j < 2; ++j) {
          slice_param.chroma_weight_l1[i][j] =
              slice_hdr->pred_weight_table_l1.chroma_weight[i][j];
          slice_param.chroma_offset_l1[i][j] =
              slice_hdr->pred_weight_table_l1.chroma_offset[i][j];
        }
      }
    }
  }

  static_assert(
      arraysize(slice_param.RefPicList0) == arraysize(slice_param.RefPicList1),
      "Invalid RefPicList sizes");

  // Initialize both reference lists to "invalid", then overwrite the leading
  // entries with the actual reference pictures.
  for (size_t i = 0; i < arraysize(slice_param.RefPicList0); ++i) {
    InitVAPicture(&slice_param.RefPicList0[i]);
    InitVAPicture(&slice_param.RefPicList1[i]);
  }

  for (size_t i = 0;
       i < ref_pic_list0.size() && i < arraysize(slice_param.RefPicList0);
       ++i) {
    if (ref_pic_list0[i])
      FillVAPicture(&slice_param.RefPicList0[i], ref_pic_list0[i]);
  }
  for (size_t i = 0;
       i < ref_pic_list1.size() && i < arraysize(slice_param.RefPicList1);
       ++i) {
    if (ref_pic_list1[i])
      FillVAPicture(&slice_param.RefPicList1[i], ref_pic_list1[i]);
  }

  if (!vaapi_wrapper_->SubmitBuffer(VASliceParameterBufferType,
                                    sizeof(slice_param),
                                    &slice_param))
    return false;

  // Can't help it, blame libva...
  void* non_const_ptr = const_cast<uint8_t*>(data);
  return vaapi_wrapper_->SubmitBuffer(VASliceDataBufferType, size,
                                      non_const_ptr);
}
1300
1301 bool VaapiVideoDecodeAccelerator::VaapiH264Accelerator::SubmitDecode(
1302 const scoped_refptr<H264Picture>& pic) {
1303 DVLOG(4) << "Decoding POC " << pic->pic_order_cnt;
1304 scoped_refptr<VaapiDecodeSurface> dec_surface =
1305 H264PictureToVaapiDecodeSurface(pic);
1306
1307 return vaapi_dec_->DecodeSurface(dec_surface);
1308 }
1309
1310 bool VaapiVideoDecodeAccelerator::VaapiH264Accelerator::OutputPicture(
1311 const scoped_refptr<H264Picture>& pic) {
1312 scoped_refptr<VaapiDecodeSurface> dec_surface =
1313 H264PictureToVaapiDecodeSurface(pic);
1314
1315 vaapi_dec_->SurfaceReady(dec_surface);
1316
1317 return true;
1318 }
1319
// Discards any VA buffers submitted for the current frame but not yet
// executed, e.g. on a mid-frame reset or decode error.
void VaapiVideoDecodeAccelerator::VaapiH264Accelerator::Reset() {
  vaapi_wrapper_->DestroyPendingBuffers();
}
1323
1324 scoped_refptr<VaapiVideoDecodeAccelerator::VaapiDecodeSurface>
1325 VaapiVideoDecodeAccelerator::VaapiH264Accelerator::
1326 H264PictureToVaapiDecodeSurface(const scoped_refptr<H264Picture>& pic) {
1327 VaapiH264Picture* vaapi_pic = pic->AsVaapiH264Picture();
1328 CHECK(vaapi_pic);
1329 return vaapi_pic->dec_surface();
1330 }
1331
// Fills one libva reference-picture entry |va_pic| from |pic|: surface ID
// (VA_INVALID_SURFACE for "non-existing" gap frames), frame number, field
// and reference flags, and picture order counts.
void VaapiVideoDecodeAccelerator::VaapiH264Accelerator::FillVAPicture(
    VAPictureH264* va_pic,
    scoped_refptr<H264Picture> pic) {
  VASurfaceID va_surface_id = VA_INVALID_SURFACE;

  // "Non-existing" frames (frame_num gaps) have no backing surface.
  if (!pic->nonexisting) {
    scoped_refptr<VaapiDecodeSurface> dec_surface =
        H264PictureToVaapiDecodeSurface(pic);
    va_surface_id = dec_surface->va_surface()->id();
  }

  va_pic->picture_id = va_surface_id;
  va_pic->frame_idx = pic->frame_num;
  va_pic->flags = 0;

  switch (pic->field) {
    case H264Picture::FIELD_NONE:
      break;
    case H264Picture::FIELD_TOP:
      va_pic->flags |= VA_PICTURE_H264_TOP_FIELD;
      break;
    case H264Picture::FIELD_BOTTOM:
      va_pic->flags |= VA_PICTURE_H264_BOTTOM_FIELD;
      break;
  }

  if (pic->ref) {
    va_pic->flags |= pic->long_term ? VA_PICTURE_H264_LONG_TERM_REFERENCE
                                    : VA_PICTURE_H264_SHORT_TERM_REFERENCE;
  }

  va_pic->TopFieldOrderCnt = pic->top_field_order_cnt;
  va_pic->BottomFieldOrderCnt = pic->bottom_field_order_cnt;
}
1366
// Fills up to |num_pics| entries of |va_pics| with the reference pictures
// currently held in |dpb|, and returns the number of entries written.
int VaapiVideoDecodeAccelerator::VaapiH264Accelerator::FillVARefFramesFromDPB(
    const H264DPB& dpb,
    VAPictureH264* va_pics,
    int num_pics) {
  H264Picture::Vector::const_reverse_iterator rit;
  int i;

  // Return reference frames in reverse order of insertion.
  // Libva does not document this, but other implementations (e.g. mplayer)
  // do it this way as well.
  for (rit = dpb.rbegin(), i = 0; rit != dpb.rend() && i < num_pics; ++rit) {
    if ((*rit)->ref)
      FillVAPicture(&va_pics[i++], *rit);
  }

  return i;
}
1384
// VP8 accelerator delegate. Both |vaapi_dec| and |vaapi_wrapper| are
// non-owned and must outlive this object.
VaapiVideoDecodeAccelerator::VaapiVP8Accelerator::VaapiVP8Accelerator(
    VaapiVideoDecodeAccelerator* vaapi_dec,
    VaapiWrapper* vaapi_wrapper)
    : vaapi_wrapper_(vaapi_wrapper), vaapi_dec_(vaapi_dec) {
  DCHECK(vaapi_wrapper_);
  DCHECK(vaapi_dec_);
}
1392
1393 VaapiVideoDecodeAccelerator::VaapiVP8Accelerator::~VaapiVP8Accelerator() {
1394 }
1395
1396 scoped_refptr<VP8Picture>
1397 VaapiVideoDecodeAccelerator::VaapiVP8Accelerator::CreateVP8Picture() {
1398 scoped_refptr<VaapiDecodeSurface> va_surface = vaapi_dec_->CreateSurface();
1399 if (!va_surface)
1400 return nullptr;
1401
1402 return new VaapiVP8Picture(va_surface);
1403 }
1404
// memcpy() between two fixed-size arrays, statically asserting that both
// have the same byte size so a silent truncation/overrun cannot occur.
#define ARRAY_MEMCPY_CHECKED(to, from)                               \
  do {                                                               \
    static_assert(sizeof(to) == sizeof(from),                        \
                  #from " and " #to " arrays must be of same size"); \
    memcpy(to, from, sizeof(to));                                    \
  } while (0)
1411
// VP8Decoder::VP8Accelerator implementation. Translates the parsed VP8 frame
// header into the four libva buffers a VP8 decode requires (IQ matrix,
// coefficient probabilities, picture parameters, slice parameters), submits
// them plus the raw frame data, and executes the decode. Returns false on
// any submission or execution failure.
bool VaapiVideoDecodeAccelerator::VaapiVP8Accelerator::SubmitDecode(
    const scoped_refptr<VP8Picture>& pic,
    const media::Vp8FrameHeader* frame_hdr,
    const scoped_refptr<VP8Picture>& last_frame,
    const scoped_refptr<VP8Picture>& golden_frame,
    const scoped_refptr<VP8Picture>& alt_frame) {
  VAIQMatrixBufferVP8 iq_matrix_buf;
  memset(&iq_matrix_buf, 0, sizeof(VAIQMatrixBufferVP8));

  const media::Vp8SegmentationHeader& sgmnt_hdr = frame_hdr->segmentation_hdr;
  const media::Vp8QuantizationHeader& quant_hdr = frame_hdr->quantization_hdr;
  static_assert(
      arraysize(iq_matrix_buf.quantization_index) == media::kMaxMBSegments,
      "incorrect quantization matrix size");
  // Per-segment quantizer indices: start from the frame-level y_ac_qi and
  // apply the segment's absolute or delta update if segmentation is on.
  for (size_t i = 0; i < media::kMaxMBSegments; ++i) {
    int q = quant_hdr.y_ac_qi;

    if (sgmnt_hdr.segmentation_enabled) {
      if (sgmnt_hdr.segment_feature_mode ==
          media::Vp8SegmentationHeader::FEATURE_MODE_ABSOLUTE)
        q = sgmnt_hdr.quantizer_update_value[i];
      else
        q += sgmnt_hdr.quantizer_update_value[i];
    }

// Clamp the quantizer index to the valid [0, 127] range.
#define CLAMP_Q(q) std::min(std::max(q, 0), 127)
    static_assert(arraysize(iq_matrix_buf.quantization_index[i]) == 6,
                  "incorrect quantization matrix size");
    iq_matrix_buf.quantization_index[i][0] = CLAMP_Q(q);
    iq_matrix_buf.quantization_index[i][1] = CLAMP_Q(q + quant_hdr.y_dc_delta);
    iq_matrix_buf.quantization_index[i][2] = CLAMP_Q(q + quant_hdr.y2_dc_delta);
    iq_matrix_buf.quantization_index[i][3] = CLAMP_Q(q + quant_hdr.y2_ac_delta);
    iq_matrix_buf.quantization_index[i][4] = CLAMP_Q(q + quant_hdr.uv_dc_delta);
    iq_matrix_buf.quantization_index[i][5] = CLAMP_Q(q + quant_hdr.uv_ac_delta);
#undef CLAMP_Q
  }

  if (!vaapi_wrapper_->SubmitBuffer(VAIQMatrixBufferType,
                                    sizeof(VAIQMatrixBufferVP8),
                                    &iq_matrix_buf))
    return false;

  VAProbabilityDataBufferVP8 prob_buf;
  memset(&prob_buf, 0, sizeof(VAProbabilityDataBufferVP8));

  const media::Vp8EntropyHeader& entr_hdr = frame_hdr->entropy_hdr;
  ARRAY_MEMCPY_CHECKED(prob_buf.dct_coeff_probs, entr_hdr.coeff_probs);

  if (!vaapi_wrapper_->SubmitBuffer(VAProbabilityBufferType,
                                    sizeof(VAProbabilityDataBufferVP8),
                                    &prob_buf))
    return false;

  VAPictureParameterBufferVP8 pic_param;
  memset(&pic_param, 0, sizeof(VAPictureParameterBufferVP8));
  pic_param.frame_width = frame_hdr->width;
  pic_param.frame_height = frame_hdr->height;

  // Reference surfaces: use VA_INVALID_SURFACE when a reference is absent
  // (e.g. keyframes).
  if (last_frame) {
    scoped_refptr<VaapiDecodeSurface> last_frame_surface =
        VP8PictureToVaapiDecodeSurface(last_frame);
    pic_param.last_ref_frame = last_frame_surface->va_surface()->id();
  } else {
    pic_param.last_ref_frame = VA_INVALID_SURFACE;
  }

  if (golden_frame) {
    scoped_refptr<VaapiDecodeSurface> golden_frame_surface =
        VP8PictureToVaapiDecodeSurface(golden_frame);
    pic_param.golden_ref_frame = golden_frame_surface->va_surface()->id();
  } else {
    pic_param.golden_ref_frame = VA_INVALID_SURFACE;
  }

  if (alt_frame) {
    scoped_refptr<VaapiDecodeSurface> alt_frame_surface =
        VP8PictureToVaapiDecodeSurface(alt_frame);
    pic_param.alt_ref_frame = alt_frame_surface->va_surface()->id();
  } else {
    pic_param.alt_ref_frame = VA_INVALID_SURFACE;
  }

  pic_param.out_of_loop_frame = VA_INVALID_SURFACE;

  const media::Vp8LoopFilterHeader& lf_hdr = frame_hdr->loopfilter_hdr;

// Copy header-derived values into the pic_fields bitfield. Note
// key_frame uses libva's inverted convention (0 == keyframe).
#define FHDR_TO_PP_PF(a, b) pic_param.pic_fields.bits.a = (b)
  FHDR_TO_PP_PF(key_frame, frame_hdr->IsKeyframe() ? 0 : 1);
  FHDR_TO_PP_PF(version, frame_hdr->version);
  FHDR_TO_PP_PF(segmentation_enabled, sgmnt_hdr.segmentation_enabled);
  FHDR_TO_PP_PF(update_mb_segmentation_map,
                sgmnt_hdr.update_mb_segmentation_map);
  FHDR_TO_PP_PF(update_segment_feature_data,
                sgmnt_hdr.update_segment_feature_data);
  FHDR_TO_PP_PF(filter_type, lf_hdr.type);
  FHDR_TO_PP_PF(sharpness_level, lf_hdr.sharpness_level);
  FHDR_TO_PP_PF(loop_filter_adj_enable, lf_hdr.loop_filter_adj_enable);
  FHDR_TO_PP_PF(mode_ref_lf_delta_update, lf_hdr.mode_ref_lf_delta_update);
  FHDR_TO_PP_PF(sign_bias_golden, frame_hdr->sign_bias_golden);
  FHDR_TO_PP_PF(sign_bias_alternate, frame_hdr->sign_bias_alternate);
  FHDR_TO_PP_PF(mb_no_coeff_skip, frame_hdr->mb_no_skip_coeff);
  FHDR_TO_PP_PF(loop_filter_disable, lf_hdr.level == 0);
#undef FHDR_TO_PP_PF

  ARRAY_MEMCPY_CHECKED(pic_param.mb_segment_tree_probs, sgmnt_hdr.segment_prob);

  static_assert(arraysize(sgmnt_hdr.lf_update_value) ==
                    arraysize(pic_param.loop_filter_level),
                "loop filter level arrays mismatch");
  // Per-segment loop filter level: frame-level value adjusted (absolute or
  // delta) by the segment update when segmentation is on.
  for (size_t i = 0; i < arraysize(sgmnt_hdr.lf_update_value); ++i) {
    int lf_level = lf_hdr.level;
    if (sgmnt_hdr.segmentation_enabled) {
      if (sgmnt_hdr.segment_feature_mode ==
          media::Vp8SegmentationHeader::FEATURE_MODE_ABSOLUTE)
        lf_level = sgmnt_hdr.lf_update_value[i];
      else
        lf_level += sgmnt_hdr.lf_update_value[i];
    }

    // Clamp to [0..63] range.
    lf_level = std::min(std::max(lf_level, 0), 63);
    pic_param.loop_filter_level[i] = lf_level;
  }

  static_assert(arraysize(lf_hdr.ref_frame_delta) ==
                        arraysize(pic_param.loop_filter_deltas_ref_frame) &&
                    arraysize(lf_hdr.mb_mode_delta) ==
                        arraysize(pic_param.loop_filter_deltas_mode) &&
                    arraysize(lf_hdr.ref_frame_delta) ==
                        arraysize(lf_hdr.mb_mode_delta),
                "loop filter deltas arrays size mismatch");
  for (size_t i = 0; i < arraysize(lf_hdr.ref_frame_delta); ++i) {
    pic_param.loop_filter_deltas_ref_frame[i] = lf_hdr.ref_frame_delta[i];
    pic_param.loop_filter_deltas_mode[i] = lf_hdr.mb_mode_delta[i];
  }

#define FHDR_TO_PP(a) pic_param.a = frame_hdr->a
  FHDR_TO_PP(prob_skip_false);
  FHDR_TO_PP(prob_intra);
  FHDR_TO_PP(prob_last);
  FHDR_TO_PP(prob_gf);
#undef FHDR_TO_PP

  ARRAY_MEMCPY_CHECKED(pic_param.y_mode_probs, entr_hdr.y_mode_probs);
  ARRAY_MEMCPY_CHECKED(pic_param.uv_mode_probs, entr_hdr.uv_mode_probs);
  ARRAY_MEMCPY_CHECKED(pic_param.mv_probs, entr_hdr.mv_probs);

  pic_param.bool_coder_ctx.range = frame_hdr->bool_dec_range;
  pic_param.bool_coder_ctx.value = frame_hdr->bool_dec_value;
  pic_param.bool_coder_ctx.count = frame_hdr->bool_dec_count;

  if (!vaapi_wrapper_->SubmitBuffer(VAPictureParameterBufferType,
                                    sizeof(pic_param), &pic_param))
    return false;

  VASliceParameterBufferVP8 slice_param;
  memset(&slice_param, 0, sizeof(slice_param));
  slice_param.slice_data_size = frame_hdr->frame_size;
  slice_param.slice_data_offset = frame_hdr->first_part_offset;
  slice_param.slice_data_flag = VA_SLICE_DATA_FLAG_ALL;
  slice_param.macroblock_offset = frame_hdr->macroblock_bit_offset;
  // Number of DCT partitions plus control partition.
  slice_param.num_of_partitions = frame_hdr->num_of_dct_partitions + 1;

  // Per VAAPI, this size only includes the size of the macroblock data in
  // the first partition (in bytes), so we have to subtract the header size.
  slice_param.partition_size[0] =
      frame_hdr->first_part_size - ((frame_hdr->macroblock_bit_offset + 7) / 8);

  for (size_t i = 0; i < frame_hdr->num_of_dct_partitions; ++i)
    slice_param.partition_size[i + 1] = frame_hdr->dct_partition_sizes[i];

  if (!vaapi_wrapper_->SubmitBuffer(VASliceParameterBufferType,
                                    sizeof(VASliceParameterBufferVP8),
                                    &slice_param))
    return false;

  // libva's buffer interface is not const-correct, hence the cast.
  void* non_const_ptr = const_cast<uint8_t*>(frame_hdr->data);
  if (!vaapi_wrapper_->SubmitBuffer(VASliceDataBufferType,
                                    frame_hdr->frame_size,
                                    non_const_ptr))
    return false;

  scoped_refptr<VaapiDecodeSurface> dec_surface =
      VP8PictureToVaapiDecodeSurface(pic);

  return vaapi_dec_->DecodeSurface(dec_surface);
}
1600
1601 bool VaapiVideoDecodeAccelerator::VaapiVP8Accelerator::OutputPicture(
1602 const scoped_refptr<VP8Picture>& pic) {
1603 scoped_refptr<VaapiDecodeSurface> dec_surface =
1604 VP8PictureToVaapiDecodeSurface(pic);
1605
1606 vaapi_dec_->SurfaceReady(dec_surface);
1607 return true;
1608 }
1609
1610 scoped_refptr<VaapiVideoDecodeAccelerator::VaapiDecodeSurface>
1611 VaapiVideoDecodeAccelerator::VaapiVP8Accelerator::
1612 VP8PictureToVaapiDecodeSurface(const scoped_refptr<VP8Picture>& pic) {
1613 VaapiVP8Picture* vaapi_pic = pic->AsVaapiVP8Picture();
1614 CHECK(vaapi_pic);
1615 return vaapi_pic->dec_surface();
1616 }
1617
// VP9 accelerator delegate. Both |vaapi_dec| and |vaapi_wrapper| are
// non-owned and must outlive this object.
VaapiVideoDecodeAccelerator::VaapiVP9Accelerator::VaapiVP9Accelerator(
    VaapiVideoDecodeAccelerator* vaapi_dec,
    VaapiWrapper* vaapi_wrapper)
    : vaapi_wrapper_(vaapi_wrapper), vaapi_dec_(vaapi_dec) {
  DCHECK(vaapi_wrapper_);
  DCHECK(vaapi_dec_);
}
1625
1626 VaapiVideoDecodeAccelerator::VaapiVP9Accelerator::~VaapiVP9Accelerator() {}
1627
1628 scoped_refptr<VP9Picture>
1629 VaapiVideoDecodeAccelerator::VaapiVP9Accelerator::CreateVP9Picture() {
1630 scoped_refptr<VaapiDecodeSurface> va_surface = vaapi_dec_->CreateSurface();
1631 if (!va_surface)
1632 return nullptr;
1633
1634 return new VaapiVP9Picture(va_surface);
1635 }
1636
// VP9Decoder::VP9Accelerator implementation. Translates the parsed VP9 frame
// header, segmentation and loop filter state into a
// VADecPictureParameterBufferVP9 and VASliceParameterBufferVP9, submits them
// plus the raw frame data, and executes the decode. Returns false for
// unsupported profiles (only profile 0 is handled) or on any submission or
// execution failure.
bool VaapiVideoDecodeAccelerator::VaapiVP9Accelerator::SubmitDecode(
    const scoped_refptr<VP9Picture>& pic,
    const media::Vp9Segmentation& seg,
    const media::Vp9LoopFilter& lf,
    const std::vector<scoped_refptr<VP9Picture>>& ref_pictures) {
  VADecPictureParameterBufferVP9 pic_param;
  memset(&pic_param, 0, sizeof(pic_param));

  const media::Vp9FrameHeader* frame_hdr = pic->frame_hdr.get();
  DCHECK(frame_hdr);

  if (frame_hdr->profile != 0) {
    DVLOG(1) << "Unsupported profile" << frame_hdr->profile;
    return false;
  }

  pic_param.frame_width = base::checked_cast<uint16_t>(frame_hdr->width);
  pic_param.frame_height = base::checked_cast<uint16_t>(frame_hdr->height);

  // Map each reference slot to its VA surface, or VA_INVALID_SURFACE when
  // the slot is empty.
  CHECK_EQ(ref_pictures.size(), arraysize(pic_param.reference_frames));
  for (size_t i = 0; i < arraysize(pic_param.reference_frames); ++i) {
    VASurfaceID va_surface_id;
    if (ref_pictures[i]) {
      scoped_refptr<VaapiDecodeSurface> surface =
          VP9PictureToVaapiDecodeSurface(ref_pictures[i]);
      va_surface_id = surface->va_surface()->id();
    } else {
      va_surface_id = VA_INVALID_SURFACE;
    }

    pic_param.reference_frames[i] = va_surface_id;
  }

// Copy (PF1: same-named, PF2: renamed/derived) header fields into the
// pic_fields bitfield.
#define FHDR_TO_PP_PF1(a) pic_param.pic_fields.bits.a = frame_hdr->a
#define FHDR_TO_PP_PF2(a, b) pic_param.pic_fields.bits.a = b
  FHDR_TO_PP_PF2(subsampling_x, frame_hdr->subsampling_x == 1);
  FHDR_TO_PP_PF2(subsampling_y, frame_hdr->subsampling_y == 1);
  FHDR_TO_PP_PF2(frame_type, frame_hdr->IsKeyframe() ? 0 : 1);
  FHDR_TO_PP_PF1(show_frame);
  FHDR_TO_PP_PF1(error_resilient_mode);
  FHDR_TO_PP_PF1(intra_only);
  FHDR_TO_PP_PF1(allow_high_precision_mv);
  FHDR_TO_PP_PF2(mcomp_filter_type, frame_hdr->interp_filter);
  FHDR_TO_PP_PF1(frame_parallel_decoding_mode);
  FHDR_TO_PP_PF2(reset_frame_context, frame_hdr->reset_context);
  FHDR_TO_PP_PF1(refresh_frame_context);
  FHDR_TO_PP_PF1(frame_context_idx);
  FHDR_TO_PP_PF2(segmentation_enabled, seg.enabled);
  FHDR_TO_PP_PF2(segmentation_temporal_update, seg.temporal_update);
  FHDR_TO_PP_PF2(segmentation_update_map, seg.update_map);
  FHDR_TO_PP_PF2(last_ref_frame, frame_hdr->frame_refs[0]);
  FHDR_TO_PP_PF2(last_ref_frame_sign_bias, frame_hdr->ref_sign_biases[0]);
  FHDR_TO_PP_PF2(golden_ref_frame, frame_hdr->frame_refs[1]);
  FHDR_TO_PP_PF2(golden_ref_frame_sign_bias, frame_hdr->ref_sign_biases[1]);
  FHDR_TO_PP_PF2(alt_ref_frame, frame_hdr->frame_refs[2]);
  FHDR_TO_PP_PF2(alt_ref_frame_sign_bias, frame_hdr->ref_sign_biases[2]);
  FHDR_TO_PP_PF2(lossless_flag, frame_hdr->quant_params.IsLossless());
#undef FHDR_TO_PP_PF2
#undef FHDR_TO_PP_PF1

  pic_param.filter_level = lf.filter_level;
  pic_param.sharpness_level = lf.sharpness_level;
  pic_param.log2_tile_rows = frame_hdr->log2_tile_rows;
  pic_param.log2_tile_columns = frame_hdr->log2_tile_cols;
  pic_param.frame_header_length_in_bytes = frame_hdr->uncompressed_header_size;
  pic_param.first_partition_size = frame_hdr->first_partition_size;

  ARRAY_MEMCPY_CHECKED(pic_param.mb_segment_tree_probs, seg.tree_probs);
  ARRAY_MEMCPY_CHECKED(pic_param.segment_pred_probs, seg.pred_probs);

  pic_param.profile = frame_hdr->profile;

  if (!vaapi_wrapper_->SubmitBuffer(VAPictureParameterBufferType,
                                    sizeof(pic_param), &pic_param))
    return false;

  VASliceParameterBufferVP9 slice_param;
  memset(&slice_param, 0, sizeof(slice_param));
  slice_param.slice_data_size = frame_hdr->frame_size;
  slice_param.slice_data_offset = 0;
  slice_param.slice_data_flag = VA_SLICE_DATA_FLAG_ALL;

  static_assert(arraysize(media::Vp9Segmentation::feature_enabled) ==
                    arraysize(slice_param.seg_param),
                "seg_param array of incorrect size");
  // Per-segment parameters: reference features, loop filter levels and
  // dequantization scales.
  for (size_t i = 0; i < arraysize(slice_param.seg_param); ++i) {
    VASegmentParameterVP9& seg_param = slice_param.seg_param[i];
#define SEG_TO_SP_SF(a, b) seg_param.segment_flags.fields.a = b
    SEG_TO_SP_SF(
        segment_reference_enabled,
        seg.FeatureEnabled(i, media::Vp9Segmentation::SEG_LVL_REF_FRAME));
    SEG_TO_SP_SF(segment_reference,
                 seg.FeatureData(i, media::Vp9Segmentation::SEG_LVL_REF_FRAME));
    SEG_TO_SP_SF(segment_reference_skipped,
                 seg.FeatureEnabled(i, media::Vp9Segmentation::SEG_LVL_SKIP));
#undef SEG_TO_SP_SF

    ARRAY_MEMCPY_CHECKED(seg_param.filter_level, lf.lvl[i]);

    seg_param.luma_dc_quant_scale = seg.y_dequant[i][0];
    seg_param.luma_ac_quant_scale = seg.y_dequant[i][1];
    seg_param.chroma_dc_quant_scale = seg.uv_dequant[i][0];
    seg_param.chroma_ac_quant_scale = seg.uv_dequant[i][1];
  }

  if (!vaapi_wrapper_->SubmitBuffer(VASliceParameterBufferType,
                                    sizeof(slice_param), &slice_param))
    return false;

  // libva's buffer interface is not const-correct, hence the cast.
  void* non_const_ptr = const_cast<uint8_t*>(frame_hdr->data);
  if (!vaapi_wrapper_->SubmitBuffer(VASliceDataBufferType,
                                    frame_hdr->frame_size, non_const_ptr))
    return false;

  scoped_refptr<VaapiDecodeSurface> dec_surface =
      VP9PictureToVaapiDecodeSurface(pic);

  return vaapi_dec_->DecodeSurface(dec_surface);
}
1756
1757 bool VaapiVideoDecodeAccelerator::VaapiVP9Accelerator::OutputPicture(
1758 const scoped_refptr<VP9Picture>& pic) {
1759 scoped_refptr<VaapiDecodeSurface> dec_surface =
1760 VP9PictureToVaapiDecodeSurface(pic);
1761
1762 vaapi_dec_->SurfaceReady(dec_surface);
1763 return true;
1764 }
1765
1766 scoped_refptr<VaapiVideoDecodeAccelerator::VaapiDecodeSurface>
1767 VaapiVideoDecodeAccelerator::VaapiVP9Accelerator::
1768 VP9PictureToVaapiDecodeSurface(const scoped_refptr<VP9Picture>& pic) {
1769 VaapiVP9Picture* vaapi_pic = pic->AsVaapiVP9Picture();
1770 CHECK(vaapi_pic);
1771 return vaapi_pic->dec_surface();
1772 }
1773
// static
// Returns the decode profiles the underlying VAAPI driver reports as
// supported; used to advertise this VDA's capabilities.
media::VideoDecodeAccelerator::SupportedProfiles
VaapiVideoDecodeAccelerator::GetSupportedProfiles() {
  return VaapiWrapper::GetSupportedDecodeProfiles();
}
1779
1780 } // namespace content
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698