Chromium Code Reviews

Side by Side Diff: content/common/gpu/media/vaapi_video_decode_accelerator.cc

Issue 1040513003: VAVDA: Use the new, generic video decoder and accelerator infrastructure. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Fix whitespace. Created 5 years, 9 months ago.
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "content/common/gpu/media/vaapi_video_decode_accelerator.h"
6
5 #include "base/bind.h" 7 #include "base/bind.h"
6 #include "base/logging.h" 8 #include "base/logging.h"
7 #include "base/metrics/histogram.h" 9 #include "base/metrics/histogram.h"
8 #include "base/stl_util.h" 10 #include "base/stl_util.h"
9 #include "base/strings/string_util.h" 11 #include "base/strings/string_util.h"
10 #include "base/synchronization/waitable_event.h" 12 #include "base/synchronization/waitable_event.h"
11 #include "base/trace_event/trace_event.h" 13 #include "base/trace_event/trace_event.h"
12 #include "content/common/gpu/gpu_channel.h" 14 #include "content/common/gpu/gpu_channel.h"
15 #include "content/common/gpu/media/accelerated_video_decoder.h"
16 #include "content/common/gpu/media/h264_decoder.h"
13 #include "content/common/gpu/media/vaapi_picture.h" 17 #include "content/common/gpu/media/vaapi_picture.h"
14 #include "content/common/gpu/media/vaapi_video_decode_accelerator.h"
15 #include "media/base/bind_to_current_loop.h" 18 #include "media/base/bind_to_current_loop.h"
16 #include "media/video/picture.h" 19 #include "media/video/picture.h"
17 #include "ui/gl/gl_bindings.h" 20 #include "ui/gl/gl_bindings.h"
18 #include "ui/gl/gl_image.h" 21 #include "ui/gl/gl_image.h"
19 22
20 static void ReportToUMA( 23 namespace content {
21 content::VaapiH264Decoder::VAVDAH264DecoderFailure failure) { 24
22 UMA_HISTOGRAM_ENUMERATION( 25 namespace {
23 "Media.VAVDAH264.DecoderFailure", 26 // UMA errors that the VaapiVideoDecodeAccelerator class reports.
24 failure, 27 enum VAVDADecoderFailure {
25 content::VaapiH264Decoder::VAVDA_H264_DECODER_FAILURES_MAX); 28 VAAPI_ERROR = 0,
29 VAVDA_DECODER_FAILURES_MAX,
30 };
26 } 31 }
27 32
28 namespace content { 33 static void ReportToUMA(VAVDADecoderFailure failure) {
34 UMA_HISTOGRAM_ENUMERATION("Media.VAVDA.DecoderFailure", failure,
35 VAVDA_DECODER_FAILURES_MAX);
36 }
29 37
30 #define RETURN_AND_NOTIFY_ON_FAILURE(result, log, error_code, ret) \ 38 #define RETURN_AND_NOTIFY_ON_FAILURE(result, log, error_code, ret) \
31 do { \ 39 do { \
32 if (!(result)) { \ 40 if (!(result)) { \
33 LOG(ERROR) << log; \ 41 LOG(ERROR) << log; \
34 NotifyError(error_code); \ 42 NotifyError(error_code); \
35 return ret; \ 43 return ret; \
36 } \ 44 } \
37 } while (0) 45 } while (0)
38 46
47 class VaapiVideoDecodeAccelerator::VaapiDecodeSurface
48 : public base::RefCountedThreadSafe<VaapiDecodeSurface> {
49 public:
50 VaapiDecodeSurface(int32 bitstream_id,
51 const scoped_refptr<VASurface>& va_surface);
52
53 int32 bitstream_id() const { return bitstream_id_; }
54 scoped_refptr<VASurface> va_surface() { return va_surface_; }
55
56 private:
57 friend class base::RefCountedThreadSafe<VaapiDecodeSurface>;
58 ~VaapiDecodeSurface();
59
60 int32 bitstream_id_;
61 scoped_refptr<VASurface> va_surface_;
62 };
63
64 VaapiVideoDecodeAccelerator::VaapiDecodeSurface::VaapiDecodeSurface(
65 int32 bitstream_id,
66 const scoped_refptr<VASurface>& va_surface)
67 : bitstream_id_(bitstream_id), va_surface_(va_surface) {
68 }
69
70 VaapiVideoDecodeAccelerator::VaapiDecodeSurface::~VaapiDecodeSurface() {
71 }
72
73 class VaapiH264Picture : public H264Picture {
74 public:
75 VaapiH264Picture(const scoped_refptr<
76 VaapiVideoDecodeAccelerator::VaapiDecodeSurface>& dec_surface);
77
78 VaapiH264Picture* AsVaapiH264Picture() override { return this; }
79 scoped_refptr<VaapiVideoDecodeAccelerator::VaapiDecodeSurface> dec_surface() {
80 return dec_surface_;
81 }
82
83 private:
84 ~VaapiH264Picture() override;
85
86 scoped_refptr<VaapiVideoDecodeAccelerator::VaapiDecodeSurface> dec_surface_;
87
88 DISALLOW_COPY_AND_ASSIGN(VaapiH264Picture);
89 };
90
91 VaapiH264Picture::VaapiH264Picture(const scoped_refptr<
92 VaapiVideoDecodeAccelerator::VaapiDecodeSurface>& dec_surface)
93 : dec_surface_(dec_surface) {
94 }
95
96 VaapiH264Picture::~VaapiH264Picture() {
97 }
98
99 class VaapiVideoDecodeAccelerator::VaapiH264Accelerator
100 : public H264Decoder::H264Accelerator {
101 public:
102 VaapiH264Accelerator(VaapiVideoDecodeAccelerator* vaapi_dec,
103 VaapiWrapper* vaapi_wrapper);
104 ~VaapiH264Accelerator() override;
105
106 // H264Decoder::H264Accelerator implementation.
107 scoped_refptr<H264Picture> CreateH264Picture() override;
108
109 bool SubmitFrameMetadata(const media::H264SPS* sps,
110 const media::H264PPS* pps,
111 const H264DPB& dpb,
112 const H264Picture::Vector& ref_pic_listp0,
113 const H264Picture::Vector& ref_pic_listb0,
114 const H264Picture::Vector& ref_pic_listb1,
115 const scoped_refptr<H264Picture>& pic) override;
116
117 bool SubmitSlice(const media::H264PPS* pps,
118 const media::H264SliceHeader* slice_hdr,
119 const H264Picture::Vector& ref_pic_list0,
120 const H264Picture::Vector& ref_pic_list1,
121 const scoped_refptr<H264Picture>& pic,
122 const uint8_t* data,
123 size_t size) override;
124
125 bool SubmitDecode(const scoped_refptr<H264Picture>& pic) override;
126 bool OutputPicture(const scoped_refptr<H264Picture>& pic) override;
127
128 void Reset() override;
129
130 private:
131 scoped_refptr<VaapiDecodeSurface> H264PictureToVaapiDecodeSurface(
132 const scoped_refptr<H264Picture>& pic);
133
134 void FillVAPicture(VAPictureH264* va_pic, scoped_refptr<H264Picture> pic);
135 int FillVARefFramesFromDPB(const H264DPB& dpb,
136 VAPictureH264* va_pics,
137 int num_pics);
138
139 VaapiWrapper* vaapi_wrapper_;
140 VaapiVideoDecodeAccelerator* vaapi_dec_;
141
142 DISALLOW_COPY_AND_ASSIGN(VaapiH264Accelerator);
143 };
144
39 VaapiVideoDecodeAccelerator::InputBuffer::InputBuffer() : id(0), size(0) { 145 VaapiVideoDecodeAccelerator::InputBuffer::InputBuffer() : id(0), size(0) {
40 } 146 }
41 147
42 VaapiVideoDecodeAccelerator::InputBuffer::~InputBuffer() { 148 VaapiVideoDecodeAccelerator::InputBuffer::~InputBuffer() {
43 } 149 }
44 150
45 void VaapiVideoDecodeAccelerator::NotifyError(Error error) { 151 void VaapiVideoDecodeAccelerator::NotifyError(Error error) {
46 if (message_loop_ != base::MessageLoop::current()) { 152 if (message_loop_ != base::MessageLoop::current()) {
47 DCHECK(decoder_thread_proxy_->BelongsToCurrentThread()); 153 DCHECK(decoder_thread_proxy_->BelongsToCurrentThread());
48 message_loop_->PostTask(FROM_HERE, base::Bind( 154 message_loop_->PostTask(FROM_HERE, base::Bind(
(...skipping 68 matching lines...)
117 } 223 }
118 #elif defined(USE_OZONE) 224 #elif defined(USE_OZONE)
119 if (gfx::GetGLImplementation() != gfx::kGLImplementationEGLGLES2) { 225 if (gfx::GetGLImplementation() != gfx::kGLImplementationEGLGLES2) {
120 DVLOG(1) << "HW video decode acceleration not available without " 226 DVLOG(1) << "HW video decode acceleration not available without "
121 << "EGLGLES2."; 227 << "EGLGLES2.";
122 return false; 228 return false;
123 } 229 }
124 #endif // USE_X11 230 #endif // USE_X11
125 231
126 vaapi_wrapper_ = VaapiWrapper::CreateForVideoCodec( 232 vaapi_wrapper_ = VaapiWrapper::CreateForVideoCodec(
127 VaapiWrapper::kDecode, profile, 233 VaapiWrapper::kDecode, profile, base::Bind(&ReportToUMA, VAAPI_ERROR));
128 base::Bind(&ReportToUMA, content::VaapiH264Decoder::VAAPI_ERROR));
129 234
130 if (!vaapi_wrapper_.get()) { 235 if (!vaapi_wrapper_.get()) {
131 DVLOG(1) << "Failed initializing VAAPI for profile " << profile; 236 DVLOG(1) << "Failed initializing VAAPI for profile " << profile;
132 return false; 237 return false;
133 } 238 }
134 239
135 decoder_.reset( 240 if (!(profile >= media::H264PROFILE_MIN &&
136 new VaapiH264Decoder( 241 profile <= media::H264PROFILE_MAX)) {
137 vaapi_wrapper_.get(), 242 DLOG(ERROR) << "Unsupported profile " << profile;
138 media::BindToCurrentLoop(base::Bind( 243 return false;
139 &VaapiVideoDecodeAccelerator::SurfaceReady, weak_this_)), 244 }
140 base::Bind(&ReportToUMA))); 245
246 h264_accelerator_.reset(new VaapiH264Accelerator(this, vaapi_wrapper_.get()));
247 decoder_.reset(new H264Decoder(h264_accelerator_.get()));
141 248
142 CHECK(decoder_thread_.Start()); 249 CHECK(decoder_thread_.Start());
143 decoder_thread_proxy_ = decoder_thread_.message_loop_proxy(); 250 decoder_thread_proxy_ = decoder_thread_.message_loop_proxy();
144 251
145 state_ = kIdle; 252 state_ = kIdle;
146 return true; 253 return true;
147 } 254 }
148 255
149 void VaapiVideoDecodeAccelerator::SurfaceReady(
150 int32 input_id,
151 const scoped_refptr<VASurface>& va_surface) {
152 DCHECK_EQ(message_loop_, base::MessageLoop::current());
153 DCHECK(!awaiting_va_surfaces_recycle_);
154
155 // Drop any requests to output if we are resetting or being destroyed.
156 if (state_ == kResetting || state_ == kDestroying)
157 return;
158
159 pending_output_cbs_.push(
160 base::Bind(&VaapiVideoDecodeAccelerator::OutputPicture,
161 weak_this_, va_surface, input_id));
162
163 TryOutputSurface();
164 }
165
166 void VaapiVideoDecodeAccelerator::OutputPicture( 256 void VaapiVideoDecodeAccelerator::OutputPicture(
167 const scoped_refptr<VASurface>& va_surface, 257 const scoped_refptr<VASurface>& va_surface,
168 int32 input_id, 258 int32 input_id,
169 VaapiPicture* picture) { 259 VaapiPicture* picture) {
170 DCHECK_EQ(message_loop_, base::MessageLoop::current()); 260 DCHECK_EQ(message_loop_, base::MessageLoop::current());
171 261
172 int32 output_id = picture->picture_buffer_id(); 262 int32 output_id = picture->picture_buffer_id();
173 263
174 TRACE_EVENT2("Video Decoder", "VAVDA::OutputSurface", 264 TRACE_EVENT2("Video Decoder", "VAVDA::OutputSurface",
175 "input_id", input_id, 265 "input_id", input_id,
(...skipping 100 matching lines...)
276 366
277 curr_input_buffer_ = input_buffers_.front(); 367 curr_input_buffer_ = input_buffers_.front();
278 input_buffers_.pop(); 368 input_buffers_.pop();
279 369
280 DVLOG(4) << "New current bitstream buffer, id: " 370 DVLOG(4) << "New current bitstream buffer, id: "
281 << curr_input_buffer_->id 371 << curr_input_buffer_->id
282 << " size: " << curr_input_buffer_->size; 372 << " size: " << curr_input_buffer_->size;
283 373
284 decoder_->SetStream( 374 decoder_->SetStream(
285 static_cast<uint8*>(curr_input_buffer_->shm->memory()), 375 static_cast<uint8*>(curr_input_buffer_->shm->memory()),
286 curr_input_buffer_->size, curr_input_buffer_->id); 376 curr_input_buffer_->size);
287 return true; 377 return true;
288 378
289 default: 379 default:
290 // We got woken up due to being destroyed/reset, ignore any already 380 // We got woken up due to being destroyed/reset, ignore any already
291 // queued inputs. 381 // queued inputs.
292 return false; 382 return false;
293 } 383 }
294 } 384 }
295 385
296 void VaapiVideoDecodeAccelerator::ReturnCurrInputBuffer_Locked() { 386 void VaapiVideoDecodeAccelerator::ReturnCurrInputBuffer_Locked() {
297 lock_.AssertAcquired(); 387 lock_.AssertAcquired();
298 DCHECK(decoder_thread_proxy_->BelongsToCurrentThread()); 388 DCHECK(decoder_thread_proxy_->BelongsToCurrentThread());
299 DCHECK(curr_input_buffer_.get()); 389 DCHECK(curr_input_buffer_.get());
300 390
301 int32 id = curr_input_buffer_->id; 391 int32 id = curr_input_buffer_->id;
302 curr_input_buffer_.reset(); 392 curr_input_buffer_.reset();
303 DVLOG(4) << "End of input buffer " << id; 393 DVLOG(4) << "End of input buffer " << id;
304 message_loop_->PostTask(FROM_HERE, base::Bind( 394 message_loop_->PostTask(FROM_HERE, base::Bind(
305 &Client::NotifyEndOfBitstreamBuffer, client_, id)); 395 &Client::NotifyEndOfBitstreamBuffer, client_, id));
306 396
307 --num_stream_bufs_at_decoder_; 397 --num_stream_bufs_at_decoder_;
308 TRACE_COUNTER1("Video Decoder", "Stream buffers at decoder", 398 TRACE_COUNTER1("Video Decoder", "Stream buffers at decoder",
309 num_stream_bufs_at_decoder_); 399 num_stream_bufs_at_decoder_);
310 } 400 }
311 401
312 bool VaapiVideoDecodeAccelerator::FeedDecoderWithOutputSurfaces_Locked() { 402 // TODO(posciak): refactor the whole class to remove sleeping in wait for
403 // surfaces, and reschedule DecodeTask instead.
wuchengli 2015/03/30 11:02:55 nit: I think one TODO in vaapi_video_decode_accele
Pawel Osciak 2015/04/03 07:06:00 Done.
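For context on the TODO above: the idea is that instead of the decoder thread blocking inside WaitForSurfaces_Locked() until a surface is recycled, DecodeTask() would return early and get re-posted once a surface comes back. The sketch below only illustrates that rescheduling pattern; the class, the names, and the plain std containers are invented for the example and are not part of this patch.

// Self-contained sketch of "return and reschedule" instead of "sleep and wait".
// All names here are hypothetical; this is not the VAVDA code.
#include <deque>
#include <functional>
#include <iostream>

struct Surface {};

class Decoder {
 public:
  void DecodeStep() {
    if (surfaces_.empty()) {
      needs_reschedule_ = true;  // Remember that decoding stalled...
      return;                    // ...and give the thread back instead of waiting.
    }
    surfaces_.pop_front();
    std::cout << "decoded one picture\n";
  }

  void RecycleSurface(Surface s) {
    surfaces_.push_back(s);
    if (needs_reschedule_) {
      needs_reschedule_ = false;
      // Re-post the decode step now that a surface is available again.
      pending_tasks_.push_back([this] { DecodeStep(); });
    }
  }

  void RunPendingTasks() {
    while (!pending_tasks_.empty()) {
      std::function<void()> task = pending_tasks_.front();
      pending_tasks_.pop_front();
      task();
    }
  }

 private:
  std::deque<Surface> surfaces_;
  std::deque<std::function<void()>> pending_tasks_;
  bool needs_reschedule_ = false;
};

int main() {
  Decoder d;
  d.DecodeStep();               // Stalls: no surfaces yet.
  d.RecycleSurface(Surface{});  // Recycling a surface re-posts the decode step.
  d.RunPendingTasks();          // Prints "decoded one picture".
}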
404 bool VaapiVideoDecodeAccelerator::WaitForSurfaces_Locked() {
313 lock_.AssertAcquired(); 405 lock_.AssertAcquired();
314 DCHECK(decoder_thread_proxy_->BelongsToCurrentThread()); 406 DCHECK(decoder_thread_proxy_->BelongsToCurrentThread());
315 407
316 while (available_va_surfaces_.empty() && 408 while (available_va_surfaces_.empty() &&
317 (state_ == kDecoding || state_ == kFlushing || state_ == kIdle)) { 409 (state_ == kDecoding || state_ == kFlushing || state_ == kIdle)) {
318 surfaces_available_.Wait(); 410 surfaces_available_.Wait();
319 } 411 }
320 412
321 if (state_ != kDecoding && state_ != kFlushing && state_ != kIdle) 413 if (state_ != kDecoding && state_ != kFlushing && state_ != kIdle)
322 return false; 414 return false;
323 415
324 DCHECK(!awaiting_va_surfaces_recycle_);
325 while (!available_va_surfaces_.empty()) {
326 scoped_refptr<VASurface> va_surface(
327 new VASurface(available_va_surfaces_.front(), requested_pic_size_,
328 va_surface_release_cb_));
329 available_va_surfaces_.pop_front();
330 decoder_->ReuseSurface(va_surface);
331 }
332
333 return true; 416 return true;
334 } 417 }
335 418
336 void VaapiVideoDecodeAccelerator::DecodeTask() { 419 void VaapiVideoDecodeAccelerator::DecodeTask() {
337 DCHECK(decoder_thread_proxy_->BelongsToCurrentThread()); 420 DCHECK(decoder_thread_proxy_->BelongsToCurrentThread());
338 TRACE_EVENT0("Video Decoder", "VAVDA::DecodeTask"); 421 TRACE_EVENT0("Video Decoder", "VAVDA::DecodeTask");
339 base::AutoLock auto_lock(lock_); 422 base::AutoLock auto_lock(lock_);
340 423
341 if (state_ != kDecoding) 424 if (state_ != kDecoding)
342 return; 425 return;
343 426
344 // Main decode task. 427 // Main decode task.
345 DVLOG(4) << "Decode task"; 428 DVLOG(4) << "Decode task";
346 429
347 // Try to decode what stream data is (still) in the decoder until we run out 430 // Try to decode what stream data is (still) in the decoder until we run out
348 // of it. 431 // of it.
349 while (GetInputBuffer_Locked()) { 432 while (GetInputBuffer_Locked()) {
350 DCHECK(curr_input_buffer_.get()); 433 DCHECK(curr_input_buffer_.get());
351 434
352 VaapiH264Decoder::DecResult res; 435 AcceleratedVideoDecoder::DecodeResult res;
353 { 436 {
354 // We are OK releasing the lock here, as decoder never calls our methods 437 // We are OK releasing the lock here, as decoder never calls our methods
355 // directly and we will reacquire the lock before looking at state again. 438 // directly and we will reacquire the lock before looking at state again.
356 // This is the main decode function of the decoder and while keeping 439 // This is the main decode function of the decoder and while keeping
357 // the lock for its duration would be fine, it would defeat the purpose 440 // the lock for its duration would be fine, it would defeat the purpose
358 // of having a separate decoder thread. 441 // of having a separate decoder thread.
359 base::AutoUnlock auto_unlock(lock_); 442 base::AutoUnlock auto_unlock(lock_);
360 res = decoder_->Decode(); 443 res = decoder_->Decode();
361 } 444 }
362 445
363 switch (res) { 446 switch (res) {
364 case VaapiH264Decoder::kAllocateNewSurfaces: 447 case AcceleratedVideoDecoder::kAllocateNewSurfaces:
365 DVLOG(1) << "Decoder requesting a new set of surfaces"; 448 DVLOG(1) << "Decoder requesting a new set of surfaces";
366 message_loop_->PostTask(FROM_HERE, base::Bind( 449 message_loop_->PostTask(FROM_HERE, base::Bind(
367 &VaapiVideoDecodeAccelerator::InitiateSurfaceSetChange, weak_this_, 450 &VaapiVideoDecodeAccelerator::InitiateSurfaceSetChange, weak_this_,
368 decoder_->GetRequiredNumOfPictures(), 451 decoder_->GetRequiredNumOfPictures(),
369 decoder_->GetPicSize())); 452 decoder_->GetPicSize()));
370 // We'll get rescheduled once ProvidePictureBuffers() finishes. 453 // We'll get rescheduled once ProvidePictureBuffers() finishes.
371 return; 454 return;
372 455
373 case VaapiH264Decoder::kRanOutOfStreamData: 456 case AcceleratedVideoDecoder::kRanOutOfStreamData:
374 ReturnCurrInputBuffer_Locked(); 457 ReturnCurrInputBuffer_Locked();
375 break; 458 break;
376 459
377 case VaapiH264Decoder::kRanOutOfSurfaces: 460 case AcceleratedVideoDecoder::kRanOutOfSurfaces:
378 // No more output buffers in the decoder, try getting more or go to 461 // No more output buffers in the decoder, try getting more or go to
379 // sleep waiting for them. 462 // sleep waiting for them.
380 if (!FeedDecoderWithOutputSurfaces_Locked()) 463 if (!WaitForSurfaces_Locked())
381 return; 464 return;
382 465
383 break; 466 break;
384 467
385 case VaapiH264Decoder::kDecodeError: 468 case AcceleratedVideoDecoder::kDecodeError:
386 RETURN_AND_NOTIFY_ON_FAILURE(false, "Error decoding stream", 469 RETURN_AND_NOTIFY_ON_FAILURE(false, "Error decoding stream",
387 PLATFORM_FAILURE, ); 470 PLATFORM_FAILURE, );
388 return; 471 return;
389 } 472 }
390 } 473 }
391 } 474 }
392 475
393 void VaapiVideoDecodeAccelerator::InitiateSurfaceSetChange(size_t num_pics, 476 void VaapiVideoDecodeAccelerator::InitiateSurfaceSetChange(size_t num_pics,
394 gfx::Size size) { 477 gfx::Size size) {
395 DCHECK_EQ(message_loop_, base::MessageLoop::current()); 478 DCHECK_EQ(message_loop_, base::MessageLoop::current());
(...skipping 345 matching lines...)
741 void VaapiVideoDecodeAccelerator::Destroy() { 824 void VaapiVideoDecodeAccelerator::Destroy() {
742 DCHECK_EQ(message_loop_, base::MessageLoop::current()); 825 DCHECK_EQ(message_loop_, base::MessageLoop::current());
743 Cleanup(); 826 Cleanup();
744 delete this; 827 delete this;
745 } 828 }
746 829
747 bool VaapiVideoDecodeAccelerator::CanDecodeOnIOThread() { 830 bool VaapiVideoDecodeAccelerator::CanDecodeOnIOThread() {
748 return false; 831 return false;
749 } 832 }
750 833
834 bool VaapiVideoDecodeAccelerator::DecodeSurface(
835 const scoped_refptr<VaapiDecodeSurface>& dec_surface) {
836 if (!vaapi_wrapper_->ExecuteAndDestroyPendingBuffers(
837 dec_surface->va_surface()->id())) {
838 DVLOG(1) << "Failed decoding picture";
839 return false;
840 }
841
842 return true;
843 }
844
845 void VaapiVideoDecodeAccelerator::SurfaceReady(
846 const scoped_refptr<VaapiDecodeSurface>& dec_surface) {
847 if (message_loop_ != base::MessageLoop::current()) {
848 message_loop_->PostTask(
849 FROM_HERE, base::Bind(&VaapiVideoDecodeAccelerator::SurfaceReady,
850 weak_this_, dec_surface));
851 return;
852 }
853
854 DCHECK(!awaiting_va_surfaces_recycle_);
855
856 // Drop any requests to output if we are resetting or being destroyed.
857 if (state_ == kResetting || state_ == kDestroying)
wuchengli 2015/04/01 05:17:38 auto_lock(lock_); to protect |state_|?
Pawel Osciak 2015/04/03 07:06:00 Done.
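For readers following the thread: the concern is that |state_| is modified under lock_ on other threads, so the read in SurfaceReady() should also happen with the lock held. The snippet below is a minimal, self-contained illustration of that pattern, using std::mutex in place of base::Lock; every name in it is invented and it is not the actual fix applied in the next patch set.

// Guarding a state flag that one thread writes and another thread reads.
#include <iostream>
#include <mutex>
#include <thread>

class StateHolder {
 public:
  void SetResetting() {
    std::lock_guard<std::mutex> lock(lock_);
    state_ = State::kResetting;
  }

  // Equivalent of the check in SurfaceReady(): take the lock before reading.
  bool ShouldDropOutput() {
    std::lock_guard<std::mutex> lock(lock_);
    return state_ == State::kResetting || state_ == State::kDestroying;
  }

 private:
  enum class State { kDecoding, kResetting, kDestroying };
  std::mutex lock_;
  State state_ = State::kDecoding;
};

int main() {
  StateHolder holder;
  std::thread resetter([&] { holder.SetResetting(); });
  resetter.join();
  std::cout << std::boolalpha << holder.ShouldDropOutput() << "\n";  // true
}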
858 return;
859
860 pending_output_cbs_.push(
861 base::Bind(&VaapiVideoDecodeAccelerator::OutputPicture, weak_this_,
862 dec_surface->va_surface(), dec_surface->bitstream_id()));
863
864 TryOutputSurface();
865 }
866
867 scoped_refptr<VaapiVideoDecodeAccelerator::VaapiDecodeSurface>
868 VaapiVideoDecodeAccelerator::CreateSurface() {
869 DCHECK(decoder_thread_proxy_->BelongsToCurrentThread());
870 base::AutoLock auto_lock(lock_);
871
872 if (available_va_surfaces_.empty())
873 return nullptr;
874
875 DCHECK(!awaiting_va_surfaces_recycle_);
876 scoped_refptr<VASurface> va_surface(
877 new VASurface(available_va_surfaces_.front(), requested_pic_size_,
878 va_surface_release_cb_));
879 available_va_surfaces_.pop_front();
880
881 scoped_refptr<VaapiDecodeSurface> dec_surface =
882 new VaapiDecodeSurface(curr_input_buffer_->id, va_surface);
883
884 return dec_surface;
885 }
886
887 VaapiVideoDecodeAccelerator::VaapiH264Accelerator::VaapiH264Accelerator(
888 VaapiVideoDecodeAccelerator* vaapi_dec,
889 VaapiWrapper* vaapi_wrapper)
890 : vaapi_wrapper_(vaapi_wrapper), vaapi_dec_(vaapi_dec) {
891 DCHECK(vaapi_wrapper_);
892 DCHECK(vaapi_dec_);
893 }
894
895 VaapiVideoDecodeAccelerator::VaapiH264Accelerator::~VaapiH264Accelerator() {
896 }
897
898 scoped_refptr<H264Picture>
899 VaapiVideoDecodeAccelerator::VaapiH264Accelerator::CreateH264Picture() {
900 scoped_refptr<VaapiDecodeSurface> va_surface = vaapi_dec_->CreateSurface();
901 if (!va_surface)
902 return nullptr;
903
904 return new VaapiH264Picture(va_surface);
905 }
906
907 // Fill |va_pic| with default/neutral values.
908 static void InitVAPicture(VAPictureH264* va_pic) {
909 memset(va_pic, 0, sizeof(*va_pic));
910 va_pic->picture_id = VA_INVALID_ID;
911 va_pic->flags = VA_PICTURE_H264_INVALID;
912 }
913
914 bool VaapiVideoDecodeAccelerator::VaapiH264Accelerator::SubmitFrameMetadata(
915 const media::H264SPS* sps,
916 const media::H264PPS* pps,
917 const H264DPB& dpb,
918 const H264Picture::Vector& ref_pic_listp0,
919 const H264Picture::Vector& ref_pic_listb0,
920 const H264Picture::Vector& ref_pic_listb1,
921 const scoped_refptr<H264Picture>& pic) {
922 VAPictureParameterBufferH264 pic_param;
923 memset(&pic_param, 0, sizeof(VAPictureParameterBufferH264));
924
925 #define FROM_SPS_TO_PP(a) pic_param.a = sps->a;
926 #define FROM_SPS_TO_PP2(a, b) pic_param.b = sps->a;
927 FROM_SPS_TO_PP2(pic_width_in_mbs_minus1, picture_width_in_mbs_minus1);
928 // This assumes non-interlaced video
929 FROM_SPS_TO_PP2(pic_height_in_map_units_minus1, picture_height_in_mbs_minus1);
930 FROM_SPS_TO_PP(bit_depth_luma_minus8);
931 FROM_SPS_TO_PP(bit_depth_chroma_minus8);
932 #undef FROM_SPS_TO_PP
933 #undef FROM_SPS_TO_PP2
934
935 #define FROM_SPS_TO_PP_SF(a) pic_param.seq_fields.bits.a = sps->a;
936 #define FROM_SPS_TO_PP_SF2(a, b) pic_param.seq_fields.bits.b = sps->a;
937 FROM_SPS_TO_PP_SF(chroma_format_idc);
938 FROM_SPS_TO_PP_SF2(separate_colour_plane_flag,
939 residual_colour_transform_flag);
940 FROM_SPS_TO_PP_SF(gaps_in_frame_num_value_allowed_flag);
941 FROM_SPS_TO_PP_SF(frame_mbs_only_flag);
942 FROM_SPS_TO_PP_SF(mb_adaptive_frame_field_flag);
943 FROM_SPS_TO_PP_SF(direct_8x8_inference_flag);
944 pic_param.seq_fields.bits.MinLumaBiPredSize8x8 = (sps->level_idc >= 31);
945 FROM_SPS_TO_PP_SF(log2_max_frame_num_minus4);
946 FROM_SPS_TO_PP_SF(pic_order_cnt_type);
947 FROM_SPS_TO_PP_SF(log2_max_pic_order_cnt_lsb_minus4);
948 FROM_SPS_TO_PP_SF(delta_pic_order_always_zero_flag);
949 #undef FROM_SPS_TO_PP_SF
950 #undef FROM_SPS_TO_PP_SF2
951
952 #define FROM_PPS_TO_PP(a) pic_param.a = pps->a;
953 FROM_PPS_TO_PP(num_slice_groups_minus1);
954 pic_param.slice_group_map_type = 0;
955 pic_param.slice_group_change_rate_minus1 = 0;
956 FROM_PPS_TO_PP(pic_init_qp_minus26);
957 FROM_PPS_TO_PP(pic_init_qs_minus26);
958 FROM_PPS_TO_PP(chroma_qp_index_offset);
959 FROM_PPS_TO_PP(second_chroma_qp_index_offset);
960 #undef FROM_PPS_TO_PP
961
962 #define FROM_PPS_TO_PP_PF(a) pic_param.pic_fields.bits.a = pps->a;
963 #define FROM_PPS_TO_PP_PF2(a, b) pic_param.pic_fields.bits.b = pps->a;
964 FROM_PPS_TO_PP_PF(entropy_coding_mode_flag);
965 FROM_PPS_TO_PP_PF(weighted_pred_flag);
966 FROM_PPS_TO_PP_PF(weighted_bipred_idc);
967 FROM_PPS_TO_PP_PF(transform_8x8_mode_flag);
968
969 pic_param.pic_fields.bits.field_pic_flag = 0;
970 FROM_PPS_TO_PP_PF(constrained_intra_pred_flag);
971 FROM_PPS_TO_PP_PF2(bottom_field_pic_order_in_frame_present_flag,
972 pic_order_present_flag);
973 FROM_PPS_TO_PP_PF(deblocking_filter_control_present_flag);
974 FROM_PPS_TO_PP_PF(redundant_pic_cnt_present_flag);
975 pic_param.pic_fields.bits.reference_pic_flag = pic->ref;
976 #undef FROM_PPS_TO_PP_PF
977 #undef FROM_PPS_TO_PP_PF2
978
979 pic_param.frame_num = pic->frame_num;
980
981 InitVAPicture(&pic_param.CurrPic);
982 FillVAPicture(&pic_param.CurrPic, pic);
983
984 // Init reference pictures' array.
985 for (int i = 0; i < 16; ++i)
986 InitVAPicture(&pic_param.ReferenceFrames[i]);
987
988 // And fill it with picture info from DPB.
989 FillVARefFramesFromDPB(dpb, pic_param.ReferenceFrames,
990 arraysize(pic_param.ReferenceFrames));
991
992 pic_param.num_ref_frames = sps->max_num_ref_frames;
993
994 if (!vaapi_wrapper_->SubmitBuffer(VAPictureParameterBufferType,
995 sizeof(VAPictureParameterBufferH264),
996 &pic_param))
997 return false;
998
999 VAIQMatrixBufferH264 iq_matrix_buf;
1000 memset(&iq_matrix_buf, 0, sizeof(VAIQMatrixBufferH264));
1001
1002 if (pps->pic_scaling_matrix_present_flag) {
1003 for (int i = 0; i < 6; ++i) {
1004 for (int j = 0; j < 16; ++j)
1005 iq_matrix_buf.ScalingList4x4[i][j] = pps->scaling_list4x4[i][j];
1006 }
1007
1008 for (int i = 0; i < 2; ++i) {
1009 for (int j = 0; j < 64; ++j)
1010 iq_matrix_buf.ScalingList8x8[i][j] = pps->scaling_list8x8[i][j];
1011 }
1012 } else {
1013 for (int i = 0; i < 6; ++i) {
1014 for (int j = 0; j < 16; ++j)
1015 iq_matrix_buf.ScalingList4x4[i][j] = sps->scaling_list4x4[i][j];
1016 }
1017
1018 for (int i = 0; i < 2; ++i) {
1019 for (int j = 0; j < 64; ++j)
1020 iq_matrix_buf.ScalingList8x8[i][j] = sps->scaling_list8x8[i][j];
1021 }
1022 }
1023
1024 return vaapi_wrapper_->SubmitBuffer(VAIQMatrixBufferType,
1025 sizeof(VAIQMatrixBufferH264),
1026 &iq_matrix_buf);
1027 }
1028
1029 bool VaapiVideoDecodeAccelerator::VaapiH264Accelerator::SubmitSlice(
1030 const media::H264PPS* pps,
1031 const media::H264SliceHeader* slice_hdr,
1032 const H264Picture::Vector& ref_pic_list0,
1033 const H264Picture::Vector& ref_pic_list1,
1034 const scoped_refptr<H264Picture>& pic,
1035 const uint8_t* data,
1036 size_t size) {
1037 VASliceParameterBufferH264 slice_param;
1038 memset(&slice_param, 0, sizeof(VASliceParameterBufferH264));
kcwu 2015/03/31 15:20:09 I prefer sizeof(slice_param) instead.
Pawel Osciak 2015/04/03 07:06:00 Me too. Done.
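The preference for sizeof(slice_param) over sizeof(VASliceParameterBufferH264) is the usual one: the zeroing stays correct even if the variable's declared type later changes. A trivial stand-alone example with an invented struct name:

#include <cstring>

struct SliceParams { int a; int b; };

int main() {
  SliceParams slice_param;
  std::memset(&slice_param, 0, sizeof(slice_param));  // Tracks the variable's type.
  return slice_param.a + slice_param.b;               // 0
}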
1039
1040 slice_param.slice_data_size = slice_hdr->nalu_size;
1041 slice_param.slice_data_offset = 0;
1042 slice_param.slice_data_flag = VA_SLICE_DATA_FLAG_ALL;
1043 slice_param.slice_data_bit_offset = slice_hdr->header_bit_size;
1044
1045 #define SHDRToSP(a) slice_param.a = slice_hdr->a;
1046 SHDRToSP(first_mb_in_slice);
1047 slice_param.slice_type = slice_hdr->slice_type % 5;
1048 SHDRToSP(direct_spatial_mv_pred_flag);
1049
1050 // TODO posciak: make sure parser sets those even when override flags
1051 // in slice header is off.
1052 SHDRToSP(num_ref_idx_l0_active_minus1);
1053 SHDRToSP(num_ref_idx_l1_active_minus1);
1054 SHDRToSP(cabac_init_idc);
1055 SHDRToSP(slice_qp_delta);
1056 SHDRToSP(disable_deblocking_filter_idc);
1057 SHDRToSP(slice_alpha_c0_offset_div2);
1058 SHDRToSP(slice_beta_offset_div2);
1059
1060 if (((slice_hdr->IsPSlice() || slice_hdr->IsSPSlice()) &&
1061 pps->weighted_pred_flag) ||
1062 (slice_hdr->IsBSlice() && pps->weighted_bipred_idc == 1)) {
1063 SHDRToSP(luma_log2_weight_denom);
1064 SHDRToSP(chroma_log2_weight_denom);
1065
1066 SHDRToSP(luma_weight_l0_flag);
1067 SHDRToSP(luma_weight_l1_flag);
1068
1069 SHDRToSP(chroma_weight_l0_flag);
1070 SHDRToSP(chroma_weight_l1_flag);
1071
1072 for (int i = 0; i <= slice_param.num_ref_idx_l0_active_minus1; ++i) {
1073 slice_param.luma_weight_l0[i] =
1074 slice_hdr->pred_weight_table_l0.luma_weight[i];
1075 slice_param.luma_offset_l0[i] =
1076 slice_hdr->pred_weight_table_l0.luma_offset[i];
1077
1078 for (int j = 0; j < 2; ++j) {
1079 slice_param.chroma_weight_l0[i][j] =
1080 slice_hdr->pred_weight_table_l0.chroma_weight[i][j];
1081 slice_param.chroma_offset_l0[i][j] =
1082 slice_hdr->pred_weight_table_l0.chroma_offset[i][j];
1083 }
1084 }
1085
1086 if (slice_hdr->IsBSlice()) {
1087 for (int i = 0; i <= slice_param.num_ref_idx_l1_active_minus1; ++i) {
1088 slice_param.luma_weight_l1[i] =
1089 slice_hdr->pred_weight_table_l1.luma_weight[i];
1090 slice_param.luma_offset_l1[i] =
1091 slice_hdr->pred_weight_table_l1.luma_offset[i];
1092
1093 for (int j = 0; j < 2; ++j) {
1094 slice_param.chroma_weight_l1[i][j] =
1095 slice_hdr->pred_weight_table_l1.chroma_weight[i][j];
1096 slice_param.chroma_offset_l1[i][j] =
1097 slice_hdr->pred_weight_table_l1.chroma_offset[i][j];
1098 }
1099 }
1100 }
1101 }
1102
1103 static_assert(
1104 arraysize(slice_param.RefPicList0) == arraysize(slice_param.RefPicList1),
1105 "Invalid RefPicList sizes");
1106
1107 for (size_t i = 0; i < arraysize(slice_param.RefPicList0); ++i) {
1108 InitVAPicture(&slice_param.RefPicList0[i]);
1109 InitVAPicture(&slice_param.RefPicList1[i]);
1110 }
1111
1112 for (size_t i = 0;
1113 i < ref_pic_list0.size() && i < arraysize(slice_param.RefPicList0);
1114 ++i) {
1115 if (ref_pic_list0[i])
1116 FillVAPicture(&slice_param.RefPicList0[i], ref_pic_list0[i]);
1117 }
1118 for (size_t i = 0;
1119 i < ref_pic_list1.size() && i < arraysize(slice_param.RefPicList1);
1120 ++i) {
1121 if (ref_pic_list1[i])
1122 FillVAPicture(&slice_param.RefPicList1[i], ref_pic_list1[i]);
1123 }
1124
1125 if (!vaapi_wrapper_->SubmitBuffer(VASliceParameterBufferType,
1126 sizeof(VASliceParameterBufferH264),
1127 &slice_param))
1128 return false;
1129
1130 // Can't help it, blame libva...
1131 void* non_const_ptr = const_cast<uint8*>(data);
1132 return vaapi_wrapper_->SubmitBuffer(VASliceDataBufferType, size,
1133 non_const_ptr);
1134 }
1135
1136 bool VaapiVideoDecodeAccelerator::VaapiH264Accelerator::SubmitDecode(
1137 const scoped_refptr<H264Picture>& pic) {
1138 DVLOG(4) << "Decoding POC " << pic->pic_order_cnt;
1139 scoped_refptr<VaapiDecodeSurface> dec_surface =
1140 H264PictureToVaapiDecodeSurface(pic);
1141
1142 return vaapi_dec_->DecodeSurface(dec_surface);
1143 }
1144
1145 bool VaapiVideoDecodeAccelerator::VaapiH264Accelerator::OutputPicture(
1146 const scoped_refptr<H264Picture>& pic) {
1147 scoped_refptr<VaapiDecodeSurface> dec_surface =
1148 H264PictureToVaapiDecodeSurface(pic);
1149
1150 vaapi_dec_->SurfaceReady(dec_surface);
1151
1152 return true;
1153 }
1154
1155 void VaapiVideoDecodeAccelerator::VaapiH264Accelerator::Reset() {
1156 vaapi_wrapper_->DestroyPendingBuffers();
1157 }
1158
1159 scoped_refptr<VaapiVideoDecodeAccelerator::VaapiDecodeSurface>
1160 VaapiVideoDecodeAccelerator::VaapiH264Accelerator::
1161 H264PictureToVaapiDecodeSurface(const scoped_refptr<H264Picture>& pic) {
1162 VaapiH264Picture* vaapi_pic = pic->AsVaapiH264Picture();
1163 CHECK(vaapi_pic);
1164 return vaapi_pic->dec_surface();
1165 }
1166
1167 void VaapiVideoDecodeAccelerator::VaapiH264Accelerator::FillVAPicture(
1168 VAPictureH264* va_pic,
1169 scoped_refptr<H264Picture> pic) {
1170 scoped_refptr<VaapiDecodeSurface> dec_surface =
1171 H264PictureToVaapiDecodeSurface(pic);
1172
1173 va_pic->picture_id = dec_surface->va_surface()->id();
1174 va_pic->frame_idx = pic->frame_num;
1175 va_pic->flags = 0;
1176
1177 switch (pic->field) {
1178 case H264Picture::FIELD_NONE:
1179 break;
1180 case H264Picture::FIELD_TOP:
1181 va_pic->flags |= VA_PICTURE_H264_TOP_FIELD;
1182 break;
1183 case H264Picture::FIELD_BOTTOM:
1184 va_pic->flags |= VA_PICTURE_H264_BOTTOM_FIELD;
1185 break;
1186 }
1187
1188 if (pic->ref) {
1189 va_pic->flags |= pic->long_term ? VA_PICTURE_H264_LONG_TERM_REFERENCE
1190 : VA_PICTURE_H264_SHORT_TERM_REFERENCE;
1191 }
1192
1193 va_pic->TopFieldOrderCnt = pic->top_field_order_cnt;
1194 va_pic->BottomFieldOrderCnt = pic->bottom_field_order_cnt;
1195 }
1196
1197 int VaapiVideoDecodeAccelerator::VaapiH264Accelerator::FillVARefFramesFromDPB(
1198 const H264DPB& dpb,
1199 VAPictureH264* va_pics,
1200 int num_pics) {
1201 H264Picture::Vector::const_reverse_iterator rit;
1202 int i;
1203
1204 // Return reference frames in reverse order of insertion.
1205 // Libva does not document this, but other implementations (e.g. mplayer)
1206 // do it this way as well.
1207 for (rit = dpb.rbegin(), i = 0; rit != dpb.rend() && i < num_pics; ++rit) {
1208 if ((*rit)->ref)
1209 FillVAPicture(&va_pics[i++], *rit);
1210 }
1211
1212 return i;
1213 }
1214
751 } // namespace content 1215 } // namespace content