OLD | NEW |
| (Empty) |
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | |
2 // Use of this source code is governed by a BSD-style license that can be | |
3 // found in the LICENSE file. | |
4 | |
5 #include "content/common/gpu/media/vaapi_video_decode_accelerator.h" | |
6 | |
7 #include <string.h> | |
8 | |
9 #include "base/bind.h" | |
10 #include "base/logging.h" | |
11 #include "base/macros.h" | |
12 #include "base/metrics/histogram.h" | |
13 #include "base/stl_util.h" | |
14 #include "base/strings/string_util.h" | |
15 #include "base/synchronization/waitable_event.h" | |
16 #include "base/trace_event/trace_event.h" | |
17 #include "content/common/gpu/media/accelerated_video_decoder.h" | |
18 #include "content/common/gpu/media/h264_decoder.h" | |
19 #include "content/common/gpu/media/vaapi_picture.h" | |
20 #include "content/common/gpu/media/vp8_decoder.h" | |
21 #include "content/common/gpu/media/vp9_decoder.h" | |
22 #include "gpu/ipc/service/gpu_channel.h" | |
23 #include "media/base/bind_to_current_loop.h" | |
24 #include "media/video/picture.h" | |
25 #include "third_party/libva/va/va_dec_vp8.h" | |
26 #include "ui/gl/gl_bindings.h" | |
27 #include "ui/gl/gl_image.h" | |
28 | |
29 namespace content { | |
30 | |
namespace {
// UMA errors that the VaapiVideoDecodeAccelerator class reports.
enum VAVDADecoderFailure {
  VAAPI_ERROR = 0,
  // UMA requires that max must be greater than 1.
  VAVDA_DECODER_FAILURES_MAX = 2,
};
}  // namespace
39 | |
40 static void ReportToUMA(VAVDADecoderFailure failure) { | |
41 UMA_HISTOGRAM_ENUMERATION("Media.VAVDA.DecoderFailure", failure, | |
42 VAVDA_DECODER_FAILURES_MAX); | |
43 } | |
44 | |
// If |cond| is false: logs |msg|, reports |error_code| to the client via
// NotifyError() and returns |return_value| from the calling function.
// |return_value| may be empty for functions returning void.
#define RETURN_AND_NOTIFY_ON_FAILURE(cond, msg, error_code, return_value) \
  do {                                                                    \
    if (!(cond)) {                                                        \
      LOG(ERROR) << msg;                                                  \
      NotifyError(error_code);                                            \
      return return_value;                                                \
    }                                                                     \
  } while (0)
53 | |
// Ties a VA surface to the id of the bitstream buffer it was decoded from,
// so the surface can be routed to the right output picture later.
// Ref-counted because it is shared between the decoder thread and the
// ChildThread message loop.
class VaapiVideoDecodeAccelerator::VaapiDecodeSurface
    : public base::RefCountedThreadSafe<VaapiDecodeSurface> {
 public:
  VaapiDecodeSurface(int32_t bitstream_id,
                     const scoped_refptr<VASurface>& va_surface);

  // Id of the bitstream buffer this surface was produced from.
  int32_t bitstream_id() const { return bitstream_id_; }
  // The underlying VA surface wrapper.
  scoped_refptr<VASurface> va_surface() { return va_surface_; }

 private:
  friend class base::RefCountedThreadSafe<VaapiDecodeSurface>;
  ~VaapiDecodeSurface();

  int32_t bitstream_id_;
  scoped_refptr<VASurface> va_surface_;
};
70 | |
// Out-of-line ctor/dtor; the class is fully described in its declaration.
VaapiVideoDecodeAccelerator::VaapiDecodeSurface::VaapiDecodeSurface(
    int32_t bitstream_id,
    const scoped_refptr<VASurface>& va_surface)
    : bitstream_id_(bitstream_id), va_surface_(va_surface) {}

VaapiVideoDecodeAccelerator::VaapiDecodeSurface::~VaapiDecodeSurface() {
}
78 | |
// H264Picture implementation that carries the VaapiDecodeSurface backing it,
// so the VAAPI accelerator can recover its surface from a generic
// H264Picture handed back by the decoder.
class VaapiH264Picture : public H264Picture {
 public:
  VaapiH264Picture(const scoped_refptr<
      VaapiVideoDecodeAccelerator::VaapiDecodeSurface>& dec_surface);

  VaapiH264Picture* AsVaapiH264Picture() override { return this; }
  // The decode surface this picture is backed by.
  scoped_refptr<VaapiVideoDecodeAccelerator::VaapiDecodeSurface> dec_surface() {
    return dec_surface_;
  }

 private:
  ~VaapiH264Picture() override;

  scoped_refptr<VaapiVideoDecodeAccelerator::VaapiDecodeSurface> dec_surface_;

  DISALLOW_COPY_AND_ASSIGN(VaapiH264Picture);
};
96 | |
97 VaapiH264Picture::VaapiH264Picture(const scoped_refptr< | |
98 VaapiVideoDecodeAccelerator::VaapiDecodeSurface>& dec_surface) | |
99 : dec_surface_(dec_surface) { | |
100 } | |
101 | |
102 VaapiH264Picture::~VaapiH264Picture() { | |
103 } | |
104 | |
// H.264 hardware accelerator backend: translates H264Decoder requests into
// VA-API buffer submissions via |vaapi_wrapper_| and reports outputs back to
// |vaapi_dec_|. Both raw pointers are non-owning; their owner
// (VaapiVideoDecodeAccelerator) outlives this object.
class VaapiVideoDecodeAccelerator::VaapiH264Accelerator
    : public H264Decoder::H264Accelerator {
 public:
  VaapiH264Accelerator(VaapiVideoDecodeAccelerator* vaapi_dec,
                       VaapiWrapper* vaapi_wrapper);
  ~VaapiH264Accelerator() override;

  // H264Decoder::H264Accelerator implementation.
  scoped_refptr<H264Picture> CreateH264Picture() override;

  bool SubmitFrameMetadata(const media::H264SPS* sps,
                           const media::H264PPS* pps,
                           const H264DPB& dpb,
                           const H264Picture::Vector& ref_pic_listp0,
                           const H264Picture::Vector& ref_pic_listb0,
                           const H264Picture::Vector& ref_pic_listb1,
                           const scoped_refptr<H264Picture>& pic) override;

  bool SubmitSlice(const media::H264PPS* pps,
                   const media::H264SliceHeader* slice_hdr,
                   const H264Picture::Vector& ref_pic_list0,
                   const H264Picture::Vector& ref_pic_list1,
                   const scoped_refptr<H264Picture>& pic,
                   const uint8_t* data,
                   size_t size) override;

  bool SubmitDecode(const scoped_refptr<H264Picture>& pic) override;
  bool OutputPicture(const scoped_refptr<H264Picture>& pic) override;

  void Reset() override;

 private:
  // Extracts the VaapiDecodeSurface carried by a VaapiH264Picture.
  scoped_refptr<VaapiDecodeSurface> H264PictureToVaapiDecodeSurface(
      const scoped_refptr<H264Picture>& pic);

  // Helpers filling libva picture/reference-frame structures from decoder
  // state.
  void FillVAPicture(VAPictureH264* va_pic, scoped_refptr<H264Picture> pic);
  int FillVARefFramesFromDPB(const H264DPB& dpb,
                             VAPictureH264* va_pics,
                             int num_pics);

  VaapiWrapper* vaapi_wrapper_;
  VaapiVideoDecodeAccelerator* vaapi_dec_;

  DISALLOW_COPY_AND_ASSIGN(VaapiH264Accelerator);
};
150 | |
// VP8Picture implementation that carries the VaapiDecodeSurface backing it;
// VP8 counterpart of VaapiH264Picture above.
class VaapiVP8Picture : public VP8Picture {
 public:
  VaapiVP8Picture(const scoped_refptr<
      VaapiVideoDecodeAccelerator::VaapiDecodeSurface>& dec_surface);

  VaapiVP8Picture* AsVaapiVP8Picture() override { return this; }
  // The decode surface this picture is backed by.
  scoped_refptr<VaapiVideoDecodeAccelerator::VaapiDecodeSurface> dec_surface() {
    return dec_surface_;
  }

 private:
  ~VaapiVP8Picture() override;

  scoped_refptr<VaapiVideoDecodeAccelerator::VaapiDecodeSurface> dec_surface_;

  DISALLOW_COPY_AND_ASSIGN(VaapiVP8Picture);
};
168 | |
169 VaapiVP8Picture::VaapiVP8Picture(const scoped_refptr< | |
170 VaapiVideoDecodeAccelerator::VaapiDecodeSurface>& dec_surface) | |
171 : dec_surface_(dec_surface) { | |
172 } | |
173 | |
174 VaapiVP8Picture::~VaapiVP8Picture() { | |
175 } | |
176 | |
// VP8 hardware accelerator backend: translates VP8Decoder requests into
// VA-API submissions via |vaapi_wrapper_|. Raw pointers are non-owning; the
// owning VaapiVideoDecodeAccelerator outlives this object.
class VaapiVideoDecodeAccelerator::VaapiVP8Accelerator
    : public VP8Decoder::VP8Accelerator {
 public:
  VaapiVP8Accelerator(VaapiVideoDecodeAccelerator* vaapi_dec,
                      VaapiWrapper* vaapi_wrapper);
  ~VaapiVP8Accelerator() override;

  // VP8Decoder::VP8Accelerator implementation.
  scoped_refptr<VP8Picture> CreateVP8Picture() override;

  bool SubmitDecode(const scoped_refptr<VP8Picture>& pic,
                    const media::Vp8FrameHeader* frame_hdr,
                    const scoped_refptr<VP8Picture>& last_frame,
                    const scoped_refptr<VP8Picture>& golden_frame,
                    const scoped_refptr<VP8Picture>& alt_frame) override;

  bool OutputPicture(const scoped_refptr<VP8Picture>& pic) override;

 private:
  // Extracts the VaapiDecodeSurface carried by a VaapiVP8Picture.
  scoped_refptr<VaapiDecodeSurface> VP8PictureToVaapiDecodeSurface(
      const scoped_refptr<VP8Picture>& pic);

  VaapiWrapper* vaapi_wrapper_;
  VaapiVideoDecodeAccelerator* vaapi_dec_;

  DISALLOW_COPY_AND_ASSIGN(VaapiVP8Accelerator);
};
204 | |
// VP9Picture implementation that carries the VaapiDecodeSurface backing it;
// VP9 counterpart of VaapiH264Picture above.
class VaapiVP9Picture : public VP9Picture {
 public:
  VaapiVP9Picture(
      const scoped_refptr<VaapiVideoDecodeAccelerator::VaapiDecodeSurface>&
          dec_surface);

  VaapiVP9Picture* AsVaapiVP9Picture() override { return this; }
  // The decode surface this picture is backed by.
  scoped_refptr<VaapiVideoDecodeAccelerator::VaapiDecodeSurface> dec_surface() {
    return dec_surface_;
  }

 private:
  ~VaapiVP9Picture() override;

  scoped_refptr<VaapiVideoDecodeAccelerator::VaapiDecodeSurface> dec_surface_;

  DISALLOW_COPY_AND_ASSIGN(VaapiVP9Picture);
};
223 | |
// Out-of-line ctor/dtor; the class is fully described in its declaration.
VaapiVP9Picture::VaapiVP9Picture(
    const scoped_refptr<VaapiVideoDecodeAccelerator::VaapiDecodeSurface>&
        dec_surface)
    : dec_surface_(dec_surface) {}

VaapiVP9Picture::~VaapiVP9Picture() {}
230 | |
// VP9 hardware accelerator backend: translates VP9Decoder requests into
// VA-API submissions via |vaapi_wrapper_|. Raw pointers are non-owning; the
// owning VaapiVideoDecodeAccelerator outlives this object.
class VaapiVideoDecodeAccelerator::VaapiVP9Accelerator
    : public VP9Decoder::VP9Accelerator {
 public:
  VaapiVP9Accelerator(VaapiVideoDecodeAccelerator* vaapi_dec,
                      VaapiWrapper* vaapi_wrapper);
  ~VaapiVP9Accelerator() override;

  // VP9Decoder::VP9Accelerator implementation.
  scoped_refptr<VP9Picture> CreateVP9Picture() override;

  bool SubmitDecode(
      const scoped_refptr<VP9Picture>& pic,
      const media::Vp9Segmentation& seg,
      const media::Vp9LoopFilter& lf,
      const std::vector<scoped_refptr<VP9Picture>>& ref_pictures) override;

  bool OutputPicture(const scoped_refptr<VP9Picture>& pic) override;

 private:
  // Extracts the VaapiDecodeSurface carried by a VaapiVP9Picture.
  scoped_refptr<VaapiDecodeSurface> VP9PictureToVaapiDecodeSurface(
      const scoped_refptr<VP9Picture>& pic);

  VaapiWrapper* vaapi_wrapper_;
  VaapiVideoDecodeAccelerator* vaapi_dec_;

  DISALLOW_COPY_AND_ASSIGN(VaapiVP9Accelerator);
};
258 | |
// InputBuffer starts out with an invalid/default id of 0; the real bitstream
// buffer id is assigned in MapAndQueueNewInputBuffer().
VaapiVideoDecodeAccelerator::InputBuffer::InputBuffer() : id(0) {}

VaapiVideoDecodeAccelerator::InputBuffer::~InputBuffer() {
}
263 | |
// Reports |error| to the client and schedules teardown. Safe to call from
// the decoder thread: if not on |message_loop_|, re-posts itself there and
// returns.
void VaapiVideoDecodeAccelerator::NotifyError(Error error) {
  if (message_loop_ != base::MessageLoop::current()) {
    // Only the decoder thread is expected to call this off the main loop.
    DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
    message_loop_->PostTask(FROM_HERE, base::Bind(
        &VaapiVideoDecodeAccelerator::NotifyError, weak_this_, error));
    return;
  }

  // Post Cleanup() as a task so we don't recursively acquire lock_.
  message_loop_->PostTask(FROM_HERE, base::Bind(
      &VaapiVideoDecodeAccelerator::Cleanup, weak_this_));

  LOG(ERROR) << "Notifying of error " << error;
  if (client_) {
    client_->NotifyError(error);
    // Invalidate client weak pointers so no further callbacks reach it.
    client_ptr_factory_.reset();
  }
}
282 | |
283 VaapiPicture* VaapiVideoDecodeAccelerator::PictureById( | |
284 int32_t picture_buffer_id) { | |
285 Pictures::iterator it = pictures_.find(picture_buffer_id); | |
286 if (it == pictures_.end()) { | |
287 LOG(ERROR) << "Picture id " << picture_buffer_id << " does not exist"; | |
288 return NULL; | |
289 } | |
290 | |
291 return it->second.get(); | |
292 } | |
293 | |
// Constructed on the ChildThread message loop; decoding itself happens on
// |decoder_thread_|, started later in Initialize().
VaapiVideoDecodeAccelerator::VaapiVideoDecodeAccelerator(
    const MakeGLContextCurrentCallback& make_context_current_cb,
    const BindGLImageCallback& bind_image_cb)
    : state_(kUninitialized),
      input_ready_(&lock_),
      surfaces_available_(&lock_),
      message_loop_(base::MessageLoop::current()),
      decoder_thread_("VaapiDecoderThread"),
      num_frames_at_client_(0),
      num_stream_bufs_at_decoder_(0),
      finish_flush_pending_(false),
      awaiting_va_surfaces_recycle_(false),
      requested_num_pics_(0),
      make_context_current_cb_(make_context_current_cb),
      bind_image_cb_(bind_image_cb),
      weak_this_factory_(this) {
  weak_this_ = weak_this_factory_.GetWeakPtr();
  // Surface release callbacks may fire on any thread; bounce them back to
  // this (construction) loop.
  va_surface_release_cb_ = media::BindToCurrentLoop(
      base::Bind(&VaapiVideoDecodeAccelerator::RecycleVASurfaceID, weak_this_));
}
314 | |
// Must be destroyed on the same message loop it was constructed on.
VaapiVideoDecodeAccelerator::~VaapiVideoDecodeAccelerator() {
  DCHECK_EQ(message_loop_, base::MessageLoop::current());
}
318 | |
// Initializes the decoder for |config.profile|: validates GL support, creates
// the VaapiWrapper and the codec-specific accelerator/decoder pair, and
// starts the decoder thread. Returns false on any unsupported configuration.
bool VaapiVideoDecodeAccelerator::Initialize(const Config& config,
                                             Client* client) {
  DCHECK_EQ(message_loop_, base::MessageLoop::current());

  if (make_context_current_cb_.is_null() || bind_image_cb_.is_null()) {
    NOTREACHED() << "GL callbacks are required for this VDA";
    return false;
  }

  if (config.is_encrypted) {
    NOTREACHED() << "Encrypted streams are not supported for this VDA";
    return false;
  }

  client_ptr_factory_.reset(new base::WeakPtrFactory<Client>(client));
  client_ = client_ptr_factory_->GetWeakPtr();

  media::VideoCodecProfile profile = config.profile;

  base::AutoLock auto_lock(lock_);
  DCHECK_EQ(state_, kUninitialized);
  DVLOG(2) << "Initializing VAVDA, profile: " << profile;

  // Each platform backend requires a specific GL implementation.
#if defined(USE_X11)
  if (gfx::GetGLImplementation() != gfx::kGLImplementationDesktopGL) {
    DVLOG(1) << "HW video decode acceleration not available without "
        "DesktopGL (GLX).";
    return false;
  }
#elif defined(USE_OZONE)
  if (gfx::GetGLImplementation() != gfx::kGLImplementationEGLGLES2) {
    DVLOG(1) << "HW video decode acceleration not available without "
             << "EGLGLES2.";
    return false;
  }
#endif  // defined(USE_X11) / defined(USE_OZONE)

  vaapi_wrapper_ = VaapiWrapper::CreateForVideoCodec(
      VaapiWrapper::kDecode, profile, base::Bind(&ReportToUMA, VAAPI_ERROR));

  if (!vaapi_wrapper_.get()) {
    DVLOG(1) << "Failed initializing VAAPI for profile " << profile;
    return false;
  }

  // Pick the accelerator/decoder pair matching the codec of |profile|.
  if (profile >= media::H264PROFILE_MIN && profile <= media::H264PROFILE_MAX) {
    h264_accelerator_.reset(
        new VaapiH264Accelerator(this, vaapi_wrapper_.get()));
    decoder_.reset(new H264Decoder(h264_accelerator_.get()));
  } else if (profile >= media::VP8PROFILE_MIN &&
             profile <= media::VP8PROFILE_MAX) {
    vp8_accelerator_.reset(new VaapiVP8Accelerator(this, vaapi_wrapper_.get()));
    decoder_.reset(new VP8Decoder(vp8_accelerator_.get()));
  } else if (profile >= media::VP9PROFILE_MIN &&
             profile <= media::VP9PROFILE_MAX) {
    vp9_accelerator_.reset(new VaapiVP9Accelerator(this, vaapi_wrapper_.get()));
    decoder_.reset(new VP9Decoder(vp9_accelerator_.get()));
  } else {
    DLOG(ERROR) << "Unsupported profile " << profile;
    return false;
  }

  CHECK(decoder_thread_.Start());
  decoder_thread_task_runner_ = decoder_thread_.task_runner();

  state_ = kIdle;
  return true;
}
387 | |
// Downloads the decoded |va_surface| into |picture|'s texture and notifies
// the client that the picture buffer is ready for display.
void VaapiVideoDecodeAccelerator::OutputPicture(
    const scoped_refptr<VASurface>& va_surface,
    int32_t input_id,
    VaapiPicture* picture) {
  DCHECK_EQ(message_loop_, base::MessageLoop::current());

  int32_t output_id = picture->picture_buffer_id();

  TRACE_EVENT2("Video Decoder", "VAVDA::OutputSurface",
               "input_id", input_id,
               "output_id", output_id);

  DVLOG(3) << "Outputting VASurface " << va_surface->id()
           << " into pixmap bound to picture buffer id " << output_id;

  RETURN_AND_NOTIFY_ON_FAILURE(picture->DownloadFromSurface(va_surface),
                               "Failed putting surface into pixmap",
                               PLATFORM_FAILURE, );

  // Notify the client a picture is ready to be displayed.
  ++num_frames_at_client_;
  TRACE_COUNTER1("Video Decoder", "Textures at client", num_frames_at_client_);
  DVLOG(4) << "Notifying output picture id " << output_id
           << " for input "<< input_id << " is ready";
  // TODO(posciak): Use visible size from decoder here instead
  // (crbug.com/402760). Passing (0, 0) results in the client using the
  // visible size extracted from the container instead.
  if (client_)
    client_->PictureReady(media::Picture(output_id, input_id,
                                         gfx::Rect(0, 0),
                                         picture->AllowOverlay()));
}
420 | |
421 void VaapiVideoDecodeAccelerator::TryOutputSurface() { | |
422 DCHECK_EQ(message_loop_, base::MessageLoop::current()); | |
423 | |
424 // Handle Destroy() arriving while pictures are queued for output. | |
425 if (!client_) | |
426 return; | |
427 | |
428 if (pending_output_cbs_.empty() || output_buffers_.empty()) | |
429 return; | |
430 | |
431 OutputCB output_cb = pending_output_cbs_.front(); | |
432 pending_output_cbs_.pop(); | |
433 | |
434 VaapiPicture* picture = PictureById(output_buffers_.front()); | |
435 DCHECK(picture); | |
436 output_buffers_.pop(); | |
437 | |
438 output_cb.Run(picture); | |
439 | |
440 if (finish_flush_pending_ && pending_output_cbs_.empty()) | |
441 FinishFlush(); | |
442 } | |
443 | |
444 void VaapiVideoDecodeAccelerator::MapAndQueueNewInputBuffer( | |
445 const media::BitstreamBuffer& bitstream_buffer) { | |
446 DCHECK_EQ(message_loop_, base::MessageLoop::current()); | |
447 TRACE_EVENT1("Video Decoder", "MapAndQueueNewInputBuffer", "input_id", | |
448 bitstream_buffer.id()); | |
449 | |
450 DVLOG(4) << "Mapping new input buffer id: " << bitstream_buffer.id() | |
451 << " size: " << (int)bitstream_buffer.size(); | |
452 | |
453 std::unique_ptr<SharedMemoryRegion> shm( | |
454 new SharedMemoryRegion(bitstream_buffer, true)); | |
455 RETURN_AND_NOTIFY_ON_FAILURE(shm->Map(), "Failed to map input buffer", | |
456 UNREADABLE_INPUT, ); | |
457 | |
458 base::AutoLock auto_lock(lock_); | |
459 | |
460 // Set up a new input buffer and queue it for later. | |
461 linked_ptr<InputBuffer> input_buffer(new InputBuffer()); | |
462 input_buffer->shm.reset(shm.release()); | |
463 input_buffer->id = bitstream_buffer.id(); | |
464 | |
465 ++num_stream_bufs_at_decoder_; | |
466 TRACE_COUNTER1("Video Decoder", "Stream buffers at decoder", | |
467 num_stream_bufs_at_decoder_); | |
468 | |
469 input_buffers_.push(input_buffer); | |
470 input_ready_.Signal(); | |
471 } | |
472 | |
// Dequeues the next input buffer into |curr_input_buffer_| and hands its
// contents to the decoder. Blocks (releasing no lock; |lock_| is held by the
// condition variable wait) until input arrives or the state changes. Returns
// false when no more input should be processed in the current state.
bool VaapiVideoDecodeAccelerator::GetInputBuffer_Locked() {
  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
  lock_.AssertAcquired();

  // Already have a buffer in progress; keep using it.
  if (curr_input_buffer_.get())
    return true;

  // Will only wait if it is expected that in current state new buffers will
  // be queued from the client via Decode(). The state can change during wait.
  while (input_buffers_.empty() && (state_ == kDecoding || state_ == kIdle)) {
    input_ready_.Wait();
  }

  // We could have got woken up in a different state or never got to sleep
  // due to current state; check for that.
  switch (state_) {
    case kFlushing:
      // Here we are only interested in finishing up decoding buffers that are
      // already queued up. Otherwise will stop decoding.
      if (input_buffers_.empty())
        return false;
      // else fallthrough
    case kDecoding:
    case kIdle:
      DCHECK(!input_buffers_.empty());

      curr_input_buffer_ = input_buffers_.front();
      input_buffers_.pop();

      DVLOG(4) << "New current bitstream buffer, id: " << curr_input_buffer_->id
               << " size: " << curr_input_buffer_->shm->size();

      // Point the codec-specific decoder at the new buffer's contents.
      decoder_->SetStream(
          static_cast<uint8_t*>(curr_input_buffer_->shm->memory()),
          curr_input_buffer_->shm->size());
      return true;

    default:
      // We got woken up due to being destroyed/reset, ignore any already
      // queued inputs.
      return false;
  }
}
516 | |
// Releases |curr_input_buffer_| and notifies the client (on the main loop)
// that its bitstream buffer has been consumed.
void VaapiVideoDecodeAccelerator::ReturnCurrInputBuffer_Locked() {
  lock_.AssertAcquired();
  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
  DCHECK(curr_input_buffer_.get());

  // Capture the id before resetting; unmapping happens via InputBuffer dtor.
  int32_t id = curr_input_buffer_->id;
  curr_input_buffer_.reset();
  DVLOG(4) << "End of input buffer " << id;
  message_loop_->PostTask(FROM_HERE, base::Bind(
      &Client::NotifyEndOfBitstreamBuffer, client_, id));

  --num_stream_bufs_at_decoder_;
  TRACE_COUNTER1("Video Decoder", "Stream buffers at decoder",
                 num_stream_bufs_at_decoder_);
}
532 | |
533 // TODO(posciak): refactor the whole class to remove sleeping in wait for | |
534 // surfaces, and reschedule DecodeTask instead. | |
535 bool VaapiVideoDecodeAccelerator::WaitForSurfaces_Locked() { | |
536 lock_.AssertAcquired(); | |
537 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread()); | |
538 | |
539 while (available_va_surfaces_.empty() && | |
540 (state_ == kDecoding || state_ == kFlushing || state_ == kIdle)) { | |
541 surfaces_available_.Wait(); | |
542 } | |
543 | |
544 if (state_ != kDecoding && state_ != kFlushing && state_ != kIdle) | |
545 return false; | |
546 | |
547 return true; | |
548 } | |
549 | |
// Main decoding loop, run on the decoder thread: feeds queued input buffers
// to the codec-specific decoder until input runs out, surfaces run out, a
// surface set change is requested, or an error occurs.
void VaapiVideoDecodeAccelerator::DecodeTask() {
  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
  TRACE_EVENT0("Video Decoder", "VAVDA::DecodeTask");
  base::AutoLock auto_lock(lock_);

  if (state_ != kDecoding)
    return;

  // Main decode task.
  DVLOG(4) << "Decode task";

  // Try to decode what stream data is (still) in the decoder until we run out
  // of it.
  while (GetInputBuffer_Locked()) {
    DCHECK(curr_input_buffer_.get());

    AcceleratedVideoDecoder::DecodeResult res;
    {
      // We are OK releasing the lock here, as decoder never calls our methods
      // directly and we will reacquire the lock before looking at state again.
      // This is the main decode function of the decoder and while keeping
      // the lock for its duration would be fine, it would defeat the purpose
      // of having a separate decoder thread.
      base::AutoUnlock auto_unlock(lock_);
      res = decoder_->Decode();
    }

    switch (res) {
      case AcceleratedVideoDecoder::kAllocateNewSurfaces:
        DVLOG(1) << "Decoder requesting a new set of surfaces";
        message_loop_->PostTask(FROM_HERE, base::Bind(
            &VaapiVideoDecodeAccelerator::InitiateSurfaceSetChange, weak_this_,
            decoder_->GetRequiredNumOfPictures(),
            decoder_->GetPicSize()));
        // We'll get rescheduled once ProvidePictureBuffers() finishes.
        return;

      case AcceleratedVideoDecoder::kRanOutOfStreamData:
        ReturnCurrInputBuffer_Locked();
        break;

      case AcceleratedVideoDecoder::kRanOutOfSurfaces:
        // No more output buffers in the decoder, try getting more or go to
        // sleep waiting for them.
        if (!WaitForSurfaces_Locked())
          return;

        break;

      case AcceleratedVideoDecoder::kDecodeError:
        RETURN_AND_NOTIFY_ON_FAILURE(false, "Error decoding stream",
                                     PLATFORM_FAILURE, );
        return;
    }
  }
}
606 | |
// Begins replacing the current surface set with |num_pics| surfaces of
// |size|, first waiting (via TryFinishSurfaceSetChange) for all outstanding
// surfaces to be recycled.
void VaapiVideoDecodeAccelerator::InitiateSurfaceSetChange(size_t num_pics,
                                                           gfx::Size size) {
  DCHECK_EQ(message_loop_, base::MessageLoop::current());
  DCHECK(!awaiting_va_surfaces_recycle_);

  // At this point decoder has stopped running and has already posted onto our
  // loop any remaining output request callbacks, which executed before we got
  // here. Some of them might have been pended though, because we might not
  // have had enough TFPictures to output surfaces to. Initiate a wait cycle,
  // which will wait for client to return enough PictureBuffers to us, so that
  // we can finish all pending output callbacks, releasing associated surfaces.
  DVLOG(1) << "Initiating surface set change";
  awaiting_va_surfaces_recycle_ = true;

  requested_num_pics_ = num_pics;
  requested_pic_size_ = size;

  TryFinishSurfaceSetChange();
}
626 | |
627 void VaapiVideoDecodeAccelerator::TryFinishSurfaceSetChange() { | |
628 DCHECK_EQ(message_loop_, base::MessageLoop::current()); | |
629 | |
630 if (!awaiting_va_surfaces_recycle_) | |
631 return; | |
632 | |
633 if (!pending_output_cbs_.empty() || | |
634 pictures_.size() != available_va_surfaces_.size()) { | |
635 // Either: | |
636 // 1. Not all pending pending output callbacks have been executed yet. | |
637 // Wait for the client to return enough pictures and retry later. | |
638 // 2. The above happened and all surface release callbacks have been posted | |
639 // as the result, but not all have executed yet. Post ourselves after them | |
640 // to let them release surfaces. | |
641 DVLOG(2) << "Awaiting pending output/surface release callbacks to finish"; | |
642 message_loop_->PostTask(FROM_HERE, base::Bind( | |
643 &VaapiVideoDecodeAccelerator::TryFinishSurfaceSetChange, weak_this_)); | |
644 return; | |
645 } | |
646 | |
647 // All surfaces released, destroy them and dismiss all PictureBuffers. | |
648 awaiting_va_surfaces_recycle_ = false; | |
649 available_va_surfaces_.clear(); | |
650 vaapi_wrapper_->DestroySurfaces(); | |
651 | |
652 for (Pictures::iterator iter = pictures_.begin(); iter != pictures_.end(); | |
653 ++iter) { | |
654 DVLOG(2) << "Dismissing picture id: " << iter->first; | |
655 if (client_) | |
656 client_->DismissPictureBuffer(iter->first); | |
657 } | |
658 pictures_.clear(); | |
659 | |
660 // And ask for a new set as requested. | |
661 DVLOG(1) << "Requesting " << requested_num_pics_ << " pictures of size: " | |
662 << requested_pic_size_.ToString(); | |
663 | |
664 message_loop_->PostTask( | |
665 FROM_HERE, | |
666 base::Bind(&Client::ProvidePictureBuffers, client_, requested_num_pics_, | |
667 1, requested_pic_size_, VaapiPicture::GetGLTextureTarget())); | |
668 } | |
669 | |
// Client entry point: validates and queues |bitstream_buffer|, kicking the
// decoder thread if it was idle.
void VaapiVideoDecodeAccelerator::Decode(
    const media::BitstreamBuffer& bitstream_buffer) {
  DCHECK_EQ(message_loop_, base::MessageLoop::current());

  TRACE_EVENT1("Video Decoder", "VAVDA::Decode", "Buffer id",
               bitstream_buffer.id());

  RETURN_AND_NOTIFY_ON_FAILURE(
      bitstream_buffer.id() >= 0 &&
          base::SharedMemory::IsHandleValid(bitstream_buffer.handle()),
      "Invalid bitstream_buffer, id: " << bitstream_buffer.id(),
      INVALID_ARGUMENT, );

  // We got a new input buffer from the client, map it and queue for later use.
  MapAndQueueNewInputBuffer(bitstream_buffer);

  base::AutoLock auto_lock(lock_);
  switch (state_) {
    case kIdle:
      // First buffer while idle: start a decode pass on the decoder thread.
      state_ = kDecoding;
      decoder_thread_task_runner_->PostTask(
          FROM_HERE, base::Bind(&VaapiVideoDecodeAccelerator::DecodeTask,
                                base::Unretained(this)));
      break;

    case kDecoding:
      // Decoder already running, fallthrough.
    case kResetting:
      // When resetting, allow accumulating bitstream buffers, so that
      // the client can queue after-seek-buffers while we are finishing with
      // the before-seek one.
      break;

    default:
      RETURN_AND_NOTIFY_ON_FAILURE(false,
          "Decode request from client in invalid state: " << state_,
          PLATFORM_FAILURE, );
      break;
  }
}
710 | |
// Returns |va_surface_id| to the free pool and wakes the decoder thread,
// which may be blocked in WaitForSurfaces_Locked().
void VaapiVideoDecodeAccelerator::RecycleVASurfaceID(
    VASurfaceID va_surface_id) {
  DCHECK_EQ(message_loop_, base::MessageLoop::current());
  base::AutoLock auto_lock(lock_);

  available_va_surfaces_.push_back(va_surface_id);
  surfaces_available_.Signal();
}
719 | |
720 void VaapiVideoDecodeAccelerator::AssignPictureBuffers( | |
721 const std::vector<media::PictureBuffer>& buffers) { | |
722 DCHECK_EQ(message_loop_, base::MessageLoop::current()); | |
723 | |
724 base::AutoLock auto_lock(lock_); | |
725 DCHECK(pictures_.empty()); | |
726 | |
727 while (!output_buffers_.empty()) | |
728 output_buffers_.pop(); | |
729 | |
730 RETURN_AND_NOTIFY_ON_FAILURE( | |
731 buffers.size() >= requested_num_pics_, | |
732 "Got an invalid number of picture buffers. (Got " << buffers.size() | |
733 << ", requested " << requested_num_pics_ << ")", INVALID_ARGUMENT, ); | |
734 DCHECK(requested_pic_size_ == buffers[0].size()); | |
735 | |
736 std::vector<VASurfaceID> va_surface_ids; | |
737 RETURN_AND_NOTIFY_ON_FAILURE( | |
738 vaapi_wrapper_->CreateSurfaces(VA_RT_FORMAT_YUV420, requested_pic_size_, | |
739 buffers.size(), &va_surface_ids), | |
740 "Failed creating VA Surfaces", PLATFORM_FAILURE, ); | |
741 DCHECK_EQ(va_surface_ids.size(), buffers.size()); | |
742 | |
743 for (size_t i = 0; i < buffers.size(); ++i) { | |
744 DCHECK_LE(1u, buffers[i].texture_ids().size()); | |
745 DVLOG(2) << "Assigning picture id: " << buffers[i].id() | |
746 << " to texture id: " << buffers[i].texture_ids()[0] | |
747 << " VASurfaceID: " << va_surface_ids[i]; | |
748 | |
749 linked_ptr<VaapiPicture> picture(VaapiPicture::CreatePicture( | |
750 vaapi_wrapper_, make_context_current_cb_, buffers[i].id(), | |
751 buffers[i].texture_ids()[0], requested_pic_size_)); | |
752 | |
753 scoped_refptr<gl::GLImage> image = picture->GetImageToBind(); | |
754 if (image) { | |
755 DCHECK_LE(1u, buffers[i].internal_texture_ids().size()); | |
756 RETURN_AND_NOTIFY_ON_FAILURE( | |
757 bind_image_cb_.Run(buffers[i].internal_texture_ids()[0], | |
758 VaapiPicture::GetGLTextureTarget(), image, true), | |
759 "Failed to bind image", PLATFORM_FAILURE, ); | |
760 } | |
761 | |
762 RETURN_AND_NOTIFY_ON_FAILURE( | |
763 picture.get(), "Failed assigning picture buffer to a texture.", | |
764 PLATFORM_FAILURE, ); | |
765 | |
766 bool inserted = | |
767 pictures_.insert(std::make_pair(buffers[i].id(), picture)).second; | |
768 DCHECK(inserted); | |
769 | |
770 output_buffers_.push(buffers[i].id()); | |
771 available_va_surfaces_.push_back(va_surface_ids[i]); | |
772 surfaces_available_.Signal(); | |
773 } | |
774 | |
775 state_ = kDecoding; | |
776 decoder_thread_task_runner_->PostTask( | |
777 FROM_HERE, base::Bind(&VaapiVideoDecodeAccelerator::DecodeTask, | |
778 base::Unretained(this))); | |
779 } | |
780 | |
// Client has released |picture_buffer_id|; return it to the output pool and
// try to satisfy any pending output callbacks with it.
void VaapiVideoDecodeAccelerator::ReusePictureBuffer(
    int32_t picture_buffer_id) {
  DCHECK_EQ(message_loop_, base::MessageLoop::current());
  TRACE_EVENT1("Video Decoder", "VAVDA::ReusePictureBuffer", "Picture id",
               picture_buffer_id);

  --num_frames_at_client_;
  TRACE_COUNTER1("Video Decoder", "Textures at client", num_frames_at_client_);

  output_buffers_.push(picture_buffer_id);
  TryOutputSurface();
}
793 | |
794 void VaapiVideoDecodeAccelerator::FlushTask() { | |
795 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread()); | |
796 DVLOG(1) << "Flush task"; | |
797 | |
798 // First flush all the pictures that haven't been outputted, notifying the | |
799 // client to output them. | |
800 bool res = decoder_->Flush(); | |
801 RETURN_AND_NOTIFY_ON_FAILURE(res, "Failed flushing the decoder.", | |
802 PLATFORM_FAILURE, ); | |
803 | |
804 // Put the decoder in idle state, ready to resume. | |
805 decoder_->Reset(); | |
806 | |
807 message_loop_->PostTask(FROM_HERE, base::Bind( | |
808 &VaapiVideoDecodeAccelerator::FinishFlush, weak_this_)); | |
809 } | |
810 | |
// Client entry point for flushing: marks the flushing state and queues a
// FlushTask behind all already-scheduled decode work.
void VaapiVideoDecodeAccelerator::Flush() {
  DCHECK_EQ(message_loop_, base::MessageLoop::current());
  DVLOG(1) << "Got flush request";

  base::AutoLock auto_lock(lock_);
  state_ = kFlushing;
  // Queue a flush task after all existing decoding tasks to clean up.
  decoder_thread_task_runner_->PostTask(
      FROM_HERE, base::Bind(&VaapiVideoDecodeAccelerator::FlushTask,
                            base::Unretained(this)));

  // Wake the decoder thread if it is blocked waiting for input or surfaces,
  // so it can observe the state change.
  input_ready_.Signal();
  surfaces_available_.Signal();
}
825 | |
// Main-loop completion of a flush: once all pending outputs have drained,
// returns to kIdle and notifies the client. If outputs are still pending,
// arms |finish_flush_pending_| so TryOutputSurface() retries later.
void VaapiVideoDecodeAccelerator::FinishFlush() {
  DCHECK_EQ(message_loop_, base::MessageLoop::current());

  // NOTE(review): written before acquiring lock_; appears to be touched only
  // on this thread and in Reset() under the lock -- confirm intended.
  finish_flush_pending_ = false;

  base::AutoLock auto_lock(lock_);
  if (state_ != kFlushing) {
    DCHECK_EQ(state_, kDestroying);
    return;  // We could've gotten destroyed already.
  }

  // Still waiting for textures from client to finish outputting all pending
  // frames. Try again later.
  if (!pending_output_cbs_.empty()) {
    finish_flush_pending_ = true;
    return;
  }

  state_ = kIdle;

  message_loop_->PostTask(FROM_HERE, base::Bind(
      &Client::NotifyFlushDone, client_));

  DVLOG(1) << "Flush finished";
}
851 | |
// Runs on the decoder thread, after all decode tasks queued before the
// client's Reset() call have finished. Resets the decoder, returns the
// in-flight input buffer (if any) to the client, and hands control back to
// the message loop thread via FinishReset().
void VaapiVideoDecodeAccelerator::ResetTask() {
  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
  DVLOG(1) << "ResetTask";

  // All the decoding tasks from before the reset request from client are done
  // by now, as this task was scheduled after them and client is expected not
  // to call Decode() after Reset() and before NotifyResetDone.
  decoder_->Reset();

  base::AutoLock auto_lock(lock_);

  // Return current input buffer, if present.
  if (curr_input_buffer_.get())
    ReturnCurrInputBuffer_Locked();

  // And let client know that we are done with reset.
  message_loop_->PostTask(FROM_HERE, base::Bind(
      &VaapiVideoDecodeAccelerator::FinishReset, weak_this_));
}
871 | |
// Client-facing reset request; runs on the message loop thread. Moves to
// kResetting, returns every queued input buffer to the client, and queues
// ResetTask() behind outstanding decoder-thread work.
void VaapiVideoDecodeAccelerator::Reset() {
  DCHECK_EQ(message_loop_, base::MessageLoop::current());
  DVLOG(1) << "Got reset request";

  // This will make any new decode tasks exit early.
  base::AutoLock auto_lock(lock_);
  state_ = kResetting;
  finish_flush_pending_ = false;

  // Drop all remaining input buffers, if present.
  while (!input_buffers_.empty()) {
    message_loop_->PostTask(FROM_HERE, base::Bind(
        &Client::NotifyEndOfBitstreamBuffer, client_,
        input_buffers_.front()->id));
    input_buffers_.pop();
  }

  // base::Unretained is safe: the decoder thread is stopped before |this| is
  // destroyed (see Cleanup()).
  decoder_thread_task_runner_->PostTask(
      FROM_HERE, base::Bind(&VaapiVideoDecodeAccelerator::ResetTask,
                            base::Unretained(this)));

  // Wake any decoder-thread waiters so they notice kResetting and bail out.
  input_ready_.Signal();
  surfaces_available_.Signal();
}
896 | |
// Runs on the message loop thread once ResetTask() has completed on the
// decoder thread. Drops pending outputs, waits out an in-progress surface
// set change if needed (by re-posting itself), then notifies the client and
// restarts decoding if inputs arrived during the reset.
void VaapiVideoDecodeAccelerator::FinishReset() {
  DCHECK_EQ(message_loop_, base::MessageLoop::current());
  DVLOG(1) << "FinishReset";
  base::AutoLock auto_lock(lock_);

  if (state_ != kResetting) {
    DCHECK(state_ == kDestroying || state_ == kUninitialized) << state_;
    return;  // We could've gotten destroyed already.
  }

  // Drop pending outputs.
  while (!pending_output_cbs_.empty())
    pending_output_cbs_.pop();

  if (awaiting_va_surfaces_recycle_) {
    // Decoder requested a new surface set while we were waiting for it to
    // finish the last DecodeTask, running at the time of Reset().
    // Let the surface set change finish first before resetting.
    message_loop_->PostTask(FROM_HERE, base::Bind(
        &VaapiVideoDecodeAccelerator::FinishReset, weak_this_));
    return;
  }

  num_stream_bufs_at_decoder_ = 0;
  state_ = kIdle;

  message_loop_->PostTask(FROM_HERE, base::Bind(
      &Client::NotifyResetDone, client_));

  // The client might have given us new buffers via Decode() while we were
  // resetting and might be waiting for our move, and not call Decode() anymore
  // until we return something. Post a DecodeTask() so that we won't
  // sleep forever waiting for Decode() in that case. Having two of them
  // in the pipe is harmless, the additional one will return as soon as it sees
  // that we are back in kDecoding state.
  if (!input_buffers_.empty()) {
    state_ = kDecoding;
    decoder_thread_task_runner_->PostTask(
        FROM_HERE, base::Bind(&VaapiVideoDecodeAccelerator::DecodeTask,
                              base::Unretained(this)));
  }

  DVLOG(1) << "Reset finished";
}
941 | |
// Shared teardown path; runs on the message loop thread. Idempotent (early
// return if already uninitialized/destroying). Transitions to kDestroying,
// invalidates outstanding weak pointers, wakes and stops the decoder thread,
// then marks the accelerator uninitialized.
void VaapiVideoDecodeAccelerator::Cleanup() {
  DCHECK_EQ(message_loop_, base::MessageLoop::current());

  base::AutoLock auto_lock(lock_);
  if (state_ == kUninitialized || state_ == kDestroying)
    return;

  DVLOG(1) << "Destroying VAVDA";
  state_ = kDestroying;

  client_ptr_factory_.reset();
  weak_this_factory_.InvalidateWeakPtrs();

  // Signal all potential waiters on the decoder_thread_, let them early-exit,
  // as we've just moved to the kDestroying state, and wait for all tasks
  // to finish.
  input_ready_.Signal();
  surfaces_available_.Signal();
  {
    // Release |lock_| while joining the decoder thread so its remaining
    // tasks, which also acquire |lock_|, can run to completion.
    base::AutoUnlock auto_unlock(lock_);
    decoder_thread_.Stop();
  }

  state_ = kUninitialized;
}
967 | |
// Client-facing teardown. Cleanup() stops the decoder thread first, after
// which deleting |this| is safe.
void VaapiVideoDecodeAccelerator::Destroy() {
  DCHECK_EQ(message_loop_, base::MessageLoop::current());
  Cleanup();
  delete this;
}
973 | |
// Decoding on a client-provided separate thread is not supported by this
// implementation; always decline.
bool VaapiVideoDecodeAccelerator::TryToSetupDecodeOnSeparateThread(
    const base::WeakPtr<Client>& decode_client,
    const scoped_refptr<base::SingleThreadTaskRunner>& decode_task_runner) {
  return false;
}
979 | |
980 bool VaapiVideoDecodeAccelerator::DecodeSurface( | |
981 const scoped_refptr<VaapiDecodeSurface>& dec_surface) { | |
982 if (!vaapi_wrapper_->ExecuteAndDestroyPendingBuffers( | |
983 dec_surface->va_surface()->id())) { | |
984 DVLOG(1) << "Failed decoding picture"; | |
985 return false; | |
986 } | |
987 | |
988 return true; | |
989 } | |
990 | |
// Queues a decoded |dec_surface| for output to the client in decode order.
// May be invoked from the decoder thread; in that case it trampolines itself
// to the message loop thread via a weak pointer.
void VaapiVideoDecodeAccelerator::SurfaceReady(
    const scoped_refptr<VaapiDecodeSurface>& dec_surface) {
  if (message_loop_ != base::MessageLoop::current()) {
    message_loop_->PostTask(
        FROM_HERE, base::Bind(&VaapiVideoDecodeAccelerator::SurfaceReady,
                              weak_this_, dec_surface));
    return;
  }

  DCHECK(!awaiting_va_surfaces_recycle_);

  {
    base::AutoLock auto_lock(lock_);
    // Drop any requests to output if we are resetting or being destroyed.
    if (state_ == kResetting || state_ == kDestroying)
      return;
  }

  // Defer the actual OutputPicture() until a picture buffer is available;
  // the callback is paired with a free buffer in TryOutputSurface().
  pending_output_cbs_.push(
      base::Bind(&VaapiVideoDecodeAccelerator::OutputPicture, weak_this_,
                 dec_surface->va_surface(), dec_surface->bitstream_id()));

  TryOutputSurface();
}
1015 | |
// Runs on the decoder thread. Hands out a free VA surface from the pool,
// wrapped in a VaapiDecodeSurface tied to the bitstream id of the input
// buffer currently being decoded. Returns nullptr when the pool is
// exhausted; callers are expected to retry after surfaces are recycled.
scoped_refptr<VaapiVideoDecodeAccelerator::VaapiDecodeSurface>
VaapiVideoDecodeAccelerator::CreateSurface() {
  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
  base::AutoLock auto_lock(lock_);

  if (available_va_surfaces_.empty())
    return nullptr;

  DCHECK(!awaiting_va_surfaces_recycle_);
  // |va_surface_release_cb_| returns the surface id to the pool when the
  // last reference to the VASurface is dropped.
  scoped_refptr<VASurface> va_surface(new VASurface(
      available_va_surfaces_.front(), requested_pic_size_,
      vaapi_wrapper_->va_surface_format(), va_surface_release_cb_));
  available_va_surfaces_.pop_front();

  scoped_refptr<VaapiDecodeSurface> dec_surface =
      new VaapiDecodeSurface(curr_input_buffer_->id, va_surface);

  return dec_surface;
}
1035 | |
// H.264 accelerator glue between H264Decoder and libva. Both pointers are
// stored unowned (raw); callers must guarantee |vaapi_dec| and
// |vaapi_wrapper| outlive this object.
VaapiVideoDecodeAccelerator::VaapiH264Accelerator::VaapiH264Accelerator(
    VaapiVideoDecodeAccelerator* vaapi_dec,
    VaapiWrapper* vaapi_wrapper)
    : vaapi_wrapper_(vaapi_wrapper), vaapi_dec_(vaapi_dec) {
  DCHECK(vaapi_wrapper_);
  DCHECK(vaapi_dec_);
}

VaapiVideoDecodeAccelerator::VaapiH264Accelerator::~VaapiH264Accelerator() {
}
1046 | |
1047 scoped_refptr<H264Picture> | |
1048 VaapiVideoDecodeAccelerator::VaapiH264Accelerator::CreateH264Picture() { | |
1049 scoped_refptr<VaapiDecodeSurface> va_surface = vaapi_dec_->CreateSurface(); | |
1050 if (!va_surface) | |
1051 return nullptr; | |
1052 | |
1053 return new VaapiH264Picture(va_surface); | |
1054 } | |
1055 | |
1056 // Fill |va_pic| with default/neutral values. | |
1057 static void InitVAPicture(VAPictureH264* va_pic) { | |
1058 memset(va_pic, 0, sizeof(*va_pic)); | |
1059 va_pic->picture_id = VA_INVALID_ID; | |
1060 va_pic->flags = VA_PICTURE_H264_INVALID; | |
1061 } | |
1062 | |
// Builds and submits the per-frame libva buffers for |pic|: a
// VAPictureParameterBufferH264 populated from the SPS/PPS/DPB state, and a
// VAIQMatrixBufferH264 with scaling lists (from the PPS if it overrides
// them, otherwise from the SPS). Returns false if any submission fails.
// The ref_pic_list* parameters are unused here; reference lists are
// consumed per-slice in SubmitSlice().
bool VaapiVideoDecodeAccelerator::VaapiH264Accelerator::SubmitFrameMetadata(
    const media::H264SPS* sps,
    const media::H264PPS* pps,
    const H264DPB& dpb,
    const H264Picture::Vector& ref_pic_listp0,
    const H264Picture::Vector& ref_pic_listb0,
    const H264Picture::Vector& ref_pic_listb1,
    const scoped_refptr<H264Picture>& pic) {
  VAPictureParameterBufferH264 pic_param;
  memset(&pic_param, 0, sizeof(pic_param));

// Helper macros: copy a field from the parsed SPS into |pic_param|, either
// under the same name (FROM_SPS_TO_PP) or a different one (FROM_SPS_TO_PP2).
#define FROM_SPS_TO_PP(a) pic_param.a = sps->a
#define FROM_SPS_TO_PP2(a, b) pic_param.b = sps->a
  FROM_SPS_TO_PP2(pic_width_in_mbs_minus1, picture_width_in_mbs_minus1);
  // This assumes non-interlaced video
  FROM_SPS_TO_PP2(pic_height_in_map_units_minus1, picture_height_in_mbs_minus1);
  FROM_SPS_TO_PP(bit_depth_luma_minus8);
  FROM_SPS_TO_PP(bit_depth_chroma_minus8);
#undef FROM_SPS_TO_PP
#undef FROM_SPS_TO_PP2

// Same, for the seq_fields bitfield members.
#define FROM_SPS_TO_PP_SF(a) pic_param.seq_fields.bits.a = sps->a
#define FROM_SPS_TO_PP_SF2(a, b) pic_param.seq_fields.bits.b = sps->a
  FROM_SPS_TO_PP_SF(chroma_format_idc);
  FROM_SPS_TO_PP_SF2(separate_colour_plane_flag,
                     residual_colour_transform_flag);
  FROM_SPS_TO_PP_SF(gaps_in_frame_num_value_allowed_flag);
  FROM_SPS_TO_PP_SF(frame_mbs_only_flag);
  FROM_SPS_TO_PP_SF(mb_adaptive_frame_field_flag);
  FROM_SPS_TO_PP_SF(direct_8x8_inference_flag);
  // Per H.264 spec level limits, levels >= 3.1 constrain the minimum luma
  // bi-prediction size to 8x8.
  pic_param.seq_fields.bits.MinLumaBiPredSize8x8 = (sps->level_idc >= 31);
  FROM_SPS_TO_PP_SF(log2_max_frame_num_minus4);
  FROM_SPS_TO_PP_SF(pic_order_cnt_type);
  FROM_SPS_TO_PP_SF(log2_max_pic_order_cnt_lsb_minus4);
  FROM_SPS_TO_PP_SF(delta_pic_order_always_zero_flag);
#undef FROM_SPS_TO_PP_SF
#undef FROM_SPS_TO_PP_SF2

// Copy a same-named field from the parsed PPS into |pic_param|.
#define FROM_PPS_TO_PP(a) pic_param.a = pps->a
  FROM_PPS_TO_PP(num_slice_groups_minus1);
  // Slice groups (FMO) are left at their default values here.
  pic_param.slice_group_map_type = 0;
  pic_param.slice_group_change_rate_minus1 = 0;
  FROM_PPS_TO_PP(pic_init_qp_minus26);
  FROM_PPS_TO_PP(pic_init_qs_minus26);
  FROM_PPS_TO_PP(chroma_qp_index_offset);
  FROM_PPS_TO_PP(second_chroma_qp_index_offset);
#undef FROM_PPS_TO_PP

// Same, for the pic_fields bitfield members.
#define FROM_PPS_TO_PP_PF(a) pic_param.pic_fields.bits.a = pps->a
#define FROM_PPS_TO_PP_PF2(a, b) pic_param.pic_fields.bits.b = pps->a
  FROM_PPS_TO_PP_PF(entropy_coding_mode_flag);
  FROM_PPS_TO_PP_PF(weighted_pred_flag);
  FROM_PPS_TO_PP_PF(weighted_bipred_idc);
  FROM_PPS_TO_PP_PF(transform_8x8_mode_flag);

  // Frame-only decoding (consistent with the non-interlaced assumption
  // above).
  pic_param.pic_fields.bits.field_pic_flag = 0;
  FROM_PPS_TO_PP_PF(constrained_intra_pred_flag);
  FROM_PPS_TO_PP_PF2(bottom_field_pic_order_in_frame_present_flag,
                     pic_order_present_flag);
  FROM_PPS_TO_PP_PF(deblocking_filter_control_present_flag);
  FROM_PPS_TO_PP_PF(redundant_pic_cnt_present_flag);
  pic_param.pic_fields.bits.reference_pic_flag = pic->ref;
#undef FROM_PPS_TO_PP_PF
#undef FROM_PPS_TO_PP_PF2

  pic_param.frame_num = pic->frame_num;

  InitVAPicture(&pic_param.CurrPic);
  FillVAPicture(&pic_param.CurrPic, pic);

  // Init reference pictures' array.
  for (int i = 0; i < 16; ++i)
    InitVAPicture(&pic_param.ReferenceFrames[i]);

  // And fill it with picture info from DPB.
  FillVARefFramesFromDPB(dpb, pic_param.ReferenceFrames,
                         arraysize(pic_param.ReferenceFrames));

  pic_param.num_ref_frames = sps->max_num_ref_frames;

  if (!vaapi_wrapper_->SubmitBuffer(VAPictureParameterBufferType,
                                    sizeof(pic_param),
                                    &pic_param))
    return false;

  VAIQMatrixBufferH264 iq_matrix_buf;
  memset(&iq_matrix_buf, 0, sizeof(iq_matrix_buf));

  // PPS scaling lists take precedence over SPS ones when present.
  if (pps->pic_scaling_matrix_present_flag) {
    for (int i = 0; i < 6; ++i) {
      for (int j = 0; j < 16; ++j)
        iq_matrix_buf.ScalingList4x4[i][j] = pps->scaling_list4x4[i][j];
    }

    for (int i = 0; i < 2; ++i) {
      for (int j = 0; j < 64; ++j)
        iq_matrix_buf.ScalingList8x8[i][j] = pps->scaling_list8x8[i][j];
    }
  } else {
    for (int i = 0; i < 6; ++i) {
      for (int j = 0; j < 16; ++j)
        iq_matrix_buf.ScalingList4x4[i][j] = sps->scaling_list4x4[i][j];
    }

    for (int i = 0; i < 2; ++i) {
      for (int j = 0; j < 64; ++j)
        iq_matrix_buf.ScalingList8x8[i][j] = sps->scaling_list8x8[i][j];
    }
  }

  return vaapi_wrapper_->SubmitBuffer(VAIQMatrixBufferType,
                                      sizeof(iq_matrix_buf),
                                      &iq_matrix_buf);
}
1177 | |
// Builds and submits a VASliceParameterBufferH264 for |slice_hdr|, followed
// by the raw slice data. Prediction weight tables are filled only when
// weighted prediction applies to this slice type; reference picture lists
// are translated to VA entries, with unused slots left invalid. Returns
// false if any libva buffer submission fails.
bool VaapiVideoDecodeAccelerator::VaapiH264Accelerator::SubmitSlice(
    const media::H264PPS* pps,
    const media::H264SliceHeader* slice_hdr,
    const H264Picture::Vector& ref_pic_list0,
    const H264Picture::Vector& ref_pic_list1,
    const scoped_refptr<H264Picture>& pic,
    const uint8_t* data,
    size_t size) {
  VASliceParameterBufferH264 slice_param;
  memset(&slice_param, 0, sizeof(slice_param));

  slice_param.slice_data_size = slice_hdr->nalu_size;
  slice_param.slice_data_offset = 0;
  slice_param.slice_data_flag = VA_SLICE_DATA_FLAG_ALL;
  slice_param.slice_data_bit_offset = slice_hdr->header_bit_size;

// Copy a same-named field from the parsed slice header into |slice_param|.
#define SHDRToSP(a) slice_param.a = slice_hdr->a
  SHDRToSP(first_mb_in_slice);
  // Fold slice types 5..9 into their 0..4 equivalents.
  slice_param.slice_type = slice_hdr->slice_type % 5;
  SHDRToSP(direct_spatial_mv_pred_flag);

  // TODO posciak: make sure parser sets those even when override flags
  // in slice header is off.
  SHDRToSP(num_ref_idx_l0_active_minus1);
  SHDRToSP(num_ref_idx_l1_active_minus1);
  SHDRToSP(cabac_init_idc);
  SHDRToSP(slice_qp_delta);
  SHDRToSP(disable_deblocking_filter_idc);
  SHDRToSP(slice_alpha_c0_offset_div2);
  SHDRToSP(slice_beta_offset_div2);

  // Weight tables apply only to P/SP slices with explicit weighted
  // prediction, or B slices with weighted_bipred_idc == 1 (explicit).
  if (((slice_hdr->IsPSlice() || slice_hdr->IsSPSlice()) &&
       pps->weighted_pred_flag) ||
      (slice_hdr->IsBSlice() && pps->weighted_bipred_idc == 1)) {
    SHDRToSP(luma_log2_weight_denom);
    SHDRToSP(chroma_log2_weight_denom);

    SHDRToSP(luma_weight_l0_flag);
    SHDRToSP(luma_weight_l1_flag);

    SHDRToSP(chroma_weight_l0_flag);
    SHDRToSP(chroma_weight_l1_flag);

    for (int i = 0; i <= slice_param.num_ref_idx_l0_active_minus1; ++i) {
      slice_param.luma_weight_l0[i] =
          slice_hdr->pred_weight_table_l0.luma_weight[i];
      slice_param.luma_offset_l0[i] =
          slice_hdr->pred_weight_table_l0.luma_offset[i];

      for (int j = 0; j < 2; ++j) {
        slice_param.chroma_weight_l0[i][j] =
            slice_hdr->pred_weight_table_l0.chroma_weight[i][j];
        slice_param.chroma_offset_l0[i][j] =
            slice_hdr->pred_weight_table_l0.chroma_offset[i][j];
      }
    }

    // List 1 weights exist only for B slices.
    if (slice_hdr->IsBSlice()) {
      for (int i = 0; i <= slice_param.num_ref_idx_l1_active_minus1; ++i) {
        slice_param.luma_weight_l1[i] =
            slice_hdr->pred_weight_table_l1.luma_weight[i];
        slice_param.luma_offset_l1[i] =
            slice_hdr->pred_weight_table_l1.luma_offset[i];

        for (int j = 0; j < 2; ++j) {
          slice_param.chroma_weight_l1[i][j] =
              slice_hdr->pred_weight_table_l1.chroma_weight[i][j];
          slice_param.chroma_offset_l1[i][j] =
              slice_hdr->pred_weight_table_l1.chroma_offset[i][j];
        }
      }
    }
  }

  static_assert(
      arraysize(slice_param.RefPicList0) == arraysize(slice_param.RefPicList1),
      "Invalid RefPicList sizes");

  // Mark all list entries invalid first, then overwrite the used ones.
  for (size_t i = 0; i < arraysize(slice_param.RefPicList0); ++i) {
    InitVAPicture(&slice_param.RefPicList0[i]);
    InitVAPicture(&slice_param.RefPicList1[i]);
  }

  for (size_t i = 0;
       i < ref_pic_list0.size() && i < arraysize(slice_param.RefPicList0);
       ++i) {
    if (ref_pic_list0[i])
      FillVAPicture(&slice_param.RefPicList0[i], ref_pic_list0[i]);
  }
  for (size_t i = 0;
       i < ref_pic_list1.size() && i < arraysize(slice_param.RefPicList1);
       ++i) {
    if (ref_pic_list1[i])
      FillVAPicture(&slice_param.RefPicList1[i], ref_pic_list1[i]);
  }

  if (!vaapi_wrapper_->SubmitBuffer(VASliceParameterBufferType,
                                    sizeof(slice_param),
                                    &slice_param))
    return false;

  // Can't help it, blame libva...
  void* non_const_ptr = const_cast<uint8_t*>(data);
  return vaapi_wrapper_->SubmitBuffer(VASliceDataBufferType, size,
                                      non_const_ptr);
}
1284 | |
1285 bool VaapiVideoDecodeAccelerator::VaapiH264Accelerator::SubmitDecode( | |
1286 const scoped_refptr<H264Picture>& pic) { | |
1287 DVLOG(4) << "Decoding POC " << pic->pic_order_cnt; | |
1288 scoped_refptr<VaapiDecodeSurface> dec_surface = | |
1289 H264PictureToVaapiDecodeSurface(pic); | |
1290 | |
1291 return vaapi_dec_->DecodeSurface(dec_surface); | |
1292 } | |
1293 | |
1294 bool VaapiVideoDecodeAccelerator::VaapiH264Accelerator::OutputPicture( | |
1295 const scoped_refptr<H264Picture>& pic) { | |
1296 scoped_refptr<VaapiDecodeSurface> dec_surface = | |
1297 H264PictureToVaapiDecodeSurface(pic); | |
1298 | |
1299 vaapi_dec_->SurfaceReady(dec_surface); | |
1300 | |
1301 return true; | |
1302 } | |
1303 | |
// Discards any libva buffers that were submitted but not yet executed, so a
// subsequent decode starts from a clean slate.
void VaapiVideoDecodeAccelerator::VaapiH264Accelerator::Reset() {
  vaapi_wrapper_->DestroyPendingBuffers();
}
1307 | |
1308 scoped_refptr<VaapiVideoDecodeAccelerator::VaapiDecodeSurface> | |
1309 VaapiVideoDecodeAccelerator::VaapiH264Accelerator:: | |
1310 H264PictureToVaapiDecodeSurface(const scoped_refptr<H264Picture>& pic) { | |
1311 VaapiH264Picture* vaapi_pic = pic->AsVaapiH264Picture(); | |
1312 CHECK(vaapi_pic); | |
1313 return vaapi_pic->dec_surface(); | |
1314 } | |
1315 | |
// Translates |pic| into libva's VAPictureH264 representation: attaches the
// id of the backing VA surface (VA_INVALID_SURFACE for "nonexisting"
// frames, which have no surface) and converts field/reference state into
// the corresponding VA flags.
void VaapiVideoDecodeAccelerator::VaapiH264Accelerator::FillVAPicture(
    VAPictureH264* va_pic,
    scoped_refptr<H264Picture> pic) {
  VASurfaceID va_surface_id = VA_INVALID_SURFACE;

  if (!pic->nonexisting) {
    scoped_refptr<VaapiDecodeSurface> dec_surface =
        H264PictureToVaapiDecodeSurface(pic);
    va_surface_id = dec_surface->va_surface()->id();
  }

  va_pic->picture_id = va_surface_id;
  va_pic->frame_idx = pic->frame_num;
  va_pic->flags = 0;

  switch (pic->field) {
    case H264Picture::FIELD_NONE:
      break;
    case H264Picture::FIELD_TOP:
      va_pic->flags |= VA_PICTURE_H264_TOP_FIELD;
      break;
    case H264Picture::FIELD_BOTTOM:
      va_pic->flags |= VA_PICTURE_H264_BOTTOM_FIELD;
      break;
  }

  // Reference pictures carry a long- or short-term flag; non-references
  // keep only the field flags set above.
  if (pic->ref) {
    va_pic->flags |= pic->long_term ? VA_PICTURE_H264_LONG_TERM_REFERENCE
                                    : VA_PICTURE_H264_SHORT_TERM_REFERENCE;
  }

  va_pic->TopFieldOrderCnt = pic->top_field_order_cnt;
  va_pic->BottomFieldOrderCnt = pic->bottom_field_order_cnt;
}
1350 | |
// Fills |va_pics| (up to |num_pics| entries) with the reference pictures
// currently in |dpb| and returns the number of entries written.
// Non-reference pictures are skipped.
int VaapiVideoDecodeAccelerator::VaapiH264Accelerator::FillVARefFramesFromDPB(
    const H264DPB& dpb,
    VAPictureH264* va_pics,
    int num_pics) {
  H264Picture::Vector::const_reverse_iterator rit;
  int i;

  // Return reference frames in reverse order of insertion.
  // Libva does not document this, but other implementations (e.g. mplayer)
  // do it this way as well.
  for (rit = dpb.rbegin(), i = 0; rit != dpb.rend() && i < num_pics; ++rit) {
    if ((*rit)->ref)
      FillVAPicture(&va_pics[i++], *rit);
  }

  return i;
}
1368 | |
// VP8 accelerator glue between VP8Decoder and libva. Both pointers are
// stored unowned (raw); callers must guarantee |vaapi_dec| and
// |vaapi_wrapper| outlive this object.
VaapiVideoDecodeAccelerator::VaapiVP8Accelerator::VaapiVP8Accelerator(
    VaapiVideoDecodeAccelerator* vaapi_dec,
    VaapiWrapper* vaapi_wrapper)
    : vaapi_wrapper_(vaapi_wrapper), vaapi_dec_(vaapi_dec) {
  DCHECK(vaapi_wrapper_);
  DCHECK(vaapi_dec_);
}

VaapiVideoDecodeAccelerator::VaapiVP8Accelerator::~VaapiVP8Accelerator() {
}
1379 | |
1380 scoped_refptr<VP8Picture> | |
1381 VaapiVideoDecodeAccelerator::VaapiVP8Accelerator::CreateVP8Picture() { | |
1382 scoped_refptr<VaapiDecodeSurface> va_surface = vaapi_dec_->CreateSurface(); | |
1383 if (!va_surface) | |
1384 return nullptr; | |
1385 | |
1386 return new VaapiVP8Picture(va_surface); | |
1387 } | |
1388 | |
// memcpy() between two same-sized arrays, with the size equality enforced at
// compile time. Both arguments must be actual arrays (not pointers), or
// sizeof() would measure the pointer instead of the data.
#define ARRAY_MEMCPY_CHECKED(to, from)                               \
  do {                                                               \
    static_assert(sizeof(to) == sizeof(from),                        \
                  #from " and " #to " arrays must be of same size"); \
    memcpy(to, from, sizeof(to));                                    \
  } while (0)
1395 | |
// Builds and submits all libva buffers required to decode one VP8 frame —
// IQ matrix, coefficient probabilities, picture parameters, slice
// parameters and the raw frame data — then executes the decode into |pic|'s
// surface. |last_frame|/|golden_frame|/|alt_frame| may be null (e.g. on
// keyframes); missing references are encoded as VA_INVALID_SURFACE.
// Returns false if any submission or the decode itself fails.
bool VaapiVideoDecodeAccelerator::VaapiVP8Accelerator::SubmitDecode(
    const scoped_refptr<VP8Picture>& pic,
    const media::Vp8FrameHeader* frame_hdr,
    const scoped_refptr<VP8Picture>& last_frame,
    const scoped_refptr<VP8Picture>& golden_frame,
    const scoped_refptr<VP8Picture>& alt_frame) {
  VAIQMatrixBufferVP8 iq_matrix_buf;
  memset(&iq_matrix_buf, 0, sizeof(VAIQMatrixBufferVP8));

  const media::Vp8SegmentationHeader& sgmnt_hdr = frame_hdr->segmentation_hdr;
  const media::Vp8QuantizationHeader& quant_hdr = frame_hdr->quantization_hdr;
  static_assert(
      arraysize(iq_matrix_buf.quantization_index) == media::kMaxMBSegments,
      "incorrect quantization matrix size");
  // Compute the per-segment quantizer indices: the base y_ac index, adjusted
  // per segment (absolute or delta mode), then per-plane deltas.
  for (size_t i = 0; i < media::kMaxMBSegments; ++i) {
    int q = quant_hdr.y_ac_qi;

    if (sgmnt_hdr.segmentation_enabled) {
      if (sgmnt_hdr.segment_feature_mode ==
          media::Vp8SegmentationHeader::FEATURE_MODE_ABSOLUTE)
        q = sgmnt_hdr.quantizer_update_value[i];
      else
        q += sgmnt_hdr.quantizer_update_value[i];
    }

// Clamp a quantizer index into the valid [0..127] range.
#define CLAMP_Q(q) std::min(std::max(q, 0), 127)
    static_assert(arraysize(iq_matrix_buf.quantization_index[i]) == 6,
                  "incorrect quantization matrix size");
    iq_matrix_buf.quantization_index[i][0] = CLAMP_Q(q);
    iq_matrix_buf.quantization_index[i][1] = CLAMP_Q(q + quant_hdr.y_dc_delta);
    iq_matrix_buf.quantization_index[i][2] = CLAMP_Q(q + quant_hdr.y2_dc_delta);
    iq_matrix_buf.quantization_index[i][3] = CLAMP_Q(q + quant_hdr.y2_ac_delta);
    iq_matrix_buf.quantization_index[i][4] = CLAMP_Q(q + quant_hdr.uv_dc_delta);
    iq_matrix_buf.quantization_index[i][5] = CLAMP_Q(q + quant_hdr.uv_ac_delta);
#undef CLAMP_Q
  }

  if (!vaapi_wrapper_->SubmitBuffer(VAIQMatrixBufferType,
                                    sizeof(VAIQMatrixBufferVP8),
                                    &iq_matrix_buf))
    return false;

  // Entropy coder probabilities for DCT coefficients.
  VAProbabilityDataBufferVP8 prob_buf;
  memset(&prob_buf, 0, sizeof(VAProbabilityDataBufferVP8));

  const media::Vp8EntropyHeader& entr_hdr = frame_hdr->entropy_hdr;
  ARRAY_MEMCPY_CHECKED(prob_buf.dct_coeff_probs, entr_hdr.coeff_probs);

  if (!vaapi_wrapper_->SubmitBuffer(VAProbabilityBufferType,
                                    sizeof(VAProbabilityDataBufferVP8),
                                    &prob_buf))
    return false;

  VAPictureParameterBufferVP8 pic_param;
  memset(&pic_param, 0, sizeof(VAPictureParameterBufferVP8));
  pic_param.frame_width = frame_hdr->width;
  pic_param.frame_height = frame_hdr->height;

  // Attach the three VP8 reference frames, when available.
  if (last_frame) {
    scoped_refptr<VaapiDecodeSurface> last_frame_surface =
        VP8PictureToVaapiDecodeSurface(last_frame);
    pic_param.last_ref_frame = last_frame_surface->va_surface()->id();
  } else {
    pic_param.last_ref_frame = VA_INVALID_SURFACE;
  }

  if (golden_frame) {
    scoped_refptr<VaapiDecodeSurface> golden_frame_surface =
        VP8PictureToVaapiDecodeSurface(golden_frame);
    pic_param.golden_ref_frame = golden_frame_surface->va_surface()->id();
  } else {
    pic_param.golden_ref_frame = VA_INVALID_SURFACE;
  }

  if (alt_frame) {
    scoped_refptr<VaapiDecodeSurface> alt_frame_surface =
        VP8PictureToVaapiDecodeSurface(alt_frame);
    pic_param.alt_ref_frame = alt_frame_surface->va_surface()->id();
  } else {
    pic_param.alt_ref_frame = VA_INVALID_SURFACE;
  }

  pic_param.out_of_loop_frame = VA_INVALID_SURFACE;

  const media::Vp8LoopFilterHeader& lf_hdr = frame_hdr->loopfilter_hdr;

// Assign an arbitrary expression |b| into a pic_fields bit named |a|.
#define FHDR_TO_PP_PF(a, b) pic_param.pic_fields.bits.a = (b)
  // Note inverted sense: libva's key_frame bit is 0 for keyframes.
  FHDR_TO_PP_PF(key_frame, frame_hdr->IsKeyframe() ? 0 : 1);
  FHDR_TO_PP_PF(version, frame_hdr->version);
  FHDR_TO_PP_PF(segmentation_enabled, sgmnt_hdr.segmentation_enabled);
  FHDR_TO_PP_PF(update_mb_segmentation_map,
                sgmnt_hdr.update_mb_segmentation_map);
  FHDR_TO_PP_PF(update_segment_feature_data,
                sgmnt_hdr.update_segment_feature_data);
  FHDR_TO_PP_PF(filter_type, lf_hdr.type);
  FHDR_TO_PP_PF(sharpness_level, lf_hdr.sharpness_level);
  FHDR_TO_PP_PF(loop_filter_adj_enable, lf_hdr.loop_filter_adj_enable);
  FHDR_TO_PP_PF(mode_ref_lf_delta_update, lf_hdr.mode_ref_lf_delta_update);
  FHDR_TO_PP_PF(sign_bias_golden, frame_hdr->sign_bias_golden);
  FHDR_TO_PP_PF(sign_bias_alternate, frame_hdr->sign_bias_alternate);
  FHDR_TO_PP_PF(mb_no_coeff_skip, frame_hdr->mb_no_skip_coeff);
  FHDR_TO_PP_PF(loop_filter_disable, lf_hdr.level == 0);
#undef FHDR_TO_PP_PF

  ARRAY_MEMCPY_CHECKED(pic_param.mb_segment_tree_probs, sgmnt_hdr.segment_prob);

  static_assert(arraysize(sgmnt_hdr.lf_update_value) ==
                    arraysize(pic_param.loop_filter_level),
                "loop filter level arrays mismatch");
  // Per-segment loop filter levels, adjusted like the quantizers above
  // (absolute or delta mode).
  for (size_t i = 0; i < arraysize(sgmnt_hdr.lf_update_value); ++i) {
    int lf_level = lf_hdr.level;
    if (sgmnt_hdr.segmentation_enabled) {
      if (sgmnt_hdr.segment_feature_mode ==
          media::Vp8SegmentationHeader::FEATURE_MODE_ABSOLUTE)
        lf_level = sgmnt_hdr.lf_update_value[i];
      else
        lf_level += sgmnt_hdr.lf_update_value[i];
    }

    // Clamp to [0..63] range.
    lf_level = std::min(std::max(lf_level, 0), 63);
    pic_param.loop_filter_level[i] = lf_level;
  }

  static_assert(arraysize(lf_hdr.ref_frame_delta) ==
                        arraysize(pic_param.loop_filter_deltas_ref_frame) &&
                    arraysize(lf_hdr.mb_mode_delta) ==
                        arraysize(pic_param.loop_filter_deltas_mode) &&
                    arraysize(lf_hdr.ref_frame_delta) ==
                        arraysize(lf_hdr.mb_mode_delta),
                "loop filter deltas arrays size mismatch");
  for (size_t i = 0; i < arraysize(lf_hdr.ref_frame_delta); ++i) {
    pic_param.loop_filter_deltas_ref_frame[i] = lf_hdr.ref_frame_delta[i];
    pic_param.loop_filter_deltas_mode[i] = lf_hdr.mb_mode_delta[i];
  }

// Copy a same-named field from the parsed frame header into |pic_param|.
#define FHDR_TO_PP(a) pic_param.a = frame_hdr->a
  FHDR_TO_PP(prob_skip_false);
  FHDR_TO_PP(prob_intra);
  FHDR_TO_PP(prob_last);
  FHDR_TO_PP(prob_gf);
#undef FHDR_TO_PP

  ARRAY_MEMCPY_CHECKED(pic_param.y_mode_probs, entr_hdr.y_mode_probs);
  ARRAY_MEMCPY_CHECKED(pic_param.uv_mode_probs, entr_hdr.uv_mode_probs);
  ARRAY_MEMCPY_CHECKED(pic_param.mv_probs, entr_hdr.mv_probs);

  // Boolean decoder state at the start of the macroblock data.
  pic_param.bool_coder_ctx.range = frame_hdr->bool_dec_range;
  pic_param.bool_coder_ctx.value = frame_hdr->bool_dec_value;
  pic_param.bool_coder_ctx.count = frame_hdr->bool_dec_count;

  if (!vaapi_wrapper_->SubmitBuffer(VAPictureParameterBufferType,
                                    sizeof(pic_param), &pic_param))
    return false;

  VASliceParameterBufferVP8 slice_param;
  memset(&slice_param, 0, sizeof(slice_param));
  slice_param.slice_data_size = frame_hdr->frame_size;
  slice_param.slice_data_offset = frame_hdr->first_part_offset;
  slice_param.slice_data_flag = VA_SLICE_DATA_FLAG_ALL;
  slice_param.macroblock_offset = frame_hdr->macroblock_bit_offset;
  // Number of DCT partitions plus control partition.
  slice_param.num_of_partitions = frame_hdr->num_of_dct_partitions + 1;

  // Per VAAPI, this size only includes the size of the macroblock data in
  // the first partition (in bytes), so we have to subtract the header size.
  slice_param.partition_size[0] =
      frame_hdr->first_part_size - ((frame_hdr->macroblock_bit_offset + 7) / 8);

  for (size_t i = 0; i < frame_hdr->num_of_dct_partitions; ++i)
    slice_param.partition_size[i + 1] = frame_hdr->dct_partition_sizes[i];

  if (!vaapi_wrapper_->SubmitBuffer(VASliceParameterBufferType,
                                    sizeof(VASliceParameterBufferVP8),
                                    &slice_param))
    return false;

  // libva takes a non-const pointer even though it only reads the data.
  void* non_const_ptr = const_cast<uint8_t*>(frame_hdr->data);
  if (!vaapi_wrapper_->SubmitBuffer(VASliceDataBufferType,
                                    frame_hdr->frame_size,
                                    non_const_ptr))
    return false;

  scoped_refptr<VaapiDecodeSurface> dec_surface =
      VP8PictureToVaapiDecodeSurface(pic);

  return vaapi_dec_->DecodeSurface(dec_surface);
}
1584 | |
1585 bool VaapiVideoDecodeAccelerator::VaapiVP8Accelerator::OutputPicture( | |
1586 const scoped_refptr<VP8Picture>& pic) { | |
1587 scoped_refptr<VaapiDecodeSurface> dec_surface = | |
1588 VP8PictureToVaapiDecodeSurface(pic); | |
1589 | |
1590 vaapi_dec_->SurfaceReady(dec_surface); | |
1591 return true; | |
1592 } | |
1593 | |
1594 scoped_refptr<VaapiVideoDecodeAccelerator::VaapiDecodeSurface> | |
1595 VaapiVideoDecodeAccelerator::VaapiVP8Accelerator:: | |
1596 VP8PictureToVaapiDecodeSurface(const scoped_refptr<VP8Picture>& pic) { | |
1597 VaapiVP8Picture* vaapi_pic = pic->AsVaapiVP8Picture(); | |
1598 CHECK(vaapi_pic); | |
1599 return vaapi_pic->dec_surface(); | |
1600 } | |
1601 | |
// VP9 accelerator glue between VP9Decoder and libva. Both pointers are
// stored unowned (raw); callers must guarantee |vaapi_dec| and
// |vaapi_wrapper| outlive this object.
VaapiVideoDecodeAccelerator::VaapiVP9Accelerator::VaapiVP9Accelerator(
    VaapiVideoDecodeAccelerator* vaapi_dec,
    VaapiWrapper* vaapi_wrapper)
    : vaapi_wrapper_(vaapi_wrapper), vaapi_dec_(vaapi_dec) {
  DCHECK(vaapi_wrapper_);
  DCHECK(vaapi_dec_);
}

VaapiVideoDecodeAccelerator::VaapiVP9Accelerator::~VaapiVP9Accelerator() {}
1611 | |
1612 scoped_refptr<VP9Picture> | |
1613 VaapiVideoDecodeAccelerator::VaapiVP9Accelerator::CreateVP9Picture() { | |
1614 scoped_refptr<VaapiDecodeSurface> va_surface = vaapi_dec_->CreateSurface(); | |
1615 if (!va_surface) | |
1616 return nullptr; | |
1617 | |
1618 return new VaapiVP9Picture(va_surface); | |
1619 } | |
1620 | |
1621 bool VaapiVideoDecodeAccelerator::VaapiVP9Accelerator::SubmitDecode( | |
1622 const scoped_refptr<VP9Picture>& pic, | |
1623 const media::Vp9Segmentation& seg, | |
1624 const media::Vp9LoopFilter& lf, | |
1625 const std::vector<scoped_refptr<VP9Picture>>& ref_pictures) { | |
1626 VADecPictureParameterBufferVP9 pic_param; | |
1627 memset(&pic_param, 0, sizeof(pic_param)); | |
1628 | |
1629 const media::Vp9FrameHeader* frame_hdr = pic->frame_hdr.get(); | |
1630 DCHECK(frame_hdr); | |
1631 | |
1632 if (frame_hdr->profile != 0) { | |
1633 DVLOG(1) << "Unsupported profile" << frame_hdr->profile; | |
1634 return false; | |
1635 } | |
1636 | |
1637 pic_param.frame_width = base::checked_cast<uint16_t>(frame_hdr->width); | |
1638 pic_param.frame_height = base::checked_cast<uint16_t>(frame_hdr->height); | |
1639 | |
1640 CHECK_EQ(ref_pictures.size(), arraysize(pic_param.reference_frames)); | |
1641 for (size_t i = 0; i < arraysize(pic_param.reference_frames); ++i) { | |
1642 VASurfaceID va_surface_id; | |
1643 if (ref_pictures[i]) { | |
1644 scoped_refptr<VaapiDecodeSurface> surface = | |
1645 VP9PictureToVaapiDecodeSurface(ref_pictures[i]); | |
1646 va_surface_id = surface->va_surface()->id(); | |
1647 } else { | |
1648 va_surface_id = VA_INVALID_SURFACE; | |
1649 } | |
1650 | |
1651 pic_param.reference_frames[i] = va_surface_id; | |
1652 } | |
1653 | |
1654 #define FHDR_TO_PP_PF1(a) pic_param.pic_fields.bits.a = frame_hdr->a | |
1655 #define FHDR_TO_PP_PF2(a, b) pic_param.pic_fields.bits.a = b | |
1656 FHDR_TO_PP_PF2(subsampling_x, frame_hdr->subsampling_x == 1); | |
1657 FHDR_TO_PP_PF2(subsampling_y, frame_hdr->subsampling_y == 1); | |
1658 FHDR_TO_PP_PF2(frame_type, frame_hdr->IsKeyframe() ? 0 : 1); | |
1659 FHDR_TO_PP_PF1(show_frame); | |
1660 FHDR_TO_PP_PF1(error_resilient_mode); | |
1661 FHDR_TO_PP_PF1(intra_only); | |
1662 FHDR_TO_PP_PF1(allow_high_precision_mv); | |
1663 FHDR_TO_PP_PF2(mcomp_filter_type, frame_hdr->interp_filter); | |
1664 FHDR_TO_PP_PF1(frame_parallel_decoding_mode); | |
1665 FHDR_TO_PP_PF2(reset_frame_context, frame_hdr->reset_context); | |
1666 FHDR_TO_PP_PF1(refresh_frame_context); | |
1667 FHDR_TO_PP_PF1(frame_context_idx); | |
1668 FHDR_TO_PP_PF2(segmentation_enabled, seg.enabled); | |
1669 FHDR_TO_PP_PF2(segmentation_temporal_update, seg.temporal_update); | |
1670 FHDR_TO_PP_PF2(segmentation_update_map, seg.update_map); | |
1671 FHDR_TO_PP_PF2(last_ref_frame, frame_hdr->frame_refs[0]); | |
1672 FHDR_TO_PP_PF2(last_ref_frame_sign_bias, frame_hdr->ref_sign_biases[0]); | |
1673 FHDR_TO_PP_PF2(golden_ref_frame, frame_hdr->frame_refs[1]); | |
1674 FHDR_TO_PP_PF2(golden_ref_frame_sign_bias, frame_hdr->ref_sign_biases[1]); | |
1675 FHDR_TO_PP_PF2(alt_ref_frame, frame_hdr->frame_refs[2]); | |
1676 FHDR_TO_PP_PF2(alt_ref_frame_sign_bias, frame_hdr->ref_sign_biases[2]); | |
1677 FHDR_TO_PP_PF2(lossless_flag, frame_hdr->quant_params.IsLossless()); | |
1678 #undef FHDR_TO_PP_PF2 | |
1679 #undef FHDR_TO_PP_PF1 | |
1680 | |
1681 pic_param.filter_level = lf.filter_level; | |
1682 pic_param.sharpness_level = lf.sharpness_level; | |
1683 pic_param.log2_tile_rows = frame_hdr->log2_tile_rows; | |
1684 pic_param.log2_tile_columns = frame_hdr->log2_tile_cols; | |
1685 pic_param.frame_header_length_in_bytes = frame_hdr->uncompressed_header_size; | |
1686 pic_param.first_partition_size = frame_hdr->first_partition_size; | |
1687 | |
1688 ARRAY_MEMCPY_CHECKED(pic_param.mb_segment_tree_probs, seg.tree_probs); | |
1689 ARRAY_MEMCPY_CHECKED(pic_param.segment_pred_probs, seg.pred_probs); | |
1690 | |
1691 pic_param.profile = frame_hdr->profile; | |
1692 | |
1693 if (!vaapi_wrapper_->SubmitBuffer(VAPictureParameterBufferType, | |
1694 sizeof(pic_param), &pic_param)) | |
1695 return false; | |
1696 | |
1697 VASliceParameterBufferVP9 slice_param; | |
1698 memset(&slice_param, 0, sizeof(slice_param)); | |
1699 slice_param.slice_data_size = frame_hdr->frame_size; | |
1700 slice_param.slice_data_offset = 0; | |
1701 slice_param.slice_data_flag = VA_SLICE_DATA_FLAG_ALL; | |
1702 | |
1703 static_assert(arraysize(media::Vp9Segmentation::feature_enabled) == | |
1704 arraysize(slice_param.seg_param), | |
1705 "seg_param array of incorrect size"); | |
1706 for (size_t i = 0; i < arraysize(slice_param.seg_param); ++i) { | |
1707 VASegmentParameterVP9& seg_param = slice_param.seg_param[i]; | |
1708 #define SEG_TO_SP_SF(a, b) seg_param.segment_flags.fields.a = b | |
1709 SEG_TO_SP_SF( | |
1710 segment_reference_enabled, | |
1711 seg.FeatureEnabled(i, media::Vp9Segmentation::SEG_LVL_REF_FRAME)); | |
1712 SEG_TO_SP_SF(segment_reference, | |
1713 seg.FeatureData(i, media::Vp9Segmentation::SEG_LVL_REF_FRAME)); | |
1714 SEG_TO_SP_SF(segment_reference_skipped, | |
1715 seg.FeatureEnabled(i, media::Vp9Segmentation::SEG_LVL_SKIP)); | |
1716 #undef SEG_TO_SP_SF | |
1717 | |
1718 ARRAY_MEMCPY_CHECKED(seg_param.filter_level, lf.lvl[i]); | |
1719 | |
1720 seg_param.luma_dc_quant_scale = seg.y_dequant[i][0]; | |
1721 seg_param.luma_ac_quant_scale = seg.y_dequant[i][1]; | |
1722 seg_param.chroma_dc_quant_scale = seg.uv_dequant[i][0]; | |
1723 seg_param.chroma_ac_quant_scale = seg.uv_dequant[i][1]; | |
1724 } | |
1725 | |
1726 if (!vaapi_wrapper_->SubmitBuffer(VASliceParameterBufferType, | |
1727 sizeof(slice_param), &slice_param)) | |
1728 return false; | |
1729 | |
1730 void* non_const_ptr = const_cast<uint8_t*>(frame_hdr->data); | |
1731 if (!vaapi_wrapper_->SubmitBuffer(VASliceDataBufferType, | |
1732 frame_hdr->frame_size, non_const_ptr)) | |
1733 return false; | |
1734 | |
1735 scoped_refptr<VaapiDecodeSurface> dec_surface = | |
1736 VP9PictureToVaapiDecodeSurface(pic); | |
1737 | |
1738 return vaapi_dec_->DecodeSurface(dec_surface); | |
1739 } | |
1740 | |
1741 bool VaapiVideoDecodeAccelerator::VaapiVP9Accelerator::OutputPicture( | |
1742 const scoped_refptr<VP9Picture>& pic) { | |
1743 scoped_refptr<VaapiDecodeSurface> dec_surface = | |
1744 VP9PictureToVaapiDecodeSurface(pic); | |
1745 | |
1746 vaapi_dec_->SurfaceReady(dec_surface); | |
1747 return true; | |
1748 } | |
1749 | |
1750 scoped_refptr<VaapiVideoDecodeAccelerator::VaapiDecodeSurface> | |
1751 VaapiVideoDecodeAccelerator::VaapiVP9Accelerator:: | |
1752 VP9PictureToVaapiDecodeSurface(const scoped_refptr<VP9Picture>& pic) { | |
1753 VaapiVP9Picture* vaapi_pic = pic->AsVaapiVP9Picture(); | |
1754 CHECK(vaapi_pic); | |
1755 return vaapi_pic->dec_surface(); | |
1756 } | |
1757 | |
// static
media::VideoDecodeAccelerator::SupportedProfiles
VaapiVideoDecodeAccelerator::GetSupportedProfiles() {
  // The set of supported decode profiles is determined entirely by the
  // underlying VA-API driver, so delegate the query to VaapiWrapper.
  return VaapiWrapper::GetSupportedDecodeProfiles();
}
1763 | |
1764 } // namespace content | |
OLD | NEW |