Chromium Code Reviews

Unified diff: content/common/gpu/media/vt_video_decode_accelerator.cc

Issue 727893002: Implement a reorder queue in VTVideoDecodeAccelerator. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@vt_queue_frames
Patch Set: Rebase. Created 6 years ago
 // Copyright 2014 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 #include <CoreVideo/CoreVideo.h>
 #include <OpenGL/CGLIOSurface.h>
 #include <OpenGL/gl.h>
 
 #include "base/bind.h"
 #include "base/command_line.h"
(...skipping 20 matching lines...)
 
 // Size to use for NALU length headers in AVC format (can be 1, 2, or 4).
 static const int kNALUHeaderLength = 4;
 
 // We request 5 picture buffers from the client, each of which has a texture ID
 // that we can bind decoded frames to. We need enough to satisfy preroll, and
 // enough to avoid unnecessary stalling, but no more than that. The resource
 // requirements are low, as we don't need the textures to be backed by storage.
 static const int kNumPictureBuffers = media::limits::kMaxVideoFrames + 1;
 
+// TODO(sandersd): Use the configured reorder window instead.
+static const int kMinReorderQueueSize = 4;
+static const int kMaxReorderQueueSize = 16;
+
 // Route decoded frame callbacks back into the VTVideoDecodeAccelerator.
 static void OutputThunk(
     void* decompression_output_refcon,
     void* source_frame_refcon,
     OSStatus status,
     VTDecodeInfoFlags info_flags,
     CVImageBufferRef image_buffer,
     CMTime presentation_time_stamp,
     CMTime presentation_duration) {
   VTVideoDecodeAccelerator* vda =
       reinterpret_cast<VTVideoDecodeAccelerator*>(decompression_output_refcon);
   vda->Output(source_frame_refcon, status, image_buffer);
 }
 
 VTVideoDecodeAccelerator::Task::Task(TaskType type) : type(type) {
 }
 
 VTVideoDecodeAccelerator::Task::~Task() {
 }
 
 VTVideoDecodeAccelerator::Frame::Frame(int32_t bitstream_id)
-    : bitstream_id(bitstream_id) {
+    : bitstream_id(bitstream_id), pic_order_cnt(0) {
 }
 
 VTVideoDecodeAccelerator::Frame::~Frame() {
 }
 
+bool VTVideoDecodeAccelerator::FrameOrder::operator()(
+    const linked_ptr<Frame>& lhs,
+    const linked_ptr<Frame>& rhs) const {
+  if (lhs->pic_order_cnt != rhs->pic_order_cnt)
+    return lhs->pic_order_cnt > rhs->pic_order_cnt;
+  // If the pic_order_cnt is the same, fall back to bitstream order.
+  // TODO(sandersd): Assign a sequence number in Decode().
+  return lhs->bitstream_id > rhs->bitstream_id;
+}
+
+
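Note: FrameOrder is the comparator for the new reorder queue, which is a
std::priority_queue. Since std::priority_queue is a max-heap, the
greater-than comparison makes top() the frame with the smallest
pic_order_cnt, i.e. the next frame in display order. A minimal standalone
sketch of that behavior (not part of this CL; std::shared_ptr stands in for
linked_ptr):

    #include <iostream>
    #include <memory>
    #include <queue>
    #include <vector>

    struct Frame {
      int bitstream_id;
      int pic_order_cnt;
    };

    // Same shape as FrameOrder above: inverting the comparison turns the
    // max-heap into a min-heap on pic_order_cnt.
    struct FrameOrder {
      bool operator()(const std::shared_ptr<Frame>& lhs,
                      const std::shared_ptr<Frame>& rhs) const {
        if (lhs->pic_order_cnt != rhs->pic_order_cnt)
          return lhs->pic_order_cnt > rhs->pic_order_cnt;
        return lhs->bitstream_id > rhs->bitstream_id;
      }
    };

    int main() {
      std::priority_queue<std::shared_ptr<Frame>,
                          std::vector<std::shared_ptr<Frame>>,
                          FrameOrder> reorder_queue;
      // Frames arrive in decode order with picture order counts 0, 6, 2, 4.
      int bitstream_id = 0;
      for (int poc : {0, 6, 2, 4})
        reorder_queue.push(std::make_shared<Frame>(Frame{bitstream_id++, poc}));
      while (!reorder_queue.empty()) {
        std::cout << reorder_queue.top()->pic_order_cnt << " ";  // 0 2 4 6
        reorder_queue.pop();
      }
      std::cout << "\n";
    }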
 VTVideoDecodeAccelerator::VTVideoDecodeAccelerator(
     CGLContextObj cgl_context,
     const base::Callback<bool(void)>& make_context_current)
     : cgl_context_(cgl_context),
       make_context_current_(make_context_current),
       client_(NULL),
       state_(STATE_DECODING),
       format_(NULL),
       session_(NULL),
+      last_sps_id_(-1),
+      last_pps_id_(-1),
       gpu_task_runner_(base::ThreadTaskRunnerHandle::Get()),
       decoder_thread_("VTDecoderThread"),
       weak_this_factory_(this) {
   DCHECK(!make_context_current_.is_null());
   callback_.decompressionOutputCallback = OutputThunk;
   callback_.decompressionOutputRefCon = this;
   weak_this_ = weak_this_factory_.GetWeakPtr();
 }
 
 VTVideoDecodeAccelerator::~VTVideoDecodeAccelerator() {
(...skipping 168 matching lines...)
   size_t data_size = 0;
   std::vector<media::H264NALU> nalus;
   parser_.SetStream(buf, size);
   media::H264NALU nalu;
   while (true) {
     media::H264Parser::Result result = parser_.AdvanceToNextNALU(&nalu);
     if (result == media::H264Parser::kEOStream)
       break;
     if (result != media::H264Parser::kOk) {
       DLOG(ERROR) << "Failed to find H.264 NALU";
-      NotifyError(PLATFORM_FAILURE);
+      NotifyError(UNREADABLE_INPUT);
       return;
     }
     switch (nalu.nal_unit_type) {
       case media::H264NALU::kSPS:
         last_sps_.assign(nalu.data, nalu.data + nalu.size);
         last_spsext_.clear();
         config_changed = true;
+        if (parser_.ParseSPS(&last_sps_id_) != media::H264Parser::kOk) {
+          DLOG(ERROR) << "Could not parse SPS";
+          NotifyError(UNREADABLE_INPUT);
+          return;
+        }
         break;
+
       case media::H264NALU::kSPSExt:
         // TODO(sandersd): Check that the previous NALU was an SPS.
         last_spsext_.assign(nalu.data, nalu.data + nalu.size);
         config_changed = true;
         break;
+
       case media::H264NALU::kPPS:
         last_pps_.assign(nalu.data, nalu.data + nalu.size);
         config_changed = true;
+        if (parser_.ParsePPS(&last_pps_id_) != media::H264Parser::kOk) {
+          DLOG(ERROR) << "Could not parse PPS";
+          NotifyError(UNREADABLE_INPUT);
+          return;
+        }
         break;
+
       case media::H264NALU::kSliceDataA:
       case media::H264NALU::kSliceDataB:
       case media::H264NALU::kSliceDataC:
286 DLOG(ERROR) << "Coded slide data partitions not implemented."; 316 DLOG(ERROR) << "Coded slide data partitions not implemented.";
287 NotifyError(PLATFORM_FAILURE); 317 NotifyError(PLATFORM_FAILURE);
288 return; 318 return;
+
+      case media::H264NALU::kNonIDRSlice:
+        // TODO(sandersd): Check that there has been an SPS or IDR slice since
+        // the last reset.
       case media::H264NALU::kIDRSlice:
-      case media::H264NALU::kNonIDRSlice:
-        // TODO(sandersd): Compute pic_order_count.
+      {
+        // TODO(sandersd): Make sure this only happens once per frame.
+        DCHECK_EQ(frame->pic_order_cnt, 0);
+
+        media::H264SliceHeader slice_hdr;
+        result = parser_.ParseSliceHeader(nalu, &slice_hdr);
+        if (result != media::H264Parser::kOk) {
+          DLOG(ERROR) << "Could not parse slice header";
+          NotifyError(UNREADABLE_INPUT);
+          return;
+        }
+
+        // TODO(sandersd): Keep a cache of recent SPS/PPS units instead of
+        // only the most recent ones.
+        DCHECK_EQ(slice_hdr.pic_parameter_set_id, last_pps_id_);
+        const media::H264PPS* pps =
+            parser_.GetPPS(slice_hdr.pic_parameter_set_id);
+        if (!pps) {
+          DLOG(ERROR) << "Missing PPS referenced by slice";
+          NotifyError(UNREADABLE_INPUT);
+          return;
+        }
+
+        DCHECK_EQ(pps->seq_parameter_set_id, last_sps_id_);
+        const media::H264SPS* sps = parser_.GetSPS(pps->seq_parameter_set_id);
+        if (!sps) {
+          DLOG(ERROR) << "Missing SPS referenced by PPS";
+          NotifyError(UNREADABLE_INPUT);
+          return;
+        }
+
+        // TODO(sandersd): Compute pic_order_cnt.
+        DCHECK(!slice_hdr.field_pic_flag);
+        frame->pic_order_cnt = 0;
+      }
       default:
         nalus.push_back(nalu);
         data_size += kNALUHeaderLength + nalu.size;
         break;
     }
   }
 
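Note: the VideoToolbox configuration step referenced below is elided from this
diff. On OS X an H.264 format description is normally built from the cached
parameter sets with CMVideoFormatDescriptionCreateFromH264ParameterSets(); the
helper below is a hedged sketch of that API's shape, not code from this CL:

    #include <CoreMedia/CoreMedia.h>

    #include <stdint.h>
    #include <vector>

    // Hypothetical illustration: build a format description from the most
    // recently seen SPS and PPS, with 4-byte NALU length headers to match
    // kNALUHeaderLength.
    static CMFormatDescriptionRef CreateFormatDescription(
        const std::vector<uint8_t>& sps,
        const std::vector<uint8_t>& pps) {
      const uint8_t* parameter_sets[2] = {sps.data(), pps.data()};
      const size_t parameter_set_sizes[2] = {sps.size(), pps.size()};
      CMFormatDescriptionRef format = NULL;
      OSStatus status = CMVideoFormatDescriptionCreateFromH264ParameterSets(
          kCFAllocatorDefault,
          2,                    // parameter_set_count
          parameter_sets,       // &parameter_set_pointers
          parameter_set_sizes,  // &parameter_set_sizes
          4,                    // nal_unit_header_length
          &format);
      return status == noErr ? format : NULL;
    }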
   // Initialize VideoToolbox.
   // TODO(sandersd): Instead of assuming that the last SPS and PPS units are
   // always the correct ones, maintain a cache of recent SPS and PPS units and
(...skipping 18 matching lines...)
     return;
   }
 
   // If the session is not configured by this point, fail.
   if (!session_) {
     DLOG(ERROR) << "Image slice without configuration";
     NotifyError(INVALID_ARGUMENT);
     return;
   }
 
+  // Update the frame metadata with configuration data.
+  frame->coded_size = coded_size_;
+
   // Create a memory-backed CMBlockBuffer for the translated data.
   // TODO(sandersd): Pool of memory blocks.
   base::ScopedCFTypeRef<CMBlockBufferRef> data;
   OSStatus status = CMBlockBufferCreateWithMemoryBlock(
       kCFAllocatorDefault,
       NULL,                 // &memory_block
       data_size,            // block_length
       kCFAllocatorDefault,  // block_allocator
       NULL,                 // &custom_block_source
       0,                    // offset_to_data
(...skipping 38 matching lines...)
       0,      // num_sample_timing_entries
       NULL,   // &sample_timing_array
       0,      // num_sample_size_entries
       NULL,   // &sample_size_array
       sample.InitializeInto());
   if (status) {
     NOTIFY_STATUS("CMSampleBufferCreate()", status);
     return;
   }
 
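Note: the copy loop elided above (between CMBlockBufferCreateWithMemoryBlock()
and CMSampleBufferCreate()) rewrites each Annex B NALU into the block buffer
as a 4-byte big-endian length header followed by the payload, which is the AVC
sample layout VideoToolbox expects. A standalone sketch of that repackaging
(illustration only, not the CL's actual buffer code):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    struct Nalu {
      const uint8_t* data;
      size_t size;
    };

    std::vector<uint8_t> PackNalusAsAvc(const std::vector<Nalu>& nalus) {
      std::vector<uint8_t> out;
      for (const Nalu& nalu : nalus) {
        // 4-byte big-endian length header (kNALUHeaderLength == 4).
        const uint8_t header[4] = {
            static_cast<uint8_t>(nalu.size >> 24),
            static_cast<uint8_t>(nalu.size >> 16),
            static_cast<uint8_t>(nalu.size >> 8),
            static_cast<uint8_t>(nalu.size)};
        out.insert(out.end(), header, header + 4);
        out.insert(out.end(), nalu.data, nalu.data + nalu.size);
      }
      return out;
    }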
-  // Update the frame data.
-  frame->coded_size = coded_size_;
-
   // Send the frame for decoding.
   // Asynchronous Decompression allows for parallel submission of frames
   // (without it, DecodeFrame() does not return until the frame has been
   // decoded). We don't enable Temporal Processing so that frames are always
   // returned in decode order; this makes it easier to avoid deadlock.
   VTDecodeFrameFlags decode_flags =
       kVTDecodeFrame_EnableAsynchronousDecompression;
   status = VTDecompressionSessionDecodeFrame(
       session_,
       sample,  // sample_buffer
(...skipping 23 matching lines...)
         &VTVideoDecodeAccelerator::DecodeDone, weak_this_, frame));
   }
 }
 
 void VTVideoDecodeAccelerator::DecodeDone(Frame* frame) {
   DCHECK(gpu_thread_checker_.CalledOnValidThread());
   DCHECK_EQ(frame->bitstream_id, pending_frames_.front()->bitstream_id);
   Task task(TASK_FRAME);
   task.frame = pending_frames_.front();
   pending_frames_.pop();
-  pending_tasks_.push(task);
-  ProcessTasks();
+  task_queue_.push(task);
+  ProcessWorkQueues();
 }
 
 void VTVideoDecodeAccelerator::FlushTask(TaskType type) {
   DCHECK(decoder_thread_.message_loop_proxy()->BelongsToCurrentThread());
   FinishDelayedFrames();
 
   // Always queue a task, even if FinishDelayedFrames() fails, so that
   // destruction always completes.
   gpu_task_runner_->PostTask(FROM_HERE, base::Bind(
       &VTVideoDecodeAccelerator::FlushDone, weak_this_, type));
 }
 
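Note: FinishDelayedFrames() is defined in an elided portion of this file.
Because decoding uses kVTDecodeFrame_EnableAsynchronousDecompression, the
natural way to drain outstanding output is
VTDecompressionSessionWaitForAsynchronousFrames(); the sketch below is an
assumption about the helper's shape, not this CL's actual implementation:

    // Hedged sketch; the real FinishDelayedFrames() is not shown in this diff.
    bool VTVideoDecodeAccelerator::FinishDelayedFrames() {
      DCHECK(decoder_thread_.message_loop_proxy()->BelongsToCurrentThread());
      if (session_) {
        // Blocks until every pending asynchronous frame has been output.
        OSStatus status =
            VTDecompressionSessionWaitForAsynchronousFrames(session_);
        if (status) {
          NOTIFY_STATUS("VTDecompressionSessionWaitForAsynchronousFrames()",
                        status);
          return false;
        }
      }
      return true;
    }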
 void VTVideoDecodeAccelerator::FlushDone(TaskType type) {
   DCHECK(gpu_thread_checker_.CalledOnValidThread());
-  pending_tasks_.push(Task(type));
-  ProcessTasks();
+  task_queue_.push(Task(type));
+  ProcessWorkQueues();
 }
 
 void VTVideoDecodeAccelerator::Decode(const media::BitstreamBuffer& bitstream) {
   DCHECK(gpu_thread_checker_.CalledOnValidThread());
   DCHECK_EQ(assigned_bitstream_ids_.count(bitstream.id()), 0u);
   assigned_bitstream_ids_.insert(bitstream.id());
   Frame* frame = new Frame(bitstream.id());
   pending_frames_.push(make_linked_ptr(frame));
   decoder_thread_.message_loop_proxy()->PostTask(FROM_HERE, base::Bind(
       &VTVideoDecodeAccelerator::DecodeTask, base::Unretained(this),
       bitstream, frame));
 }
 
 void VTVideoDecodeAccelerator::AssignPictureBuffers(
     const std::vector<media::PictureBuffer>& pictures) {
   DCHECK(gpu_thread_checker_.CalledOnValidThread());
 
   for (const media::PictureBuffer& picture : pictures) {
     DCHECK(!texture_ids_.count(picture.id()));
     assigned_picture_ids_.insert(picture.id());
     available_picture_ids_.push_back(picture.id());
     texture_ids_[picture.id()] = picture.texture_id();
   }
 
   // Pictures are not marked as uncleared until after this method returns, and
   // they will be broken if they are used before that happens. So, schedule
   // future work after that happens.
   gpu_task_runner_->PostTask(FROM_HERE, base::Bind(
-      &VTVideoDecodeAccelerator::ProcessTasks, weak_this_));
+      &VTVideoDecodeAccelerator::ProcessWorkQueues, weak_this_));
 }
 
 void VTVideoDecodeAccelerator::ReusePictureBuffer(int32_t picture_id) {
   DCHECK(gpu_thread_checker_.CalledOnValidThread());
   DCHECK_EQ(CFGetRetainCount(picture_bindings_[picture_id]), 1);
   picture_bindings_.erase(picture_id);
   if (assigned_picture_ids_.count(picture_id) != 0) {
     available_picture_ids_.push_back(picture_id);
-    ProcessTasks();
+    ProcessWorkQueues();
   } else {
     client_->DismissPictureBuffer(picture_id);
   }
 }
 
-void VTVideoDecodeAccelerator::ProcessTasks() {
+void VTVideoDecodeAccelerator::ProcessWorkQueues() {
   DCHECK(gpu_thread_checker_.CalledOnValidThread());
+  switch (state_) {
+    case STATE_DECODING:
+      // TODO(sandersd): Batch where possible.
+      while (ProcessReorderQueue() || ProcessTaskQueue());
+      return;
 
-  while (!pending_tasks_.empty()) {
-    const Task& task = pending_tasks_.front();
+    case STATE_ERROR:
+      // Do nothing until Destroy() is called.
+      return;
 
-    switch (state_) {
-      case STATE_DECODING:
-        if (!ProcessTask(task))
-          return;
-        pending_tasks_.pop();
-        break;
-
-      case STATE_ERROR:
-        // Do nothing until Destroy() is called.
-        return;
-
-      case STATE_DESTROYING:
-        // Discard tasks until destruction is complete.
-        if (task.type == TASK_DESTROY) {
+    case STATE_DESTROYING:
+      // Drop tasks until we are ready to destruct.
+      while (!task_queue_.empty()) {
+        if (task_queue_.front().type == TASK_DESTROY) {
           delete this;
           return;
         }
-        pending_tasks_.pop();
-        break;
-    }
-  }
+        task_queue_.pop();
+      }
+      return;
+  }
 }
 
-bool VTVideoDecodeAccelerator::ProcessTask(const Task& task) {
+bool VTVideoDecodeAccelerator::ProcessTaskQueue() {
   DCHECK(gpu_thread_checker_.CalledOnValidThread());
   DCHECK_EQ(state_, STATE_DECODING);
 
+  if (task_queue_.empty())
+    return false;
+
+  const Task& task = task_queue_.front();
   switch (task.type) {
     case TASK_FRAME:
-      return ProcessFrame(*task.frame);
+      // TODO(sandersd): Signal IDR explicitly (not using pic_order_cnt == 0).
+      if (reorder_queue_.size() < kMaxReorderQueueSize &&
+          (task.frame->pic_order_cnt != 0 || reorder_queue_.empty())) {
+        assigned_bitstream_ids_.erase(task.frame->bitstream_id);
+        client_->NotifyEndOfBitstreamBuffer(task.frame->bitstream_id);
+        reorder_queue_.push(task.frame);
+        task_queue_.pop();
+        return true;
+      }
+      return false;
 
     case TASK_FLUSH:
       DCHECK_EQ(task.type, pending_flush_tasks_.front());
-      pending_flush_tasks_.pop();
-      client_->NotifyFlushDone();
-      return true;
+      if (reorder_queue_.size() == 0) {
+        pending_flush_tasks_.pop();
+        client_->NotifyFlushDone();
+        task_queue_.pop();
+        return true;
+      }
+      return false;
 
     case TASK_RESET:
       DCHECK_EQ(task.type, pending_flush_tasks_.front());
-      pending_flush_tasks_.pop();
-      client_->NotifyResetDone();
-      return true;
+      if (reorder_queue_.size() == 0) {
+        pending_flush_tasks_.pop();
+        client_->NotifyResetDone();
+        task_queue_.pop();
+        return true;
+      }
+      return false;
 
     case TASK_DESTROY:
       NOTREACHED() << "Can't destroy while in STATE_DECODING.";
       NotifyError(ILLEGAL_STATE);
       return false;
   }
 }
 
+bool VTVideoDecodeAccelerator::ProcessReorderQueue() {
+  DCHECK(gpu_thread_checker_.CalledOnValidThread());
+  DCHECK_EQ(state_, STATE_DECODING);
+
+  if (reorder_queue_.empty())
+    return false;
+
+  // If the next task is a flush (because there is a pending flush or because
+  // the next frame is an IDR), then we don't need a full reorder buffer to
+  // send the next frame.
+  bool flushing = !task_queue_.empty() &&
+                  (task_queue_.front().type != TASK_FRAME ||
+                   task_queue_.front().frame->pic_order_cnt == 0);
+  if (flushing || reorder_queue_.size() >= kMinReorderQueueSize) {
+    if (ProcessFrame(*reorder_queue_.top())) {
+      reorder_queue_.pop();
+      return true;
+    }
+  }
+
+  return false;
+}
+
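Note: together, ProcessTaskQueue() and ProcessReorderQueue() implement the
reorder buffer's admission and drain policy: frames are admitted until
kMaxReorderQueueSize is reached (backpressure on the task queue), held until
at least kMinReorderQueueSize are buffered (approximating the stream's reorder
window), and drained unconditionally when a flush is pending or an IDR arrives
(currently detected as pic_order_cnt == 0). A condensed restatement, for
illustration only:

    // Illustration only: the admission and drain conditions used above.
    static const size_t kMinReorderQueueSize = 4;   // Approximate reorder window.
    static const size_t kMaxReorderQueueSize = 16;  // Backpressure bound.

    bool CanAdmitFrame(size_t reorder_queue_size) {
      return reorder_queue_size < kMaxReorderQueueSize;
    }

    bool ShouldDrainOneFrame(bool flush_or_idr_pending,
                             size_t reorder_queue_size) {
      return flush_or_idr_pending ||
             reorder_queue_size >= kMinReorderQueueSize;
    }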
 bool VTVideoDecodeAccelerator::ProcessFrame(const Frame& frame) {
   DCHECK(gpu_thread_checker_.CalledOnValidThread());
   DCHECK_EQ(state_, STATE_DECODING);
+
   // If the next pending flush is for a reset, then the frame will be dropped.
   bool resetting = !pending_flush_tasks_.empty() &&
                    pending_flush_tasks_.front() == TASK_RESET;
+
   if (!resetting && frame.image.get()) {
     // If the |coded_size| has changed, request new picture buffers and then
     // wait for them.
     // TODO(sandersd): If GpuVideoDecoder didn't specifically check the size of
     // textures, this would be unnecessary, as the size is actually a property
     // of the texture binding, not the texture. We rebind every frame, so the
     // size passed to ProvidePictureBuffers() is meaningless.
     if (picture_size_ != frame.coded_size) {
       // Dismiss current pictures.
       for (int32_t picture_id : assigned_picture_ids_)
         client_->DismissPictureBuffer(picture_id);
       assigned_picture_ids_.clear();
       available_picture_ids_.clear();
 
       // Request new pictures.
       picture_size_ = frame.coded_size;
       client_->ProvidePictureBuffers(
           kNumPictureBuffers, coded_size_, GL_TEXTURE_RECTANGLE_ARB);
       return false;
     }
     if (!SendFrame(frame))
       return false;
   }
-  assigned_bitstream_ids_.erase(frame.bitstream_id);
-  client_->NotifyEndOfBitstreamBuffer(frame.bitstream_id);
+
   return true;
 }
 
 bool VTVideoDecodeAccelerator::SendFrame(const Frame& frame) {
   DCHECK(gpu_thread_checker_.CalledOnValidThread());
   DCHECK_EQ(state_, STATE_DECODING);
 
   if (available_picture_ids_.empty())
     return false;
 
(...skipping 44 matching lines...)
 
 void VTVideoDecodeAccelerator::QueueFlush(TaskType type) {
   DCHECK(gpu_thread_checker_.CalledOnValidThread());
   pending_flush_tasks_.push(type);
   decoder_thread_.message_loop_proxy()->PostTask(FROM_HERE, base::Bind(
       &VTVideoDecodeAccelerator::FlushTask, base::Unretained(this),
       type));
 
   // If this is a new flush request, see if we can make progress.
   if (pending_flush_tasks_.size() == 1)
-    ProcessTasks();
+    ProcessWorkQueues();
 }
 
 void VTVideoDecodeAccelerator::Flush() {
   DCHECK(gpu_thread_checker_.CalledOnValidThread());
   QueueFlush(TASK_FLUSH);
 }
 
 void VTVideoDecodeAccelerator::Reset() {
   DCHECK(gpu_thread_checker_.CalledOnValidThread());
   QueueFlush(TASK_RESET);
 }
 
 void VTVideoDecodeAccelerator::Destroy() {
   DCHECK(gpu_thread_checker_.CalledOnValidThread());
 
   // In a forceful shutdown, the decoder thread may be dead already.
   if (!decoder_thread_.IsRunning()) {
     delete this;
     return;
   }
 
   // For a graceful shutdown, return assigned buffers and flush before
   // destructing |this|.
+  // TODO(sandersd): Make sure the decoder won't try to read the buffers again
+  // before discarding them.
   for (int32_t bitstream_id : assigned_bitstream_ids_)
     client_->NotifyEndOfBitstreamBuffer(bitstream_id);
   assigned_bitstream_ids_.clear();
   state_ = STATE_DESTROYING;
   QueueFlush(TASK_DESTROY);
 }
 
 bool VTVideoDecodeAccelerator::CanDecodeOnIOThread() {
   return false;
 }
 
 }  // namespace content