Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright 2014 The Chromium Authors. All rights reserved. | 1 // Copyright 2014 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include <CoreVideo/CoreVideo.h> | 5 #include <CoreVideo/CoreVideo.h> |
| 6 #include <OpenGL/CGLIOSurface.h> | 6 #include <OpenGL/CGLIOSurface.h> |
| 7 #include <OpenGL/gl.h> | 7 #include <OpenGL/gl.h> |
| 8 | 8 |
| 9 #include "base/bind.h" | 9 #include "base/bind.h" |
| 10 #include "base/command_line.h" | 10 #include "base/command_line.h" |
| (...skipping 20 matching lines...) | |
| 31 | 31 |
| 32 // Size to use for NALU length headers in AVC format (can be 1, 2, or 4). | 32 // Size to use for NALU length headers in AVC format (can be 1, 2, or 4). |
| 33 static const int kNALUHeaderLength = 4; | 33 static const int kNALUHeaderLength = 4; |
| 34 | 34 |
| 35 // We request 5 picture buffers from the client, each of which has a texture ID | 35 // We request 5 picture buffers from the client, each of which has a texture ID |
| 36 // that we can bind decoded frames to. We need enough to satisfy preroll, and | 36 // that we can bind decoded frames to. We need enough to satisfy preroll, and |
| 37 // enough to avoid unnecessary stalling, but no more than that. The resource | 37 // enough to avoid unnecessary stalling, but no more than that. The resource |
| 38 // requirements are low, as we don't need the textures to be backed by storage. | 38 // requirements are low, as we don't need the textures to be backed by storage. |
| 39 static const int kNumPictureBuffers = media::limits::kMaxVideoFrames + 1; | 39 static const int kNumPictureBuffers = media::limits::kMaxVideoFrames + 1; |
| 40 | 40 |
| 41 // TODO(sandersd): Use the configured reorder window instead. | 41 // Maximum number of frames to queue for reordering before we stop asking for |
| 42 static const int kMinReorderQueueSize = 4; | 42 // more. (NotifyEndOfBitstreamBuffer() is called when frames are moved into the |
| 43 // reorder queue.) | |
| 43 static const int kMaxReorderQueueSize = 16; | 44 static const int kMaxReorderQueueSize = 16; |
| 44 | 45 |
| 45 // Route decoded frame callbacks back into the VTVideoDecodeAccelerator. | 46 // Route decoded frame callbacks back into the VTVideoDecodeAccelerator. |
| 46 static void OutputThunk( | 47 static void OutputThunk( |
| 47 void* decompression_output_refcon, | 48 void* decompression_output_refcon, |
| 48 void* source_frame_refcon, | 49 void* source_frame_refcon, |
| 49 OSStatus status, | 50 OSStatus status, |
| 50 VTDecodeInfoFlags info_flags, | 51 VTDecodeInfoFlags info_flags, |
| 51 CVImageBufferRef image_buffer, | 52 CVImageBufferRef image_buffer, |
| 52 CMTime presentation_time_stamp, | 53 CMTime presentation_time_stamp, |
| 53 CMTime presentation_duration) { | 54 CMTime presentation_duration) { |
| 54 VTVideoDecodeAccelerator* vda = | 55 VTVideoDecodeAccelerator* vda = |
| 55 reinterpret_cast<VTVideoDecodeAccelerator*>(decompression_output_refcon); | 56 reinterpret_cast<VTVideoDecodeAccelerator*>(decompression_output_refcon); |
| 56 vda->Output(source_frame_refcon, status, image_buffer); | 57 vda->Output(source_frame_refcon, status, image_buffer); |
| 57 } | 58 } |
| 58 | 59 |
| 59 VTVideoDecodeAccelerator::Task::Task(TaskType type) : type(type) { | 60 VTVideoDecodeAccelerator::Task::Task(TaskType type) : type(type) { |
| 60 } | 61 } |
| 61 | 62 |
| 62 VTVideoDecodeAccelerator::Task::~Task() { | 63 VTVideoDecodeAccelerator::Task::~Task() { |
| 63 } | 64 } |
| 64 | 65 |
| 65 VTVideoDecodeAccelerator::Frame::Frame(int32_t bitstream_id) | 66 VTVideoDecodeAccelerator::Frame::Frame(int32_t bitstream_id) |
| 66 : bitstream_id(bitstream_id), pic_order_cnt(0) { | 67 : bitstream_id(bitstream_id), pic_order_cnt(0), reorder_window(0) { |
| 67 } | 68 } |
| 68 | 69 |
| 69 VTVideoDecodeAccelerator::Frame::~Frame() { | 70 VTVideoDecodeAccelerator::Frame::~Frame() { |
| 70 } | 71 } |
| 71 | 72 |
| 72 bool VTVideoDecodeAccelerator::FrameOrder::operator()( | 73 bool VTVideoDecodeAccelerator::FrameOrder::operator()( |
| 73 const linked_ptr<Frame>& lhs, | 74 const linked_ptr<Frame>& lhs, |
| 74 const linked_ptr<Frame>& rhs) const { | 75 const linked_ptr<Frame>& rhs) const { |
| 75 if (lhs->pic_order_cnt != rhs->pic_order_cnt) | 76 if (lhs->pic_order_cnt != rhs->pic_order_cnt) |
| 76 return lhs->pic_order_cnt > rhs->pic_order_cnt; | 77 return lhs->pic_order_cnt > rhs->pic_order_cnt; |
| 77 // If the pic_order is the same, fallback on using the bitstream order. | 78 // If |pic_order_cnt| is the same, fall back on using the bitstream order. |
| 78 // TODO(sandersd): Assign a sequence number in Decode(). | 79 // TODO(sandersd): Assign a sequence number in Decode() and use that instead. |
| 80 // TODO(sandersd): Using the sequence number, ensure that frames older than | |
| 81 // |kMaxReorderQueueSize| are ordered first, regardless of |pic_order_cnt|. | |
| 79 return lhs->bitstream_id > rhs->bitstream_id; | 82 return lhs->bitstream_id > rhs->bitstream_id; |
| 80 } | 83 } |
| 81 | 84 |
| 82 | 85 |
| 83 VTVideoDecodeAccelerator::VTVideoDecodeAccelerator( | 86 VTVideoDecodeAccelerator::VTVideoDecodeAccelerator( |
| 84 CGLContextObj cgl_context, | 87 CGLContextObj cgl_context, |
| 85 const base::Callback<bool(void)>& make_context_current) | 88 const base::Callback<bool(void)>& make_context_current) |
| 86 : cgl_context_(cgl_context), | 89 : cgl_context_(cgl_context), |
| 87 make_context_current_(make_context_current), | 90 make_context_current_(make_context_current), |
| 88 client_(NULL), | 91 client_(NULL), |
| (...skipping 222 matching lines...) | |
| 311 break; | 314 break; |
| 312 | 315 |
| 313 case media::H264NALU::kSliceDataA: | 316 case media::H264NALU::kSliceDataA: |
| 314 case media::H264NALU::kSliceDataB: | 317 case media::H264NALU::kSliceDataB: |
| 315 case media::H264NALU::kSliceDataC: | 318 case media::H264NALU::kSliceDataC: |
| 316 DLOG(ERROR) << "Coded slice data partitions not implemented."; | 319 DLOG(ERROR) << "Coded slice data partitions not implemented."; |
| 317 NotifyError(PLATFORM_FAILURE); | 320 NotifyError(PLATFORM_FAILURE); |
| 318 return; | 321 return; |
| 319 | 322 |
| 320 case media::H264NALU::kNonIDRSlice: | 323 case media::H264NALU::kNonIDRSlice: |
| 321 // TODO(sandersd): Check that there has been an SPS or IDR slice since | 324 // TODO(sandersd): Check that there has been an IDR slice since the |
| 322 // the last reset. | 325 // last reset. |
| 323 case media::H264NALU::kIDRSlice: | 326 case media::H264NALU::kIDRSlice: |
| 324 { | 327 { |
| 325 // TODO(sandersd): Make sure this only happens once per frame. | 328 // TODO(sandersd): Make sure this only happens once per frame. |
| 326 DCHECK_EQ(frame->pic_order_cnt, 0); | 329 DCHECK_EQ(frame->pic_order_cnt, 0); |
| 327 | 330 |
| 328 media::H264SliceHeader slice_hdr; | 331 media::H264SliceHeader slice_hdr; |
| 329 result = parser_.ParseSliceHeader(nalu, &slice_hdr); | 332 result = parser_.ParseSliceHeader(nalu, &slice_hdr); |
| 330 if (result != media::H264Parser::kOk) { | 333 if (result != media::H264Parser::kOk) { |
| 331 DLOG(ERROR) << "Could not parse slice header"; | 334 DLOG(ERROR) << "Could not parse slice header"; |
| 332 NotifyError(UNREADABLE_INPUT); | 335 NotifyError(UNREADABLE_INPUT); |
| 333 return; | 336 return; |
| 334 } | 337 } |
| 335 | 338 |
| 336 // TODO(sandersd): Keep a cache of recent SPS/PPS units instead of | 339 // TODO(sandersd): Maintain a cache of configurations and reconfigure |
| 337 // only the most recent ones. | 340 // only when a slice references a new config. |
| 338 DCHECK_EQ(slice_hdr.pic_parameter_set_id, last_pps_id_); | 341 DCHECK_EQ(slice_hdr.pic_parameter_set_id, last_pps_id_); |
| 339 const media::H264PPS* pps = | 342 const media::H264PPS* pps = |
| 340 parser_.GetPPS(slice_hdr.pic_parameter_set_id); | 343 parser_.GetPPS(slice_hdr.pic_parameter_set_id); |
| 341 if (!pps) { | 344 if (!pps) { |
| 342 DLOG(ERROR) << "Missing PPS referenced by slice"; | 345 DLOG(ERROR) << "Missing PPS referenced by slice"; |
| 343 NotifyError(UNREADABLE_INPUT); | 346 NotifyError(UNREADABLE_INPUT); |
| 344 return; | 347 return; |
| 345 } | 348 } |
| 346 | 349 |
| 347 DCHECK_EQ(pps->seq_parameter_set_id, last_sps_id_); | 350 DCHECK_EQ(pps->seq_parameter_set_id, last_sps_id_); |
| 348 const media::H264SPS* sps = parser_.GetSPS(pps->seq_parameter_set_id); | 351 const media::H264SPS* sps = parser_.GetSPS(pps->seq_parameter_set_id); |
| 349 if (!sps) { | 352 if (!sps) { |
| 350 DLOG(ERROR) << "Missing SPS referenced by PPS"; | 353 DLOG(ERROR) << "Missing SPS referenced by PPS"; |
| 351 NotifyError(UNREADABLE_INPUT); | 354 NotifyError(UNREADABLE_INPUT); |
| 352 return; | 355 return; |
| 353 } | 356 } |
| 354 | 357 |
| 355 // TODO(sandersd): Compute pic_order_cnt. | 358 if (!poc_.ComputePicOrderCnt(sps, slice_hdr, &frame->pic_order_cnt)) { |
| 356 DCHECK(!slice_hdr.field_pic_flag); | 359 NotifyError(UNREADABLE_INPUT); |
| 357 frame->pic_order_cnt = 0; | 360 return; |
| 361 } | |
| 362 | |
| 363 if (sps->vui_parameters_present_flag && | |
| 364 sps->bitstream_restriction_flag) { | |
| 365 frame->reorder_window = std::min(sps->max_num_reorder_frames, | |
| 366 kMaxReorderQueueSize - 1); | |
| 367 } | |
| 358 } | 368 } |
| 359 default: | 369 default: |
| 360 nalus.push_back(nalu); | 370 nalus.push_back(nalu); |
| 361 data_size += kNALUHeaderLength + nalu.size; | 371 data_size += kNALUHeaderLength + nalu.size; |
| 362 break; | 372 break; |
| 363 } | 373 } |
| 364 } | 374 } |
| 365 | 375 |
| 366 // Initialize VideoToolbox. | 376 // Initialize VideoToolbox. |
| 367 // TODO(sandersd): Instead of assuming that the last SPS and PPS units are | 377 // TODO(sandersd): Instead of assuming that the last SPS and PPS units are |
| (...skipping 243 matching lines...) | |
| 611 pending_flush_tasks_.pop(); | 621 pending_flush_tasks_.pop(); |
| 612 client_->NotifyFlushDone(); | 622 client_->NotifyFlushDone(); |
| 613 task_queue_.pop(); | 623 task_queue_.pop(); |
| 614 return true; | 624 return true; |
| 615 } | 625 } |
| 616 return false; | 626 return false; |
| 617 | 627 |
| 618 case TASK_RESET: | 628 case TASK_RESET: |
| 619 DCHECK_EQ(task.type, pending_flush_tasks_.front()); | 629 DCHECK_EQ(task.type, pending_flush_tasks_.front()); |
| 620 if (reorder_queue_.size() == 0) { | 630 if (reorder_queue_.size() == 0) { |
| 631 last_sps_id_ = -1; | |
| 632 last_pps_id_ = -1; | |
| 633 last_sps_.clear(); | |
| 634 last_spsext_.clear(); | |
| 635 last_pps_.clear(); | |
| 636 poc_.Reset(); | |
| 621 pending_flush_tasks_.pop(); | 637 pending_flush_tasks_.pop(); |
| 622 client_->NotifyResetDone(); | 638 client_->NotifyResetDone(); |
| 623 task_queue_.pop(); | 639 task_queue_.pop(); |
| 624 return true; | 640 return true; |
| 625 } | 641 } |
| 626 return false; | 642 return false; |
| 627 | 643 |
| 628 case TASK_DESTROY: | 644 case TASK_DESTROY: |
| 629 NOTREACHED() << "Can't destroy while in STATE_DECODING."; | 645 NOTREACHED() << "Can't destroy while in STATE_DECODING."; |
| 630 NotifyError(ILLEGAL_STATE); | 646 NotifyError(ILLEGAL_STATE); |
| 631 return false; | 647 return false; |
| 632 } | 648 } |
| 633 } | 649 } |
| 634 | 650 |
| 635 bool VTVideoDecodeAccelerator::ProcessReorderQueue() { | 651 bool VTVideoDecodeAccelerator::ProcessReorderQueue() { |
| 636 DCHECK(gpu_thread_checker_.CalledOnValidThread()); | 652 DCHECK(gpu_thread_checker_.CalledOnValidThread()); |
| 637 DCHECK_EQ(state_, STATE_DECODING); | 653 DCHECK_EQ(state_, STATE_DECODING); |
| 638 | 654 |
| 639 if (reorder_queue_.empty()) | 655 if (reorder_queue_.empty()) |
| 640 return false; | 656 return false; |
| 641 | 657 |
| 642 // If the next task is a flush (because there is a pending flush or because | 658 // If the next task is a flush (because there is a pending flush or because |
| 643 // the next frame is an IDR), then we don't need a full reorder buffer to send | 659 // the next frame is an IDR), then we don't need a full reorder buffer to send |
| 644 // the next frame. | 660 // the next frame. |
| 645 bool flushing = !task_queue_.empty() && | 661 bool flushing = !task_queue_.empty() && |
| 646 (task_queue_.front().type != TASK_FRAME || | 662 (task_queue_.front().type != TASK_FRAME || |
| 647 task_queue_.front().frame->pic_order_cnt == 0); | 663 task_queue_.front().frame->pic_order_cnt == 0); |
| 648 if (flushing || reorder_queue_.size() >= kMinReorderQueueSize) { | 664 |
| 665 int32_t reorder_window = reorder_queue_.top()->reorder_window; | |
| 666 if (flushing || (int32_t)reorder_queue_.size() > reorder_window) { | |

DaleCurtis (2014/11/21 21:42:05):
static_cast, and you're forcing size_t into int32_

sandersd (OOO until July 31) (2014/11/21 23:33:16):
Done. I did it the previous way because there is
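To make the point in the thread above concrete, here is a minimal standalone sketch (a hypothetical helper, not part of this CL) of the size check in ProcessReorderQueue(): `size()` returns a `size_t`, so comparing it against a signed `int32_t` requires a narrowing conversion, and the reviewer's preference is an explicit `static_cast` over the C-style cast used in the patch.

```cpp
#include <cstdint>
#include <queue>

// Hypothetical standalone version of the check in ProcessReorderQueue().
// The queue element type is simplified to int; only the size comparison
// matters here.
bool ShouldEmitNextFrame(const std::priority_queue<int>& reorder_queue,
                         int32_t reorder_window,
                         bool flushing) {
  // The patch writes (int32_t)reorder_queue.size(); the reviewer's
  // suggestion is the equivalent but more idiomatic static_cast, which
  // makes the size_t -> int32_t narrowing explicit and greppable.
  return flushing ||
         static_cast<int32_t>(reorder_queue.size()) > reorder_window;
}
```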
| 649 if (ProcessFrame(*reorder_queue_.top())) { | 667 if (ProcessFrame(*reorder_queue_.top())) { |
| 650 reorder_queue_.pop(); | 668 reorder_queue_.pop(); |
| 651 return true; | 669 return true; |
| 652 } | 670 } |
| 653 } | 671 } |
| 654 | 672 |
| 655 return false; | 673 return false; |
| 656 } | 674 } |
| 657 | 675 |
| 658 bool VTVideoDecodeAccelerator::ProcessFrame(const Frame& frame) { | 676 bool VTVideoDecodeAccelerator::ProcessFrame(const Frame& frame) { |
| (...skipping 114 matching lines...) | |
| 773 assigned_bitstream_ids_.clear(); | 791 assigned_bitstream_ids_.clear(); |
| 774 state_ = STATE_DESTROYING; | 792 state_ = STATE_DESTROYING; |
| 775 QueueFlush(TASK_DESTROY); | 793 QueueFlush(TASK_DESTROY); |
| 776 } | 794 } |
| 777 | 795 |
| 778 bool VTVideoDecodeAccelerator::CanDecodeOnIOThread() { | 796 bool VTVideoDecodeAccelerator::CanDecodeOnIOThread() { |
| 779 return false; | 797 return false; |
| 780 } | 798 } |
| 781 | 799 |
| 782 } // namespace content | 800 } // namespace content |
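For context beyond the diff hunks above, here is a hedged sketch of the reordering machinery the new `reorder_window` field feeds into: a `std::priority_queue` ordered by `FrameOrder` acts as a min-heap on `pic_order_cnt` (with `bitstream_id` as the tiebreaker), and frames are emitted once the queue outgrows the stream's reorder window. Types are simplified for illustration (`std::shared_ptr` stands in for `linked_ptr`); this is not the CL's code.

```cpp
#include <cstdint>
#include <memory>
#include <queue>
#include <vector>

struct Frame {
  int32_t bitstream_id = 0;
  int32_t pic_order_cnt = 0;
  int32_t reorder_window = 0;
};

// Mirrors VTVideoDecodeAccelerator::FrameOrder: using operator> turns
// std::priority_queue (normally a max-heap) into a min-heap, so the frame
// with the smallest pic_order_cnt is always at top().
struct FrameOrder {
  bool operator()(const std::shared_ptr<Frame>& lhs,
                  const std::shared_ptr<Frame>& rhs) const {
    if (lhs->pic_order_cnt != rhs->pic_order_cnt)
      return lhs->pic_order_cnt > rhs->pic_order_cnt;
    return lhs->bitstream_id > rhs->bitstream_id;
  }
};

using ReorderQueue = std::priority_queue<std::shared_ptr<Frame>,
                                         std::vector<std::shared_ptr<Frame>>,
                                         FrameOrder>;

// Drain policy corresponding to the patched check: emit the top frame when
// flushing, or once more frames are queued than the stream's reorder window.
bool ShouldEmit(const ReorderQueue& queue, bool flushing) {
  if (queue.empty())
    return false;
  return flushing ||
         static_cast<int32_t>(queue.size()) > queue.top()->reorder_window;
}
```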
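Finally, a short illustration of the `kNALUHeaderLength` constant near the top of the file: in the AVC (AVCC) sample format that VideoToolbox consumes, each NALU is prefixed with its size as a big-endian integer of 1, 2, or 4 bytes instead of an Annex B start code. The helper below is a hypothetical sketch, not code from this file.

```cpp
#include <cstdint>
#include <cstring>
#include <vector>

// Prepend a 4-byte big-endian length header to a raw NALU payload, matching
// the kNALUHeaderLength = 4 convention used above.
std::vector<uint8_t> PrefixNALUWithLength(const uint8_t* nalu, uint32_t size) {
  const size_t kNALUHeaderLength = 4;
  std::vector<uint8_t> out(kNALUHeaderLength + size);
  // Write the length in big-endian (network) byte order.
  out[0] = static_cast<uint8_t>(size >> 24);
  out[1] = static_cast<uint8_t>(size >> 16);
  out[2] = static_cast<uint8_t>(size >> 8);
  out[3] = static_cast<uint8_t>(size);
  memcpy(out.data() + kNALUHeaderLength, nalu, size);
  return out;
}
```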