OLD | NEW |
1 // Copyright 2014 The Chromium Authors. All rights reserved. | 1 // Copyright 2014 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "media/gpu/vt_video_decode_accelerator_mac.h" | 5 #include "media/gpu/vt_video_decode_accelerator_mac.h" |
6 | 6 |
7 #include <CoreVideo/CoreVideo.h> | 7 #include <CoreVideo/CoreVideo.h> |
8 #include <OpenGL/CGLIOSurface.h> | 8 #include <OpenGL/CGLIOSurface.h> |
9 #include <OpenGL/gl.h> | 9 #include <OpenGL/gl.h> |
10 #include <stddef.h> | 10 #include <stddef.h> |
(...skipping 48 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
59 | 59 |
60 // Size to use for NALU length headers in AVC format (can be 1, 2, or 4). | 60 // Size to use for NALU length headers in AVC format (can be 1, 2, or 4). |
61 const int kNALUHeaderLength = 4; | 61 const int kNALUHeaderLength = 4; |
62 | 62 |
63 // We request 5 picture buffers from the client, each of which has a texture ID | 63 // We request 5 picture buffers from the client, each of which has a texture ID |
64 // that we can bind decoded frames to. We need enough to satisfy preroll, and | 64 // that we can bind decoded frames to. We need enough to satisfy preroll, and |
65 // enough to avoid unnecessary stalling, but no more than that. The resource | 65 // enough to avoid unnecessary stalling, but no more than that. The resource |
66 // requirements are low, as we don't need the textures to be backed by storage. | 66 // requirements are low, as we don't need the textures to be backed by storage. |
67 const int kNumPictureBuffers = limits::kMaxVideoFrames + 1; | 67 const int kNumPictureBuffers = limits::kMaxVideoFrames + 1; |
68 | 68 |
69 // Maximum number of frames to queue for reordering before we stop asking for | 69 // Maximum number of frames to queue for reordering. (Also controls the maximum |
70 // more. (NotifyEndOfBitstreamBuffer() is called when frames are moved into the | 70 // number of in-flight frames, since NotifyEndOfBitstreamBuffer() is called when |
71 // reorder queue.) | 71 // frames are moved into the reorder queue.) |
72 const int kMaxReorderQueueSize = 16; | 72 // |
| 73 // Since the maximum possible |reorder_window| is 16 for H.264, 17 is the |
| 74 // minimum safe (static) size of the reorder queue. |
| 75 const int kMaxReorderQueueSize = 17; |
73 | 76 |
74 // Build an |image_config| dictionary for VideoToolbox initialization. | 77 // Build an |image_config| dictionary for VideoToolbox initialization. |
75 base::ScopedCFTypeRef<CFMutableDictionaryRef> BuildImageConfig( | 78 base::ScopedCFTypeRef<CFMutableDictionaryRef> BuildImageConfig( |
76 CMVideoDimensions coded_dimensions) { | 79 CMVideoDimensions coded_dimensions) { |
77 base::ScopedCFTypeRef<CFMutableDictionaryRef> image_config; | 80 base::ScopedCFTypeRef<CFMutableDictionaryRef> image_config; |
78 | 81 |
79 // Note that 4:2:0 textures cannot be used directly as RGBA in OpenGL, but are | 82 // Note that 4:2:0 textures cannot be used directly as RGBA in OpenGL, but are |
80 // lower power than 4:2:2 when composited directly by CoreAnimation. | 83 // lower power than 4:2:2 when composited directly by CoreAnimation. |
81 int32_t pixel_format = kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange; | 84 int32_t pixel_format = kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange; |
82 #define CFINT(i) CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &i) | 85 #define CFINT(i) CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &i) |
(...skipping 128 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
211 DLOG(WARNING) << "Failed to create software VideoToolbox session"; | 214 DLOG(WARNING) << "Failed to create software VideoToolbox session"; |
212 return false; | 215 return false; |
213 } | 216 } |
214 | 217 |
215 return true; | 218 return true; |
216 } | 219 } |
217 | 220 |
218 // TODO(sandersd): Share this computation with the VAAPI decoder. | 221 // TODO(sandersd): Share this computation with the VAAPI decoder. |
219 int32_t ComputeReorderWindow(const H264SPS* sps) { | 222 int32_t ComputeReorderWindow(const H264SPS* sps) { |
220 // TODO(sandersd): Compute MaxDpbFrames. | 223 // TODO(sandersd): Compute MaxDpbFrames. |
221 int32_t max_dpb_frames = kMaxReorderQueueSize; | 224 int32_t max_dpb_frames = 16; |
222 | 225 |
223 // See AVC spec section E.2.1 definition of |max_num_reorder_frames|. | 226 // See AVC spec section E.2.1 definition of |max_num_reorder_frames|. |
224 if (sps->vui_parameters_present_flag && sps->bitstream_restriction_flag) { | 227 if (sps->vui_parameters_present_flag && sps->bitstream_restriction_flag) { |
225 return std::min(sps->max_num_reorder_frames, max_dpb_frames); | 228 return std::min(sps->max_num_reorder_frames, max_dpb_frames); |
226 } else if (sps->constraint_set3_flag) { | 229 } else if (sps->constraint_set3_flag) { |
227 if (sps->profile_idc == 44 || sps->profile_idc == 86 || | 230 if (sps->profile_idc == 44 || sps->profile_idc == 86 || |
228 sps->profile_idc == 100 || sps->profile_idc == 110 || | 231 sps->profile_idc == 100 || sps->profile_idc == 110 || |
229 sps->profile_idc == 122 || sps->profile_idc == 244) { | 232 sps->profile_idc == 122 || sps->profile_idc == 244) { |
230 return 0; | 233 return 0; |
231 } | 234 } |
(...skipping 77 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
309 missing_idr_logged_(false), | 312 missing_idr_logged_(false), |
310 gpu_task_runner_(base::ThreadTaskRunnerHandle::Get()), | 313 gpu_task_runner_(base::ThreadTaskRunnerHandle::Get()), |
311 decoder_thread_("VTDecoderThread"), | 314 decoder_thread_("VTDecoderThread"), |
312 weak_this_factory_(this) { | 315 weak_this_factory_(this) { |
313 callback_.decompressionOutputCallback = OutputThunk; | 316 callback_.decompressionOutputCallback = OutputThunk; |
314 callback_.decompressionOutputRefCon = this; | 317 callback_.decompressionOutputRefCon = this; |
315 weak_this_ = weak_this_factory_.GetWeakPtr(); | 318 weak_this_ = weak_this_factory_.GetWeakPtr(); |
316 } | 319 } |
317 | 320 |
318 VTVideoDecodeAccelerator::~VTVideoDecodeAccelerator() { | 321 VTVideoDecodeAccelerator::~VTVideoDecodeAccelerator() { |
| 322 DVLOG(1) << __func__; |
319 DCHECK(gpu_thread_checker_.CalledOnValidThread()); | 323 DCHECK(gpu_thread_checker_.CalledOnValidThread()); |
320 } | 324 } |
321 | 325 |
322 bool VTVideoDecodeAccelerator::Initialize(const Config& config, | 326 bool VTVideoDecodeAccelerator::Initialize(const Config& config, |
323 Client* client) { | 327 Client* client) { |
| 328 DVLOG(1) << __func__; |
324 DCHECK(gpu_thread_checker_.CalledOnValidThread()); | 329 DCHECK(gpu_thread_checker_.CalledOnValidThread()); |
325 | 330 |
326 if (make_context_current_cb_.is_null() || bind_image_cb_.is_null()) { | 331 if (make_context_current_cb_.is_null() || bind_image_cb_.is_null()) { |
327 NOTREACHED() << "GL callbacks are required for this VDA"; | 332 NOTREACHED() << "GL callbacks are required for this VDA"; |
328 return false; | 333 return false; |
329 } | 334 } |
330 | 335 |
331 if (config.is_encrypted) { | 336 if (config.is_encrypted) { |
332 NOTREACHED() << "Encrypted streams are not supported for this VDA"; | 337 NOTREACHED() << "Encrypted streams are not supported for this VDA"; |
333 return false; | 338 return false; |
(...skipping 23 matching lines...) Expand all Loading... |
357 if (!decoder_thread_.Start()) | 362 if (!decoder_thread_.Start()) |
358 return false; | 363 return false; |
359 | 364 |
360 // Count the session as successfully initialized. | 365 // Count the session as successfully initialized. |
361 UMA_HISTOGRAM_ENUMERATION("Media.VTVDA.SessionFailureReason", | 366 UMA_HISTOGRAM_ENUMERATION("Media.VTVDA.SessionFailureReason", |
362 SFT_SUCCESSFULLY_INITIALIZED, SFT_MAX + 1); | 367 SFT_SUCCESSFULLY_INITIALIZED, SFT_MAX + 1); |
363 return true; | 368 return true; |
364 } | 369 } |
365 | 370 |
366 bool VTVideoDecodeAccelerator::FinishDelayedFrames() { | 371 bool VTVideoDecodeAccelerator::FinishDelayedFrames() { |
| 372 DVLOG(3) << __func__; |
367 DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread()); | 373 DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread()); |
368 if (session_) { | 374 if (session_) { |
369 OSStatus status = VTDecompressionSessionWaitForAsynchronousFrames(session_); | 375 OSStatus status = VTDecompressionSessionWaitForAsynchronousFrames(session_); |
370 if (status) { | 376 if (status) { |
371 NOTIFY_STATUS("VTDecompressionSessionWaitForAsynchronousFrames()", status, | 377 NOTIFY_STATUS("VTDecompressionSessionWaitForAsynchronousFrames()", status, |
372 SFT_PLATFORM_ERROR); | 378 SFT_PLATFORM_ERROR); |
373 return false; | 379 return false; |
374 } | 380 } |
375 } | 381 } |
376 return true; | 382 return true; |
377 } | 383 } |
378 | 384 |
379 bool VTVideoDecodeAccelerator::ConfigureDecoder() { | 385 bool VTVideoDecodeAccelerator::ConfigureDecoder() { |
| 386 DVLOG(2) << __func__; |
380 DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread()); | 387 DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread()); |
381 DCHECK(!last_sps_.empty()); | 388 DCHECK(!last_sps_.empty()); |
382 DCHECK(!last_pps_.empty()); | 389 DCHECK(!last_pps_.empty()); |
383 | 390 |
384 // Build the configuration records. | 391 // Build the configuration records. |
385 std::vector<const uint8_t*> nalu_data_ptrs; | 392 std::vector<const uint8_t*> nalu_data_ptrs; |
386 std::vector<size_t> nalu_data_sizes; | 393 std::vector<size_t> nalu_data_sizes; |
387 nalu_data_ptrs.reserve(3); | 394 nalu_data_ptrs.reserve(3); |
388 nalu_data_sizes.reserve(3); | 395 nalu_data_sizes.reserve(3); |
389 nalu_data_ptrs.push_back(&last_sps_.front()); | 396 nalu_data_ptrs.push_back(&last_sps_.front()); |
(...skipping 82 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
472 cf_using_hardware.InitializeInto()) == 0) { | 479 cf_using_hardware.InitializeInto()) == 0) { |
473 using_hardware = CFBooleanGetValue(cf_using_hardware); | 480 using_hardware = CFBooleanGetValue(cf_using_hardware); |
474 } | 481 } |
475 UMA_HISTOGRAM_BOOLEAN("Media.VTVDA.HardwareAccelerated", using_hardware); | 482 UMA_HISTOGRAM_BOOLEAN("Media.VTVDA.HardwareAccelerated", using_hardware); |
476 | 483 |
477 return true; | 484 return true; |
478 } | 485 } |
479 | 486 |
480 void VTVideoDecodeAccelerator::DecodeTask(const BitstreamBuffer& bitstream, | 487 void VTVideoDecodeAccelerator::DecodeTask(const BitstreamBuffer& bitstream, |
481 Frame* frame) { | 488 Frame* frame) { |
| 489 DVLOG(2) << __func__ << "(" << frame->bitstream_id << ")"; |
482 DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread()); | 490 DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread()); |
483 | 491 |
484 // Map the bitstream buffer. | 492 // Map the bitstream buffer. |
485 SharedMemoryRegion memory(bitstream, true); | 493 SharedMemoryRegion memory(bitstream, true); |
486 if (!memory.Map()) { | 494 if (!memory.Map()) { |
487 DLOG(ERROR) << "Failed to map bitstream buffer"; | 495 DLOG(ERROR) << "Failed to map bitstream buffer"; |
488 NotifyError(PLATFORM_FAILURE, SFT_PLATFORM_ERROR); | 496 NotifyError(PLATFORM_FAILURE, SFT_PLATFORM_ERROR); |
489 return; | 497 return; |
490 } | 498 } |
491 const uint8_t* buf = static_cast<uint8_t*>(memory.memory()); | 499 const uint8_t* buf = static_cast<uint8_t*>(memory.memory()); |
(...skipping 309 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
801 } | 809 } |
802 | 810 |
803 Frame* frame = reinterpret_cast<Frame*>(source_frame_refcon); | 811 Frame* frame = reinterpret_cast<Frame*>(source_frame_refcon); |
804 frame->image.reset(image_buffer, base::scoped_policy::RETAIN); | 812 frame->image.reset(image_buffer, base::scoped_policy::RETAIN); |
805 gpu_task_runner_->PostTask( | 813 gpu_task_runner_->PostTask( |
806 FROM_HERE, | 814 FROM_HERE, |
807 base::Bind(&VTVideoDecodeAccelerator::DecodeDone, weak_this_, frame)); | 815 base::Bind(&VTVideoDecodeAccelerator::DecodeDone, weak_this_, frame)); |
808 } | 816 } |
809 | 817 |
810 void VTVideoDecodeAccelerator::DecodeDone(Frame* frame) { | 818 void VTVideoDecodeAccelerator::DecodeDone(Frame* frame) { |
| 819 DVLOG(3) << __func__ << "(" << frame->bitstream_id << ")"; |
811 DCHECK(gpu_thread_checker_.CalledOnValidThread()); | 820 DCHECK(gpu_thread_checker_.CalledOnValidThread()); |
| 821 |
812 DCHECK_EQ(1u, pending_frames_.count(frame->bitstream_id)); | 822 DCHECK_EQ(1u, pending_frames_.count(frame->bitstream_id)); |
813 Task task(TASK_FRAME); | 823 Task task(TASK_FRAME); |
814 task.frame = pending_frames_[frame->bitstream_id]; | 824 task.frame = pending_frames_[frame->bitstream_id]; |
815 pending_frames_.erase(frame->bitstream_id); | 825 pending_frames_.erase(frame->bitstream_id); |
816 task_queue_.push(task); | 826 task_queue_.push(task); |
817 ProcessWorkQueues(); | 827 ProcessWorkQueues(); |
818 } | 828 } |
819 | 829 |
820 void VTVideoDecodeAccelerator::FlushTask(TaskType type) { | 830 void VTVideoDecodeAccelerator::FlushTask(TaskType type) { |
| 831 DVLOG(3) << __func__; |
821 DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread()); | 832 DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread()); |
| 833 |
822 FinishDelayedFrames(); | 834 FinishDelayedFrames(); |
823 | 835 |
824 // Always queue a task, even if FinishDelayedFrames() fails, so that | 836 // Always queue a task, even if FinishDelayedFrames() fails, so that |
825 // destruction always completes. | 837 // destruction always completes. |
826 gpu_task_runner_->PostTask( | 838 gpu_task_runner_->PostTask( |
827 FROM_HERE, | 839 FROM_HERE, |
828 base::Bind(&VTVideoDecodeAccelerator::FlushDone, weak_this_, type)); | 840 base::Bind(&VTVideoDecodeAccelerator::FlushDone, weak_this_, type)); |
829 } | 841 } |
830 | 842 |
831 void VTVideoDecodeAccelerator::FlushDone(TaskType type) { | 843 void VTVideoDecodeAccelerator::FlushDone(TaskType type) { |
| 844 DVLOG(3) << __func__; |
832 DCHECK(gpu_thread_checker_.CalledOnValidThread()); | 845 DCHECK(gpu_thread_checker_.CalledOnValidThread()); |
833 task_queue_.push(Task(type)); | 846 task_queue_.push(Task(type)); |
834 ProcessWorkQueues(); | 847 ProcessWorkQueues(); |
835 } | 848 } |
836 | 849 |
837 void VTVideoDecodeAccelerator::Decode(const BitstreamBuffer& bitstream) { | 850 void VTVideoDecodeAccelerator::Decode(const BitstreamBuffer& bitstream) { |
| 851 DVLOG(2) << __func__ << "(" << bitstream.id() << ")"; |
838 DCHECK(gpu_thread_checker_.CalledOnValidThread()); | 852 DCHECK(gpu_thread_checker_.CalledOnValidThread()); |
| 853 |
839 if (bitstream.id() < 0) { | 854 if (bitstream.id() < 0) { |
840 DLOG(ERROR) << "Invalid bitstream, id: " << bitstream.id(); | 855 DLOG(ERROR) << "Invalid bitstream, id: " << bitstream.id(); |
841 if (base::SharedMemory::IsHandleValid(bitstream.handle())) | 856 if (base::SharedMemory::IsHandleValid(bitstream.handle())) |
842 base::SharedMemory::CloseHandle(bitstream.handle()); | 857 base::SharedMemory::CloseHandle(bitstream.handle()); |
843 NotifyError(INVALID_ARGUMENT, SFT_INVALID_STREAM); | 858 NotifyError(INVALID_ARGUMENT, SFT_INVALID_STREAM); |
844 return; | 859 return; |
845 } | 860 } |
| 861 |
846 DCHECK_EQ(0u, assigned_bitstream_ids_.count(bitstream.id())); | 862 DCHECK_EQ(0u, assigned_bitstream_ids_.count(bitstream.id())); |
847 assigned_bitstream_ids_.insert(bitstream.id()); | 863 assigned_bitstream_ids_.insert(bitstream.id()); |
| 864 |
848 Frame* frame = new Frame(bitstream.id()); | 865 Frame* frame = new Frame(bitstream.id()); |
849 pending_frames_[frame->bitstream_id] = make_linked_ptr(frame); | 866 pending_frames_[frame->bitstream_id] = make_linked_ptr(frame); |
850 decoder_thread_.task_runner()->PostTask( | 867 decoder_thread_.task_runner()->PostTask( |
851 FROM_HERE, base::Bind(&VTVideoDecodeAccelerator::DecodeTask, | 868 FROM_HERE, base::Bind(&VTVideoDecodeAccelerator::DecodeTask, |
852 base::Unretained(this), bitstream, frame)); | 869 base::Unretained(this), bitstream, frame)); |
853 } | 870 } |
854 | 871 |
855 void VTVideoDecodeAccelerator::AssignPictureBuffers( | 872 void VTVideoDecodeAccelerator::AssignPictureBuffers( |
856 const std::vector<PictureBuffer>& pictures) { | 873 const std::vector<PictureBuffer>& pictures) { |
| 874 DVLOG(1) << __func__; |
857 DCHECK(gpu_thread_checker_.CalledOnValidThread()); | 875 DCHECK(gpu_thread_checker_.CalledOnValidThread()); |
858 | 876 |
859 for (const PictureBuffer& picture : pictures) { | 877 for (const PictureBuffer& picture : pictures) { |
860 DCHECK(!picture_info_map_.count(picture.id())); | 878 DCHECK(!picture_info_map_.count(picture.id())); |
861 assigned_picture_ids_.insert(picture.id()); | 879 assigned_picture_ids_.insert(picture.id()); |
862 available_picture_ids_.push_back(picture.id()); | 880 available_picture_ids_.push_back(picture.id()); |
863 DCHECK_LE(1u, picture.internal_texture_ids().size()); | 881 DCHECK_LE(1u, picture.internal_texture_ids().size()); |
864 DCHECK_LE(1u, picture.texture_ids().size()); | 882 DCHECK_LE(1u, picture.texture_ids().size()); |
865 picture_info_map_.insert(std::make_pair( | 883 picture_info_map_.insert(std::make_pair( |
866 picture.id(), | 884 picture.id(), |
867 base::MakeUnique<PictureInfo>(picture.internal_texture_ids()[0], | 885 base::MakeUnique<PictureInfo>(picture.internal_texture_ids()[0], |
868 picture.texture_ids()[0]))); | 886 picture.texture_ids()[0]))); |
869 } | 887 } |
870 | 888 |
871 // Pictures are not marked as uncleared until after this method returns, and | 889 // Pictures are not marked as uncleared until after this method returns, and |
872 // they will be broken if they are used before that happens. So, schedule | 890 // they will be broken if they are used before that happens. So, schedule |
873 // future work after that happens. | 891 // future work after that happens. |
874 gpu_task_runner_->PostTask( | 892 gpu_task_runner_->PostTask( |
875 FROM_HERE, | 893 FROM_HERE, |
876 base::Bind(&VTVideoDecodeAccelerator::ProcessWorkQueues, weak_this_)); | 894 base::Bind(&VTVideoDecodeAccelerator::ProcessWorkQueues, weak_this_)); |
877 } | 895 } |
878 | 896 |
879 void VTVideoDecodeAccelerator::ReusePictureBuffer(int32_t picture_id) { | 897 void VTVideoDecodeAccelerator::ReusePictureBuffer(int32_t picture_id) { |
| 898 DVLOG(2) << __func__ << "(" << picture_id << ")"; |
880 DCHECK(gpu_thread_checker_.CalledOnValidThread()); | 899 DCHECK(gpu_thread_checker_.CalledOnValidThread()); |
881 | 900 |
882 auto it = picture_info_map_.find(picture_id); | 901 auto it = picture_info_map_.find(picture_id); |
883 if (it != picture_info_map_.end()) { | 902 if (it != picture_info_map_.end()) { |
884 PictureInfo* picture_info = it->second.get(); | 903 PictureInfo* picture_info = it->second.get(); |
885 picture_info->cv_image.reset(); | 904 picture_info->cv_image.reset(); |
886 picture_info->gl_image->Destroy(false); | 905 picture_info->gl_image->Destroy(false); |
887 picture_info->gl_image = nullptr; | 906 picture_info->gl_image = nullptr; |
888 } | 907 } |
889 | 908 |
890 if (assigned_picture_ids_.count(picture_id)) { | 909 if (assigned_picture_ids_.count(picture_id)) { |
891 available_picture_ids_.push_back(picture_id); | 910 available_picture_ids_.push_back(picture_id); |
892 ProcessWorkQueues(); | 911 ProcessWorkQueues(); |
893 } else { | 912 } else { |
894 client_->DismissPictureBuffer(picture_id); | 913 client_->DismissPictureBuffer(picture_id); |
895 } | 914 } |
896 } | 915 } |
897 | 916 |
898 void VTVideoDecodeAccelerator::ProcessWorkQueues() { | 917 void VTVideoDecodeAccelerator::ProcessWorkQueues() { |
| 918 DVLOG(3) << __func__; |
899 DCHECK(gpu_thread_checker_.CalledOnValidThread()); | 919 DCHECK(gpu_thread_checker_.CalledOnValidThread()); |
900 switch (state_) { | 920 switch (state_) { |
901 case STATE_DECODING: | 921 case STATE_DECODING: |
902 // TODO(sandersd): Batch where possible. | 922 // TODO(sandersd): Batch where possible. |
903 while (state_ == STATE_DECODING) { | 923 while (state_ == STATE_DECODING) { |
904 if (!ProcessReorderQueue() && !ProcessTaskQueue()) | 924 if (!ProcessReorderQueue() && !ProcessTaskQueue()) |
905 break; | 925 break; |
906 } | 926 } |
907 return; | 927 return; |
908 | 928 |
909 case STATE_ERROR: | 929 case STATE_ERROR: |
910 // Do nothing until Destroy() is called. | 930 // Do nothing until Destroy() is called. |
911 return; | 931 return; |
912 | 932 |
913 case STATE_DESTROYING: | 933 case STATE_DESTROYING: |
914 // Drop tasks until we are ready to destruct. | 934 // Drop tasks until we are ready to destruct. |
915 while (!task_queue_.empty()) { | 935 while (!task_queue_.empty()) { |
916 if (task_queue_.front().type == TASK_DESTROY) { | 936 if (task_queue_.front().type == TASK_DESTROY) { |
917 delete this; | 937 delete this; |
918 return; | 938 return; |
919 } | 939 } |
920 task_queue_.pop(); | 940 task_queue_.pop(); |
921 } | 941 } |
922 return; | 942 return; |
923 } | 943 } |
924 } | 944 } |
925 | 945 |
926 bool VTVideoDecodeAccelerator::ProcessTaskQueue() { | 946 bool VTVideoDecodeAccelerator::ProcessTaskQueue() { |
| 947 DVLOG(3) << __func__ << " size=" << task_queue_.size(); |
927 DCHECK(gpu_thread_checker_.CalledOnValidThread()); | 948 DCHECK(gpu_thread_checker_.CalledOnValidThread()); |
928 DCHECK_EQ(state_, STATE_DECODING); | 949 DCHECK_EQ(state_, STATE_DECODING); |
929 | 950 |
930 if (task_queue_.empty()) | 951 if (task_queue_.empty()) |
931 return false; | 952 return false; |
932 | 953 |
933 const Task& task = task_queue_.front(); | 954 const Task& task = task_queue_.front(); |
934 switch (task.type) { | 955 switch (task.type) { |
935 case TASK_FRAME: | 956 case TASK_FRAME: |
936 if (reorder_queue_.size() < kMaxReorderQueueSize && | 957 if (reorder_queue_.size() < kMaxReorderQueueSize && |
937 (!task.frame->is_idr || reorder_queue_.empty())) { | 958 (!task.frame->is_idr || reorder_queue_.empty())) { |
| 959 DVLOG(2) << "Decode(" << task.frame->bitstream_id << ") complete"; |
938 assigned_bitstream_ids_.erase(task.frame->bitstream_id); | 960 assigned_bitstream_ids_.erase(task.frame->bitstream_id); |
939 client_->NotifyEndOfBitstreamBuffer(task.frame->bitstream_id); | 961 client_->NotifyEndOfBitstreamBuffer(task.frame->bitstream_id); |
940 reorder_queue_.push(task.frame); | 962 reorder_queue_.push(task.frame); |
941 task_queue_.pop(); | 963 task_queue_.pop(); |
942 return true; | 964 return true; |
943 } | 965 } |
944 return false; | 966 return false; |
945 | 967 |
946 case TASK_FLUSH: | 968 case TASK_FLUSH: |
947 DCHECK_EQ(task.type, pending_flush_tasks_.front()); | 969 DCHECK_EQ(task.type, pending_flush_tasks_.front()); |
948 if (reorder_queue_.size() == 0) { | 970 if (reorder_queue_.size() == 0) { |
| 971 DVLOG(1) << "Flush complete"; |
949 pending_flush_tasks_.pop(); | 972 pending_flush_tasks_.pop(); |
950 client_->NotifyFlushDone(); | 973 client_->NotifyFlushDone(); |
951 task_queue_.pop(); | 974 task_queue_.pop(); |
952 return true; | 975 return true; |
953 } | 976 } |
954 return false; | 977 return false; |
955 | 978 |
956 case TASK_RESET: | 979 case TASK_RESET: |
957 DCHECK_EQ(task.type, pending_flush_tasks_.front()); | 980 DCHECK_EQ(task.type, pending_flush_tasks_.front()); |
958 if (reorder_queue_.size() == 0) { | 981 if (reorder_queue_.size() == 0) { |
| 982 DVLOG(1) << "Reset complete"; |
959 waiting_for_idr_ = true; | 983 waiting_for_idr_ = true; |
960 pending_flush_tasks_.pop(); | 984 pending_flush_tasks_.pop(); |
961 client_->NotifyResetDone(); | 985 client_->NotifyResetDone(); |
962 task_queue_.pop(); | 986 task_queue_.pop(); |
963 return true; | 987 return true; |
964 } | 988 } |
965 return false; | 989 return false; |
966 | 990 |
967 case TASK_DESTROY: | 991 case TASK_DESTROY: |
968 NOTREACHED() << "Can't destroy while in STATE_DECODING"; | 992 NOTREACHED() << "Can't destroy while in STATE_DECODING"; |
(...skipping 10 matching lines...) Expand all Loading... |
979 return false; | 1003 return false; |
980 | 1004 |
981 // If the next task is a flush (because there is a pending flush or because | 1005 // If the next task is a flush (because there is a pending flush or because |
982 // the next frame is an IDR), then we don't need a full reorder buffer to send | 1006 // the next frame is an IDR), then we don't need a full reorder buffer to send |
983 // the next frame. | 1007 // the next frame. |
984 bool flushing = | 1008 bool flushing = |
985 !task_queue_.empty() && (task_queue_.front().type != TASK_FRAME || | 1009 !task_queue_.empty() && (task_queue_.front().type != TASK_FRAME || |
986 task_queue_.front().frame->is_idr); | 1010 task_queue_.front().frame->is_idr); |
987 | 1011 |
988 size_t reorder_window = std::max(0, reorder_queue_.top()->reorder_window); | 1012 size_t reorder_window = std::max(0, reorder_queue_.top()->reorder_window); |
| 1013 DVLOG(3) << __func__ << " size=" << reorder_queue_.size() |
| 1014 << " window=" << reorder_window << " flushing=" << flushing; |
989 if (flushing || reorder_queue_.size() > reorder_window) { | 1015 if (flushing || reorder_queue_.size() > reorder_window) { |
990 if (ProcessFrame(*reorder_queue_.top())) { | 1016 if (ProcessFrame(*reorder_queue_.top())) { |
991 reorder_queue_.pop(); | 1017 reorder_queue_.pop(); |
992 return true; | 1018 return true; |
993 } | 1019 } |
994 } | 1020 } |
995 | 1021 |
996 return false; | 1022 return false; |
997 } | 1023 } |
998 | 1024 |
999 bool VTVideoDecodeAccelerator::ProcessFrame(const Frame& frame) { | 1025 bool VTVideoDecodeAccelerator::ProcessFrame(const Frame& frame) { |
| 1026 DVLOG(3) << __func__ << "(" << frame.bitstream_id << ")"; |
1000 DCHECK(gpu_thread_checker_.CalledOnValidThread()); | 1027 DCHECK(gpu_thread_checker_.CalledOnValidThread()); |
1001 DCHECK_EQ(state_, STATE_DECODING); | 1028 DCHECK_EQ(state_, STATE_DECODING); |
1002 | 1029 |
1003 // If the next pending flush is for a reset, then the frame will be dropped. | 1030 // If the next pending flush is for a reset, then the frame will be dropped. |
1004 bool resetting = !pending_flush_tasks_.empty() && | 1031 bool resetting = !pending_flush_tasks_.empty() && |
1005 pending_flush_tasks_.front() == TASK_RESET; | 1032 pending_flush_tasks_.front() == TASK_RESET; |
1006 | 1033 |
1007 if (!resetting && frame.image.get()) { | 1034 if (!resetting && frame.image.get()) { |
1008 // If the |coded_size| has changed, request new picture buffers and then | 1035 // If the |coded_size| has changed, request new picture buffers and then |
1009 // wait for them. | 1036 // wait for them. |
(...skipping 15 matching lines...) Expand all Loading... |
1025 return false; | 1052 return false; |
1026 } | 1053 } |
1027 if (!SendFrame(frame)) | 1054 if (!SendFrame(frame)) |
1028 return false; | 1055 return false; |
1029 } | 1056 } |
1030 | 1057 |
1031 return true; | 1058 return true; |
1032 } | 1059 } |
1033 | 1060 |
1034 bool VTVideoDecodeAccelerator::SendFrame(const Frame& frame) { | 1061 bool VTVideoDecodeAccelerator::SendFrame(const Frame& frame) { |
| 1062 DVLOG(2) << __func__ << "(" << frame.bitstream_id << ")"; |
1035 DCHECK(gpu_thread_checker_.CalledOnValidThread()); | 1063 DCHECK(gpu_thread_checker_.CalledOnValidThread()); |
1036 DCHECK_EQ(state_, STATE_DECODING); | 1064 DCHECK_EQ(state_, STATE_DECODING); |
1037 | 1065 |
1038 if (available_picture_ids_.empty()) | 1066 if (available_picture_ids_.empty()) |
1039 return false; | 1067 return false; |
1040 | 1068 |
1041 int32_t picture_id = available_picture_ids_.back(); | 1069 int32_t picture_id = available_picture_ids_.back(); |
1042 auto it = picture_info_map_.find(picture_id); | 1070 auto it = picture_info_map_.find(picture_id); |
1043 DCHECK(it != picture_info_map_.end()); | 1071 DCHECK(it != picture_info_map_.end()); |
1044 PictureInfo* picture_info = it->second.get(); | 1072 PictureInfo* picture_info = it->second.get(); |
(...skipping 60 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1105 decoder_thread_.task_runner()->PostTask( | 1133 decoder_thread_.task_runner()->PostTask( |
1106 FROM_HERE, base::Bind(&VTVideoDecodeAccelerator::FlushTask, | 1134 FROM_HERE, base::Bind(&VTVideoDecodeAccelerator::FlushTask, |
1107 base::Unretained(this), type)); | 1135 base::Unretained(this), type)); |
1108 | 1136 |
1109 // If this is a new flush request, see if we can make progress. | 1137 // If this is a new flush request, see if we can make progress. |
1110 if (pending_flush_tasks_.size() == 1) | 1138 if (pending_flush_tasks_.size() == 1) |
1111 ProcessWorkQueues(); | 1139 ProcessWorkQueues(); |
1112 } | 1140 } |
1113 | 1141 |
1114 void VTVideoDecodeAccelerator::Flush() { | 1142 void VTVideoDecodeAccelerator::Flush() { |
| 1143 DVLOG(1) << __func__; |
1115 DCHECK(gpu_thread_checker_.CalledOnValidThread()); | 1144 DCHECK(gpu_thread_checker_.CalledOnValidThread()); |
1116 QueueFlush(TASK_FLUSH); | 1145 QueueFlush(TASK_FLUSH); |
1117 } | 1146 } |
1118 | 1147 |
1119 void VTVideoDecodeAccelerator::Reset() { | 1148 void VTVideoDecodeAccelerator::Reset() { |
| 1149 DVLOG(1) << __func__; |
1120 DCHECK(gpu_thread_checker_.CalledOnValidThread()); | 1150 DCHECK(gpu_thread_checker_.CalledOnValidThread()); |
1121 QueueFlush(TASK_RESET); | 1151 QueueFlush(TASK_RESET); |
1122 } | 1152 } |
1123 | 1153 |
1124 void VTVideoDecodeAccelerator::Destroy() { | 1154 void VTVideoDecodeAccelerator::Destroy() { |
| 1155 DVLOG(1) << __func__; |
1125 DCHECK(gpu_thread_checker_.CalledOnValidThread()); | 1156 DCHECK(gpu_thread_checker_.CalledOnValidThread()); |
1126 | 1157 |
1127 // In a forceful shutdown, the decoder thread may be dead already. | 1158 // In a forceful shutdown, the decoder thread may be dead already. |
1128 if (!decoder_thread_.IsRunning()) { | 1159 if (!decoder_thread_.IsRunning()) { |
1129 delete this; | 1160 delete this; |
1130 return; | 1161 return; |
1131 } | 1162 } |
1132 | 1163 |
1133 // For a graceful shutdown, return assigned buffers and flush before | 1164 // For a graceful shutdown, return assigned buffers and flush before |
1134 // destructing |this|. | 1165 // destructing |this|. |
(...skipping 20 matching lines...) Expand all Loading... |
1155 SupportedProfile profile; | 1186 SupportedProfile profile; |
1156 profile.profile = supported_profile; | 1187 profile.profile = supported_profile; |
1157 profile.min_resolution.SetSize(16, 16); | 1188 profile.min_resolution.SetSize(16, 16); |
1158 profile.max_resolution.SetSize(4096, 2160); | 1189 profile.max_resolution.SetSize(4096, 2160); |
1159 profiles.push_back(profile); | 1190 profiles.push_back(profile); |
1160 } | 1191 } |
1161 return profiles; | 1192 return profiles; |
1162 } | 1193 } |
1163 | 1194 |
1164 } // namespace media | 1195 } // namespace media |
OLD | NEW |