| Index: content/common/gpu/media/vt_video_decode_accelerator.cc |
| diff --git a/content/common/gpu/media/vt_video_decode_accelerator.cc b/content/common/gpu/media/vt_video_decode_accelerator.cc |
| index f625fbcbc66728c12d188558dd5c2c6196bbf0f8..cb29a1cd049eadbcdc278ce4af253297414a6b7c 100644 |
| --- a/content/common/gpu/media/vt_video_decode_accelerator.cc |
| +++ b/content/common/gpu/media/vt_video_decode_accelerator.cc |
| @@ -7,7 +7,6 @@ |
| #include <OpenGL/gl.h> |
| #include "base/bind.h" |
| -#include "base/callback_helpers.h" |
| #include "base/command_line.h" |
| #include "base/sys_byteorder.h" |
| #include "base/thread_task_runner_handle.h" |
| @@ -29,12 +28,13 @@ using content_common_gpu_media::StubPathMap; |
| namespace content { |
| -// Size of NALU length headers in AVCC/MPEG-4 format (can be 1, 2, or 4). |
| +// Size to use for NALU length headers in AVC format (can be 1, 2, or 4). |
| static const int kNALUHeaderLength = 4; |
| -// We only request 5 picture buffers from the client which are used to hold the |
| -// decoded samples. These buffers are then reused when the client tells us that |
| -// it is done with the buffer. |
| +// We request 5 picture buffers from the client, each of which has a texture ID |
| +// that we can bind decoded frames to. We need enough to satisfy preroll, and |
| +// enough to avoid unnecessary stalling, but no more than that. The resource |
| +// requirements are low, as we don't need the textures to be backed by storage. |
| static const int kNumPictureBuffers = 5; |
| // Route decoded frame callbacks back into the VTVideoDecodeAccelerator. |
| @@ -48,28 +48,20 @@ static void OutputThunk( |
| CMTime presentation_duration) { |
| VTVideoDecodeAccelerator* vda = |
| reinterpret_cast<VTVideoDecodeAccelerator*>(decompression_output_refcon); |
| - int32_t bitstream_id = reinterpret_cast<intptr_t>(source_frame_refcon); |
| - vda->Output(bitstream_id, status, image_buffer); |
| + vda->Output(source_frame_refcon, status, image_buffer); |
| } |
| -VTVideoDecodeAccelerator::DecodedFrame::DecodedFrame( |
| - int32_t bitstream_id, |
| - CVImageBufferRef image_buffer) |
| - : bitstream_id(bitstream_id), |
| - image_buffer(image_buffer) { |
| +VTVideoDecodeAccelerator::Task::Task(TaskType type) : type(type) { |
| } |
| -VTVideoDecodeAccelerator::DecodedFrame::~DecodedFrame() { |
| +VTVideoDecodeAccelerator::Task::~Task() { |
| } |
| -VTVideoDecodeAccelerator::PendingAction::PendingAction( |
| - Action action, |
| - int32_t bitstream_id) |
| - : action(action), |
| - bitstream_id(bitstream_id) { |
| +VTVideoDecodeAccelerator::Frame::Frame(int32_t bitstream_id) |
| + : bitstream_id(bitstream_id) { |
| } |
| -VTVideoDecodeAccelerator::PendingAction::~PendingAction() { |
| +VTVideoDecodeAccelerator::Frame::~Frame() { |
| } |
| VTVideoDecodeAccelerator::VTVideoDecodeAccelerator( |
| @@ -78,7 +70,7 @@ VTVideoDecodeAccelerator::VTVideoDecodeAccelerator( |
| : cgl_context_(cgl_context), |
| make_context_current_(make_context_current), |
| client_(NULL), |
| - has_error_(false), |
| + state_(STATE_NORMAL), |
| format_(NULL), |
| session_(NULL), |
| gpu_task_runner_(base::ThreadTaskRunnerHandle::Get()), |
| @@ -126,10 +118,34 @@ bool VTVideoDecodeAccelerator::Initialize( |
| return true; |
| } |
| -bool VTVideoDecodeAccelerator::ConfigureDecoder( |
| - const std::vector<const uint8_t*>& nalu_data_ptrs, |
| - const std::vector<size_t>& nalu_data_sizes) { |
| +bool VTVideoDecodeAccelerator::FinishDelayedFrames() { |
| DCHECK(decoder_thread_.message_loop_proxy()->BelongsToCurrentThread()); |
| + if (session_) { |
| + OSStatus status = VTDecompressionSessionFinishDelayedFrames(session_); |
| + if (status) { |
| + NOTIFY_STATUS("VTDecompressionSessionFinishDelayedFrames()", status); |
| + return false; |
| + } |
| + } |
| + return true; |
| +} |
| + |
| +bool VTVideoDecodeAccelerator::ConfigureDecoder() { |
| + DCHECK(decoder_thread_.message_loop_proxy()->BelongsToCurrentThread()); |
| + DCHECK(!last_sps_.empty()); |
| + DCHECK(!last_pps_.empty()); |
| + |
| + // Build the configuration records. |
| + std::vector<const uint8_t*> nalu_data_ptrs; |
|
DaleCurtis
2014/11/10 20:48:02
Yuck, I guess you could const initialize parallel
sandersd (OOO until July 31)
2014/11/11 18:46:06
Done. I'll see about a refactor to remove unnecess
|
| + std::vector<size_t> nalu_data_sizes; |
| + nalu_data_ptrs.push_back(&last_sps_.front()); |
| + nalu_data_sizes.push_back(last_sps_.size()); |
| + if (!last_spsext_.empty()) { |
| + nalu_data_ptrs.push_back(&last_spsext_.front()); |
| + nalu_data_sizes.push_back(last_spsext_.size()); |
| + } |
| + nalu_data_ptrs.push_back(&last_pps_.front()); |
| + nalu_data_sizes.push_back(last_pps_.size()); |
| // Construct a new format description from the parameter sets. |
| // TODO(sandersd): Replace this with custom code to support OS X < 10.9. |
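For context on the parallel-array discussion above: the parameter-set pointers and sizes feed directly into CoreMedia's format-description constructor. A minimal standalone sketch of that call follows; the helper name, nullptr handling, and return-by-raw-ref are assumptions, not how the CL writes it (the CL stores the result in format_ via base::ScopedCFTypeRef).

```cpp
// Sketch only: how parallel |nalu_data_ptrs| / |nalu_data_sizes| arrays are
// consumed by CMVideoFormatDescriptionCreateFromH264ParameterSets (OS X 10.9+).
#include <CoreMedia/CoreMedia.h>

#include <cstdint>
#include <vector>

CMFormatDescriptionRef CreateFormatDescription(
    const std::vector<const uint8_t*>& nalu_data_ptrs,
    const std::vector<size_t>& nalu_data_sizes) {
  CMFormatDescriptionRef format = nullptr;
  OSStatus status = CMVideoFormatDescriptionCreateFromH264ParameterSets(
      kCFAllocatorDefault,
      nalu_data_ptrs.size(),     // parameter_set_count
      &nalu_data_ptrs.front(),   // parameter_set_pointers
      &nalu_data_sizes.front(),  // parameter_set_sizes
      4,                         // nal_unit_header_length (kNALUHeaderLength)
      &format);                  // format_description_out
  return status == noErr ? format : nullptr;
}
```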
| @@ -147,11 +163,19 @@ bool VTVideoDecodeAccelerator::ConfigureDecoder( |
| return false; |
| } |
| - // If the session is compatible, there's nothing to do. |
| + // Flush all frames using the previous configuration to keep things simple. |
|
DaleCurtis
2014/11/10 20:48:02
Shouldn't this happen in the Reset() that precedes
sandersd (OOO until July 31)
2014/11/11 18:46:07
The decoder does not need to be reset to get a new
|
| + if (!FinishDelayedFrames()) |
| + return false; |
| + |
| + // Store the new configuration data. |
| + CMVideoDimensions coded_dimensions = |
| + CMVideoFormatDescriptionGetDimensions(format_); |
| + coded_size_.SetSize(coded_dimensions.width, coded_dimensions.height); |
| + |
| + // If the session is compatible, there's nothing else to do. |
| if (session_ && |
| - VTDecompressionSessionCanAcceptFormatDescription(session_, format_)) { |
| - return true; |
| - } |
| + VTDecompressionSessionCanAcceptFormatDescription(session_, format_)) |
|
DaleCurtis
2014/11/10 21:10:39
Multiline if should keep {}
sandersd (OOO until July 31)
2014/11/11 18:46:07
Done.
|
| + return true; |
| // Prepare VideoToolbox configuration dictionaries. |
| base::ScopedCFTypeRef<CFMutableDictionaryRef> decoder_config( |
| @@ -174,8 +198,6 @@ bool VTVideoDecodeAccelerator::ConfigureDecoder( |
| &kCFTypeDictionaryKeyCallBacks, |
| &kCFTypeDictionaryValueCallBacks)); |
| - CMVideoDimensions coded_dimensions = |
| - CMVideoFormatDescriptionGetDimensions(format_); |
| #define CFINT(i) CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &i) |
| // TODO(sandersd): RGBA option for 4:4:4 video. |
| int32_t pixel_format = kCVPixelFormatType_422YpCbCr8; |
| @@ -207,33 +229,11 @@ bool VTVideoDecodeAccelerator::ConfigureDecoder( |
| return true; |
| } |
| -void VTVideoDecodeAccelerator::Decode(const media::BitstreamBuffer& bitstream) { |
| - DCHECK(CalledOnValidThread()); |
| - // Not actually a requirement of the VDA API, but we're lazy and use negative |
| - // values as flags internally. Revisit that if this actually happens. |
| - if (bitstream.id() < 0) { |
| - LOG(ERROR) << "Negative bitstream ID"; |
| - NotifyError(INVALID_ARGUMENT); |
| - client_->NotifyEndOfBitstreamBuffer(bitstream.id()); |
| - return; |
| - } |
| - pending_bitstream_ids_.push(bitstream.id()); |
| - decoder_thread_.message_loop_proxy()->PostTask(FROM_HERE, base::Bind( |
| - &VTVideoDecodeAccelerator::DecodeTask, base::Unretained(this), |
| - bitstream)); |
| -} |
| - |
| void VTVideoDecodeAccelerator::DecodeTask( |
| - const media::BitstreamBuffer& bitstream) { |
| + const media::BitstreamBuffer& bitstream, |
| + Frame* frame) { |
| DCHECK(decoder_thread_.message_loop_proxy()->BelongsToCurrentThread()); |
| - // Once we have a bitstream buffer, we must either decode it or drop it. |
| - // This construct ensures that the buffer is always dropped unless we call |
| - // drop_bitstream.Release(). |
| - base::ScopedClosureRunner drop_bitstream(base::Bind( |
| - &VTVideoDecodeAccelerator::DropBitstream, base::Unretained(this), |
| - bitstream.id())); |
| - |
| // Map the bitstream buffer. |
| base::SharedMemory memory(bitstream.handle(), true); |
| size_t size = bitstream.size(); |
| @@ -245,11 +245,11 @@ void VTVideoDecodeAccelerator::DecodeTask( |
| const uint8_t* buf = static_cast<uint8_t*>(memory.memory()); |
| // NALUs are stored with Annex B format in the bitstream buffer (start codes), |
|
DaleCurtis
2014/11/10 20:48:02
Peanut gallery: Seems like this entire NALU conver
sandersd (OOO until July 31)
2014/11/11 18:46:06
That's probably true, but there are a lot of outpu
|
| - // but VideoToolbox expects AVCC/MPEG-4 format (length headers), so we must |
| - // rewrite the data. |
| + // but VideoToolbox expects AVC-4 format (length headers), so we must rewrite |
| + // the data. |
| // |
| - // 1. Locate relevant NALUs and compute the size of the translated data. |
| - // Also record any parameter sets for VideoToolbox initialization. |
| + // Locate relevant NALUs and compute the size of the rewritten data. Also |
| + // record any parameter sets for VideoToolbox initialization. |
| bool config_changed = false; |
| size_t data_size = 0; |
| std::vector<media::H264NALU> nalus; |
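Regarding the conversion discussed in the thread above: the rewrite replaces each Annex B start code with a 4-byte big-endian length header before the data is handed to VideoToolbox. A self-contained sketch of that step, writing into a plain vector rather than a CMBlockBuffer, with a stand-in Nalu struct in place of media::H264NALU:

```cpp
// Sketch: rewrite already-parsed Annex B NALUs into length-prefixed (AVCC)
// form with 4-byte big-endian headers. |Nalu| is a hypothetical stand-in for
// media::H264NALU.
#include <arpa/inet.h>  // htonl()

#include <cstdint>
#include <cstring>
#include <vector>

struct Nalu {
  const uint8_t* data;
  size_t size;
};

std::vector<uint8_t> RewriteToLengthPrefixed(const std::vector<Nalu>& nalus) {
  static const size_t kHeaderLength = 4;  // mirrors kNALUHeaderLength
  size_t total = 0;
  for (const Nalu& nalu : nalus)
    total += kHeaderLength + nalu.size;

  std::vector<uint8_t> out(total);
  size_t offset = 0;
  for (const Nalu& nalu : nalus) {
    // 4-byte big-endian length header, then the NALU payload.
    uint32_t header = htonl(static_cast<uint32_t>(nalu.size));
    std::memcpy(&out[offset], &header, kHeaderLength);
    offset += kHeaderLength;
    std::memcpy(&out[offset], nalu.data, nalu.size);
    offset += nalu.size;
  }
  return out;
}
```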
| @@ -264,7 +264,6 @@ void VTVideoDecodeAccelerator::DecodeTask( |
| NotifyError(PLATFORM_FAILURE); |
| return; |
| } |
| - // TODO(sandersd): Strict ordering rules. |
| switch (nalu.nal_unit_type) { |
| case media::H264NALU::kSPS: |
| last_sps_.assign(nalu.data, nalu.data + nalu.size); |
| @@ -280,6 +279,15 @@ void VTVideoDecodeAccelerator::DecodeTask( |
| last_pps_.assign(nalu.data, nalu.data + nalu.size); |
| config_changed = true; |
| break; |
| + case media::H264NALU::kSliceDataA: |
| + case media::H264NALU::kSliceDataB: |
| + case media::H264NALU::kSliceDataC: |
| + LOG(ERROR) << "Coded slide data partitions not implemented."; |
| + NotifyError(PLATFORM_FAILURE); |
| + return; |
| + case media::H264NALU::kIDRSlice: |
| + case media::H264NALU::kNonIDRSlice: |
| + // TODO(sandersd): Compute pic_order_count. |
| default: |
| nalus.push_back(nalu); |
| data_size += kNALUHeaderLength + nalu.size; |
| @@ -287,47 +295,37 @@ void VTVideoDecodeAccelerator::DecodeTask( |
| } |
| } |
| - // 2. Initialize VideoToolbox. |
| - // TODO(sandersd): Check if the new configuration is identical before |
| - // reconfiguring. |
| + // Initialize VideoToolbox. |
| + // TODO(sandersd): Instead of assuming that the last SPS and PPS units are |
| + // always the correct ones, maintain a cache of recent SPS and PPS units and |
| + // select from them using the slice header. |
| if (config_changed) { |
|
DaleCurtis
2014/11/10 20:48:02
Do you do this here instead of Initialize() since
sandersd (OOO until July 31)
2014/11/11 18:46:07
That's part of it, but also the config can change
|
| if (last_sps_.size() == 0 || last_pps_.size() == 0) { |
| LOG(ERROR) << "Invalid configuration data"; |
|
DaleCurtis
2014/11/10 20:48:02
You should convert all these LOG(ERROR) calls to D
sandersd (OOO until July 31)
2014/11/11 18:46:07
Ah, you're quite right, the "VDA Error" message pr
DaleCurtis
2014/11/11 21:02:34
You might file a bug to do this after a default on
sandersd (OOO until July 31)
2014/11/11 22:14:39
Perhaps it would be better to temporarily add a lo
|
| NotifyError(INVALID_ARGUMENT); |
| return; |
| } |
| - // TODO(sandersd): Check that the SPS and PPS IDs match. |
| - std::vector<const uint8_t*> nalu_data_ptrs; |
| - std::vector<size_t> nalu_data_sizes; |
| - nalu_data_ptrs.push_back(&last_sps_.front()); |
| - nalu_data_sizes.push_back(last_sps_.size()); |
| - if (last_spsext_.size() != 0) { |
| - nalu_data_ptrs.push_back(&last_spsext_.front()); |
| - nalu_data_sizes.push_back(last_spsext_.size()); |
| - } |
| - nalu_data_ptrs.push_back(&last_pps_.front()); |
| - nalu_data_sizes.push_back(last_pps_.size()); |
| - |
| - // If ConfigureDecoder() fails, it already called NotifyError(). |
| - if (!ConfigureDecoder(nalu_data_ptrs, nalu_data_sizes)) |
| + if (!ConfigureDecoder()) |
| return; |
| } |
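On the TODO a few lines up about selecting parameter sets via the slice header rather than trusting the most recent SPS/PPS: a hypothetical sketch of what such a cache could look like, keyed by parameter set id. This is not what the CL implements (the CL keeps only last_sps_/last_spsext_/last_pps_); the keying fields and lookup points are assumptions.

```cpp
// Hypothetical SPS/PPS cache keyed by parameter set id; not part of this CL.
#include <cstdint>
#include <map>
#include <vector>

struct ParameterSetCache {
  std::map<int, std::vector<uint8_t>> sps_by_id;
  std::map<int, std::vector<uint8_t>> pps_by_id;

  // Called when an SPS or PPS NALU is seen in the bitstream.
  void StoreSPS(int sps_id, const uint8_t* data, size_t size) {
    sps_by_id[sps_id].assign(data, data + size);
  }
  void StorePPS(int pps_id, const uint8_t* data, size_t size) {
    pps_by_id[pps_id].assign(data, data + size);
  }

  // Called when a slice header is parsed: the PPS it names, and the SPS that
  // PPS names, are the ones the decoder should be configured with.
  const std::vector<uint8_t>* GetSPS(int sps_id) const {
    auto it = sps_by_id.find(sps_id);
    return it == sps_by_id.end() ? nullptr : &it->second;
  }
  const std::vector<uint8_t>* GetPPS(int pps_id) const {
    auto it = pps_by_id.find(pps_id);
    return it == pps_by_id.end() ? nullptr : &it->second;
  }
};
```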
| - // If there are no non-configuration units, immediately return an empty |
| - // (ie. dropped) frame. It is an error to create a MemoryBlock with zero |
| - // size. |
| - if (!data_size) |
| + // If there are no non-configuration units, drop the bitstream buffer by |
| + // returning an empty frame. |
| + if (!data_size) { |
| + if (!FinishDelayedFrames()) |
| + return; |
| + DecodeDone(frame); |
| return; |
| + } |
| - // If the session is not configured, fail. |
| + // If the session is not configured by this point, fail. |
| if (!session_) { |
| - LOG(ERROR) << "Image slice without configuration data"; |
| + LOG(ERROR) << "Image slice without configuration"; |
| NotifyError(INVALID_ARGUMENT); |
| return; |
| } |
| - // 3. Allocate a memory-backed CMBlockBuffer for the translated data. |
| - // TODO(sandersd): Check that the slice's PPS matches the current PPS. |
| + // Create a memory-backed CMBlockBuffer for the translated data. |
| base::ScopedCFTypeRef<CMBlockBufferRef> data; |
| OSStatus status = CMBlockBufferCreateWithMemoryBlock( |
| kCFAllocatorDefault, |
| @@ -344,7 +342,7 @@ void VTVideoDecodeAccelerator::DecodeTask( |
| return; |
| } |
| - // 4. Copy NALU data, inserting length headers. |
| + // Copy NALU data into the CMBlockBuffer, inserting length headers. |
| size_t offset = 0; |
| for (size_t i = 0; i < nalus.size(); i++) { |
| media::H264NALU& nalu = nalus[i]; |
| @@ -364,8 +362,8 @@ void VTVideoDecodeAccelerator::DecodeTask( |
| offset += nalu.size; |
| } |
| - // 5. Package the data for VideoToolbox and request decoding. |
| - base::ScopedCFTypeRef<CMSampleBufferRef> frame; |
| + // Package the data in a CMSampleBuffer. |
| + base::ScopedCFTypeRef<CMSampleBufferRef> sample; |
| status = CMSampleBufferCreate( |
| kCFAllocatorDefault, |
| data, // data_buffer |
| @@ -378,79 +376,110 @@ void VTVideoDecodeAccelerator::DecodeTask( |
| NULL, // &sample_timing_array |
| 0, // num_sample_size_entries |
| NULL, // &sample_size_array |
| - frame.InitializeInto()); |
| + sample.InitializeInto()); |
| if (status) { |
| NOTIFY_STATUS("CMSampleBufferCreate()", status); |
| return; |
| } |
| + // Update the frame data. |
| + frame->coded_size = coded_size_; |
| + |
| + // Send the frame for decoding. |
| // Asynchronous Decompression allows for parallel submission of frames |
| // (without it, DecodeFrame() does not return until the frame has been |
| // decoded). We don't enable Temporal Processing so that frames are always |
| // returned in decode order; this makes it easier to avoid deadlock. |
| VTDecodeFrameFlags decode_flags = |
| kVTDecodeFrame_EnableAsynchronousDecompression; |
| - |
| - intptr_t bitstream_id = bitstream.id(); |
| status = VTDecompressionSessionDecodeFrame( |
| session_, |
| - frame, // sample_buffer |
| + sample, // sample_buffer |
| decode_flags, // decode_flags |
| - reinterpret_cast<void*>(bitstream_id), // source_frame_refcon |
| + reinterpret_cast<void*>(frame), // source_frame_refcon |
| NULL); // &info_flags_out |
| if (status) { |
| NOTIFY_STATUS("VTDecompressionSessionDecodeFrame()", status); |
| return; |
| } |
| - |
| - // Now that the bitstream is decoding, don't drop it. |
| - (void)drop_bitstream.Release(); |
| } |
| // This method may be called on any VideoToolbox thread. |
| void VTVideoDecodeAccelerator::Output( |
| - int32_t bitstream_id, |
| + void* source_frame_refcon, |
| OSStatus status, |
| CVImageBufferRef image_buffer) { |
| if (status) { |
| - // TODO(sandersd): Handle dropped frames. |
| NOTIFY_STATUS("Decoding", status); |
| - image_buffer = NULL; |
| } else if (CFGetTypeID(image_buffer) != CVPixelBufferGetTypeID()) { |
| LOG(ERROR) << "Decoded frame is not a CVPixelBuffer"; |
| NotifyError(PLATFORM_FAILURE); |
| - image_buffer = NULL; |
| } else { |
| - CFRetain(image_buffer); |
| + Frame* frame = reinterpret_cast<Frame*>(source_frame_refcon); |
| + frame->image.reset(image_buffer, base::scoped_policy::RETAIN); |
| + DecodeDone(frame); |
| } |
| +} |
| + |
| +void VTVideoDecodeAccelerator::DecodeDone(Frame* frame) { |
| + if (!CalledOnValidThread()) { |
|
DaleCurtis
2014/11/10 21:10:39
If Output() is always going to call on the wrong t
sandersd (OOO until July 31)
2014/11/11 18:46:07
Done.
|
| + gpu_task_runner_->PostTask(FROM_HERE, base::Bind( |
| + &VTVideoDecodeAccelerator::DecodeDone, |
| + weak_this_factory_.GetWeakPtr(), frame)); |
| + } else { |
| + DCHECK_EQ(frame->bitstream_id, pending_frames_.front()->bitstream_id); |
| + Task task(TASK_FRAME); |
| + task.frame = pending_frames_.front(); |
| + pending_frames_.pop(); |
| + pending_tasks_.push(task); |
| + ProcessTasks(); |
| + } |
| +} |
| + |
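The thread above settles on posting to the GPU thread unconditionally rather than branching on CalledOnValidThread(). A standalone sketch of that shape, using a toy task runner in place of base::SingleThreadTaskRunner; the names and structure here are illustrative assumptions, not this CL's code.

```cpp
// "Always hop to the owning thread": Output() runs on an arbitrary
// VideoToolbox thread, so it does no bookkeeping and simply posts
// DecodeDone() to the GPU thread. |TaskRunner| is a toy stand-in for
// base::SingleThreadTaskRunner.
#include <cstdint>
#include <functional>
#include <queue>
#include <utility>

struct TaskRunner {
  std::queue<std::function<void()>> tasks;
  void PostTask(std::function<void()> task) { tasks.push(std::move(task)); }
  void RunPendingTasks() {
    while (!tasks.empty()) {
      tasks.front()();
      tasks.pop();
    }
  }
};

struct DecoderSketch {
  TaskRunner* gpu_task_runner;

  // Called on a VideoToolbox thread.
  void Output(int32_t bitstream_id) {
    gpu_task_runner->PostTask(
        [this, bitstream_id] { DecodeDone(bitstream_id); });
  }

  // Always runs on the GPU thread.
  void DecodeDone(int32_t bitstream_id) {
    // Match against the front of the pending-frame queue, then ProcessTasks().
  }
};
```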
| +void VTVideoDecodeAccelerator::FlushTask(TaskType type) { |
| + DCHECK(decoder_thread_.message_loop_proxy()->BelongsToCurrentThread()); |
| + FinishDelayedFrames(); |
| + |
| + // Always queue a task, even if FinishDelayedFrames() fails, so that |
| + // destruction always completes. |
| gpu_task_runner_->PostTask(FROM_HERE, base::Bind( |
| - &VTVideoDecodeAccelerator::OutputTask, |
| - weak_this_factory_.GetWeakPtr(), |
| - DecodedFrame(bitstream_id, image_buffer))); |
| + &VTVideoDecodeAccelerator::FlushDone, |
| + weak_this_factory_.GetWeakPtr(), type)); |
| +} |
| + |
| +void VTVideoDecodeAccelerator::FlushDone(TaskType type) { |
| + DCHECK(CalledOnValidThread()); |
|
DaleCurtis
2014/11/10 21:10:39
CalledOnValidThread isn't very readable when there
sandersd (OOO until July 31)
2014/11/11 18:46:06
I could add a wrapper method, but CalledOnValidThr
DaleCurtis
2014/11/11 21:02:34
Hmm instead of inheriting non-threadsafe you shoul
sandersd (OOO until July 31)
2014/11/11 22:14:39
Done.
|
| + pending_tasks_.push(Task(type)); |
| + ProcessTasks(); |
| } |
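Following up on the readability point above: the later revision holds a named base::ThreadChecker member instead of inheriting the thread-affinity helper, so each DCHECK names the thread it guards. A sketch under that assumption; the member name gpu_thread_checker_ is a guess, not necessarily what landed.

```cpp
// Sketch: a named ThreadChecker member makes "which thread?" explicit at
// every DCHECK site. |gpu_thread_checker_| is an assumed name.
#include "base/logging.h"
#include "base/threading/thread_checker.h"

class FlushStateSketch {
 public:
  void FlushDone() {
    DCHECK(gpu_thread_checker_.CalledOnValidThread());
    // ... push a Task(type) and ProcessTasks() ...
  }

 private:
  base::ThreadChecker gpu_thread_checker_;
};
```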
| -void VTVideoDecodeAccelerator::OutputTask(DecodedFrame frame) { |
| +void VTVideoDecodeAccelerator::Decode(const media::BitstreamBuffer& bitstream) { |
| DCHECK(CalledOnValidThread()); |
| - decoded_frames_.push(frame); |
| - ProcessDecodedFrames(); |
| + DCHECK_EQ(assigned_bitstream_ids_.count(bitstream.id()), 0u); |
| + assigned_bitstream_ids_.insert(bitstream.id()); |
| + Frame* frame = new Frame(bitstream.id()); |
|
DaleCurtis
2014/11/10 21:10:39
Why does Frame have a bitstream_id instead of owni
sandersd (OOO until July 31)
2014/11/11 18:46:07
The ownership and lifetime story was not very clea
DaleCurtis
2014/11/11 21:02:34
I'd spend some time cleaning up the ownership mode
sandersd (OOO until July 31)
2014/11/11 22:14:39
I do not, and this is the best I had on my third r
|
| + pending_frames_.push(make_linked_ptr(frame)); |
| + decoder_thread_.message_loop_proxy()->PostTask(FROM_HERE, base::Bind( |
| + &VTVideoDecodeAccelerator::DecodeTask, base::Unretained(this), |
| + bitstream, frame)); |
| } |
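On the ownership question raised above: the GPU thread keeps every Frame alive in pending_frames_ (in decode order), while the raw pointer travels through the decoder thread and VideoToolbox as source_frame_refcon and is matched back to the queue front in DecodeDone(). A simplified sketch of that lifecycle, using std::shared_ptr in place of linked_ptr and plain types in place of the CL's classes:

```cpp
// Sketch of the Frame ownership model: the GPU thread owns frames via a
// queue; the raw pointer rides along as source_frame_refcon and must come
// back in decode order (mirrors the DCHECK_EQ in DecodeDone()).
#include <cassert>
#include <cstdint>
#include <memory>
#include <queue>

struct Frame {
  explicit Frame(int32_t id) : bitstream_id(id) {}
  int32_t bitstream_id;
};

struct FrameQueue {
  std::queue<std::shared_ptr<Frame>> pending_frames;

  Frame* StartDecode(int32_t bitstream_id) {
    auto frame = std::make_shared<Frame>(bitstream_id);
    pending_frames.push(frame);
    return frame.get();  // Raw pointer passed through as source_frame_refcon.
  }

  std::shared_ptr<Frame> FinishDecode(Frame* frame) {
    assert(frame == pending_frames.front().get());
    std::shared_ptr<Frame> done = pending_frames.front();
    pending_frames.pop();
    return done;
  }
};
```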
| void VTVideoDecodeAccelerator::AssignPictureBuffers( |
| const std::vector<media::PictureBuffer>& pictures) { |
| DCHECK(CalledOnValidThread()); |
| - for (size_t i = 0; i < pictures.size(); i++) { |
| - DCHECK(!texture_ids_.count(pictures[i].id())); |
| - assigned_picture_ids_.insert(pictures[i].id()); |
| - available_picture_ids_.push_back(pictures[i].id()); |
| - texture_ids_[pictures[i].id()] = pictures[i].texture_id(); |
| + for (const media::PictureBuffer& picture : pictures) { |
| + DCHECK(!texture_ids_.count(picture.id())); |
| + assigned_picture_ids_.insert(picture.id()); |
| + available_picture_ids_.push_back(picture.id()); |
| + texture_ids_[picture.id()] = picture.texture_id(); |
| } |
| // Pictures are not marked as uncleared until after this method returns, and |
| // they will be broken if they are used before that happens. So, schedule |
| // future work after that happens. |
| gpu_task_runner_->PostTask(FROM_HERE, base::Bind( |
| - &VTVideoDecodeAccelerator::ProcessDecodedFrames, |
| + &VTVideoDecodeAccelerator::ProcessTasks, |
| weak_this_factory_.GetWeakPtr())); |
| } |
| @@ -458,289 +487,179 @@ void VTVideoDecodeAccelerator::ReusePictureBuffer(int32_t picture_id) { |
| DCHECK(CalledOnValidThread()); |
| DCHECK_EQ(CFGetRetainCount(picture_bindings_[picture_id]), 1); |
| picture_bindings_.erase(picture_id); |
| - // Don't put the picture back in the available list if has been dismissed. |
| if (assigned_picture_ids_.count(picture_id) != 0) { |
| available_picture_ids_.push_back(picture_id); |
| - ProcessDecodedFrames(); |
| - } |
| -} |
| - |
| -void VTVideoDecodeAccelerator::CompleteAction(Action action) { |
| - DCHECK(CalledOnValidThread()); |
| - |
| - switch (action) { |
| - case ACTION_FLUSH: |
| - client_->NotifyFlushDone(); |
| - break; |
| - case ACTION_RESET: |
| - client_->NotifyResetDone(); |
| - break; |
| - case ACTION_DESTROY: |
| - delete this; |
| - break; |
| - } |
| -} |
| - |
| -void VTVideoDecodeAccelerator::CompleteActions(int32_t bitstream_id) { |
| - DCHECK(CalledOnValidThread()); |
| - while (!pending_actions_.empty() && |
| - pending_actions_.front().bitstream_id == bitstream_id) { |
| - CompleteAction(pending_actions_.front().action); |
| - pending_actions_.pop(); |
| + ProcessTasks(); |
| + } else { |
| + client_->DismissPictureBuffer(picture_id); |
| } |
| } |
| -void VTVideoDecodeAccelerator::ProcessDecodedFrames() { |
| +void VTVideoDecodeAccelerator::ProcessTasks() { |
| DCHECK(CalledOnValidThread()); |
| - while (!decoded_frames_.empty()) { |
| - if (pending_actions_.empty()) { |
| - // No pending actions; send frames normally. |
| - if (!has_error_) |
| - SendPictures(pending_bitstream_ids_.back()); |
| - return; |
| - } |
| + while (!pending_tasks_.empty()) { |
| + const Task& task = pending_tasks_.front(); |
| - int32_t next_action_bitstream_id = pending_actions_.front().bitstream_id; |
| - int32_t last_sent_bitstream_id = -1; |
| - switch (pending_actions_.front().action) { |
| - case ACTION_FLUSH: |
| - // Send frames normally. |
| - if (has_error_) |
| + switch (state_) { |
| + case STATE_NORMAL: |
| + if (!ProcessTask(task)) |
| return; |
| - last_sent_bitstream_id = SendPictures(next_action_bitstream_id); |
| + pending_tasks_.pop(); |
| break; |
| - case ACTION_RESET: |
| - // Drop decoded frames. |
| - if (has_error_) |
| - return; |
| - while (!decoded_frames_.empty() && |
| - last_sent_bitstream_id != next_action_bitstream_id) { |
| - last_sent_bitstream_id = decoded_frames_.front().bitstream_id; |
| - decoded_frames_.pop(); |
| - DCHECK_EQ(pending_bitstream_ids_.front(), last_sent_bitstream_id); |
| - pending_bitstream_ids_.pop(); |
| - client_->NotifyEndOfBitstreamBuffer(last_sent_bitstream_id); |
| - } |
| + case STATE_ERROR: |
| + // Do nothing until Destroy() is called. |
| break; |
| - case ACTION_DESTROY: |
| - // Drop decoded frames, without bookkeeping. |
| - while (!decoded_frames_.empty()) { |
| - last_sent_bitstream_id = decoded_frames_.front().bitstream_id; |
| - decoded_frames_.pop(); |
| + case STATE_DESTROYING: |
| + // Discard tasks until destruction is complete. |
| + if (task.type == TASK_DESTROY) { |
| + delete this; |
| + return; |
| } |
| - |
| - // Handle completing the action specially, as it is important not to |
| - // access |this| after calling CompleteAction(). |
| - if (last_sent_bitstream_id == next_action_bitstream_id) |
| - CompleteAction(ACTION_DESTROY); |
| - |
| - // Either |this| was deleted or no more progress can be made. |
| - return; |
| + pending_tasks_.pop(); |
| + break; |
| } |
| - |
| - // If we ran out of buffers (or pictures), no more progress can be made |
| - // until more frames are decoded. |
| - if (last_sent_bitstream_id != next_action_bitstream_id) |
| - return; |
| - |
| - // Complete all actions pending for this |bitstream_id|, then loop to see |
| - // if progress can be made on the next action. |
| - CompleteActions(next_action_bitstream_id); |
| } |
| } |
| -int32_t VTVideoDecodeAccelerator::ProcessDroppedFrames( |
| - int32_t last_sent_bitstream_id, |
| - int32_t up_to_bitstream_id) { |
| +bool VTVideoDecodeAccelerator::ProcessTask(const Task& task) { |
| DCHECK(CalledOnValidThread()); |
| - // Drop frames as long as there is a frame, we have not reached the next |
| - // action, and the next frame has no image. |
| - while (!decoded_frames_.empty() && |
| - last_sent_bitstream_id != up_to_bitstream_id && |
| - decoded_frames_.front().image_buffer.get() == NULL) { |
| - const DecodedFrame& frame = decoded_frames_.front(); |
| - DCHECK_EQ(pending_bitstream_ids_.front(), frame.bitstream_id); |
| - client_->NotifyEndOfBitstreamBuffer(frame.bitstream_id); |
| - last_sent_bitstream_id = frame.bitstream_id; |
| - decoded_frames_.pop(); |
| - pending_bitstream_ids_.pop(); |
| + DCHECK_EQ(state_, STATE_NORMAL); |
| + |
| + switch (task.type) { |
| + case TASK_FRAME: |
| + return ProcessFrame(*task.frame); |
| + |
| + case TASK_FLUSH: |
| + DCHECK_EQ(task.type, pending_flush_tasks_.front()); |
| + pending_flush_tasks_.pop(); |
| + client_->NotifyFlushDone(); |
| + return true; |
| + |
| + case TASK_RESET: |
| + DCHECK_EQ(task.type, pending_flush_tasks_.front()); |
| + pending_flush_tasks_.pop(); |
| + client_->NotifyResetDone(); |
| + return true; |
| + |
| + case TASK_DESTROY: |
| + NOTREACHED() << "Can't destroy while in STATE_NORMAL."; |
| + NotifyError(ILLEGAL_STATE); |
| + return false; |
| } |
| - return last_sent_bitstream_id; |
| } |
| -// TODO(sandersd): If GpuVideoDecoder didn't specifically check the size of |
| -// textures, this would be unnecessary, as the size is actually a property of |
| -// the texture binding, not the texture. We rebind every frame, so the size |
| -// passed to ProvidePictureBuffers() is meaningless. |
| -void VTVideoDecodeAccelerator::ProcessSizeChange() { |
| +bool VTVideoDecodeAccelerator::ProcessFrame(const Frame& frame) { |
| DCHECK(CalledOnValidThread()); |
| - DCHECK(!decoded_frames_.empty()); |
| - |
| - // Find the size of the next image. |
| - const DecodedFrame& frame = decoded_frames_.front(); |
| - CVImageBufferRef image_buffer = frame.image_buffer.get(); |
| - size_t width = CVPixelBufferGetWidth(image_buffer); |
| - size_t height = CVPixelBufferGetHeight(image_buffer); |
| - gfx::Size image_size(width, height); |
| - |
| - if (picture_size_ != image_size) { |
| - // Dismiss all assigned picture buffers. |
| - for (int32_t picture_id : assigned_picture_ids_) |
| - client_->DismissPictureBuffer(picture_id); |
| - assigned_picture_ids_.clear(); |
| - available_picture_ids_.clear(); |
| - |
| - // Request new pictures. |
| - client_->ProvidePictureBuffers( |
| - kNumPictureBuffers, image_size, GL_TEXTURE_RECTANGLE_ARB); |
| - picture_size_ = image_size; |
| + DCHECK_EQ(state_, STATE_NORMAL); |
|
DaleCurtis
2014/11/10 21:10:39
This needs a comment on exactly what's happening h
sandersd (OOO until July 31)
2014/11/11 18:46:07
Done.
|
| + bool resetting = !pending_flush_tasks_.empty() && |
| + pending_flush_tasks_.front() == TASK_RESET; |
| + if (!resetting && frame.image.get()) { |
| + if (picture_size_ != frame.coded_size) { |
| + // Dismiss current pictures. |
| + for (int32_t picture_id : assigned_picture_ids_) |
| + client_->DismissPictureBuffer(picture_id); |
| + assigned_picture_ids_.clear(); |
| + available_picture_ids_.clear(); |
| + |
| + // Request new pictures. |
| + picture_size_ = frame.coded_size; |
| + client_->ProvidePictureBuffers( |
| + kNumPictureBuffers, coded_size_, GL_TEXTURE_RECTANGLE_ARB); |
| + return false; |
| + } |
| + if (!SendFrame(frame)) |
| + return false; |
| } |
| + assigned_bitstream_ids_.erase(frame.bitstream_id); |
| + client_->NotifyEndOfBitstreamBuffer(frame.bitstream_id); |
| + return true; |
| } |
| -int32_t VTVideoDecodeAccelerator::SendPictures(int32_t up_to_bitstream_id) { |
| +bool VTVideoDecodeAccelerator::SendFrame(const Frame& frame) { |
| DCHECK(CalledOnValidThread()); |
| - DCHECK(!decoded_frames_.empty()); |
| - |
| - // TODO(sandersd): Store the actual last sent bitstream ID? |
| - int32_t last_sent_bitstream_id = -1; |
| + DCHECK_EQ(state_, STATE_NORMAL); |
| - last_sent_bitstream_id = |
| - ProcessDroppedFrames(last_sent_bitstream_id, up_to_bitstream_id); |
| - if (last_sent_bitstream_id == up_to_bitstream_id || decoded_frames_.empty()) |
| - return last_sent_bitstream_id; |
| - |
| - ProcessSizeChange(); |
| if (available_picture_ids_.empty()) |
| - return last_sent_bitstream_id; |
| + return false; |
| + |
| + int32_t picture_id = available_picture_ids_.back(); |
| + IOSurfaceRef surface = CVPixelBufferGetIOSurface(frame.image.get()); |
| if (!make_context_current_.Run()) { |
| LOG(ERROR) << "Failed to make GL context current"; |
| NotifyError(PLATFORM_FAILURE); |
| - return last_sent_bitstream_id; |
| + return false; |
| } |
| glEnable(GL_TEXTURE_RECTANGLE_ARB); |
| - while (!available_picture_ids_.empty() && !has_error_) { |
| - DCHECK_NE(last_sent_bitstream_id, up_to_bitstream_id); |
| - DCHECK(!decoded_frames_.empty()); |
| - |
| - // We don't pop |frame| or |picture_id| until they are consumed, which may |
| - // not happen if an error occurs. Conveniently, this also removes some |
| - // refcounting. |
| - const DecodedFrame& frame = decoded_frames_.front(); |
| - DCHECK_EQ(pending_bitstream_ids_.front(), frame.bitstream_id); |
| - int32_t picture_id = available_picture_ids_.back(); |
| - |
| - CVImageBufferRef image_buffer = frame.image_buffer.get(); |
| - IOSurfaceRef surface = CVPixelBufferGetIOSurface(image_buffer); |
| - |
| - gfx::ScopedTextureBinder |
| - texture_binder(GL_TEXTURE_RECTANGLE_ARB, texture_ids_[picture_id]); |
| - CGLError status = CGLTexImageIOSurface2D( |
| - cgl_context_, // ctx |
| - GL_TEXTURE_RECTANGLE_ARB, // target |
| - GL_RGB, // internal_format |
| - picture_size_.width(), // width |
| - picture_size_.height(), // height |
| - GL_YCBCR_422_APPLE, // format |
| - GL_UNSIGNED_SHORT_8_8_APPLE, // type |
| - surface, // io_surface |
| - 0); // plane |
| - if (status != kCGLNoError) { |
| - NOTIFY_STATUS("CGLTexImageIOSurface2D()", status); |
| - break; |
| - } |
| - |
| - picture_bindings_[picture_id] = frame.image_buffer; |
| - client_->PictureReady(media::Picture( |
| - picture_id, frame.bitstream_id, gfx::Rect(picture_size_))); |
| - available_picture_ids_.pop_back(); |
| - client_->NotifyEndOfBitstreamBuffer(frame.bitstream_id); |
| - last_sent_bitstream_id = frame.bitstream_id; |
| - decoded_frames_.pop(); |
| - pending_bitstream_ids_.pop(); |
| - |
| - last_sent_bitstream_id = |
| - ProcessDroppedFrames(last_sent_bitstream_id, up_to_bitstream_id); |
| - if (last_sent_bitstream_id == up_to_bitstream_id || decoded_frames_.empty()) |
| - break; |
| - |
| - ProcessSizeChange(); |
| + gfx::ScopedTextureBinder |
| + texture_binder(GL_TEXTURE_RECTANGLE_ARB, texture_ids_[picture_id]); |
| + CGLError status = CGLTexImageIOSurface2D( |
| + cgl_context_, // ctx |
| + GL_TEXTURE_RECTANGLE_ARB, // target |
| + GL_RGB, // internal_format |
| + frame.coded_size.width(), // width |
| + frame.coded_size.height(), // height |
| + GL_YCBCR_422_APPLE, // format |
| + GL_UNSIGNED_SHORT_8_8_APPLE, // type |
| + surface, // io_surface |
| + 0); // plane |
| + if (status != kCGLNoError) { |
| + NOTIFY_STATUS("CGLTexImageIOSurface2D()", status); |
| + return false; |
| } |
| glDisable(GL_TEXTURE_RECTANGLE_ARB); |
| - return last_sent_bitstream_id; |
| -} |
| - |
| -void VTVideoDecodeAccelerator::FlushTask() { |
| - DCHECK(decoder_thread_.message_loop_proxy()->BelongsToCurrentThread()); |
| - OSStatus status = VTDecompressionSessionFinishDelayedFrames(session_); |
| - if (status) |
| - NOTIFY_STATUS("VTDecompressionSessionFinishDelayedFrames()", status); |
| -} |
| - |
| -void VTVideoDecodeAccelerator::QueueAction(Action action) { |
| - DCHECK(CalledOnValidThread()); |
| - if (pending_bitstream_ids_.empty()) { |
| - // If there are no pending frames, all actions complete immediately. |
| - CompleteAction(action); |
| - } else { |
| - // Otherwise, queue the action. |
| - pending_actions_.push(PendingAction(action, pending_bitstream_ids_.back())); |
| - |
| - // Request a flush to make sure the action will eventually complete. |
| - decoder_thread_.message_loop_proxy()->PostTask(FROM_HERE, base::Bind( |
| - &VTVideoDecodeAccelerator::FlushTask, base::Unretained(this))); |
| - |
| - // See if we can make progress now that there is a new pending action. |
| - ProcessDecodedFrames(); |
| - } |
| + available_picture_ids_.pop_back(); |
| + picture_bindings_[picture_id] = frame.image; |
| + client_->PictureReady(media::Picture( |
| + picture_id, frame.bitstream_id, gfx::Rect(frame.coded_size))); |
| + return true; |
| } |
| void VTVideoDecodeAccelerator::NotifyError(Error error) { |
| if (!CalledOnValidThread()) { |
| gpu_task_runner_->PostTask(FROM_HERE, base::Bind( |
| &VTVideoDecodeAccelerator::NotifyError, |
| - weak_this_factory_.GetWeakPtr(), |
| - error)); |
| - return; |
| + weak_this_factory_.GetWeakPtr(), error)); |
| + } else if (state_ == STATE_NORMAL) { |
| + state_ = STATE_ERROR; |
| + client_->NotifyError(error); |
| } |
| - has_error_ = true; |
| - client_->NotifyError(error); |
| } |
| -void VTVideoDecodeAccelerator::DropBitstream(int32_t bitstream_id) { |
| - DCHECK(decoder_thread_.message_loop_proxy()->BelongsToCurrentThread()); |
| - gpu_task_runner_->PostTask(FROM_HERE, base::Bind( |
| - &VTVideoDecodeAccelerator::OutputTask, |
| - weak_this_factory_.GetWeakPtr(), |
| - DecodedFrame(bitstream_id, NULL))); |
| +void VTVideoDecodeAccelerator::QueueFlush(TaskType type) { |
| + DCHECK(CalledOnValidThread()); |
| + pending_flush_tasks_.push(type); |
| + decoder_thread_.message_loop_proxy()->PostTask(FROM_HERE, base::Bind( |
| + &VTVideoDecodeAccelerator::FlushTask, base::Unretained(this), |
| + type)); |
| + |
| + // If this is a new flush reuqest, see if we can make progress. |
|
DaleCurtis
2014/11/10 20:48:02
sp: request
sandersd (OOO until July 31)
2014/11/11 18:46:06
Done.
|
| + if (pending_flush_tasks_.size() == 1) |
| + ProcessTasks(); |
| } |
| void VTVideoDecodeAccelerator::Flush() { |
| DCHECK(CalledOnValidThread()); |
| - QueueAction(ACTION_FLUSH); |
| + QueueFlush(TASK_FLUSH); |
| } |
| void VTVideoDecodeAccelerator::Reset() { |
| DCHECK(CalledOnValidThread()); |
| - QueueAction(ACTION_RESET); |
| + QueueFlush(TASK_RESET); |
| } |
| void VTVideoDecodeAccelerator::Destroy() { |
| DCHECK(CalledOnValidThread()); |
| - // Drop any other pending actions. |
| - while (!pending_actions_.empty()) |
| - pending_actions_.pop(); |
| - // Return all bitstream buffers. |
| - while (!pending_bitstream_ids_.empty()) { |
| - client_->NotifyEndOfBitstreamBuffer(pending_bitstream_ids_.front()); |
| - pending_bitstream_ids_.pop(); |
| - } |
| - QueueAction(ACTION_DESTROY); |
| + for (int32_t bitstream_id : assigned_bitstream_ids_) |
| + client_->NotifyEndOfBitstreamBuffer(bitstream_id); |
| + assigned_bitstream_ids_.clear(); |
| + state_ = STATE_DESTROYING; |
| + QueueFlush(TASK_DESTROY); |
| } |
| bool VTVideoDecodeAccelerator::CanDecodeOnIOThread() { |