Chromium Code Reviews

Unified Diff: content/common/gpu/media/vt_video_decode_accelerator.cc

Issue 706023004: Collect VTVideoDecodeAccelerator frames into a work queue (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@vt_config_change
Patch Set: Work around PPAPI test failure. Created 6 years, 1 month ago
Index: content/common/gpu/media/vt_video_decode_accelerator.cc
diff --git a/content/common/gpu/media/vt_video_decode_accelerator.cc b/content/common/gpu/media/vt_video_decode_accelerator.cc
index b09a5e3e0c3ee36cc1ba0a1bf145be0e6fc42879..1cae8f255a43e391096088d820713abd00400fcf 100644
--- a/content/common/gpu/media/vt_video_decode_accelerator.cc
+++ b/content/common/gpu/media/vt_video_decode_accelerator.cc
@@ -7,12 +7,12 @@
#include <OpenGL/gl.h>
#include "base/bind.h"
-#include "base/callback_helpers.h"
#include "base/command_line.h"
#include "base/sys_byteorder.h"
#include "base/thread_task_runner_handle.h"
#include "content/common/gpu/media/vt_video_decode_accelerator.h"
#include "content/public/common/content_switches.h"
+#include "media/base/limits.h"
#include "media/filters/h264_parser.h"
#include "ui/gl/scoped_binders.h"
@@ -21,21 +21,22 @@ using content_common_gpu_media::InitializeStubs;
using content_common_gpu_media::IsVtInitialized;
using content_common_gpu_media::StubPathMap;
-#define NOTIFY_STATUS(name, status) \
- do { \
- LOG(ERROR) << name << " failed with status " << status; \
- NotifyError(PLATFORM_FAILURE); \
+#define NOTIFY_STATUS(name, status) \
+ do { \
+ DLOG(ERROR) << name << " failed with status " << status; \
+ NotifyError(PLATFORM_FAILURE); \
} while (0)
namespace content {
-// Size of NALU length headers in AVCC/MPEG-4 format (can be 1, 2, or 4).
+// Size to use for NALU length headers in AVC format (can be 1, 2, or 4).
static const int kNALUHeaderLength = 4;
-// We only request 5 picture buffers from the client which are used to hold the
-// decoded samples. These buffers are then reused when the client tells us that
-// it is done with the buffer.
-static const int kNumPictureBuffers = 5;
+// We request 5 picture buffers from the client, each of which has a texture ID
+// that we can bind decoded frames to. We need enough to satisfy preroll, and
+// enough to avoid unnecessary stalling, but no more than that. The resource
+// requirements are low, as we don't need the textures to be backed by storage.
+static const int kNumPictureBuffers = media::limits::kMaxVideoFrames + 1;
// Route decoded frame callbacks back into the VTVideoDecodeAccelerator.
static void OutputThunk(
@@ -48,28 +49,20 @@ static void OutputThunk(
CMTime presentation_duration) {
VTVideoDecodeAccelerator* vda =
reinterpret_cast<VTVideoDecodeAccelerator*>(decompression_output_refcon);
- int32_t bitstream_id = reinterpret_cast<intptr_t>(source_frame_refcon);
- vda->Output(bitstream_id, status, image_buffer);
+ vda->Output(source_frame_refcon, status, image_buffer);
}
-VTVideoDecodeAccelerator::DecodedFrame::DecodedFrame(
- int32_t bitstream_id,
- CVImageBufferRef image_buffer)
- : bitstream_id(bitstream_id),
- image_buffer(image_buffer) {
+VTVideoDecodeAccelerator::Task::Task(TaskType type) : type(type) {
}
-VTVideoDecodeAccelerator::DecodedFrame::~DecodedFrame() {
+VTVideoDecodeAccelerator::Task::~Task() {
}
-VTVideoDecodeAccelerator::PendingAction::PendingAction(
- Action action,
- int32_t bitstream_id)
- : action(action),
- bitstream_id(bitstream_id) {
+VTVideoDecodeAccelerator::Frame::Frame(int32_t bitstream_id)
+ : bitstream_id(bitstream_id) {
}
-VTVideoDecodeAccelerator::PendingAction::~PendingAction() {
+VTVideoDecodeAccelerator::Frame::~Frame() {
}
VTVideoDecodeAccelerator::VTVideoDecodeAccelerator(
@@ -78,15 +71,16 @@ VTVideoDecodeAccelerator::VTVideoDecodeAccelerator(
: cgl_context_(cgl_context),
make_context_current_(make_context_current),
client_(NULL),
- has_error_(false),
+ state_(STATE_DECODING),
format_(NULL),
session_(NULL),
gpu_task_runner_(base::ThreadTaskRunnerHandle::Get()),
- weak_this_factory_(this),
- decoder_thread_("VTDecoderThread") {
+ decoder_thread_("VTDecoderThread"),
+ weak_this_factory_(this) {
DCHECK(!make_context_current_.is_null());
callback_.decompressionOutputCallback = OutputThunk;
callback_.decompressionOutputRefCon = this;
+ weak_this_ = weak_this_factory_.GetWeakPtr();
}
VTVideoDecodeAccelerator::~VTVideoDecodeAccelerator() {
@@ -95,7 +89,7 @@ VTVideoDecodeAccelerator::~VTVideoDecodeAccelerator() {
bool VTVideoDecodeAccelerator::Initialize(
media::VideoCodecProfile profile,
Client* client) {
- DCHECK(CalledOnValidThread());
+ DCHECK(gpu_thread_checker_.CalledOnValidThread());
client_ = client;
// Only H.264 is supported.
@@ -126,10 +120,36 @@ bool VTVideoDecodeAccelerator::Initialize(
return true;
}
-bool VTVideoDecodeAccelerator::ConfigureDecoder(
- const std::vector<const uint8_t*>& nalu_data_ptrs,
- const std::vector<size_t>& nalu_data_sizes) {
+bool VTVideoDecodeAccelerator::FinishDelayedFrames() {
DCHECK(decoder_thread_.message_loop_proxy()->BelongsToCurrentThread());
+ if (session_) {
+ OSStatus status = VTDecompressionSessionFinishDelayedFrames(session_);
+ if (status) {
+ NOTIFY_STATUS("VTDecompressionSessionFinishDelayedFrames()", status);
+ return false;
+ }
+ }
+ return true;
+}
+
+bool VTVideoDecodeAccelerator::ConfigureDecoder() {
+ DCHECK(decoder_thread_.message_loop_proxy()->BelongsToCurrentThread());
+ DCHECK(!last_sps_.empty());
+ DCHECK(!last_pps_.empty());
+
+ // Build the configuration records.
+ std::vector<const uint8_t*> nalu_data_ptrs;
+ std::vector<size_t> nalu_data_sizes;
+ nalu_data_ptrs.reserve(3);
+ nalu_data_sizes.reserve(3);
+ nalu_data_ptrs.push_back(&last_sps_.front());
+ nalu_data_sizes.push_back(last_sps_.size());
+ if (!last_spsext_.empty()) {
+ nalu_data_ptrs.push_back(&last_spsext_.front());
+ nalu_data_sizes.push_back(last_spsext_.size());
+ }
+ nalu_data_ptrs.push_back(&last_pps_.front());
+ nalu_data_sizes.push_back(last_pps_.size());
// Construct a new format description from the parameter sets.
// TODO(sandersd): Replace this with custom code to support OS X < 10.9.
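[Editor's note: for readers unfamiliar with the CoreMedia call hidden behind this TODO, the following is a minimal sketch of how parameter-set pointers and sizes like the ones built above feed into a format description. The helper name and error handling are illustrative assumptions, not the code under review.]

#include <CoreMedia/CoreMedia.h>

#include <vector>

// Illustrative helper (not part of the patch): builds a format description
// from SPS/SPSExt/PPS blobs gathered as above, using 4-byte AVC length
// headers to match kNALUHeaderLength.
static CMFormatDescriptionRef CreateFormatFromParameterSets(
    const std::vector<const uint8_t*>& nalu_data_ptrs,
    const std::vector<size_t>& nalu_data_sizes) {
  CMFormatDescriptionRef format = nullptr;
  OSStatus status = CMVideoFormatDescriptionCreateFromH264ParameterSets(
      kCFAllocatorDefault,
      nalu_data_ptrs.size(),     // parameter_set_count
      &nalu_data_ptrs.front(),   // &parameter_set_pointers
      &nalu_data_sizes.front(),  // &parameter_set_sizes
      4,                         // nal_unit_header_length
      &format);
  if (status != 0)
    return nullptr;
  return format;  // Caller owns one reference (CFRelease when done).
}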
@@ -147,10 +167,15 @@ bool VTVideoDecodeAccelerator::ConfigureDecoder(
return false;
}
- // If the session is compatible, there's nothing to do.
+ // Store the new configuration data.
+ CMVideoDimensions coded_dimensions =
+ CMVideoFormatDescriptionGetDimensions(format_);
+ coded_size_.SetSize(coded_dimensions.width, coded_dimensions.height);
+
+ // If the session is compatible, there's nothing else to do.
if (session_ &&
VTDecompressionSessionCanAcceptFormatDescription(session_, format_)) {
- return true;
+ return true;
}
// Prepare VideoToolbox configuration dictionaries.
@@ -174,8 +199,6 @@ bool VTVideoDecodeAccelerator::ConfigureDecoder(
&kCFTypeDictionaryKeyCallBacks,
&kCFTypeDictionaryValueCallBacks));
- CMVideoDimensions coded_dimensions =
- CMVideoFormatDescriptionGetDimensions(format_);
#define CFINT(i) CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &i)
// TODO(sandersd): RGBA option for 4:4:4 video.
int32_t pixel_format = kCVPixelFormatType_422YpCbCr8;
@@ -207,49 +230,27 @@ bool VTVideoDecodeAccelerator::ConfigureDecoder(
return true;
}
-void VTVideoDecodeAccelerator::Decode(const media::BitstreamBuffer& bitstream) {
- DCHECK(CalledOnValidThread());
- // Not actually a requirement of the VDA API, but we're lazy and use negative
- // values as flags internally. Revisit that if this actually happens.
- if (bitstream.id() < 0) {
- LOG(ERROR) << "Negative bitstream ID";
- NotifyError(INVALID_ARGUMENT);
- client_->NotifyEndOfBitstreamBuffer(bitstream.id());
- return;
- }
- pending_bitstream_ids_.push(bitstream.id());
- decoder_thread_.message_loop_proxy()->PostTask(FROM_HERE, base::Bind(
- &VTVideoDecodeAccelerator::DecodeTask, base::Unretained(this),
- bitstream));
-}
-
void VTVideoDecodeAccelerator::DecodeTask(
- const media::BitstreamBuffer& bitstream) {
+ const media::BitstreamBuffer& bitstream,
+ Frame* frame) {
DCHECK(decoder_thread_.message_loop_proxy()->BelongsToCurrentThread());
- // Once we have a bitstream buffer, we must either decode it or drop it.
- // This construct ensures that the buffer is always dropped unless we call
- // drop_bitstream.Release().
- base::ScopedClosureRunner drop_bitstream(base::Bind(
- &VTVideoDecodeAccelerator::DropBitstream, base::Unretained(this),
- bitstream.id()));
-
// Map the bitstream buffer.
base::SharedMemory memory(bitstream.handle(), true);
size_t size = bitstream.size();
if (!memory.Map(size)) {
- LOG(ERROR) << "Failed to map bitstream buffer";
+ DLOG(ERROR) << "Failed to map bitstream buffer";
NotifyError(PLATFORM_FAILURE);
return;
}
const uint8_t* buf = static_cast<uint8_t*>(memory.memory());
// NALUs are stored with Annex B format in the bitstream buffer (start codes),
- // but VideoToolbox expects AVCC/MPEG-4 format (length headers), so we must
- // rewrite the data.
+ // but VideoToolbox expects AVC format (length headers), so we must rewrite
+ // the data.
//
- // 1. Locate relevant NALUs and compute the size of the translated data.
- // Also record any parameter sets for VideoToolbox initialization.
+ // Locate relevant NALUs and compute the size of the rewritten data. Also
+ // record any parameter sets for VideoToolbox initialization.
bool config_changed = false;
size_t data_size = 0;
std::vector<media::H264NALU> nalus;
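[Editor's note: as background for the comment above, the Annex B to AVC rewrite boils down to replacing each start code with a big-endian length header. The following is a self-contained sketch assuming 4-byte headers, with illustrative types rather than the patch's CMBlockBuffer path.]

#include <stdint.h>

#include <vector>

// Illustrative only: the patch writes into a CMBlockBuffer, but the rewrite
// itself is just "replace each Annex B start code with a 4-byte big-endian
// length header" (kNALUHeaderLength above).
struct NaluView {
  const uint8_t* data;
  size_t size;
};

static std::vector<uint8_t> RewriteAnnexBToAvc(
    const std::vector<NaluView>& nalus) {
  std::vector<uint8_t> out;
  for (const NaluView& nalu : nalus) {
    uint32_t size = static_cast<uint32_t>(nalu.size);
    // 4-byte big-endian length header.
    out.push_back(static_cast<uint8_t>(size >> 24));
    out.push_back(static_cast<uint8_t>(size >> 16));
    out.push_back(static_cast<uint8_t>(size >> 8));
    out.push_back(static_cast<uint8_t>(size));
    // NALU payload, unchanged.
    out.insert(out.end(), nalu.data, nalu.data + nalu.size);
  }
  return out;
}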
@@ -260,11 +261,10 @@ void VTVideoDecodeAccelerator::DecodeTask(
if (result == media::H264Parser::kEOStream)
break;
if (result != media::H264Parser::kOk) {
- LOG(ERROR) << "Failed to find H.264 NALU";
+ DLOG(ERROR) << "Failed to find H.264 NALU";
NotifyError(PLATFORM_FAILURE);
return;
}
- // TODO(sandersd): Strict ordering rules.
switch (nalu.nal_unit_type) {
case media::H264NALU::kSPS:
last_sps_.assign(nalu.data, nalu.data + nalu.size);
@@ -280,6 +280,15 @@ void VTVideoDecodeAccelerator::DecodeTask(
last_pps_.assign(nalu.data, nalu.data + nalu.size);
config_changed = true;
break;
+ case media::H264NALU::kSliceDataA:
+ case media::H264NALU::kSliceDataB:
+ case media::H264NALU::kSliceDataC:
+      DLOG(ERROR) << "Coded slice data partitions not implemented.";
+ NotifyError(PLATFORM_FAILURE);
+ return;
+ case media::H264NALU::kIDRSlice:
+ case media::H264NALU::kNonIDRSlice:
+ // TODO(sandersd): Compute pic_order_count.
default:
nalus.push_back(nalu);
data_size += kNALUHeaderLength + nalu.size;
@@ -287,47 +296,39 @@ void VTVideoDecodeAccelerator::DecodeTask(
}
}
- // 2. Initialize VideoToolbox.
- // TODO(sandersd): Check if the new configuration is identical before
- // reconfiguring.
+ // Initialize VideoToolbox.
+ // TODO(sandersd): Instead of assuming that the last SPS and PPS units are
+ // always the correct ones, maintain a cache of recent SPS and PPS units and
+ // select from them using the slice header.
if (config_changed) {
if (last_sps_.size() == 0 || last_pps_.size() == 0) {
- LOG(ERROR) << "Invalid configuration data";
+ DLOG(ERROR) << "Invalid configuration data";
NotifyError(INVALID_ARGUMENT);
return;
}
- // TODO(sandersd): Check that the SPS and PPS IDs match.
- std::vector<const uint8_t*> nalu_data_ptrs;
- std::vector<size_t> nalu_data_sizes;
- nalu_data_ptrs.push_back(&last_sps_.front());
- nalu_data_sizes.push_back(last_sps_.size());
- if (last_spsext_.size() != 0) {
- nalu_data_ptrs.push_back(&last_spsext_.front());
- nalu_data_sizes.push_back(last_spsext_.size());
- }
- nalu_data_ptrs.push_back(&last_pps_.front());
- nalu_data_sizes.push_back(last_pps_.size());
-
- // If ConfigureDecoder() fails, it already called NotifyError().
- if (!ConfigureDecoder(nalu_data_ptrs, nalu_data_sizes))
+ if (!ConfigureDecoder())
return;
}
- // If there are no non-configuration units, immediately return an empty
- // (ie. dropped) frame. It is an error to create a MemoryBlock with zero
- // size.
- if (!data_size)
+ // If there are no non-configuration units, drop the bitstream buffer by
+ // returning an empty frame.
+ if (!data_size) {
+ if (!FinishDelayedFrames())
+ return;
+ gpu_task_runner_->PostTask(FROM_HERE, base::Bind(
+ &VTVideoDecodeAccelerator::DecodeDone, weak_this_, frame));
return;
+ }
- // If the session is not configured, fail.
+ // If the session is not configured by this point, fail.
if (!session_) {
- LOG(ERROR) << "Image slice without configuration data";
+ DLOG(ERROR) << "Image slice without configuration";
NotifyError(INVALID_ARGUMENT);
return;
}
- // 3. Allocate a memory-backed CMBlockBuffer for the translated data.
- // TODO(sandersd): Check that the slice's PPS matches the current PPS.
+ // Create a memory-backed CMBlockBuffer for the translated data.
+ // TODO(sandersd): Pool of memory blocks.
base::ScopedCFTypeRef<CMBlockBufferRef> data;
OSStatus status = CMBlockBufferCreateWithMemoryBlock(
kCFAllocatorDefault,
@@ -344,7 +345,7 @@ void VTVideoDecodeAccelerator::DecodeTask(
return;
}
- // 4. Copy NALU data, inserting length headers.
+ // Copy NALU data into the CMBlockBuffer, inserting length headers.
size_t offset = 0;
for (size_t i = 0; i < nalus.size(); i++) {
media::H264NALU& nalu = nalus[i];
@@ -364,8 +365,8 @@ void VTVideoDecodeAccelerator::DecodeTask(
offset += nalu.size;
}
- // 5. Package the data for VideoToolbox and request decoding.
- base::ScopedCFTypeRef<CMSampleBufferRef> frame;
+ // Package the data in a CMSampleBuffer.
+ base::ScopedCFTypeRef<CMSampleBufferRef> sample;
status = CMSampleBufferCreate(
kCFAllocatorDefault,
data, // data_buffer
@@ -378,369 +379,299 @@ void VTVideoDecodeAccelerator::DecodeTask(
NULL, // &sample_timing_array
0, // num_sample_size_entries
NULL, // &sample_size_array
- frame.InitializeInto());
+ sample.InitializeInto());
if (status) {
NOTIFY_STATUS("CMSampleBufferCreate()", status);
return;
}
+ // Update the frame data.
+ frame->coded_size = coded_size_;
+
+ // Send the frame for decoding.
// Asynchronous Decompression allows for parallel submission of frames
// (without it, DecodeFrame() does not return until the frame has been
// decoded). We don't enable Temporal Processing so that frames are always
// returned in decode order; this makes it easier to avoid deadlock.
VTDecodeFrameFlags decode_flags =
kVTDecodeFrame_EnableAsynchronousDecompression;
-
- intptr_t bitstream_id = bitstream.id();
status = VTDecompressionSessionDecodeFrame(
session_,
- frame, // sample_buffer
+ sample, // sample_buffer
decode_flags, // decode_flags
- reinterpret_cast<void*>(bitstream_id), // source_frame_refcon
+ reinterpret_cast<void*>(frame), // source_frame_refcon
NULL); // &info_flags_out
if (status) {
NOTIFY_STATUS("VTDecompressionSessionDecodeFrame()", status);
return;
}
-
- // Now that the bitstream is decoding, don't drop it.
- (void)drop_bitstream.Release();
}
// This method may be called on any VideoToolbox thread.
void VTVideoDecodeAccelerator::Output(
- int32_t bitstream_id,
+ void* source_frame_refcon,
OSStatus status,
CVImageBufferRef image_buffer) {
if (status) {
- // TODO(sandersd): Handle dropped frames.
NOTIFY_STATUS("Decoding", status);
- image_buffer = NULL;
} else if (CFGetTypeID(image_buffer) != CVPixelBufferGetTypeID()) {
- LOG(ERROR) << "Decoded frame is not a CVPixelBuffer";
+ DLOG(ERROR) << "Decoded frame is not a CVPixelBuffer";
NotifyError(PLATFORM_FAILURE);
- image_buffer = NULL;
} else {
- CFRetain(image_buffer);
+ Frame* frame = reinterpret_cast<Frame*>(source_frame_refcon);
+ frame->image.reset(image_buffer, base::scoped_policy::RETAIN);
+ gpu_task_runner_->PostTask(FROM_HERE, base::Bind(
+ &VTVideoDecodeAccelerator::DecodeDone, weak_this_, frame));
}
+}
+
+void VTVideoDecodeAccelerator::DecodeDone(Frame* frame) {
+ DCHECK(gpu_thread_checker_.CalledOnValidThread());
+ DCHECK_EQ(frame->bitstream_id, pending_frames_.front()->bitstream_id);
+ Task task(TASK_FRAME);
+ task.frame = pending_frames_.front();
+ pending_frames_.pop();
+ pending_tasks_.push(task);
+ ProcessTasks();
+}
+
+void VTVideoDecodeAccelerator::FlushTask(TaskType type) {
+ DCHECK(decoder_thread_.message_loop_proxy()->BelongsToCurrentThread());
+ FinishDelayedFrames();
+
+ // Always queue a task, even if FinishDelayedFrames() fails, so that
+ // destruction always completes.
gpu_task_runner_->PostTask(FROM_HERE, base::Bind(
- &VTVideoDecodeAccelerator::OutputTask,
- weak_this_factory_.GetWeakPtr(),
- DecodedFrame(bitstream_id, image_buffer)));
+ &VTVideoDecodeAccelerator::FlushDone, weak_this_, type));
+}
+
+void VTVideoDecodeAccelerator::FlushDone(TaskType type) {
+ DCHECK(gpu_thread_checker_.CalledOnValidThread());
+ pending_tasks_.push(Task(type));
+ ProcessTasks();
}
-void VTVideoDecodeAccelerator::OutputTask(DecodedFrame frame) {
- DCHECK(CalledOnValidThread());
- decoded_frames_.push(frame);
- ProcessDecodedFrames();
+void VTVideoDecodeAccelerator::Decode(const media::BitstreamBuffer& bitstream) {
+ DCHECK(gpu_thread_checker_.CalledOnValidThread());
+ DCHECK_EQ(assigned_bitstream_ids_.count(bitstream.id()), 0u);
+ assigned_bitstream_ids_.insert(bitstream.id());
+ Frame* frame = new Frame(bitstream.id());
+ pending_frames_.push(make_linked_ptr(frame));
+ decoder_thread_.message_loop_proxy()->PostTask(FROM_HERE, base::Bind(
+ &VTVideoDecodeAccelerator::DecodeTask, base::Unretained(this),
+ bitstream, frame));
}
void VTVideoDecodeAccelerator::AssignPictureBuffers(
const std::vector<media::PictureBuffer>& pictures) {
- DCHECK(CalledOnValidThread());
+ DCHECK(gpu_thread_checker_.CalledOnValidThread());
- for (size_t i = 0; i < pictures.size(); i++) {
- DCHECK(!texture_ids_.count(pictures[i].id()));
- assigned_picture_ids_.insert(pictures[i].id());
- available_picture_ids_.push_back(pictures[i].id());
- texture_ids_[pictures[i].id()] = pictures[i].texture_id();
+ for (const media::PictureBuffer& picture : pictures) {
+ DCHECK(!texture_ids_.count(picture.id()));
+ assigned_picture_ids_.insert(picture.id());
+ available_picture_ids_.push_back(picture.id());
+ texture_ids_[picture.id()] = picture.texture_id();
}
// Pictures are not marked as uncleared until after this method returns, and
// they will be broken if they are used before that happens. So, schedule
// future work after that happens.
gpu_task_runner_->PostTask(FROM_HERE, base::Bind(
- &VTVideoDecodeAccelerator::ProcessDecodedFrames,
- weak_this_factory_.GetWeakPtr()));
+ &VTVideoDecodeAccelerator::ProcessTasks, weak_this_));
}
void VTVideoDecodeAccelerator::ReusePictureBuffer(int32_t picture_id) {
- DCHECK(CalledOnValidThread());
+ DCHECK(gpu_thread_checker_.CalledOnValidThread());
DCHECK_EQ(CFGetRetainCount(picture_bindings_[picture_id]), 1);
picture_bindings_.erase(picture_id);
- // Don't put the picture back in the available list if has been dismissed.
if (assigned_picture_ids_.count(picture_id) != 0) {
available_picture_ids_.push_back(picture_id);
- ProcessDecodedFrames();
- }
-}
-
-void VTVideoDecodeAccelerator::CompleteAction(Action action) {
- DCHECK(CalledOnValidThread());
-
- switch (action) {
- case ACTION_FLUSH:
- client_->NotifyFlushDone();
- break;
- case ACTION_RESET:
- client_->NotifyResetDone();
- break;
- case ACTION_DESTROY:
- delete this;
- break;
- }
-}
-
-void VTVideoDecodeAccelerator::CompleteActions(int32_t bitstream_id) {
- DCHECK(CalledOnValidThread());
- while (!pending_actions_.empty() &&
- pending_actions_.front().bitstream_id == bitstream_id) {
- CompleteAction(pending_actions_.front().action);
- pending_actions_.pop();
+ ProcessTasks();
+ } else {
+ client_->DismissPictureBuffer(picture_id);
}
}
-void VTVideoDecodeAccelerator::ProcessDecodedFrames() {
- DCHECK(CalledOnValidThread());
+void VTVideoDecodeAccelerator::ProcessTasks() {
+ DCHECK(gpu_thread_checker_.CalledOnValidThread());
- while (!decoded_frames_.empty()) {
- if (pending_actions_.empty()) {
- // No pending actions; send frames normally.
- if (!has_error_)
- SendPictures(pending_bitstream_ids_.back());
- return;
- }
+ while (!pending_tasks_.empty()) {
+ const Task& task = pending_tasks_.front();
- int32_t next_action_bitstream_id = pending_actions_.front().bitstream_id;
- int32_t last_sent_bitstream_id = -1;
- switch (pending_actions_.front().action) {
- case ACTION_FLUSH:
- // Send frames normally.
- if (has_error_)
+ switch (state_) {
+ case STATE_DECODING:
+ if (!ProcessTask(task))
return;
- last_sent_bitstream_id = SendPictures(next_action_bitstream_id);
+ pending_tasks_.pop();
break;
- case ACTION_RESET:
- // Drop decoded frames.
- if (has_error_)
+ case STATE_ERROR:
+ // Do nothing until Destroy() is called.
+ return;
+
+ case STATE_DESTROYING:
+ // Discard tasks until destruction is complete.
+ if (task.type == TASK_DESTROY) {
+ delete this;
return;
- while (!decoded_frames_.empty() &&
- last_sent_bitstream_id != next_action_bitstream_id) {
- last_sent_bitstream_id = decoded_frames_.front().bitstream_id;
- decoded_frames_.pop();
- DCHECK_EQ(pending_bitstream_ids_.front(), last_sent_bitstream_id);
- pending_bitstream_ids_.pop();
- client_->NotifyEndOfBitstreamBuffer(last_sent_bitstream_id);
}
+ pending_tasks_.pop();
break;
+ }
+ }
+}
- case ACTION_DESTROY:
- // Drop decoded frames, without bookkeeping.
- while (!decoded_frames_.empty()) {
- last_sent_bitstream_id = decoded_frames_.front().bitstream_id;
- decoded_frames_.pop();
- }
-
- // Handle completing the action specially, as it is important not to
- // access |this| after calling CompleteAction().
- if (last_sent_bitstream_id == next_action_bitstream_id)
- CompleteAction(ACTION_DESTROY);
+bool VTVideoDecodeAccelerator::ProcessTask(const Task& task) {
+ DCHECK(gpu_thread_checker_.CalledOnValidThread());
+ DCHECK_EQ(state_, STATE_DECODING);
- // Either |this| was deleted or no more progress can be made.
- return;
- }
+ switch (task.type) {
+ case TASK_FRAME:
+ return ProcessFrame(*task.frame);
- // If we ran out of buffers (or pictures), no more progress can be made
- // until more frames are decoded.
- if (last_sent_bitstream_id != next_action_bitstream_id)
- return;
+ case TASK_FLUSH:
+ DCHECK_EQ(task.type, pending_flush_tasks_.front());
+ pending_flush_tasks_.pop();
+ client_->NotifyFlushDone();
+ return true;
- // Complete all actions pending for this |bitstream_id|, then loop to see
- // if progress can be made on the next action.
- CompleteActions(next_action_bitstream_id);
- }
-}
+ case TASK_RESET:
+ DCHECK_EQ(task.type, pending_flush_tasks_.front());
+ pending_flush_tasks_.pop();
+ client_->NotifyResetDone();
+ return true;
-int32_t VTVideoDecodeAccelerator::ProcessDroppedFrames(
- int32_t last_sent_bitstream_id,
- int32_t up_to_bitstream_id) {
- DCHECK(CalledOnValidThread());
- // Drop frames as long as there is a frame, we have not reached the next
- // action, and the next frame has no image.
- while (!decoded_frames_.empty() &&
- last_sent_bitstream_id != up_to_bitstream_id &&
- decoded_frames_.front().image_buffer.get() == NULL) {
- const DecodedFrame& frame = decoded_frames_.front();
- DCHECK_EQ(pending_bitstream_ids_.front(), frame.bitstream_id);
- client_->NotifyEndOfBitstreamBuffer(frame.bitstream_id);
- last_sent_bitstream_id = frame.bitstream_id;
- decoded_frames_.pop();
- pending_bitstream_ids_.pop();
+ case TASK_DESTROY:
+ NOTREACHED() << "Can't destroy while in STATE_DECODING.";
+ NotifyError(ILLEGAL_STATE);
+ return false;
}
- return last_sent_bitstream_id;
}
-// TODO(sandersd): If GpuVideoDecoder didn't specifically check the size of
-// textures, this would be unnecessary, as the size is actually a property of
-// the texture binding, not the texture. We rebind every frame, so the size
-// passed to ProvidePictureBuffers() is meaningless.
-void VTVideoDecodeAccelerator::ProcessSizeChangeIfNeeded() {
- DCHECK(CalledOnValidThread());
- DCHECK(!decoded_frames_.empty());
-
- // Find the size of the next image.
- const DecodedFrame& frame = decoded_frames_.front();
- CVImageBufferRef image_buffer = frame.image_buffer.get();
- size_t width = CVPixelBufferGetWidth(image_buffer);
- size_t height = CVPixelBufferGetHeight(image_buffer);
- gfx::Size image_size(width, height);
-
- if (picture_size_ != image_size) {
- // Dismiss all assigned picture buffers.
- for (int32_t picture_id : assigned_picture_ids_)
- client_->DismissPictureBuffer(picture_id);
- assigned_picture_ids_.clear();
- available_picture_ids_.clear();
-
- // Request new pictures.
- client_->ProvidePictureBuffers(
- kNumPictureBuffers, image_size, GL_TEXTURE_RECTANGLE_ARB);
- picture_size_ = image_size;
+bool VTVideoDecodeAccelerator::ProcessFrame(const Frame& frame) {
+ DCHECK(gpu_thread_checker_.CalledOnValidThread());
+ DCHECK_EQ(state_, STATE_DECODING);
+ // If the next pending flush is for a reset, then the frame will be dropped.
+ bool resetting = !pending_flush_tasks_.empty() &&
+ pending_flush_tasks_.front() == TASK_RESET;
+ if (!resetting && frame.image.get()) {
+ // If the |coded_size| has changed, request new picture buffers and then
+ // wait for them.
+ // TODO(sandersd): If GpuVideoDecoder didn't specifically check the size of
+ // textures, this would be unnecessary, as the size is actually a property
+ // of the texture binding, not the texture. We rebind every frame, so the
+ // size passed to ProvidePictureBuffers() is meaningless.
+ if (picture_size_ != frame.coded_size) {
+ // Dismiss current pictures.
+ for (int32_t picture_id : assigned_picture_ids_)
+ client_->DismissPictureBuffer(picture_id);
+ assigned_picture_ids_.clear();
+ available_picture_ids_.clear();
+
+ // Request new pictures.
+ picture_size_ = frame.coded_size;
+ client_->ProvidePictureBuffers(
+ kNumPictureBuffers, coded_size_, GL_TEXTURE_RECTANGLE_ARB);
+ return false;
+ }
+ if (!SendFrame(frame))
+ return false;
}
+ assigned_bitstream_ids_.erase(frame.bitstream_id);
+ client_->NotifyEndOfBitstreamBuffer(frame.bitstream_id);
+ return true;
}
-int32_t VTVideoDecodeAccelerator::SendPictures(int32_t up_to_bitstream_id) {
- DCHECK(CalledOnValidThread());
- DCHECK(!decoded_frames_.empty());
-
- // TODO(sandersd): Store the actual last sent bitstream ID?
- int32_t last_sent_bitstream_id = -1;
-
- last_sent_bitstream_id =
- ProcessDroppedFrames(last_sent_bitstream_id, up_to_bitstream_id);
- if (last_sent_bitstream_id == up_to_bitstream_id || decoded_frames_.empty())
- return last_sent_bitstream_id;
+bool VTVideoDecodeAccelerator::SendFrame(const Frame& frame) {
+ DCHECK(gpu_thread_checker_.CalledOnValidThread());
+ DCHECK_EQ(state_, STATE_DECODING);
- ProcessSizeChangeIfNeeded();
if (available_picture_ids_.empty())
- return last_sent_bitstream_id;
+ return false;
+
+ int32_t picture_id = available_picture_ids_.back();
+ IOSurfaceRef surface = CVPixelBufferGetIOSurface(frame.image.get());
if (!make_context_current_.Run()) {
- LOG(ERROR) << "Failed to make GL context current";
+ DLOG(ERROR) << "Failed to make GL context current";
NotifyError(PLATFORM_FAILURE);
- return last_sent_bitstream_id;
+ return false;
}
glEnable(GL_TEXTURE_RECTANGLE_ARB);
- while (!available_picture_ids_.empty() && !has_error_) {
- DCHECK_NE(last_sent_bitstream_id, up_to_bitstream_id);
- DCHECK(!decoded_frames_.empty());
-
- // We don't pop |frame| or |picture_id| until they are consumed, which may
- // not happen if an error occurs. Conveniently, this also removes some
- // refcounting.
- const DecodedFrame& frame = decoded_frames_.front();
- DCHECK_EQ(pending_bitstream_ids_.front(), frame.bitstream_id);
- int32_t picture_id = available_picture_ids_.back();
-
- CVImageBufferRef image_buffer = frame.image_buffer.get();
- IOSurfaceRef surface = CVPixelBufferGetIOSurface(image_buffer);
-
- gfx::ScopedTextureBinder
- texture_binder(GL_TEXTURE_RECTANGLE_ARB, texture_ids_[picture_id]);
- CGLError status = CGLTexImageIOSurface2D(
- cgl_context_, // ctx
- GL_TEXTURE_RECTANGLE_ARB, // target
- GL_RGB, // internal_format
- picture_size_.width(), // width
- picture_size_.height(), // height
- GL_YCBCR_422_APPLE, // format
- GL_UNSIGNED_SHORT_8_8_APPLE, // type
- surface, // io_surface
- 0); // plane
- if (status != kCGLNoError) {
- NOTIFY_STATUS("CGLTexImageIOSurface2D()", status);
- break;
- }
-
- picture_bindings_[picture_id] = frame.image_buffer;
- client_->PictureReady(media::Picture(
- picture_id, frame.bitstream_id, gfx::Rect(picture_size_)));
- available_picture_ids_.pop_back();
- client_->NotifyEndOfBitstreamBuffer(frame.bitstream_id);
- last_sent_bitstream_id = frame.bitstream_id;
- decoded_frames_.pop();
- pending_bitstream_ids_.pop();
-
- last_sent_bitstream_id =
- ProcessDroppedFrames(last_sent_bitstream_id, up_to_bitstream_id);
- if (last_sent_bitstream_id == up_to_bitstream_id || decoded_frames_.empty())
- break;
-
- ProcessSizeChangeIfNeeded();
+ gfx::ScopedTextureBinder
+ texture_binder(GL_TEXTURE_RECTANGLE_ARB, texture_ids_[picture_id]);
+ CGLError status = CGLTexImageIOSurface2D(
+ cgl_context_, // ctx
+ GL_TEXTURE_RECTANGLE_ARB, // target
+ GL_RGB, // internal_format
+ frame.coded_size.width(), // width
+ frame.coded_size.height(), // height
+ GL_YCBCR_422_APPLE, // format
+ GL_UNSIGNED_SHORT_8_8_APPLE, // type
+ surface, // io_surface
+ 0); // plane
+ if (status != kCGLNoError) {
+ NOTIFY_STATUS("CGLTexImageIOSurface2D()", status);
+ return false;
}
glDisable(GL_TEXTURE_RECTANGLE_ARB);
- return last_sent_bitstream_id;
-}
-
-void VTVideoDecodeAccelerator::FlushTask() {
- DCHECK(decoder_thread_.message_loop_proxy()->BelongsToCurrentThread());
- OSStatus status = VTDecompressionSessionFinishDelayedFrames(session_);
- if (status)
- NOTIFY_STATUS("VTDecompressionSessionFinishDelayedFrames()", status);
-}
-
-void VTVideoDecodeAccelerator::QueueAction(Action action) {
- DCHECK(CalledOnValidThread());
- if (pending_bitstream_ids_.empty()) {
- // If there are no pending frames, all actions complete immediately.
- CompleteAction(action);
- } else {
- // Otherwise, queue the action.
- pending_actions_.push(PendingAction(action, pending_bitstream_ids_.back()));
-
- // Request a flush to make sure the action will eventually complete.
- decoder_thread_.message_loop_proxy()->PostTask(FROM_HERE, base::Bind(
- &VTVideoDecodeAccelerator::FlushTask, base::Unretained(this)));
-
- // See if we can make progress now that there is a new pending action.
- ProcessDecodedFrames();
- }
+ available_picture_ids_.pop_back();
+ picture_bindings_[picture_id] = frame.image;
+ client_->PictureReady(media::Picture(
+ picture_id, frame.bitstream_id, gfx::Rect(frame.coded_size)));
+ return true;
}
void VTVideoDecodeAccelerator::NotifyError(Error error) {
- if (!CalledOnValidThread()) {
+ if (!gpu_thread_checker_.CalledOnValidThread()) {
gpu_task_runner_->PostTask(FROM_HERE, base::Bind(
- &VTVideoDecodeAccelerator::NotifyError,
- weak_this_factory_.GetWeakPtr(),
- error));
- return;
+ &VTVideoDecodeAccelerator::NotifyError, weak_this_, error));
+ } else if (state_ == STATE_DECODING) {
+ state_ = STATE_ERROR;
+ client_->NotifyError(error);
}
- has_error_ = true;
- client_->NotifyError(error);
}
-void VTVideoDecodeAccelerator::DropBitstream(int32_t bitstream_id) {
- DCHECK(decoder_thread_.message_loop_proxy()->BelongsToCurrentThread());
- gpu_task_runner_->PostTask(FROM_HERE, base::Bind(
- &VTVideoDecodeAccelerator::OutputTask,
- weak_this_factory_.GetWeakPtr(),
- DecodedFrame(bitstream_id, NULL)));
+void VTVideoDecodeAccelerator::QueueFlush(TaskType type) {
+ DCHECK(gpu_thread_checker_.CalledOnValidThread());
+ pending_flush_tasks_.push(type);
+ decoder_thread_.message_loop_proxy()->PostTask(FROM_HERE, base::Bind(
+ &VTVideoDecodeAccelerator::FlushTask, base::Unretained(this),
+ type));
+
+ // If this is a new flush request, see if we can make progress.
+ if (pending_flush_tasks_.size() == 1)
+ ProcessTasks();
}
void VTVideoDecodeAccelerator::Flush() {
- DCHECK(CalledOnValidThread());
- QueueAction(ACTION_FLUSH);
+ DCHECK(gpu_thread_checker_.CalledOnValidThread());
+ QueueFlush(TASK_FLUSH);
}
void VTVideoDecodeAccelerator::Reset() {
- DCHECK(CalledOnValidThread());
- QueueAction(ACTION_RESET);
+ DCHECK(gpu_thread_checker_.CalledOnValidThread());
+ QueueFlush(TASK_RESET);
}
void VTVideoDecodeAccelerator::Destroy() {
- DCHECK(CalledOnValidThread());
- // Drop any other pending actions.
- while (!pending_actions_.empty())
- pending_actions_.pop();
- // Return all bitstream buffers.
- while (!pending_bitstream_ids_.empty()) {
- client_->NotifyEndOfBitstreamBuffer(pending_bitstream_ids_.front());
- pending_bitstream_ids_.pop();
+ DCHECK(gpu_thread_checker_.CalledOnValidThread());
+
+ // In a forceful shutdown, the decoder thread may be dead already.
+ if (!decoder_thread_.IsRunning()) {
+ delete this;
+ return;
}
- QueueAction(ACTION_DESTROY);
+
+ // For a graceful shutdown, return assigned buffers and flush before
+ // destructing |this|.
+ for (int32_t bitstream_id : assigned_bitstream_ids_)
+ client_->NotifyEndOfBitstreamBuffer(bitstream_id);
+ assigned_bitstream_ids_.clear();
+ state_ = STATE_DESTROYING;
+ QueueFlush(TASK_DESTROY);
}
bool VTVideoDecodeAccelerator::CanDecodeOnIOThread() {
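[Editor's note: stepping back from the raw diff, the heart of this change is that decoded frames, flushes, resets, and destruction all become entries in one ordered task queue whose processing is gated on the decoder state. The following is a simplified, self-contained model of that pattern, using hypothetical types rather than the actual Chromium classes.]

#include <queue>

// Simplified model of the patch's work queue. In the real code the states are
// STATE_DECODING, STATE_ERROR, and STATE_DESTROYING, and frame tasks can stall
// waiting for picture buffers.
enum class TaskType { kFrame, kFlush, kReset, kDestroy };
enum class State { kDecoding, kError, kDestroying };

struct Task {
  TaskType type;
  int bitstream_id = -1;  // Only meaningful for kFrame tasks.
};

class TaskQueueModel {
 public:
  void QueueTask(const Task& task) {
    pending_.push(task);
    ProcessTasks();
  }
  void Fail() { state_ = State::kError; }               // Mirrors NotifyError().
  void BeginDestroy() { state_ = State::kDestroying; }  // Mirrors Destroy().

 private:
  void ProcessTasks() {
    while (!pending_.empty()) {
      const Task& task = pending_.front();
      switch (state_) {
        case State::kDecoding:
          // The real ProcessTask() may refuse a frame task (e.g. no picture
          // buffer is available yet), pausing the queue until resources
          // arrive; this model simply consumes it.
          pending_.pop();
          break;
        case State::kError:
          // Hold everything until destruction.
          return;
        case State::kDestroying:
          // Discard tasks; the destroy task ends processing (the real code
          // deletes |this| at this point).
          if (task.type == TaskType::kDestroy)
            return;
          pending_.pop();
          break;
      }
    }
  }

  State state_ = State::kDecoding;
  std::queue<Task> pending_;
};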