Index: content/common/gpu/media/v4l2_slice_video_decode_accelerator.cc
diff --git a/content/common/gpu/media/v4l2_slice_video_decode_accelerator.cc b/content/common/gpu/media/v4l2_slice_video_decode_accelerator.cc
index f48fc934813295f35d25886462f464ddcf46aed6..4dd1af0c67be26f15e55160387ee03c627349736 100644
--- a/content/common/gpu/media/v4l2_slice_video_decode_accelerator.cc
+++ b/content/common/gpu/media/v4l2_slice_video_decode_accelerator.cc
@@ -20,8 +20,10 @@
 #include "base/numerics/safe_conversions.h"
 #include "base/strings/stringprintf.h"
 #include "content/common/gpu/media/v4l2_slice_video_decode_accelerator.h"
+#include "content/public/common/gpu_video_decode_accelerator_helpers.h"
 #include "media/base/bind_to_current_loop.h"
 #include "media/base/media_switches.h"
+#include "ui/gl/gl_context.h"
 #include "ui/gl/scoped_binders.h"
 
 #define LOGF(level) LOG(level) << __FUNCTION__ << "(): "
@@ -382,15 +384,11 @@ V4L2VP8Picture::~V4L2VP8Picture() {
 V4L2SliceVideoDecodeAccelerator::V4L2SliceVideoDecodeAccelerator(
     const scoped_refptr<V4L2Device>& device,
     EGLDisplay egl_display,
-    EGLContext egl_context,
-    const base::WeakPtr<Client>& io_client,
-    const base::Callback<bool(void)>& make_context_current,
-    const scoped_refptr<base::SingleThreadTaskRunner>& io_task_runner)
+    const gpu_vda_helpers::GetGLContextCb& get_gl_context_cb,
+    const gpu_vda_helpers::MakeGLContextCurrentCb& make_context_current_cb)
     : input_planes_count_(0),
       output_planes_count_(0),
       child_task_runner_(base::ThreadTaskRunnerHandle::Get()),
-      io_task_runner_(io_task_runner),
-      io_client_(io_client),
       device_(device),
       decoder_thread_("V4L2SliceVideoDecodeAcceleratorThread"),
       device_poll_thread_("V4L2SliceVideoDecodeAcceleratorDevicePollThread"),
@@ -405,9 +403,9 @@ V4L2SliceVideoDecodeAccelerator::V4L2SliceVideoDecodeAccelerator(
       decoder_resetting_(false),
       surface_set_change_pending_(false),
       picture_clearing_count_(0),
-      make_context_current_(make_context_current),
       egl_display_(egl_display),
-      egl_context_(egl_context),
+      get_gl_context_cb_(get_gl_context_cb),
+      make_context_current_cb_(make_context_current_cb),
       weak_this_factory_(this) {
   weak_this_ = weak_this_factory_.GetWeakPtr();
 }
@@ -443,6 +441,11 @@ bool V4L2SliceVideoDecodeAccelerator::Initialize(const Config& config,
   DCHECK(child_task_runner_->BelongsToCurrentThread());
   DCHECK_EQ(state_, kUninitialized);
 
+  if (get_gl_context_cb_.is_null() || make_context_current_cb_.is_null()) {
+    NOTREACHED() << "GL callbacks are required for this VDA";
+    return false;
+  }
+
   if (config.is_encrypted) {
     NOTREACHED() << "Encrypted streams are not supported for this VDA";
     return false;
@@ -458,6 +461,14 @@ bool V4L2SliceVideoDecodeAccelerator::Initialize(const Config& config,
   client_ptr_factory_.reset(
       new base::WeakPtrFactory<VideoDecodeAccelerator::Client>(client));
   client_ = client_ptr_factory_->GetWeakPtr();
+  // If we haven't been set up to decode on separate thread via
+  // TryInitializeDecodeOnSeparateThread(), use the main thread/client for
+  // decode tasks.
+  if (!decode_task_runner_) {
+    decode_task_runner_ = child_task_runner_;
+    DCHECK(!decode_client_);
+    decode_client_ = client_;
+  }
 
   video_profile_ = config.profile;
 
@@ -484,7 +495,7 @@ bool V4L2SliceVideoDecodeAccelerator::Initialize(const Config& config,
   }
 
   // We need the context to be initialized to query extensions.
-  if (!make_context_current_.Run()) {
+  if (!make_context_current_cb_.Run()) {
     LOG(ERROR) << "Initialize(): could not make context current";
     return false;
   }
@@ -1188,7 +1199,7 @@ void V4L2SliceVideoDecodeAccelerator::Decode(
     const media::BitstreamBuffer& bitstream_buffer) {
   DVLOGF(3) << "input_id=" << bitstream_buffer.id()
             << ", size=" << bitstream_buffer.size();
-  DCHECK(io_task_runner_->BelongsToCurrentThread());
+  DCHECK(decode_task_runner_->BelongsToCurrentThread());
 
   if (bitstream_buffer.id() < 0) {
     LOG(ERROR) << "Invalid bitstream_buffer, id: " << bitstream_buffer.id();
@@ -1210,7 +1221,7 @@ void V4L2SliceVideoDecodeAccelerator::DecodeTask(
   DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
 
   scoped_ptr<BitstreamBufferRef> bitstream_record(new BitstreamBufferRef(
-      io_client_, io_task_runner_,
+      decode_client_, decode_task_runner_,
       new base::SharedMemory(bitstream_buffer.handle(), true),
       bitstream_buffer.size(), bitstream_buffer.id()));
   if (!bitstream_record->shm->Map(bitstream_buffer.size())) {
@@ -1492,8 +1503,9 @@ void V4L2SliceVideoDecodeAccelerator::CreateEGLImages(
   DVLOGF(3);
   DCHECK(child_task_runner_->BelongsToCurrentThread());
 
-  if (!make_context_current_.Run()) {
-    DLOG(ERROR) << "could not make context current";
+  gfx::GLContext* gl_context = get_gl_context_cb_.Run();
+  if (!gl_context || !make_context_current_cb_.Run()) {
+    DLOG(ERROR) << "Failed to get GL context or make it current";
     NOTIFY_ERROR(PLATFORM_FAILURE);
     return;
   }
@@ -1503,7 +1515,7 @@ void V4L2SliceVideoDecodeAccelerator::CreateEGLImages(
   std::vector<EGLImageKHR> egl_images;
   for (size_t i = 0; i < buffers.size(); ++i) {
     EGLImageKHR egl_image = device_->CreateEGLImage(egl_display_,
-                                                    egl_context_,
+                                                    gl_context->GetHandle(),
                                                     buffers[i].texture_id(),
                                                     buffers[i].size(),
                                                     i,
@@ -1569,7 +1581,7 @@ void V4L2SliceVideoDecodeAccelerator::ReusePictureBuffer(
   DCHECK(child_task_runner_->BelongsToCurrentThread());
   DVLOGF(4) << "picture_buffer_id=" << picture_buffer_id;
 
-  if (!make_context_current_.Run()) {
+  if (!make_context_current_cb_.Run()) {
     LOGF(ERROR) << "could not make context current";
     NOTIFY_ERROR(PLATFORM_FAILURE);
     return;
@@ -1645,7 +1657,7 @@ void V4L2SliceVideoDecodeAccelerator::FlushTask() {
     // which - when reached - will trigger flush sequence.
     decoder_input_queue_.push(
         linked_ptr<BitstreamBufferRef>(new BitstreamBufferRef(
-            io_client_, io_task_runner_, nullptr, 0, kFlushBufferId)));
+            decode_client_, decode_task_runner_, nullptr, 0, kFlushBufferId)));
     return;
   }
 
@@ -2556,12 +2568,14 @@ void V4L2SliceVideoDecodeAccelerator::SendPictureReady() {
     bool cleared = pending_picture_ready_.front().cleared;
     const media::Picture& picture = pending_picture_ready_.front().picture;
     if (cleared && picture_clearing_count_ == 0) {
-      DVLOGF(4) << "Posting picture ready to IO for: "
+      DVLOGF(4) << "Posting picture ready to decode task runner for: "
                 << picture.picture_buffer_id();
-      // This picture is cleared. Post it to IO thread to reduce latency. This
-      // should be the case after all pictures are cleared at the beginning.
-      io_task_runner_->PostTask(
-          FROM_HERE, base::Bind(&Client::PictureReady, io_client_, picture));
+      // This picture is cleared. It can be posted to a thread different than
+      // the main GPU thread to reduce latency. This should be the case after
+      // all pictures are cleared at the beginning.
+      decode_task_runner_->PostTask(
+          FROM_HERE,
+          base::Bind(&Client::PictureReady, decode_client_, picture));
       pending_picture_ready_.pop();
     } else if (!cleared || resetting_or_flushing) {
       DVLOGF(3) << "cleared=" << pending_picture_ready_.front().cleared
@@ -2599,7 +2613,11 @@ void V4L2SliceVideoDecodeAccelerator::PictureCleared() {
   SendPictureReady();
 }
 
-bool V4L2SliceVideoDecodeAccelerator::CanDecodeOnIOThread() {
+bool V4L2SliceVideoDecodeAccelerator::TryInitializeDecodeOnSeparateThread(
+    const base::WeakPtr<Client>& decode_client,
+    const scoped_refptr<base::SingleThreadTaskRunner>& decode_task_runner) {
+  decode_client_ = decode_client;
+  decode_task_runner_ = decode_task_runner;
   return true;
 }
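
The gpu_vda_helpers callback types and the caller-side wiring are outside this diff. Below is a rough sketch of the shape this change appears to assume; the typedefs mirror the new constructor parameters, while GetGLContextFromStub, MakeStubContextCurrent and the surrounding variables are hypothetical placeholders for illustration only, not existing Chromium functions.

  // Assumed callback typedefs, matching the parameters used above (the real
  // declarations would live in gpu_video_decode_accelerator_helpers.h, which
  // this diff does not show).
  namespace gpu_vda_helpers {
  using GetGLContextCb = base::Callback<gfx::GLContext*(void)>;
  using MakeGLContextCurrentCb = base::Callback<bool(void)>;
  }  // namespace gpu_vda_helpers

  // Hypothetical construction site: the owner of the GL context passes
  // callbacks instead of a raw EGLContext, IO client and IO task runner.
  scoped_ptr<media::VideoDecodeAccelerator> vda(
      new V4L2SliceVideoDecodeAccelerator(
          device, egl_display,
          base::Bind(&GetGLContextFromStub, stub),      // returns gfx::GLContext*
          base::Bind(&MakeStubContextCurrent, stub)));  // returns bool

  // A client able to run Decode() off the main GPU thread opts in before
  // Initialize(); otherwise Initialize() falls back to the child (main GPU)
  // thread and the main client, as added above.
  if (vda->TryInitializeDecodeOnSeparateThread(io_client, io_task_runner))
    DVLOG(1) << "Decoding on IO thread";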