Chromium Code Reviews

Unified Diff: content/common/gpu/media/v4l2_slice_video_decode_accelerator.cc

Issue 1822983002: Support external buffer import in VDA interface and add a V4L2SVDA impl. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Created 4 years, 9 months ago
Index: content/common/gpu/media/v4l2_slice_video_decode_accelerator.cc
diff --git a/content/common/gpu/media/v4l2_slice_video_decode_accelerator.cc b/content/common/gpu/media/v4l2_slice_video_decode_accelerator.cc
index 1e0943d82f149de85e8f8ab93c9c5196aaacc875..a7fff993977a944be911d8f88a449ff95e641c20 100644
--- a/content/common/gpu/media/v4l2_slice_video_decode_accelerator.cc
+++ b/content/common/gpu/media/v4l2_slice_video_decode_accelerator.cc
@@ -162,10 +162,10 @@ V4L2SliceVideoDecodeAccelerator::OutputRecord::OutputRecord()
: at_device(false),
at_client(false),
picture_id(-1),
+ texture_id(0),
egl_image(EGL_NO_IMAGE_KHR),
egl_sync(EGL_NO_SYNC_KHR),
- cleared(false) {
-}
+ cleared(false) {}
struct V4L2SliceVideoDecodeAccelerator::BitstreamBufferRef {
BitstreamBufferRef(
@@ -395,6 +395,7 @@ V4L2SliceVideoDecodeAccelerator::V4L2SliceVideoDecodeAccelerator(
video_profile_(media::VIDEO_CODEC_PROFILE_UNKNOWN),
output_format_fourcc_(0),
state_(kUninitialized),
+ output_mode_(Config::OutputMode::ALLOCATE),
decoder_flushing_(false),
decoder_resetting_(false),
surface_set_change_pending_(false),
@@ -437,8 +438,10 @@ bool V4L2SliceVideoDecodeAccelerator::Initialize(const Config& config,
DCHECK(child_task_runner_->BelongsToCurrentThread());
DCHECK_EQ(state_, kUninitialized);
- if (get_gl_context_cb_.is_null() || make_context_current_cb_.is_null()) {
- NOTREACHED() << "GL callbacks are required for this VDA";
+ if (!device_->SupportsDecodeProfileForV4L2PixelFormats(
+ config.profile, arraysize(supported_input_fourccs_),
+ supported_input_fourccs_)) {
+ DVLOGF(1) << "unsupported profile " << config.profile;
return false;
}
@@ -447,10 +450,9 @@ bool V4L2SliceVideoDecodeAccelerator::Initialize(const Config& config,
return false;
}
- if (!device_->SupportsDecodeProfileForV4L2PixelFormats(
- config.profile, arraysize(supported_input_fourccs_),
- supported_input_fourccs_)) {
- DVLOGF(1) << "unsupported profile " << config.profile;
+ if (config.output_mode != Config::OutputMode::ALLOCATE &&
+ config.output_mode != Config::OutputMode::IMPORT) {
+ NOTREACHED() << "Only ALLOCATE and IMPORT OutputModes are supported";
return false;
}
@@ -491,14 +493,18 @@ bool V4L2SliceVideoDecodeAccelerator::Initialize(const Config& config,
}
// We need the context to be initialized to query extensions.
- if (!make_context_current_cb_.Run()) {
- LOG(ERROR) << "Initialize(): could not make context current";
- return false;
- }
+ if (!make_context_current_cb_.is_null()) {
+ if (!make_context_current_cb_.Run()) {
+ LOG(ERROR) << "Initialize(): could not make context current";
+ return false;
+ }
- if (!gfx::g_driver_egl.ext.b_EGL_KHR_fence_sync) {
- LOG(ERROR) << "Initialize(): context does not have EGL_KHR_fence_sync";
- return false;
+ if (!gfx::g_driver_egl.ext.b_EGL_KHR_fence_sync) {
+ LOG(ERROR) << "Initialize(): context does not have EGL_KHR_fence_sync";
+ return false;
+ }
+ } else {
+ DVLOG(1) << "No GL callbacks provided, initializing without GL support";
}
// Capabilities check.
@@ -521,6 +527,7 @@ bool V4L2SliceVideoDecodeAccelerator::Initialize(const Config& config,
decoder_thread_task_runner_ = decoder_thread_.task_runner();
state_ = kInitialized;
+ output_mode_ = config.output_mode;
// InitializeTask will NOTIFY_ERROR on failure.
decoder_thread_task_runner_->PostTask(
@@ -894,7 +901,7 @@ void V4L2SliceVideoDecodeAccelerator::Dequeue() {
memset(&dqbuf, 0, sizeof(dqbuf));
memset(&planes, 0, sizeof(planes));
dqbuf.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
- dqbuf.memory = V4L2_MEMORY_USERPTR;
+ dqbuf.memory = V4L2_MEMORY_MMAP;
dqbuf.m.planes = planes;
dqbuf.length = input_planes_count_;
if (device_->Ioctl(VIDIOC_DQBUF, &dqbuf) != 0) {
@@ -920,7 +927,9 @@ void V4L2SliceVideoDecodeAccelerator::Dequeue() {
memset(&dqbuf, 0, sizeof(dqbuf));
memset(&planes, 0, sizeof(planes));
dqbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
- dqbuf.memory = V4L2_MEMORY_MMAP;
+ dqbuf.memory =
+ (output_mode_ == Config::OutputMode::ALLOCATE ? V4L2_MEMORY_MMAP
+ : V4L2_MEMORY_DMABUF);
dqbuf.m.planes = planes;
dqbuf.length = output_planes_count_;
if (device_->Ioctl(VIDIOC_DQBUF, &dqbuf) != 0) {
@@ -1056,7 +1065,6 @@ bool V4L2SliceVideoDecodeAccelerator::EnqueueOutputRecord(int index) {
OutputRecord& output_record = output_buffer_map_[index];
DCHECK(!output_record.at_device);
DCHECK(!output_record.at_client);
- DCHECK_NE(output_record.egl_image, EGL_NO_IMAGE_KHR);
DCHECK_NE(output_record.picture_id, -1);
if (output_record.egl_sync != EGL_NO_SYNC_KHR) {
@@ -1082,7 +1090,16 @@ bool V4L2SliceVideoDecodeAccelerator::EnqueueOutputRecord(int index) {
memset(qbuf_planes, 0, sizeof(qbuf_planes));
qbuf.index = index;
qbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
- qbuf.memory = V4L2_MEMORY_MMAP;
+ if (output_mode_ == Config::OutputMode::ALLOCATE) {
+ qbuf.memory = V4L2_MEMORY_MMAP;
+ } else {
+ qbuf.memory = V4L2_MEMORY_DMABUF;
+ DCHECK_EQ(output_planes_count_, output_record.dmabuf_fds.size());
+ for (size_t i = 0; i < output_record.dmabuf_fds.size(); ++i) {
+ DCHECK(output_record.dmabuf_fds[i].is_valid());
+ qbuf_planes[i].m.fd = output_record.dmabuf_fds[i].get();
+ }
+ }
qbuf.m.planes = qbuf_planes;
qbuf.length = output_planes_count_;
IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_QBUF, &qbuf);
@@ -1365,13 +1382,12 @@ bool V4L2SliceVideoDecodeAccelerator::FinishSurfaceSetChange() {
bool V4L2SliceVideoDecodeAccelerator::DestroyOutputs(bool dismiss) {
DVLOGF(3);
DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
- std::vector<EGLImageKHR> egl_images_to_destroy;
std::vector<int32_t> picture_buffers_to_dismiss;
if (output_buffer_map_.empty())
return true;
- for (auto output_record : output_buffer_map_) {
+ for (const auto& output_record : output_buffer_map_) {
DCHECK(!output_record.at_device);
if (output_record.egl_sync != EGL_NO_SYNC_KHR) {
@@ -1476,7 +1492,9 @@ void V4L2SliceVideoDecodeAccelerator::AssignPictureBuffersTask(
memset(&reqbufs, 0, sizeof(reqbufs));
reqbufs.count = buffers.size();
reqbufs.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
- reqbufs.memory = V4L2_MEMORY_MMAP;
+ reqbufs.memory =
+ (output_mode_ == Config::OutputMode::ALLOCATE ? V4L2_MEMORY_MMAP
+ : V4L2_MEMORY_DMABUF);
IOCTL_OR_ERROR_RETURN(VIDIOC_REQBUFS, &reqbufs);
if (reqbufs.count != buffers.size()) {
@@ -1485,19 +1503,64 @@ void V4L2SliceVideoDecodeAccelerator::AssignPictureBuffersTask(
return;
}
- child_task_runner_->PostTask(
- FROM_HERE, base::Bind(&V4L2SliceVideoDecodeAccelerator::CreateEGLImages,
- weak_this_, buffers, output_format_fourcc_,
- output_planes_count_));
+ DCHECK(free_output_buffers_.empty());
+ DCHECK(output_buffer_map_.empty());
+ output_buffer_map_.resize(buffers.size());
+ for (size_t i = 0; i < output_buffer_map_.size(); ++i) {
+ DCHECK(buffers[i].size() == coded_size_);
+
+ OutputRecord& output_record = output_buffer_map_[i];
+ DCHECK(!output_record.at_device);
+ DCHECK(!output_record.at_client);
+ DCHECK_EQ(output_record.egl_image, EGL_NO_IMAGE_KHR);
+ DCHECK_EQ(output_record.egl_sync, EGL_NO_SYNC_KHR);
+ DCHECK_EQ(output_record.picture_id, -1);
+ DCHECK(output_record.dmabuf_fds.empty());
+ DCHECK_EQ(output_record.cleared, false);
+
+ output_record.picture_id = buffers[i].id();
+ output_record.texture_id = buffers[i].texture_id();
+ // This will remain true until ImportBufferForPicture is called, either by
+ // the client, or by ourselves, if we are allocating.
+ output_record.at_client = true;
+ if (output_mode_ == Config::OutputMode::ALLOCATE) {
+ std::vector<base::ScopedFD> dmabuf_fds =
+ std::move(device_->GetDmabufsForV4L2Buffer(
+ i, output_planes_count_, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE));
+ if (dmabuf_fds.empty()) {
+ NOTIFY_ERROR(PLATFORM_FAILURE);
+ return;
+ }
+
+ auto passed_dmabuf_fds(make_scoped_ptr(
+ new std::vector<base::ScopedFD>(std::move(dmabuf_fds))));
+ ImportBufferForPictureTask(output_record.picture_id,
+ std::move(passed_dmabuf_fds));
+ } // else we'll get triggered via ImportBufferForPicture() from client.
+ DVLOGF(3) << "buffer[" << i << "]: picture_id=" << output_record.picture_id;
+ }
+
+ if (!StartDevicePoll()) {
+ NOTIFY_ERROR(PLATFORM_FAILURE);
+ return;
+ }
}
-void V4L2SliceVideoDecodeAccelerator::CreateEGLImages(
- const std::vector<media::PictureBuffer>& buffers,
- uint32_t output_format_fourcc,
- size_t output_planes_count) {
- DVLOGF(3);
+void V4L2SliceVideoDecodeAccelerator::CreateEGLImageFor(
+ size_t buffer_index,
+ scoped_ptr<std::vector<base::ScopedFD>> passed_dmabuf_fds,
+ GLuint texture_id,
+ const gfx::Size& size,
+ uint32_t fourcc) {
+ DVLOGF(3) << "index=" << buffer_index;
DCHECK(child_task_runner_->BelongsToCurrentThread());
+ if (get_gl_context_cb_.is_null() || make_context_current_cb_.is_null()) {
+ DLOG(ERROR) << "GL callbacks required for binding to EGLImages";
+ NOTIFY_ERROR(INVALID_ARGUMENT);
+ return;
+ }
+
gfx::GLContext* gl_context = get_gl_context_cb_.Run();
if (!gl_context || !make_context_current_cb_.Run()) {
DLOG(ERROR) << "No GL context";
@@ -1507,68 +1570,132 @@ void V4L2SliceVideoDecodeAccelerator::CreateEGLImages(
gfx::ScopedTextureBinder bind_restore(GL_TEXTURE_EXTERNAL_OES, 0);
- std::vector<EGLImageKHR> egl_images;
- for (size_t i = 0; i < buffers.size(); ++i) {
- EGLImageKHR egl_image = device_->CreateEGLImage(egl_display_,
- gl_context->GetHandle(),
- buffers[i].texture_id(),
- buffers[i].size(),
- i,
- output_format_fourcc,
- output_planes_count);
- if (egl_image == EGL_NO_IMAGE_KHR) {
- LOGF(ERROR) << "Could not create EGLImageKHR";
- for (const auto& image_to_destroy : egl_images)
- device_->DestroyEGLImage(egl_display_, image_to_destroy);
-
- NOTIFY_ERROR(PLATFORM_FAILURE);
- return;
- }
-
- egl_images.push_back(egl_image);
+ EGLImageKHR egl_image = device_->CreateEGLImage(egl_display_,
+ gl_context->GetHandle(),
+ texture_id,
+ size,
+ buffer_index,
+ fourcc,
+ *passed_dmabuf_fds);
+ if (egl_image == EGL_NO_IMAGE_KHR) {
+ LOGF(ERROR) << "Could not create EGLImageKHR,"
+ << " index=" << buffer_index << " texture_id=" << texture_id;
+ NOTIFY_ERROR(PLATFORM_FAILURE);
+ return;
}
decoder_thread_task_runner_->PostTask(
- FROM_HERE, base::Bind(
- &V4L2SliceVideoDecodeAccelerator::AssignEGLImages,
- base::Unretained(this), buffers, egl_images));
+ FROM_HERE, base::Bind(&V4L2SliceVideoDecodeAccelerator::AssignEGLImage,
+ base::Unretained(this), buffer_index, egl_image,
+ base::Passed(&passed_dmabuf_fds)));
}
-void V4L2SliceVideoDecodeAccelerator::AssignEGLImages(
- const std::vector<media::PictureBuffer>& buffers,
- const std::vector<EGLImageKHR>& egl_images) {
- DVLOGF(3);
+void V4L2SliceVideoDecodeAccelerator::AssignEGLImage(
+ size_t buffer_index,
+ EGLImageKHR egl_image,
+ scoped_ptr<std::vector<base::ScopedFD>> passed_dmabuf_fds) {
+ DVLOGF(3) << "index=" << buffer_index;
DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
- DCHECK_EQ(buffers.size(), egl_images.size());
- DCHECK(free_output_buffers_.empty());
- DCHECK(output_buffer_map_.empty());
+ DCHECK_LT(buffer_index, output_buffer_map_.size());
+ OutputRecord& output_record = output_buffer_map_[buffer_index];
+ DCHECK_EQ(output_record.egl_image, EGL_NO_IMAGE_KHR);
+ DCHECK_EQ(output_record.egl_sync, EGL_NO_SYNC_KHR);
+ DCHECK(!output_record.at_client);
+ DCHECK(!output_record.at_device);
- output_buffer_map_.resize(buffers.size());
+ output_record.egl_image = egl_image;
+ if (output_mode_ == Config::OutputMode::IMPORT) {
+ DCHECK(output_record.dmabuf_fds.empty());
+ output_record.dmabuf_fds = std::move(*passed_dmabuf_fds);
+ }
- for (size_t i = 0; i < output_buffer_map_.size(); ++i) {
- DCHECK(buffers[i].size() == coded_size_);
+ DCHECK_EQ(std::count(free_output_buffers_.begin(), free_output_buffers_.end(),
+ buffer_index),
+ 0);
+ free_output_buffers_.push_back(buffer_index);
+ ScheduleDecodeBufferTaskIfNeeded();
+}
- OutputRecord& output_record = output_buffer_map_[i];
- DCHECK(!output_record.at_device);
- DCHECK(!output_record.at_client);
- DCHECK_EQ(output_record.egl_image, EGL_NO_IMAGE_KHR);
- DCHECK_EQ(output_record.egl_sync, EGL_NO_SYNC_KHR);
- DCHECK_EQ(output_record.picture_id, -1);
- DCHECK_EQ(output_record.cleared, false);
+void V4L2SliceVideoDecodeAccelerator::ImportBufferForPicture(
+ int32_t picture_buffer_id,
+ const std::vector<gfx::GpuMemoryBufferHandle>& gpu_memory_buffer_handles) {
+ DVLOGF(3) << "picture_buffer_id=" << picture_buffer_id;
+ DCHECK(child_task_runner_->BelongsToCurrentThread());
- output_record.egl_image = egl_images[i];
- output_record.picture_id = buffers[i].id();
- free_output_buffers_.push_back(i);
- DVLOGF(3) << "buffer[" << i << "]: picture_id=" << output_record.picture_id;
+ auto passed_dmabuf_fds(make_scoped_ptr(new std::vector<base::ScopedFD>()));
+ for (const auto& handle : gpu_memory_buffer_handles) {
+ int fd = -1;
+#if defined(USE_OZONE)
+ fd = handle.native_pixmap_handle.fd.fd;
+#endif
+ DCHECK_NE(fd, -1);
+ passed_dmabuf_fds->push_back(base::ScopedFD(fd));
}
- if (!StartDevicePoll()) {
- NOTIFY_ERROR(PLATFORM_FAILURE);
+ if (output_mode_ != Config::OutputMode::IMPORT) {
+ LOGF(ERROR) << "Cannot import in non-import mode";
+ NOTIFY_ERROR(INVALID_ARGUMENT);
return;
}
- ProcessPendingEventsIfNeeded();
+ decoder_thread_task_runner_->PostTask(
+ FROM_HERE,
+ base::Bind(&V4L2SliceVideoDecodeAccelerator::ImportBufferForPictureTask,
+ base::Unretained(this), picture_buffer_id,
+ base::Passed(&passed_dmabuf_fds)));
+}
+
+void V4L2SliceVideoDecodeAccelerator::ImportBufferForPictureTask(
+ int32_t picture_buffer_id,
+ scoped_ptr<std::vector<base::ScopedFD>> passed_dmabuf_fds) {
+ DVLOGF(3) << "picture_buffer_id=" << picture_buffer_id;
+ DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
+
+ const auto iter =
+ std::find_if(output_buffer_map_.begin(), output_buffer_map_.end(),
+ [picture_buffer_id](const OutputRecord& output_record) {
+ return output_record.picture_id == picture_buffer_id;
+ });
+ if (iter == output_buffer_map_.end()) {
+ LOGF(ERROR) << "Invalid picture_buffer_id=" << picture_buffer_id;
+ NOTIFY_ERROR(INVALID_ARGUMENT);
+ return;
+ }
+
+ if (!iter->at_client) {
+ LOGF(ERROR) << "Cannot import buffer that not owned by client";
+ NOTIFY_ERROR(INVALID_ARGUMENT);
+ return;
+ }
+
+ size_t index = iter - output_buffer_map_.begin();
+ DCHECK_EQ(std::count(free_output_buffers_.begin(), free_output_buffers_.end(),
+ index),
+ 0);
+
+ DCHECK(!iter->at_device);
+ iter->at_client = false;
+ if (iter->texture_id != 0) {
+ if (iter->egl_image != EGL_NO_IMAGE_KHR) {
+ child_task_runner_->PostTask(
+ FROM_HERE,
+ base::Bind(base::IgnoreResult(&V4L2Device::DestroyEGLImage), device_,
+ egl_display_, iter->egl_image));
+ }
+
+ child_task_runner_->PostTask(
+ FROM_HERE,
+ base::Bind(&V4L2SliceVideoDecodeAccelerator::CreateEGLImageFor,
+ weak_this_, index, base::Passed(&passed_dmabuf_fds),
+ iter->texture_id, coded_size_, output_format_fourcc_));
+ } else {
+ // No need for an EGLImage, start using this buffer now.
+ DCHECK_EQ(output_planes_count_, passed_dmabuf_fds->size());
+ iter->dmabuf_fds.swap(*passed_dmabuf_fds);
+ free_output_buffers_.push_back(index);
+ ScheduleDecodeBufferTaskIfNeeded();
+ }
}
void V4L2SliceVideoDecodeAccelerator::ReusePictureBuffer(
@@ -1576,22 +1703,26 @@ void V4L2SliceVideoDecodeAccelerator::ReusePictureBuffer(
DCHECK(child_task_runner_->BelongsToCurrentThread());
DVLOGF(4) << "picture_buffer_id=" << picture_buffer_id;
- if (!make_context_current_cb_.Run()) {
- LOGF(ERROR) << "could not make context current";
- NOTIFY_ERROR(PLATFORM_FAILURE);
- return;
- }
+ scoped_ptr<EGLSyncKHRRef> egl_sync_ref;
- EGLSyncKHR egl_sync =
- eglCreateSyncKHR(egl_display_, EGL_SYNC_FENCE_KHR, NULL);
- if (egl_sync == EGL_NO_SYNC_KHR) {
- LOGF(ERROR) << "eglCreateSyncKHR() failed";
- NOTIFY_ERROR(PLATFORM_FAILURE);
- return;
+ if (!make_context_current_cb_.is_null()) {
+ if (!make_context_current_cb_.Run()) {
+ LOGF(ERROR) << "could not make context current";
+ NOTIFY_ERROR(PLATFORM_FAILURE);
+ return;
+ }
+
+ EGLSyncKHR egl_sync =
+ eglCreateSyncKHR(egl_display_, EGL_SYNC_FENCE_KHR, NULL);
+ if (egl_sync == EGL_NO_SYNC_KHR) {
+ LOGF(ERROR) << "eglCreateSyncKHR() failed";
+ NOTIFY_ERROR(PLATFORM_FAILURE);
+ return;
+ }
+
+ egl_sync_ref.reset(new EGLSyncKHRRef(egl_display_, egl_sync));
}
- scoped_ptr<EGLSyncKHRRef> egl_sync_ref(
- new EGLSyncKHRRef(egl_display_, egl_sync));
decoder_thread_task_runner_->PostTask(
FROM_HERE,
base::Bind(&V4L2SliceVideoDecodeAccelerator::ReusePictureBufferTask,
@@ -1628,9 +1759,12 @@ void V4L2SliceVideoDecodeAccelerator::ReusePictureBufferTask(
DCHECK_EQ(output_record.egl_sync, EGL_NO_SYNC_KHR);
DCHECK(!output_record.at_device);
output_record.at_client = false;
- output_record.egl_sync = egl_sync_ref->egl_sync;
- // Take ownership of the EGLSync.
- egl_sync_ref->egl_sync = EGL_NO_SYNC_KHR;
+ if (egl_sync_ref) {
+ output_record.egl_sync = egl_sync_ref->egl_sync;
+ // Take ownership of the EGLSync.
+ egl_sync_ref->egl_sync = EGL_NO_SYNC_KHR;
+ }
+
surfaces_at_display_.erase(it);
}
@@ -2510,7 +2644,6 @@ void V4L2SliceVideoDecodeAccelerator::OutputSurface(
DCHECK(!output_record.at_client);
DCHECK(!output_record.at_device);
- DCHECK_NE(output_record.egl_image, EGL_NO_IMAGE_KHR);
DCHECK_NE(output_record.picture_id, -1);
output_record.at_client = true;
@@ -2616,6 +2749,11 @@ bool V4L2SliceVideoDecodeAccelerator::TryToSetupDecodeOnSeparateThread(
return true;
}
+media::VideoPixelFormat V4L2SliceVideoDecodeAccelerator::GetOutputFormat()
+ const {
+ return V4L2Device::V4L2PixFmtToVideoPixelFormat(output_format_fourcc_);
+}
+
// static
media::VideoDecodeAccelerator::SupportedProfiles
V4L2SliceVideoDecodeAccelerator::GetSupportedProfiles() {

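The IMPORT path added above reverses the usual buffer flow: instead of the VDA allocating CAPTURE buffers with V4L2_MEMORY_MMAP and exporting them, the client hands in dmabuf-backed GpuMemoryBufferHandles and the VDA queues them with V4L2_MEMORY_DMABUF. Below is a minimal, hypothetical client-side sketch of that hand-off, not part of this patch: ImportBufferForPicture() matches the implementation added in this CL, while ImportClientBuffers(), AllocateDmabufHandleForPlane(), and the plane count are illustrative stand-ins for whatever allocation path (e.g. Ozone native pixmaps) a real client would use.

#include <vector>

#include "media/video/video_decode_accelerator.h"
#include "ui/gfx/gpu_memory_buffer.h"

// Hypothetical helper, assumed to allocate a dmabuf-backed handle for one
// plane of one picture buffer (e.g. from a gbm/Ozone native pixmap).
gfx::GpuMemoryBufferHandle AllocateDmabufHandleForPlane(int32_t picture_id,
                                                        size_t plane);

void ImportClientBuffers(media::VideoDecodeAccelerator* vda,
                         const std::vector<int32_t>& picture_buffer_ids,
                         size_t planes_per_buffer) {
  for (int32_t id : picture_buffer_ids) {
    std::vector<gfx::GpuMemoryBufferHandle> handles;
    for (size_t plane = 0; plane < planes_per_buffer; ++plane) {
      // On Ozone, handle.native_pixmap_handle.fd.fd carries the dmabuf fd;
      // the VDA wraps it in a base::ScopedFD, so ownership passes to it.
      handles.push_back(AllocateDmabufHandleForPlane(id, plane));
    }
    // One handle per output plane; the decoder thread picks these up in
    // ImportBufferForPictureTask() and queues them with V4L2_MEMORY_DMABUF.
    vda->ImportBufferForPicture(id, handles);
  }
}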