Chromium Code Reviews

Unified Diff: content/common/gpu/media/exynos_video_decode_accelerator.cc

Issue 68713007: Revert 235128 "Remove GSC usage from ExynosVideoDecodeAccelerator." (Closed) Base URL: svn://svn.chromium.org/chrome/branches/1710/src/
Patch Set: Created 7 years, 1 month ago
Index: content/common/gpu/media/exynos_video_decode_accelerator.cc
===================================================================
--- content/common/gpu/media/exynos_video_decode_accelerator.cc (revision 235426)
+++ content/common/gpu/media/exynos_video_decode_accelerator.cc (working copy)
@@ -5,7 +5,6 @@
#include <dlfcn.h>
#include <errno.h>
#include <fcntl.h>
-#include <libdrm/drm_fourcc.h>
#include <linux/videodev2.h>
#include <poll.h>
#include <sys/eventfd.h>
@@ -57,7 +56,15 @@
#endif
const char kExynosMfcDevice[] = "/dev/mfc-dec";
+const char kExynosGscDevice[] = "/dev/gsc1";
+const char kMaliDriver[] = "libmali.so";
+typedef EGLBoolean (*MaliEglImageGetBufferExtPhandleFunc)(EGLImageKHR, EGLint*,
+ void*);
+
+void* libmali_handle = NULL;
+MaliEglImageGetBufferExtPhandleFunc
+ mali_egl_image_get_buffer_ext_phandle = NULL;
} // anonymous namespace
struct ExynosVideoDecodeAccelerator::BitstreamBufferRef {
@@ -77,14 +84,13 @@
};
struct ExynosVideoDecodeAccelerator::PictureBufferArrayRef {
- PictureBufferArrayRef(EGLDisplay egl_display);
+ PictureBufferArrayRef(EGLDisplay egl_display, size_t count);
~PictureBufferArrayRef();
struct PictureBufferRef {
- PictureBufferRef(EGLImageKHR egl_image, int32 picture_id)
- : egl_image(egl_image), picture_id(picture_id) {}
EGLImageKHR egl_image;
- int32 picture_id;
+ int egl_image_fd;
+ int32 client_id;
};
EGLDisplay const egl_display;
@@ -125,14 +131,24 @@
}
ExynosVideoDecodeAccelerator::PictureBufferArrayRef::PictureBufferArrayRef(
- EGLDisplay egl_display)
- : egl_display(egl_display) {}
+ EGLDisplay egl_display, size_t count)
+ : egl_display(egl_display),
+ picture_buffers(count) {
+ for (size_t i = 0; i < picture_buffers.size(); ++i) {
+ PictureBufferRef& buffer = picture_buffers[i];
+ buffer.egl_image = EGL_NO_IMAGE_KHR;
+ buffer.egl_image_fd = -1;
+ buffer.client_id = -1;
+ }
+}
ExynosVideoDecodeAccelerator::PictureBufferArrayRef::~PictureBufferArrayRef() {
for (size_t i = 0; i < picture_buffers.size(); ++i) {
- EGLImageKHR egl_image = picture_buffers[i].egl_image;
- if (egl_image != EGL_NO_IMAGE_KHR)
- eglDestroyImageKHR(egl_display, egl_image);
+ PictureBufferRef& buffer = picture_buffers[i];
+ if (buffer.egl_image != EGL_NO_IMAGE_KHR)
+ eglDestroyImageKHR(egl_display, buffer.egl_image);
+ if (buffer.egl_image_fd != -1)
+ HANDLE_EINTR(close(buffer.egl_image_fd));
}
}
@@ -160,17 +176,38 @@
ExynosVideoDecodeAccelerator::MfcOutputRecord::MfcOutputRecord()
: at_device(false),
+ input_id(-1) {
+ bytes_used[0] = 0;
+ bytes_used[1] = 0;
+ address[0] = NULL;
+ address[1] = NULL;
+ length[0] = 0;
+ length[1] = 0;
+}
+
+ExynosVideoDecodeAccelerator::MfcOutputRecord::~MfcOutputRecord() {
+}
+
+ExynosVideoDecodeAccelerator::GscInputRecord::GscInputRecord()
+ : at_device(false),
+ mfc_output(-1) {
+}
+
+ExynosVideoDecodeAccelerator::GscInputRecord::~GscInputRecord() {
+}
+
+ExynosVideoDecodeAccelerator::GscOutputRecord::GscOutputRecord()
+ : at_device(false),
at_client(false),
+ fd(-1),
egl_image(EGL_NO_IMAGE_KHR),
egl_sync(EGL_NO_SYNC_KHR),
picture_id(-1),
- cleared(false) {
- for (size_t i = 0; i < arraysize(fds); ++i)
- fds[i] = -1;
+ cleared(false) {}
+
+ExynosVideoDecodeAccelerator::GscOutputRecord::~GscOutputRecord() {
}
-ExynosVideoDecodeAccelerator::MfcOutputRecord::~MfcOutputRecord() {}
-
ExynosVideoDecodeAccelerator::PictureRecord::PictureRecord(
bool cleared,
const media::Picture& picture)
@@ -208,6 +245,11 @@
mfc_output_buffer_queued_count_(0),
mfc_output_buffer_pixelformat_(0),
mfc_output_dpb_size_(0),
+ gsc_fd_(-1),
+ gsc_input_streamon_(false),
+ gsc_input_buffer_queued_count_(0),
+ gsc_output_streamon_(false),
+ gsc_output_buffer_queued_count_(0),
picture_clearing_count_(0),
device_poll_thread_("ExynosDevicePollThread"),
device_poll_interrupt_fd_(-1),
@@ -224,6 +266,12 @@
HANDLE_EINTR(close(device_poll_interrupt_fd_));
device_poll_interrupt_fd_ = -1;
}
+ if (gsc_fd_ != -1) {
+ DestroyGscInputBuffers();
+ DestroyGscOutputBuffers();
+ HANDLE_EINTR(close(gsc_fd_));
+ gsc_fd_ = -1;
+ }
if (mfc_fd_ != -1) {
DestroyMfcInputBuffers();
DestroyMfcOutputBuffers();
@@ -235,6 +283,8 @@
// descriptors, mmap() segments, etc.
DCHECK(mfc_input_buffer_map_.empty());
DCHECK(mfc_output_buffer_map_.empty());
+ DCHECK(gsc_input_buffer_map_.empty());
+ DCHECK(gsc_output_buffer_map_.empty());
}
bool ExynosVideoDecodeAccelerator::Initialize(
@@ -262,6 +312,13 @@
};
video_profile_ = profile;
+ static bool sandbox_initialized = PostSandboxInitialization();
+ if (!sandbox_initialized) {
+ DLOG(ERROR) << "Initialize(): PostSandboxInitialization() failed";
+ NOTIFY_ERROR(PLATFORM_FAILURE);
+ return false;
+ }
+
if (egl_display_ == EGL_NO_DISPLAY) {
DLOG(ERROR) << "Initialize(): could not get EGLDisplay";
NOTIFY_ERROR(PLATFORM_FAILURE);
@@ -297,6 +354,15 @@
NOTIFY_ERROR(PLATFORM_FAILURE);
return false;
}
+ DVLOG(2) << "Initialize(): opening GSC device: " << kExynosGscDevice;
+ gsc_fd_ = HANDLE_EINTR(open(kExynosGscDevice,
+ O_RDWR | O_NONBLOCK | O_CLOEXEC));
+ if (gsc_fd_ == -1) {
+ DPLOG(ERROR) << "Initialize(): could not open GSC device: "
+ << kExynosGscDevice;
+ NOTIFY_ERROR(PLATFORM_FAILURE);
+ return false;
+ }
// Create the interrupt fd.
DCHECK_EQ(device_poll_interrupt_fd_, -1);
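
[Note] The added GSC path repeats the capability check already done for MFC: VIDIOC_QUERYCAP on the freshly opened node, then a bitmask test against the required multi-planar streaming caps before any buffers are requested. A minimal standalone sketch of that check, using plain ioctl() instead of the file's IOCTL_OR_ERROR_RETURN_FALSE macro; the helper name and the exact capability flags are illustrative, since kCapsRequired itself is not part of this diff:

#include <fcntl.h>
#include <linux/videodev2.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <cstdio>
#include <cstring>

// Open a V4L2 node and verify it advertises the multi-planar M2M
// capabilities needed before any REQBUFS/STREAMON calls.
bool CheckM2MCaps(const char* device_path) {
  int fd = open(device_path, O_RDWR | O_NONBLOCK | O_CLOEXEC);
  if (fd == -1) {
    perror("open");
    return false;
  }
  struct v4l2_capability caps;
  memset(&caps, 0, sizeof(caps));
  const __u32 kRequired = V4L2_CAP_VIDEO_CAPTURE_MPLANE |
                          V4L2_CAP_VIDEO_OUTPUT_MPLANE |
                          V4L2_CAP_STREAMING;  // illustrative flag set
  bool ok = (ioctl(fd, VIDIOC_QUERYCAP, &caps) == 0) &&
            ((caps.capabilities & kRequired) == kRequired);
  if (!ok)
    fprintf(stderr, "caps check failed: 0x%x\n", caps.capabilities);
  close(fd);
  return ok;
}
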
@@ -320,6 +386,13 @@
NOTIFY_ERROR(PLATFORM_FAILURE);
return false;
}
+ IOCTL_OR_ERROR_RETURN_FALSE(gsc_fd_, VIDIOC_QUERYCAP, &caps);
+ if ((caps.capabilities & kCapsRequired) != kCapsRequired) {
+ DLOG(ERROR) << "Initialize(): ioctl() failed: VIDIOC_QUERYCAP"
+ ", caps check failed: 0x" << std::hex << caps.capabilities;
+ NOTIFY_ERROR(PLATFORM_FAILURE);
+ return false;
+ }
if (!CreateMfcInputBuffers())
return false;
@@ -328,7 +401,7 @@
struct v4l2_format format;
memset(&format, 0, sizeof(format));
format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
- format.fmt.pix_mp.pixelformat = V4L2_PIX_FMT_NV12M;
+ format.fmt.pix_mp.pixelformat = V4L2_PIX_FMT_NV12MT_16X16;
IOCTL_OR_ERROR_RETURN_FALSE(mfc_fd_, VIDIOC_S_FMT, &format);
// Subscribe to the resolution change event.
@@ -373,10 +446,10 @@
DVLOG(3) << "AssignPictureBuffers(): buffer_count=" << buffers.size();
DCHECK(child_message_loop_proxy_->BelongsToCurrentThread());
- if (buffers.size() != mfc_output_buffer_map_.size()) {
+ if (buffers.size() != gsc_output_buffer_map_.size()) {
DLOG(ERROR) << "AssignPictureBuffers(): Failed to provide requested picture"
- " buffers. (Got " << buffers.size()
- << ", requested " << mfc_output_buffer_map_.size() << ")";
+ " buffers. (Got " << buffers.size() << ", requested " <<
+ gsc_output_buffer_map_.size() << ")";
NOTIFY_ERROR(INVALID_ARGUMENT);
return;
}
@@ -387,45 +460,59 @@
return;
}
- scoped_ptr<PictureBufferArrayRef> picture_buffers_ref(
- new PictureBufferArrayRef(egl_display_));
- gfx::ScopedTextureBinder bind_restore(GL_TEXTURE_EXTERNAL_OES, 0);
- EGLint attrs[] = {
- EGL_WIDTH, 0, EGL_HEIGHT, 0,
- EGL_LINUX_DRM_FOURCC_EXT, 0, EGL_DMA_BUF_PLANE0_FD_EXT, 0,
- EGL_DMA_BUF_PLANE0_OFFSET_EXT, 0, EGL_DMA_BUF_PLANE0_PITCH_EXT, 0,
- EGL_DMA_BUF_PLANE1_FD_EXT, 0, EGL_DMA_BUF_PLANE1_OFFSET_EXT, 0,
- EGL_DMA_BUF_PLANE1_PITCH_EXT, 0, EGL_NONE, };
- attrs[1] = frame_buffer_size_.width();
- attrs[3] = frame_buffer_size_.height();
- attrs[5] = DRM_FORMAT_NV12;
- for (size_t i = 0; i < mfc_output_buffer_map_.size(); ++i) {
+ scoped_ptr<PictureBufferArrayRef> pic_buffers_ref(
+ new PictureBufferArrayRef(egl_display_, buffers.size()));
+
+ const static EGLint kImageAttrs[] = {
+ EGL_IMAGE_PRESERVED_KHR, 0,
+ EGL_NONE,
+ };
+ Display* x_display = base::MessagePumpForUI::GetDefaultXDisplay();
+ gfx::ScopedTextureBinder bind_restore(GL_TEXTURE_2D, 0);
+ for (size_t i = 0; i < pic_buffers_ref->picture_buffers.size(); ++i) {
DCHECK(buffers[i].size() == frame_buffer_size_);
- MfcOutputRecord& output_record = mfc_output_buffer_map_[i];
- attrs[7] = output_record.fds[0];
- attrs[9] = 0;
- attrs[11] = frame_buffer_size_.width();
- attrs[13] = output_record.fds[1];
- attrs[15] = 0;
- attrs[17] = frame_buffer_size_.width();
+ PictureBufferArrayRef::PictureBufferRef& buffer =
+ pic_buffers_ref->picture_buffers[i];
+ // Create the X pixmap and then create an EGLImageKHR from it, so we can
+ // get dma_buf backing.
+ Pixmap pixmap = XCreatePixmap(x_display,
+ RootWindow(x_display, 0),
+ frame_buffer_size_.width(),
+ frame_buffer_size_.height(),
+ 32);
+ if (!pixmap) {
+ DLOG(ERROR) << "AssignPictureBuffers(): could not create X pixmap";
+ NOTIFY_ERROR(PLATFORM_FAILURE);
+ return;
+ }
+ glBindTexture(GL_TEXTURE_2D, buffers[i].texture_id());
EGLImageKHR egl_image = eglCreateImageKHR(
- egl_display_, EGL_NO_CONTEXT, EGL_LINUX_DMA_BUF_EXT, NULL, attrs);
+ egl_display_, EGL_NO_CONTEXT, EGL_NATIVE_PIXMAP_KHR,
+ (EGLClientBuffer)pixmap, kImageAttrs);
+ // We can free the X pixmap immediately -- according to the
+ // EGL_KHR_image_base spec, the backing storage does not go away until the
+ // last referencing EGLImage is destroyed.
+ XFreePixmap(x_display, pixmap);
if (egl_image == EGL_NO_IMAGE_KHR) {
DLOG(ERROR) << "AssignPictureBuffers(): could not create EGLImageKHR";
NOTIFY_ERROR(PLATFORM_FAILURE);
return;
}
-
- glBindTexture(GL_TEXTURE_EXTERNAL_OES, buffers[i].texture_id());
- glEGLImageTargetTexture2DOES(GL_TEXTURE_EXTERNAL_OES, egl_image);
- picture_buffers_ref->picture_buffers.push_back(
- PictureBufferArrayRef::PictureBufferRef(egl_image, buffers[i].id()));
+ buffer.egl_image = egl_image;
+ int fd;
+ if (!mali_egl_image_get_buffer_ext_phandle(buffer.egl_image, NULL, &fd)) {
+ DLOG(ERROR) << "AssignPictureBuffers(): "
+ << "could not get EGLImageKHR dmabuf fd";
+ NOTIFY_ERROR(PLATFORM_FAILURE);
+ return;
+ }
+ buffer.egl_image_fd = fd;
+ glEGLImageTargetTexture2DOES(GL_TEXTURE_2D, egl_image);
+ buffer.client_id = buffers[i].id();
}
- decoder_thread_.message_loop()->PostTask(
- FROM_HERE,
- base::Bind(&ExynosVideoDecodeAccelerator::AssignPictureBuffersTask,
- base::Unretained(this),
- base::Passed(&picture_buffers_ref)));
+ decoder_thread_.message_loop()->PostTask(FROM_HERE, base::Bind(
+ &ExynosVideoDecodeAccelerator::AssignPictureBuffersTask,
+ base::Unretained(this), base::Passed(&pic_buffers_ref)));
}
void ExynosVideoDecodeAccelerator::ReusePictureBuffer(int32 picture_buffer_id) {
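
[Note] The reverted AssignPictureBuffers() no longer imports MFC dma_bufs into EGL; instead each client texture is backed by an EGLImage created from an X pixmap, and the Mali-private entry point is used to pull the dma_buf fd back out so GSC can write into it. A minimal sketch of that flow, assuming the KHR image extension prototypes are available (as they are through Chromium's GL bindings), with the Mali function pointer passed in as resolved by PostSandboxInitialization() further down; the helper name is illustrative and error paths are trimmed:

#include <X11/Xlib.h>
#define EGL_EGLEXT_PROTOTYPES
#include <EGL/egl.h>
#include <EGL/eglext.h>
#define GL_GLEXT_PROTOTYPES
#include <GLES2/gl2.h>
#include <GLES2/gl2ext.h>

typedef EGLBoolean (*MaliEglImageGetBufferExtPhandleFunc)(EGLImageKHR, EGLint*,
                                                          void*);

// Back |texture_id| with an EGLImage created from an X pixmap and return the
// dma_buf fd behind that image (or -1 on failure). |*out_image| receives the
// EGLImage so the caller can destroy it later.
int CreatePictureBacking(Display* x_display, EGLDisplay egl_display,
                         MaliEglImageGetBufferExtPhandleFunc get_phandle,
                         int width, int height, GLuint texture_id,
                         EGLImageKHR* out_image) {
  static const EGLint kImageAttrs[] = { EGL_IMAGE_PRESERVED_KHR, 0, EGL_NONE };
  Pixmap pixmap =
      XCreatePixmap(x_display, RootWindow(x_display, 0), width, height, 32);
  if (!pixmap)
    return -1;
  EGLImageKHR image = eglCreateImageKHR(
      egl_display, EGL_NO_CONTEXT, EGL_NATIVE_PIXMAP_KHR,
      reinterpret_cast<EGLClientBuffer>(pixmap), kImageAttrs);
  // Per EGL_KHR_image_base, the pixmap's storage stays alive as long as an
  // EGLImage references it, so the X id can be freed immediately.
  XFreePixmap(x_display, pixmap);
  if (image == EGL_NO_IMAGE_KHR)
    return -1;
  glBindTexture(GL_TEXTURE_2D, texture_id);
  glEGLImageTargetTexture2DOES(GL_TEXTURE_2D, image);
  int fd = -1;
  if (!get_phandle(image, NULL, &fd)) {
    eglDestroyImageKHR(egl_display, image);
    return -1;
  }
  *out_image = image;
  return fd;
}
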
@@ -494,6 +581,39 @@
bool ExynosVideoDecodeAccelerator::CanDecodeOnIOThread() { return true; }
+// static
+void ExynosVideoDecodeAccelerator::PreSandboxInitialization() {
+ DVLOG(3) << "PreSandboxInitialization()";
+ dlerror();
+
+ libmali_handle = dlopen(kMaliDriver, RTLD_LAZY | RTLD_LOCAL);
+ if (libmali_handle == NULL) {
+ DPLOG(ERROR) << "failed to dlopen() " << kMaliDriver << ": " << dlerror();
+ }
+}
+
+// static
+bool ExynosVideoDecodeAccelerator::PostSandboxInitialization() {
+ DVLOG(3) << "PostSandboxInitialization()";
+ if (libmali_handle == NULL) {
+ DLOG(ERROR) << "PostSandboxInitialization(): no " << kMaliDriver
+ << " driver handle";
+ return false;
+ }
+
+ dlerror();
+ mali_egl_image_get_buffer_ext_phandle =
+ reinterpret_cast<MaliEglImageGetBufferExtPhandleFunc>(
+ dlsym(libmali_handle, "mali_egl_image_get_buffer_ext_phandle"));
+ if (mali_egl_image_get_buffer_ext_phandle == NULL) {
+ DPLOG(ERROR) << "PostSandboxInitialization(): failed to dlsym() "
+ << "mali_egl_image_get_buffer_ext_phandle: " << dlerror();
+ return false;
+ }
+
+ return true;
+}
+
void ExynosVideoDecodeAccelerator::DecodeTask(
const media::BitstreamBuffer& bitstream_buffer) {
DVLOG(3) << "DecodeTask(): input_id=" << bitstream_buffer.id();
@@ -526,7 +646,7 @@
return;
}
- decoder_input_queue_.push(
+ decoder_input_queue_.push_back(
linked_ptr<BitstreamBufferRef>(bitstream_record.release()));
decoder_decode_buffer_tasks_scheduled_++;
DecodeBufferTask();
@@ -564,7 +684,7 @@
// Setup to use the next buffer.
decoder_current_bitstream_buffer_.reset(buffer_ref.release());
- decoder_input_queue_.pop();
+ decoder_input_queue_.pop_front();
DVLOG(3) << "DecodeBufferTask(): reading input_id="
<< decoder_current_bitstream_buffer_->input_id
<< ", addr=" << (decoder_current_bitstream_buffer_->shm ?
@@ -916,7 +1036,7 @@
}
// Queue it to MFC.
- mfc_input_ready_queue_.push(decoder_current_input_buffer_);
+ mfc_input_ready_queue_.push_back(decoder_current_input_buffer_);
decoder_current_input_buffer_ = -1;
DVLOG(3) << "FlushInputFrame(): submitting input_id="
<< input_record.input_id;
@@ -939,28 +1059,30 @@
return;
}
- DCHECK_EQ(pic_buffers->picture_buffers.size(), mfc_output_buffer_map_.size());
- for (size_t i = 0; i < mfc_output_buffer_map_.size(); ++i) {
- MfcOutputRecord& output_record = mfc_output_buffer_map_[i];
- PictureBufferArrayRef::PictureBufferRef& buffer_ref =
- pic_buffers->picture_buffers[i];
+ DCHECK_EQ(pic_buffers->picture_buffers.size(), gsc_output_buffer_map_.size());
+ for (size_t i = 0; i < gsc_output_buffer_map_.size(); ++i) {
// We should be blank right now.
- DCHECK(!output_record.at_device);
- DCHECK(!output_record.at_client);
+ GscOutputRecord& output_record = gsc_output_buffer_map_[i];
+ DCHECK_EQ(output_record.fd, -1);
DCHECK_EQ(output_record.egl_image, EGL_NO_IMAGE_KHR);
DCHECK_EQ(output_record.egl_sync, EGL_NO_SYNC_KHR);
DCHECK_EQ(output_record.picture_id, -1);
DCHECK_EQ(output_record.cleared, false);
- output_record.egl_image = buffer_ref.egl_image;
- output_record.picture_id = buffer_ref.picture_id;
- mfc_free_output_buffers_.push(i);
- DVLOG(3) << "AssignPictureBuffersTask(): buffer[" << i
- << "]: picture_id=" << buffer_ref.picture_id;
+ PictureBufferArrayRef::PictureBufferRef& buffer =
+ pic_buffers->picture_buffers[i];
+ output_record.fd = buffer.egl_image_fd;
+ output_record.egl_image = buffer.egl_image;
+ output_record.picture_id = buffer.client_id;
+
+ // Take ownership of the EGLImage and fd.
+ buffer.egl_image = EGL_NO_IMAGE_KHR;
+ buffer.egl_image_fd = -1;
+ // And add this buffer to the free list.
+ gsc_free_output_buffers_.push_back(i);
}
- pic_buffers->picture_buffers.clear();
- // We got buffers! Kick the MFC.
- EnqueueMfc();
+ // We got buffers! Kick the GSC.
+ EnqueueGsc();
if (decoder_state_ == kChangingResolution)
ResumeAfterResolutionChange();
@@ -988,7 +1110,9 @@
if (mfc_event_pending)
DequeueMfcEvents();
DequeueMfc();
+ DequeueGsc();
EnqueueMfc();
+ EnqueueGsc();
// Clear the interrupt fd.
if (!ClearDevicePollInterrupt())
@@ -999,6 +1123,10 @@
// MFC can be polled as soon as either input or output buffers are queued.
if (mfc_input_buffer_queued_count_ + mfc_output_buffer_queued_count_ > 0)
poll_fds |= kPollMfc;
+ // Add GSC fd, if we should poll on it.
+ // GSC has to wait until both input and output buffers are queued.
+ if (gsc_input_buffer_queued_count_ > 0 && gsc_output_buffer_queued_count_ > 0)
+ poll_fds |= kPollGsc;
// ServiceDeviceTask() should only ever be scheduled from DevicePollTask(),
// so either:
@@ -1021,7 +1149,14 @@
<< mfc_input_buffer_map_.size() << "->"
<< mfc_free_output_buffers_.size() << "+"
<< mfc_output_buffer_queued_count_ << "/"
- << mfc_output_buffer_map_.size() << "] => VDA["
+ << mfc_output_buffer_map_.size() << "] => "
+ << mfc_output_gsc_input_queue_.size() << " => GSC["
+ << gsc_free_input_buffers_.size() << "+"
+ << gsc_input_buffer_queued_count_ << "/"
+ << gsc_input_buffer_map_.size() << "->"
+ << gsc_free_output_buffers_.size() << "+"
+ << gsc_output_buffer_queued_count_ << "/"
+ << gsc_output_buffer_map_.size() << "] => VDA["
<< decoder_frames_at_client_ << "]";
ScheduleDecodeBufferTaskIfNeeded();
@@ -1150,25 +1285,24 @@
}
MfcOutputRecord& output_record = mfc_output_buffer_map_[dqbuf.index];
DCHECK(output_record.at_device);
- DCHECK(!output_record.at_client);
- DCHECK_NE(output_record.egl_image, EGL_NO_IMAGE_KHR);
- DCHECK_NE(output_record.picture_id, -1);
output_record.at_device = false;
- if (dqbuf.m.planes[0].bytesused + dqbuf.m.planes[1].bytesused == 0) {
+ output_record.bytes_used[0] = dqbuf.m.planes[0].bytesused;
+ output_record.bytes_used[1] = dqbuf.m.planes[1].bytesused;
+ if (output_record.bytes_used[0] + output_record.bytes_used[1] == 0) {
// This is an empty output buffer returned as part of a flush.
- mfc_free_output_buffers_.push(dqbuf.index);
+ mfc_free_output_buffers_.push_back(dqbuf.index);
+ output_record.input_id = -1;
} else {
- DCHECK_GE(dqbuf.timestamp.tv_sec, 0);
- output_record.at_client = true;
- DVLOG(3) << "DequeueMfc(): returning input_id=" << dqbuf.timestamp.tv_sec
- << " as picture_id=" << output_record.picture_id;
- const media::Picture& picture =
- media::Picture(output_record.picture_id, dqbuf.timestamp.tv_sec);
- pending_picture_ready_.push(
- PictureRecord(output_record.cleared, picture));
- SendPictureReady();
- output_record.cleared = true;
- decoder_frames_at_client_++;
+ // This is an output buffer with contents to pass down the pipe.
+ mfc_output_gsc_input_queue_.push_back(dqbuf.index);
+ output_record.input_id = dqbuf.timestamp.tv_sec;
+ DCHECK(output_record.input_id >= 0);
+ DVLOG(3) << "DequeueMfc(): dequeued input_id=" << output_record.input_id;
+ // We don't count this output buffer dequeued yet, or add it to the free
+ // list, as it has data GSC needs to process.
+
+ // We have new frames in mfc_output_gsc_input_queue_. Kick the pipe.
+ SetDevicePollInterrupt();
}
mfc_output_buffer_queued_count_--;
}
@@ -1176,12 +1310,144 @@
NotifyFlushDoneIfNeeded();
}
+void ExynosVideoDecodeAccelerator::EnqueueGsc() {
+ DVLOG(3) << "EnqueueGsc()";
+ DCHECK_EQ(decoder_thread_.message_loop(), base::MessageLoop::current());
+ DCHECK_NE(decoder_state_, kUninitialized);
+ DCHECK_NE(decoder_state_, kInitialized);
+ TRACE_EVENT0("Video Decoder", "EVDA::EnqueueGsc");
+
+ // Drain the pipe of completed MFC output buffers.
+ const int old_gsc_inputs_queued = gsc_input_buffer_queued_count_;
+ while (!mfc_output_gsc_input_queue_.empty() &&
+ !gsc_free_input_buffers_.empty()) {
+ if (!EnqueueGscInputRecord())
+ return;
+ }
+ if (old_gsc_inputs_queued == 0 && gsc_input_buffer_queued_count_ != 0) {
+ // We just started up a previously empty queue.
+ // Queue state changed; signal interrupt.
+ if (!SetDevicePollInterrupt())
+ return;
+ // Start VIDIOC_STREAMON if we haven't yet.
+ if (!gsc_input_streamon_) {
+ __u32 type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ IOCTL_OR_ERROR_RETURN(gsc_fd_, VIDIOC_STREAMON, &type);
+ gsc_input_streamon_ = true;
+ }
+ }
+
+ if (gsc_input_buffer_queued_count_ != 0 &&
+ gsc_output_buffer_queued_count_ == 0 &&
+ !gsc_free_output_buffers_.empty()) {
+ const int old_gsc_outputs_queued = gsc_output_buffer_queued_count_;
+ if (!EnqueueGscOutputRecord())
+ return;
+ if (old_gsc_outputs_queued == 0 && gsc_output_buffer_queued_count_ != 0) {
+ // We just started up a previously empty queue.
+ // Queue state changed; signal interrupt.
+ if (!SetDevicePollInterrupt())
+ return;
+ // Start VIDIOC_STREAMON if we haven't yet.
+ if (!gsc_output_streamon_) {
+ __u32 type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ IOCTL_OR_ERROR_RETURN(gsc_fd_, VIDIOC_STREAMON, &type);
+ gsc_output_streamon_ = true;
+ }
+ }
+ }
+ // Bug check: GSC is liable to race conditions if more than one buffer is
+ // simultaneously queued.
+ DCHECK_GE(1, gsc_output_buffer_queued_count_);
+}
+
+void ExynosVideoDecodeAccelerator::DequeueGsc() {
+ DVLOG(3) << "DequeueGsc()";
+ DCHECK_EQ(decoder_thread_.message_loop(), base::MessageLoop::current());
+ DCHECK_NE(decoder_state_, kUninitialized);
+ DCHECK_NE(decoder_state_, kInitialized);
+ DCHECK_NE(decoder_state_, kAfterReset);
+ TRACE_EVENT0("Video Decoder", "EVDA::DequeueGsc");
+
+ // Dequeue completed GSC input (VIDEO_OUTPUT) buffers, and recycle to the free
+ // list. Also recycle the corresponding MFC output buffers at this time.
+ struct v4l2_buffer dqbuf;
+ struct v4l2_plane planes[2];
+ while (gsc_input_buffer_queued_count_ > 0) {
+ DCHECK(gsc_input_streamon_);
+ memset(&dqbuf, 0, sizeof(dqbuf));
+ memset(planes, 0, sizeof(planes));
+ dqbuf.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ dqbuf.memory = V4L2_MEMORY_DMABUF;
+ dqbuf.m.planes = planes;
+ dqbuf.length = 2;
+ if (ioctl(gsc_fd_, VIDIOC_DQBUF, &dqbuf) != 0) {
+ if (errno == EAGAIN) {
+ // EAGAIN if we're just out of buffers to dequeue.
+ break;
+ }
+ DPLOG(ERROR) << "DequeueGsc(): ioctl() failed: VIDIOC_DQBUF";
+ NOTIFY_ERROR(PLATFORM_FAILURE);
+ return;
+ }
+ GscInputRecord& input_record = gsc_input_buffer_map_[dqbuf.index];
+ MfcOutputRecord& output_record =
+ mfc_output_buffer_map_[input_record.mfc_output];
+ DCHECK(input_record.at_device);
+ gsc_free_input_buffers_.push_back(dqbuf.index);
+ mfc_free_output_buffers_.push_back(input_record.mfc_output);
+ input_record.at_device = false;
+ input_record.mfc_output = -1;
+ output_record.input_id = -1;
+ gsc_input_buffer_queued_count_--;
+ }
+
+ // Dequeue completed GSC output (VIDEO_CAPTURE) buffers, and send them off to
+ // the client. Don't recycle to its free list yet -- we can't do that until
+ // ReusePictureBuffer() returns it to us.
+ while (gsc_output_buffer_queued_count_ > 0) {
+ DCHECK(gsc_output_streamon_);
+ memset(&dqbuf, 0, sizeof(dqbuf));
+ memset(planes, 0, sizeof(planes));
+ dqbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ dqbuf.memory = V4L2_MEMORY_DMABUF;
+ dqbuf.m.planes = planes;
+ dqbuf.length = 1;
+ if (ioctl(gsc_fd_, VIDIOC_DQBUF, &dqbuf) != 0) {
+ if (errno == EAGAIN) {
+ // EAGAIN if we're just out of buffers to dequeue.
+ break;
+ }
+ DPLOG(ERROR) << "DequeueGsc(): ioctl() failed: VIDIOC_DQBUF";
+ NOTIFY_ERROR(PLATFORM_FAILURE);
+ return;
+ }
+ GscOutputRecord& output_record = gsc_output_buffer_map_[dqbuf.index];
+ DCHECK(output_record.at_device);
+ DCHECK(!output_record.at_client);
+ DCHECK_EQ(output_record.egl_sync, EGL_NO_SYNC_KHR);
+ output_record.at_device = false;
+ output_record.at_client = true;
+ gsc_output_buffer_queued_count_--;
+ DVLOG(3) << "DequeueGsc(): returning input_id=" << dqbuf.timestamp.tv_sec
+ << " as picture_id=" << output_record.picture_id;
+ const media::Picture& picture =
+ media::Picture(output_record.picture_id, dqbuf.timestamp.tv_sec);
+ pending_picture_ready_.push(PictureRecord(output_record.cleared, picture));
+ SendPictureReady();
+ output_record.cleared = true;
+ decoder_frames_at_client_++;
+ }
+
+ NotifyFlushDoneIfNeeded();
+}
+
bool ExynosVideoDecodeAccelerator::EnqueueMfcInputRecord() {
DVLOG(3) << "EnqueueMfcInputRecord()";
DCHECK(!mfc_input_ready_queue_.empty());
// Enqueue a MFC input (VIDEO_OUTPUT) buffer.
- const int buffer = mfc_input_ready_queue_.front();
+ const int buffer = mfc_input_ready_queue_.back();
MfcInputRecord& input_record = mfc_input_buffer_map_[buffer];
DCHECK(!input_record.at_device);
struct v4l2_buffer qbuf;
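
[Note] DequeueGsc() above uses the standard drain loop for a non-blocking V4L2 queue: keep calling VIDIOC_DQBUF while buffers are outstanding, treat EAGAIN as "nothing ready yet" and any other errno as fatal. A minimal standalone sketch of that loop for a multi-planar CAPTURE queue; the helper name is illustrative and the per-record bookkeeping from the real code is omitted:

#include <errno.h>
#include <linux/videodev2.h>
#include <sys/ioctl.h>
#include <cstring>

// Drain completed CAPTURE buffers from a non-blocking V4L2 fd.
// Returns the number of buffers dequeued, or -1 on a real device error.
int DrainCaptureQueue(int fd, int queued_count) {
  int dequeued = 0;
  while (queued_count > 0) {
    struct v4l2_buffer dqbuf;
    struct v4l2_plane planes[2];
    memset(&dqbuf, 0, sizeof(dqbuf));
    memset(planes, 0, sizeof(planes));
    dqbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
    dqbuf.memory = V4L2_MEMORY_DMABUF;
    dqbuf.m.planes = planes;
    dqbuf.length = 2;
    if (ioctl(fd, VIDIOC_DQBUF, &dqbuf) != 0) {
      if (errno == EAGAIN)
        break;    // Queue is simply empty for now; not an error.
      return -1;  // Anything else is a device failure.
    }
    // dqbuf.index identifies which buffer completed; dqbuf.timestamp carries
    // whatever was stamped on it at QBUF time (input_id in this decoder).
    --queued_count;
    ++dequeued;
  }
  return dequeued;
}
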
@@ -1196,7 +1462,7 @@
qbuf.m.planes[0].bytesused = input_record.bytes_used;
qbuf.length = 1;
IOCTL_OR_ERROR_RETURN_FALSE(mfc_fd_, VIDIOC_QBUF, &qbuf);
- mfc_input_ready_queue_.pop();
+ mfc_input_ready_queue_.pop_back();
input_record.at_device = true;
mfc_input_buffer_queued_count_++;
DVLOG(3) << "EnqueueMfcInputRecord(): enqueued input_id="
@@ -1209,17 +1475,83 @@
DCHECK(!mfc_free_output_buffers_.empty());
// Enqueue a MFC output (VIDEO_CAPTURE) buffer.
- const int buffer = mfc_free_output_buffers_.front();
+ const int buffer = mfc_free_output_buffers_.back();
MfcOutputRecord& output_record = mfc_output_buffer_map_[buffer];
DCHECK(!output_record.at_device);
+ DCHECK_EQ(output_record.input_id, -1);
+ struct v4l2_buffer qbuf;
+ struct v4l2_plane qbuf_planes[2];
+ memset(&qbuf, 0, sizeof(qbuf));
+ memset(qbuf_planes, 0, sizeof(qbuf_planes));
+ qbuf.index = buffer;
+ qbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ qbuf.memory = V4L2_MEMORY_MMAP;
+ qbuf.m.planes = qbuf_planes;
+ qbuf.length = 2;
+ IOCTL_OR_ERROR_RETURN_FALSE(mfc_fd_, VIDIOC_QBUF, &qbuf);
+ mfc_free_output_buffers_.pop_back();
+ output_record.at_device = true;
+ mfc_output_buffer_queued_count_++;
+ return true;
+}
+
+bool ExynosVideoDecodeAccelerator::EnqueueGscInputRecord() {
+ DVLOG(3) << "EnqueueGscInputRecord()";
+ DCHECK(!gsc_free_input_buffers_.empty());
+
+ // Enqueue a GSC input (VIDEO_OUTPUT) buffer for a complete MFC output
+ // (VIDEO_CAPTURE) buffer.
+ const int mfc_buffer = mfc_output_gsc_input_queue_.front();
+ const int gsc_buffer = gsc_free_input_buffers_.back();
+ MfcOutputRecord& output_record = mfc_output_buffer_map_[mfc_buffer];
+ DCHECK(!output_record.at_device);
+ GscInputRecord& input_record = gsc_input_buffer_map_[gsc_buffer];
+ DCHECK(!input_record.at_device);
+ DCHECK_EQ(input_record.mfc_output, -1);
+ struct v4l2_buffer qbuf;
+ struct v4l2_plane qbuf_planes[2];
+ memset(&qbuf, 0, sizeof(qbuf));
+ memset(qbuf_planes, 0, sizeof(qbuf_planes));
+ qbuf.index = gsc_buffer;
+ qbuf.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ qbuf.timestamp.tv_sec = output_record.input_id;
+ qbuf.memory = V4L2_MEMORY_USERPTR;
+ qbuf.m.planes = qbuf_planes;
+ qbuf.m.planes[0].bytesused = output_record.bytes_used[0];
+ qbuf.m.planes[0].length = mfc_output_buffer_size_[0];
+ qbuf.m.planes[0].m.userptr = (unsigned long)output_record.address[0];
+ qbuf.m.planes[1].bytesused = output_record.bytes_used[1];
+ qbuf.m.planes[1].length = mfc_output_buffer_size_[1];
+ qbuf.m.planes[1].m.userptr = (unsigned long)output_record.address[1];
+ qbuf.length = 2;
+ IOCTL_OR_ERROR_RETURN_FALSE(gsc_fd_, VIDIOC_QBUF, &qbuf);
+ mfc_output_gsc_input_queue_.pop_front();
+ gsc_free_input_buffers_.pop_back();
+ input_record.at_device = true;
+ input_record.mfc_output = mfc_buffer;
+ output_record.bytes_used[0] = 0;
+ output_record.bytes_used[1] = 0;
+ gsc_input_buffer_queued_count_++;
+ DVLOG(3) << "EnqueueGscInputRecord(): enqueued input_id="
+ << output_record.input_id;
+ return true;
+}
+
+bool ExynosVideoDecodeAccelerator::EnqueueGscOutputRecord() {
+ DVLOG(3) << "EnqueueGscOutputRecord()";
+ DCHECK(!gsc_free_output_buffers_.empty());
+
+ // Enqueue a GSC output (VIDEO_CAPTURE) buffer.
+ const int buffer = gsc_free_output_buffers_.front();
+ GscOutputRecord& output_record = gsc_output_buffer_map_[buffer];
+ DCHECK(!output_record.at_device);
DCHECK(!output_record.at_client);
- DCHECK_NE(output_record.egl_image, EGL_NO_IMAGE_KHR);
- DCHECK_NE(output_record.picture_id, -1);
if (output_record.egl_sync != EGL_NO_SYNC_KHR) {
- TRACE_EVENT0("Video Decoder",
- "EVDA::EnqueueMfcOutputRecord: eglClientWaitSyncKHR");
+ TRACE_EVENT0(
+ "Video Decoder",
+ "EVDA::EnqueueGscOutputRecord: eglClientWaitSyncKHR");
// If we have to wait for completion, wait. Note that
- // mfc_free_output_buffers_ is a FIFO queue, so we always wait on the
+ // gsc_free_output_buffers_ is a FIFO queue, so we always wait on the
// buffer that has been in the queue the longest.
eglClientWaitSyncKHR(egl_display_, output_record.egl_sync, 0,
EGL_FOREVER_KHR);
@@ -1227,18 +1559,19 @@
output_record.egl_sync = EGL_NO_SYNC_KHR;
}
struct v4l2_buffer qbuf;
- struct v4l2_plane qbuf_planes[arraysize(output_record.fds)];
+ struct v4l2_plane qbuf_plane;
memset(&qbuf, 0, sizeof(qbuf));
- memset(qbuf_planes, 0, sizeof(qbuf_planes));
- qbuf.index = buffer;
- qbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
- qbuf.memory = V4L2_MEMORY_MMAP;
- qbuf.m.planes = qbuf_planes;
- qbuf.length = arraysize(output_record.fds);
- IOCTL_OR_ERROR_RETURN_FALSE(mfc_fd_, VIDIOC_QBUF, &qbuf);
- mfc_free_output_buffers_.pop();
+ memset(&qbuf_plane, 0, sizeof(qbuf_plane));
+ qbuf.index = buffer;
+ qbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ qbuf.memory = V4L2_MEMORY_DMABUF;
+ qbuf.m.planes = &qbuf_plane;
+ qbuf.m.planes[0].m.fd = output_record.fd;
+ qbuf.length = 1;
+ IOCTL_OR_ERROR_RETURN_FALSE(gsc_fd_, VIDIOC_QBUF, &qbuf);
+ gsc_free_output_buffers_.pop_front();
output_record.at_device = true;
- mfc_output_buffer_queued_count_++;
+ gsc_output_buffer_queued_count_++;
return true;
}
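
[Note] The output enqueue now runs on the GSC CAPTURE queue with V4L2_MEMORY_DMABUF: the dma_buf fd pulled from the Mali EGLImage is handed to the driver through v4l2_plane::m.fd, replacing the old MEMORY_MMAP enqueue on the MFC fd. A minimal sketch of queuing one single-plane DMABUF buffer; the helper name is illustrative and the EGLSync wait from the real code is omitted:

#include <linux/videodev2.h>
#include <sys/ioctl.h>
#include <cstring>

// Queue buffer |index| on a multi-planar CAPTURE queue, importing |dmabuf_fd|
// as the single plane's backing storage.
bool QueueDmabufCapture(int device_fd, int index, int dmabuf_fd) {
  struct v4l2_buffer qbuf;
  struct v4l2_plane plane;
  memset(&qbuf, 0, sizeof(qbuf));
  memset(&plane, 0, sizeof(plane));
  plane.m.fd = dmabuf_fd;  // The imported dma_buf backs this plane.
  qbuf.index = index;
  qbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
  qbuf.memory = V4L2_MEMORY_DMABUF;
  qbuf.m.planes = &plane;
  qbuf.length = 1;  // One plane: packed RGB output.
  return ioctl(device_fd, VIDIOC_QBUF, &qbuf) == 0;
}
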
@@ -1261,17 +1594,17 @@
}
size_t index;
- for (index = 0; index < mfc_output_buffer_map_.size(); ++index)
- if (mfc_output_buffer_map_[index].picture_id == picture_buffer_id)
+ for (index = 0; index < gsc_output_buffer_map_.size(); ++index)
+ if (gsc_output_buffer_map_[index].picture_id == picture_buffer_id)
break;
- if (index >= mfc_output_buffer_map_.size()) {
+ if (index >= gsc_output_buffer_map_.size()) {
DLOG(ERROR) << "ReusePictureBufferTask(): picture_buffer_id not found";
NOTIFY_ERROR(INVALID_ARGUMENT);
return;
}
- MfcOutputRecord& output_record = mfc_output_buffer_map_[index];
+ GscOutputRecord& output_record = gsc_output_buffer_map_[index];
if (output_record.at_device || !output_record.at_client) {
DLOG(ERROR) << "ReusePictureBufferTask(): picture_buffer_id not reusable";
NOTIFY_ERROR(INVALID_ARGUMENT);
@@ -1281,12 +1614,12 @@
DCHECK_EQ(output_record.egl_sync, EGL_NO_SYNC_KHR);
output_record.at_client = false;
output_record.egl_sync = egl_sync_ref->egl_sync;
- mfc_free_output_buffers_.push(index);
+ gsc_free_output_buffers_.push_back(index);
decoder_frames_at_client_--;
// Take ownership of the EGLSync.
egl_sync_ref->egl_sync = EGL_NO_SYNC_KHR;
- // We got a buffer back, so kick the MFC.
- EnqueueMfc();
+ // We got a buffer back, so kick the GSC.
+ EnqueueGsc();
}
void ExynosVideoDecodeAccelerator::FlushTask() {
@@ -1310,9 +1643,9 @@
DCHECK(!decoder_flushing_);
// Queue up an empty buffer -- this triggers the flush.
- decoder_input_queue_.push(
- linked_ptr<BitstreamBufferRef>(new BitstreamBufferRef(
- io_client_, io_message_loop_proxy_, NULL, 0, kFlushBufferId)));
+ decoder_input_queue_.push_back(linked_ptr<BitstreamBufferRef>(
+ new BitstreamBufferRef(io_client_, io_message_loop_proxy_, NULL, 0,
+ kFlushBufferId)));
decoder_flushing_ = true;
SendPictureReady(); // Send all pending PictureReady.
@@ -1328,6 +1661,8 @@
// * There is no currently filling input buffer.
// * MFC input holding queue is empty.
// * All MFC input (VIDEO_OUTPUT) buffers are returned.
+ // * MFC -> GSC holding queue is empty.
+ // * All GSC input (VIDEO_OUTPUT) buffers are returned.
if (!decoder_input_queue_.empty()) {
if (decoder_input_queue_.front()->input_id !=
decoder_delay_bitstream_buffer_id_)
@@ -1335,7 +1670,9 @@
}
if (decoder_current_input_buffer_ != -1)
return;
- if ((mfc_input_ready_queue_.size() + mfc_input_buffer_queued_count_) != 0)
+ if ((mfc_input_ready_queue_.size() +
+ mfc_input_buffer_queued_count_ + mfc_output_gsc_input_queue_.size() +
+ gsc_input_buffer_queued_count_ + gsc_output_buffer_queued_count_ ) != 0)
return;
// TODO(posciak): crbug.com/270039. MFC requires a streamoff-streamon
@@ -1393,8 +1730,7 @@
resolution_change_pending_ = false;
decoder_current_bitstream_buffer_.reset();
- while (!decoder_input_queue_.empty())
- decoder_input_queue_.pop();
+ decoder_input_queue_.clear();
decoder_current_input_buffer_ = -1;
@@ -1451,8 +1787,7 @@
decoder_current_input_buffer_ = -1;
decoder_decode_buffer_tasks_scheduled_ = 0;
decoder_frames_at_client_ = 0;
- while (!decoder_input_queue_.empty())
- decoder_input_queue_.pop();
+ decoder_input_queue_.clear();
decoder_flushing_ = false;
// Set our state to kError. Just in case.
@@ -1503,11 +1838,20 @@
IOCTL_OR_ERROR_RETURN_FALSE(mfc_fd_, VIDIOC_STREAMOFF, &type);
}
mfc_output_streamon_ = false;
+ if (gsc_input_streamon_) {
+ __u32 type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ IOCTL_OR_ERROR_RETURN_FALSE(gsc_fd_, VIDIOC_STREAMOFF, &type);
+ }
+ gsc_input_streamon_ = false;
+ if (gsc_output_streamon_) {
+ __u32 type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ IOCTL_OR_ERROR_RETURN_FALSE(gsc_fd_, VIDIOC_STREAMOFF, &type);
+ }
+ gsc_output_streamon_ = false;
// Reset all our accounting info.
if (!keep_mfc_input_state) {
- while (!mfc_input_ready_queue_.empty())
- mfc_input_ready_queue_.pop();
+ mfc_input_ready_queue_.clear();
mfc_free_input_buffers_.clear();
for (size_t i = 0; i < mfc_input_buffer_map_.size(); ++i) {
mfc_free_input_buffers_.push_back(i);
@@ -1517,18 +1861,30 @@
}
mfc_input_buffer_queued_count_ = 0;
}
- while (!mfc_free_output_buffers_.empty())
- mfc_free_output_buffers_.pop();
+ mfc_free_output_buffers_.clear();
for (size_t i = 0; i < mfc_output_buffer_map_.size(); ++i) {
- MfcOutputRecord& output_record = mfc_output_buffer_map_[i];
- // Only mark those free that aren't being held by the VDA client.
- if (!output_record.at_client) {
- DCHECK_EQ(output_record.egl_sync, EGL_NO_SYNC_KHR);
- mfc_free_output_buffers_.push(i);
- mfc_output_buffer_map_[i].at_device = false;
+ mfc_free_output_buffers_.push_back(i);
+ mfc_output_buffer_map_[i].at_device = false;
+ mfc_output_buffer_map_[i].input_id = -1;
+ }
+ mfc_output_buffer_queued_count_ = 0;
+ mfc_output_gsc_input_queue_.clear();
+ gsc_free_input_buffers_.clear();
+ for (size_t i = 0; i < gsc_input_buffer_map_.size(); ++i) {
+ gsc_free_input_buffers_.push_back(i);
+ gsc_input_buffer_map_[i].at_device = false;
+ gsc_input_buffer_map_[i].mfc_output = -1;
+ }
+ gsc_input_buffer_queued_count_ = 0;
+ gsc_free_output_buffers_.clear();
+ for (size_t i = 0; i < gsc_output_buffer_map_.size(); ++i) {
+ // Only mark those free that aren't being held by the VDA.
+ if (!gsc_output_buffer_map_[i].at_client) {
+ gsc_free_output_buffers_.push_back(i);
+ gsc_output_buffer_map_[i].at_device = false;
}
}
- mfc_output_buffer_queued_count_ = 0;
+ gsc_output_buffer_queued_count_ = 0;
DVLOG(3) << "StopDevicePoll(): device poll stopped";
return true;
@@ -1572,8 +1928,14 @@
if (!resolution_change_pending_)
return;
- DVLOG(3) << "No more work, initiate resolution change";
+ if (!mfc_output_gsc_input_queue_.empty() ||
+ gsc_input_buffer_queued_count_ + gsc_output_buffer_queued_count_ > 0) {
+ DVLOG(3) << "StartResolutionChangeIfNeeded(): waiting for GSC to finish.";
+ return;
+ }
+ DVLOG(3) << "No more work for GSC, initiate resolution change";
+
// Keep MFC input queue.
if (!StopDevicePoll(true))
return;
@@ -1633,6 +1995,7 @@
return;
EnqueueMfc();
+ // Gsc will get enqueued in AssignPictureBuffersTask().
ScheduleDecodeBufferTaskIfNeeded();
}
@@ -1661,6 +2024,14 @@
mfc_pollfd = nfds;
nfds++;
}
+ // Add GSC fd, if we should poll on it.
+ // GSC has to wait until both input and output buffers are queued.
+ if (poll_fds & kPollGsc) {
+ DVLOG(3) << "DevicePollTask(): adding GSC to poll() set";
+ pollfds[nfds].fd = gsc_fd_;
+ pollfds[nfds].events = POLLIN | POLLOUT | POLLERR;
+ nfds++;
+ }
// Poll it!
if (HANDLE_EINTR(poll(pollfds, nfds, -1)) == -1) {
@@ -1737,12 +2108,15 @@
CHECK_EQ(format.fmt.pix_mp.num_planes, 2);
frame_buffer_size_.SetSize(
format.fmt.pix_mp.width, format.fmt.pix_mp.height);
+ mfc_output_buffer_size_[0] = format.fmt.pix_mp.plane_fmt[0].sizeimage;
+ mfc_output_buffer_size_[1] = format.fmt.pix_mp.plane_fmt[1].sizeimage;
mfc_output_buffer_pixelformat_ = format.fmt.pix_mp.pixelformat;
- DCHECK_EQ(mfc_output_buffer_pixelformat_, V4L2_PIX_FMT_NV12M);
+ DCHECK_EQ(mfc_output_buffer_pixelformat_, V4L2_PIX_FMT_NV12MT_16X16);
DVLOG(3) << "CreateBuffersForFormat(): new resolution: "
<< frame_buffer_size_.ToString();
- if (!CreateMfcOutputBuffers())
+ if (!CreateMfcOutputBuffers() || !CreateGscInputBuffers() ||
+ !CreateGscOutputBuffers())
return false;
return true;
@@ -1833,33 +2207,144 @@
reqbufs.memory = V4L2_MEMORY_MMAP;
IOCTL_OR_ERROR_RETURN_FALSE(mfc_fd_, VIDIOC_REQBUFS, &reqbufs);
- // Create DMABUFs from output buffers.
+ // Fill our free-buffers list, and create DMABUFs from them.
mfc_output_buffer_map_.resize(reqbufs.count);
for (size_t i = 0; i < mfc_output_buffer_map_.size(); ++i) {
- MfcOutputRecord& output_record = mfc_output_buffer_map_[i];
- for (size_t j = 0; j < arraysize(output_record.fds); ++j) {
- // Export the DMABUF fd so we can export it as a texture.
- struct v4l2_exportbuffer expbuf;
- memset(&expbuf, 0, sizeof(expbuf));
- expbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
- expbuf.index = i;
- expbuf.plane = j;
- expbuf.flags = O_CLOEXEC;
- IOCTL_OR_ERROR_RETURN_FALSE(mfc_fd_, VIDIOC_EXPBUF, &expbuf);
- output_record.fds[j] = expbuf.fd;
+ mfc_free_output_buffers_.push_back(i);
+
+ // Query for the MEMORY_MMAP pointer.
+ struct v4l2_plane planes[2];
+ struct v4l2_buffer buffer;
+ memset(&buffer, 0, sizeof(buffer));
+ memset(planes, 0, sizeof(planes));
+ buffer.index = i;
+ buffer.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ buffer.memory = V4L2_MEMORY_MMAP;
+ buffer.m.planes = planes;
+ buffer.length = 2;
+ IOCTL_OR_ERROR_RETURN_FALSE(mfc_fd_, VIDIOC_QUERYBUF, &buffer);
+
+ // Get their user memory for GSC input.
+ for (int j = 0; j < 2; ++j) {
+ void* address = mmap(NULL, buffer.m.planes[j].length,
+ PROT_READ | PROT_WRITE, MAP_SHARED, mfc_fd_,
+ buffer.m.planes[j].m.mem_offset);
+ if (address == MAP_FAILED) {
+ DPLOG(ERROR) << "CreateMfcInputBuffers(): mmap() failed";
+ return false;
+ }
+ mfc_output_buffer_map_[i].address[j] = address;
+ mfc_output_buffer_map_[i].length[j] = buffer.m.planes[j].length;
}
}
- DVLOG(3) << "CreateMfcOutputBuffers(): ProvidePictureBuffers(): "
- << "buffer_count=" << mfc_output_buffer_map_.size()
+ return true;
+}
+
+bool ExynosVideoDecodeAccelerator::CreateGscInputBuffers() {
+ DVLOG(3) << "CreateGscInputBuffers()";
+ DCHECK(decoder_state_ == kInitialized ||
+ decoder_state_ == kChangingResolution);
+ DCHECK(!gsc_input_streamon_);
+ DCHECK(gsc_input_buffer_map_.empty());
+
+ struct v4l2_format format;
+ memset(&format, 0, sizeof(format));
+ format.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ format.fmt.pix_mp.width = frame_buffer_size_.width();
+ format.fmt.pix_mp.height = frame_buffer_size_.height();
+ format.fmt.pix_mp.pixelformat = mfc_output_buffer_pixelformat_;
+ format.fmt.pix_mp.plane_fmt[0].sizeimage = mfc_output_buffer_size_[0];
+ format.fmt.pix_mp.plane_fmt[1].sizeimage = mfc_output_buffer_size_[1];
+ // NV12MT_16X16 is a tiled format for which bytesperline doesn't make too much
+ // sense. Convention seems to be to assume 8bpp for these tiled formats.
+ format.fmt.pix_mp.plane_fmt[0].bytesperline = frame_buffer_size_.width();
+ format.fmt.pix_mp.plane_fmt[1].bytesperline = frame_buffer_size_.width();
+ format.fmt.pix_mp.num_planes = 2;
+ IOCTL_OR_ERROR_RETURN_FALSE(gsc_fd_, VIDIOC_S_FMT, &format);
+
+ struct v4l2_control control;
+ memset(&control, 0, sizeof(control));
+ control.id = V4L2_CID_ROTATE;
+ control.value = 0;
+ IOCTL_OR_ERROR_RETURN_FALSE(gsc_fd_, VIDIOC_S_CTRL, &control);
+
+ memset(&control, 0, sizeof(control));
+ control.id = V4L2_CID_HFLIP;
+ control.value = 0;
+ IOCTL_OR_ERROR_RETURN_FALSE(gsc_fd_, VIDIOC_S_CTRL, &control);
+
+ memset(&control, 0, sizeof(control));
+ control.id = V4L2_CID_VFLIP;
+ control.value = 0;
+ IOCTL_OR_ERROR_RETURN_FALSE(gsc_fd_, VIDIOC_S_CTRL, &control);
+
+ memset(&control, 0, sizeof(control));
+ control.id = V4L2_CID_GLOBAL_ALPHA;
+ control.value = 255;
+ if (HANDLE_EINTR(ioctl(gsc_fd_, VIDIOC_S_CTRL, &control)) != 0) {
+ memset(&control, 0, sizeof(control));
+ control.id = V4L2_CID_ALPHA_COMPONENT;
+ control.value = 255;
+ IOCTL_OR_ERROR_RETURN_FALSE(gsc_fd_, VIDIOC_S_CTRL, &control);
+ }
+
+ struct v4l2_requestbuffers reqbufs;
+ memset(&reqbufs, 0, sizeof(reqbufs));
+ reqbufs.count = kGscInputBufferCount;
+ reqbufs.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ reqbufs.memory = V4L2_MEMORY_USERPTR;
+ IOCTL_OR_ERROR_RETURN_FALSE(gsc_fd_, VIDIOC_REQBUFS, &reqbufs);
+
+ gsc_input_buffer_map_.resize(reqbufs.count);
+ for (size_t i = 0; i < gsc_input_buffer_map_.size(); ++i) {
+ gsc_free_input_buffers_.push_back(i);
+ gsc_input_buffer_map_[i].mfc_output = -1;
+ }
+
+ return true;
+}
+
+bool ExynosVideoDecodeAccelerator::CreateGscOutputBuffers() {
+ DVLOG(3) << "CreateGscOutputBuffers()";
+ DCHECK(decoder_state_ == kInitialized ||
+ decoder_state_ == kChangingResolution);
+ DCHECK(!gsc_output_streamon_);
+ DCHECK(gsc_output_buffer_map_.empty());
+
+ // GSC outputs into the EGLImages we create from the textures we are
+ // assigned. Assume RGBA8888 format.
+ struct v4l2_format format;
+ memset(&format, 0, sizeof(format));
+ format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ format.fmt.pix_mp.width = frame_buffer_size_.width();
+ format.fmt.pix_mp.height = frame_buffer_size_.height();
+ format.fmt.pix_mp.pixelformat = V4L2_PIX_FMT_RGB32;
+ format.fmt.pix_mp.plane_fmt[0].sizeimage =
+ frame_buffer_size_.width() * frame_buffer_size_.height() * 4;
+ format.fmt.pix_mp.plane_fmt[0].bytesperline = frame_buffer_size_.width() * 4;
+ format.fmt.pix_mp.num_planes = 1;
+ IOCTL_OR_ERROR_RETURN_FALSE(gsc_fd_, VIDIOC_S_FMT, &format);
+
+ struct v4l2_requestbuffers reqbufs;
+ memset(&reqbufs, 0, sizeof(reqbufs));
+ reqbufs.count = mfc_output_dpb_size_ + kDpbOutputBufferExtraCount;
+ reqbufs.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ reqbufs.memory = V4L2_MEMORY_DMABUF;
+ IOCTL_OR_ERROR_RETURN_FALSE(gsc_fd_, VIDIOC_REQBUFS, &reqbufs);
+
+ // We don't actually fill in the freelist or the map here. That happens once
+ // we have actual usable buffers, after AssignPictureBuffers();
+ gsc_output_buffer_map_.resize(reqbufs.count);
+
+ DVLOG(3) << "CreateGscOutputBuffers(): ProvidePictureBuffers(): "
+ << "buffer_count=" << gsc_output_buffer_map_.size()
<< ", width=" << frame_buffer_size_.width()
<< ", height=" << frame_buffer_size_.height();
- child_message_loop_proxy_->PostTask(FROM_HERE,
- base::Bind(&Client::ProvidePictureBuffers,
- client_,
- mfc_output_buffer_map_.size(),
- frame_buffer_size_,
- GL_TEXTURE_EXTERNAL_OES));
+ child_message_loop_proxy_->PostTask(FROM_HERE, base::Bind(
+ &Client::ProvidePictureBuffers, client_, gsc_output_buffer_map_.size(),
+ gfx::Size(frame_buffer_size_.width(), frame_buffer_size_.height()),
+ GL_TEXTURE_2D));
return true;
}
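
[Note] With GSC back in the pipeline, MFC CAPTURE buffers are no longer exported with VIDIOC_EXPBUF; they are mmap()ed so their user-space addresses can later be queued on GSC's OUTPUT side as V4L2_MEMORY_USERPTR. A minimal sketch of the REQBUFS / QUERYBUF / mmap sequence for one two-plane MMAP buffer; the helper name is illustrative, and the real code records address and length per plane in MfcOutputRecord:

#include <linux/videodev2.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <cstddef>
#include <cstring>

// Allocate |count| MMAP buffers on the CAPTURE queue and map both planes of
// buffer 0 into the process, returning addresses and lengths via |out|.
bool MapFirstCaptureBuffer(int fd, unsigned int count, void* out[2],
                           size_t out_len[2]) {
  struct v4l2_requestbuffers reqbufs;
  memset(&reqbufs, 0, sizeof(reqbufs));
  reqbufs.count = count;
  reqbufs.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
  reqbufs.memory = V4L2_MEMORY_MMAP;
  if (ioctl(fd, VIDIOC_REQBUFS, &reqbufs) != 0 || reqbufs.count == 0)
    return false;

  struct v4l2_buffer buffer;
  struct v4l2_plane planes[2];
  memset(&buffer, 0, sizeof(buffer));
  memset(planes, 0, sizeof(planes));
  buffer.index = 0;
  buffer.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
  buffer.memory = V4L2_MEMORY_MMAP;
  buffer.m.planes = planes;
  buffer.length = 2;
  if (ioctl(fd, VIDIOC_QUERYBUF, &buffer) != 0)
    return false;

  for (int j = 0; j < 2; ++j) {
    // Each plane gets its own mapping at the offset the driver reported.
    out[j] = mmap(NULL, buffer.m.planes[j].length, PROT_READ | PROT_WRITE,
                  MAP_SHARED, fd, buffer.m.planes[j].m.mem_offset);
    if (out[j] == MAP_FAILED)
      return false;
    out_len[j] = buffer.m.planes[j].length;
  }
  return true;
}
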
@@ -1893,32 +2378,13 @@
DCHECK(child_message_loop_proxy_->BelongsToCurrentThread());
DCHECK(!mfc_output_streamon_);
- if (mfc_output_buffer_map_.size() != 0) {
- if (!make_context_current_.Run()) {
- DLOG(ERROR) << "DestroyMfcOutputBuffers(): "
- << "could not make context current";
- } else {
- size_t i = 0;
- do {
- MfcOutputRecord& output_record = mfc_output_buffer_map_[i];
- for (size_t j = 0; j < arraysize(output_record.fds); ++j) {
- if (output_record.fds[j] != -1)
- HANDLE_EINTR(close(output_record.fds[j]));
- if (output_record.egl_image != EGL_NO_IMAGE_KHR)
- eglDestroyImageKHR(egl_display_, output_record.egl_image);
- if (output_record.egl_sync != EGL_NO_SYNC_KHR)
- eglDestroySyncKHR(egl_display_, output_record.egl_sync);
- }
- DVLOG(1) << "DestroyMfcOutputBuffers(): dismissing PictureBuffer id="
- << output_record.picture_id;
- child_message_loop_proxy_->PostTask(
- FROM_HERE,
- base::Bind(&Client::DismissPictureBuffer,
- client_,
- output_record.picture_id));
- i++;
- } while (i < mfc_output_buffer_map_.size());
- }
+ for (size_t i = 0; i < mfc_output_buffer_map_.size(); ++i) {
+ if (mfc_output_buffer_map_[i].address[0] != NULL)
+ munmap(mfc_output_buffer_map_[i].address[0],
+ mfc_output_buffer_map_[i].length[0]);
+ if (mfc_output_buffer_map_[i].address[1] != NULL)
+ munmap(mfc_output_buffer_map_[i].address[1],
+ mfc_output_buffer_map_[i].length[1]);
}
struct v4l2_requestbuffers reqbufs;
@@ -1930,14 +2396,72 @@
DPLOG(ERROR) << "DestroyMfcOutputBuffers() ioctl() failed: VIDIOC_REQBUFS";
mfc_output_buffer_map_.clear();
- while (!mfc_free_output_buffers_.empty())
- mfc_free_output_buffers_.pop();
+ mfc_free_output_buffers_.clear();
}
+void ExynosVideoDecodeAccelerator::DestroyGscInputBuffers() {
+ DVLOG(3) << "DestroyGscInputBuffers()";
+ DCHECK(child_message_loop_proxy_->BelongsToCurrentThread());
+ DCHECK(!gsc_input_streamon_);
+
+ struct v4l2_requestbuffers reqbufs;
+ memset(&reqbufs, 0, sizeof(reqbufs));
+ reqbufs.count = 0;
+ reqbufs.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ reqbufs.memory = V4L2_MEMORY_DMABUF;
+ if (ioctl(gsc_fd_, VIDIOC_REQBUFS, &reqbufs) != 0)
+ DPLOG(ERROR) << "DestroyGscInputBuffers(): ioctl() failed: VIDIOC_REQBUFS";
+
+ gsc_input_buffer_map_.clear();
+ gsc_free_input_buffers_.clear();
+}
+
+void ExynosVideoDecodeAccelerator::DestroyGscOutputBuffers() {
+ DVLOG(3) << "DestroyGscOutputBuffers()";
+ DCHECK(child_message_loop_proxy_->BelongsToCurrentThread());
+ DCHECK(!gsc_output_streamon_);
+
+ if (gsc_output_buffer_map_.size() != 0) {
+ if (!make_context_current_.Run())
+ DLOG(ERROR) << "DestroyGscOutputBuffers(): "
+ << "could not make context current";
+
+ size_t i = 0;
+ do {
+ GscOutputRecord& output_record = gsc_output_buffer_map_[i];
+ if (output_record.fd != -1)
+ HANDLE_EINTR(close(output_record.fd));
+ if (output_record.egl_image != EGL_NO_IMAGE_KHR)
+ eglDestroyImageKHR(egl_display_, output_record.egl_image);
+ if (output_record.egl_sync != EGL_NO_SYNC_KHR)
+ eglDestroySyncKHR(egl_display_, output_record.egl_sync);
+ if (client_) {
+ DVLOG(1) << "DestroyGscOutputBuffers(): "
+ << "dismissing PictureBuffer id=" << output_record.picture_id;
+ client_->DismissPictureBuffer(output_record.picture_id);
+ }
+ ++i;
+ } while (i < gsc_output_buffer_map_.size());
+ }
+
+ struct v4l2_requestbuffers reqbufs;
+ memset(&reqbufs, 0, sizeof(reqbufs));
+ reqbufs.count = 0;
+ reqbufs.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ reqbufs.memory = V4L2_MEMORY_DMABUF;
+ if (ioctl(gsc_fd_, VIDIOC_REQBUFS, &reqbufs) != 0)
+ DPLOG(ERROR) << "DestroyGscOutputBuffers(): ioctl() failed: VIDIOC_REQBUFS";
+
+ gsc_output_buffer_map_.clear();
+ gsc_free_output_buffers_.clear();
+}
+
void ExynosVideoDecodeAccelerator::ResolutionChangeDestroyBuffers() {
DCHECK(child_message_loop_proxy_->BelongsToCurrentThread());
DVLOG(3) << "ResolutionChangeDestroyBuffers()";
+ DestroyGscInputBuffers();
+ DestroyGscOutputBuffers();
DestroyMfcOutputBuffers();
// Finish resolution change on decoder thread.
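
[Note] Teardown of both GSC queues uses the usual V4L2 idiom: once streaming is off, a VIDIOC_REQBUFS call with count set to 0 releases the driver-side allocations for that queue. A minimal sketch; the helper name is illustrative:

#include <linux/videodev2.h>
#include <sys/ioctl.h>
#include <cstring>

// Ask the driver to free all buffers previously allocated on |type|.
bool ReleaseQueueBuffers(int fd, enum v4l2_buf_type type, __u32 memory) {
  struct v4l2_requestbuffers reqbufs;
  memset(&reqbufs, 0, sizeof(reqbufs));
  reqbufs.count = 0;  // Zero count means "free everything on this queue".
  reqbufs.type = type;
  reqbufs.memory = memory;
  return ioctl(fd, VIDIOC_REQBUFS, &reqbufs) == 0;
}
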
@@ -1947,7 +2471,6 @@
}
void ExynosVideoDecodeAccelerator::SendPictureReady() {
- DVLOG(3) << "SendPictureReady()";
DCHECK_EQ(decoder_thread_.message_loop(), base::MessageLoop::current());
bool resetting_or_flushing =
(decoder_state_ == kResetting || decoder_flushing_);