Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(1170)

Unified Diff: media/gpu/v4l2_jpeg_decode_accelerator.cc

Issue 2559423002: media/gpu: switch v4l2_jpeg_decode_accelerator to use multi-planar APIs (Closed)
Patch Set: add static_assert for kMaxInputPlanes Created 4 years ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View side-by-side diff with in-line comments
Download patch
Index: media/gpu/v4l2_jpeg_decode_accelerator.cc
diff --git a/media/gpu/v4l2_jpeg_decode_accelerator.cc b/media/gpu/v4l2_jpeg_decode_accelerator.cc
index ab39a53fdbca2ecfa36ba42311435164a6a9f458..6029b065570908435ad02b150ba099722dc1895d 100644
--- a/media/gpu/v4l2_jpeg_decode_accelerator.cc
+++ b/media/gpu/v4l2_jpeg_decode_accelerator.cc
@@ -107,8 +107,10 @@ const uint8_t kDefaultDhtSeg[] = {
0xD5, 0xD6, 0xD7, 0xD8, 0xD9, 0xDA, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7,
0xE8, 0xE9, 0xEA, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8, 0xF9, 0xFA};
-V4L2JpegDecodeAccelerator::BufferRecord::BufferRecord()
- : address(nullptr), length(0), at_device(false) {}
+V4L2JpegDecodeAccelerator::BufferRecord::BufferRecord() : at_device(false) {
+ memset(address, 0, sizeof(address));
+ memset(length, 0, sizeof(length));
+}
V4L2JpegDecodeAccelerator::BufferRecord::~BufferRecord() {}
@@ -125,6 +127,7 @@ V4L2JpegDecodeAccelerator::V4L2JpegDecodeAccelerator(
const scoped_refptr<V4L2Device>& device,
const scoped_refptr<base::SingleThreadTaskRunner>& io_task_runner)
: output_buffer_pixelformat_(0),
+ output_buffer_num_planes_(0),
child_task_runner_(base::ThreadTaskRunnerHandle::Get()),
io_task_runner_(io_task_runner),
client_(nullptr),
@@ -134,6 +137,10 @@ V4L2JpegDecodeAccelerator::V4L2JpegDecodeAccelerator(
input_streamon_(false),
output_streamon_(false),
weak_factory_(this) {
+ // This class can only handle V4L2_PIX_FMT_JPEG as input, so kMaxInputPlanes
+ // can only be 1.
+ static_assert(V4L2JpegDecodeAccelerator::kMaxInputPlanes == 1,
+ "kMaxInputPlanes must be 1 as input must be V4L2_PIX_FMT_JPEG");
weak_ptr_ = weak_factory_.GetWeakPtr();
}
@@ -194,7 +201,7 @@ bool V4L2JpegDecodeAccelerator::Initialize(Client* client) {
// Capabilities check.
struct v4l2_capability caps;
- const __u32 kCapsRequired = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_M2M;
+ const __u32 kCapsRequired = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_M2M_MPLANE;
memset(&caps, 0, sizeof(caps));
if (device_->Ioctl(VIDIOC_QUERYCAP, &caps) != 0) {
PLOG(ERROR) << __func__ << ": ioctl() failed: VIDIOC_QUERYCAP";
@@ -297,7 +304,7 @@ bool V4L2JpegDecodeAccelerator::ShouldRecreateInputBuffers() {
// Check input buffer size is enough
return (input_buffer_map_.empty() ||
(job_record->shm.size() + sizeof(kDefaultDhtSeg)) >
- input_buffer_map_.front().length);
+ input_buffer_map_.front().length[0]);
}
bool V4L2JpegDecodeAccelerator::RecreateInputBuffers() {
@@ -344,16 +351,17 @@ bool V4L2JpegDecodeAccelerator::CreateInputBuffers() {
size_t reserve_size = (job_record->shm.size() + sizeof(kDefaultDhtSeg)) * 2;
struct v4l2_format format;
memset(&format, 0, sizeof(format));
- format.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
- format.fmt.pix.pixelformat = V4L2_PIX_FMT_JPEG;
- format.fmt.pix.sizeimage = reserve_size;
- format.fmt.pix.field = V4L2_FIELD_ANY;
+ format.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ format.fmt.pix_mp.pixelformat = V4L2_PIX_FMT_JPEG;
+ format.fmt.pix_mp.plane_fmt[0].sizeimage = reserve_size;
+ format.fmt.pix_mp.field = V4L2_FIELD_ANY;
+ format.fmt.pix_mp.num_planes = kMaxInputPlanes;
IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_S_FMT, &format);
struct v4l2_requestbuffers reqbufs;
memset(&reqbufs, 0, sizeof(reqbufs));
reqbufs.count = kBufferCount;
- reqbufs.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ reqbufs.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
reqbufs.memory = V4L2_MEMORY_MMAP;
IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_REQBUFS, &reqbufs);
@@ -364,20 +372,20 @@ bool V4L2JpegDecodeAccelerator::CreateInputBuffers() {
free_input_buffers_.push_back(i);
struct v4l2_buffer buffer;
+ struct v4l2_plane plane;
Pawel Osciak 2016/12/20 05:19:12 Perhaps s/plane/planes[kMaxInputPlanes]/ for consistency.
jcliang 2016/12/20 08:33:21 I'm replacing all the v4l2_plane arrays with planes[VIDEO_MAX_PLANES], per the suggestion below.
memset(&buffer, 0, sizeof(buffer));
+ memset(&plane, 0, sizeof(plane));
buffer.index = i;
- buffer.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ buffer.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ buffer.m.planes = &plane;
+ buffer.length = kMaxInputPlanes;
Pawel Osciak 2016/12/20 05:19:12 arraysize(planes)
jcliang 2016/12/20 08:33:21 Done.
buffer.memory = V4L2_MEMORY_MMAP;
IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_QUERYBUF, &buffer);
- void* address = device_->Mmap(NULL, buffer.length, PROT_READ | PROT_WRITE,
- MAP_SHARED, buffer.m.offset);
- if (address == MAP_FAILED) {
- PLOG(ERROR) << __func__ << ": mmap() failed";
- PostNotifyError(kInvalidBitstreamBufferId, PLATFORM_FAILURE);
- return false;
- }
- input_buffer_map_[i].address = address;
- input_buffer_map_[i].length = buffer.length;
+ DCHECK_EQ(1u, buffer.length);
Pawel Osciak 2016/12/20 05:19:12 s/1u/kMaxInputPlanes/ s/DCHECK_EQ(...)/if (!...)
jcliang 2016/12/20 08:33:21 Done.
+ void* address = device_->Mmap(NULL, plane.length, PROT_READ | PROT_WRITE,
Pawel Osciak 2016/12/20 05:19:12 for (i=0;i<kMaxInputPlanes;++i)
jcliang 2016/12/20 08:33:21 Done.
+ MAP_SHARED, plane.m.mem_offset);
+ input_buffer_map_[i].address[0] = address;
Pawel Osciak 2016/12/20 05:19:12 s/0/i/, etc.
jcliang 2016/12/20 08:33:21 Done.
+ input_buffer_map_[i].length[0] = plane.length;
}
return true;
@@ -394,21 +402,23 @@ bool V4L2JpegDecodeAccelerator::CreateOutputBuffers() {
PIXEL_FORMAT_I420, job_record->out_frame->coded_size());
struct v4l2_format format;
memset(&format, 0, sizeof(format));
- format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- format.fmt.pix.width = job_record->out_frame->coded_size().width();
- format.fmt.pix.height = job_record->out_frame->coded_size().height();
- format.fmt.pix.sizeimage = frame_size;
- format.fmt.pix.pixelformat = V4L2_PIX_FMT_YUV420;
- format.fmt.pix.field = V4L2_FIELD_ANY;
+ format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ format.fmt.pix_mp.width = job_record->out_frame->coded_size().width();
+ format.fmt.pix_mp.height = job_record->out_frame->coded_size().height();
+ format.fmt.pix_mp.num_planes = 1;
Pawel Osciak 2016/12/20 05:19:12 Should we be using kMaxOutputPlanes?
jcliang 2016/12/20 08:33:22 I thought we should set a num_planes matching the format we request, and let the driver adjust it if necessary.
+ format.fmt.pix_mp.pixelformat = V4L2_PIX_FMT_YUV420;
+ format.fmt.pix_mp.plane_fmt[0].sizeimage = frame_size;
+ format.fmt.pix_mp.field = V4L2_FIELD_ANY;
IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_S_FMT, &format);
- output_buffer_pixelformat_ = format.fmt.pix.pixelformat;
- output_buffer_coded_size_.SetSize(format.fmt.pix.width,
- format.fmt.pix.height);
+ output_buffer_pixelformat_ = format.fmt.pix_mp.pixelformat;
+ output_buffer_coded_size_.SetSize(format.fmt.pix_mp.width,
+ format.fmt.pix_mp.height);
+ output_buffer_num_planes_ = format.fmt.pix_mp.num_planes;
struct v4l2_requestbuffers reqbufs;
memset(&reqbufs, 0, sizeof(reqbufs));
reqbufs.count = kBufferCount;
- reqbufs.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ reqbufs.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
reqbufs.memory = V4L2_MEMORY_MMAP;
IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_REQBUFS, &reqbufs);
@@ -422,26 +432,34 @@ bool V4L2JpegDecodeAccelerator::CreateOutputBuffers() {
free_output_buffers_.push_back(i);
struct v4l2_buffer buffer;
+ struct v4l2_plane planes[output_buffer_num_planes_];
Pawel Osciak 2016/12/20 05:19:12 It might be good to not use a non-const here, perhaps use a fixed-size array instead.
jcliang 2016/12/20 08:33:21 Done.
memset(&buffer, 0, sizeof(buffer));
+ memset(planes, 0, sizeof(planes));
buffer.index = i;
- buffer.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ buffer.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
buffer.memory = V4L2_MEMORY_MMAP;
+ buffer.m.planes = planes;
+ buffer.length = output_buffer_num_planes_;
IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_QUERYBUF, &buffer);
- DCHECK_GE(buffer.length,
- VideoFrame::AllocationSize(
- output_format,
- gfx::Size(format.fmt.pix.width, format.fmt.pix.height)));
-
- void* address = device_->Mmap(NULL, buffer.length, PROT_READ | PROT_WRITE,
- MAP_SHARED, buffer.m.offset);
- if (address == MAP_FAILED) {
- PLOG(ERROR) << __func__ << ": mmap() failed";
- PostNotifyError(kInvalidBitstreamBufferId, PLATFORM_FAILURE);
- return false;
+ DCHECK_EQ(output_buffer_num_planes_, buffer.length);
Pawel Osciak 2016/12/20 05:19:12 This should preferably be an if() also.
jcliang 2016/12/20 08:33:21 Done.
+ for (size_t j = 0; j < buffer.length; ++j) {
+ DCHECK_GE(static_cast<int64_t>(planes[j].length),
+ VideoFrame::PlaneSize(output_format, j,
+ gfx::Size(format.fmt.pix_mp.width,
+ format.fmt.pix_mp.height))
+ .GetArea());
+ void* address =
+ device_->Mmap(NULL, planes[j].length, PROT_READ | PROT_WRITE,
+ MAP_SHARED, planes[j].m.mem_offset);
+ if (address == MAP_FAILED) {
+ PLOG(ERROR) << __func__ << ": mmap() failed";
+ PostNotifyError(kInvalidBitstreamBufferId, PLATFORM_FAILURE);
+ return false;
+ }
+ output_buffer_map_[i].address[j] = address;
+ output_buffer_map_[i].length[j] = planes[j].length;
}
- output_buffer_map_[i].address = address;
- output_buffer_map_[i].length = buffer.length;
}
return true;
@@ -456,20 +474,19 @@ void V4L2JpegDecodeAccelerator::DestroyInputBuffers() {
return;
if (input_streamon_) {
- __u32 type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ __u32 type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
IOCTL_OR_ERROR_RETURN(VIDIOC_STREAMOFF, &type);
input_streamon_ = false;
}
- for (size_t i = 0; i < input_buffer_map_.size(); ++i) {
- BufferRecord& input_record = input_buffer_map_[i];
- device_->Munmap(input_record.address, input_record.length);
+ for (const auto& input_record : input_buffer_map_) {
Pawel Osciak 2016/12/20 05:19:12 kMaxInputPlanes for consistency?
jcliang 2016/12/20 08:33:21 Done.
+ device_->Munmap(input_record.address[0], input_record.length[0]);
}
struct v4l2_requestbuffers reqbufs;
memset(&reqbufs, 0, sizeof(reqbufs));
reqbufs.count = 0;
- reqbufs.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ reqbufs.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
reqbufs.memory = V4L2_MEMORY_MMAP;
IOCTL_OR_LOG_ERROR(VIDIOC_REQBUFS, &reqbufs);
@@ -485,24 +502,26 @@ void V4L2JpegDecodeAccelerator::DestroyOutputBuffers() {
return;
if (output_streamon_) {
- __u32 type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ __u32 type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
IOCTL_OR_ERROR_RETURN(VIDIOC_STREAMOFF, &type);
output_streamon_ = false;
}
- for (size_t i = 0; i < output_buffer_map_.size(); ++i) {
- BufferRecord& output_record = output_buffer_map_[i];
- device_->Munmap(output_record.address, output_record.length);
+ for (const auto& output_record : output_buffer_map_) {
+ for (size_t i = 0; i < output_buffer_num_planes_; ++i) {
+ device_->Munmap(output_record.address[i], output_record.length[i]);
+ }
}
struct v4l2_requestbuffers reqbufs;
memset(&reqbufs, 0, sizeof(reqbufs));
reqbufs.count = 0;
- reqbufs.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ reqbufs.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
reqbufs.memory = V4L2_MEMORY_MMAP;
IOCTL_OR_LOG_ERROR(VIDIOC_REQBUFS, &reqbufs);
output_buffer_map_.clear();
+ output_buffer_num_planes_ = 0;
}
void V4L2JpegDecodeAccelerator::DevicePollTask() {
@@ -597,7 +616,7 @@ void V4L2JpegDecodeAccelerator::EnqueueInput() {
// Check here because we cannot STREAMON before QBUF in earlier kernel.
// (kernel version < 3.14)
if (!input_streamon_ && InputBufferQueuedCount()) {
- __u32 type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ __u32 type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
IOCTL_OR_ERROR_RETURN(VIDIOC_STREAMON, &type);
input_streamon_ = true;
}
@@ -615,19 +634,15 @@ void V4L2JpegDecodeAccelerator::EnqueueOutput() {
// Check here because we cannot STREAMON before QBUF in earlier kernel.
// (kernel version < 3.14)
if (!output_streamon_ && OutputBufferQueuedCount()) {
- __u32 type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ __u32 type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
IOCTL_OR_ERROR_RETURN(VIDIOC_STREAMON, &type);
output_streamon_ = true;
}
}
-static bool CopyOutputImage(const uint32_t src_pixelformat,
- const void* src_addr,
- const gfx::Size& src_coded_size,
- const scoped_refptr<VideoFrame>& dst_frame) {
- VideoPixelFormat format =
- V4L2Device::V4L2PixFmtToVideoPixelFormat(src_pixelformat);
- size_t src_size = VideoFrame::AllocationSize(format, src_coded_size);
+bool V4L2JpegDecodeAccelerator::ConvertOutputImage(
+ const BufferRecord& output_buffer,
+ const scoped_refptr<VideoFrame>& dst_frame) {
uint8_t* dst_y = dst_frame->data(VideoFrame::kYPlane);
uint8_t* dst_u = dst_frame->data(VideoFrame::kUPlane);
uint8_t* dst_v = dst_frame->data(VideoFrame::kVPlane);
@@ -635,20 +650,54 @@ static bool CopyOutputImage(const uint32_t src_pixelformat,
size_t dst_u_stride = dst_frame->stride(VideoFrame::kUPlane);
size_t dst_v_stride = dst_frame->stride(VideoFrame::kVPlane);
- // If the source format is I420, ConvertToI420 will simply copy the frame.
- if (libyuv::ConvertToI420(static_cast<uint8_t*>(const_cast<void*>(src_addr)),
- src_size,
- dst_y, dst_y_stride,
- dst_u, dst_u_stride,
- dst_v, dst_v_stride,
- 0, 0,
- src_coded_size.width(),
- src_coded_size.height(),
- dst_frame->coded_size().width(),
- dst_frame->coded_size().height(),
- libyuv::kRotate0,
- src_pixelformat)) {
- LOG(ERROR) << "ConvertToI420 failed. Source format: " << src_pixelformat;
+ if (output_buffer_num_planes_ == 1) {
+ // Use ConvertToI420 to convert all splane buffers.
+ // If the source format is I420, ConvertToI420 will simply copy the frame.
+ VideoPixelFormat format =
+ V4L2Device::V4L2PixFmtToVideoPixelFormat(output_buffer_pixelformat_);
+ size_t src_size =
+ VideoFrame::AllocationSize(format, output_buffer_coded_size_);
+ if (libyuv::ConvertToI420(
+ static_cast<uint8_t*>(output_buffer.address[0]), src_size, dst_y,
+ dst_y_stride, dst_u, dst_u_stride, dst_v, dst_v_stride, 0, 0,
+ output_buffer_coded_size_.width(),
+ output_buffer_coded_size_.height(), dst_frame->coded_size().width(),
+ dst_frame->coded_size().height(), libyuv::kRotate0,
+ output_buffer_pixelformat_)) {
+ LOG(ERROR) << "ConvertToI420 failed. Source format: "
+ << output_buffer_pixelformat_;
+ return false;
+ }
+ } else if (output_buffer_pixelformat_ == V4L2_PIX_FMT_YUV420M ||
Pawel Osciak 2016/12/20 05:19:12 We are not setting the format to V4L2_PIX_FMT_YUV420M or V4L2_PIX_FMT_YUV422M anywhere though — where do these come from?
jcliang 2016/12/20 08:33:22 The mtk-jpeg decoder only supports V4L2_PIX_FMT_YUV420M and V4L2_PIX_FMT_YUV422M as output formats, so those are what the driver returns from S_FMT.
+ output_buffer_pixelformat_ == V4L2_PIX_FMT_YUV422M) {
+ uint8_t* src_y = static_cast<uint8_t*>(output_buffer.address[0]);
+ uint8_t* src_u = static_cast<uint8_t*>(output_buffer.address[1]);
+ uint8_t* src_v = static_cast<uint8_t*>(output_buffer.address[2]);
+ size_t src_y_stride = output_buffer_coded_size_.width();
+ size_t src_u_stride = output_buffer_coded_size_.width() / 2;
+ size_t src_v_stride = output_buffer_coded_size_.width() / 2;
+ if (output_buffer_pixelformat_ == V4L2_PIX_FMT_YUV420M) {
+ if (libyuv::I420Copy(src_y, src_y_stride, src_u, src_u_stride, src_v,
+ src_v_stride, dst_y, dst_y_stride, dst_u,
+ dst_u_stride, dst_v, dst_v_stride,
+ output_buffer_coded_size_.width(),
+ output_buffer_coded_size_.height())) {
+ LOG(ERROR) << "I420Copy failed";
+ return false;
+ }
+ } else { // output_buffer_pixelformat_ == V4L2_PIX_FMT_YUV422M
+ if (libyuv::I422ToI420(src_y, src_y_stride, src_u, src_u_stride, src_v,
+ src_v_stride, dst_y, dst_y_stride, dst_u,
+ dst_u_stride, dst_v, dst_v_stride,
+ output_buffer_coded_size_.width(),
+ output_buffer_coded_size_.height())) {
+ LOG(ERROR) << "I422ToI420 failed";
+ return false;
+ }
+ }
+ } else {
+ LOG(ERROR) << "Unsupported source buffer format: "
+ << output_buffer_pixelformat_;
return false;
}
return true;
@@ -656,15 +705,20 @@ static bool CopyOutputImage(const uint32_t src_pixelformat,
void V4L2JpegDecodeAccelerator::Dequeue() {
DCHECK(decoder_task_runner_->BelongsToCurrentThread());
+ DCHECK_GE(output_buffer_num_planes_, 0u);
// Dequeue completed input (VIDEO_OUTPUT) buffers,
// and recycle to the free list.
struct v4l2_buffer dqbuf;
+ struct v4l2_plane input_plane;
Pawel Osciak 2016/12/20 05:19:12 We could use VIDEO_MAX_PLANES everywhere for v4l2_plane arrays.
jcliang 2016/12/20 08:33:22 Done.
while (InputBufferQueuedCount() > 0) {
DCHECK(input_streamon_);
memset(&dqbuf, 0, sizeof(dqbuf));
- dqbuf.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ memset(&input_plane, 0, sizeof(input_plane));
+ dqbuf.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
dqbuf.memory = V4L2_MEMORY_MMAP;
+ dqbuf.length = kMaxInputPlanes;
+ dqbuf.m.planes = &input_plane;
if (device_->Ioctl(VIDIOC_DQBUF, &dqbuf) != 0) {
if (errno == EAGAIN) {
// EAGAIN if we're just out of buffers to dequeue.
@@ -691,14 +745,18 @@ void V4L2JpegDecodeAccelerator::Dequeue() {
// If dequeued input buffer has an error, the error frame has removed from
// |running_jobs_|. We only have to dequeue output buffer when we actually
// have pending frames in |running_jobs_| and also enqueued output buffers.
+ struct v4l2_plane output_planes[output_buffer_num_planes_];
while (!running_jobs_.empty() && OutputBufferQueuedCount() > 0) {
DCHECK(output_streamon_);
memset(&dqbuf, 0, sizeof(dqbuf));
- dqbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ memset(output_planes, 0, sizeof(output_planes));
+ dqbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
// From experiments, using MMAP and memory copy is still faster than
// USERPTR. Also, client doesn't need to consider the buffer alignment and
// JpegDecodeAccelerator API will be simpler.
dqbuf.memory = V4L2_MEMORY_MMAP;
+ dqbuf.length = output_buffer_num_planes_;
+ dqbuf.m.planes = output_planes;
if (device_->Ioctl(VIDIOC_DQBUF, &dqbuf) != 0) {
if (errno == EAGAIN) {
// EAGAIN if we're just out of buffers to dequeue.
@@ -724,12 +782,10 @@ void V4L2JpegDecodeAccelerator::Dequeue() {
// Copy the decoded data from output buffer to the buffer provided by the
// client. Do format conversion when output format is not
// V4L2_PIX_FMT_YUV420.
- if (!CopyOutputImage(output_buffer_pixelformat_, output_record.address,
- output_buffer_coded_size_, job_record->out_frame)) {
+ if (!ConvertOutputImage(output_record, job_record->out_frame)) {
PostNotifyError(job_record->bitstream_buffer_id, PLATFORM_FAILURE);
return;
}
-
DVLOG(3) << "Decoding finished, returning bitstream buffer, id="
<< job_record->bitstream_buffer_id;
@@ -834,16 +890,21 @@ bool V4L2JpegDecodeAccelerator::EnqueueInputRecord() {
// It will add default huffman segment if it's missing.
if (!AddHuffmanTable(job_record->shm.memory(), job_record->shm.size(),
- input_record.address, input_record.length)) {
+ input_record.address[0], input_record.length[0])) {
PostNotifyError(job_record->bitstream_buffer_id, PARSE_JPEG_FAILED);
return false;
}
struct v4l2_buffer qbuf;
+ struct v4l2_plane plane;
memset(&qbuf, 0, sizeof(qbuf));
+ memset(&plane, 0, sizeof(plane));
qbuf.index = index;
- qbuf.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ qbuf.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
qbuf.memory = V4L2_MEMORY_MMAP;
+ qbuf.length = kMaxInputPlanes;
+ plane.bytesused = input_record.length[0];
+ qbuf.m.planes = &plane;
IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_QBUF, &qbuf);
input_record.at_device = true;
running_jobs_.push(job_record);
@@ -857,16 +918,21 @@ bool V4L2JpegDecodeAccelerator::EnqueueInputRecord() {
bool V4L2JpegDecodeAccelerator::EnqueueOutputRecord() {
DCHECK(!free_output_buffers_.empty());
+ DCHECK_GE(output_buffer_num_planes_, 0u);
// Enqueue an output (VIDEO_CAPTURE) buffer.
const int index = free_output_buffers_.back();
BufferRecord& output_record = output_buffer_map_[index];
DCHECK(!output_record.at_device);
struct v4l2_buffer qbuf;
+ struct v4l2_plane planes[output_buffer_num_planes_];
memset(&qbuf, 0, sizeof(qbuf));
+ memset(&planes, 0, sizeof(planes));
qbuf.index = index;
- qbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ qbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
qbuf.memory = V4L2_MEMORY_MMAP;
+ qbuf.length = output_buffer_num_planes_;
+ qbuf.m.planes = planes;
IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_QBUF, &qbuf);
output_record.at_device = true;
free_output_buffers_.pop_back();
« media/gpu/v4l2_jpeg_decode_accelerator.h ('K') | « media/gpu/v4l2_jpeg_decode_accelerator.h ('k') | no next file » | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698