Chromium Code Reviews

| Index: content/common/gpu/media/v4l2_slice_video_decode_accelerator.cc |
| diff --git a/content/common/gpu/media/v4l2_slice_video_decode_accelerator.cc b/content/common/gpu/media/v4l2_slice_video_decode_accelerator.cc |
| index 1e0943d82f149de85e8f8ab93c9c5196aaacc875..c52ff78f2caf88cc58bc16dd68be0f596482a91d 100644 |
| --- a/content/common/gpu/media/v4l2_slice_video_decode_accelerator.cc |
| +++ b/content/common/gpu/media/v4l2_slice_video_decode_accelerator.cc |
| @@ -162,10 +162,10 @@ V4L2SliceVideoDecodeAccelerator::OutputRecord::OutputRecord() |
| : at_device(false), |
| at_client(false), |
| picture_id(-1), |
| + texture_id(0), |
| egl_image(EGL_NO_IMAGE_KHR), |
| egl_sync(EGL_NO_SYNC_KHR), |
| - cleared(false) { |
| -} |
| + cleared(false) {} |
| struct V4L2SliceVideoDecodeAccelerator::BitstreamBufferRef { |
| BitstreamBufferRef( |
| @@ -395,6 +395,7 @@ V4L2SliceVideoDecodeAccelerator::V4L2SliceVideoDecodeAccelerator( |
| video_profile_(media::VIDEO_CODEC_PROFILE_UNKNOWN), |
| output_format_fourcc_(0), |
| state_(kUninitialized), |
| + output_mode_(Config::OutputMode::ALLOCATE), |
| decoder_flushing_(false), |
| decoder_resetting_(false), |
| surface_set_change_pending_(false), |
| @@ -521,6 +522,7 @@ bool V4L2SliceVideoDecodeAccelerator::Initialize(const Config& config, |
| decoder_thread_task_runner_ = decoder_thread_.task_runner(); |
| state_ = kInitialized; |
| + output_mode_ = config.output_mode; |
|
kcwu
2016/03/22 05:42:54
Should we validate the value here?
Owen Lin
2016/03/23 06:32:50
Maybe not, we will set this in ArcGVDA. I think Ar
Pawel Osciak
2016/03/28 01:31:29
It's an enum class, but I guess new values may be
|
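For reference, the validation being debated here would just be a defensive check on the enum value arriving through Config before it is stored. A minimal sketch of such a guard (not part of this CL; per the replies, validation was left to the caller) could be:

    // Hypothetical guard, not in this patch: reject output modes this decoder
    // does not handle, in case the enum grows new values later.
    if (config.output_mode != Config::OutputMode::ALLOCATE &&
        config.output_mode != Config::OutputMode::IMPORT) {
      LOGF(ERROR) << "Unsupported output mode";
      return false;
    }
    output_mode_ = config.output_mode;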
| // InitializeTask will NOTIFY_ERROR on failure. |
| decoder_thread_task_runner_->PostTask( |
| @@ -894,7 +896,7 @@ void V4L2SliceVideoDecodeAccelerator::Dequeue() { |
| memset(&dqbuf, 0, sizeof(dqbuf)); |
| memset(&planes, 0, sizeof(planes)); |
| dqbuf.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE; |
| - dqbuf.memory = V4L2_MEMORY_USERPTR; |
| + dqbuf.memory = V4L2_MEMORY_MMAP; |
| dqbuf.m.planes = planes; |
| dqbuf.length = input_planes_count_; |
| if (device_->Ioctl(VIDIOC_DQBUF, &dqbuf) != 0) { |
| @@ -920,7 +922,10 @@ void V4L2SliceVideoDecodeAccelerator::Dequeue() { |
| memset(&dqbuf, 0, sizeof(dqbuf)); |
| memset(&planes, 0, sizeof(planes)); |
| dqbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; |
| - dqbuf.memory = V4L2_MEMORY_MMAP; |
| + if (output_mode_ == Config::OutputMode::ALLOCATE) |
| + dqbuf.memory = V4L2_MEMORY_MMAP; |
|
Owen Lin
2016/03/23 06:32:50
use the ternary operator: "? :"
Pawel Osciak
2016/03/28 01:31:28
Done.
|
| + else |
| + dqbuf.memory = V4L2_MEMORY_DMABUF; |
| dqbuf.m.planes = planes; |
| dqbuf.length = output_planes_count_; |
| if (device_->Ioctl(VIDIOC_DQBUF, &dqbuf) != 0) { |
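The ternary form Owen Lin asks for (and which the "Done" reply says was adopted) collapses this branch into a single assignment, roughly:

    dqbuf.memory = (output_mode_ == Config::OutputMode::ALLOCATE)
                       ? V4L2_MEMORY_MMAP
                       : V4L2_MEMORY_DMABUF;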
| @@ -1056,7 +1061,6 @@ bool V4L2SliceVideoDecodeAccelerator::EnqueueOutputRecord(int index) { |
| OutputRecord& output_record = output_buffer_map_[index]; |
| DCHECK(!output_record.at_device); |
| DCHECK(!output_record.at_client); |
| - DCHECK_NE(output_record.egl_image, EGL_NO_IMAGE_KHR); |
| DCHECK_NE(output_record.picture_id, -1); |
| if (output_record.egl_sync != EGL_NO_SYNC_KHR) { |
| @@ -1082,7 +1086,16 @@ bool V4L2SliceVideoDecodeAccelerator::EnqueueOutputRecord(int index) { |
| memset(qbuf_planes, 0, sizeof(qbuf_planes)); |
| qbuf.index = index; |
| qbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; |
| - qbuf.memory = V4L2_MEMORY_MMAP; |
| + if (output_mode_ == Config::OutputMode::ALLOCATE) { |
| + qbuf.memory = V4L2_MEMORY_MMAP; |
| + } else { |
| + qbuf.memory = V4L2_MEMORY_DMABUF; |
| + DCHECK_EQ(output_planes_count_, output_record.dmabuf_fds.size()); |
| + for (size_t i = 0; i < output_record.dmabuf_fds.size(); ++i) { |
| + DCHECK_NE(output_record.dmabuf_fds[i].get(), -1); |
|
Owen Lin
2016/03/23 06:32:50
DCHECK(output_record.dmabuf_fds[i].is_valid());
Pawel Osciak
2016/03/28 01:31:29
Done.
|
| + qbuf_planes[i].m.fd = output_record.dmabuf_fds[i].get(); |
| + } |
| + } |
| qbuf.m.planes = qbuf_planes; |
| qbuf.length = output_planes_count_; |
| IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_QBUF, &qbuf); |
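With the DCHECK swap Owen Lin requests, the DMABUF branch of this hunk presumably ends up along these lines in the next patch set (sketch only; base::ScopedFD::is_valid() amounts to checking that the wrapped fd is not -1):

    } else {
      qbuf.memory = V4L2_MEMORY_DMABUF;
      DCHECK_EQ(output_planes_count_, output_record.dmabuf_fds.size());
      for (size_t i = 0; i < output_record.dmabuf_fds.size(); ++i) {
        // Queue one dmabuf fd per output plane; fds were stored at import time.
        DCHECK(output_record.dmabuf_fds[i].is_valid());
        qbuf_planes[i].m.fd = output_record.dmabuf_fds[i].get();
      }
    }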
| @@ -1371,7 +1384,7 @@ bool V4L2SliceVideoDecodeAccelerator::DestroyOutputs(bool dismiss) { |
| if (output_buffer_map_.empty()) |
| return true; |
| - for (auto output_record : output_buffer_map_) { |
| + for (const auto& output_record : output_buffer_map_) { |
| DCHECK(!output_record.at_device); |
| if (output_record.egl_sync != EGL_NO_SYNC_KHR) { |
| @@ -1476,7 +1489,10 @@ void V4L2SliceVideoDecodeAccelerator::AssignPictureBuffersTask( |
| memset(&reqbufs, 0, sizeof(reqbufs)); |
| reqbufs.count = buffers.size(); |
| reqbufs.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; |
| - reqbufs.memory = V4L2_MEMORY_MMAP; |
| + if (output_mode_ == Config::OutputMode::ALLOCATE) |
| + reqbufs.memory = V4L2_MEMORY_MMAP; |
|
Owen Lin
2016/03/23 06:32:50
ditto.
Pawel Osciak
2016/03/28 01:31:28
Done.
|
| + else |
| + reqbufs.memory = V4L2_MEMORY_DMABUF; |
| IOCTL_OR_ERROR_RETURN(VIDIOC_REQBUFS, &reqbufs); |
| if (reqbufs.count != buffers.size()) { |
| @@ -1485,17 +1501,56 @@ void V4L2SliceVideoDecodeAccelerator::AssignPictureBuffersTask( |
| return; |
| } |
| - child_task_runner_->PostTask( |
| - FROM_HERE, base::Bind(&V4L2SliceVideoDecodeAccelerator::CreateEGLImages, |
| - weak_this_, buffers, output_format_fourcc_, |
| - output_planes_count_)); |
| + DCHECK(free_output_buffers_.empty()); |
| + DCHECK(output_buffer_map_.empty()); |
| + output_buffer_map_.resize(buffers.size()); |
| + for (size_t i = 0; i < output_buffer_map_.size(); ++i) { |
| + DCHECK(buffers[i].size() == coded_size_); |
| + |
| + OutputRecord& output_record = output_buffer_map_[i]; |
| + DCHECK(!output_record.at_device); |
| + DCHECK(!output_record.at_client); |
| + DCHECK_EQ(output_record.egl_image, EGL_NO_IMAGE_KHR); |
| + DCHECK_EQ(output_record.egl_sync, EGL_NO_SYNC_KHR); |
| + DCHECK_EQ(output_record.picture_id, -1); |
| + DCHECK(output_record.dmabuf_fds.empty()); |
| + DCHECK_EQ(output_record.cleared, false); |
| + |
| + output_record.picture_id = buffers[i].id(); |
| + output_record.texture_id = buffers[i].texture_id(); |
| + // This will remain true until ImportBufferForPicture is called, either by |
| + // the client, or by ourselves, if we are allocating. |
| + output_record.at_client = true; |
|
Owen Lin
2016/03/23 06:32:50
As suggested by kcwu, maybe we can make it a singl
Pawel Osciak
2016/03/28 01:31:29
As discussed offline, to be addressed separately.
|
| + if (output_mode_ == Config::OutputMode::ALLOCATE) { |
| + std::vector<base::ScopedFD> dmabuf_fds = |
| + std::move(device_->GetDmabufsForV4L2Buffer( |
| + i, output_planes_count_, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)); |
| + if (dmabuf_fds.empty()) { |
| + NOTIFY_ERROR(PLATFORM_FAILURE); |
| + return; |
| + } |
| + |
| + auto passed_dmabuf_fds(make_scoped_ptr( |
| + new std::vector<base::ScopedFD>(std::move(dmabuf_fds)))); |
| + ImportBufferForPictureTask(output_record.picture_id, |
| + std::move(passed_dmabuf_fds)); |
| + } // else we'll get triggered via ImportBufferForPicture() from client. |
| + DVLOGF(3) << "buffer[" << i << "]: picture_id=" << output_record.picture_id; |
| + } |
| + |
| + if (!StartDevicePoll()) { |
| + NOTIFY_ERROR(PLATFORM_FAILURE); |
| + return; |
| + } |
| } |
| -void V4L2SliceVideoDecodeAccelerator::CreateEGLImages( |
| - const std::vector<media::PictureBuffer>& buffers, |
| - uint32_t output_format_fourcc, |
| - size_t output_planes_count) { |
| - DVLOGF(3); |
| +void V4L2SliceVideoDecodeAccelerator::CreateEGLImageFor( |
| + size_t buffer_index, |
| + scoped_ptr<std::vector<base::ScopedFD>> passed_dmabuf_fds, |
| + GLuint texture_id, |
| + const gfx::Size& size, |
| + uint32_t fourcc) { |
| + DVLOGF(3) << "index=" << buffer_index; |
| DCHECK(child_task_runner_->BelongsToCurrentThread()); |
| gfx::GLContext* gl_context = get_gl_context_cb_.Run(); |
| @@ -1507,68 +1562,127 @@ void V4L2SliceVideoDecodeAccelerator::CreateEGLImages( |
| gfx::ScopedTextureBinder bind_restore(GL_TEXTURE_EXTERNAL_OES, 0); |
| - std::vector<EGLImageKHR> egl_images; |
| - for (size_t i = 0; i < buffers.size(); ++i) { |
| - EGLImageKHR egl_image = device_->CreateEGLImage(egl_display_, |
| - gl_context->GetHandle(), |
| - buffers[i].texture_id(), |
| - buffers[i].size(), |
| - i, |
| - output_format_fourcc, |
| - output_planes_count); |
| - if (egl_image == EGL_NO_IMAGE_KHR) { |
| - LOGF(ERROR) << "Could not create EGLImageKHR"; |
| - for (const auto& image_to_destroy : egl_images) |
| - device_->DestroyEGLImage(egl_display_, image_to_destroy); |
| - |
| - NOTIFY_ERROR(PLATFORM_FAILURE); |
| - return; |
| - } |
| - |
| - egl_images.push_back(egl_image); |
| + EGLImageKHR egl_image = device_->CreateEGLImage(egl_display_, |
| + gl_context->GetHandle(), |
| + texture_id, |
| + size, |
| + buffer_index, |
| + fourcc, |
| + *passed_dmabuf_fds); |
| + if (egl_image == EGL_NO_IMAGE_KHR) { |
| + LOGF(ERROR) << "Could not create EGLImageKHR," |
| + << " index=" << buffer_index << " texture_id=" << texture_id; |
|
kcwu
2016/03/22 05:42:54
why not NOTIFY_ERROR and return here?
Pawel Osciak
2016/03/28 01:31:29
Done.
|
| } |
| decoder_thread_task_runner_->PostTask( |
| - FROM_HERE, base::Bind( |
| - &V4L2SliceVideoDecodeAccelerator::AssignEGLImages, |
| - base::Unretained(this), buffers, egl_images)); |
| + FROM_HERE, base::Bind(&V4L2SliceVideoDecodeAccelerator::AssignEGLImage, |
| + base::Unretained(this), buffer_index, egl_image, |
| + base::Passed(&passed_dmabuf_fds))); |
| } |
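Per kcwu's comment and the "Done" reply, the failure branch above presumably gains an error notification and early return in the next patch set, something like:

    if (egl_image == EGL_NO_IMAGE_KHR) {
      LOGF(ERROR) << "Could not create EGLImageKHR,"
                  << " index=" << buffer_index << " texture_id=" << texture_id;
      // Bail out instead of continuing with an invalid image.
      NOTIFY_ERROR(PLATFORM_FAILURE);
      return;
    }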
| -void V4L2SliceVideoDecodeAccelerator::AssignEGLImages( |
| - const std::vector<media::PictureBuffer>& buffers, |
| - const std::vector<EGLImageKHR>& egl_images) { |
| - DVLOGF(3); |
| +void V4L2SliceVideoDecodeAccelerator::AssignEGLImage( |
| + size_t buffer_index, |
| + EGLImageKHR egl_image, |
| + scoped_ptr<std::vector<base::ScopedFD>> passed_dmabuf_fds) { |
| + DVLOGF(3) << "index=" << buffer_index; |
| DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread()); |
| - DCHECK_EQ(buffers.size(), egl_images.size()); |
| - DCHECK(free_output_buffers_.empty()); |
| - DCHECK(output_buffer_map_.empty()); |
| + DCHECK_LT(buffer_index, output_buffer_map_.size()); |
| + OutputRecord& output_record = output_buffer_map_[buffer_index]; |
| + DCHECK_EQ(output_record.egl_image, EGL_NO_IMAGE_KHR); |
| + DCHECK_EQ(output_record.egl_sync, EGL_NO_SYNC_KHR); |
| + DCHECK(!output_record.at_client); |
| + DCHECK(!output_record.at_device); |
| - output_buffer_map_.resize(buffers.size()); |
| + output_record.egl_image = egl_image; |
| + if (output_mode_ == Config::OutputMode::IMPORT) { |
| + DCHECK(output_record.dmabuf_fds.empty()); |
| + output_record.dmabuf_fds.swap(*passed_dmabuf_fds); |
|
Owen Lin
2016/03/23 06:32:50
std::move(*passed_dmabuf_fds);
Pawel Osciak
2016/03/28 01:31:28
Done.
|
| + } |
| - for (size_t i = 0; i < output_buffer_map_.size(); ++i) { |
| - DCHECK(buffers[i].size() == coded_size_); |
| + DCHECK_EQ(std::count(free_output_buffers_.begin(), free_output_buffers_.end(), |
|
Owen Lin
2016/03/23 06:32:50
Will it be nice to have a static function:
bool c
Pawel Osciak
2016/03/28 01:31:29
Hmm... Sound like a good idea, but I'm on the fenc
|
| + buffer_index), |
| + 0); |
| + free_output_buffers_.push_back(buffer_index); |
| + ScheduleDecodeBufferTaskIfNeeded(); |
| +} |
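Owen Lin's (truncated) suggestion about a static function appears to concern factoring out the std::count membership check used here and again in ImportBufferForPictureTask. A hypothetical helper, with name and shape guessed since the comment is cut off:

    // Hypothetical helper, not part of this patch; assumes the free list is a
    // container of buffer indices (e.g. std::list<int>).
    template <typename FreeList>
    bool IsFreeOutputBuffer(const FreeList& free_output_buffers, size_t index) {
      return std::count(free_output_buffers.begin(), free_output_buffers.end(),
                        static_cast<int>(index)) != 0;
    }

The two DCHECKs would then read as DCHECK(!IsFreeOutputBuffer(free_output_buffers_, buffer_index)) and the like.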
| - OutputRecord& output_record = output_buffer_map_[i]; |
| - DCHECK(!output_record.at_device); |
| - DCHECK(!output_record.at_client); |
| - DCHECK_EQ(output_record.egl_image, EGL_NO_IMAGE_KHR); |
| - DCHECK_EQ(output_record.egl_sync, EGL_NO_SYNC_KHR); |
| - DCHECK_EQ(output_record.picture_id, -1); |
| - DCHECK_EQ(output_record.cleared, false); |
| +void V4L2SliceVideoDecodeAccelerator::ImportBufferForPicture( |
| + int32_t picture_buffer_id, |
| + const std::vector<gfx::GpuMemoryBufferHandle>& gpu_memory_buffer_handles) { |
| + DVLOGF(3) << "picture_buffer_id=" << picture_buffer_id; |
| + DCHECK(child_task_runner_->BelongsToCurrentThread()); |
| - output_record.egl_image = egl_images[i]; |
| - output_record.picture_id = buffers[i].id(); |
| - free_output_buffers_.push_back(i); |
| - DVLOGF(3) << "buffer[" << i << "]: picture_id=" << output_record.picture_id; |
| + auto passed_dmabuf_fds(make_scoped_ptr(new std::vector<base::ScopedFD>)); |
|
kcwu
2016/03/22 05:42:54
Add "()" for new.
new std::vector<base::ScopedFD>(
Pawel Osciak
2016/03/28 01:31:29
Done.
|
| + for (const auto& handle : gpu_memory_buffer_handles) { |
| + int fd = handle.native_pixmap_handle.fd.fd; |
|
kcwu
2016/03/22 05:42:54
I found native_pixelmap_handle is only available i
Pawel Osciak
2016/03/28 01:31:29
Done.
|
| + DCHECK_NE(fd, -1); |
| + passed_dmabuf_fds->push_back(base::ScopedFD(fd)); |
| } |
| - if (!StartDevicePoll()) { |
| - NOTIFY_ERROR(PLATFORM_FAILURE); |
| + if (output_mode_ != Config::OutputMode::IMPORT) { |
| + LOGF(ERROR) << "Cannot import in non-import mode"; |
| + NOTIFY_ERROR(INVALID_ARGUMENT); |
| return; |
| } |
| - ProcessPendingEventsIfNeeded(); |
| + decoder_thread_task_runner_->PostTask( |
| + FROM_HERE, |
| + base::Bind(&V4L2SliceVideoDecodeAccelerator::ImportBufferForPictureTask, |
| + base::Unretained(this), picture_buffer_id, |
| + base::Passed(&passed_dmabuf_fds))); |
| +} |
| + |
| +void V4L2SliceVideoDecodeAccelerator::ImportBufferForPictureTask( |
| + int32_t picture_buffer_id, |
| + scoped_ptr<std::vector<base::ScopedFD>> passed_dmabuf_fds) { |
| + DVLOGF(3) << "picture_buffer_id=" << picture_buffer_id; |
| + DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread()); |
| + |
| + const auto iter = |
| + std::find_if(output_buffer_map_.begin(), output_buffer_map_.end(), |
| + [picture_buffer_id](const OutputRecord& output_record) { |
| + return output_record.picture_id == picture_buffer_id; |
| + }); |
| + if (iter == output_buffer_map_.end()) { |
| + LOGF(ERROR) << "Invalid picture_buffer_id=" << picture_buffer_id; |
| + NOTIFY_ERROR(INVALID_ARGUMENT); |
| + return; |
| + } |
| + |
| + if (!iter->at_client) { |
| + LOGF(ERROR) << "Cannot import buffer that not owned by client"; |
| + NOTIFY_ERROR(INVALID_ARGUMENT); |
| + return; |
| + } |
| + |
| + size_t index = iter - output_buffer_map_.begin(); |
| + DCHECK_EQ(std::count(free_output_buffers_.begin(), free_output_buffers_.end(), |
| + index), |
| + 0); |
| + |
| + DCHECK(!iter->at_device); |
| + iter->at_client = false; |
| + if (iter->texture_id != 0) { |
| + if (iter->egl_image != EGL_NO_IMAGE_KHR) { |
| + child_task_runner_->PostTask( |
| + FROM_HERE, |
| + base::Bind(base::IgnoreResult(&V4L2Device::DestroyEGLImage), device_, |
| + egl_display_, iter->egl_image)); |
| + } |
| + |
| + child_task_runner_->PostTask( |
| + FROM_HERE, |
| + base::Bind(&V4L2SliceVideoDecodeAccelerator::CreateEGLImageFor, |
| + weak_this_, index, base::Passed(&passed_dmabuf_fds), |
| + iter->texture_id, coded_size_, output_format_fourcc_)); |
| + } else { |
| + // No need for an EGLImage, start using this buffer now. |
| + DCHECK_EQ(output_planes_count_, passed_dmabuf_fds->size()); |
| + iter->dmabuf_fds.swap(*passed_dmabuf_fds); |
| + free_output_buffers_.push_back(index); |
| + ScheduleDecodeBufferTaskIfNeeded(); |
| + } |
| } |
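To see how this import path is driven end to end: in Config::OutputMode::IMPORT the client, after the picture buffers have been assigned (AssignPictureBuffersTask above marks each record at_client until import), hands one dmabuf fd per output plane back via ImportBufferForPicture. A rough, hypothetical client-side sketch, with the handle field layout taken only from the access above (handle.native_pixmap_handle.fd.fd) and all other names assumed:

    // Hypothetical client-side code, not part of this patch. Ownership of the
    // fds passes to the decoder, so dup() anything the client still needs.
    std::vector<gfx::GpuMemoryBufferHandle> handles;
    for (int plane_fd : plane_fds) {  // one dmabuf fd per output plane
      gfx::GpuMemoryBufferHandle handle;
      handle.native_pixmap_handle.fd.fd = dup(plane_fd);
      handles.push_back(handle);
    }
    vda->ImportBufferForPicture(picture_buffer_id, handles);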
| void V4L2SliceVideoDecodeAccelerator::ReusePictureBuffer( |
| @@ -2510,7 +2624,6 @@ void V4L2SliceVideoDecodeAccelerator::OutputSurface( |
| DCHECK(!output_record.at_client); |
| DCHECK(!output_record.at_device); |
| - DCHECK_NE(output_record.egl_image, EGL_NO_IMAGE_KHR); |
| DCHECK_NE(output_record.picture_id, -1); |
| output_record.at_client = true; |
| @@ -2616,6 +2729,11 @@ bool V4L2SliceVideoDecodeAccelerator::TryToSetupDecodeOnSeparateThread( |
| return true; |
| } |
| +media::VideoPixelFormat V4L2SliceVideoDecodeAccelerator::GetOutputFormat() |
| + const { |
| + return V4L2Device::V4L2PixFmtToVideoPixelFormat(output_format_fourcc_); |
| +} |
| + |
| // static |
| media::VideoDecodeAccelerator::SupportedProfiles |
| V4L2SliceVideoDecodeAccelerator::GetSupportedProfiles() { |