Index: media/gpu/v4l2_video_decode_accelerator.cc |
diff --git a/media/gpu/v4l2_video_decode_accelerator.cc b/media/gpu/v4l2_video_decode_accelerator.cc |
index 41aacbff4a67e203e8124f8dda51394920f98b40..ca1b1f93485b6b56c2863fcb7f85d3578eda8cfd 100644 |
--- a/media/gpu/v4l2_video_decode_accelerator.cc |
+++ b/media/gpu/v4l2_video_decode_accelerator.cc |
@@ -18,6 +18,7 @@ |
#include "base/command_line.h" |
#include "base/message_loop/message_loop.h" |
#include "base/numerics/safe_conversions.h" |
+#include "base/posix/eintr_wrapper.h" |
#include "base/single_thread_task_runner.h" |
#include "base/threading/thread_task_runner_handle.h" |
#include "base/trace_event/trace_event.h" |
@@ -138,6 +139,7 @@ V4L2VideoDecodeAccelerator::OutputRecord::OutputRecord() |
egl_image(EGL_NO_IMAGE_KHR), |
egl_sync(EGL_NO_SYNC_KHR), |
picture_id(-1), |
+ texture_id(0), |
cleared(false) {} |
V4L2VideoDecodeAccelerator::OutputRecord::~OutputRecord() {} |
@@ -156,6 +158,7 @@ V4L2VideoDecodeAccelerator::V4L2VideoDecodeAccelerator( |
: child_task_runner_(base::ThreadTaskRunnerHandle::Get()), |
decoder_thread_("V4L2DecoderThread"), |
decoder_state_(kUninitialized), |
+ output_mode_(Config::OutputMode::ALLOCATE), |
device_(device), |
decoder_delay_bitstream_buffer_id_(-1), |
decoder_current_input_buffer_(-1), |
@@ -188,6 +191,7 @@ V4L2VideoDecodeAccelerator::~V4L2VideoDecodeAccelerator() { |
DCHECK(!device_poll_thread_.IsRunning()); |
DestroyInputBuffers(); |
+ DestroyEGLImages(); |
DestroyOutputBuffers(); |
// These maps have members that should be manually destroyed, e.g. file |
@@ -214,13 +218,9 @@ bool V4L2VideoDecodeAccelerator::Initialize(const Config& config, |
return false; |
} |
- if (config.output_mode != Config::OutputMode::ALLOCATE) { |
- NOTREACHED() << "Only ALLOCATE OutputMode is supported by this VDA"; |
- return false; |
- } |
- |
- if (get_gl_context_cb_.is_null() || make_context_current_cb_.is_null()) { |
- NOTREACHED() << "GL callbacks are required for this VDA"; |
+ if (config.output_mode != Config::OutputMode::ALLOCATE && |
+ config.output_mode != Config::OutputMode::IMPORT) { |
+ NOTREACHED() << "Only ALLOCATE and IMPORT OutputModes are supported"; |
return false; |
} |
@@ -243,18 +243,22 @@ bool V4L2VideoDecodeAccelerator::Initialize(const Config& config, |
} |
// We need the context to be initialized to query extensions. |
- if (!make_context_current_cb_.Run()) { |
- LOGF(ERROR) << "could not make context current"; |
- return false; |
- } |
+ if (!make_context_current_cb_.is_null()) { |
+ if (!make_context_current_cb_.Run()) { |
+ LOGF(ERROR) << "could not make context current"; |
+ return false; |
+ } |
// TODO(posciak): crbug.com/450898. |
#if defined(ARCH_CPU_ARMEL) |
- if (!gl::g_driver_egl.ext.b_EGL_KHR_fence_sync) { |
- LOGF(ERROR) << "context does not have EGL_KHR_fence_sync"; |
- return false; |
- } |
+ if (!gl::g_driver_egl.ext.b_EGL_KHR_fence_sync) { |
+ LOGF(ERROR) << "context does not have EGL_KHR_fence_sync"; |
+ return false; |
+ } |
#endif |
+ } else { |
+ DVLOG(1) << "No GL callbacks provided, initializing without GL support"; |
Pawel Osciak 2016/09/12 08:54:26: DVLOGF()?
wuchengli 2016/09/12 11:56:52: Done.
+ } |
// Capabilities check. |
struct v4l2_capability caps; |
@@ -288,6 +292,7 @@ bool V4L2VideoDecodeAccelerator::Initialize(const Config& config, |
} |
decoder_state_ = kInitialized; |
+ output_mode_ = config.output_mode; |
// StartDevicePoll will NOTIFY_ERROR on failure, so IgnoreResult is fine here. |
decoder_thread_.task_runner()->PostTask( |
@@ -359,141 +364,260 @@ void V4L2VideoDecodeAccelerator::AssignPictureBuffersTask( |
return; |
} |
- if (image_processor_device_) { |
- DCHECK(!image_processor_); |
- image_processor_.reset(new V4L2ImageProcessor(image_processor_device_)); |
- // Unretained is safe because |this| owns image processor and there will be |
- // no callbacks after processor destroys. |
- if (!image_processor_->Initialize( |
- V4L2Device::V4L2PixFmtToVideoPixelFormat(output_format_fourcc_), |
- V4L2Device::V4L2PixFmtToVideoPixelFormat(egl_image_format_fourcc_), |
- V4L2_MEMORY_DMABUF, visible_size_, coded_size_, visible_size_, |
- visible_size_, buffers.size(), |
- base::Bind(&V4L2VideoDecodeAccelerator::ImageProcessorError, |
- base::Unretained(this)))) { |
- LOGF(ERROR) << "Initialize image processor failed"; |
- NOTIFY_ERROR(PLATFORM_FAILURE); |
- return; |
- } |
- DCHECK(image_processor_->output_allocated_size() == egl_image_size_); |
- if (image_processor_->input_allocated_size() != coded_size_) { |
- LOGF(ERROR) << "Image processor should be able to take the output coded " |
- << "size of decoder " << coded_size_.ToString() |
- << " without adjusting to " |
- << image_processor_->input_allocated_size().ToString(); |
- NOTIFY_ERROR(PLATFORM_FAILURE); |
+ DCHECK(free_output_buffers_.empty()); |
+ DCHECK(output_buffer_map_.empty()); |
+ output_buffer_map_.resize(buffers.size()); |
+ if (image_processor_device_ && output_mode_ == Config::OutputMode::ALLOCATE) { |
+ if (!CreateImageProcessor()) |
return; |
- } |
} |
- child_task_runner_->PostTask( |
- FROM_HERE, |
- base::Bind(&V4L2VideoDecodeAccelerator::CreateEGLImages, weak_this_, |
- buffers, egl_image_format_fourcc_, egl_image_planes_count_)); |
+ for (size_t i = 0; i < output_buffer_map_.size(); ++i) { |
+ DCHECK(buffers[i].size() == egl_image_size_); |
+ |
+ OutputRecord& output_record = output_buffer_map_[i]; |
+ DCHECK_EQ(output_record.state, kFree); |
+ DCHECK_EQ(output_record.egl_image, EGL_NO_IMAGE_KHR); |
+ DCHECK_EQ(output_record.egl_sync, EGL_NO_SYNC_KHR); |
+ DCHECK_EQ(output_record.picture_id, -1); |
+ DCHECK_EQ(output_record.cleared, false); |
+ DCHECK_EQ(1u, buffers[i].texture_ids().size()); |
+ DCHECK(output_record.processor_input_fds.empty()); |
+ |
+ output_record.picture_id = buffers[i].id(); |
+ output_record.texture_id = buffers[i].texture_ids()[0]; |
+ // This will remain kAtClient until ImportBufferForPicture is called, either |
+ // by the client, or by ourselves, if we are allocating. |
+ output_record.state = kAtClient; |
+ |
+ if (image_processor_device_) { |
+ std::vector<base::ScopedFD> dmabuf_fds = device_->GetDmabufsForV4L2Buffer( |
+ i, output_planes_count_, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE); |
+ if (dmabuf_fds.empty()) { |
+ LOGF(ERROR) << "Failed to get DMABUFs of decoder."; |
+ NOTIFY_ERROR(PLATFORM_FAILURE); |
+ return; |
+ } |
+ output_record.processor_input_fds = std::move(dmabuf_fds); |
+ } |
+ |
+ if (output_mode_ == Config::OutputMode::ALLOCATE) { |
+ std::vector<base::ScopedFD> dmabuf_fds; |
+ dmabuf_fds = egl_image_device_->GetDmabufsForV4L2Buffer( |
+ i, egl_image_planes_count_, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE); |
+ if (dmabuf_fds.empty()) { |
+ LOGF(ERROR) << "Failed to get DMABUFs for EGLImage."; |
+ NOTIFY_ERROR(PLATFORM_FAILURE); |
+ return; |
+ } |
+ ImportBufferForPictureTask(output_record.picture_id, |
+ std::move(dmabuf_fds), |
+ egl_image_size_.width()); |
+ } // else we'll get triggered via ImportBufferForPicture() from client. |
+ |
+ DVLOGF(3) << "AssignPictureBuffers(): buffer[" << i |
Pawel Osciak 2016/09/12 08:54:26: s/AssignPictureBuffers(): //
wuchengli 2016/09/12 11:56:52: Done.
+ << "]: picture_id=" << output_record.picture_id; |
+ } |
} |
-void V4L2VideoDecodeAccelerator::CreateEGLImages( |
- const std::vector<media::PictureBuffer>& buffers, |
- uint32_t output_format_fourcc, |
- size_t output_planes_count) { |
- DVLOGF(3); |
+void V4L2VideoDecodeAccelerator::CreateEGLImageFor( |
+ size_t buffer_index, |
+ int32_t picture_buffer_id, |
+ std::vector<base::ScopedFD> dmabuf_fds, |
+ GLuint texture_id, |
+ const gfx::Size& size, |
+ uint32_t fourcc) { |
+ DVLOGF(3) << "index=" << buffer_index; |
DCHECK(child_task_runner_->BelongsToCurrentThread()); |
if (get_gl_context_cb_.is_null() || make_context_current_cb_.is_null()) { |
- DLOGF(ERROR) << "GL callbacks required for binding to EGLImages"; |
+ DLOG(ERROR) << "GL callbacks required for binding to EGLImages"; |
Pawel Osciak 2016/09/12 08:54:26: DLOGF() ?
wuchengli 2016/09/12 11:56:52: Done.
NOTIFY_ERROR(INVALID_ARGUMENT); |
return; |
} |
gl::GLContext* gl_context = get_gl_context_cb_.Run(); |
if (!gl_context || !make_context_current_cb_.Run()) { |
- DLOGF(ERROR) << "No GL context"; |
+ DLOG(ERROR) << "No GL context"; |
NOTIFY_ERROR(PLATFORM_FAILURE); |
return; |
} |
gl::ScopedTextureBinder bind_restore(GL_TEXTURE_EXTERNAL_OES, 0); |
- std::vector<EGLImageKHR> egl_images; |
- for (size_t i = 0; i < buffers.size(); ++i) { |
- std::vector<base::ScopedFD> dmabuf_fds; |
- dmabuf_fds = egl_image_device_->GetDmabufsForV4L2Buffer( |
- i, egl_image_planes_count_, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE); |
- if (dmabuf_fds.empty()) { |
- LOGF(ERROR) << "Failed to get DMABUFs for EGLImage."; |
- NOTIFY_ERROR(PLATFORM_FAILURE); |
- return; |
- } |
- |
- EGLImageKHR egl_image = egl_image_device_->CreateEGLImage( |
- egl_display_, gl_context->GetHandle(), buffers[i].texture_ids()[0], |
- buffers[i].size(), i, egl_image_format_fourcc_, dmabuf_fds); |
- if (egl_image == EGL_NO_IMAGE_KHR) { |
- LOGF(ERROR) << "could not create EGLImageKHR," |
- << " index=" << i |
- << " texture_id=" << buffers[i].texture_ids()[0]; |
- for (EGLImageKHR image : egl_images) { |
- if (egl_image_device_->DestroyEGLImage(egl_display_, image) != EGL_TRUE) |
- DVLOGF(1) << "DestroyEGLImage failed."; |
- } |
- NOTIFY_ERROR(PLATFORM_FAILURE); |
- return; |
- } |
- egl_images.push_back(egl_image); |
+ EGLImageKHR egl_image = egl_image_device_->CreateEGLImage( |
+ egl_display_, gl_context->GetHandle(), texture_id, size, buffer_index, |
+ fourcc, dmabuf_fds); |
+ if (egl_image == EGL_NO_IMAGE_KHR) { |
+ LOGF(ERROR) << "could not create EGLImageKHR," |
+ << " index=" << buffer_index << " texture_id=" << texture_id; |
+ NOTIFY_ERROR(PLATFORM_FAILURE); |
+ return; |
} |
decoder_thread_.task_runner()->PostTask( |
- FROM_HERE, base::Bind(&V4L2VideoDecodeAccelerator::AssignEGLImages, |
- base::Unretained(this), buffers, egl_images)); |
+ FROM_HERE, |
+ base::Bind(&V4L2VideoDecodeAccelerator::AssignEGLImage, |
+ base::Unretained(this), buffer_index, picture_buffer_id, |
+ egl_image, base::Passed(&dmabuf_fds))); |
} |
-void V4L2VideoDecodeAccelerator::AssignEGLImages( |
- const std::vector<media::PictureBuffer>& buffers, |
- const std::vector<EGLImageKHR>& egl_images) { |
- DVLOGF(3); |
+void V4L2VideoDecodeAccelerator::AssignEGLImage( |
+ size_t buffer_index, |
+ int32_t picture_buffer_id, |
+ EGLImageKHR egl_image, |
+ std::vector<base::ScopedFD> dmabuf_fds) { |
+ DVLOGF(3) << "index=" << buffer_index << ", picture_id=" << picture_buffer_id; |
DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread()); |
- DCHECK_EQ(buffers.size(), egl_images.size()); |
- DCHECK(free_output_buffers_.empty()); |
- DCHECK(output_buffer_map_.empty()); |
- output_buffer_map_.resize(buffers.size()); |
- for (size_t i = 0; i < output_buffer_map_.size(); ++i) { |
- DCHECK(buffers[i].size() == egl_image_size_); |
+ // It's possible that while waiting for the EGLImages to be allocated and |
+ // assigned, we have already decoded more of the stream and saw another |
+ // resolution change. This is a normal situation, in such a case either there |
+ // is no output record with this index awaiting an EGLImage to be assigned to |
+ // it, or the record is already updated to use a newer PictureBuffer and is |
+ // awaiting an EGLImage associated with a different picture_buffer_id. If so, |
+ // just discard this image, we will get the one we are waiting for later. |
+ if (buffer_index >= output_buffer_map_.size() || |
+ output_buffer_map_[buffer_index].picture_id != picture_buffer_id) { |
+ DVLOGF(3) << "Picture set already changed, dropping EGLImage"; |
+ child_task_runner_->PostTask( |
+ FROM_HERE, base::Bind(base::IgnoreResult(&V4L2Device::DestroyEGLImage), |
+ device_, egl_display_, egl_image)); |
+ return; |
+ } |
- OutputRecord& output_record = output_buffer_map_[i]; |
- DCHECK_EQ(output_record.state, kFree); |
- DCHECK_EQ(output_record.egl_image, EGL_NO_IMAGE_KHR); |
- DCHECK_EQ(output_record.egl_sync, EGL_NO_SYNC_KHR); |
- DCHECK_EQ(output_record.picture_id, -1); |
- DCHECK_EQ(output_record.cleared, false); |
- DCHECK_LE(1u, buffers[i].texture_ids().size()); |
+ OutputRecord& output_record = output_buffer_map_[buffer_index]; |
+ DCHECK_EQ(output_record.egl_image, EGL_NO_IMAGE_KHR); |
+ DCHECK_EQ(output_record.egl_sync, EGL_NO_SYNC_KHR); |
+ DCHECK_EQ(output_record.state, kFree); |
+ DCHECK_EQ(std::count(free_output_buffers_.begin(), free_output_buffers_.end(), |
+ buffer_index), |
+ 0); |
+ output_record.egl_image = egl_image; |
+ free_output_buffers_.push_back(buffer_index); |
+ if (decoder_state_ == kAwaitingPictureBuffers) { |
+ DVLOG(1) << "Change state to kDecoding"; |
Pawel Osciak 2016/09/12 08:54:26: DVLOGF() ?
wuchengli 2016/09/12 11:56:51: Done.
+ decoder_state_ = kDecoding; |
+ } |
+ if (reset_pending_) { |
+ FinishReset(); |
+ return; |
+ } |
+ if (decoder_state_ != kChangingResolution) { |
+ Enqueue(); |
+ ScheduleDecodeBufferTaskIfNeeded(); |
+ } |
+} |
- if (image_processor_device_) { |
- std::vector<base::ScopedFD> fds = device_->GetDmabufsForV4L2Buffer( |
- i, output_planes_count_, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE); |
- if (fds.empty()) { |
- LOGF(ERROR) << "Failed to get DMABUFs of decoder."; |
- NOTIFY_ERROR(PLATFORM_FAILURE); |
- return; |
- } |
- output_record.fds = std::move(fds); |
- } |
+void V4L2VideoDecodeAccelerator::ImportBufferForPicture( |
+ int32_t picture_buffer_id, |
+ const gfx::GpuMemoryBufferHandle& gpu_memory_buffer_handle) { |
+ DVLOGF(3) << "picture_buffer_id=" << picture_buffer_id; |
+ DCHECK(child_task_runner_->BelongsToCurrentThread()); |
- output_record.egl_image = egl_images[i]; |
- output_record.picture_id = buffers[i].id(); |
+ if (output_mode_ != Config::OutputMode::IMPORT) { |
+ LOGF(ERROR) << "Cannot import in non-import mode"; |
+ NOTIFY_ERROR(INVALID_ARGUMENT); |
+ return; |
+ } |
+ |
+ std::vector<base::ScopedFD> dmabuf_fds; |
+ int32_t stride = 0; |
+#if defined(USE_OZONE) |
+ for (const auto& fd : gpu_memory_buffer_handle.native_pixmap_handle.fds) { |
+ DCHECK_NE(fd.fd, -1); |
+ dmabuf_fds.push_back(base::ScopedFD(fd.fd)); |
+ } |
+ stride = gpu_memory_buffer_handle.native_pixmap_handle.planes[0].stride; |
+ for (const auto& plane : |
+ gpu_memory_buffer_handle.native_pixmap_handle.planes) { |
+ DVLOGF(3) << ": offset=" << plane.offset << ", stride=" << plane.stride; |
+ } |
+#endif |
+ |
+ decoder_thread_.message_loop()->PostTask( |
+ FROM_HERE, |
+ base::Bind(&V4L2VideoDecodeAccelerator::ImportBufferForPictureTask, |
+ base::Unretained(this), picture_buffer_id, |
+ base::Passed(&dmabuf_fds), stride)); |
+} |
+ |
+void V4L2VideoDecodeAccelerator::ImportBufferForPictureTask( |
+ int32_t picture_buffer_id, |
+ std::vector<base::ScopedFD> dmabuf_fds, |
+ int32_t stride) { |
+ DVLOGF(3) << "picture_buffer_id=" << picture_buffer_id |
+ << ", dmabuf_fds.size()=" << dmabuf_fds.size() |
+ << ", stride=" << stride; |
+ DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread()); |
- free_output_buffers_.push(i); |
- DVLOGF(3) << "buffer[" << i << "]: picture_id=" << output_record.picture_id; |
+ DCHECK(image_processor_device_); |
+ if (!image_processor_) { |
+ // The client may adjust the coded width. We don't have the final coded size |
+ // in AssignPictureBuffers yet. Use the adjusted coded width to create the |
+ // image processor. |
+ DVLOGF(3) << "Original egl_image_size=" << egl_image_size_.ToString(); |
+ egl_image_size_.set_width(stride); |
Pawel Osciak 2016/09/12 08:54:26: stride is in bytes, and is not the same as size in
+ if (!CreateImageProcessor()) |
+ return; |
+ } else { |
+ DCHECK_EQ(egl_image_size_.width(), stride); |
} |
- decoder_state_ = kDecoding; |
- Enqueue(); |
- if (reset_pending_) { |
- FinishReset(); |
+ const auto iter = |
+ std::find_if(output_buffer_map_.begin(), output_buffer_map_.end(), |
+ [picture_buffer_id](const OutputRecord& output_record) { |
+ return output_record.picture_id == picture_buffer_id; |
+ }); |
+ if (iter == output_buffer_map_.end()) { |
+ // It's possible that we've already posted a DismissPictureBuffer for this |
+ // picture, but it has not yet executed when this ImportBufferForPicture was |
+ // posted to us by the client. In that case just ignore this (we've already |
+ // dismissed it and accounted for that). |
+ DVLOGF(3) << "got picture id=" << picture_buffer_id |
+ << " not in use (anymore?)."; |
return; |
} |
- ScheduleDecodeBufferTaskIfNeeded(); |
+ if (iter->state != kAtClient) { |
+ LOGF(ERROR) << "Cannot import buffer that not owned by client"; |
Pawel Osciak 2016/09/12 08:54:26: s/that not/not/
wuchengli 2016/09/12 11:56:52: Done.
+ NOTIFY_ERROR(INVALID_ARGUMENT); |
+ return; |
+ } |
+ |
+ size_t index = iter - output_buffer_map_.begin(); |
+ DCHECK_EQ(std::count(free_output_buffers_.begin(), free_output_buffers_.end(), |
+ index), |
+ 0); |
+ |
+ iter->state = kFree; |
+ if (iter->texture_id != 0) { |
+ if (iter->egl_image != EGL_NO_IMAGE_KHR) { |
+ child_task_runner_->PostTask( |
+ FROM_HERE, |
+ base::Bind(base::IgnoreResult(&V4L2Device::DestroyEGLImage), device_, |
+ egl_display_, iter->egl_image)); |
+ } |
+ |
+ child_task_runner_->PostTask( |
+ FROM_HERE, base::Bind(&V4L2VideoDecodeAccelerator::CreateEGLImageFor, |
+ weak_this_, index, picture_buffer_id, |
+ base::Passed(&dmabuf_fds), iter->texture_id, |
+ egl_image_size_, egl_image_format_fourcc_)); |
+ } else { |
+ // No need for an EGLImage, start using this buffer now. |
+ DCHECK_EQ(egl_image_planes_count_, dmabuf_fds.size()); |
+ iter->processor_output_fds.swap(dmabuf_fds); |
+ free_output_buffers_.push_back(index); |
+ if (decoder_state_ == kAwaitingPictureBuffers) { |
+ DVLOG(1) << "Change state to kDecoding"; |
Pawel Osciak 2016/09/12 08:54:26: DVLOGF() ?
wuchengli 2016/09/12 11:56:51: Done.
+ decoder_state_ = kDecoding; |
+ } |
+ if (decoder_state_ != kChangingResolution) { |
+ Enqueue(); |
+ ScheduleDecodeBufferTaskIfNeeded(); |
+ } |
+ } |
} |
void V4L2VideoDecodeAccelerator::ReusePictureBuffer(int32_t picture_buffer_id) { |
@@ -501,25 +625,28 @@ void V4L2VideoDecodeAccelerator::ReusePictureBuffer(int32_t picture_buffer_id) { |
// Must be run on child thread, as we'll insert a sync in the EGL context. |
DCHECK(child_task_runner_->BelongsToCurrentThread()); |
- if (!make_context_current_cb_.Run()) { |
- LOGF(ERROR) << "could not make context current"; |
- NOTIFY_ERROR(PLATFORM_FAILURE); |
- return; |
- } |
+ std::unique_ptr<EGLSyncKHRRef> egl_sync_ref; |
- EGLSyncKHR egl_sync = EGL_NO_SYNC_KHR; |
+ if (!make_context_current_cb_.is_null()) { |
+ if (!make_context_current_cb_.Run()) { |
+ LOGF(ERROR) << "could not make context current"; |
+ NOTIFY_ERROR(PLATFORM_FAILURE); |
+ return; |
+ } |
+ |
+ EGLSyncKHR egl_sync = EGL_NO_SYNC_KHR; |
// TODO(posciak): crbug.com/450898. |
#if defined(ARCH_CPU_ARMEL) |
- egl_sync = eglCreateSyncKHR(egl_display_, EGL_SYNC_FENCE_KHR, NULL); |
- if (egl_sync == EGL_NO_SYNC_KHR) { |
- LOGF(ERROR) << "eglCreateSyncKHR() failed"; |
- NOTIFY_ERROR(PLATFORM_FAILURE); |
- return; |
- } |
+ egl_sync = eglCreateSyncKHR(egl_display_, EGL_SYNC_FENCE_KHR, NULL); |
+ if (egl_sync == EGL_NO_SYNC_KHR) { |
+ LOGF(ERROR) << "eglCreateSyncKHR() failed"; |
+ NOTIFY_ERROR(PLATFORM_FAILURE); |
+ return; |
+ } |
#endif |
- std::unique_ptr<EGLSyncKHRRef> egl_sync_ref( |
- new EGLSyncKHRRef(egl_display_, egl_sync)); |
+ egl_sync_ref.reset(new EGLSyncKHRRef(egl_display_, egl_sync)); |
+ } |
decoder_thread_.task_runner()->PostTask( |
FROM_HERE, base::Bind(&V4L2VideoDecodeAccelerator::ReusePictureBufferTask, |
@@ -1203,38 +1330,23 @@ void V4L2VideoDecodeAccelerator::Dequeue() { |
} |
OutputRecord& output_record = output_buffer_map_[dqbuf.index]; |
DCHECK_EQ(output_record.state, kAtDevice); |
- DCHECK_NE(output_record.egl_image, EGL_NO_IMAGE_KHR); |
DCHECK_NE(output_record.picture_id, -1); |
output_buffer_queued_count_--; |
if (dqbuf.m.planes[0].bytesused == 0) { |
// This is an empty output buffer returned as part of a flush. |
output_record.state = kFree; |
- free_output_buffers_.push(dqbuf.index); |
+ free_output_buffers_.push_back(dqbuf.index); |
} else { |
int32_t bitstream_buffer_id = dqbuf.timestamp.tv_sec; |
DCHECK_GE(bitstream_buffer_id, 0); |
DVLOGF(3) << "Dequeue output buffer: dqbuf index=" << dqbuf.index |
<< " bitstream input_id=" << bitstream_buffer_id; |
if (image_processor_device_) { |
- output_record.state = kAtProcessor; |
- image_processor_bitstream_buffer_ids_.push(bitstream_buffer_id); |
- std::vector<int> fds; |
- for (auto& fd : output_record.fds) { |
- fds.push_back(fd.get()); |
+ if (!ProcessFrame(bitstream_buffer_id, dqbuf.index)) { |
+ LOGF(ERROR) << "Process frame failed"; |
Pawel Osciak 2016/09/12 08:54:26: DLOGF()
wuchengli 2016/09/12 11:56:52: Done.
+ NOTIFY_ERROR(PLATFORM_FAILURE); |
+ return; |
} |
- scoped_refptr<VideoFrame> frame = VideoFrame::WrapExternalDmabufs( |
- V4L2Device::V4L2PixFmtToVideoPixelFormat(output_format_fourcc_), |
- coded_size_, gfx::Rect(visible_size_), visible_size_, fds, |
- base::TimeDelta()); |
- // Unretained is safe because |this| owns image processor and there will |
- // be no callbacks after processor destroys. Also, this class ensures it |
- // is safe to post a task from child thread to decoder thread using |
- // Unretained. |
- image_processor_->Process( |
- frame, dqbuf.index, |
- BindToCurrentLoop( |
- base::Bind(&V4L2VideoDecodeAccelerator::FrameProcessed, |
- base::Unretained(this), bitstream_buffer_id))); |
} else { |
output_record.state = kAtClient; |
decoder_frames_at_client_++; |
@@ -1287,7 +1399,6 @@ bool V4L2VideoDecodeAccelerator::EnqueueOutputRecord() { |
DVLOGF(3) << "buffer " << buffer; |
OutputRecord& output_record = output_buffer_map_[buffer]; |
DCHECK_EQ(output_record.state, kFree); |
- DCHECK_NE(output_record.egl_image, EGL_NO_IMAGE_KHR); |
DCHECK_NE(output_record.picture_id, -1); |
if (output_record.egl_sync != EGL_NO_SYNC_KHR) { |
TRACE_EVENT0("Video Decoder", |
@@ -1318,8 +1429,11 @@ bool V4L2VideoDecodeAccelerator::EnqueueOutputRecord() { |
qbuf.memory = V4L2_MEMORY_MMAP; |
qbuf.m.planes = qbuf_planes.get(); |
qbuf.length = output_planes_count_; |
+ DVLOG(2) << "qbuf.index=" << qbuf.index |
Pawel Osciak 2016/09/12 08:54:26: DVLOGF()
wuchengli 2016/09/12 11:56:52: Done.
+ << ", output_mode_=" << (int)output_mode_ |
Pawel Osciak 2016/09/12 08:54:26: static_cast<>()
wuchengli 2016/09/12 11:56:52: Done.
+ << ", output_planes_count_=" << output_planes_count_; |
IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_QBUF, &qbuf); |
- free_output_buffers_.pop(); |
+ free_output_buffers_.pop_front(); |
output_record.state = kAtDevice; |
output_buffer_queued_count_++; |
return true; |
@@ -1367,12 +1481,14 @@ void V4L2VideoDecodeAccelerator::ReusePictureBufferTask( |
} |
DCHECK_EQ(output_record.egl_sync, EGL_NO_SYNC_KHR); |
- output_record.egl_sync = egl_sync_ref->egl_sync; |
output_record.state = kFree; |
- free_output_buffers_.push(index); |
+ free_output_buffers_.push_back(index); |
decoder_frames_at_client_--; |
- // Take ownership of the EGLSync. |
- egl_sync_ref->egl_sync = EGL_NO_SYNC_KHR; |
+ if (egl_sync_ref) { |
+ output_record.egl_sync = egl_sync_ref->egl_sync; |
+ // Take ownership of the EGLSync. |
+ egl_sync_ref->egl_sync = EGL_NO_SYNC_KHR; |
+ } |
// We got a buffer back, so enqueue it back. |
Enqueue(); |
} |
@@ -1638,7 +1754,7 @@ bool V4L2VideoDecodeAccelerator::StopOutputStream() { |
OutputRecord& output_record = output_buffer_map_[i]; |
if (output_record.state == kAtDevice) { |
output_record.state = kFree; |
- free_output_buffers_.push(i); |
+ free_output_buffers_.push_back(i); |
DCHECK_EQ(output_record.egl_sync, EGL_NO_SYNC_KHR); |
} |
} |
@@ -1708,6 +1824,8 @@ void V4L2VideoDecodeAccelerator::FinishResolutionChange() { |
return; |
} |
+ DestroyOutputBuffers(); |
+ |
struct v4l2_format format; |
bool again; |
gfx::Size visible_size; |
@@ -1837,7 +1955,8 @@ bool V4L2VideoDecodeAccelerator::CreateBuffersForFormat( |
} |
DVLOGF(3) << "new resolution: " << coded_size_.ToString() |
<< ", visible size: " << visible_size_.ToString() |
- << ", EGLImage size: " << egl_image_size_.ToString(); |
+ << ", EGLImage size: " << egl_image_size_.ToString() |
+ << ", plane count: " << egl_image_planes_count_; |
return CreateOutputBuffers(); |
} |
@@ -2003,6 +2122,10 @@ bool V4L2VideoDecodeAccelerator::SetupFormats() { |
} |
egl_image_device_ = image_processor_device_; |
} else { |
+ if (output_mode_ == Config::OutputMode::IMPORT) { |
+ LOGF(ERROR) << "Import mode is unsupported without image processor."; |
Pawel Osciak 2016/09/12 08:54:26: Is this because we haven't implemented it yet with
wuchengli 2016/09/12 09:15:57: Yes. It's just not implemented. I don't have a way
Pawel Osciak 2016/09/14 04:17:28: I'd prefer to clearly mention that here. Otherwise
wuchengli 2016/09/14 05:17:26: I changed the log to "Import mode without image pr
+ return false; |
+ } |
egl_image_format_fourcc_ = output_format_fourcc_; |
egl_image_device_ = device_; |
} |
@@ -2069,6 +2192,78 @@ uint32_t V4L2VideoDecodeAccelerator::FindImageProcessorOutputFormat() { |
return 0; |
} |
+bool V4L2VideoDecodeAccelerator::CreateImageProcessor() { |
+ DVLOGF(3); |
Pawel Osciak 2016/09/12 08:54:26: DCHECK(!image_processor_)
wuchengli 2016/09/12 11:56:52: Done.
+ image_processor_.reset(new V4L2ImageProcessor(image_processor_device_)); |
+ v4l2_memory output_memory_type = |
+ (output_mode_ == Config::OutputMode::ALLOCATE ? V4L2_MEMORY_MMAP |
+ : V4L2_MEMORY_DMABUF); |
+ // Unretained is safe because |this| owns image processor and there will be |
+ // no callbacks after processor destroys. |
+ if (!image_processor_->Initialize( |
+ V4L2Device::V4L2PixFmtToVideoPixelFormat(output_format_fourcc_), |
+ V4L2Device::V4L2PixFmtToVideoPixelFormat(egl_image_format_fourcc_), |
+ V4L2_MEMORY_DMABUF, output_memory_type, visible_size_, coded_size_, |
+ visible_size_, egl_image_size_, output_buffer_map_.size(), |
+ base::Bind(&V4L2VideoDecodeAccelerator::ImageProcessorError, |
+ base::Unretained(this)))) { |
+ LOGF(ERROR) << "Initialize image processor failed"; |
+ NOTIFY_ERROR(PLATFORM_FAILURE); |
+ return false; |
+ } |
+ DCHECK(image_processor_->output_allocated_size() == egl_image_size_); |
Pawel Osciak 2016/09/12 08:54:26: DCHECK_EQ
wuchengli 2016/09/12 11:56:52: DCHECK_EQ cannot compile. DCHECK_EQ uses logging::
Pawel Osciak 2016/09/14 04:17:29: Acknowledged.
+ DVLOGF(3) << "image_processor_->output_allocated_size()=" |
+ << image_processor_->output_allocated_size().ToString(); |
+ if (image_processor_->input_allocated_size() != coded_size_) { |
+ LOGF(ERROR) << "Image processor should be able to take the output coded " |
+ << "size of decoder " << coded_size_.ToString() |
+ << " without adjusting to " |
+ << image_processor_->input_allocated_size().ToString(); |
+ NOTIFY_ERROR(PLATFORM_FAILURE); |
+ return false; |
+ } |
+ return true; |
+} |
+ |
+bool V4L2VideoDecodeAccelerator::ProcessFrame(int32_t bitstream_buffer_id, |
+ int output_buffer_index) { |
+ DVLOGF(3); |
+ DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread()); |
+ |
+ OutputRecord& output_record = output_buffer_map_[output_buffer_index]; |
+ output_record.state = kAtProcessor; |
+ image_processor_bitstream_buffer_ids_.push(bitstream_buffer_id); |
+ std::vector<int> processor_input_fds; |
+ for (auto& fd : output_record.processor_input_fds) { |
+ processor_input_fds.push_back(fd.get()); |
+ } |
+ scoped_refptr<VideoFrame> input_frame = VideoFrame::WrapExternalDmabufs( |
+ V4L2Device::V4L2PixFmtToVideoPixelFormat(output_format_fourcc_), |
+ coded_size_, gfx::Rect(visible_size_), visible_size_, processor_input_fds, |
+ base::TimeDelta()); |
+ |
+ std::vector<base::ScopedFD> processor_output_fds; |
+ if (output_mode_ == Config::OutputMode::IMPORT) { |
+ for (auto& fd : output_record.processor_output_fds) |
+ processor_output_fds.push_back( |
+ base::ScopedFD(HANDLE_EINTR(dup(fd.get())))); |
+ if (!processor_output_fds.back().is_valid()) { |
Pawel Osciak 2016/09/12 08:54:26: This should be inside the for() and check all fds?
wuchengli 2016/09/12 11:56:52: Done.
+ PLOG(ERROR) << "Failed duplicating a dmabuf fd"; |
Pawel Osciak 2016/09/12 08:54:26: PLOGF() ?
wuchengli 2016/09/12 11:56:51: Done.
+ return false; |
+ } |
+ } |
+ // Unretained is safe because |this| owns image processor and there will |
+ // be no callbacks after processor destroys. Also, this class ensures it |
+ // is safe to post a task from child thread to decoder thread using |
+ // Unretained. |
+ image_processor_->Process( |
+ input_frame, output_buffer_index, std::move(processor_output_fds), |
+ BindToCurrentLoop(base::Bind(&V4L2VideoDecodeAccelerator::FrameProcessed, |
+ base::Unretained(this), |
+ bitstream_buffer_id))); |
+ return true; |
+} |
+ |
bool V4L2VideoDecodeAccelerator::CreateOutputBuffers() { |
DVLOGF(3); |
DCHECK(decoder_state_ == kInitialized || |
@@ -2089,10 +2284,17 @@ bool V4L2VideoDecodeAccelerator::CreateOutputBuffers() { |
DVLOGF(3) << "buffer_count=" << buffer_count |
<< ", coded_size=" << egl_image_size_.ToString(); |
+ // With ALLOCATE mode the client can sample it as RGB and doesn't need to |
+ // know the precise format. |
+ VideoPixelFormat pixel_format = |
+ (output_mode_ == Config::OutputMode::IMPORT) |
+ ? V4L2Device::V4L2PixFmtToVideoPixelFormat(egl_image_format_fourcc_) |
+ : PIXEL_FORMAT_UNKNOWN; |
+ |
child_task_runner_->PostTask( |
FROM_HERE, base::Bind(&Client::ProvidePictureBuffers, client_, |
- buffer_count, PIXEL_FORMAT_UNKNOWN, 1, |
- egl_image_size_, device_->GetTextureTarget())); |
+ buffer_count, pixel_format, 1, egl_image_size_, |
+ device_->GetTextureTarget())); |
// Go into kAwaitingPictureBuffers to prevent us from doing any more decoding |
// or event handling while we are waiting for AssignPictureBuffers(). Not |
@@ -2131,6 +2333,28 @@ void V4L2VideoDecodeAccelerator::DestroyInputBuffers() { |
} |
bool V4L2VideoDecodeAccelerator::DestroyOutputBuffers() { |
+ struct v4l2_requestbuffers reqbufs; |
+ memset(&reqbufs, 0, sizeof(reqbufs)); |
+ reqbufs.count = 0; |
+ reqbufs.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; |
+ reqbufs.memory = V4L2_MEMORY_MMAP; |
+ if (device_->Ioctl(VIDIOC_REQBUFS, &reqbufs) != 0) { |
+ PLOGF(ERROR) << "ioctl() failed: VIDIOC_REQBUFS"; |
+ NOTIFY_ERROR(PLATFORM_FAILURE); |
+ return false; |
+ } |
+ |
+ output_buffer_map_.clear(); |
+ while (!free_output_buffers_.empty()) |
+ free_output_buffers_.pop_front(); |
+ output_buffer_queued_count_ = 0; |
+ // The client may still hold some buffers. The texture holds a reference to |
+ // the buffer. It is OK to free the buffer and destroy EGLImage here. |
+ decoder_frames_at_client_ = 0; |
+ return true; |
+} |
+ |
+bool V4L2VideoDecodeAccelerator::DestroyEGLImages() { |
DVLOGF(3); |
DCHECK(child_task_runner_->BelongsToCurrentThread()); |
DCHECK(!output_streamon_); |
@@ -2160,24 +2384,6 @@ bool V4L2VideoDecodeAccelerator::DestroyOutputBuffers() { |
output_record.picture_id)); |
} |
- struct v4l2_requestbuffers reqbufs; |
- memset(&reqbufs, 0, sizeof(reqbufs)); |
- reqbufs.count = 0; |
- reqbufs.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; |
- reqbufs.memory = V4L2_MEMORY_MMAP; |
- if (device_->Ioctl(VIDIOC_REQBUFS, &reqbufs) != 0) { |
- PLOGF(ERROR) << "ioctl() failed: VIDIOC_REQBUFS"; |
- success = false; |
- } |
- |
- output_buffer_map_.clear(); |
- while (!free_output_buffers_.empty()) |
- free_output_buffers_.pop(); |
- output_buffer_queued_count_ = 0; |
- // The client may still hold some buffers. The texture holds a reference to |
- // the buffer. It is OK to free the buffer and destroy EGLImage here. |
- decoder_frames_at_client_ = 0; |
- |
return success; |
} |
@@ -2185,7 +2391,7 @@ void V4L2VideoDecodeAccelerator::ResolutionChangeDestroyBuffers() { |
DCHECK(child_task_runner_->BelongsToCurrentThread()); |
DVLOGF(3); |
- if (!DestroyOutputBuffers()) { |
+ if (!DestroyEGLImages()) { |
LOGF(ERROR) << "Failed destroying output buffers."; |
NOTIFY_ERROR(PLATFORM_FAILURE); |
return; |
@@ -2260,7 +2466,6 @@ void V4L2VideoDecodeAccelerator::FrameProcessed(int32_t bitstream_buffer_id, |
if (!image_processor_bitstream_buffer_ids_.empty() && |
image_processor_bitstream_buffer_ids_.front() == bitstream_buffer_id) { |
DVLOGF(3) << "picture_id=" << output_record.picture_id; |
- DCHECK_NE(output_record.egl_image, EGL_NO_IMAGE_KHR); |
DCHECK_NE(output_record.picture_id, -1); |
// Send the processed frame to render. |
output_record.state = kAtClient; |
@@ -2281,7 +2486,7 @@ void V4L2VideoDecodeAccelerator::FrameProcessed(int32_t bitstream_buffer_id, |
DVLOGF(2) << "Bitstream buffer id " << bitstream_buffer_id << " not found " |
<< "because of Reset. Drop the buffer"; |
output_record.state = kFree; |
- free_output_buffers_.push(output_buffer_index); |
+ free_output_buffers_.push_back(output_buffer_index); |
// Do not queue the buffer if a resolution change is in progress. The queue |
// is about to be destroyed anyway. Otherwise, the queue will be started in |
// Enqueue and REQBUFS(0) will fail. |