Chromium Code Reviews
| Index: media/gpu/v4l2_video_decode_accelerator.cc |
| diff --git a/media/gpu/v4l2_video_decode_accelerator.cc b/media/gpu/v4l2_video_decode_accelerator.cc |
| index 41aacbff4a67e203e8124f8dda51394920f98b40..7cb665f56713bb64ba58b920822ee474c0a79087 100644 |
| --- a/media/gpu/v4l2_video_decode_accelerator.cc |
| +++ b/media/gpu/v4l2_video_decode_accelerator.cc |
| @@ -16,6 +16,7 @@ |
| #include "base/bind.h" |
| #include "base/command_line.h" |
| +#include "base/memory/ptr_util.h" |
| #include "base/message_loop/message_loop.h" |
| #include "base/numerics/safe_conversions.h" |
| #include "base/single_thread_task_runner.h" |
| @@ -138,6 +139,7 @@ V4L2VideoDecodeAccelerator::OutputRecord::OutputRecord() |
| egl_image(EGL_NO_IMAGE_KHR), |
| egl_sync(EGL_NO_SYNC_KHR), |
| picture_id(-1), |
| + texture_id(0), |
| cleared(false) {} |
| V4L2VideoDecodeAccelerator::OutputRecord::~OutputRecord() {} |
| @@ -156,6 +158,7 @@ V4L2VideoDecodeAccelerator::V4L2VideoDecodeAccelerator( |
| : child_task_runner_(base::ThreadTaskRunnerHandle::Get()), |
| decoder_thread_("V4L2DecoderThread"), |
| decoder_state_(kUninitialized), |
| + output_mode_(Config::OutputMode::ALLOCATE), |
| device_(device), |
| decoder_delay_bitstream_buffer_id_(-1), |
| decoder_current_input_buffer_(-1), |
| @@ -188,6 +191,7 @@ V4L2VideoDecodeAccelerator::~V4L2VideoDecodeAccelerator() { |
| DCHECK(!device_poll_thread_.IsRunning()); |
| DestroyInputBuffers(); |
| + DestroyEGLImages(); |
| DestroyOutputBuffers(); |
| // These maps have members that should be manually destroyed, e.g. file |
| @@ -214,13 +218,9 @@ bool V4L2VideoDecodeAccelerator::Initialize(const Config& config, |
| return false; |
| } |
| - if (config.output_mode != Config::OutputMode::ALLOCATE) { |
| - NOTREACHED() << "Only ALLOCATE OutputMode is supported by this VDA"; |
| - return false; |
| - } |
| - |
| - if (get_gl_context_cb_.is_null() || make_context_current_cb_.is_null()) { |
| - NOTREACHED() << "GL callbacks are required for this VDA"; |
| + if (config.output_mode != Config::OutputMode::ALLOCATE && |
| + config.output_mode != Config::OutputMode::IMPORT) { |
| + NOTREACHED() << "Only ALLOCATE and IMPORT OutputModes are supported"; |
| return false; |
| } |
| @@ -243,18 +243,22 @@ bool V4L2VideoDecodeAccelerator::Initialize(const Config& config, |
| } |
| // We need the context to be initialized to query extensions. |
| - if (!make_context_current_cb_.Run()) { |
| - LOGF(ERROR) << "could not make context current"; |
| - return false; |
| - } |
| + if (!make_context_current_cb_.is_null()) { |
| + if (!make_context_current_cb_.Run()) { |
| + LOGF(ERROR) << "could not make context current"; |
| + return false; |
| + } |
| // TODO(posciak): crbug.com/450898. |
| #if defined(ARCH_CPU_ARMEL) |
| - if (!gl::g_driver_egl.ext.b_EGL_KHR_fence_sync) { |
| - LOGF(ERROR) << "context does not have EGL_KHR_fence_sync"; |
| - return false; |
| - } |
| + if (!gl::g_driver_egl.ext.b_EGL_KHR_fence_sync) { |
| + LOGF(ERROR) << "context does not have EGL_KHR_fence_sync"; |
| + return false; |
| + } |
| #endif |
| + } else { |
| + DVLOG(1) << "No GL callbacks provided, initializing without GL support"; |
| + } |
| // Capabilities check. |
| struct v4l2_capability caps; |
| @@ -288,6 +292,7 @@ bool V4L2VideoDecodeAccelerator::Initialize(const Config& config, |
| } |
| decoder_state_ = kInitialized; |
| + output_mode_ = config.output_mode; |
| // StartDevicePoll will NOTIFY_ERROR on failure, so IgnoreResult is fine here. |
| decoder_thread_.task_runner()->PostTask( |
| @@ -359,141 +364,256 @@ void V4L2VideoDecodeAccelerator::AssignPictureBuffersTask( |
| return; |
| } |
| - if (image_processor_device_) { |
| - DCHECK(!image_processor_); |
| - image_processor_.reset(new V4L2ImageProcessor(image_processor_device_)); |
| - // Unretained is safe because |this| owns image processor and there will be |
| - // no callbacks after processor destroys. |
| - if (!image_processor_->Initialize( |
| - V4L2Device::V4L2PixFmtToVideoPixelFormat(output_format_fourcc_), |
| - V4L2Device::V4L2PixFmtToVideoPixelFormat(egl_image_format_fourcc_), |
| - V4L2_MEMORY_DMABUF, visible_size_, coded_size_, visible_size_, |
| - visible_size_, buffers.size(), |
| - base::Bind(&V4L2VideoDecodeAccelerator::ImageProcessorError, |
| - base::Unretained(this)))) { |
| - LOGF(ERROR) << "Initialize image processor failed"; |
| - NOTIFY_ERROR(PLATFORM_FAILURE); |
| - return; |
| - } |
| - DCHECK(image_processor_->output_allocated_size() == egl_image_size_); |
| - if (image_processor_->input_allocated_size() != coded_size_) { |
| - LOGF(ERROR) << "Image processor should be able to take the output coded " |
| - << "size of decoder " << coded_size_.ToString() |
| - << " without adjusting to " |
| - << image_processor_->input_allocated_size().ToString(); |
| - NOTIFY_ERROR(PLATFORM_FAILURE); |
| - return; |
| - } |
| + DCHECK(free_output_buffers_.empty()); |
| + DCHECK(output_buffer_map_.empty()); |
| + output_buffer_map_.resize(buffers.size()); |
| + if (image_processor_device_ && output_mode_ == Config::OutputMode::ALLOCATE) { |
| + CreateImageProcessor(); |
|
kcwu 2016/09/07 03:46:23
return if CreateImageProcessor failed.
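
A minimal sketch of one way to do that (assuming CreateImageProcessor() is
changed to return bool, returning false on its existing NOTIFY_ERROR() paths;
not part of this CL):

    // Caller side in AssignPictureBuffersTask():
    if (image_processor_device_ &&
        output_mode_ == Config::OutputMode::ALLOCATE) {
      if (!CreateImageProcessor())
        return;  // CreateImageProcessor() has already called NOTIFY_ERROR().
    }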
|
| } |
| - child_task_runner_->PostTask( |
| - FROM_HERE, |
| - base::Bind(&V4L2VideoDecodeAccelerator::CreateEGLImages, weak_this_, |
| - buffers, egl_image_format_fourcc_, egl_image_planes_count_)); |
| + for (size_t i = 0; i < output_buffer_map_.size(); ++i) { |
| + DCHECK(buffers[i].size() == egl_image_size_); |
| + DCHECK_EQ(1u, buffers[i].texture_ids().size()); |
| + |
| + OutputRecord& output_record = output_buffer_map_[i]; |
| + DCHECK_EQ(output_record.state, kFree); |
| + DCHECK_EQ(output_record.egl_image, EGL_NO_IMAGE_KHR); |
| + DCHECK_EQ(output_record.egl_sync, EGL_NO_SYNC_KHR); |
| + DCHECK_EQ(output_record.picture_id, -1); |
| + DCHECK(output_record.processor_input_fds.empty()); |
| + DCHECK_EQ(output_record.cleared, false); |
| + |
| + output_record.picture_id = buffers[i].id(); |
| + output_record.texture_id = buffers[i].texture_ids()[0]; |
| + // This will remain kAtClient until ImportBufferForPicture is called, either |
| + // by the client, or by ourselves, if we are allocating. |
| + output_record.state = kAtClient; |
| + |
| + if (image_processor_device_) { |
| + std::vector<base::ScopedFD> dmabuf_fds = device_->GetDmabufsForV4L2Buffer( |
| + i, output_planes_count_, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE); |
| + if (dmabuf_fds.empty()) { |
| + LOGF(ERROR) << "Failed to get DMABUFs of decoder."; |
| + NOTIFY_ERROR(PLATFORM_FAILURE); |
| + return; |
| + } |
| + output_record.processor_input_fds = std::move(dmabuf_fds); |
| + } |
| + |
| + if (output_mode_ == Config::OutputMode::ALLOCATE) { |
| + std::vector<base::ScopedFD> dmabuf_fds; |
| + dmabuf_fds = egl_image_device_->GetDmabufsForV4L2Buffer( |
| + i, egl_image_planes_count_, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE); |
| + if (dmabuf_fds.empty()) { |
| + LOGF(ERROR) << "Failed to get DMABUFs for EGLImage."; |
| + NOTIFY_ERROR(PLATFORM_FAILURE); |
| + return; |
| + } |
| + auto passed_dmabuf_fds(base::WrapUnique( |
| + new std::vector<base::ScopedFD>(std::move(dmabuf_fds)))); |
| + ImportBufferForPictureTask(output_record.picture_id, |
| + std::move(passed_dmabuf_fds), |
| + egl_image_size_.width()); |
| + } // else we'll get triggered via ImportBufferForPicture() from client. |
| + |
| + DVLOGF(3) << "AssignPictureBuffers(): buffer[" << i |
| + << "]: picture_id=" << output_record.picture_id; |
| + } |
| } |
| -void V4L2VideoDecodeAccelerator::CreateEGLImages( |
| - const std::vector<media::PictureBuffer>& buffers, |
| - uint32_t output_format_fourcc, |
| - size_t output_planes_count) { |
| - DVLOGF(3); |
| +void V4L2VideoDecodeAccelerator::CreateEGLImageFor( |
| + size_t buffer_index, |
| + int32_t picture_buffer_id, |
| + std::unique_ptr<std::vector<base::ScopedFD>> passed_dmabuf_fds, |
| + GLuint texture_id, |
| + const gfx::Size& size, |
| + uint32_t fourcc) { |
| + DVLOGF(3) << "index=" << buffer_index; |
| DCHECK(child_task_runner_->BelongsToCurrentThread()); |
| if (get_gl_context_cb_.is_null() || make_context_current_cb_.is_null()) { |
| - DLOGF(ERROR) << "GL callbacks required for binding to EGLImages"; |
| + DLOG(ERROR) << "GL callbacks required for binding to EGLImages"; |
| NOTIFY_ERROR(INVALID_ARGUMENT); |
| return; |
| } |
| gl::GLContext* gl_context = get_gl_context_cb_.Run(); |
| if (!gl_context || !make_context_current_cb_.Run()) { |
| - DLOGF(ERROR) << "No GL context"; |
| + DLOG(ERROR) << "No GL context"; |
| NOTIFY_ERROR(PLATFORM_FAILURE); |
| return; |
| } |
| gl::ScopedTextureBinder bind_restore(GL_TEXTURE_EXTERNAL_OES, 0); |
| - std::vector<EGLImageKHR> egl_images; |
| - for (size_t i = 0; i < buffers.size(); ++i) { |
| - std::vector<base::ScopedFD> dmabuf_fds; |
| - dmabuf_fds = egl_image_device_->GetDmabufsForV4L2Buffer( |
| - i, egl_image_planes_count_, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE); |
| - if (dmabuf_fds.empty()) { |
| - LOGF(ERROR) << "Failed to get DMABUFs for EGLImage."; |
| - NOTIFY_ERROR(PLATFORM_FAILURE); |
| - return; |
| - } |
| - |
| - EGLImageKHR egl_image = egl_image_device_->CreateEGLImage( |
| - egl_display_, gl_context->GetHandle(), buffers[i].texture_ids()[0], |
| - buffers[i].size(), i, egl_image_format_fourcc_, dmabuf_fds); |
| - if (egl_image == EGL_NO_IMAGE_KHR) { |
| - LOGF(ERROR) << "could not create EGLImageKHR," |
| - << " index=" << i |
| - << " texture_id=" << buffers[i].texture_ids()[0]; |
| - for (EGLImageKHR image : egl_images) { |
| - if (egl_image_device_->DestroyEGLImage(egl_display_, image) != EGL_TRUE) |
| - DVLOGF(1) << "DestroyEGLImage failed."; |
| - } |
| - NOTIFY_ERROR(PLATFORM_FAILURE); |
| - return; |
| - } |
| - egl_images.push_back(egl_image); |
| + EGLImageKHR egl_image = egl_image_device_->CreateEGLImage( |
| + egl_display_, gl_context->GetHandle(), texture_id, size, buffer_index, |
| + fourcc, *passed_dmabuf_fds); |
| + if (egl_image == EGL_NO_IMAGE_KHR) { |
| + LOGF(ERROR) << "could not create EGLImageKHR," |
| + << " index=" << buffer_index << " texture_id=" << texture_id; |
| + NOTIFY_ERROR(PLATFORM_FAILURE); |
| + return; |
| } |
| decoder_thread_.task_runner()->PostTask( |
| - FROM_HERE, base::Bind(&V4L2VideoDecodeAccelerator::AssignEGLImages, |
| - base::Unretained(this), buffers, egl_images)); |
| + FROM_HERE, |
| + base::Bind(&V4L2VideoDecodeAccelerator::AssignEGLImage, |
| + base::Unretained(this), buffer_index, picture_buffer_id, |
| + egl_image, base::Passed(&passed_dmabuf_fds))); |
| } |
| -void V4L2VideoDecodeAccelerator::AssignEGLImages( |
| - const std::vector<media::PictureBuffer>& buffers, |
| - const std::vector<EGLImageKHR>& egl_images) { |
| - DVLOGF(3); |
| +void V4L2VideoDecodeAccelerator::AssignEGLImage( |
| + size_t buffer_index, |
| + int32_t picture_buffer_id, |
| + EGLImageKHR egl_image, |
| + std::unique_ptr<std::vector<base::ScopedFD>> passed_dmabuf_fds) { |
| + DVLOGF(3) << "index=" << buffer_index; |
| DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread()); |
| - DCHECK_EQ(buffers.size(), egl_images.size()); |
| - DCHECK(free_output_buffers_.empty()); |
| - DCHECK(output_buffer_map_.empty()); |
| - output_buffer_map_.resize(buffers.size()); |
| - for (size_t i = 0; i < output_buffer_map_.size(); ++i) { |
| - DCHECK(buffers[i].size() == egl_image_size_); |
| + // It's possible that while waiting for the EGLImages to be allocated and |
| + // assigned, we have already decoded more of the stream and saw another |
| + // resolution change. This is a normal situation, in such a case either there |
| + // is no output record with this index awaiting an EGLImage to be assigned to |
| + // it, or the record is already updated to use a newer PictureBuffer and is |
| + // awaiting an EGLImage associated with a different picture_buffer_id. If so, |
| + // just discard this image, we will get the one we are waiting for later. |
| + if (buffer_index >= output_buffer_map_.size() || |
| + output_buffer_map_[buffer_index].picture_id != picture_buffer_id) { |
| + DVLOGF(3) << "Picture set already changed, dropping EGLImage"; |
| + child_task_runner_->PostTask( |
| + FROM_HERE, base::Bind(base::IgnoreResult(&V4L2Device::DestroyEGLImage), |
| + device_, egl_display_, egl_image)); |
| + return; |
| + } |
| - OutputRecord& output_record = output_buffer_map_[i]; |
| - DCHECK_EQ(output_record.state, kFree); |
| - DCHECK_EQ(output_record.egl_image, EGL_NO_IMAGE_KHR); |
| - DCHECK_EQ(output_record.egl_sync, EGL_NO_SYNC_KHR); |
| - DCHECK_EQ(output_record.picture_id, -1); |
| - DCHECK_EQ(output_record.cleared, false); |
| - DCHECK_LE(1u, buffers[i].texture_ids().size()); |
| + OutputRecord& output_record = output_buffer_map_[buffer_index]; |
| + DCHECK_EQ(output_record.egl_image, EGL_NO_IMAGE_KHR); |
| + DCHECK_EQ(output_record.egl_sync, EGL_NO_SYNC_KHR); |
| + DCHECK_EQ(output_record.state, kFree); |
| + DCHECK_EQ(std::count(free_output_buffers_.begin(), free_output_buffers_.end(), |
| + buffer_index), |
| + 0); |
| + output_record.egl_image = egl_image; |
| + free_output_buffers_.push_back(buffer_index); |
| + if (decoder_state_ == kAwaitingPictureBuffers) { |
| + DVLOG(1) << "Change state to kDecoding"; |
| + decoder_state_ = kDecoding; |
| + } |
| + if (reset_pending_) { |
| + FinishReset(); |
| + return; |
| + } |
| + if (decoder_state_ != kChangingResolution) { |
| + Enqueue(); |
| + ScheduleDecodeBufferTaskIfNeeded(); |
| + } |
| +} |
| - if (image_processor_device_) { |
| - std::vector<base::ScopedFD> fds = device_->GetDmabufsForV4L2Buffer( |
| - i, output_planes_count_, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE); |
| - if (fds.empty()) { |
| - LOGF(ERROR) << "Failed to get DMABUFs of decoder."; |
| - NOTIFY_ERROR(PLATFORM_FAILURE); |
| - return; |
| - } |
| - output_record.fds = std::move(fds); |
| - } |
| +void V4L2VideoDecodeAccelerator::ImportBufferForPicture( |
| + int32_t picture_buffer_id, |
| + const gfx::GpuMemoryBufferHandle& gpu_memory_buffer_handle) { |
| + DVLOGF(3) << "picture_buffer_id=" << picture_buffer_id; |
| + DCHECK(child_task_runner_->BelongsToCurrentThread()); |
| - output_record.egl_image = egl_images[i]; |
| - output_record.picture_id = buffers[i].id(); |
| + auto passed_dmabuf_fds(base::WrapUnique(new std::vector<base::ScopedFD>())); |
| + int32_t stride = egl_image_size_.width(); |
| +#if defined(USE_OZONE) |
| + for (const auto& fd : gpu_memory_buffer_handle.native_pixmap_handle.fds) { |
| + DCHECK_NE(fd.fd, -1); |
| + passed_dmabuf_fds->push_back(base::ScopedFD(fd.fd)); |
| + } |
| + stride = gpu_memory_buffer_handle.native_pixmap_handle.planes[0].stride; |
| + for (const auto& plane : |
| + gpu_memory_buffer_handle.native_pixmap_handle.planes) { |
| + DVLOGF(3) << ": offset=" << plane.offset << ", stride=" << plane.stride; |
| + } |
| +#endif |
| - free_output_buffers_.push(i); |
| - DVLOGF(3) << "buffer[" << i << "]: picture_id=" << output_record.picture_id; |
| + if (output_mode_ != Config::OutputMode::IMPORT) { |
| + LOGF(ERROR) << "Cannot import in non-import mode"; |
| + NOTIFY_ERROR(INVALID_ARGUMENT); |
| + return; |
| } |
| - decoder_state_ = kDecoding; |
| - Enqueue(); |
| - if (reset_pending_) { |
| - FinishReset(); |
| + decoder_thread_.message_loop()->PostTask( |
| + FROM_HERE, |
| + base::Bind(&V4L2VideoDecodeAccelerator::ImportBufferForPictureTask, |
| + base::Unretained(this), picture_buffer_id, |
| + base::Passed(&passed_dmabuf_fds), stride)); |
| +} |
| + |
| +void V4L2VideoDecodeAccelerator::ImportBufferForPictureTask( |
| + int32_t picture_buffer_id, |
| + std::unique_ptr<std::vector<base::ScopedFD>> passed_dmabuf_fds, |
| + int32_t stride) { |
| + DVLOGF(3) << "picture_buffer_id=" << picture_buffer_id; |
| + DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread()); |
| + |
| + if (image_processor_device_ && !image_processor_) { |
| + DVLOGF(3) << "Original egl_image_size=" << egl_image_size_.ToString() |
| + << ", width is adjusted to=" << stride; |
| + egl_image_size_.set_width(stride); |
| + CreateImageProcessor(); |
|
kcwu 2016/09/07 03:46:23
return if CreateImageProcessor failed.
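
The same bool-returning sketch would apply here, with
ImportBufferForPictureTask() bailing out before it searches
output_buffer_map_:

    egl_image_size_.set_width(stride);
    if (!CreateImageProcessor())
      return;  // Error already reported via NOTIFY_ERROR() inside.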
|
| + } |
| + |
| + const auto iter = |
| + std::find_if(output_buffer_map_.begin(), output_buffer_map_.end(), |
| + [picture_buffer_id](const OutputRecord& output_record) { |
| + return output_record.picture_id == picture_buffer_id; |
| + }); |
| + if (iter == output_buffer_map_.end()) { |
| + // It's possible that we've already posted a DismissPictureBuffer for this |
| + // picture, but it has not yet executed when this ImportBufferForPicture was |
| + // posted to us by the client. In that case just ignore this (we've already |
| + // dismissed it and accounted for that). |
| + DVLOGF(3) << "got picture id=" << picture_buffer_id |
| + << " not in use (anymore?)."; |
| return; |
| } |
| - ScheduleDecodeBufferTaskIfNeeded(); |
| + if (iter->state != kAtClient) { |
| + LOGF(ERROR) << "Cannot import buffer that not owned by client"; |
| + NOTIFY_ERROR(INVALID_ARGUMENT); |
| + return; |
| + } |
| + |
| + size_t index = iter - output_buffer_map_.begin(); |
| + DCHECK_EQ(std::count(free_output_buffers_.begin(), free_output_buffers_.end(), |
| + index), |
| + 0); |
| + |
| + iter->state = kFree; |
| + if (iter->texture_id != 0) { |
| + if (iter->egl_image != EGL_NO_IMAGE_KHR) { |
| + child_task_runner_->PostTask( |
| + FROM_HERE, |
| + base::Bind(base::IgnoreResult(&V4L2Device::DestroyEGLImage), device_, |
| + egl_display_, iter->egl_image)); |
| + } |
| + |
| + child_task_runner_->PostTask( |
| + FROM_HERE, |
| + base::Bind(&V4L2VideoDecodeAccelerator::CreateEGLImageFor, weak_this_, |
| + index, picture_buffer_id, base::Passed(&passed_dmabuf_fds), |
| + iter->texture_id, egl_image_size_, |
| + egl_image_format_fourcc_)); |
| + } else { |
| + // No need for an EGLImage, start using this buffer now. |
| + DVLOGF(2) << "egl_image_planes_count_=" << egl_image_planes_count_ |
| + << ", passed_dmabuf_fds->size()=" << passed_dmabuf_fds->size(); |
| + DCHECK_EQ(egl_image_planes_count_, passed_dmabuf_fds->size()); |
| + iter->processor_output_fds.swap(*passed_dmabuf_fds); |
| + free_output_buffers_.push_back(index); |
| + if (decoder_state_ == kAwaitingPictureBuffers) { |
| + DVLOG(1) << "Change state to kDecoding"; |
| + decoder_state_ = kDecoding; |
| + } |
| + if (decoder_state_ != kChangingResolution) { |
| + Enqueue(); |
| + ScheduleDecodeBufferTaskIfNeeded(); |
| + } |
| + } |
| } |
| void V4L2VideoDecodeAccelerator::ReusePictureBuffer(int32_t picture_buffer_id) { |
| @@ -501,25 +621,28 @@ void V4L2VideoDecodeAccelerator::ReusePictureBuffer(int32_t picture_buffer_id) { |
| // Must be run on child thread, as we'll insert a sync in the EGL context. |
| DCHECK(child_task_runner_->BelongsToCurrentThread()); |
| - if (!make_context_current_cb_.Run()) { |
| - LOGF(ERROR) << "could not make context current"; |
| - NOTIFY_ERROR(PLATFORM_FAILURE); |
| - return; |
| - } |
| + std::unique_ptr<EGLSyncKHRRef> egl_sync_ref; |
| - EGLSyncKHR egl_sync = EGL_NO_SYNC_KHR; |
| + if (!make_context_current_cb_.is_null()) { |
| + if (!make_context_current_cb_.Run()) { |
| + LOGF(ERROR) << "could not make context current"; |
| + NOTIFY_ERROR(PLATFORM_FAILURE); |
| + return; |
| + } |
| + |
| + EGLSyncKHR egl_sync = EGL_NO_SYNC_KHR; |
| // TODO(posciak): crbug.com/450898. |
| #if defined(ARCH_CPU_ARMEL) |
| - egl_sync = eglCreateSyncKHR(egl_display_, EGL_SYNC_FENCE_KHR, NULL); |
| - if (egl_sync == EGL_NO_SYNC_KHR) { |
| - LOGF(ERROR) << "eglCreateSyncKHR() failed"; |
| - NOTIFY_ERROR(PLATFORM_FAILURE); |
| - return; |
| - } |
| + egl_sync = eglCreateSyncKHR(egl_display_, EGL_SYNC_FENCE_KHR, NULL); |
| + if (egl_sync == EGL_NO_SYNC_KHR) { |
| + LOGF(ERROR) << "eglCreateSyncKHR() failed"; |
| + NOTIFY_ERROR(PLATFORM_FAILURE); |
| + return; |
| + } |
| #endif |
| - std::unique_ptr<EGLSyncKHRRef> egl_sync_ref( |
| - new EGLSyncKHRRef(egl_display_, egl_sync)); |
| + egl_sync_ref.reset(new EGLSyncKHRRef(egl_display_, egl_sync)); |
| + } |
| decoder_thread_.task_runner()->PostTask( |
| FROM_HERE, base::Bind(&V4L2VideoDecodeAccelerator::ReusePictureBufferTask, |
| @@ -1203,13 +1326,13 @@ void V4L2VideoDecodeAccelerator::Dequeue() { |
| } |
| OutputRecord& output_record = output_buffer_map_[dqbuf.index]; |
| DCHECK_EQ(output_record.state, kAtDevice); |
| - DCHECK_NE(output_record.egl_image, EGL_NO_IMAGE_KHR); |
| + // DCHECK_NE(output_record.egl_image, EGL_NO_IMAGE_KHR); |
|
kcwu 2016/09/07 03:46:23
remove
|
| DCHECK_NE(output_record.picture_id, -1); |
| output_buffer_queued_count_--; |
| if (dqbuf.m.planes[0].bytesused == 0) { |
| // This is an empty output buffer returned as part of a flush. |
| output_record.state = kFree; |
| - free_output_buffers_.push(dqbuf.index); |
| + free_output_buffers_.push_back(dqbuf.index); |
| } else { |
| int32_t bitstream_buffer_id = dqbuf.timestamp.tv_sec; |
| DCHECK_GE(bitstream_buffer_id, 0); |
| @@ -1218,20 +1341,24 @@ void V4L2VideoDecodeAccelerator::Dequeue() { |
| if (image_processor_device_) { |
| output_record.state = kAtProcessor; |
| image_processor_bitstream_buffer_ids_.push(bitstream_buffer_id); |
| - std::vector<int> fds; |
| - for (auto& fd : output_record.fds) { |
| - fds.push_back(fd.get()); |
| + std::vector<int> processor_input_fds; |
| + for (auto& fd : output_record.processor_input_fds) { |
| + processor_input_fds.push_back(fd.get()); |
| + } |
| + std::vector<int> processor_output_fds; |
| + for (auto& fd : output_record.processor_output_fds) { |
| + processor_output_fds.push_back(fd.get()); |
| } |
| scoped_refptr<VideoFrame> frame = VideoFrame::WrapExternalDmabufs( |
| V4L2Device::V4L2PixFmtToVideoPixelFormat(output_format_fourcc_), |
| - coded_size_, gfx::Rect(visible_size_), visible_size_, fds, |
| - base::TimeDelta()); |
| + coded_size_, gfx::Rect(visible_size_), visible_size_, |
| + processor_input_fds, base::TimeDelta()); |
| // Unretained is safe because |this| owns image processor and there will |
| // be no callbacks after processor destroys. Also, this class ensures it |
| // is safe to post a task from child thread to decoder thread using |
| // Unretained. |
| image_processor_->Process( |
| - frame, dqbuf.index, |
| + frame, dqbuf.index, processor_output_fds, |
| BindToCurrentLoop( |
| base::Bind(&V4L2VideoDecodeAccelerator::FrameProcessed, |
| base::Unretained(this), bitstream_buffer_id))); |
| @@ -1287,7 +1414,6 @@ bool V4L2VideoDecodeAccelerator::EnqueueOutputRecord() { |
| DVLOGF(3) << "buffer " << buffer; |
| OutputRecord& output_record = output_buffer_map_[buffer]; |
| DCHECK_EQ(output_record.state, kFree); |
| - DCHECK_NE(output_record.egl_image, EGL_NO_IMAGE_KHR); |
| DCHECK_NE(output_record.picture_id, -1); |
| if (output_record.egl_sync != EGL_NO_SYNC_KHR) { |
| TRACE_EVENT0("Video Decoder", |
| @@ -1318,8 +1444,11 @@ bool V4L2VideoDecodeAccelerator::EnqueueOutputRecord() { |
| qbuf.memory = V4L2_MEMORY_MMAP; |
| qbuf.m.planes = qbuf_planes.get(); |
| qbuf.length = output_planes_count_; |
| + DVLOG(2) << "qbuf.index=" << qbuf.index |
| + << ", output_mode_=" << (int)output_mode_ |
| + << ", output_planes_count_=" << output_planes_count_; |
| IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_QBUF, &qbuf); |
| - free_output_buffers_.pop(); |
| + free_output_buffers_.pop_front(); |
| output_record.state = kAtDevice; |
| output_buffer_queued_count_++; |
| return true; |
| @@ -1367,12 +1496,14 @@ void V4L2VideoDecodeAccelerator::ReusePictureBufferTask( |
| } |
| DCHECK_EQ(output_record.egl_sync, EGL_NO_SYNC_KHR); |
| - output_record.egl_sync = egl_sync_ref->egl_sync; |
| output_record.state = kFree; |
| - free_output_buffers_.push(index); |
| + free_output_buffers_.push_back(index); |
| decoder_frames_at_client_--; |
| - // Take ownership of the EGLSync. |
| - egl_sync_ref->egl_sync = EGL_NO_SYNC_KHR; |
| + if (egl_sync_ref) { |
| + output_record.egl_sync = egl_sync_ref->egl_sync; |
| + // Take ownership of the EGLSync. |
| + egl_sync_ref->egl_sync = EGL_NO_SYNC_KHR; |
| + } |
| // We got a buffer back, so enqueue it back. |
| Enqueue(); |
| } |
| @@ -1638,7 +1769,7 @@ bool V4L2VideoDecodeAccelerator::StopOutputStream() { |
| OutputRecord& output_record = output_buffer_map_[i]; |
| if (output_record.state == kAtDevice) { |
| output_record.state = kFree; |
| - free_output_buffers_.push(i); |
| + free_output_buffers_.push_back(i); |
| DCHECK_EQ(output_record.egl_sync, EGL_NO_SYNC_KHR); |
| } |
| } |
| @@ -1708,6 +1839,8 @@ void V4L2VideoDecodeAccelerator::FinishResolutionChange() { |
| return; |
| } |
| + DestroyOutputBuffers(); |
| + |
| struct v4l2_format format; |
| bool again; |
| gfx::Size visible_size; |
| @@ -2003,6 +2136,10 @@ bool V4L2VideoDecodeAccelerator::SetupFormats() { |
| } |
| egl_image_device_ = image_processor_device_; |
| } else { |
| + if (output_mode_ == Config::OutputMode::IMPORT) { |
| + LOGF(ERROR) << "Import mode is unsupported without image processor."; |
| + return false; |
| + } |
| egl_image_format_fourcc_ = output_format_fourcc_; |
| egl_image_device_ = device_; |
| } |
| @@ -2069,6 +2206,85 @@ uint32_t V4L2VideoDecodeAccelerator::FindImageProcessorOutputFormat() { |
| return 0; |
| } |
| +void V4L2VideoDecodeAccelerator::CreateImageProcessor() { |
| + DVLOGF(3); |
| + image_processor_.reset(new V4L2ImageProcessor(image_processor_device_)); |
| + v4l2_memory output_memory_type = |
| + (output_mode_ == Config::OutputMode::ALLOCATE ? V4L2_MEMORY_MMAP |
| + : V4L2_MEMORY_DMABUF); |
| + // Unretained is safe because |this| owns image processor and there will be |
| + // no callbacks after processor destroys. |
| + if (!image_processor_->Initialize( |
| + V4L2Device::V4L2PixFmtToVideoPixelFormat(output_format_fourcc_), |
| + V4L2Device::V4L2PixFmtToVideoPixelFormat(egl_image_format_fourcc_), |
| + V4L2_MEMORY_DMABUF, output_memory_type, visible_size_, coded_size_, |
| + visible_size_, egl_image_size_, output_buffer_map_.size(), |
| + base::Bind(&V4L2VideoDecodeAccelerator::ImageProcessorError, |
| + base::Unretained(this)))) { |
| + LOGF(ERROR) << "Initialize image processor failed"; |
| + NOTIFY_ERROR(PLATFORM_FAILURE); |
| + return; |
| + } |
| + DCHECK(image_processor_->output_allocated_size() == egl_image_size_); |
| + DVLOGF(3) << "image_processor_->output_allocated_size()=" |
| + << image_processor_->output_allocated_size().ToString(); |
| + if (image_processor_->input_allocated_size() != coded_size_) { |
| + LOGF(ERROR) << "Image processor should be able to take the output coded " |
| + << "size of decoder " << coded_size_.ToString() |
| + << " without adjusting to " |
| + << image_processor_->input_allocated_size().ToString(); |
| + NOTIFY_ERROR(PLATFORM_FAILURE); |
| + return; |
| + } |
| +} |
| + |
| +void V4L2VideoDecodeAccelerator::FrameProcessed(int32_t bitstream_buffer_id, |
| + int output_buffer_index) { |
| + DVLOGF(3) << "output_buffer_index=" << output_buffer_index |
| + << ", bitstream_buffer_id=" << bitstream_buffer_id; |
| + DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread()); |
| + DCHECK_GE(output_buffer_index, 0); |
| + DCHECK_LT(output_buffer_index, static_cast<int>(output_buffer_map_.size())); |
| + |
| + OutputRecord& output_record = output_buffer_map_[output_buffer_index]; |
| + DCHECK_EQ(output_record.state, kAtProcessor); |
| + if (!image_processor_bitstream_buffer_ids_.empty() && |
| + image_processor_bitstream_buffer_ids_.front() == bitstream_buffer_id) { |
| + DVLOGF(3) << "picture_id=" << output_record.picture_id; |
| + DCHECK_NE(output_record.picture_id, -1); |
| + // Send the processed frame to render. |
| + output_record.state = kAtClient; |
| + decoder_frames_at_client_++; |
| + image_processor_bitstream_buffer_ids_.pop(); |
| + const Picture picture(output_record.picture_id, bitstream_buffer_id, |
| + gfx::Rect(visible_size_), false); |
| + pending_picture_ready_.push(PictureRecord(output_record.cleared, picture)); |
| + SendPictureReady(); |
| + output_record.cleared = true; |
| + // Flush or resolution change may be waiting image processor to finish. |
| + if (image_processor_bitstream_buffer_ids_.empty()) { |
| + NotifyFlushDoneIfNeeded(); |
| + if (decoder_state_ == kChangingResolution) |
| + StartResolutionChange(); |
| + } |
| + } else { |
| + DVLOGF(2) << "Bitstream buffer id " << bitstream_buffer_id << " not found " |
| + << "because of Reset. Drop the buffer"; |
| + output_record.state = kFree; |
| + free_output_buffers_.push_back(output_buffer_index); |
| + // Do not queue the buffer if a resolution change is in progress. The queue |
| + // is about to be destroyed anyway. Otherwise, the queue will be started in |
| + // Enqueue and REQBUFS(0) will fail. |
| + if (decoder_state_ != kChangingResolution) |
| + Enqueue(); |
| + } |
| +} |
| + |
| +void V4L2VideoDecodeAccelerator::ImageProcessorError() { |
| + LOGF(ERROR) << "Image processor error"; |
| + NOTIFY_ERROR(PLATFORM_FAILURE); |
| +} |
| + |
| bool V4L2VideoDecodeAccelerator::CreateOutputBuffers() { |
| DVLOGF(3); |
| DCHECK(decoder_state_ == kInitialized || |
| @@ -2089,10 +2305,17 @@ bool V4L2VideoDecodeAccelerator::CreateOutputBuffers() { |
| DVLOGF(3) << "buffer_count=" << buffer_count |
| << ", coded_size=" << egl_image_size_.ToString(); |
| + // With ALLOCATE mode the client can sample the texture as RGB and |
| + // doesn't need to know the precise format. |
| + VideoPixelFormat pixel_format = |
| + (output_mode_ == Config::OutputMode::IMPORT) |
| + ? V4L2Device::V4L2PixFmtToVideoPixelFormat(egl_image_format_fourcc_) |
| + : PIXEL_FORMAT_UNKNOWN; |
| + |
| child_task_runner_->PostTask( |
| FROM_HERE, base::Bind(&Client::ProvidePictureBuffers, client_, |
| - buffer_count, PIXEL_FORMAT_UNKNOWN, 1, |
| - egl_image_size_, device_->GetTextureTarget())); |
| + buffer_count, pixel_format, 1, egl_image_size_, |
| + device_->GetTextureTarget())); |
| // Go into kAwaitingPictureBuffers to prevent us from doing any more decoding |
| // or event handling while we are waiting for AssignPictureBuffers(). Not |
| @@ -2131,6 +2354,28 @@ void V4L2VideoDecodeAccelerator::DestroyInputBuffers() { |
| } |
| bool V4L2VideoDecodeAccelerator::DestroyOutputBuffers() { |
| + struct v4l2_requestbuffers reqbufs; |
| + memset(&reqbufs, 0, sizeof(reqbufs)); |
| + reqbufs.count = 0; |
| + reqbufs.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; |
| + reqbufs.memory = V4L2_MEMORY_MMAP; |
| + if (device_->Ioctl(VIDIOC_REQBUFS, &reqbufs) != 0) { |
| + PLOGF(ERROR) << "ioctl() failed: VIDIOC_REQBUFS"; |
| + NOTIFY_ERROR(PLATFORM_FAILURE); |
| + return false; |
| + } |
| + |
| + output_buffer_map_.clear(); |
| + while (!free_output_buffers_.empty()) |
| + free_output_buffers_.pop_front(); |
| + output_buffer_queued_count_ = 0; |
| + // The client may still hold some buffers. The texture holds a reference to |
| + // the buffer. It is OK to free the buffer and destroy EGLImage here. |
| + decoder_frames_at_client_ = 0; |
| + return true; |
| +} |
| + |
| +bool V4L2VideoDecodeAccelerator::DestroyEGLImages() { |
| DVLOGF(3); |
| DCHECK(child_task_runner_->BelongsToCurrentThread()); |
| DCHECK(!output_streamon_); |
| @@ -2160,24 +2405,6 @@ bool V4L2VideoDecodeAccelerator::DestroyOutputBuffers() { |
| output_record.picture_id)); |
| } |
| - struct v4l2_requestbuffers reqbufs; |
| - memset(&reqbufs, 0, sizeof(reqbufs)); |
| - reqbufs.count = 0; |
| - reqbufs.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; |
| - reqbufs.memory = V4L2_MEMORY_MMAP; |
| - if (device_->Ioctl(VIDIOC_REQBUFS, &reqbufs) != 0) { |
| - PLOGF(ERROR) << "ioctl() failed: VIDIOC_REQBUFS"; |
| - success = false; |
| - } |
| - |
| - output_buffer_map_.clear(); |
| - while (!free_output_buffers_.empty()) |
| - free_output_buffers_.pop(); |
| - output_buffer_queued_count_ = 0; |
| - // The client may still hold some buffers. The texture holds a reference to |
| - // the buffer. It is OK to free the buffer and destroy EGLImage here. |
| - decoder_frames_at_client_ = 0; |
| - |
| return success; |
| } |
| @@ -2185,7 +2412,7 @@ void V4L2VideoDecodeAccelerator::ResolutionChangeDestroyBuffers() { |
| DCHECK(child_task_runner_->BelongsToCurrentThread()); |
| DVLOGF(3); |
| - if (!DestroyOutputBuffers()) { |
| + if (!DestroyEGLImages()) { |
| LOGF(ERROR) << "Failed destroying output buffers."; |
| NOTIFY_ERROR(PLATFORM_FAILURE); |
| return; |
| @@ -2247,52 +2474,4 @@ void V4L2VideoDecodeAccelerator::PictureCleared() { |
| SendPictureReady(); |
| } |
| -void V4L2VideoDecodeAccelerator::FrameProcessed(int32_t bitstream_buffer_id, |
| - int output_buffer_index) { |
| - DVLOGF(3) << "output_buffer_index=" << output_buffer_index |
| - << ", bitstream_buffer_id=" << bitstream_buffer_id; |
| - DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread()); |
| - DCHECK_GE(output_buffer_index, 0); |
| - DCHECK_LT(output_buffer_index, static_cast<int>(output_buffer_map_.size())); |
| - |
| - OutputRecord& output_record = output_buffer_map_[output_buffer_index]; |
| - DCHECK_EQ(output_record.state, kAtProcessor); |
| - if (!image_processor_bitstream_buffer_ids_.empty() && |
| - image_processor_bitstream_buffer_ids_.front() == bitstream_buffer_id) { |
| - DVLOGF(3) << "picture_id=" << output_record.picture_id; |
| - DCHECK_NE(output_record.egl_image, EGL_NO_IMAGE_KHR); |
| - DCHECK_NE(output_record.picture_id, -1); |
| - // Send the processed frame to render. |
| - output_record.state = kAtClient; |
| - decoder_frames_at_client_++; |
| - image_processor_bitstream_buffer_ids_.pop(); |
| - const Picture picture(output_record.picture_id, bitstream_buffer_id, |
| - gfx::Rect(visible_size_), false); |
| - pending_picture_ready_.push(PictureRecord(output_record.cleared, picture)); |
| - SendPictureReady(); |
| - output_record.cleared = true; |
| - // Flush or resolution change may be waiting image processor to finish. |
| - if (image_processor_bitstream_buffer_ids_.empty()) { |
| - NotifyFlushDoneIfNeeded(); |
| - if (decoder_state_ == kChangingResolution) |
| - StartResolutionChange(); |
| - } |
| - } else { |
| - DVLOGF(2) << "Bitstream buffer id " << bitstream_buffer_id << " not found " |
| - << "because of Reset. Drop the buffer"; |
| - output_record.state = kFree; |
| - free_output_buffers_.push(output_buffer_index); |
| - // Do not queue the buffer if a resolution change is in progress. The queue |
| - // is about to be destroyed anyway. Otherwise, the queue will be started in |
| - // Enqueue and REQBUFS(0) will fail. |
| - if (decoder_state_ != kChangingResolution) |
| - Enqueue(); |
| - } |
| -} |
| - |
| -void V4L2VideoDecodeAccelerator::ImageProcessorError() { |
| - LOGF(ERROR) << "Image processor error"; |
| - NOTIFY_ERROR(PLATFORM_FAILURE); |
| -} |
| - |
| } // namespace media |
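
For reference, a rough sketch of the client-side IMPORT flow this CL enables
(only the VDA entry points below come from this file; AllocatePictureBuffers()
and HandleForBuffer() are hypothetical client helpers, and |vda| is a pointer
to this decoder):

    // After the VDA calls Client::ProvidePictureBuffers() with the real pixel
    // format, an IMPORT-mode client allocates its own buffers and hands their
    // dmabufs back, one GpuMemoryBufferHandle per picture buffer.
    std::vector<media::PictureBuffer> buffers =
        AllocatePictureBuffers(buffer_count, pixel_format, size);
    vda->AssignPictureBuffers(buffers);
    for (const media::PictureBuffer& pb : buffers) {
      gfx::GpuMemoryBufferHandle handle = HandleForBuffer(pb);  // hypothetical
      vda->ImportBufferForPicture(pb.id(), handle);
    }
    // Once a delivered Picture has been consumed, recycle its buffer so it can
    // be queued back to the device:
    vda->ReusePictureBuffer(picture.picture_buffer_id());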