| Index: media/gpu/v4l2_video_decode_accelerator.cc
|
| diff --git a/media/gpu/v4l2_video_decode_accelerator.cc b/media/gpu/v4l2_video_decode_accelerator.cc
|
| index 3f0ceeabe5f1160bfbbdfacdb2f9e96b6b3223c4..86a73cc53abd88f953fe7226a31b6e94225cfa7d 100644
|
| --- a/media/gpu/v4l2_video_decode_accelerator.cc
|
| +++ b/media/gpu/v4l2_video_decode_accelerator.cc
|
| @@ -18,6 +18,7 @@
|
| #include "base/command_line.h"
|
| #include "base/message_loop/message_loop.h"
|
| #include "base/numerics/safe_conversions.h"
|
| +#include "base/posix/eintr_wrapper.h"
|
| #include "base/single_thread_task_runner.h"
|
| #include "base/threading/thread_task_runner_handle.h"
|
| #include "base/trace_event/trace_event.h"
|
| @@ -137,6 +138,7 @@ V4L2VideoDecodeAccelerator::OutputRecord::OutputRecord()
|
| egl_image(EGL_NO_IMAGE_KHR),
|
| egl_sync(EGL_NO_SYNC_KHR),
|
| picture_id(-1),
|
| + texture_id(0),
|
| cleared(false) {}
|
|
|
| V4L2VideoDecodeAccelerator::OutputRecord::~OutputRecord() {}
|
| @@ -155,6 +157,7 @@ V4L2VideoDecodeAccelerator::V4L2VideoDecodeAccelerator(
|
| : child_task_runner_(base::ThreadTaskRunnerHandle::Get()),
|
| decoder_thread_("V4L2DecoderThread"),
|
| decoder_state_(kUninitialized),
|
| + output_mode_(Config::OutputMode::ALLOCATE),
|
| device_(device),
|
| decoder_delay_bitstream_buffer_id_(-1),
|
| decoder_current_input_buffer_(-1),
|
| @@ -194,7 +197,8 @@ V4L2VideoDecodeAccelerator::~V4L2VideoDecodeAccelerator() {
|
|
|
| bool V4L2VideoDecodeAccelerator::Initialize(const Config& config,
|
| Client* client) {
|
| - DVLOGF(3) << "profile: " << config.profile;
|
| + DVLOGF(3) << "profile: " << config.profile
|
| + << ", output_mode=" << static_cast<int>(config.output_mode);
|
| DCHECK(child_task_runner_->BelongsToCurrentThread());
|
| DCHECK_EQ(decoder_state_, kUninitialized);
|
|
|
| @@ -210,13 +214,9 @@ bool V4L2VideoDecodeAccelerator::Initialize(const Config& config,
|
| return false;
|
| }
|
|
|
| - if (config.output_mode != Config::OutputMode::ALLOCATE) {
|
| - NOTREACHED() << "Only ALLOCATE OutputMode is supported by this VDA";
|
| - return false;
|
| - }
|
| -
|
| - if (get_gl_context_cb_.is_null() || make_context_current_cb_.is_null()) {
|
| - NOTREACHED() << "GL callbacks are required for this VDA";
|
| + if (config.output_mode != Config::OutputMode::ALLOCATE &&
|
| + config.output_mode != Config::OutputMode::IMPORT) {
|
| + NOTREACHED() << "Only ALLOCATE and IMPORT OutputModes are supported";
|
| return false;
|
| }
|
|
|
| @@ -239,18 +239,22 @@ bool V4L2VideoDecodeAccelerator::Initialize(const Config& config,
|
| }
|
|
|
| // We need the context to be initialized to query extensions.
|
| - if (!make_context_current_cb_.Run()) {
|
| - LOGF(ERROR) << "could not make context current";
|
| - return false;
|
| - }
|
| + if (!make_context_current_cb_.is_null()) {
|
| + if (!make_context_current_cb_.Run()) {
|
| + LOGF(ERROR) << "could not make context current";
|
| + return false;
|
| + }
|
|
|
| // TODO(posciak): crbug.com/450898.
|
| #if defined(ARCH_CPU_ARMEL)
|
| - if (!gl::g_driver_egl.ext.b_EGL_KHR_fence_sync) {
|
| - LOGF(ERROR) << "context does not have EGL_KHR_fence_sync";
|
| - return false;
|
| - }
|
| + if (!gl::g_driver_egl.ext.b_EGL_KHR_fence_sync) {
|
| + LOGF(ERROR) << "context does not have EGL_KHR_fence_sync";
|
| + return false;
|
| + }
|
| #endif
|
| + } else {
|
| + DVLOGF(1) << "No GL callbacks provided, initializing without GL support";
|
| + }
|
|
|
| // Capabilities check.
|
| struct v4l2_capability caps;
|
| @@ -284,6 +288,7 @@ bool V4L2VideoDecodeAccelerator::Initialize(const Config& config,
|
| }
|
|
|
| decoder_state_ = kInitialized;
|
| + output_mode_ = config.output_mode;
|
|
|
| // StartDevicePoll will NOTIFY_ERROR on failure, so IgnoreResult is fine here.
|
| decoder_thread_.task_runner()->PostTask(
|
| @@ -355,44 +360,83 @@ void V4L2VideoDecodeAccelerator::AssignPictureBuffersTask(
|
| return;
|
| }
|
|
|
| - if (image_processor_device_) {
|
| - DCHECK(!image_processor_);
|
| - image_processor_.reset(new V4L2ImageProcessor(image_processor_device_));
|
| - // Unretained is safe because |this| owns image processor and there will be
|
| - // no callbacks after processor destroys.
|
| - if (!image_processor_->Initialize(
|
| - V4L2Device::V4L2PixFmtToVideoPixelFormat(output_format_fourcc_),
|
| - V4L2Device::V4L2PixFmtToVideoPixelFormat(egl_image_format_fourcc_),
|
| - V4L2_MEMORY_DMABUF, visible_size_, coded_size_, visible_size_,
|
| - visible_size_, buffers.size(),
|
| - base::Bind(&V4L2VideoDecodeAccelerator::ImageProcessorError,
|
| - base::Unretained(this)))) {
|
| - LOGF(ERROR) << "Initialize image processor failed";
|
| - NOTIFY_ERROR(PLATFORM_FAILURE);
|
| + DCHECK(free_output_buffers_.empty());
|
| + DCHECK(output_buffer_map_.empty());
|
| + output_buffer_map_.resize(buffers.size());
|
| + if (image_processor_device_ && output_mode_ == Config::OutputMode::ALLOCATE) {
|
| + if (!CreateImageProcessor())
|
| return;
|
| + }
|
| +
|
| + for (size_t i = 0; i < output_buffer_map_.size(); ++i) {
|
| + DCHECK(buffers[i].size() == egl_image_size_);
|
| +
|
| + OutputRecord& output_record = output_buffer_map_[i];
|
| + DCHECK_EQ(output_record.state, kFree);
|
| + DCHECK_EQ(output_record.egl_image, EGL_NO_IMAGE_KHR);
|
| + DCHECK_EQ(output_record.egl_sync, EGL_NO_SYNC_KHR);
|
| + DCHECK_EQ(output_record.picture_id, -1);
|
| + DCHECK_EQ(output_record.cleared, false);
|
| + DCHECK_EQ(1u, buffers[i].texture_ids().size());
|
| + DCHECK(output_record.processor_input_fds.empty());
|
| +
|
| + output_record.picture_id = buffers[i].id();
|
| + output_record.texture_id = buffers[i].texture_ids()[0];
|
| + // This will remain kAtClient until ImportBufferForPicture is called, either
|
| + // by the client, or by ourselves, if we are allocating.
|
| + output_record.state = kAtClient;
|
| +
|
| + if (image_processor_device_) {
|
| + std::vector<base::ScopedFD> dmabuf_fds = device_->GetDmabufsForV4L2Buffer(
|
| + i, output_planes_count_, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
|
| + if (dmabuf_fds.empty()) {
|
| + LOGF(ERROR) << "Failed to get DMABUFs of decoder.";
|
| + NOTIFY_ERROR(PLATFORM_FAILURE);
|
| + return;
|
| + }
|
| + output_record.processor_input_fds = std::move(dmabuf_fds);
|
| }
|
| - DCHECK(image_processor_->output_allocated_size() == egl_image_size_);
|
| - if (image_processor_->input_allocated_size() != coded_size_) {
|
| - LOGF(ERROR) << "Image processor should be able to take the output coded "
|
| - << "size of decoder " << coded_size_.ToString()
|
| - << " without adjusting to "
|
| - << image_processor_->input_allocated_size().ToString();
|
| - NOTIFY_ERROR(PLATFORM_FAILURE);
|
| +
|
| + if (output_mode_ == Config::OutputMode::ALLOCATE) {
|
| + std::vector<base::ScopedFD> dmabuf_fds;
|
| + dmabuf_fds = egl_image_device_->GetDmabufsForV4L2Buffer(
|
| + i, egl_image_planes_count_, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
|
| + if (dmabuf_fds.empty()) {
|
| + LOGF(ERROR) << "Failed to get DMABUFs for EGLImage.";
|
| + NOTIFY_ERROR(PLATFORM_FAILURE);
|
| + return;
|
| + }
|
| + int plane_horiz_bits_per_pixel = VideoFrame::PlaneHorizontalBitsPerPixel(
|
| + V4L2Device::V4L2PixFmtToVideoPixelFormat(egl_image_format_fourcc_),
|
| + 0);
|
| + ImportBufferForPictureTask(
|
| + output_record.picture_id, std::move(dmabuf_fds),
|
| + egl_image_size_.width() * plane_horiz_bits_per_pixel / 8);
|
| + } // else we'll get triggered via ImportBufferForPicture() from client.
|
| +
|
| + DVLOGF(3) << "buffer[" << i << "]: picture_id=" << output_record.picture_id;
|
| + }
|
| +
|
| + if (output_mode_ == Config::OutputMode::ALLOCATE) {
|
| + DCHECK_EQ(kAwaitingPictureBuffers, decoder_state_);
|
| + DVLOGF(1) << "Change state to kDecoding";
|
| + decoder_state_ = kDecoding;
|
| + if (reset_pending_) {
|
| + FinishReset();
|
| return;
|
| }
|
| + ScheduleDecodeBufferTaskIfNeeded();
|
| }
|
| -
|
| - child_task_runner_->PostTask(
|
| - FROM_HERE,
|
| - base::Bind(&V4L2VideoDecodeAccelerator::CreateEGLImages, weak_this_,
|
| - buffers, egl_image_format_fourcc_, egl_image_planes_count_));
|
| }
|
|
|
| -void V4L2VideoDecodeAccelerator::CreateEGLImages(
|
| - const std::vector<media::PictureBuffer>& buffers,
|
| - uint32_t output_format_fourcc,
|
| - size_t output_planes_count) {
|
| - DVLOGF(3);
|
| +void V4L2VideoDecodeAccelerator::CreateEGLImageFor(
|
| + size_t buffer_index,
|
| + int32_t picture_buffer_id,
|
| + std::vector<base::ScopedFD> dmabuf_fds,
|
| + GLuint texture_id,
|
| + const gfx::Size& size,
|
| + uint32_t fourcc) {
|
| + DVLOGF(3) << "index=" << buffer_index;
|
| DCHECK(child_task_runner_->BelongsToCurrentThread());
|
|
|
| if (get_gl_context_cb_.is_null() || make_context_current_cb_.is_null()) {
|
| @@ -410,85 +454,186 @@ void V4L2VideoDecodeAccelerator::CreateEGLImages(
|
|
|
| gl::ScopedTextureBinder bind_restore(GL_TEXTURE_EXTERNAL_OES, 0);
|
|
|
| - std::vector<EGLImageKHR> egl_images;
|
| - for (size_t i = 0; i < buffers.size(); ++i) {
|
| - std::vector<base::ScopedFD> dmabuf_fds;
|
| - dmabuf_fds = egl_image_device_->GetDmabufsForV4L2Buffer(
|
| - i, egl_image_planes_count_, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
|
| - if (dmabuf_fds.empty()) {
|
| - LOGF(ERROR) << "Failed to get DMABUFs for EGLImage.";
|
| - NOTIFY_ERROR(PLATFORM_FAILURE);
|
| - return;
|
| - }
|
| -
|
| - EGLImageKHR egl_image = egl_image_device_->CreateEGLImage(
|
| - egl_display_, gl_context->GetHandle(), buffers[i].texture_ids()[0],
|
| - buffers[i].size(), i, egl_image_format_fourcc_, dmabuf_fds);
|
| - if (egl_image == EGL_NO_IMAGE_KHR) {
|
| - LOGF(ERROR) << "could not create EGLImageKHR,"
|
| - << " index=" << i
|
| - << " texture_id=" << buffers[i].texture_ids()[0];
|
| - for (EGLImageKHR image : egl_images) {
|
| - egl_image_device_->DestroyEGLImage(egl_display_, image);
|
| - }
|
| - NOTIFY_ERROR(PLATFORM_FAILURE);
|
| - return;
|
| - }
|
| - egl_images.push_back(egl_image);
|
| + EGLImageKHR egl_image = egl_image_device_->CreateEGLImage(
|
| + egl_display_, gl_context->GetHandle(), texture_id, size, buffer_index,
|
| + fourcc, dmabuf_fds);
|
| + if (egl_image == EGL_NO_IMAGE_KHR) {
|
| + LOGF(ERROR) << "could not create EGLImageKHR,"
|
| + << " index=" << buffer_index << " texture_id=" << texture_id;
|
| + NOTIFY_ERROR(PLATFORM_FAILURE);
|
| + return;
|
| }
|
|
|
| decoder_thread_.task_runner()->PostTask(
|
| - FROM_HERE, base::Bind(&V4L2VideoDecodeAccelerator::AssignEGLImages,
|
| - base::Unretained(this), buffers, egl_images));
|
| + FROM_HERE,
|
| + base::Bind(&V4L2VideoDecodeAccelerator::AssignEGLImage,
|
| + base::Unretained(this), buffer_index, picture_buffer_id,
|
| + egl_image, base::Passed(&dmabuf_fds)));
|
| }
|
|
|
| -void V4L2VideoDecodeAccelerator::AssignEGLImages(
|
| - const std::vector<media::PictureBuffer>& buffers,
|
| - const std::vector<EGLImageKHR>& egl_images) {
|
| - DVLOGF(3);
|
| +void V4L2VideoDecodeAccelerator::AssignEGLImage(
|
| + size_t buffer_index,
|
| + int32_t picture_buffer_id,
|
| + EGLImageKHR egl_image,
|
| + std::vector<base::ScopedFD> dmabuf_fds) {
|
| + DVLOGF(3) << "index=" << buffer_index << ", picture_id=" << picture_buffer_id;
|
| DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
|
| - DCHECK_EQ(buffers.size(), egl_images.size());
|
| - DCHECK(free_output_buffers_.empty());
|
| - DCHECK(output_buffer_map_.empty());
|
|
|
| - output_buffer_map_.resize(buffers.size());
|
| - for (size_t i = 0; i < output_buffer_map_.size(); ++i) {
|
| - DCHECK(buffers[i].size() == egl_image_size_);
|
| + // It's possible that while waiting for the EGLImages to be allocated and
|
| + // assigned, we have already decoded more of the stream and saw another
|
| + // resolution change. This is a normal situation, in such a case either there
|
| + // is no output record with this index awaiting an EGLImage to be assigned to
|
| + // it, or the record is already updated to use a newer PictureBuffer and is
|
| + // awaiting an EGLImage associated with a different picture_buffer_id. If so,
|
| + // just discard this image, we will get the one we are waiting for later.
|
| + if (buffer_index >= output_buffer_map_.size() ||
|
| + output_buffer_map_[buffer_index].picture_id != picture_buffer_id) {
|
| + DVLOGF(3) << "Picture set already changed, dropping EGLImage";
|
| + child_task_runner_->PostTask(
|
| + FROM_HERE, base::Bind(base::IgnoreResult(&V4L2Device::DestroyEGLImage),
|
| + device_, egl_display_, egl_image));
|
| + return;
|
| + }
|
|
|
| - OutputRecord& output_record = output_buffer_map_[i];
|
| - DCHECK_EQ(output_record.state, kFree);
|
| - DCHECK_EQ(output_record.egl_image, EGL_NO_IMAGE_KHR);
|
| - DCHECK_EQ(output_record.egl_sync, EGL_NO_SYNC_KHR);
|
| - DCHECK_EQ(output_record.picture_id, -1);
|
| - DCHECK_EQ(output_record.cleared, false);
|
| - DCHECK_LE(1u, buffers[i].texture_ids().size());
|
| + OutputRecord& output_record = output_buffer_map_[buffer_index];
|
| + DCHECK_EQ(output_record.egl_image, EGL_NO_IMAGE_KHR);
|
| + DCHECK_EQ(output_record.egl_sync, EGL_NO_SYNC_KHR);
|
| + DCHECK_EQ(output_record.state, kFree);
|
| + DCHECK_EQ(std::count(free_output_buffers_.begin(), free_output_buffers_.end(),
|
| + buffer_index),
|
| + 0);
|
| + output_record.egl_image = egl_image;
|
| + free_output_buffers_.push_back(buffer_index);
|
| + if (decoder_state_ != kChangingResolution) {
|
| + Enqueue();
|
| + ScheduleDecodeBufferTaskIfNeeded();
|
| + }
|
| +}
|
|
|
| - if (image_processor_device_) {
|
| - std::vector<base::ScopedFD> fds = device_->GetDmabufsForV4L2Buffer(
|
| - i, output_planes_count_, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
|
| - if (fds.empty()) {
|
| - LOGF(ERROR) << "Failed to get DMABUFs of decoder.";
|
| - NOTIFY_ERROR(PLATFORM_FAILURE);
|
| - return;
|
| - }
|
| - output_record.fds = std::move(fds);
|
| - }
|
| +void V4L2VideoDecodeAccelerator::ImportBufferForPicture(
|
| + int32_t picture_buffer_id,
|
| + const gfx::GpuMemoryBufferHandle& gpu_memory_buffer_handle) {
|
| + DVLOGF(3) << "picture_buffer_id=" << picture_buffer_id;
|
| + DCHECK(child_task_runner_->BelongsToCurrentThread());
|
|
|
| - output_record.egl_image = egl_images[i];
|
| - output_record.picture_id = buffers[i].id();
|
| + if (output_mode_ != Config::OutputMode::IMPORT) {
|
| + LOGF(ERROR) << "Cannot import in non-import mode";
|
| + NOTIFY_ERROR(INVALID_ARGUMENT);
|
| + return;
|
| + }
|
|
|
| - free_output_buffers_.push(i);
|
| - DVLOGF(3) << "buffer[" << i << "]: picture_id=" << output_record.picture_id;
|
| + std::vector<base::ScopedFD> dmabuf_fds;
|
| + int32_t stride = 0;
|
| +#if defined(USE_OZONE)
|
| + for (const auto& fd : gpu_memory_buffer_handle.native_pixmap_handle.fds) {
|
| + DCHECK_NE(fd.fd, -1);
|
| + dmabuf_fds.push_back(base::ScopedFD(fd.fd));
|
| }
|
| + stride = gpu_memory_buffer_handle.native_pixmap_handle.planes[0].stride;
|
| + for (const auto& plane :
|
| + gpu_memory_buffer_handle.native_pixmap_handle.planes) {
|
| + DVLOGF(3) << ": offset=" << plane.offset << ", stride=" << plane.stride;
|
| + }
|
| +#endif
|
|
|
| - decoder_state_ = kDecoding;
|
| - Enqueue();
|
| - if (reset_pending_) {
|
| - FinishReset();
|
| + decoder_thread_.task_runner()->PostTask(
|
| + FROM_HERE,
|
| + base::Bind(&V4L2VideoDecodeAccelerator::ImportBufferForPictureTask,
|
| + base::Unretained(this), picture_buffer_id,
|
| + base::Passed(&dmabuf_fds), stride));
|
| +}
|
| +
|
| +void V4L2VideoDecodeAccelerator::ImportBufferForPictureTask(
|
| + int32_t picture_buffer_id,
|
| + std::vector<base::ScopedFD> dmabuf_fds,
|
| + int32_t stride) {
|
| + DVLOGF(3) << "picture_buffer_id=" << picture_buffer_id
|
| + << ", dmabuf_fds.size()=" << dmabuf_fds.size()
|
| + << ", stride=" << stride;
|
| + DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
|
| +
|
| + const auto iter =
|
| + std::find_if(output_buffer_map_.begin(), output_buffer_map_.end(),
|
| + [picture_buffer_id](const OutputRecord& output_record) {
|
| + return output_record.picture_id == picture_buffer_id;
|
| + });
|
| + if (iter == output_buffer_map_.end()) {
|
| + // It's possible that we've already posted a DismissPictureBuffer for this
|
| + // picture, but it has not yet executed when this ImportBufferForPicture was
|
| + // posted to us by the client. In that case just ignore this (we've already
|
| + // dismissed it and accounted for that).
|
| + DVLOGF(3) << "got picture id=" << picture_buffer_id
|
| + << " not in use (anymore?).";
|
| return;
|
| }
|
|
|
| - ScheduleDecodeBufferTaskIfNeeded();
|
| + if (iter->state != kAtClient) {
|
| + LOGF(ERROR) << "Cannot import buffer not owned by client";
|
| + NOTIFY_ERROR(INVALID_ARGUMENT);
|
| + return;
|
| + }
|
| +
|
| + int plane_horiz_bits_per_pixel = VideoFrame::PlaneHorizontalBitsPerPixel(
|
| + V4L2Device::V4L2PixFmtToVideoPixelFormat(egl_image_format_fourcc_), 0);
|
| + if (plane_horiz_bits_per_pixel == 0 ||
|
| + (stride * 8) % plane_horiz_bits_per_pixel != 0) {
|
| + LOG(ERROR) << "Invalid format " << egl_image_format_fourcc_ << " or stride "
|
| + << stride;
|
| + NOTIFY_ERROR(INVALID_ARGUMENT);
|
| + return;
|
| + }
|
| + int adjusted_coded_width = stride * 8 / plane_horiz_bits_per_pixel;
|
| +
|
| + if (image_processor_device_ && !image_processor_) {
|
| + // This is the first buffer import. Create the image processor and change
|
| + // the decoder state. The client may adjust the coded width. We don't have
|
| + // the final coded size in AssignPictureBuffers yet. Use the adjusted coded
|
| + // width to create the image processor.
|
| + DVLOGF(3) << "Original egl_image_size=" << egl_image_size_.ToString()
|
| + << ", adjusted coded width=" << adjusted_coded_width;
|
| + DCHECK_GE(adjusted_coded_width, egl_image_size_.width());
|
| + egl_image_size_.set_width(adjusted_coded_width);
|
| + if (!CreateImageProcessor())
|
| + return;
|
| + DCHECK_EQ(kAwaitingPictureBuffers, decoder_state_);
|
| + DVLOGF(1) << "Change state to kDecoding";
|
| + decoder_state_ = kDecoding;
|
| + if (reset_pending_) {
|
| + FinishReset();
|
| + }
|
| + } else {
|
| + DCHECK_EQ(egl_image_size_.width(), adjusted_coded_width);
|
| + }
|
| +
|
| + size_t index = iter - output_buffer_map_.begin();
|
| + DCHECK_EQ(std::count(free_output_buffers_.begin(), free_output_buffers_.end(),
|
| + index),
|
| + 0);
|
| +
|
| + iter->state = kFree;
|
| + if (iter->texture_id != 0) {
|
| + if (iter->egl_image != EGL_NO_IMAGE_KHR) {
|
| + child_task_runner_->PostTask(
|
| + FROM_HERE,
|
| + base::Bind(base::IgnoreResult(&V4L2Device::DestroyEGLImage), device_,
|
| + egl_display_, iter->egl_image));
|
| + }
|
| +
|
| + child_task_runner_->PostTask(
|
| + FROM_HERE, base::Bind(&V4L2VideoDecodeAccelerator::CreateEGLImageFor,
|
| + weak_this_, index, picture_buffer_id,
|
| + base::Passed(&dmabuf_fds), iter->texture_id,
|
| + egl_image_size_, egl_image_format_fourcc_));
|
| + } else {
|
| + // No need for an EGLImage, start using this buffer now.
|
| + DCHECK_EQ(egl_image_planes_count_, dmabuf_fds.size());
|
| + iter->processor_output_fds.swap(dmabuf_fds);
|
| + free_output_buffers_.push_back(index);
|
| + if (decoder_state_ != kChangingResolution) {
|
| + Enqueue();
|
| + ScheduleDecodeBufferTaskIfNeeded();
|
| + }
|
| + }
|
| }
|
|
|
| void V4L2VideoDecodeAccelerator::ReusePictureBuffer(int32_t picture_buffer_id) {
|
| @@ -496,25 +641,28 @@ void V4L2VideoDecodeAccelerator::ReusePictureBuffer(int32_t picture_buffer_id) {
|
| // Must be run on child thread, as we'll insert a sync in the EGL context.
|
| DCHECK(child_task_runner_->BelongsToCurrentThread());
|
|
|
| - if (!make_context_current_cb_.Run()) {
|
| - LOGF(ERROR) << "could not make context current";
|
| - NOTIFY_ERROR(PLATFORM_FAILURE);
|
| - return;
|
| - }
|
| + std::unique_ptr<EGLSyncKHRRef> egl_sync_ref;
|
| +
|
| + if (!make_context_current_cb_.is_null()) {
|
| + if (!make_context_current_cb_.Run()) {
|
| + LOGF(ERROR) << "could not make context current";
|
| + NOTIFY_ERROR(PLATFORM_FAILURE);
|
| + return;
|
| + }
|
|
|
| - EGLSyncKHR egl_sync = EGL_NO_SYNC_KHR;
|
| + EGLSyncKHR egl_sync = EGL_NO_SYNC_KHR;
|
| // TODO(posciak): crbug.com/450898.
|
| #if defined(ARCH_CPU_ARMEL)
|
| - egl_sync = eglCreateSyncKHR(egl_display_, EGL_SYNC_FENCE_KHR, NULL);
|
| - if (egl_sync == EGL_NO_SYNC_KHR) {
|
| - LOGF(ERROR) << "eglCreateSyncKHR() failed";
|
| - NOTIFY_ERROR(PLATFORM_FAILURE);
|
| - return;
|
| - }
|
| + egl_sync = eglCreateSyncKHR(egl_display_, EGL_SYNC_FENCE_KHR, NULL);
|
| + if (egl_sync == EGL_NO_SYNC_KHR) {
|
| + LOGF(ERROR) << "eglCreateSyncKHR() failed";
|
| + NOTIFY_ERROR(PLATFORM_FAILURE);
|
| + return;
|
| + }
|
| #endif
|
|
|
| - std::unique_ptr<EGLSyncKHRRef> egl_sync_ref(
|
| - new EGLSyncKHRRef(egl_display_, egl_sync));
|
| + egl_sync_ref.reset(new EGLSyncKHRRef(egl_display_, egl_sync));
|
| + }
|
|
|
| decoder_thread_.task_runner()->PostTask(
|
| FROM_HERE, base::Bind(&V4L2VideoDecodeAccelerator::ReusePictureBufferTask,
|
| @@ -1198,35 +1346,23 @@ void V4L2VideoDecodeAccelerator::Dequeue() {
|
| }
|
| OutputRecord& output_record = output_buffer_map_[dqbuf.index];
|
| DCHECK_EQ(output_record.state, kAtDevice);
|
| - DCHECK_NE(output_record.egl_image, EGL_NO_IMAGE_KHR);
|
| DCHECK_NE(output_record.picture_id, -1);
|
| output_buffer_queued_count_--;
|
| if (dqbuf.m.planes[0].bytesused == 0) {
|
| // This is an empty output buffer returned as part of a flush.
|
| output_record.state = kFree;
|
| - free_output_buffers_.push(dqbuf.index);
|
| + free_output_buffers_.push_back(dqbuf.index);
|
| } else {
|
| int32_t bitstream_buffer_id = dqbuf.timestamp.tv_sec;
|
| DCHECK_GE(bitstream_buffer_id, 0);
|
| DVLOGF(3) << "Dequeue output buffer: dqbuf index=" << dqbuf.index
|
| << " bitstream input_id=" << bitstream_buffer_id;
|
| if (image_processor_device_) {
|
| - output_record.state = kAtProcessor;
|
| - image_processor_bitstream_buffer_ids_.push(bitstream_buffer_id);
|
| - std::vector<int> fds;
|
| - for (auto& fd : output_record.fds) {
|
| - fds.push_back(fd.get());
|
| + if (!ProcessFrame(bitstream_buffer_id, dqbuf.index)) {
|
| + DLOGF(ERROR) << "Processing frame failed";
|
| + NOTIFY_ERROR(PLATFORM_FAILURE);
|
| + return;
|
| }
|
| - scoped_refptr<VideoFrame> frame = VideoFrame::WrapExternalDmabufs(
|
| - V4L2Device::V4L2PixFmtToVideoPixelFormat(output_format_fourcc_),
|
| - coded_size_, gfx::Rect(visible_size_), visible_size_, fds,
|
| - base::TimeDelta());
|
| - // Unretained is safe because |this| owns image processor and there will
|
| - // be no callbacks after processor destroys.
|
| - image_processor_->Process(
|
| - frame, dqbuf.index,
|
| - base::Bind(&V4L2VideoDecodeAccelerator::FrameProcessed,
|
| - base::Unretained(this), bitstream_buffer_id));
|
| } else {
|
| output_record.state = kAtClient;
|
| decoder_frames_at_client_++;
|
| @@ -1279,7 +1415,6 @@ bool V4L2VideoDecodeAccelerator::EnqueueOutputRecord() {
|
| DVLOGF(3) << "buffer " << buffer;
|
| OutputRecord& output_record = output_buffer_map_[buffer];
|
| DCHECK_EQ(output_record.state, kFree);
|
| - DCHECK_NE(output_record.egl_image, EGL_NO_IMAGE_KHR);
|
| DCHECK_NE(output_record.picture_id, -1);
|
| if (output_record.egl_sync != EGL_NO_SYNC_KHR) {
|
| TRACE_EVENT0("Video Decoder",
|
| @@ -1310,8 +1445,9 @@ bool V4L2VideoDecodeAccelerator::EnqueueOutputRecord() {
|
| qbuf.memory = V4L2_MEMORY_MMAP;
|
| qbuf.m.planes = qbuf_planes.get();
|
| qbuf.length = output_planes_count_;
|
| + DVLOGF(2) << "qbuf.index=" << qbuf.index;
|
| IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_QBUF, &qbuf);
|
| - free_output_buffers_.pop();
|
| + free_output_buffers_.pop_front();
|
| output_record.state = kAtDevice;
|
| output_buffer_queued_count_++;
|
| return true;
|
| @@ -1359,12 +1495,14 @@ void V4L2VideoDecodeAccelerator::ReusePictureBufferTask(
|
| }
|
|
|
| DCHECK_EQ(output_record.egl_sync, EGL_NO_SYNC_KHR);
|
| - output_record.egl_sync = egl_sync_ref->egl_sync;
|
| output_record.state = kFree;
|
| - free_output_buffers_.push(index);
|
| + free_output_buffers_.push_back(index);
|
| decoder_frames_at_client_--;
|
| - // Take ownership of the EGLSync.
|
| - egl_sync_ref->egl_sync = EGL_NO_SYNC_KHR;
|
| + if (egl_sync_ref) {
|
| + output_record.egl_sync = egl_sync_ref->egl_sync;
|
| + // Take ownership of the EGLSync.
|
| + egl_sync_ref->egl_sync = EGL_NO_SYNC_KHR;
|
| + }
|
| // We got a buffer back, so enqueue it back.
|
| Enqueue();
|
| }
|
| @@ -1636,7 +1774,7 @@ bool V4L2VideoDecodeAccelerator::StopOutputStream() {
|
| OutputRecord& output_record = output_buffer_map_[i];
|
| if (output_record.state == kAtDevice) {
|
| output_record.state = kFree;
|
| - free_output_buffers_.push(i);
|
| + free_output_buffers_.push_back(i);
|
| DCHECK_EQ(output_record.egl_sync, EGL_NO_SYNC_KHR);
|
| }
|
| }
|
| @@ -1836,7 +1974,9 @@ bool V4L2VideoDecodeAccelerator::CreateBuffersForFormat(
|
| }
|
| DVLOGF(3) << "new resolution: " << coded_size_.ToString()
|
| << ", visible size: " << visible_size_.ToString()
|
| - << ", EGLImage size: " << egl_image_size_.ToString();
|
| + << ", decoder output planes count: " << output_planes_count_
|
| + << ", EGLImage size: " << egl_image_size_.ToString()
|
| + << ", EGLImage plane count: " << egl_image_planes_count_;
|
|
|
| return CreateOutputBuffers();
|
| }
|
| @@ -2002,6 +2142,11 @@ bool V4L2VideoDecodeAccelerator::SetupFormats() {
|
| }
|
| egl_image_device_ = image_processor_device_;
|
| } else {
|
| + if (output_mode_ == Config::OutputMode::IMPORT) {
|
| + LOGF(ERROR) << "Import mode without image processor is not implemented "
|
| + << "yet.";
|
| + return false;
|
| + }
|
| egl_image_format_fourcc_ = output_format_fourcc_;
|
| egl_image_device_ = device_;
|
| }
|
| @@ -2078,7 +2223,7 @@ bool V4L2VideoDecodeAccelerator::ResetImageProcessor() {
|
| OutputRecord& output_record = output_buffer_map_[i];
|
| if (output_record.state == kAtProcessor) {
|
| output_record.state = kFree;
|
| - free_output_buffers_.push(i);
|
| + free_output_buffers_.push_back(i);
|
| }
|
| }
|
| while (!image_processor_bitstream_buffer_ids_.empty())
|
| @@ -2087,6 +2232,78 @@ bool V4L2VideoDecodeAccelerator::ResetImageProcessor() {
|
| return true;
|
| }
|
|
|
| +bool V4L2VideoDecodeAccelerator::CreateImageProcessor() {
|
| + DVLOGF(3);
|
| + DCHECK(!image_processor_);
|
| + image_processor_.reset(new V4L2ImageProcessor(image_processor_device_));
|
| + v4l2_memory output_memory_type =
|
| + (output_mode_ == Config::OutputMode::ALLOCATE ? V4L2_MEMORY_MMAP
|
| + : V4L2_MEMORY_DMABUF);
|
| + // Unretained is safe because |this| owns image processor and there will be
|
| + // no callbacks after processor destroys.
|
| + if (!image_processor_->Initialize(
|
| + V4L2Device::V4L2PixFmtToVideoPixelFormat(output_format_fourcc_),
|
| + V4L2Device::V4L2PixFmtToVideoPixelFormat(egl_image_format_fourcc_),
|
| + V4L2_MEMORY_DMABUF, output_memory_type, visible_size_, coded_size_,
|
| + visible_size_, egl_image_size_, output_buffer_map_.size(),
|
| + base::Bind(&V4L2VideoDecodeAccelerator::ImageProcessorError,
|
| + base::Unretained(this)))) {
|
| + LOGF(ERROR) << "Initialize image processor failed";
|
| + NOTIFY_ERROR(PLATFORM_FAILURE);
|
| + return false;
|
| + }
|
| + DVLOGF(3) << "image_processor_->output_allocated_size()="
|
| + << image_processor_->output_allocated_size().ToString();
|
| + DCHECK(image_processor_->output_allocated_size() == egl_image_size_);
|
| + if (image_processor_->input_allocated_size() != coded_size_) {
|
| + LOGF(ERROR) << "Image processor should be able to take the output coded "
|
| + << "size of decoder " << coded_size_.ToString()
|
| + << " without adjusting to "
|
| + << image_processor_->input_allocated_size().ToString();
|
| + NOTIFY_ERROR(PLATFORM_FAILURE);
|
| + return false;
|
| + }
|
| + return true;
|
| +}
|
| +
|
| +bool V4L2VideoDecodeAccelerator::ProcessFrame(int32_t bitstream_buffer_id,
|
| + int output_buffer_index) {
|
| + DVLOGF(3);
|
| + DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
|
| +
|
| + OutputRecord& output_record = output_buffer_map_[output_buffer_index];
|
| + DCHECK_EQ(output_record.state, kAtDevice);
|
| + output_record.state = kAtProcessor;
|
| + image_processor_bitstream_buffer_ids_.push(bitstream_buffer_id);
|
| + std::vector<int> processor_input_fds;
|
| + for (auto& fd : output_record.processor_input_fds) {
|
| + processor_input_fds.push_back(fd.get());
|
| + }
|
| + scoped_refptr<VideoFrame> input_frame = VideoFrame::WrapExternalDmabufs(
|
| + V4L2Device::V4L2PixFmtToVideoPixelFormat(output_format_fourcc_),
|
| + coded_size_, gfx::Rect(visible_size_), visible_size_, processor_input_fds,
|
| + base::TimeDelta());
|
| +
|
| + std::vector<base::ScopedFD> processor_output_fds;
|
| + if (output_mode_ == Config::OutputMode::IMPORT) {
|
| + for (auto& fd : output_record.processor_output_fds) {
|
| + processor_output_fds.push_back(
|
| + base::ScopedFD(HANDLE_EINTR(dup(fd.get()))));
|
| + if (!processor_output_fds.back().is_valid()) {
|
| + PLOGF(ERROR) << "Failed duplicating a dmabuf fd";
|
| + return false;
|
| + }
|
| + }
|
| + }
|
| + // Unretained is safe because |this| owns image processor and there will
|
| + // be no callbacks after processor destroys.
|
| + image_processor_->Process(
|
| + input_frame, output_buffer_index, std::move(processor_output_fds),
|
| + base::Bind(&V4L2VideoDecodeAccelerator::FrameProcessed,
|
| + base::Unretained(this), bitstream_buffer_id));
|
| + return true;
|
| +}
|
| +
|
| bool V4L2VideoDecodeAccelerator::CreateOutputBuffers() {
|
| DVLOGF(3);
|
| DCHECK(decoder_state_ == kInitialized ||
|
| @@ -2107,10 +2324,17 @@ bool V4L2VideoDecodeAccelerator::CreateOutputBuffers() {
|
| DVLOGF(3) << "buffer_count=" << buffer_count
|
| << ", coded_size=" << egl_image_size_.ToString();
|
|
|
| + // With ALLOCATE mode the client can sample it as RGB and doesn't need to
|
| + // know the precise format.
|
| + VideoPixelFormat pixel_format =
|
| + (output_mode_ == Config::OutputMode::IMPORT)
|
| + ? V4L2Device::V4L2PixFmtToVideoPixelFormat(egl_image_format_fourcc_)
|
| + : PIXEL_FORMAT_UNKNOWN;
|
| +
|
| child_task_runner_->PostTask(
|
| FROM_HERE, base::Bind(&Client::ProvidePictureBuffers, client_,
|
| - buffer_count, PIXEL_FORMAT_UNKNOWN, 1,
|
| - egl_image_size_, device_->GetTextureTarget()));
|
| + buffer_count, pixel_format, 1, egl_image_size_,
|
| + device_->GetTextureTarget()));
|
|
|
| // Go into kAwaitingPictureBuffers to prevent us from doing any more decoding
|
| // or event handling while we are waiting for AssignPictureBuffers(). Not
|
| @@ -2184,12 +2408,13 @@ bool V4L2VideoDecodeAccelerator::DestroyOutputBuffers() {
|
| reqbufs.memory = V4L2_MEMORY_MMAP;
|
| if (device_->Ioctl(VIDIOC_REQBUFS, &reqbufs) != 0) {
|
| PLOGF(ERROR) << "ioctl() failed: VIDIOC_REQBUFS";
|
| + NOTIFY_ERROR(PLATFORM_FAILURE);
|
| success = false;
|
| }
|
|
|
| output_buffer_map_.clear();
|
| while (!free_output_buffers_.empty())
|
| - free_output_buffers_.pop();
|
| + free_output_buffers_.pop_front();
|
| output_buffer_queued_count_ = 0;
|
| // The client may still hold some buffers. The texture holds a reference to
|
| // the buffer. It is OK to free the buffer and destroy EGLImage here.
|
| @@ -2261,7 +2486,6 @@ void V4L2VideoDecodeAccelerator::FrameProcessed(int32_t bitstream_buffer_id,
|
| OutputRecord& output_record = output_buffer_map_[output_buffer_index];
|
| DVLOGF(3) << "picture_id=" << output_record.picture_id;
|
| DCHECK_EQ(output_record.state, kAtProcessor);
|
| - DCHECK_NE(output_record.egl_image, EGL_NO_IMAGE_KHR);
|
| DCHECK_NE(output_record.picture_id, -1);
|
|
|
| // Send the processed frame to render.
|
|
|