Chromium Code Reviews| Index: chrome/gpu/arc_gpu_video_decode_accelerator.cc |
| diff --git a/chrome/gpu/arc_gpu_video_decode_accelerator.cc b/chrome/gpu/arc_gpu_video_decode_accelerator.cc |
| new file mode 100644 |
| index 0000000000000000000000000000000000000000..282149f53975fc783519db911ef3b87dc3f4fd6e |
| --- /dev/null |
| +++ b/chrome/gpu/arc_gpu_video_decode_accelerator.cc |
| @@ -0,0 +1,418 @@ |
| +// Copyright 2016 The Chromium Authors. All rights reserved. |
| +// Use of this source code is governed by a BSD-style license that can be |
| +// found in the LICENSE file. |
| + |
| +#include "base/callback_helpers.h" |
| +#include "base/logging.h" |
| +#include "base/run_loop.h" |
| +#include "chrome/gpu/arc_gpu_video_decode_accelerator.h" |
| + |
| +// TODO(owenlin): Use the GPU VDA factory instead of content::CreateVDA(). |
| +#include "content/public/common/create_vda.h" |
| + |
| +#undef DVLOG |
| +#define DVLOG VLOG |
| + |
| +namespace chromeos { |
| +namespace arc { |
| + |
// Associates a VDA bitstream buffer id with the client-visible input buffer
// index and the buffer's timestamp, for later lookup via FindInputRecord().
ArcGpuVideoDecodeAccelerator::InputRecord::InputRecord(
    int32_t bitstream_buffer_id,
    uint32_t buffer_index,
    int64_t timestamp)
    : bitstream_buffer_id(bitstream_buffer_id),
      buffer_index(buffer_index),
      timestamp(timestamp) {}
| + |
// Starts with a closed (invalid) fd handle and an empty memory region.
ArcGpuVideoDecodeAccelerator::InputBufferInfo::InputBufferInfo()
    : offset(0), length(0) {}
| + |
| +ArcGpuVideoDecodeAccelerator::InputBufferInfo::InputBufferInfo( |
| + InputBufferInfo&& a) { |
| + handle = std::move(a.handle); |
| + offset = a.offset; |
| + length = a.length; |
| +} |
| + |
// The ScopedFD member closes the owned fd (if any) automatically.
ArcGpuVideoDecodeAccelerator::InputBufferInfo::~InputBufferInfo() {}
| + |
// A port defaults to dmabuf-backed memory with no buffers allocated yet.
ArcGpuVideoDecodeAccelerator::PortInfo::PortInfo()
    : memory_type(MEMORY_DMABUF), num_buffers(0) {}
| + |
// |io_task_runner| is forwarded to the VDA created later in Initialize().
// No client is attached until Initialize() is called.
ArcGpuVideoDecodeAccelerator::ArcGpuVideoDecodeAccelerator(
    const scoped_refptr<base::SingleThreadTaskRunner>& io_task_runner)
    : pending_eos_output_buffer_(false),
      arc_client_(nullptr),
      next_bitstream_buffer_id_(0),
      io_task_runner_(io_task_runner),
      output_buffer_size_(0) {}
| + |
// Default teardown; |vda_| (if created) is released by its owning pointer.
ArcGpuVideoDecodeAccelerator::~ArcGpuVideoDecodeAccelerator() {}
| + |
| +bool ArcGpuVideoDecodeAccelerator::Initialize( |
| + DeviceType device_type, |
| + ArcVideoAccelerator::Client* client) { |
| + DVLOG(5) << "Initialize()"; |
| + DCHECK(thread_checker_.CalledOnValidThread()); |
| + if (device_type != DEVICE_DECODER) |
| + return false; |
| + DCHECK(client); |
| + DCHECK(!arc_client_); |
| + arc_client_ = client; |
| + |
| + vda_.reset(content::CreateVDA(AsWeakPtr(), io_task_runner_)); |
| + if (!vda_) { |
| + DVLOG(1) << "Failed to create VDA."; |
| + return false; |
| + } |
| + return true; |
| +} |
| + |
| +bool ArcGpuVideoDecodeAccelerator::SetBufferCount(PortType port, |
| + size_t* count) { |
|
Pawel Osciak
2016/02/29 08:25:20
We are not assigning to count, do we need it to be
Owen Lin
2016/03/03 06:30:42
That's because VDA doesn't returns false even on f
|
| + DVLOG(5) << "SetBufferCount(port=" << port << ", count=" << *count << ")"; |
| + DCHECK(thread_checker_.CalledOnValidThread()); |
| + if (!ValidatePort(port)) |
| + return false; |
| + if (vda_ == nullptr) { |
| + DVLOG(1) << "Must call setBufferFormat before setBufferCount"; |
|
Pawel Osciak
2016/02/29 08:25:19
VDA is now created in Initialize()... But ideally
Owen Lin
2016/03/03 06:30:42
Done.
|
| + return false; |
| + } |
| + if (port == PORT_OUTPUT) { |
|
Pawel Osciak
2016/02/29 08:25:19
We should probably check we don't have any existin
Owen Lin
2016/03/03 06:30:43
I think VDA should handle this case?
|
| + std::vector<media::PictureBuffer> buffers; |
| + for (int32_t id = 0, n = *count; id < n; ++id) { |
| + // TODO: Make sure the size is what we want. |
| + buffers.push_back(media::PictureBuffer(id, coded_size_, 0)); |
| + } |
| + vda_->AssignPictureBuffers(buffers); |
| + } |
| + PortInfo* port_info = &port_info_[port]; |
| + port_info->num_buffers = *count; |
| + if (port == PORT_INPUT) { |
|
Pawel Osciak
2016/02/29 08:25:19
Is it ok to allow SetBufferCount at any time for i
Owen Lin
2016/03/03 06:30:43
Will move this to be part of initialization.
|
| + input_buffer_info_.clear(); |
| + input_buffer_info_.resize(port_info->num_buffers); |
| + } |
| + return true; |
| +} |
| + |
| +bool ArcGpuVideoDecodeAccelerator::SetBufferFormat(PortType port, |
|
Pawel Osciak
2016/02/29 08:25:20
IIRC, we could not change format and memory type a
Owen Lin
2016/03/03 06:30:43
I agree. But this will involve changes of differen
|
| + const BufferFormat& format) { |
| + DVLOG(5) << "SetBufferFormat(port=" << port |
| + << ", format=(memory_type=" << format.memory_type |
| + << ", pixel_format=" << format.pixel_format << "))"; |
| + DCHECK(thread_checker_.CalledOnValidThread()); |
| + if (!ValidatePort(port)) |
| + return false; |
| + switch (port) { |
| + case PORT_INPUT: { |
| + PortInfo* port_info = &port_info_[port]; |
| + if (format.memory_type != MEMORY_SHARED_MEMORY) { |
| + DVLOG(1) << "Only SharedMemory is supported for input buffers"; |
| + return false; |
| + } |
| + port_info->memory_type = format.memory_type; |
| + media::VideoDecodeAccelerator::Config config; |
| + switch (format.pixel_format) { |
| + case HAL_PIXEL_FORMAT_H264: |
| + config.profile = media::H264PROFILE_MAIN; |
|
Pawel Osciak
2016/02/29 08:25:20
On second thought, could we pass the exact profile
Owen Lin
2016/03/03 06:30:42
I don't think we know the exact profile. MediaCode
|
| + break; |
| + case HAL_PIXEL_FORMAT_VP8: |
| + config.profile = media::VP8PROFILE_ANY; |
| + break; |
| + default: |
| + DVLOG(1) << "Unsupported input format: " << format.pixel_format; |
| + return false; |
| + } |
| + config.output_mode = |
| + media::VideoDecodeAccelerator::Config::OutputMode::IMPORT; |
| + if (!vda_->Initialize(config, this)) { |
|
Pawel Osciak
2016/02/29 08:25:20
We should probably make sure vda_ is not null, oth
Owen Lin
2016/03/03 06:30:42
Done.
|
| + DVLOG(1) << "VDA::Initialize() failed."; |
| + return false; |
| + } |
| + break; |
| + } |
| + case PORT_OUTPUT: { |
| + PortInfo* port_info = &port_info_[port]; |
| + if (format.memory_type != MEMORY_DMABUF) { |
| + DVLOG(1) << "Only DMA buffer is supported for output buffers"; |
|
Pawel Osciak
2016/02/29 08:25:19
s/DMA buffer/dmabuf/
Owen Lin
2016/03/03 06:30:42
Done.
|
| + return false; |
| + } |
| + port_info->memory_type = format.memory_type; |
| + break; |
| + } |
| + default: |
| + NOTREACHED() << "Invalid port: " << port; |
| + return false; |
| + } |
| + return true; |
| +} |
| + |
| +bool ArcGpuVideoDecodeAccelerator::ValidatePort(PortType port) { |
| + if (port != PORT_INPUT && port != PORT_OUTPUT) { |
| + DVLOG(1) << "Invalid port: " << port; |
| + return false; |
| + } |
| + return true; |
| +} |
| + |
| +bool ArcGpuVideoDecodeAccelerator::ValidatePortAndIndex(PortType port, |
| + uint32_t index) { |
| + if (!ValidatePort(port)) |
| + return false; |
| + if (index >= port_info_[port].num_buffers) { |
| + DVLOG(1) << "Invalid buffer - port: " << port << ", index: " << index; |
| + return false; |
| + } |
| + return true; |
| +} |
| + |
| +bool ArcGpuVideoDecodeAccelerator::BindSharedMemory(PortType port, |
| + uint32_t index, |
| + int ashmem_fd, |
| + size_t offset, |
|
Pawel Osciak
2016/02/29 08:25:20
off_t ?
Owen Lin
2016/03/03 06:30:42
Done.
|
| + size_t length) { |
| + DVLOG(5) << "ArcGVDA::BindSharedMemory, offset: " << offset |
| + << ", length: " << length; |
| + DCHECK(thread_checker_.CalledOnValidThread()); |
| + |
| + // Make sure we will close the file descriptor. |
| + base::ScopedFD handle(ashmem_fd); |
| + if (!ValidatePortAndIndex(port, index)) |
| + return false; |
| + if (port != PORT_INPUT) { |
|
Pawel Osciak
2016/02/29 08:25:20
Should we instead check against port_info->memory_
Owen Lin
2016/03/03 06:30:43
port_info is just removed.
|
| + DVLOG(1) << "SharedBuffer is only supported for input"; |
| + return false; |
| + } |
| + InputBufferInfo* input_info = &input_buffer_info_[index]; |
| + input_info->handle = std::move(handle); |
| + input_info->offset = offset; |
| + input_info->length = length; |
| + return true; |
| +} |
| + |
| +bool ArcGpuVideoDecodeAccelerator::BindDmabuf(PortType port, |
| + uint32_t index, |
| + int dmabuf_fd) { |
| + DCHECK(thread_checker_.CalledOnValidThread()); |
| + base::ScopedFD handle(dmabuf_fd); |
| + |
| + // Make sure we will close the file descriptor. |
| + if (!ValidatePortAndIndex(port, index)) |
| + return false; |
| + if (port != PORT_OUTPUT) { |
|
Pawel Osciak
2016/02/29 08:25:20
Here as well, should we instead check against port
Owen Lin
2016/03/03 06:30:42
same as above.
|
| + DVLOG(1) << "GraphicBuffer is only supported for input"; |
| + return false; |
| + } |
| + std::vector<base::ScopedFD> dmabuf_fds; |
| + dmabuf_fds.push_back(std::move(handle)); |
| + vda_->ImportBufferForPicture(index, std::move(dmabuf_fds)); |
| + return true; |
| +} |
| + |
// Queues the buffer at |index| on |port|. For the input port this submits
// the previously bound shared-memory region to the VDA for decoding; for the
// output port it either returns the picture to the VDA for reuse or reports
// it to the client as the EOS buffer (see SendEosIfNeededOrReusePicture()).
// Errors are reported asynchronously through |arc_client_|->OnError().
void ArcGpuVideoDecodeAccelerator::UseBuffer(PortType port,
                                             uint32_t index,
                                             const BufferMetadata& metadata) {
  DVLOG(5) << "UseBuffer(port=" << port << ", index=" << index
           << ", metadata=(bytes_used=" << metadata.bytes_used
           << ", timestamp=" << metadata.timestamp << "))";
  DCHECK(thread_checker_.CalledOnValidThread());
  if (!ValidatePortAndIndex(port, index)) {
    arc_client_->OnError(INVALID_ARGUMENT);
    return;
  }
  switch (port) {
    case PORT_INPUT: {
      InputBufferInfo* input_info = &input_buffer_info_[index];
      if (metadata.flags & BUFFER_FLAG_EOS) {
        // TODO(owenlin): Ask VDA to return all output pictures so that we
        // can output an EOS picture when Flush() is done.
        vda_->Flush();
      }
      if (metadata.bytes_used == 0) {
        // Nothing to decode; return the input buffer to the client at once.
        arc_client_->OnBufferDone(PORT_INPUT, index, BufferMetadata());
        return;
      }
      if (metadata.bytes_used > input_info->length) {
        // The client claims more payload than the bound region can hold.
        DVLOG(1) << "Invalid bytes_used: " << metadata.bytes_used << " > "
                 << input_info->length;
        arc_client_->OnError(INVALID_ARGUMENT);
        return;
      }
      int32_t bitstream_buffer_id = next_bitstream_buffer_id_;
      // Mask against 30 bits, to avoid (undefined wraparound on signed
      // integer); ids stay non-negative and cycle within [0, 2^30).
      next_bitstream_buffer_id_ = (next_bitstream_buffer_id_ + 1) & 0x3FFFFFFF;
      // Remember the id -> (index, timestamp) mapping for PictureReady() and
      // NotifyEndOfBitstreamBuffer().
      SetInputRecord(bitstream_buffer_id, index, metadata.timestamp);
      // Dup the fd: BitstreamBuffer takes ownership of the handle it is
      // given, while |input_info->handle| stays owned by this class.
      int dup_fd = HANDLE_EINTR(dup(input_info->handle.get()));
      if (dup_fd < 0) {
        DVLOG(1) << "dup() failed.";
        arc_client_->OnError(PLATFORM_FAILURE);
        return;
      }
      vda_->Decode(media::BitstreamBuffer(
          bitstream_buffer_id, base::SharedMemoryHandle(dup_fd, true),
          metadata.bytes_used, input_info->offset));
      break;
    }
    case PORT_OUTPUT: {
      SendEosIfNeededOrReusePicture(index);
      break;
    }
    default:
      NOTREACHED();
  }
}
| + |
// Synchronously resets the VDA: spins a nested message loop until the VDA
// delivers NotifyResetDone(). NOTE(review): nested loops are a fragile
// pattern — any task may run re-entrantly while blocked here.
void ArcGpuVideoDecodeAccelerator::Reset() {
  DCHECK(thread_checker_.CalledOnValidThread());
  base::RunLoop loop;
  // NotifyResetDone() runs this closure to quit the nested loop below.
  reset_done_callback_ = loop.QuitClosure();
  vda_->Reset();
  // Permit nested tasks so the VDA's ResetDone callback can be dispatched.
  base::MessageLoop::ScopedNestableTaskAllower allow(
      base::MessageLoop::current());
  // Wait for the ResetDone callback.
  loop.Run();
}
| + |
| +void ArcGpuVideoDecodeAccelerator::ProvidePictureBuffers( |
| + uint32_t requested_num_of_buffers, |
| + const gfx::Size& dimensions, |
| + uint32_t texture_target) { |
| + DVLOG(5) << "ProvidePictureBuffers(" |
| + << "requested_num_of_buffers=" << requested_num_of_buffers |
| + << ", dimensions=" << dimensions.ToString() << ")"; |
| + DCHECK(thread_checker_.CalledOnValidThread()); |
| + coded_size_ = dimensions; |
| + // TODO(owenlin): use VDA::GetOutputFormat() here and calculate correct |
| + // |image_size|. |
| + VideoFormat video_format; |
| + video_format.image_size = dimensions.GetArea() * 3 / 2; |
| + output_buffer_size_ = video_format.image_size; |
| + video_format.min_num_buffers = requested_num_of_buffers; |
| + video_format.coded_width = dimensions.width(); |
| + video_format.coded_height = dimensions.height(); |
| + // TODO(owenlin): How to get visible size? |
|
Pawel Osciak
2016/02/29 08:25:20
Do we need it at this time for the client?
Owen Lin
2016/03/03 06:30:42
Maybe not, I think client needs the visible size b
|
| + video_format.crop_top = 0; |
| + video_format.crop_left = 0; |
| + video_format.crop_width = dimensions.width(); |
| + video_format.crop_height = dimensions.height(); |
| + arc_client_->OnOutputFormatChanged(video_format); |
| +} |
| + |
// VDA callback signalling |picture_buffer| will no longer be used.
// Intentionally a no-op: the dmabuf fds were moved into the VDA in
// BindDmabuf(), so this class holds no per-picture resources to release.
void ArcGpuVideoDecodeAccelerator::DismissPictureBuffer(
    int32_t picture_buffer) {
  // no-op
}
| + |
| +void ArcGpuVideoDecodeAccelerator::PictureReady(const media::Picture& picture) { |
| + DVLOG(5) << "PictureReady(picture_buffer_id=" << picture.picture_buffer_id() |
| + << ", bitstream_buffer_id=" << picture.bitstream_buffer_id(); |
| + DCHECK(thread_checker_.CalledOnValidThread()); |
| + if (!ValidatePortAndIndex(PORT_OUTPUT, picture.picture_buffer_id())) { |
|
Pawel Osciak
2016/02/29 08:25:20
Do we need to validate PR(), since it's coming fro
Owen Lin
2016/03/03 06:30:43
Done.
|
| + DVLOG(1) << "Invalid index: " << picture.picture_buffer_id(); |
| + arc_client_->OnError(PLATFORM_FAILURE); |
| + return; |
| + } |
| + |
| + // Empty buffer, returned in Flushing. |
| + if (picture.bitstream_buffer_id() == -1) { |
| + buffers_pending_eos_.push(picture.picture_buffer_id()); |
| + return; |
| + } |
| + InputRecord* input_record = FindInputRecord(picture.bitstream_buffer_id()); |
| + if (input_record == nullptr) { |
| + DVLOG(1) << "Cannot find for bitstream buffer id: " |
| + << picture.bitstream_buffer_id(); |
| + arc_client_->OnError(PLATFORM_FAILURE); |
| + return; |
| + } |
| + |
| + BufferMetadata metadata; |
| + metadata.timestamp = input_record->timestamp; |
| + metadata.bytes_used = output_buffer_size_; |
| + arc_client_->OnBufferDone(PORT_OUTPUT, picture.picture_buffer_id(), metadata); |
| +} |
| + |
| +void ArcGpuVideoDecodeAccelerator::NotifyEndOfBitstreamBuffer( |
| + int32_t bitstream_buffer_id) { |
| + DVLOG(5) << "NotifyEndOfBitstreamBuffer(" << bitstream_buffer_id << ")"; |
| + DCHECK(thread_checker_.CalledOnValidThread()); |
| + InputRecord* input_record = FindInputRecord(bitstream_buffer_id); |
| + if (input_record == nullptr) { |
| + arc_client_->OnError(PLATFORM_FAILURE); |
| + return; |
| + } |
| + arc_client_->OnBufferDone(PORT_INPUT, input_record->buffer_index, |
| + BufferMetadata()); |
| +} |
| + |
| +void ArcGpuVideoDecodeAccelerator::NotifyFlushDone() { |
| + DCHECK(thread_checker_.CalledOnValidThread()); |
| + pending_eos_output_buffer_ = true; |
| + while (!buffers_pending_eos_.empty()) { |
| + SendEosIfNeededOrReusePicture(buffers_pending_eos_.front()); |
| + buffers_pending_eos_.pop(); |
| + } |
| +} |
| + |
// VDA callback: reset completed. Quits the nested loop spun in Reset().
void ArcGpuVideoDecodeAccelerator::NotifyResetDone() {
  DCHECK(thread_checker_.CalledOnValidThread());
  base::ResetAndReturn(&reset_done_callback_).Run();
}
| + |
// Maps a media::VideoDecodeAccelerator error code to its ArcVideoAccelerator
// equivalent. Unrecognized codes are logged and reported as PLATFORM_FAILURE.
static ArcVideoAccelerator::Error ConvertErrorCode(
    media::VideoDecodeAccelerator::Error error) {
  switch (error) {
    case media::VideoDecodeAccelerator::ILLEGAL_STATE:
      return ArcVideoAccelerator::ILLEGAL_STATE;
    case media::VideoDecodeAccelerator::INVALID_ARGUMENT:
      return ArcVideoAccelerator::INVALID_ARGUMENT;
    case media::VideoDecodeAccelerator::UNREADABLE_INPUT:
      return ArcVideoAccelerator::UNREADABLE_INPUT;
    case media::VideoDecodeAccelerator::PLATFORM_FAILURE:
      return ArcVideoAccelerator::PLATFORM_FAILURE;
    default:
      DVLOG(1) << "Unknown error: " << error;
      return ArcVideoAccelerator::PLATFORM_FAILURE;
  }
}
| + |
// VDA error callback: translates the error code and forwards it to the ARC
// client.
void ArcGpuVideoDecodeAccelerator::NotifyError(
    media::VideoDecodeAccelerator::Error error) {
  DCHECK(thread_checker_.CalledOnValidThread());
  arc_client_->OnError(ConvertErrorCode(error));
}
| + |
| +void ArcGpuVideoDecodeAccelerator::SendEosIfNeededOrReusePicture( |
| + uint32_t index) { |
| + if (pending_eos_output_buffer_) { |
| + BufferMetadata metadata; |
| + metadata.flags = BUFFER_FLAG_EOS; |
| + arc_client_->OnBufferDone(PORT_OUTPUT, index, metadata); |
| + pending_eos_output_buffer_ = false; |
| + } else { |
| + vda_->ReusePictureBuffer(index); |
| + } |
| +} |
| + |
| +void ArcGpuVideoDecodeAccelerator::SetInputRecord(int32_t bitstream_buffer_id, |
|
Pawel Osciak
2016/02/29 08:25:20
Since we can't have more inputs than buffer indice
Owen Lin
2016/03/03 06:30:42
InputRecord could be more than InputBuffers. Assum
|
| + uint32_t buffer_index, |
| + int64_t timestamp) { |
| + input_records_.push_front( |
| + InputRecord(bitstream_buffer_id, buffer_index, timestamp)); |
| + // The same value copied from media::GpuVideoDecoder. |
| + const size_t kMaxNumberOfInputRecords = 128; |
| + if (input_records_.size() > kMaxNumberOfInputRecords) |
| + input_records_.pop_back(); |
| +} |
| + |
| +ArcGpuVideoDecodeAccelerator::InputRecord* |
| +ArcGpuVideoDecodeAccelerator::FindInputRecord(int32_t bitstream_buffer_id) { |
| + for (auto& record : input_records_) { |
| + if (record.bitstream_buffer_id == bitstream_buffer_id) |
| + return &record; |
| + } |
| + return nullptr; |
| +} |
| + |
| +} // namespace arc |
| +} // namespace chromeos |