Index: chrome/gpu/arc_gpu_video_decode_accelerator.cc |
diff --git a/chrome/gpu/arc_gpu_video_decode_accelerator.cc b/chrome/gpu/arc_gpu_video_decode_accelerator.cc |
new file mode 100644 |
index 0000000000000000000000000000000000000000..bdc487dd74acc18fbb9f6964d753da4c5bf7eeb3 |
--- /dev/null |
+++ b/chrome/gpu/arc_gpu_video_decode_accelerator.cc |
@@ -0,0 +1,384 @@ |
+// Copyright 2016 The Chromium Authors. All rights reserved. |
+// Use of this source code is governed by a BSD-style license that can be |
+// found in the LICENSE file. |
+ |
+#include "base/callback_helpers.h" |
+#include "base/logging.h" |
+#include "base/run_loop.h" |
+#include "chrome/gpu/arc_gpu_video_decode_accelerator.h" |
+ |
+// TODO use Pawel's factory instead |
+#include "content/public/common/create_vda.h" |
+ |
+#undef DVLOG |
+#define DVLOG VLOG |
+ |
+namespace chromeos { |
+namespace arc { |
+ |
+// Bookkeeping for one Decode() call: maps the bitstream buffer id handed |
+// to the VDA back to the client's input buffer index and timestamp. |
+ArcGpuVideoDecodeAccelerator::InputRecord::InputRecord( |
+ int32_t bitstream_buffer_id, |
+ uint32_t buffer_index, |
+ int64_t timestamp) |
+ : bitstream_buffer_id(bitstream_buffer_id), |
+ buffer_index(buffer_index), |
+ timestamp(timestamp) {} |
+ |
+// Client-provided shared-memory input buffer: |handle| owns the ashmem |
+// fd; |offset|/|length| locate the payload within the region. |
+ArcGpuVideoDecodeAccelerator::InputBufferInfo::InputBufferInfo() |
+ : offset(0), length(0) {} |
+ |
+// Move constructor: transfers fd ownership, copies the POD fields. |
+ArcGpuVideoDecodeAccelerator::InputBufferInfo::InputBufferInfo( |
+ InputBufferInfo&& a) { |
+ handle = std::move(a.handle); |
kcwu
2016/03/09 07:58:21
ArcGpuVideoDecodeAccelerator::InputBufferInfo::Inp
Owen Lin
2016/03/14 08:46:59
Removed.
|
+ offset = a.offset; |
+ length = a.length; |
+} |
+ |
+ArcGpuVideoDecodeAccelerator::InputBufferInfo::~InputBufferInfo() {} |
+ |
+// |io_task_runner| is forwarded to the VDA factory in Initialize(). |
+ArcGpuVideoDecodeAccelerator::ArcGpuVideoDecodeAccelerator( |
+ const scoped_refptr<base::SingleThreadTaskRunner>& io_task_runner) |
+ : pending_eos_output_buffer_(false), |
+ arc_client_(nullptr), |
+ next_bitstream_buffer_id_(0), |
+ io_task_runner_(io_task_runner), |
+ output_buffer_size_(0) {} |
+ |
+ArcGpuVideoDecodeAccelerator::~ArcGpuVideoDecodeAccelerator() {} |
+ |
+// Upper bound on the number of input/output buffers a client may request; |
+// an arbitrary sanity limit (see review discussion below). |
+static const size_t MAX_BUFFER_COUNT = 128; |
kcwu
2016/03/09 07:58:21
why 128?
Is the same reason as kMaxNumberOfInputR
kcwu
2016/03/10 07:27:26
and s/MAX_BUFFER_COUNT/kMaxBufferCount/
Owen Lin
2016/03/14 08:46:59
Done.
Owen Lin
2016/03/14 08:46:59
Not the exactly same purpose but it is also arbitr
+ |
+// Creates and initializes the underlying media::VideoDecodeAccelerator. |
+// Returns false (without calling Client::OnError) when |config| is |
+// unsupported or VDA creation/initialization fails. |
+bool ArcGpuVideoDecodeAccelerator::Initialize( |
+ const Config& config, |
+ ArcVideoAccelerator::Client* client) { |
+ DVLOG(5) << "Initialize(device=" << config.device_type |
+ << ", input_pixel_format=" << config.input_pixel_format |
+ << ", num_input_buffers=" << config.num_input_buffers << ")"; |
+ DCHECK(thread_checker_.CalledOnValidThread()); |
+ // Only the decoder role is handled by this class. |
+ if (config.device_type != DEVICE_DECODER) |
+ return false; |
+ DCHECK(client); |
+ DCHECK(!arc_client_); |
+ arc_client_ = client; |
+ |
+ vda_.reset(content::CreateVDA(AsWeakPtr(), io_task_runner_)); |
kcwu
2016/03/09 07:58:21
base::ThreadTaskRunnerHandle::Get().
Since you exp
Owen Lin
2016/03/14 08:46:59
io_task_runner_ is no longer required. Removed.
|
+ if (!vda_) { |
+ DLOG(ERROR) << "Failed to create VDA."; |
+ return false; |
+ } |
+ |
+ // Reject absurd counts before sizing |input_buffer_info_|. |
+ if (config.num_input_buffers > MAX_BUFFER_COUNT) { |
+ DLOG(ERROR) << "Request too many buffers: " << config.num_input_buffers; |
+ return false; |
+ } |
+ input_buffer_info_.resize(config.num_input_buffers); |
+ |
+ media::VideoDecodeAccelerator::Config vda_config; |
+ // Map the ARC pixel format onto a media codec profile. |
+ // NOTE(review): H.264 input is pinned to Main profile here — confirm |
+ // this matches what ARC actually feeds us. |
+ switch (config.input_pixel_format) { |
+ case HAL_PIXEL_FORMAT_H264: |
+ vda_config.profile = media::H264PROFILE_MAIN; |
+ break; |
+ case HAL_PIXEL_FORMAT_VP8: |
+ vda_config.profile = media::VP8PROFILE_ANY; |
+ break; |
+ default: |
+ DLOG(ERROR) << "Unsupported input format: " << config.input_pixel_format; |
+ return false; |
+ } |
+ // IMPORT mode: output buffers are dmabufs supplied by the client via |
+ // BindDmabuf(), not buffers allocated by the VDA. |
+ vda_config.output_mode = |
+ media::VideoDecodeAccelerator::Config::OutputMode::IMPORT; |
+ if (!vda_->Initialize(vda_config, this)) { |
+ vda_.reset(); |
+ DLOG(ERROR) << "VDA::Initialize() failed."; |
+ return false; |
+ } |
+ return true; |
+} |
+ |
+// Client declares how many output (picture) buffers it will provide; the |
+// corresponding PictureBuffers are registered with the VDA up front. |
+void ArcGpuVideoDecodeAccelerator::SetNumberOfOutputBuffers(size_t number) { |
+ DVLOG(5) << "SetNumberOfOutputBuffers(" << number << ")"; |
+ DCHECK(thread_checker_.CalledOnValidThread()); |
+ |
+ // NOTE(review): ">=" rejects number == MAX_BUFFER_COUNT even though |
+ // Initialize() accepts num_input_buffers == MAX_BUFFER_COUNT; ">" would |
+ // be consistent (also flagged in the review below). |
+ if (number >= MAX_BUFFER_COUNT) { |
kcwu
2016/03/09 07:58:21
> instead of >= ?
Owen Lin
2016/03/14 08:46:59
Done.
|
+ DLOG(ERROR) << "Too many buffers: " << number; |
+ arc_client_->OnError(INVALID_ARGUMENT); |
+ return; |
+ } |
+ |
+ std::vector<media::PictureBuffer> buffers; |
+ // Third PictureBuffer argument is 0 — presumably the texture id, unused |
+ // since IMPORT mode supplies dmabufs; confirm against PictureBuffer API. |
+ for (int32_t id = 0, n = number; id < n; ++id) { |
+ // TODO: Make sure the |coded_size| is what we want. |
+ buffers.push_back(media::PictureBuffer(id, coded_size_, 0)); |
+ } |
+ vda_->AssignPictureBuffers(buffers); |
+ |
+ // Any dmabufs still bound for the previous buffer set are dropped here. |
+ pending_import_buffer_.clear(); |
+ pending_import_buffer_.resize(number); |
+} |
+ |
+// Associates an ashmem region with input buffer |index|. Takes ownership |
+// of |ashmem_fd| on every path, including validation failures. |
+void ArcGpuVideoDecodeAccelerator::BindSharedMemory(PortType port, |
+ uint32_t index, |
+ int ashmem_fd, |
+ off_t offset, |
+ size_t length) { |
+ DVLOG(5) << "ArcGVDA::BindSharedMemory, offset: " << offset |
+ << ", length: " << length; |
+ DCHECK(thread_checker_.CalledOnValidThread()); |
+ |
+ // Make sure we will close the file descriptor. |
+ base::ScopedFD handle(ashmem_fd); |
+ if (!ValidatePortAndIndex(port, index)) { |
+ arc_client_->OnError(INVALID_ARGUMENT); |
+ return; |
+ } |
+ // Shared memory is only meaningful for the input (bitstream) port. |
+ if (port != PORT_INPUT) { |
kcwu
2016/03/09 07:58:21
How about move this check to line 130 (before Vali
Owen Lin
2016/03/14 08:46:59
Done.
|
+ DLOG(ERROR) << "SharedBuffer is only supported for input"; |
+ arc_client_->OnError(INVALID_ARGUMENT); |
+ return; |
+ } |
+ // Rebinding an index silently replaces (and closes) any previous fd. |
+ InputBufferInfo* input_info = &input_buffer_info_[index]; |
+ input_info->handle = std::move(handle); |
+ input_info->offset = offset; |
+ input_info->length = length; |
+} |
+ |
+// Associates a dmabuf with output buffer |index|; the fd is held in |
+// |pending_import_buffer_| until the buffer is first returned via |
+// UseBuffer(), at which point it is imported into the VDA. Takes |
+// ownership of |dmabuf_fd| on every path. |
+void ArcGpuVideoDecodeAccelerator::BindDmabuf(PortType port, |
+ uint32_t index, |
+ int dmabuf_fd) { |
+ DCHECK(thread_checker_.CalledOnValidThread()); |
+ |
+ // Make sure we will close the file descriptor. |
+ base::ScopedFD handle(dmabuf_fd); |
+ if (!ValidatePortAndIndex(port, index)) { |
+ arc_client_->OnError(INVALID_ARGUMENT); |
+ return; |
+ } |
+ if (port != PORT_OUTPUT) { |
kcwu
2016/03/09 07:58:21
How about move this check to line 152 (before Vali
Owen Lin
2016/03/14 08:46:59
Done.
|
+ // NOTE(review): message says "input" but this branch rejects non-OUTPUT |
+ // ports — looks like a copy/paste error; should read "output". |
+ DLOG(ERROR) << "GraphicBuffer is only supported for input"; |
+ arc_client_->OnError(INVALID_ARGUMENT); |
+ return; |
+ } |
+ pending_import_buffer_[index] = std::move(handle); |
+} |
+ |
+// Client hands us a buffer. Input port: queue the bitstream data to the |
+// VDA for decoding. Output port: return the picture to the VDA, or emit |
+// a pending EOS buffer to the client. |
+void ArcGpuVideoDecodeAccelerator::UseBuffer(PortType port, |
+ uint32_t index, |
+ const BufferMetadata& metadata) { |
+ DVLOG(5) << "UseBuffer(port=" << port << ", index=" << index |
+ << ", metadata=(bytes_used=" << metadata.bytes_used |
+ << ", timestamp=" << metadata.timestamp << "))"; |
kcwu
2016/03/07 14:04:33
extra )
Owen Lin
2016/03/14 08:46:59
Done.
|
+ DCHECK(thread_checker_.CalledOnValidThread()); |
+ if (!ValidatePortAndIndex(port, index)) { |
+ arc_client_->OnError(INVALID_ARGUMENT); |
+ return; |
+ } |
+ switch (port) { |
+ case PORT_INPUT: { |
+ InputBufferInfo* input_info = &input_buffer_info_[index]; |
+ // An EOS-flagged input triggers a flush; note the buffer content is |
+ // still queued to the VDA below. |
+ if (metadata.flags & BUFFER_FLAG_EOS) { |
+ // TODO(owenlin): Ask VDA to return all output pictures so that we |
+ // can output an EOS picture when Flush() is done. |
+ vda_->Flush(); |
kcwu
2016/03/09 07:58:21
I'm not sure. Should this move to line 196 or retu
Owen Lin
2016/03/14 08:46:59
Move to line 196. We need to send the content to b
|
+ } |
+ int32_t bitstream_buffer_id = next_bitstream_buffer_id_; |
+ // Mask against 30 bits, to avoid (undefined wraparound on signed integer) |
kcwu
2016/03/09 07:58:21
s/(undefined wraparound on signed integer)/(undefi
Owen Lin
2016/03/14 08:46:59
Done.
|
+ next_bitstream_buffer_id_ = (next_bitstream_buffer_id_ + 1) & 0x3FFFFFFF; |
+ // Remember which client buffer/timestamp this id maps to so that |
+ // PictureReady()/NotifyEndOfBitstreamBuffer() can route callbacks. |
+ SetInputRecord(bitstream_buffer_id, index, metadata.timestamp); |
+ // Dup the fd: the handle passed to Decode() is auto-closed (second |
+ // arg true), while |input_info->handle| must stay open for reuse. |
+ int dup_fd = HANDLE_EINTR(dup(input_info->handle.get())); |
+ if (dup_fd < 0) { |
+ DLOG(ERROR) << "dup() failed."; |
+ arc_client_->OnError(PLATFORM_FAILURE); |
+ return; |
+ } |
+ vda_->Decode(media::BitstreamBuffer( |
+ bitstream_buffer_id, base::SharedMemoryHandle(dup_fd, true), |
+ metadata.bytes_used, input_info->offset)); |
+ break; |
+ } |
+ case PORT_OUTPUT: { |
+ SendEosIfNeededOrReusePicture(index); |
+ break; |
+ } |
+ default: |
+ NOTREACHED(); |
+ } |
+} |
+ |
+// Synchronously resets the VDA: blocks the calling thread in a nested |
+// message loop until NotifyResetDone() quits it. |
+void ArcGpuVideoDecodeAccelerator::Reset() { |
+ DCHECK(thread_checker_.CalledOnValidThread()); |
+ base::RunLoop loop; |
+ reset_done_callback_ = loop.QuitClosure(); |
+ DCHECK(vda_); |
+ vda_->Reset(); |
+ // Nested tasks must be allowed so the VDA's ResetDone callback can be |
+ // dispatched while we are blocked inside loop.Run(). |
+ base::MessageLoop::ScopedNestableTaskAllower allow( |
+ base::MessageLoop::current()); |
+ // Wait for the ResetDone callback. |
+ loop.Run(); |
+} |
+ |
+// VDA asks for output buffers. We do not allocate them here; the format |
+// is forwarded to the client, which responds with |
+// SetNumberOfOutputBuffers() and BindDmabuf(). |
+void ArcGpuVideoDecodeAccelerator::ProvidePictureBuffers( |
+ size_t requested_num_of_buffers, |
+ const gfx::Size& dimensions, |
+ uint32_t texture_target) { |
+ DVLOG(5) << "ProvidePictureBuffers(" |
+ << "requested_num_of_buffers=" << requested_num_of_buffers |
+ << ", dimensions=" << dimensions.ToString() << ")"; |
+ DCHECK(thread_checker_.CalledOnValidThread()); |
+ coded_size_ = dimensions; |
+ // TODO(owenlin): use VDA::GetOutputFormat() here and calculate correct |
+ // |image_size|. |
+ VideoFormat video_format; |
+ // width * height * 3 / 2 — assumes a 12-bit-per-pixel 4:2:0 layout; |
+ // see the TODO above, the real format should come from the VDA. |
+ video_format.image_size = dimensions.GetArea() * 3 / 2; |
+ // Cached so PictureReady() can report bytes_used for output buffers. |
+ output_buffer_size_ = video_format.image_size; |
+ video_format.min_num_buffers = requested_num_of_buffers; |
+ video_format.coded_width = dimensions.width(); |
+ video_format.coded_height = dimensions.height(); |
+ // TODO(owenlin): How to get visible size? |
+ video_format.crop_top = 0; |
+ video_format.crop_left = 0; |
+ video_format.crop_width = dimensions.width(); |
+ video_format.crop_height = dimensions.height(); |
+ arc_client_->OnOutputFormatChanged(video_format); |
+} |
+ |
+// Intentionally a no-op: per the review reply below, the VDA holds its |
+// own fds for the picture buffers, so there is nothing to release here. |
+void ArcGpuVideoDecodeAccelerator::DismissPictureBuffer( |
+ int32_t picture_buffer) { |
+ // no-op |
kcwu
2016/03/09 07:58:21
why can it be no-op?
Owen Lin
2016/03/14 08:46:59
VDA has its owned FDs of the picture buffers and w
|
+} |
+ |
+// VDA produced a decoded picture: hand it to the client together with the |
+// timestamp recorded when the matching bitstream buffer was queued. |
+void ArcGpuVideoDecodeAccelerator::PictureReady(const media::Picture& picture) { |
+ DVLOG(5) << "PictureReady(picture_buffer_id=" << picture.picture_buffer_id() |
+ << ", bitstream_buffer_id=" << picture.bitstream_buffer_id(); |
+ DCHECK(thread_checker_.CalledOnValidThread()); |
+ |
+ // Empty buffer, returned in Flushing. |
+ if (picture.bitstream_buffer_id() == -1) { |
kcwu
2016/03/09 07:58:21
where is "-1" coming from?
I don't find it in this
Owen Lin
2016/03/14 08:46:59
It would be defined in the new Flush(bool return_b
|
+ // Held until NotifyFlushDone() so one of them can carry the EOS flag. |
+ buffers_pending_eos_.push(picture.picture_buffer_id()); |
+ return; |
+ } |
+ InputRecord* input_record = FindInputRecord(picture.bitstream_buffer_id()); |
+ if (input_record == nullptr) { |
+ DLOG(ERROR) << "Cannot find for bitstream buffer id: " |
+ << picture.bitstream_buffer_id(); |
+ arc_client_->OnError(PLATFORM_FAILURE); |
+ return; |
+ } |
+ |
+ BufferMetadata metadata; |
+ metadata.timestamp = input_record->timestamp; |
+ // NOTE(review): reports the full buffer size computed in |
+ // ProvidePictureBuffers(), not the actual decoded payload size. |
+ metadata.bytes_used = output_buffer_size_; |
+ arc_client_->OnBufferDone(PORT_OUTPUT, picture.picture_buffer_id(), metadata); |
+} |
+ |
+// VDA finished consuming an input bitstream buffer: give the client back |
+// the input buffer index it originally came from. |
+void ArcGpuVideoDecodeAccelerator::NotifyEndOfBitstreamBuffer( |
+ int32_t bitstream_buffer_id) { |
+ DVLOG(5) << "NotifyEndOfBitstreamBuffer(" << bitstream_buffer_id << ")"; |
+ DCHECK(thread_checker_.CalledOnValidThread()); |
+ InputRecord* input_record = FindInputRecord(bitstream_buffer_id); |
+ if (input_record == nullptr) { |
+ arc_client_->OnError(PLATFORM_FAILURE); |
+ return; |
+ } |
+ arc_client_->OnBufferDone(PORT_INPUT, input_record->buffer_index, |
+ BufferMetadata()); |
+} |
+ |
+// Flush completed: arm the EOS flag for the next outgoing output buffer |
+// and release any pictures that were queued while the flush was pending. |
+void ArcGpuVideoDecodeAccelerator::NotifyFlushDone() { |
+ DCHECK(thread_checker_.CalledOnValidThread()); |
+ pending_eos_output_buffer_ = true; |
+ while (!buffers_pending_eos_.empty()) { |
+ SendEosIfNeededOrReusePicture(buffers_pending_eos_.front()); |
+ buffers_pending_eos_.pop(); |
+ } |
+} |
+ |
+// Reset completed: unblock the nested run loop spun in Reset(). |
+void ArcGpuVideoDecodeAccelerator::NotifyResetDone() { |
+ DCHECK(thread_checker_.CalledOnValidThread()); |
+ base::ResetAndReturn(&reset_done_callback_).Run(); |
+} |
+ |
+// Maps media::VideoDecodeAccelerator error codes onto the |
+// ArcVideoAccelerator error enum; unknown values degrade to |
+// PLATFORM_FAILURE. |
+static ArcVideoAccelerator::Error ConvertErrorCode( |
+ media::VideoDecodeAccelerator::Error error) { |
+ switch (error) { |
+ case media::VideoDecodeAccelerator::ILLEGAL_STATE: |
+ return ArcVideoAccelerator::ILLEGAL_STATE; |
+ case media::VideoDecodeAccelerator::INVALID_ARGUMENT: |
+ return ArcVideoAccelerator::INVALID_ARGUMENT; |
+ case media::VideoDecodeAccelerator::UNREADABLE_INPUT: |
+ return ArcVideoAccelerator::UNREADABLE_INPUT; |
+ case media::VideoDecodeAccelerator::PLATFORM_FAILURE: |
+ return ArcVideoAccelerator::PLATFORM_FAILURE; |
+ default: |
+ DLOG(ERROR) << "Unknown error: " << error; |
+ return ArcVideoAccelerator::PLATFORM_FAILURE; |
+ } |
+} |
+ |
+// Fatal VDA error: translate and forward to the ARC client. |
+void ArcGpuVideoDecodeAccelerator::NotifyError( |
+ media::VideoDecodeAccelerator::Error error) { |
+ DCHECK(thread_checker_.CalledOnValidThread()); |
+ arc_client_->OnError(ConvertErrorCode(error)); |
+} |
+ |
+// Routes output buffer |index| onward. If an EOS is pending (flush just |
+// finished), send the buffer back to the client flagged EOS instead of |
+// reusing it. Otherwise, a freshly bound dmabuf is imported into the VDA; |
+// failing that, the existing picture buffer is simply reused. |
+void ArcGpuVideoDecodeAccelerator::SendEosIfNeededOrReusePicture( |
+ uint32_t index) { |
+ if (pending_eos_output_buffer_) { |
+ BufferMetadata metadata; |
+ metadata.flags = BUFFER_FLAG_EOS; |
+ arc_client_->OnBufferDone(PORT_OUTPUT, index, metadata); |
+ // Only one buffer carries the EOS flag per flush. |
+ pending_eos_output_buffer_ = false; |
+ } else { |
+ if (pending_import_buffer_[index].is_valid()) { |
+ // First use after BindDmabuf(): transfer the fd into the VDA. |
+ std::vector<base::ScopedFD> dmabuf_fds; |
+ dmabuf_fds.push_back(std::move(pending_import_buffer_[index])); |
+ vda_->ImportBufferForPicture(index, std::move(dmabuf_fds)); |
+ DCHECK(!pending_import_buffer_[index].is_valid()); |
+ } else { |
+ vda_->ReusePictureBuffer(index); |
+ } |
+ } |
+} |
+ |
+// Records the bitstream-id -> (buffer index, timestamp) mapping; the list |
+// is capped so entries the VDA never acknowledges cannot grow unbounded. |
+void ArcGpuVideoDecodeAccelerator::SetInputRecord(int32_t bitstream_buffer_id, |
+ uint32_t buffer_index, |
+ int64_t timestamp) { |
+ input_records_.push_front( |
+ InputRecord(bitstream_buffer_id, buffer_index, timestamp)); |
+ // The same value copied from media::GpuVideoDecoder. |
+ const size_t kMaxNumberOfInputRecords = 128; |
+ // Evict the oldest record once the cap is exceeded. |
+ if (input_records_.size() > kMaxNumberOfInputRecords) |
+ input_records_.pop_back(); |
+} |
+ |
+// Linear scan is acceptable: the list holds at most 128 entries (see |
+// SetInputRecord()). Returns nullptr when the id is unknown. |
+ArcGpuVideoDecodeAccelerator::InputRecord* |
+ArcGpuVideoDecodeAccelerator::FindInputRecord(int32_t bitstream_buffer_id) { |
+ for (auto& record : input_records_) { |
+ if (record.bitstream_buffer_id == bitstream_buffer_id) |
+ return &record; |
+ } |
+ return nullptr; |
+} |
+ |
+// Returns true iff |port| is a known port and |index| is within that |
+// port's configured buffer range. Logs on failure; callers are |
+// responsible for reporting the error to the client. |
+bool ArcGpuVideoDecodeAccelerator::ValidatePortAndIndex(PortType port, |
+ uint32_t index) { |
+ switch (port) { |
+ case PORT_INPUT: |
+ // Bounded by num_input_buffers from Initialize(). |
+ if (index >= input_buffer_info_.size()) { |
+ DLOG(ERROR) << "Invalid index: " << index; |
+ return false; |
+ } |
+ return true; |
+ case PORT_OUTPUT: |
+ // Bounded by the count given to SetNumberOfOutputBuffers(). |
+ if (index >= pending_import_buffer_.size()) { |
+ DLOG(ERROR) << "Invalid index: " << index; |
+ return false; |
+ } |
+ return true; |
+ default: |
+ DLOG(ERROR) << "Invalid port: " << port; |
+ return false; |
+ } |
+} |
+ |
+} // namespace arc |
+} // namespace chromeos |