| OLD | NEW |
| (Empty) |
| 1 // Copyright 2016 The Chromium Authors. All rights reserved. | |
| 2 // Use of this source code is governed by a BSD-style license that can be | |
| 3 // found in the LICENSE file. | |
| 4 | |
| 5 #include "chrome/gpu/arc_gpu_video_decode_accelerator.h" | |
| 6 | |
| 7 #include "base/callback_helpers.h" | |
| 8 #include "base/logging.h" | |
| 9 #include "base/metrics/histogram_macros.h" | |
| 10 #include "base/numerics/safe_math.h" | |
| 11 #include "base/run_loop.h" | |
| 12 #include "base/unguessable_token.h" | |
| 13 #include "media/base/video_frame.h" | |
| 14 #include "media/gpu/gpu_video_decode_accelerator_factory.h" | |
| 15 | |
| 16 namespace chromeos { | |
| 17 namespace arc { | |
| 18 | |
namespace {

// Upper bound on the number of buffers per port. The actual count is
// requested from the untrusted client side, so it must be capped here.
constexpr size_t kMaxBufferCount = 128;

// Upper bound on concurrently running ARC video clients. There is currently
// no way to tell whether resources suffice to create another VDA, so an
// arbitrarily chosen reasonable constant is enforced instead.
constexpr int kMaxConcurrentClients = 8;

}  // namespace
| 31 | |
| 32 int ArcGpuVideoDecodeAccelerator::client_count_ = 0; | |
| 33 | |
// Bookkeeping for one submitted input buffer: ties the VDA-visible bitstream
// buffer id back to the client's buffer index and presentation timestamp so
// the async VDA callbacks can report results to the right buffer.
ArcGpuVideoDecodeAccelerator::InputRecord::InputRecord(
    int32_t bitstream_buffer_id,
    uint32_t buffer_index,
    int64_t timestamp)
    : bitstream_buffer_id(bitstream_buffer_id),
      buffer_index(buffer_index),
      timestamp(timestamp) {}
| 41 | |
// InputBufferInfo carries the ashmem fd plus offset/length for one input
// slot (filled in by BindSharedMemory()). It holds a base::ScopedFD, so the
// defaulted move operations transfer fd ownership.
ArcGpuVideoDecodeAccelerator::InputBufferInfo::InputBufferInfo() = default;

ArcGpuVideoDecodeAccelerator::InputBufferInfo::InputBufferInfo(
    InputBufferInfo&& other) = default;

ArcGpuVideoDecodeAccelerator::InputBufferInfo::~InputBufferInfo() = default;

// OutputBufferInfo carries the dmabuf fd and plane layout for one output
// slot (filled in by BindDmabuf()) until UseBuffer() imports it into the VDA.
ArcGpuVideoDecodeAccelerator::OutputBufferInfo::OutputBufferInfo() = default;

ArcGpuVideoDecodeAccelerator::OutputBufferInfo::OutputBufferInfo(
    OutputBufferInfo&& other) = default;

ArcGpuVideoDecodeAccelerator::OutputBufferInfo::~OutputBufferInfo() = default;
| 55 | |
// Constructs an uninitialized accelerator; all real setup happens in
// Initialize(). |gpu_preferences| is stored for later VDA creation.
ArcGpuVideoDecodeAccelerator::ArcGpuVideoDecodeAccelerator(
    const gpu::GpuPreferences& gpu_preferences)
    : arc_client_(nullptr),
      next_bitstream_buffer_id_(0),
      output_pixel_format_(media::PIXEL_FORMAT_UNKNOWN),
      output_buffer_size_(0),
      requested_num_of_output_buffers_(0),
      gpu_preferences_(gpu_preferences) {}
| 64 | |
ArcGpuVideoDecodeAccelerator::~ArcGpuVideoDecodeAccelerator() {
  DCHECK(thread_checker_.CalledOnValidThread());
  // Only instances that successfully created a VDA incremented
  // |client_count_| in InitializeTask(), so only those decrement it here.
  if (vda_) {
    client_count_--;
  }
}
| 71 | |
// Public entry point: runs InitializeTask() and records its result in the
// "Media.ArcGpuVideoDecodeAccelerator.InitializeResult" UMA histogram
// before returning it to the caller.
ArcVideoAccelerator::Result ArcGpuVideoDecodeAccelerator::Initialize(
    const Config& config,
    ArcVideoAccelerator::Client* client) {
  auto result = InitializeTask(config, client);
  // Report initialization status to UMA.
  UMA_HISTOGRAM_ENUMERATION(
      "Media.ArcGpuVideoDecodeAccelerator.InitializeResult", result,
      RESULT_MAX);
  return result;
}
| 82 | |
// Validates |config|, maps the HAL input pixel format to a media codec
// profile, and creates the underlying VDA in IMPORT output mode. Returns
// SUCCESS or the specific failure reason; on success |client_count_| is
// incremented (paired with the decrement in the destructor).
ArcVideoAccelerator::Result ArcGpuVideoDecodeAccelerator::InitializeTask(
    const Config& config,
    ArcVideoAccelerator::Client* client) {
  DVLOG(5) << "Initialize(device=" << config.device_type
           << ", input_pixel_format=" << config.input_pixel_format
           << ", num_input_buffers=" << config.num_input_buffers << ")";
  DCHECK(thread_checker_.CalledOnValidThread());
  if (config.device_type != Config::DEVICE_DECODER)
    return INVALID_ARGUMENT;
  DCHECK(client);

  // A non-null |arc_client_| means Initialize() already ran once.
  if (arc_client_) {
    DLOG(ERROR) << "Re-Initialize() is not allowed";
    return ILLEGAL_STATE;
  }

  if (client_count_ >= kMaxConcurrentClients) {
    LOG(WARNING) << "Reject to Initialize() due to too many clients: "
                 << client_count_;
    return INSUFFICIENT_RESOURCES;
  }

  // NOTE(review): |arc_client_| is assigned before the remaining validation
  // below; if a later step fails, this instance stays bound to the client
  // and a retry will hit the ILLEGAL_STATE branch above — confirm intended.
  arc_client_ = client;

  // The buffer count comes from the untrusted client; cap it.
  if (config.num_input_buffers > kMaxBufferCount) {
    DLOG(ERROR) << "Request too many buffers: " << config.num_input_buffers;
    return INVALID_ARGUMENT;
  }
  input_buffer_info_.resize(config.num_input_buffers);

  // Translate the Android HAL pixel format into the media codec profile the
  // VDA expects.
  media::VideoDecodeAccelerator::Config vda_config;
  switch (config.input_pixel_format) {
    case HAL_PIXEL_FORMAT_H264:
      vda_config.profile = media::H264PROFILE_MAIN;
      break;
    case HAL_PIXEL_FORMAT_VP8:
      vda_config.profile = media::VP8PROFILE_ANY;
      break;
    case HAL_PIXEL_FORMAT_VP9:
      vda_config.profile = media::VP9PROFILE_PROFILE0;
      break;
    default:
      DLOG(ERROR) << "Unsupported input format: " << config.input_pixel_format;
      return INVALID_ARGUMENT;
  }
  // IMPORT mode: output buffers (dmabufs) are provided by the client via
  // BindDmabuf()/UseBuffer() rather than allocated by the VDA.
  vda_config.output_mode =
      media::VideoDecodeAccelerator::Config::OutputMode::IMPORT;

  auto vda_factory = media::GpuVideoDecodeAcceleratorFactory::CreateWithNoGL();
  vda_ = vda_factory->CreateVDA(
      this, vda_config, gpu::GpuDriverBugWorkarounds(), gpu_preferences_);
  if (!vda_) {
    DLOG(ERROR) << "Failed to create VDA.";
    return PLATFORM_FAILURE;
  }

  client_count_++;
  DVLOG(5) << "Number of concurrent ArcVideoAccelerator clients: "
           << client_count_;

  return SUCCESS;
}
| 145 | |
| 146 void ArcGpuVideoDecodeAccelerator::SetNumberOfOutputBuffers(size_t number) { | |
| 147 DVLOG(5) << "SetNumberOfOutputBuffers(" << number << ")"; | |
| 148 DCHECK(thread_checker_.CalledOnValidThread()); | |
| 149 if (!vda_) { | |
| 150 DLOG(ERROR) << "VDA not initialized"; | |
| 151 return; | |
| 152 } | |
| 153 | |
| 154 if (number > kMaxBufferCount) { | |
| 155 DLOG(ERROR) << "Too many buffers: " << number; | |
| 156 arc_client_->OnError(INVALID_ARGUMENT); | |
| 157 return; | |
| 158 } | |
| 159 | |
| 160 std::vector<media::PictureBuffer> buffers; | |
| 161 for (size_t id = 0; id < number; ++id) { | |
| 162 // TODO(owenlin): Make sure the |coded_size| is what we want. | |
| 163 buffers.push_back( | |
| 164 media::PictureBuffer(base::checked_cast<int32_t>(id), coded_size_)); | |
| 165 } | |
| 166 vda_->AssignPictureBuffers(buffers); | |
| 167 | |
| 168 buffers_pending_import_.clear(); | |
| 169 buffers_pending_import_.resize(number); | |
| 170 } | |
| 171 | |
| 172 void ArcGpuVideoDecodeAccelerator::BindSharedMemory(PortType port, | |
| 173 uint32_t index, | |
| 174 base::ScopedFD ashmem_fd, | |
| 175 off_t offset, | |
| 176 size_t length) { | |
| 177 DVLOG(5) << "ArcGVDA::BindSharedMemory, offset: " << offset | |
| 178 << ", length: " << length; | |
| 179 DCHECK(thread_checker_.CalledOnValidThread()); | |
| 180 if (!vda_) { | |
| 181 DLOG(ERROR) << "VDA not initialized"; | |
| 182 return; | |
| 183 } | |
| 184 | |
| 185 if (port != PORT_INPUT) { | |
| 186 DLOG(ERROR) << "SharedBuffer is only supported for input"; | |
| 187 arc_client_->OnError(INVALID_ARGUMENT); | |
| 188 return; | |
| 189 } | |
| 190 if (!ValidatePortAndIndex(port, index)) { | |
| 191 arc_client_->OnError(INVALID_ARGUMENT); | |
| 192 return; | |
| 193 } | |
| 194 InputBufferInfo* input_info = &input_buffer_info_[index]; | |
| 195 input_info->handle = std::move(ashmem_fd); | |
| 196 input_info->offset = offset; | |
| 197 input_info->length = length; | |
| 198 } | |
| 199 | |
| 200 bool ArcGpuVideoDecodeAccelerator::VerifyDmabuf( | |
| 201 const base::ScopedFD& dmabuf_fd, | |
| 202 const std::vector<::arc::ArcVideoAcceleratorDmabufPlane>& dmabuf_planes) | |
| 203 const { | |
| 204 size_t num_planes = media::VideoFrame::NumPlanes(output_pixel_format_); | |
| 205 if (dmabuf_planes.size() != num_planes) { | |
| 206 DLOG(ERROR) << "Invalid number of dmabuf planes passed: " | |
| 207 << dmabuf_planes.size() << ", expected: " << num_planes; | |
| 208 return false; | |
| 209 } | |
| 210 | |
| 211 off_t size = lseek(dmabuf_fd.get(), 0, SEEK_END); | |
| 212 lseek(dmabuf_fd.get(), 0, SEEK_SET); | |
| 213 if (size < 0) { | |
| 214 DPLOG(ERROR) << "fail to find the size of dmabuf"; | |
| 215 return false; | |
| 216 } | |
| 217 | |
| 218 size_t i = 0; | |
| 219 for (const auto& plane : dmabuf_planes) { | |
| 220 DVLOG(4) << "Plane " << i << ", offset: " << plane.offset | |
| 221 << ", stride: " << plane.stride; | |
| 222 | |
| 223 size_t rows = | |
| 224 media::VideoFrame::Rows(i, output_pixel_format_, coded_size_.height()); | |
| 225 base::CheckedNumeric<off_t> current_size(plane.offset); | |
| 226 current_size += base::CheckMul(plane.stride, rows); | |
| 227 | |
| 228 if (!current_size.IsValid() || current_size.ValueOrDie() > size) { | |
| 229 DLOG(ERROR) << "Invalid strides/offsets"; | |
| 230 return false; | |
| 231 } | |
| 232 | |
| 233 ++i; | |
| 234 } | |
| 235 | |
| 236 return true; | |
| 237 } | |
| 238 | |
| 239 void ArcGpuVideoDecodeAccelerator::BindDmabuf( | |
| 240 PortType port, | |
| 241 uint32_t index, | |
| 242 base::ScopedFD dmabuf_fd, | |
| 243 const std::vector<::arc::ArcVideoAcceleratorDmabufPlane>& dmabuf_planes) { | |
| 244 DCHECK(thread_checker_.CalledOnValidThread()); | |
| 245 | |
| 246 if (!vda_) { | |
| 247 DLOG(ERROR) << "VDA not initialized"; | |
| 248 return; | |
| 249 } | |
| 250 | |
| 251 if (port != PORT_OUTPUT) { | |
| 252 DLOG(ERROR) << "Dmabuf is only supported for input"; | |
| 253 arc_client_->OnError(INVALID_ARGUMENT); | |
| 254 return; | |
| 255 } | |
| 256 if (!ValidatePortAndIndex(port, index)) { | |
| 257 arc_client_->OnError(INVALID_ARGUMENT); | |
| 258 return; | |
| 259 } | |
| 260 if (!VerifyDmabuf(dmabuf_fd, dmabuf_planes)) { | |
| 261 arc_client_->OnError(INVALID_ARGUMENT); | |
| 262 return; | |
| 263 } | |
| 264 | |
| 265 OutputBufferInfo& info = buffers_pending_import_[index]; | |
| 266 info.handle = std::move(dmabuf_fd); | |
| 267 info.planes = dmabuf_planes; | |
| 268 } | |
| 269 | |
// Queues the buffer at |index| on |port| to the VDA: input buffers are
// submitted for decode; output buffers are either imported (first use after
// BindDmabuf()) or recycled via ReusePictureBuffer().
void ArcGpuVideoDecodeAccelerator::UseBuffer(PortType port,
                                             uint32_t index,
                                             const BufferMetadata& metadata) {
  DVLOG(5) << "UseBuffer(port=" << port << ", index=" << index
           << ", metadata=(bytes_used=" << metadata.bytes_used
           << ", timestamp=" << metadata.timestamp << ")";
  DCHECK(thread_checker_.CalledOnValidThread());
  if (!vda_) {
    DLOG(ERROR) << "VDA not initialized";
    return;
  }
  if (!ValidatePortAndIndex(port, index)) {
    arc_client_->OnError(INVALID_ARGUMENT);
    return;
  }
  switch (port) {
    case PORT_INPUT: {
      InputBufferInfo* input_info = &input_buffer_info_[index];
      int32_t bitstream_buffer_id = next_bitstream_buffer_id_;
      // Mask against 30 bits, to avoid (undefined) wraparound on signed
      // integer.
      next_bitstream_buffer_id_ = (next_bitstream_buffer_id_ + 1) & 0x3FFFFFFF;
      // dup() so |input_info->handle| stays valid for later reuse of the
      // same input slot; the duplicate is owned by the handle built below.
      int dup_fd = HANDLE_EINTR(dup(input_info->handle.get()));
      if (dup_fd < 0) {
        DLOG(ERROR) << "dup() failed.";
        arc_client_->OnError(PLATFORM_FAILURE);
        return;
      }
      // Record id -> (buffer index, timestamp) so the async VDA callbacks
      // can map results back to the client's buffer.
      CreateInputRecord(bitstream_buffer_id, index, metadata.timestamp);
      // TODO(rockot): Pass GUIDs through Mojo. https://crbug.com/713763.
      // TODO(rockot): This fd comes from a mojo::ScopedHandle in
      // GpuArcVideoService::BindSharedMemory. That should be passed through,
      // rather than pulling out the fd. https://crbug.com/713763.
      // TODO(rockot): Pass through a real size rather than |0|.
      base::UnguessableToken guid = base::UnguessableToken::Create();
      vda_->Decode(media::BitstreamBuffer(
          bitstream_buffer_id,
          base::SharedMemoryHandle(base::FileDescriptor(dup_fd, true), 0u,
                                   guid),
          metadata.bytes_used, input_info->offset));
      break;
    }
    case PORT_OUTPUT: {
      // is_valid() is true for the first time the buffer is passed to the VDA.
      // In that case, VDA needs to import the buffer first.
      OutputBufferInfo& info = buffers_pending_import_[index];
      if (info.handle.is_valid()) {
        gfx::GpuMemoryBufferHandle handle;
#if defined(USE_OZONE)
        // release() transfers ownership of the dmabuf fd into the pixmap
        // handle, so this import path runs at most once per bind.
        handle.native_pixmap_handle.fds.emplace_back(
            base::FileDescriptor(info.handle.release(), true));
        for (const auto& plane : info.planes) {
          handle.native_pixmap_handle.planes.emplace_back(plane.stride,
                                                          plane.offset, 0, 0);
        }
#endif
        vda_->ImportBufferForPicture(index, handle);
      } else {
        vda_->ReusePictureBuffer(index);
      }
      break;
    }
    default:
      NOTREACHED();
  }
}
| 336 | |
| 337 void ArcGpuVideoDecodeAccelerator::Reset() { | |
| 338 DCHECK(thread_checker_.CalledOnValidThread()); | |
| 339 if (!vda_) { | |
| 340 DLOG(ERROR) << "VDA not initialized"; | |
| 341 return; | |
| 342 } | |
| 343 vda_->Reset(); | |
| 344 } | |
| 345 | |
| 346 void ArcGpuVideoDecodeAccelerator::Flush() { | |
| 347 DCHECK(thread_checker_.CalledOnValidThread()); | |
| 348 if (!vda_) { | |
| 349 DLOG(ERROR) << "VDA not initialized"; | |
| 350 return; | |
| 351 } | |
| 352 vda_->Flush(); | |
| 353 } | |
| 354 | |
// VDA callback: the decoder needs |requested_num_of_buffers| output buffers
// with coded size |dimensions|. Records the new output geometry/format and
// notifies the client via NotifyOutputFormatChanged().
void ArcGpuVideoDecodeAccelerator::ProvidePictureBuffers(
    uint32_t requested_num_of_buffers,
    media::VideoPixelFormat output_pixel_format,
    uint32_t textures_per_buffer,
    const gfx::Size& dimensions,
    uint32_t texture_target) {
  DVLOG(5) << "ProvidePictureBuffers("
           << "requested_num_of_buffers=" << requested_num_of_buffers
           << ", dimensions=" << dimensions.ToString() << ")";
  DCHECK(thread_checker_.CalledOnValidThread());
  coded_size_ = dimensions;

  // By default, use an empty rect to indicate the visible rectangle is not
  // available.
  visible_rect_ = gfx::Rect();
  // Changing the output pixel format itself mid-stream is not supported.
  if ((output_pixel_format_ != media::PIXEL_FORMAT_UNKNOWN) &&
      (output_pixel_format_ != output_pixel_format)) {
    arc_client_->OnError(PLATFORM_FAILURE);
    return;
  }
  output_pixel_format_ = output_pixel_format;
  requested_num_of_output_buffers_ = requested_num_of_buffers;
  output_buffer_size_ =
      media::VideoFrame::AllocationSize(output_pixel_format_, coded_size_);

  NotifyOutputFormatChanged();
}
| 382 | |
// Builds a VideoFormat from the current output state (pixel format mapped to
// its Android HAL equivalent, buffer size/count, coded size, visible rect as
// a crop rectangle) and forwards it to the client. Reports PLATFORM_FAILURE
// for pixel formats with no HAL mapping.
void ArcGpuVideoDecodeAccelerator::NotifyOutputFormatChanged() {
  VideoFormat video_format;
  switch (output_pixel_format_) {
    case media::PIXEL_FORMAT_I420:
    case media::PIXEL_FORMAT_YV12:
    case media::PIXEL_FORMAT_NV12:
    case media::PIXEL_FORMAT_NV21:
      // HAL_PIXEL_FORMAT_YCbCr_420_888 is the flexible pixel format in Android
      // which handles all 420 formats, with both orderings of chroma (CbCr and
      // CrCb) as well as planar and semi-planar layouts.
      video_format.pixel_format = HAL_PIXEL_FORMAT_YCbCr_420_888;
      break;
    case media::PIXEL_FORMAT_ARGB:
      video_format.pixel_format = HAL_PIXEL_FORMAT_BGRA_8888;
      break;
    default:
      DLOG(ERROR) << "Format not supported: " << output_pixel_format_;
      arc_client_->OnError(PLATFORM_FAILURE);
      return;
  }
  video_format.buffer_size = output_buffer_size_;
  video_format.min_num_buffers = requested_num_of_output_buffers_;
  video_format.coded_width = coded_size_.width();
  video_format.coded_height = coded_size_.height();
  // |visible_rect_| may be empty (see ProvidePictureBuffers()), in which
  // case the crop fields are all zero.
  video_format.crop_top = visible_rect_.y();
  video_format.crop_left = visible_rect_.x();
  video_format.crop_width = visible_rect_.width();
  video_format.crop_height = visible_rect_.height();
  arc_client_->OnOutputFormatChanged(video_format);
}
| 413 | |
// VDA callback when a picture buffer is no longer needed.
void ArcGpuVideoDecodeAccelerator::DismissPictureBuffer(
    int32_t picture_buffer) {
  // no-op
  // NOTE(review): intentionally empty — this appears to rely on buffer
  // lifetime being managed elsewhere (client side); confirm nothing needs
  // to be released here.
}
| 418 | |
// VDA callback: a decoded frame is available in the picture buffer. Emits a
// format-change notification first if the visible rectangle changed, then
// reports the output buffer (with the originating input's timestamp) to the
// client.
void ArcGpuVideoDecodeAccelerator::PictureReady(const media::Picture& picture) {
  DVLOG(5) << "PictureReady(picture_buffer_id=" << picture.picture_buffer_id()
           << ", bitstream_buffer_id=" << picture.bitstream_buffer_id();
  DCHECK(thread_checker_.CalledOnValidThread());

  // Handle visible size change.
  if (visible_rect_ != picture.visible_rect()) {
    DVLOG(5) << "visible size changed: " << picture.visible_rect().ToString();
    visible_rect_ = picture.visible_rect();
    NotifyOutputFormatChanged();
  }

  // Look up the timestamp recorded when the input buffer was submitted. A
  // miss means the record was evicted (see CreateInputRecord()) or the id is
  // bogus — treat either as a platform failure.
  InputRecord* input_record = FindInputRecord(picture.bitstream_buffer_id());
  if (input_record == nullptr) {
    DLOG(ERROR) << "Cannot find for bitstream buffer id: "
                << picture.bitstream_buffer_id();
    arc_client_->OnError(PLATFORM_FAILURE);
    return;
  }

  BufferMetadata metadata;
  metadata.timestamp = input_record->timestamp;
  metadata.bytes_used = output_buffer_size_;
  arc_client_->OnBufferDone(PORT_OUTPUT, picture.picture_buffer_id(), metadata);
}
| 444 | |
| 445 void ArcGpuVideoDecodeAccelerator::NotifyEndOfBitstreamBuffer( | |
| 446 int32_t bitstream_buffer_id) { | |
| 447 DVLOG(5) << "NotifyEndOfBitstreamBuffer(" << bitstream_buffer_id << ")"; | |
| 448 DCHECK(thread_checker_.CalledOnValidThread()); | |
| 449 InputRecord* input_record = FindInputRecord(bitstream_buffer_id); | |
| 450 if (input_record == nullptr) { | |
| 451 arc_client_->OnError(PLATFORM_FAILURE); | |
| 452 return; | |
| 453 } | |
| 454 arc_client_->OnBufferDone(PORT_INPUT, input_record->buffer_index, | |
| 455 BufferMetadata()); | |
| 456 } | |
| 457 | |
// VDA callback: forwards flush completion to the client.
void ArcGpuVideoDecodeAccelerator::NotifyFlushDone() {
  DCHECK(thread_checker_.CalledOnValidThread());
  arc_client_->OnFlushDone();
}
| 462 | |
// VDA callback: forwards reset completion to the client.
void ArcGpuVideoDecodeAccelerator::NotifyResetDone() {
  DCHECK(thread_checker_.CalledOnValidThread());
  arc_client_->OnResetDone();
}
| 467 | |
| 468 static ArcVideoAccelerator::Result ConvertErrorCode( | |
| 469 media::VideoDecodeAccelerator::Error error) { | |
| 470 switch (error) { | |
| 471 case media::VideoDecodeAccelerator::ILLEGAL_STATE: | |
| 472 return ArcVideoAccelerator::ILLEGAL_STATE; | |
| 473 case media::VideoDecodeAccelerator::INVALID_ARGUMENT: | |
| 474 return ArcVideoAccelerator::INVALID_ARGUMENT; | |
| 475 case media::VideoDecodeAccelerator::UNREADABLE_INPUT: | |
| 476 return ArcVideoAccelerator::UNREADABLE_INPUT; | |
| 477 case media::VideoDecodeAccelerator::PLATFORM_FAILURE: | |
| 478 return ArcVideoAccelerator::PLATFORM_FAILURE; | |
| 479 default: | |
| 480 DLOG(ERROR) << "Unknown error: " << error; | |
| 481 return ArcVideoAccelerator::PLATFORM_FAILURE; | |
| 482 } | |
| 483 } | |
| 484 | |
// VDA callback: translates the media-layer error and reports it to the
// client.
void ArcGpuVideoDecodeAccelerator::NotifyError(
    media::VideoDecodeAccelerator::Error error) {
  DCHECK(thread_checker_.CalledOnValidThread());
  DLOG(ERROR) << "Error notified: " << error;
  arc_client_->OnError(ConvertErrorCode(error));
}
| 491 | |
| 492 void ArcGpuVideoDecodeAccelerator::CreateInputRecord( | |
| 493 int32_t bitstream_buffer_id, | |
| 494 uint32_t buffer_index, | |
| 495 int64_t timestamp) { | |
| 496 input_records_.push_front( | |
| 497 InputRecord(bitstream_buffer_id, buffer_index, timestamp)); | |
| 498 | |
| 499 // The same value copied from media::GpuVideoDecoder. The input record is | |
| 500 // needed when the input buffer or the corresponding output buffer are | |
| 501 // returned from VDA. However there is no guarantee how much buffers will be | |
| 502 // kept in the VDA. We kept the last |kMaxNumberOfInputRecords| in | |
| 503 // |input_records_| and drop the others. | |
| 504 const size_t kMaxNumberOfInputRecords = 128; | |
| 505 if (input_records_.size() > kMaxNumberOfInputRecords) | |
| 506 input_records_.pop_back(); | |
| 507 } | |
| 508 | |
| 509 ArcGpuVideoDecodeAccelerator::InputRecord* | |
| 510 ArcGpuVideoDecodeAccelerator::FindInputRecord(int32_t bitstream_buffer_id) { | |
| 511 for (auto& record : input_records_) { | |
| 512 if (record.bitstream_buffer_id == bitstream_buffer_id) | |
| 513 return &record; | |
| 514 } | |
| 515 return nullptr; | |
| 516 } | |
| 517 | |
| 518 bool ArcGpuVideoDecodeAccelerator::ValidatePortAndIndex(PortType port, | |
| 519 uint32_t index) const { | |
| 520 switch (port) { | |
| 521 case PORT_INPUT: | |
| 522 if (index >= input_buffer_info_.size()) { | |
| 523 DLOG(ERROR) << "Invalid index: " << index; | |
| 524 return false; | |
| 525 } | |
| 526 return true; | |
| 527 case PORT_OUTPUT: | |
| 528 if (index >= buffers_pending_import_.size()) { | |
| 529 DLOG(ERROR) << "Invalid index: " << index; | |
| 530 return false; | |
| 531 } | |
| 532 return true; | |
| 533 default: | |
| 534 DLOG(ERROR) << "Invalid port: " << port; | |
| 535 return false; | |
| 536 } | |
| 537 } | |
| 538 | |
| 539 } // namespace arc | |
| 540 } // namespace chromeos | |
| OLD | NEW |