OLD | NEW |
---|---|
1 // Copyright 2016 The Chromium Authors. All rights reserved. | 1 // Copyright 2016 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "chrome/gpu/arc_gpu_video_decode_accelerator.h" | 5 #include "chrome/gpu/arc_gpu_video_decode_accelerator.h" |
6 | 6 |
7 #include "base/callback_helpers.h" | 7 #include "base/callback_helpers.h" |
8 #include "base/logging.h" | 8 #include "base/logging.h" |
9 #include "base/numerics/safe_math.h" | |
9 #include "base/run_loop.h" | 10 #include "base/run_loop.h" |
10 #include "content/public/gpu/gpu_video_decode_accelerator_factory.h" | 11 #include "content/public/gpu/gpu_video_decode_accelerator_factory.h" |
11 #include "media/base/video_frame.h" | 12 #include "media/base/video_frame.h" |
12 | 13 |
13 namespace chromeos { | 14 namespace chromeos { |
14 namespace arc { | 15 namespace arc { |
15 | 16 |
16 ArcGpuVideoDecodeAccelerator::InputRecord::InputRecord( | 17 ArcGpuVideoDecodeAccelerator::InputRecord::InputRecord( |
17 int32_t bitstream_buffer_id, | 18 int32_t bitstream_buffer_id, |
18 uint32_t buffer_index, | 19 uint32_t buffer_index, |
19 int64_t timestamp) | 20 int64_t timestamp) |
20 : bitstream_buffer_id(bitstream_buffer_id), | 21 : bitstream_buffer_id(bitstream_buffer_id), |
21 buffer_index(buffer_index), | 22 buffer_index(buffer_index), |
22 timestamp(timestamp) {} | 23 timestamp(timestamp) {} |
23 | 24 |
24 ArcGpuVideoDecodeAccelerator::InputBufferInfo::InputBufferInfo() | 25 ArcGpuVideoDecodeAccelerator::InputBufferInfo::InputBufferInfo() = default; |
25 : offset(0), length(0) {} | |
26 | 26 |
27 ArcGpuVideoDecodeAccelerator::InputBufferInfo::InputBufferInfo( | 27 ArcGpuVideoDecodeAccelerator::InputBufferInfo::InputBufferInfo( |
28 InputBufferInfo&& other) | 28 InputBufferInfo&& other) = default; |
29 : handle(std::move(other.handle)), | |
30 offset(other.offset), | |
31 length(other.length) {} | |
32 | 29 |
33 ArcGpuVideoDecodeAccelerator::InputBufferInfo::~InputBufferInfo() {} | 30 ArcGpuVideoDecodeAccelerator::InputBufferInfo::~InputBufferInfo() = default; |
31 | |
32 ArcGpuVideoDecodeAccelerator::OutputBufferInfo::OutputBufferInfo() = default; | |
33 | |
34 ArcGpuVideoDecodeAccelerator::OutputBufferInfo::OutputBufferInfo( | |
35 OutputBufferInfo&& other) = default; | |
36 | |
37 ArcGpuVideoDecodeAccelerator::OutputBufferInfo::~OutputBufferInfo() = default; | |
34 | 38 |
35 ArcGpuVideoDecodeAccelerator::ArcGpuVideoDecodeAccelerator() | 39 ArcGpuVideoDecodeAccelerator::ArcGpuVideoDecodeAccelerator() |
36 : pending_eos_output_buffer_(false), | 40 : pending_eos_output_buffer_(false), |
37 arc_client_(nullptr), | 41 arc_client_(nullptr), |
38 next_bitstream_buffer_id_(0), | 42 next_bitstream_buffer_id_(0), |
43 output_pixel_format_(media::PIXEL_FORMAT_UNKNOWN), | |
39 output_buffer_size_(0) {} | 44 output_buffer_size_(0) {} |
40 | 45 |
41 ArcGpuVideoDecodeAccelerator::~ArcGpuVideoDecodeAccelerator() {} | 46 ArcGpuVideoDecodeAccelerator::~ArcGpuVideoDecodeAccelerator() {} |
42 | 47 |
43 namespace { | 48 namespace { |
44 | 49 |
45 // An arbitrarily chosen limit of the number of buffers. The number of | 50 // An arbitrarily chosen limit of the number of buffers. The number of |
46 // buffers used is requested from the untrusted client side. | 51 // buffers used is requested from the untrusted client side. |
47 const size_t kMaxBufferCount = 128; | 52 const size_t kMaxBufferCount = 128; |
48 | 53 |
(...skipping 98 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
147 if (!ValidatePortAndIndex(port, index)) { | 152 if (!ValidatePortAndIndex(port, index)) { |
148 arc_client_->OnError(INVALID_ARGUMENT); | 153 arc_client_->OnError(INVALID_ARGUMENT); |
149 return; | 154 return; |
150 } | 155 } |
151 InputBufferInfo* input_info = &input_buffer_info_[index]; | 156 InputBufferInfo* input_info = &input_buffer_info_[index]; |
152 input_info->handle = std::move(ashmem_fd); | 157 input_info->handle = std::move(ashmem_fd); |
153 input_info->offset = offset; | 158 input_info->offset = offset; |
154 input_info->length = length; | 159 input_info->length = length; |
155 } | 160 } |
156 | 161 |
162 bool ArcGpuVideoDecodeAccelerator::VerifyStride(const base::ScopedFD& dmabuf_fd, | |
163 int32_t stride) { | |
164 off_t size = lseek(dmabuf_fd.get(), 0, SEEK_END); | |
165 lseek(dmabuf_fd.get(), 0, SEEK_SET); | |
166 | |
167 if (size < 0) { | |
168 DLOG(ERROR) << "fail to find the size of dmabuf" << errno; | |
169 return false; | |
170 } | |
171 | |
172 int height = coded_size_.height(); | |
173 switch (output_pixel_format_) { | |
174 case media::PIXEL_FORMAT_I420: | |
175 case media::PIXEL_FORMAT_YV12: | |
176 case media::PIXEL_FORMAT_NV12: | |
177 case media::PIXEL_FORMAT_NV21: | |
178 // Adjusts the height for the UV plane. | |
179 DCHECK(height % 2 == 0); // The coded height should be even for YUV. | |
dcheng
2016/05/17 18:56:15
Not DCHECK(), this should just return false.
| |
180 height = height * 3 / 2; | |
181 break; | |
182 case media::PIXEL_FORMAT_ARGB: | |
183 // No need to adjust height. | |
184 break; | |
185 default: | |
186 DLOG(ERROR) << "Format not supported: " << output_pixel_format_; | |
187 return false; | |
188 } | |
189 base::CheckedNumeric<off_t> used_bytes(height); | |
190 used_bytes *= stride; | |
191 | |
192 if (stride < 0 || !used_bytes.IsValid() || used_bytes.ValueOrDie() > size) { | |
193 DLOG(ERROR) << "invalid stride: " << stride << ", height: " << height | |
194 << ", size of dmabuf: " << size; | |
195 return false; | |
196 } | |
197 | |
198 return true; | |
199 } | |
200 | |
157 void ArcGpuVideoDecodeAccelerator::BindDmabuf(PortType port, | 201 void ArcGpuVideoDecodeAccelerator::BindDmabuf(PortType port, |
158 uint32_t index, | 202 uint32_t index, |
159 base::ScopedFD dmabuf_fd) { | 203 base::ScopedFD dmabuf_fd, |
204 int32_t stride) { | |
160 DCHECK(thread_checker_.CalledOnValidThread()); | 205 DCHECK(thread_checker_.CalledOnValidThread()); |
161 | 206 |
162 if (!vda_) { | 207 if (!vda_) { |
163 DLOG(ERROR) << "VDA not initialized"; | 208 DLOG(ERROR) << "VDA not initialized"; |
164 return; | 209 return; |
165 } | 210 } |
166 | 211 |
167 if (port != PORT_OUTPUT) { | 212 if (port != PORT_OUTPUT) { |
168 DLOG(ERROR) << "Dmabuf is only supported for input"; | 213 DLOG(ERROR) << "Dmabuf is only supported for input"; |
169 arc_client_->OnError(INVALID_ARGUMENT); | 214 arc_client_->OnError(INVALID_ARGUMENT); |
170 return; | 215 return; |
171 } | 216 } |
172 if (!ValidatePortAndIndex(port, index)) { | 217 if (!ValidatePortAndIndex(port, index)) { |
173 arc_client_->OnError(INVALID_ARGUMENT); | 218 arc_client_->OnError(INVALID_ARGUMENT); |
174 return; | 219 return; |
175 } | 220 } |
176 buffers_pending_import_[index] = std::move(dmabuf_fd); | 221 if (!VerifyStride(dmabuf_fd, stride)) { |
222 arc_client_->OnError(INVALID_ARGUMENT); | |
223 return; | |
224 } | |
225 | |
226 OutputBufferInfo& info = buffers_pending_import_[index]; | |
227 info.handle = std::move(dmabuf_fd); | |
228 info.stride = stride; | |
177 } | 229 } |
178 | 230 |
179 void ArcGpuVideoDecodeAccelerator::UseBuffer(PortType port, | 231 void ArcGpuVideoDecodeAccelerator::UseBuffer(PortType port, |
180 uint32_t index, | 232 uint32_t index, |
181 const BufferMetadata& metadata) { | 233 const BufferMetadata& metadata) { |
182 DVLOG(5) << "UseBuffer(port=" << port << ", index=" << index | 234 DVLOG(5) << "UseBuffer(port=" << port << ", index=" << index |
183 << ", metadata=(bytes_used=" << metadata.bytes_used | 235 << ", metadata=(bytes_used=" << metadata.bytes_used |
184 << ", timestamp=" << metadata.timestamp << ")"; | 236 << ", timestamp=" << metadata.timestamp << ")"; |
185 DCHECK(thread_checker_.CalledOnValidThread()); | 237 DCHECK(thread_checker_.CalledOnValidThread()); |
186 if (!vda_) { | 238 if (!vda_) { |
(...skipping 47 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
234 void ArcGpuVideoDecodeAccelerator::ProvidePictureBuffers( | 286 void ArcGpuVideoDecodeAccelerator::ProvidePictureBuffers( |
235 uint32_t requested_num_of_buffers, | 287 uint32_t requested_num_of_buffers, |
236 uint32_t textures_per_buffer, | 288 uint32_t textures_per_buffer, |
237 const gfx::Size& dimensions, | 289 const gfx::Size& dimensions, |
238 uint32_t texture_target) { | 290 uint32_t texture_target) { |
239 DVLOG(5) << "ProvidePictureBuffers(" | 291 DVLOG(5) << "ProvidePictureBuffers(" |
240 << "requested_num_of_buffers=" << requested_num_of_buffers | 292 << "requested_num_of_buffers=" << requested_num_of_buffers |
241 << ", dimensions=" << dimensions.ToString() << ")"; | 293 << ", dimensions=" << dimensions.ToString() << ")"; |
242 DCHECK(thread_checker_.CalledOnValidThread()); | 294 DCHECK(thread_checker_.CalledOnValidThread()); |
243 coded_size_ = dimensions; | 295 coded_size_ = dimensions; |
296 output_pixel_format_ = vda_->GetOutputFormat(); | |
244 | 297 |
245 VideoFormat video_format; | 298 VideoFormat video_format; |
246 media::VideoPixelFormat output_format = vda_->GetOutputFormat(); | 299 switch (output_pixel_format_) { |
247 switch (output_format) { | |
248 case media::PIXEL_FORMAT_I420: | 300 case media::PIXEL_FORMAT_I420: |
249 case media::PIXEL_FORMAT_YV12: | 301 case media::PIXEL_FORMAT_YV12: |
250 case media::PIXEL_FORMAT_NV12: | 302 case media::PIXEL_FORMAT_NV12: |
251 case media::PIXEL_FORMAT_NV21: | 303 case media::PIXEL_FORMAT_NV21: |
252 // HAL_PIXEL_FORMAT_YCbCr_420_888 is the flexible pixel format in Android | 304 // HAL_PIXEL_FORMAT_YCbCr_420_888 is the flexible pixel format in Android |
253 // which handles all 420 formats, with both orderings of chroma (CbCr and | 305 // which handles all 420 formats, with both orderings of chroma (CbCr and |
254 // CrCb) as well as planar and semi-planar layouts. | 306 // CrCb) as well as planar and semi-planar layouts. |
255 video_format.pixel_format = HAL_PIXEL_FORMAT_YCbCr_420_888; | 307 video_format.pixel_format = HAL_PIXEL_FORMAT_YCbCr_420_888; |
256 break; | 308 break; |
257 default: | 309 default: |
258 DLOG(ERROR) << "Format not supported: " << output_format; | 310 DLOG(ERROR) << "Format not supported: " << output_pixel_format_; |
259 arc_client_->OnError(PLATFORM_FAILURE); | 311 arc_client_->OnError(PLATFORM_FAILURE); |
260 return; | 312 return; |
261 } | 313 } |
262 video_format.buffer_size = | 314 video_format.buffer_size = |
263 media::VideoFrame::AllocationSize(output_format, coded_size_); | 315 media::VideoFrame::AllocationSize(output_pixel_format_, coded_size_); |
264 output_buffer_size_ = video_format.buffer_size; | 316 output_buffer_size_ = video_format.buffer_size; |
265 video_format.min_num_buffers = requested_num_of_buffers; | 317 video_format.min_num_buffers = requested_num_of_buffers; |
266 video_format.coded_width = dimensions.width(); | 318 video_format.coded_width = dimensions.width(); |
267 video_format.coded_height = dimensions.height(); | 319 video_format.coded_height = dimensions.height(); |
268 // TODO(owenlin): How to get visible size? | 320 // TODO(owenlin): How to get visible size? |
269 video_format.crop_top = 0; | 321 video_format.crop_top = 0; |
270 video_format.crop_left = 0; | 322 video_format.crop_left = 0; |
271 video_format.crop_width = dimensions.width(); | 323 video_format.crop_width = dimensions.width(); |
272 video_format.crop_height = dimensions.height(); | 324 video_format.crop_height = dimensions.height(); |
273 arc_client_->OnOutputFormatChanged(video_format); | 325 arc_client_->OnOutputFormatChanged(video_format); |
(...skipping 80 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
354 } | 406 } |
355 | 407 |
356 void ArcGpuVideoDecodeAccelerator::SendEosIfNeededOrReusePicture( | 408 void ArcGpuVideoDecodeAccelerator::SendEosIfNeededOrReusePicture( |
357 uint32_t index) { | 409 uint32_t index) { |
358 if (pending_eos_output_buffer_) { | 410 if (pending_eos_output_buffer_) { |
359 BufferMetadata metadata; | 411 BufferMetadata metadata; |
360 metadata.flags = BUFFER_FLAG_EOS; | 412 metadata.flags = BUFFER_FLAG_EOS; |
361 arc_client_->OnBufferDone(PORT_OUTPUT, index, metadata); | 413 arc_client_->OnBufferDone(PORT_OUTPUT, index, metadata); |
362 pending_eos_output_buffer_ = false; | 414 pending_eos_output_buffer_ = false; |
363 } else { | 415 } else { |
364 if (buffers_pending_import_[index].is_valid()) { | 416 OutputBufferInfo& info = buffers_pending_import_[index]; |
365 std::vector<gfx::GpuMemoryBufferHandle> buffers; | 417 if (info.handle.is_valid()) { |
366 buffers.push_back(gfx::GpuMemoryBufferHandle()); | 418 gfx::GpuMemoryBufferHandle handle; |
367 #if defined(USE_OZONE) | 419 #if defined(USE_OZONE) |
368 buffers.back().native_pixmap_handle.fd = | 420 handle.native_pixmap_handle.fd = |
369 base::FileDescriptor(buffers_pending_import_[index].release(), true); | 421 base::FileDescriptor(info.handle.release(), true); |
422 handle.native_pixmap_handle.stride = info.stride; | |
370 #endif | 423 #endif |
371 vda_->ImportBufferForPicture(index, buffers); | 424 vda_->ImportBufferForPicture(index, {handle}); |
372 } else { | 425 } else { |
373 vda_->ReusePictureBuffer(index); | 426 vda_->ReusePictureBuffer(index); |
374 } | 427 } |
375 } | 428 } |
376 } | 429 } |
377 | 430 |
378 void ArcGpuVideoDecodeAccelerator::CreateInputRecord( | 431 void ArcGpuVideoDecodeAccelerator::CreateInputRecord( |
379 int32_t bitstream_buffer_id, | 432 int32_t bitstream_buffer_id, |
380 uint32_t buffer_index, | 433 uint32_t buffer_index, |
381 int64_t timestamp) { | 434 int64_t timestamp) { |
(...skipping 35 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
417 } | 470 } |
418 return true; | 471 return true; |
419 default: | 472 default: |
420 DLOG(ERROR) << "Invalid port: " << port; | 473 DLOG(ERROR) << "Invalid port: " << port; |
421 return false; | 474 return false; |
422 } | 475 } |
423 } | 476 } |
424 | 477 |
425 } // namespace arc | 478 } // namespace arc |
426 } // namespace chromeos | 479 } // namespace chromeos |
OLD | NEW |