Chromium Code Reviews

Side by Side Diff: content/common/gpu/media/v4l2_slice_video_decode_accelerator.cc

Issue 1822983002: Support external buffer import in VDA interface and add a V4L2SVDA impl. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Created 4 years, 8 months ago
1 // Copyright 2015 The Chromium Authors. All rights reserved. 1 // Copyright 2015 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include <errno.h> 5 #include <errno.h>
6 #include <fcntl.h> 6 #include <fcntl.h>
7 #include <linux/videodev2.h> 7 #include <linux/videodev2.h>
8 #include <poll.h> 8 #include <poll.h>
9 #include <string.h> 9 #include <string.h>
10 #include <sys/eventfd.h> 10 #include <sys/eventfd.h>
(...skipping 144 matching lines...)
155 address(nullptr), 155 address(nullptr),
156 length(0), 156 length(0),
157 bytes_used(0), 157 bytes_used(0),
158 at_device(false) { 158 at_device(false) {
159 } 159 }
160 160
161 V4L2SliceVideoDecodeAccelerator::OutputRecord::OutputRecord() 161 V4L2SliceVideoDecodeAccelerator::OutputRecord::OutputRecord()
162 : at_device(false), 162 : at_device(false),
163 at_client(false), 163 at_client(false),
164 picture_id(-1), 164 picture_id(-1),
165 texture_id(0),
165 egl_image(EGL_NO_IMAGE_KHR), 166 egl_image(EGL_NO_IMAGE_KHR),
166 egl_sync(EGL_NO_SYNC_KHR), 167 egl_sync(EGL_NO_SYNC_KHR),
167 cleared(false) { 168 cleared(false) {}
168 }
169 169
170 struct V4L2SliceVideoDecodeAccelerator::BitstreamBufferRef { 170 struct V4L2SliceVideoDecodeAccelerator::BitstreamBufferRef {
171 BitstreamBufferRef( 171 BitstreamBufferRef(
172 base::WeakPtr<VideoDecodeAccelerator::Client>& client, 172 base::WeakPtr<VideoDecodeAccelerator::Client>& client,
173 const scoped_refptr<base::SingleThreadTaskRunner>& client_task_runner, 173 const scoped_refptr<base::SingleThreadTaskRunner>& client_task_runner,
174 SharedMemoryRegion* shm, 174 SharedMemoryRegion* shm,
175 int32_t input_id); 175 int32_t input_id);
176 ~BitstreamBufferRef(); 176 ~BitstreamBufferRef();
177 const base::WeakPtr<VideoDecodeAccelerator::Client> client; 177 const base::WeakPtr<VideoDecodeAccelerator::Client> client;
178 const scoped_refptr<base::SingleThreadTaskRunner> client_task_runner; 178 const scoped_refptr<base::SingleThreadTaskRunner> client_task_runner;
(...skipping 209 matching lines...)
388 device_(device), 388 device_(device),
389 decoder_thread_("V4L2SliceVideoDecodeAcceleratorThread"), 389 decoder_thread_("V4L2SliceVideoDecodeAcceleratorThread"),
390 device_poll_thread_("V4L2SliceVideoDecodeAcceleratorDevicePollThread"), 390 device_poll_thread_("V4L2SliceVideoDecodeAcceleratorDevicePollThread"),
391 input_streamon_(false), 391 input_streamon_(false),
392 input_buffer_queued_count_(0), 392 input_buffer_queued_count_(0),
393 output_streamon_(false), 393 output_streamon_(false),
394 output_buffer_queued_count_(0), 394 output_buffer_queued_count_(0),
395 video_profile_(media::VIDEO_CODEC_PROFILE_UNKNOWN), 395 video_profile_(media::VIDEO_CODEC_PROFILE_UNKNOWN),
396 output_format_fourcc_(0), 396 output_format_fourcc_(0),
397 state_(kUninitialized), 397 state_(kUninitialized),
398 output_mode_(Config::OutputMode::ALLOCATE),
398 decoder_flushing_(false), 399 decoder_flushing_(false),
399 decoder_resetting_(false), 400 decoder_resetting_(false),
400 surface_set_change_pending_(false), 401 surface_set_change_pending_(false),
401 picture_clearing_count_(0), 402 picture_clearing_count_(0),
402 egl_display_(egl_display), 403 egl_display_(egl_display),
403 get_gl_context_cb_(get_gl_context_cb), 404 get_gl_context_cb_(get_gl_context_cb),
404 make_context_current_cb_(make_context_current_cb), 405 make_context_current_cb_(make_context_current_cb),
405 weak_this_factory_(this) { 406 weak_this_factory_(this) {
406 weak_this_ = weak_this_factory_.GetWeakPtr(); 407 weak_this_ = weak_this_factory_.GetWeakPtr();
407 } 408 }
(...skipping 22 matching lines...)
430 client_ptr_factory_.reset(); 431 client_ptr_factory_.reset();
431 } 432 }
432 } 433 }
433 434
434 bool V4L2SliceVideoDecodeAccelerator::Initialize(const Config& config, 435 bool V4L2SliceVideoDecodeAccelerator::Initialize(const Config& config,
435 Client* client) { 436 Client* client) {
436 DVLOGF(3) << "profile: " << config.profile; 437 DVLOGF(3) << "profile: " << config.profile;
437 DCHECK(child_task_runner_->BelongsToCurrentThread()); 438 DCHECK(child_task_runner_->BelongsToCurrentThread());
438 DCHECK_EQ(state_, kUninitialized); 439 DCHECK_EQ(state_, kUninitialized);
439 440
440 if (get_gl_context_cb_.is_null() || make_context_current_cb_.is_null()) { 441 if (!device_->SupportsDecodeProfileForV4L2PixelFormats(
441 NOTREACHED() << "GL callbacks are required for this VDA"; 442 config.profile, arraysize(supported_input_fourccs_),
443 supported_input_fourccs_)) {
444 DVLOGF(1) << "unsupported profile " << config.profile;
442 return false; 445 return false;
443 } 446 }
444 447
445 if (config.is_encrypted) { 448 if (config.is_encrypted) {
446 NOTREACHED() << "Encrypted streams are not supported for this VDA"; 449 NOTREACHED() << "Encrypted streams are not supported for this VDA";
447 return false; 450 return false;
448 } 451 }
449 452
450 if (!device_->SupportsDecodeProfileForV4L2PixelFormats( 453 if (config.output_mode != Config::OutputMode::ALLOCATE &&
451 config.profile, arraysize(supported_input_fourccs_), 454 config.output_mode != Config::OutputMode::IMPORT) {
452 supported_input_fourccs_)) { 455 NOTREACHED() << "Only ALLOCATE and IMPORT OutputModes are supported";
453 DVLOGF(1) << "unsupported profile " << config.profile;
454 return false; 456 return false;
455 } 457 }
456 458
457 client_ptr_factory_.reset( 459 client_ptr_factory_.reset(
458 new base::WeakPtrFactory<VideoDecodeAccelerator::Client>(client)); 460 new base::WeakPtrFactory<VideoDecodeAccelerator::Client>(client));
459 client_ = client_ptr_factory_->GetWeakPtr(); 461 client_ = client_ptr_factory_->GetWeakPtr();
460 // If we haven't been set up to decode on separate thread via 462 // If we haven't been set up to decode on separate thread via
461 // TryToSetupDecodeOnSeparateThread(), use the main thread/client for 463 // TryToSetupDecodeOnSeparateThread(), use the main thread/client for
462 // decode tasks. 464 // decode tasks.
463 if (!decode_task_runner_) { 465 if (!decode_task_runner_) {
(...skipping 20 matching lines...)
484 // TODO(posciak): This needs to be queried once supported. 486 // TODO(posciak): This needs to be queried once supported.
485 input_planes_count_ = 1; 487 input_planes_count_ = 1;
486 output_planes_count_ = 1; 488 output_planes_count_ = 1;
487 489
488 if (egl_display_ == EGL_NO_DISPLAY) { 490 if (egl_display_ == EGL_NO_DISPLAY) {
489 LOG(ERROR) << "Initialize(): could not get EGLDisplay"; 491 LOG(ERROR) << "Initialize(): could not get EGLDisplay";
490 return false; 492 return false;
491 } 493 }
492 494
493 // We need the context to be initialized to query extensions. 495 // We need the context to be initialized to query extensions.
494 if (!make_context_current_cb_.Run()) { 496 if (!make_context_current_cb_.is_null()) {
495 LOG(ERROR) << "Initialize(): could not make context current"; 497 if (!make_context_current_cb_.Run()) {
496 return false; 498 LOG(ERROR) << "Initialize(): could not make context current";
497 } 499 return false;
500 }
498 501
499 if (!gfx::g_driver_egl.ext.b_EGL_KHR_fence_sync) { 502 if (!gfx::g_driver_egl.ext.b_EGL_KHR_fence_sync) {
500 LOG(ERROR) << "Initialize(): context does not have EGL_KHR_fence_sync"; 503 LOG(ERROR) << "Initialize(): context does not have EGL_KHR_fence_sync";
501 return false; 504 return false;
505 }
506 } else {
507 DVLOG(1) << "No GL callbacks provided, initializing without GL support";
502 } 508 }
503 509
504 // Capabilities check. 510 // Capabilities check.
505 struct v4l2_capability caps; 511 struct v4l2_capability caps;
506 const __u32 kCapsRequired = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING; 512 const __u32 kCapsRequired = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING;
507 IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_QUERYCAP, &caps); 513 IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_QUERYCAP, &caps);
508 if ((caps.capabilities & kCapsRequired) != kCapsRequired) { 514 if ((caps.capabilities & kCapsRequired) != kCapsRequired) {
509 LOG(ERROR) << "Initialize(): ioctl() failed: VIDIOC_QUERYCAP" 515 LOG(ERROR) << "Initialize(): ioctl() failed: VIDIOC_QUERYCAP"
510 ", caps check failed: 0x" << std::hex << caps.capabilities; 516 ", caps check failed: 0x" << std::hex << caps.capabilities;
511 return false; 517 return false;
512 } 518 }
513 519
514 if (!SetupFormats()) 520 if (!SetupFormats())
515 return false; 521 return false;
516 522
517 if (!decoder_thread_.Start()) { 523 if (!decoder_thread_.Start()) {
518 DLOG(ERROR) << "Initialize(): device thread failed to start"; 524 DLOG(ERROR) << "Initialize(): device thread failed to start";
519 return false; 525 return false;
520 } 526 }
521 decoder_thread_task_runner_ = decoder_thread_.task_runner(); 527 decoder_thread_task_runner_ = decoder_thread_.task_runner();
522 528
523 state_ = kInitialized; 529 state_ = kInitialized;
530 output_mode_ = config.output_mode;
524 531
525 // InitializeTask will NOTIFY_ERROR on failure. 532 // InitializeTask will NOTIFY_ERROR on failure.
526 decoder_thread_task_runner_->PostTask( 533 decoder_thread_task_runner_->PostTask(
527 FROM_HERE, base::Bind(&V4L2SliceVideoDecodeAccelerator::InitializeTask, 534 FROM_HERE, base::Bind(&V4L2SliceVideoDecodeAccelerator::InitializeTask,
528 base::Unretained(this))); 535 base::Unretained(this)));
529 536
530 DVLOGF(1) << "V4L2SliceVideoDecodeAccelerator initialized"; 537 DVLOGF(1) << "V4L2SliceVideoDecodeAccelerator initialized";
531 return true; 538 return true;
532 } 539 }
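
A minimal standalone sketch (not part of this CL) of the memory-type mapping used throughout the rest of the decoder: ALLOCATE-mode output buffers remain driver-allocated and mmap()ed (V4L2_MEMORY_MMAP), while IMPORT-mode buffers are backed by client-provided dmabufs (V4L2_MEMORY_DMABUF). OutputMode and V4L2MemoryTypeFor below are hypothetical stand-ins for Config::OutputMode and the inline ternaries in the patch.

#include <linux/videodev2.h>

// Hypothetical mirror of Config::OutputMode, for illustration only.
enum class OutputMode { ALLOCATE, IMPORT };

// Returns the V4L2 memory type matching the decoder's output mode.
inline __u32 V4L2MemoryTypeFor(OutputMode mode) {
  return mode == OutputMode::ALLOCATE ? V4L2_MEMORY_MMAP : V4L2_MEMORY_DMABUF;
}
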
533 540
(...skipping 353 matching lines...)
887 DVLOGF(3); 894 DVLOGF(3);
888 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread()); 895 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
889 896
890 struct v4l2_buffer dqbuf; 897 struct v4l2_buffer dqbuf;
891 struct v4l2_plane planes[VIDEO_MAX_PLANES]; 898 struct v4l2_plane planes[VIDEO_MAX_PLANES];
892 while (input_buffer_queued_count_ > 0) { 899 while (input_buffer_queued_count_ > 0) {
893 DCHECK(input_streamon_); 900 DCHECK(input_streamon_);
894 memset(&dqbuf, 0, sizeof(dqbuf)); 901 memset(&dqbuf, 0, sizeof(dqbuf));
895 memset(&planes, 0, sizeof(planes)); 902 memset(&planes, 0, sizeof(planes));
896 dqbuf.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE; 903 dqbuf.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
897 dqbuf.memory = V4L2_MEMORY_USERPTR; 904 dqbuf.memory = V4L2_MEMORY_MMAP;
898 dqbuf.m.planes = planes; 905 dqbuf.m.planes = planes;
899 dqbuf.length = input_planes_count_; 906 dqbuf.length = input_planes_count_;
900 if (device_->Ioctl(VIDIOC_DQBUF, &dqbuf) != 0) { 907 if (device_->Ioctl(VIDIOC_DQBUF, &dqbuf) != 0) {
901 if (errno == EAGAIN) { 908 if (errno == EAGAIN) {
902 // EAGAIN if we're just out of buffers to dequeue. 909 // EAGAIN if we're just out of buffers to dequeue.
903 break; 910 break;
904 } 911 }
905 PLOG(ERROR) << "ioctl() failed: VIDIOC_DQBUF"; 912 PLOG(ERROR) << "ioctl() failed: VIDIOC_DQBUF";
906 NOTIFY_ERROR(PLATFORM_FAILURE); 913 NOTIFY_ERROR(PLATFORM_FAILURE);
907 return; 914 return;
908 } 915 }
909 InputRecord& input_record = input_buffer_map_[dqbuf.index]; 916 InputRecord& input_record = input_buffer_map_[dqbuf.index];
910 DCHECK(input_record.at_device); 917 DCHECK(input_record.at_device);
911 input_record.at_device = false; 918 input_record.at_device = false;
912 ReuseInputBuffer(dqbuf.index); 919 ReuseInputBuffer(dqbuf.index);
913 input_buffer_queued_count_--; 920 input_buffer_queued_count_--;
914 DVLOGF(4) << "Dequeued input=" << dqbuf.index 921 DVLOGF(4) << "Dequeued input=" << dqbuf.index
915 << " count: " << input_buffer_queued_count_; 922 << " count: " << input_buffer_queued_count_;
916 } 923 }
917 924
918 while (output_buffer_queued_count_ > 0) { 925 while (output_buffer_queued_count_ > 0) {
919 DCHECK(output_streamon_); 926 DCHECK(output_streamon_);
920 memset(&dqbuf, 0, sizeof(dqbuf)); 927 memset(&dqbuf, 0, sizeof(dqbuf));
921 memset(&planes, 0, sizeof(planes)); 928 memset(&planes, 0, sizeof(planes));
922 dqbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; 929 dqbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
923 dqbuf.memory = V4L2_MEMORY_MMAP; 930 dqbuf.memory =
931 (output_mode_ == Config::OutputMode::ALLOCATE ? V4L2_MEMORY_MMAP
932 : V4L2_MEMORY_DMABUF);
924 dqbuf.m.planes = planes; 933 dqbuf.m.planes = planes;
925 dqbuf.length = output_planes_count_; 934 dqbuf.length = output_planes_count_;
926 if (device_->Ioctl(VIDIOC_DQBUF, &dqbuf) != 0) { 935 if (device_->Ioctl(VIDIOC_DQBUF, &dqbuf) != 0) {
927 if (errno == EAGAIN) { 936 if (errno == EAGAIN) {
928 // EAGAIN if we're just out of buffers to dequeue. 937 // EAGAIN if we're just out of buffers to dequeue.
929 break; 938 break;
930 } 939 }
931 PLOG(ERROR) << "ioctl() failed: VIDIOC_DQBUF"; 940 PLOG(ERROR) << "ioctl() failed: VIDIOC_DQBUF";
932 NOTIFY_ERROR(PLATFORM_FAILURE); 941 NOTIFY_ERROR(PLATFORM_FAILURE);
933 return; 942 return;
(...skipping 115 matching lines...)
1049 } 1058 }
1050 1059
1051 bool V4L2SliceVideoDecodeAccelerator::EnqueueOutputRecord(int index) { 1060 bool V4L2SliceVideoDecodeAccelerator::EnqueueOutputRecord(int index) {
1052 DVLOGF(3); 1061 DVLOGF(3);
1053 DCHECK_LT(index, static_cast<int>(output_buffer_map_.size())); 1062 DCHECK_LT(index, static_cast<int>(output_buffer_map_.size()));
1054 1063
1055 // Enqueue an output (VIDEO_CAPTURE) buffer. 1064 // Enqueue an output (VIDEO_CAPTURE) buffer.
1056 OutputRecord& output_record = output_buffer_map_[index]; 1065 OutputRecord& output_record = output_buffer_map_[index];
1057 DCHECK(!output_record.at_device); 1066 DCHECK(!output_record.at_device);
1058 DCHECK(!output_record.at_client); 1067 DCHECK(!output_record.at_client);
1059 DCHECK_NE(output_record.egl_image, EGL_NO_IMAGE_KHR);
1060 DCHECK_NE(output_record.picture_id, -1); 1068 DCHECK_NE(output_record.picture_id, -1);
1061 1069
1062 if (output_record.egl_sync != EGL_NO_SYNC_KHR) { 1070 if (output_record.egl_sync != EGL_NO_SYNC_KHR) {
1063 // If we have to wait for completion, wait. Note that 1071 // If we have to wait for completion, wait. Note that
1064 // free_output_buffers_ is a FIFO queue, so we always wait on the 1072 // free_output_buffers_ is a FIFO queue, so we always wait on the
1065 // buffer that has been in the queue the longest. 1073 // buffer that has been in the queue the longest.
1066 if (eglClientWaitSyncKHR(egl_display_, output_record.egl_sync, 0, 1074 if (eglClientWaitSyncKHR(egl_display_, output_record.egl_sync, 0,
1067 EGL_FOREVER_KHR) == EGL_FALSE) { 1075 EGL_FOREVER_KHR) == EGL_FALSE) {
1068 // This will cause tearing, but is safe otherwise. 1076 // This will cause tearing, but is safe otherwise.
1069 DVLOGF(1) << "eglClientWaitSyncKHR failed!"; 1077 DVLOGF(1) << "eglClientWaitSyncKHR failed!";
1070 } 1078 }
1071 if (eglDestroySyncKHR(egl_display_, output_record.egl_sync) != EGL_TRUE) { 1079 if (eglDestroySyncKHR(egl_display_, output_record.egl_sync) != EGL_TRUE) {
1072 LOGF(ERROR) << "eglDestroySyncKHR failed!"; 1080 LOGF(ERROR) << "eglDestroySyncKHR failed!";
1073 NOTIFY_ERROR(PLATFORM_FAILURE); 1081 NOTIFY_ERROR(PLATFORM_FAILURE);
1074 return false; 1082 return false;
1075 } 1083 }
1076 output_record.egl_sync = EGL_NO_SYNC_KHR; 1084 output_record.egl_sync = EGL_NO_SYNC_KHR;
1077 } 1085 }
1078 1086
1079 struct v4l2_buffer qbuf; 1087 struct v4l2_buffer qbuf;
1080 struct v4l2_plane qbuf_planes[VIDEO_MAX_PLANES]; 1088 struct v4l2_plane qbuf_planes[VIDEO_MAX_PLANES];
1081 memset(&qbuf, 0, sizeof(qbuf)); 1089 memset(&qbuf, 0, sizeof(qbuf));
1082 memset(qbuf_planes, 0, sizeof(qbuf_planes)); 1090 memset(qbuf_planes, 0, sizeof(qbuf_planes));
1083 qbuf.index = index; 1091 qbuf.index = index;
1084 qbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; 1092 qbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
1085 qbuf.memory = V4L2_MEMORY_MMAP; 1093 if (output_mode_ == Config::OutputMode::ALLOCATE) {
1094 qbuf.memory = V4L2_MEMORY_MMAP;
1095 } else {
1096 qbuf.memory = V4L2_MEMORY_DMABUF;
1097 DCHECK_EQ(output_planes_count_, output_record.dmabuf_fds.size());
1098 for (size_t i = 0; i < output_record.dmabuf_fds.size(); ++i) {
1099 DCHECK(output_record.dmabuf_fds[i].is_valid());
1100 qbuf_planes[i].m.fd = output_record.dmabuf_fds[i].get();
1101 }
1102 }
1086 qbuf.m.planes = qbuf_planes; 1103 qbuf.m.planes = qbuf_planes;
1087 qbuf.length = output_planes_count_; 1104 qbuf.length = output_planes_count_;
1088 IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_QBUF, &qbuf); 1105 IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_QBUF, &qbuf);
1089 output_record.at_device = true; 1106 output_record.at_device = true;
1090 output_buffer_queued_count_++; 1107 output_buffer_queued_count_++;
1091 DVLOGF(4) << "Enqueued output=" << qbuf.index 1108 DVLOGF(4) << "Enqueued output=" << qbuf.index
1092 << " count: " << output_buffer_queued_count_; 1109 << " count: " << output_buffer_queued_count_;
1093 1110
1094 return true; 1111 return true;
1095 } 1112 }
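
For reference, a minimal standalone sketch of the standard V4L2 call the IMPORT branch above boils down to: queueing a multi-planar CAPTURE buffer whose planes are backed by caller-provided dmabuf fds. QueueDmabufCaptureBuffer, device_fd and the fd array are hypothetical placeholders, not part of this CL.

#include <cstring>
#include <linux/videodev2.h>
#include <sys/ioctl.h>

bool QueueDmabufCaptureBuffer(int device_fd,
                              unsigned int index,
                              const int* dmabuf_fds,
                              size_t plane_count) {
  struct v4l2_plane planes[VIDEO_MAX_PLANES];
  struct v4l2_buffer buf;
  memset(&buf, 0, sizeof(buf));
  memset(planes, 0, sizeof(planes));
  buf.index = index;
  buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
  buf.memory = V4L2_MEMORY_DMABUF;  // Import externally-allocated buffers.
  for (size_t i = 0; i < plane_count; ++i)
    planes[i].m.fd = dmabuf_fds[i];  // One dmabuf fd per plane.
  buf.m.planes = planes;
  buf.length = plane_count;
  // The driver keeps a reference to the dmabufs until the buffer is dequeued.
  return ioctl(device_fd, VIDIOC_QBUF, &buf) == 0;
}
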
(...skipping 262 matching lines...)
1358 // yet. We will not start decoding without having surfaces available, 1375 // yet. We will not start decoding without having surfaces available,
1359 // and will schedule a decode task once the client provides the buffers. 1376 // and will schedule a decode task once the client provides the buffers.
1360 surface_set_change_pending_ = false; 1377 surface_set_change_pending_ = false;
1361 DVLOG(3) << "Surface set change finished"; 1378 DVLOG(3) << "Surface set change finished";
1362 return true; 1379 return true;
1363 } 1380 }
1364 1381
1365 bool V4L2SliceVideoDecodeAccelerator::DestroyOutputs(bool dismiss) { 1382 bool V4L2SliceVideoDecodeAccelerator::DestroyOutputs(bool dismiss) {
1366 DVLOGF(3); 1383 DVLOGF(3);
1367 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread()); 1384 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
1368 std::vector<EGLImageKHR> egl_images_to_destroy;
1369 std::vector<int32_t> picture_buffers_to_dismiss; 1385 std::vector<int32_t> picture_buffers_to_dismiss;
1370 1386
1371 if (output_buffer_map_.empty()) 1387 if (output_buffer_map_.empty())
1372 return true; 1388 return true;
1373 1389
1374 for (auto output_record : output_buffer_map_) { 1390 for (const auto& output_record : output_buffer_map_) {
1375 DCHECK(!output_record.at_device); 1391 DCHECK(!output_record.at_device);
1376 1392
1377 if (output_record.egl_sync != EGL_NO_SYNC_KHR) { 1393 if (output_record.egl_sync != EGL_NO_SYNC_KHR) {
1378 if (eglDestroySyncKHR(egl_display_, output_record.egl_sync) != EGL_TRUE) 1394 if (eglDestroySyncKHR(egl_display_, output_record.egl_sync) != EGL_TRUE)
1379 DVLOGF(1) << "eglDestroySyncKHR failed."; 1395 DVLOGF(1) << "eglDestroySyncKHR failed.";
1380 } 1396 }
1381 1397
1382 if (output_record.egl_image != EGL_NO_IMAGE_KHR) { 1398 if (output_record.egl_image != EGL_NO_IMAGE_KHR) {
1383 child_task_runner_->PostTask( 1399 child_task_runner_->PostTask(
1384 FROM_HERE, 1400 FROM_HERE,
(...skipping 84 matching lines...)
1469 << ", requested " << req_buffer_count << ")"; 1485 << ", requested " << req_buffer_count << ")";
1470 NOTIFY_ERROR(INVALID_ARGUMENT); 1486 NOTIFY_ERROR(INVALID_ARGUMENT);
1471 return; 1487 return;
1472 } 1488 }
1473 1489
1474 // Allocate the output buffers. 1490 // Allocate the output buffers.
1475 struct v4l2_requestbuffers reqbufs; 1491 struct v4l2_requestbuffers reqbufs;
1476 memset(&reqbufs, 0, sizeof(reqbufs)); 1492 memset(&reqbufs, 0, sizeof(reqbufs));
1477 reqbufs.count = buffers.size(); 1493 reqbufs.count = buffers.size();
1478 reqbufs.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; 1494 reqbufs.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
1479 reqbufs.memory = V4L2_MEMORY_MMAP; 1495 reqbufs.memory =
1496 (output_mode_ == Config::OutputMode::ALLOCATE ? V4L2_MEMORY_MMAP
1497 : V4L2_MEMORY_DMABUF);
1480 IOCTL_OR_ERROR_RETURN(VIDIOC_REQBUFS, &reqbufs); 1498 IOCTL_OR_ERROR_RETURN(VIDIOC_REQBUFS, &reqbufs);
1481 1499
1482 if (reqbufs.count != buffers.size()) { 1500 if (reqbufs.count != buffers.size()) {
1483 DLOG(ERROR) << "Could not allocate enough output buffers"; 1501 DLOG(ERROR) << "Could not allocate enough output buffers";
1484 NOTIFY_ERROR(PLATFORM_FAILURE); 1502 NOTIFY_ERROR(PLATFORM_FAILURE);
1485 return; 1503 return;
1486 } 1504 }
1487 1505
1488 child_task_runner_->PostTask( 1506 DCHECK(free_output_buffers_.empty());
1489 FROM_HERE, base::Bind(&V4L2SliceVideoDecodeAccelerator::CreateEGLImages, 1507 DCHECK(output_buffer_map_.empty());
1490 weak_this_, buffers, output_format_fourcc_, 1508 output_buffer_map_.resize(buffers.size());
1491 output_planes_count_)); 1509 for (size_t i = 0; i < output_buffer_map_.size(); ++i) {
1510 DCHECK(buffers[i].size() == coded_size_);
1511
1512 OutputRecord& output_record = output_buffer_map_[i];
1513 DCHECK(!output_record.at_device);
1514 DCHECK(!output_record.at_client);
1515 DCHECK_EQ(output_record.egl_image, EGL_NO_IMAGE_KHR);
1516 DCHECK_EQ(output_record.egl_sync, EGL_NO_SYNC_KHR);
1517 DCHECK_EQ(output_record.picture_id, -1);
1518 DCHECK(output_record.dmabuf_fds.empty());
1519 DCHECK_EQ(output_record.cleared, false);
1520
1521 output_record.picture_id = buffers[i].id();
1522 output_record.texture_id = buffers[i].texture_id();
1523 // This will remain true until ImportBufferForPicture is called, either by
1524 // the client, or by ourselves, if we are allocating.
1525 output_record.at_client = true;
1526 if (output_mode_ == Config::OutputMode::ALLOCATE) {
1527 std::vector<base::ScopedFD> dmabuf_fds =
1528 std::move(device_->GetDmabufsForV4L2Buffer(
1529 i, output_planes_count_, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE));
1530 if (dmabuf_fds.empty()) {
1531 NOTIFY_ERROR(PLATFORM_FAILURE);
1532 return;
1533 }
1534
1535 auto passed_dmabuf_fds(make_scoped_ptr(
1536 new std::vector<base::ScopedFD>(std::move(dmabuf_fds))));
1537 ImportBufferForPictureTask(output_record.picture_id,
1538 std::move(passed_dmabuf_fds));
1539 } // else we'll get triggered via ImportBufferForPicture() from client.
1540 DVLOGF(3) << "buffer[" << i << "]: picture_id=" << output_record.picture_id;
1541 }
1542
1543 if (!StartDevicePoll()) {
1544 NOTIFY_ERROR(PLATFORM_FAILURE);
1545 return;
1546 }
1492 } 1547 }
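
In ALLOCATE mode the loop above asks V4L2Device::GetDmabufsForV4L2Buffer() for dmabuf fds backing the driver-allocated buffers. A minimal standalone sketch of the usual export mechanism follows; whether that helper uses VIDIOC_EXPBUF exactly as shown is an assumption, and ExportDmabufFds is a hypothetical name.

#include <cstring>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/videodev2.h>
#include <vector>

std::vector<int> ExportDmabufFds(int device_fd,
                                 unsigned int buffer_index,
                                 size_t plane_count) {
  std::vector<int> fds;
  for (size_t plane = 0; plane < plane_count; ++plane) {
    struct v4l2_exportbuffer expbuf;
    memset(&expbuf, 0, sizeof(expbuf));
    expbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
    expbuf.index = buffer_index;
    expbuf.plane = plane;
    expbuf.flags = O_CLOEXEC;
    if (ioctl(device_fd, VIDIOC_EXPBUF, &expbuf) != 0) {
      for (int fd : fds)
        close(fd);  // Don't leak fds for planes already exported.
      return std::vector<int>();
    }
    fds.push_back(expbuf.fd);
  }
  return fds;
}
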
1493 1548
1494 void V4L2SliceVideoDecodeAccelerator::CreateEGLImages( 1549 void V4L2SliceVideoDecodeAccelerator::CreateEGLImageFor(
1495 const std::vector<media::PictureBuffer>& buffers, 1550 size_t buffer_index,
1496 uint32_t output_format_fourcc, 1551 scoped_ptr<std::vector<base::ScopedFD>> passed_dmabuf_fds,
1497 size_t output_planes_count) { 1552 GLuint texture_id,
1498 DVLOGF(3); 1553 const gfx::Size& size,
1554 uint32_t fourcc) {
1555 DVLOGF(3) << "index=" << buffer_index;
1499 DCHECK(child_task_runner_->BelongsToCurrentThread()); 1556 DCHECK(child_task_runner_->BelongsToCurrentThread());
1500 1557
1558 if (get_gl_context_cb_.is_null() || make_context_current_cb_.is_null()) {
1559 DLOG(ERROR) << "GL callbacks required for binding to EGLImages";
1560 NOTIFY_ERROR(INVALID_ARGUMENT);
1561 return;
1562 }
1563
1501 gfx::GLContext* gl_context = get_gl_context_cb_.Run(); 1564 gfx::GLContext* gl_context = get_gl_context_cb_.Run();
1502 if (!gl_context || !make_context_current_cb_.Run()) { 1565 if (!gl_context || !make_context_current_cb_.Run()) {
1503 DLOG(ERROR) << "No GL context"; 1566 DLOG(ERROR) << "No GL context";
1504 NOTIFY_ERROR(PLATFORM_FAILURE); 1567 NOTIFY_ERROR(PLATFORM_FAILURE);
1505 return; 1568 return;
1506 } 1569 }
1507 1570
1508 gfx::ScopedTextureBinder bind_restore(GL_TEXTURE_EXTERNAL_OES, 0); 1571 gfx::ScopedTextureBinder bind_restore(GL_TEXTURE_EXTERNAL_OES, 0);
1509 1572
1510 std::vector<EGLImageKHR> egl_images; 1573 EGLImageKHR egl_image = device_->CreateEGLImage(egl_display_,
1511 for (size_t i = 0; i < buffers.size(); ++i) { 1574 gl_context->GetHandle(),
1512 EGLImageKHR egl_image = device_->CreateEGLImage(egl_display_, 1575 texture_id,
1513 gl_context->GetHandle(), 1576 size,
1514 buffers[i].texture_id(), 1577 buffer_index,
1515 buffers[i].size(), 1578 fourcc,
1516 i, 1579 *passed_dmabuf_fds);
1517 output_format_fourcc, 1580 if (egl_image == EGL_NO_IMAGE_KHR) {
1518 output_planes_count); 1581 LOGF(ERROR) << "Could not create EGLImageKHR,"
1519 if (egl_image == EGL_NO_IMAGE_KHR) { 1582 << " index=" << buffer_index << " texture_id=" << texture_id;
1520 LOGF(ERROR) << "Could not create EGLImageKHR";
1521 for (const auto& image_to_destroy : egl_images)
1522 device_->DestroyEGLImage(egl_display_, image_to_destroy);
1523
1524 NOTIFY_ERROR(PLATFORM_FAILURE);
1525 return;
1526 }
1527
1528 egl_images.push_back(egl_image);
1529 }
1530
1531 decoder_thread_task_runner_->PostTask(
1532 FROM_HERE, base::Bind(
1533 &V4L2SliceVideoDecodeAccelerator::AssignEGLImages,
1534 base::Unretained(this), buffers, egl_images));
1535 }
1536
1537 void V4L2SliceVideoDecodeAccelerator::AssignEGLImages(
1538 const std::vector<media::PictureBuffer>& buffers,
1539 const std::vector<EGLImageKHR>& egl_images) {
1540 DVLOGF(3);
1541 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
1542 DCHECK_EQ(buffers.size(), egl_images.size());
1543
1544 DCHECK(free_output_buffers_.empty());
1545 DCHECK(output_buffer_map_.empty());
1546
1547 output_buffer_map_.resize(buffers.size());
1548
1549 for (size_t i = 0; i < output_buffer_map_.size(); ++i) {
1550 DCHECK(buffers[i].size() == coded_size_);
1551
1552 OutputRecord& output_record = output_buffer_map_[i];
1553 DCHECK(!output_record.at_device);
1554 DCHECK(!output_record.at_client);
1555 DCHECK_EQ(output_record.egl_image, EGL_NO_IMAGE_KHR);
1556 DCHECK_EQ(output_record.egl_sync, EGL_NO_SYNC_KHR);
1557 DCHECK_EQ(output_record.picture_id, -1);
1558 DCHECK_EQ(output_record.cleared, false);
1559
1560 output_record.egl_image = egl_images[i];
1561 output_record.picture_id = buffers[i].id();
1562 free_output_buffers_.push_back(i);
1563 DVLOGF(3) << "buffer[" << i << "]: picture_id=" << output_record.picture_id;
1564 }
1565
1566 if (!StartDevicePoll()) {
1567 NOTIFY_ERROR(PLATFORM_FAILURE); 1583 NOTIFY_ERROR(PLATFORM_FAILURE);
1568 return; 1584 return;
1569 } 1585 }
1570 1586
1571 ProcessPendingEventsIfNeeded(); 1587 decoder_thread_task_runner_->PostTask(
1588 FROM_HERE, base::Bind(&V4L2SliceVideoDecodeAccelerator::AssignEGLImage,
1589 base::Unretained(this), buffer_index, egl_image,
1590 base::Passed(&passed_dmabuf_fds)));
1591 }
1592
1593 void V4L2SliceVideoDecodeAccelerator::AssignEGLImage(
1594 size_t buffer_index,
1595 EGLImageKHR egl_image,
1596 scoped_ptr<std::vector<base::ScopedFD>> passed_dmabuf_fds) {
1597 DVLOGF(3) << "index=" << buffer_index;
1598 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
1599
1600 DCHECK_LT(buffer_index, output_buffer_map_.size());
1601 OutputRecord& output_record = output_buffer_map_[buffer_index];
1602 DCHECK_EQ(output_record.egl_image, EGL_NO_IMAGE_KHR);
1603 DCHECK_EQ(output_record.egl_sync, EGL_NO_SYNC_KHR);
1604 DCHECK(!output_record.at_client);
1605 DCHECK(!output_record.at_device);
1606
1607 output_record.egl_image = egl_image;
1608 if (output_mode_ == Config::OutputMode::IMPORT) {
1609 DCHECK(output_record.dmabuf_fds.empty());
1610 output_record.dmabuf_fds = std::move(*passed_dmabuf_fds);
1611 }
1612
1613 DCHECK_EQ(std::count(free_output_buffers_.begin(), free_output_buffers_.end(),
1614 buffer_index),
1615 0);
1616 free_output_buffers_.push_back(buffer_index);
1617 ScheduleDecodeBufferTaskIfNeeded();
1618 }
1619
1620 void V4L2SliceVideoDecodeAccelerator::ImportBufferForPicture(
1621 int32_t picture_buffer_id,
1622 const std::vector<gfx::GpuMemoryBufferHandle>& gpu_memory_buffer_handles) {
1623 DVLOGF(3) << "picture_buffer_id=" << picture_buffer_id;
1624 DCHECK(child_task_runner_->BelongsToCurrentThread());
1625
1626 auto passed_dmabuf_fds(make_scoped_ptr(new std::vector<base::ScopedFD>()));
1627 for (const auto& handle : gpu_memory_buffer_handles) {
1628 int fd = -1;
1629 #if defined(USE_OZONE)
1630 fd = handle.native_pixmap_handle.fd.fd;
1631 #endif
1632 DCHECK_NE(fd, -1);
1633 passed_dmabuf_fds->push_back(base::ScopedFD(fd));
1634 }
1635
1636 if (output_mode_ != Config::OutputMode::IMPORT) {
1637 LOGF(ERROR) << "Cannot import in non-import mode";
1638 NOTIFY_ERROR(INVALID_ARGUMENT);
1639 return;
1640 }
1641
1642 decoder_thread_task_runner_->PostTask(
1643 FROM_HERE,
1644 base::Bind(&V4L2SliceVideoDecodeAccelerator::ImportBufferForPictureTask,
1645 base::Unretained(this), picture_buffer_id,
1646 base::Passed(&passed_dmabuf_fds)));
1647 }
1648
1649 void V4L2SliceVideoDecodeAccelerator::ImportBufferForPictureTask(
1650 int32_t picture_buffer_id,
1651 scoped_ptr<std::vector<base::ScopedFD>> passed_dmabuf_fds) {
1652 DVLOGF(3) << "picture_buffer_id=" << picture_buffer_id;
1653 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
1654
1655 const auto iter =
1656 std::find_if(output_buffer_map_.begin(), output_buffer_map_.end(),
1657 [picture_buffer_id](const OutputRecord& output_record) {
1658 return output_record.picture_id == picture_buffer_id;
1659 });
1660 if (iter == output_buffer_map_.end()) {
1661 LOGF(ERROR) << "Invalid picture_buffer_id=" << picture_buffer_id;
1662 NOTIFY_ERROR(INVALID_ARGUMENT);
1663 return;
1664 }
1665
1666 if (!iter->at_client) {
1667 LOGF(ERROR) << "Cannot import buffer not owned by the client";
1668 NOTIFY_ERROR(INVALID_ARGUMENT);
1669 return;
1670 }
1671
1672 size_t index = iter - output_buffer_map_.begin();
1673 DCHECK_EQ(std::count(free_output_buffers_.begin(), free_output_buffers_.end(),
1674 index),
1675 0);
1676
1677 DCHECK(!iter->at_device);
1678 iter->at_client = false;
1679 if (iter->texture_id != 0) {
1680 if (iter->egl_image != EGL_NO_IMAGE_KHR) {
1681 child_task_runner_->PostTask(
1682 FROM_HERE,
1683 base::Bind(base::IgnoreResult(&V4L2Device::DestroyEGLImage), device_,
1684 egl_display_, iter->egl_image));
1685 }
1686
1687 child_task_runner_->PostTask(
1688 FROM_HERE,
1689 base::Bind(&V4L2SliceVideoDecodeAccelerator::CreateEGLImageFor,
1690 weak_this_, index, base::Passed(&passed_dmabuf_fds),
1691 iter->texture_id, coded_size_, output_format_fourcc_));
1692 } else {
1693 // No need for an EGLImage, start using this buffer now.
1694 DCHECK_EQ(output_planes_count_, passed_dmabuf_fds->size());
1695 iter->dmabuf_fds.swap(*passed_dmabuf_fds);
1696 free_output_buffers_.push_back(index);
1697 ScheduleDecodeBufferTaskIfNeeded();
1698 }
1572 } 1699 }
1573 1700
1574 void V4L2SliceVideoDecodeAccelerator::ReusePictureBuffer( 1701 void V4L2SliceVideoDecodeAccelerator::ReusePictureBuffer(
1575 int32_t picture_buffer_id) { 1702 int32_t picture_buffer_id) {
1576 DCHECK(child_task_runner_->BelongsToCurrentThread()); 1703 DCHECK(child_task_runner_->BelongsToCurrentThread());
1577 DVLOGF(4) << "picture_buffer_id=" << picture_buffer_id; 1704 DVLOGF(4) << "picture_buffer_id=" << picture_buffer_id;
1578 1705
1579 if (!make_context_current_cb_.Run()) { 1706 scoped_ptr<EGLSyncKHRRef> egl_sync_ref;
1580 LOGF(ERROR) << "could not make context current"; 1707
1581 NOTIFY_ERROR(PLATFORM_FAILURE); 1708 if (!make_context_current_cb_.is_null()) {
1582 return; 1709 if (!make_context_current_cb_.Run()) {
1710 LOGF(ERROR) << "could not make context current";
1711 NOTIFY_ERROR(PLATFORM_FAILURE);
1712 return;
1713 }
1714
1715 EGLSyncKHR egl_sync =
1716 eglCreateSyncKHR(egl_display_, EGL_SYNC_FENCE_KHR, NULL);
1717 if (egl_sync == EGL_NO_SYNC_KHR) {
1718 LOGF(ERROR) << "eglCreateSyncKHR() failed";
1719 NOTIFY_ERROR(PLATFORM_FAILURE);
1720 return;
1721 }
1722
1723 egl_sync_ref.reset(new EGLSyncKHRRef(egl_display_, egl_sync));
1583 } 1724 }
1584 1725
1585 EGLSyncKHR egl_sync =
1586 eglCreateSyncKHR(egl_display_, EGL_SYNC_FENCE_KHR, NULL);
1587 if (egl_sync == EGL_NO_SYNC_KHR) {
1588 LOGF(ERROR) << "eglCreateSyncKHR() failed";
1589 NOTIFY_ERROR(PLATFORM_FAILURE);
1590 return;
1591 }
1592
1593 scoped_ptr<EGLSyncKHRRef> egl_sync_ref(
1594 new EGLSyncKHRRef(egl_display_, egl_sync));
1595 decoder_thread_task_runner_->PostTask( 1726 decoder_thread_task_runner_->PostTask(
1596 FROM_HERE, 1727 FROM_HERE,
1597 base::Bind(&V4L2SliceVideoDecodeAccelerator::ReusePictureBufferTask, 1728 base::Bind(&V4L2SliceVideoDecodeAccelerator::ReusePictureBufferTask,
1598 base::Unretained(this), picture_buffer_id, 1729 base::Unretained(this), picture_buffer_id,
1599 base::Passed(&egl_sync_ref))); 1730 base::Passed(&egl_sync_ref)));
1600 } 1731 }
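
ReusePictureBuffer() above inserts an EGL fence so a returned picture buffer is not re-queued to the driver while the client may still be reading from its texture. Below is a minimal standalone sketch of that EGL_KHR_fence_sync pattern, with the extension entry points loaded explicitly; FenceAndWait is a hypothetical helper, not part of this CL.

#include <EGL/egl.h>
#include <EGL/eglext.h>

bool FenceAndWait(EGLDisplay display) {
  // KHR fence sync entry points are extensions and must be looked up.
  auto create_sync = reinterpret_cast<PFNEGLCREATESYNCKHRPROC>(
      eglGetProcAddress("eglCreateSyncKHR"));
  auto wait_sync = reinterpret_cast<PFNEGLCLIENTWAITSYNCKHRPROC>(
      eglGetProcAddress("eglClientWaitSyncKHR"));
  auto destroy_sync = reinterpret_cast<PFNEGLDESTROYSYNCKHRPROC>(
      eglGetProcAddress("eglDestroySyncKHR"));
  if (!create_sync || !wait_sync || !destroy_sync)
    return false;

  // Fence all GL commands issued so far (e.g. the client's texture reads).
  EGLSyncKHR sync = create_sync(display, EGL_SYNC_FENCE_KHR, nullptr);
  if (sync == EGL_NO_SYNC_KHR)
    return false;

  // Block until the fence signals, as the decoder does before re-queueing.
  EGLint result = wait_sync(display, sync, 0, EGL_FOREVER_KHR);
  destroy_sync(display, sync);
  return result == EGL_CONDITION_SATISFIED_KHR;
}
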
1601 1732
1602 void V4L2SliceVideoDecodeAccelerator::ReusePictureBufferTask( 1733 void V4L2SliceVideoDecodeAccelerator::ReusePictureBufferTask(
1603 int32_t picture_buffer_id, 1734 int32_t picture_buffer_id,
1604 scoped_ptr<EGLSyncKHRRef> egl_sync_ref) { 1735 scoped_ptr<EGLSyncKHRRef> egl_sync_ref) {
(...skipping 16 matching lines...)
1621 OutputRecord& output_record = output_buffer_map_[it->second->output_record()]; 1752 OutputRecord& output_record = output_buffer_map_[it->second->output_record()];
1622 if (output_record.at_device || !output_record.at_client) { 1753 if (output_record.at_device || !output_record.at_client) {
1623 DVLOGF(1) << "picture_buffer_id not reusable"; 1754 DVLOGF(1) << "picture_buffer_id not reusable";
1624 NOTIFY_ERROR(INVALID_ARGUMENT); 1755 NOTIFY_ERROR(INVALID_ARGUMENT);
1625 return; 1756 return;
1626 } 1757 }
1627 1758
1628 DCHECK_EQ(output_record.egl_sync, EGL_NO_SYNC_KHR); 1759 DCHECK_EQ(output_record.egl_sync, EGL_NO_SYNC_KHR);
1629 DCHECK(!output_record.at_device); 1760 DCHECK(!output_record.at_device);
1630 output_record.at_client = false; 1761 output_record.at_client = false;
1631 output_record.egl_sync = egl_sync_ref->egl_sync; 1762 if (egl_sync_ref) {
1632 // Take ownership of the EGLSync. 1763 output_record.egl_sync = egl_sync_ref->egl_sync;
1633 egl_sync_ref->egl_sync = EGL_NO_SYNC_KHR; 1764 // Take ownership of the EGLSync.
1765 egl_sync_ref->egl_sync = EGL_NO_SYNC_KHR;
1766 }
1767
1634 surfaces_at_display_.erase(it); 1768 surfaces_at_display_.erase(it);
1635 } 1769 }
1636 1770
1637 void V4L2SliceVideoDecodeAccelerator::Flush() { 1771 void V4L2SliceVideoDecodeAccelerator::Flush() {
1638 DVLOGF(3); 1772 DVLOGF(3);
1639 DCHECK(child_task_runner_->BelongsToCurrentThread()); 1773 DCHECK(child_task_runner_->BelongsToCurrentThread());
1640 1774
1641 decoder_thread_task_runner_->PostTask( 1775 decoder_thread_task_runner_->PostTask(
1642 FROM_HERE, base::Bind(&V4L2SliceVideoDecodeAccelerator::FlushTask, 1776 FROM_HERE, base::Bind(&V4L2SliceVideoDecodeAccelerator::FlushTask,
1643 base::Unretained(this))); 1777 base::Unretained(this)));
(...skipping 859 matching lines...)
2503 OutputRecord& output_record = 2637 OutputRecord& output_record =
2504 output_buffer_map_[dec_surface->output_record()]; 2638 output_buffer_map_[dec_surface->output_record()];
2505 2639
2506 bool inserted = 2640 bool inserted =
2507 surfaces_at_display_.insert(std::make_pair(output_record.picture_id, 2641 surfaces_at_display_.insert(std::make_pair(output_record.picture_id,
2508 dec_surface)).second; 2642 dec_surface)).second;
2509 DCHECK(inserted); 2643 DCHECK(inserted);
2510 2644
2511 DCHECK(!output_record.at_client); 2645 DCHECK(!output_record.at_client);
2512 DCHECK(!output_record.at_device); 2646 DCHECK(!output_record.at_device);
2513 DCHECK_NE(output_record.egl_image, EGL_NO_IMAGE_KHR);
2514 DCHECK_NE(output_record.picture_id, -1); 2647 DCHECK_NE(output_record.picture_id, -1);
2515 output_record.at_client = true; 2648 output_record.at_client = true;
2516 2649
2517 // TODO(posciak): Use visible size from decoder here instead 2650 // TODO(posciak): Use visible size from decoder here instead
2518 // (crbug.com/402760). Passing (0, 0) results in the client using the 2651 // (crbug.com/402760). Passing (0, 0) results in the client using the
2519 // visible size extracted from the container instead. 2652 // visible size extracted from the container instead.
2520 media::Picture picture(output_record.picture_id, dec_surface->bitstream_id(), 2653 media::Picture picture(output_record.picture_id, dec_surface->bitstream_id(),
2521 gfx::Rect(0, 0), false); 2654 gfx::Rect(0, 0), false);
2522 DVLOGF(3) << dec_surface->ToString() 2655 DVLOGF(3) << dec_surface->ToString()
2523 << ", bitstream_id: " << picture.bitstream_buffer_id() 2656 << ", bitstream_id: " << picture.bitstream_buffer_id()
(...skipping 85 matching lines...)
2609 } 2742 }
2610 2743
2611 bool V4L2SliceVideoDecodeAccelerator::TryToSetupDecodeOnSeparateThread( 2744 bool V4L2SliceVideoDecodeAccelerator::TryToSetupDecodeOnSeparateThread(
2612 const base::WeakPtr<Client>& decode_client, 2745 const base::WeakPtr<Client>& decode_client,
2613 const scoped_refptr<base::SingleThreadTaskRunner>& decode_task_runner) { 2746 const scoped_refptr<base::SingleThreadTaskRunner>& decode_task_runner) {
2614 decode_client_ = decode_client; 2747 decode_client_ = decode_client;
2615 decode_task_runner_ = decode_task_runner; 2748 decode_task_runner_ = decode_task_runner;
2616 return true; 2749 return true;
2617 } 2750 }
2618 2751
2752 media::VideoPixelFormat V4L2SliceVideoDecodeAccelerator::GetOutputFormat()
2753 const {
2754 return V4L2Device::V4L2PixFmtToVideoPixelFormat(output_format_fourcc_);
2755 }
2756
2619 // static 2757 // static
2620 media::VideoDecodeAccelerator::SupportedProfiles 2758 media::VideoDecodeAccelerator::SupportedProfiles
2621 V4L2SliceVideoDecodeAccelerator::GetSupportedProfiles() { 2759 V4L2SliceVideoDecodeAccelerator::GetSupportedProfiles() {
2622 scoped_refptr<V4L2Device> device = V4L2Device::Create(V4L2Device::kDecoder); 2760 scoped_refptr<V4L2Device> device = V4L2Device::Create(V4L2Device::kDecoder);
2623 if (!device) 2761 if (!device)
2624 return SupportedProfiles(); 2762 return SupportedProfiles();
2625 2763
2626 return device->GetSupportedDecodeProfiles(arraysize(supported_input_fourccs_), 2764 return device->GetSupportedDecodeProfiles(arraysize(supported_input_fourccs_),
2627 supported_input_fourccs_); 2765 supported_input_fourccs_);
2628 } 2766 }
2629 2767
2630 } // namespace content 2768 } // namespace content