Chromium Code Reviews

Side by Side Diff: cc/resources/video_resource_updater.cc

Issue 2122573003: media: replace LUMINANCE_F16 by RG_88 for 9/10-bit h264 videos
Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: introduce --disable-half-float-conversion-texture flag (created 4 years, 2 months ago)
1 // Copyright 2013 The Chromium Authors. All rights reserved. 1 // Copyright 2013 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "cc/resources/video_resource_updater.h" 5 #include "cc/resources/video_resource_updater.h"
6 6
7 #include <stddef.h> 7 #include <stddef.h>
8 #include <stdint.h> 8 #include <stdint.h>
9 9
10 #include <algorithm> 10 #include <algorithm>
(...skipping 66 matching lines...)
77 case media::PIXEL_FORMAT_RGB24: 77 case media::PIXEL_FORMAT_RGB24:
78 case media::PIXEL_FORMAT_RGB32: 78 case media::PIXEL_FORMAT_RGB32:
79 case media::PIXEL_FORMAT_MJPEG: 79 case media::PIXEL_FORMAT_MJPEG:
80 case media::PIXEL_FORMAT_MT21: 80 case media::PIXEL_FORMAT_MT21:
81 case media::PIXEL_FORMAT_YUV420P9: 81 case media::PIXEL_FORMAT_YUV420P9:
82 case media::PIXEL_FORMAT_YUV422P9: 82 case media::PIXEL_FORMAT_YUV422P9:
83 case media::PIXEL_FORMAT_YUV444P9: 83 case media::PIXEL_FORMAT_YUV444P9:
84 case media::PIXEL_FORMAT_YUV420P10: 84 case media::PIXEL_FORMAT_YUV420P10:
85 case media::PIXEL_FORMAT_YUV422P10: 85 case media::PIXEL_FORMAT_YUV422P10:
86 case media::PIXEL_FORMAT_YUV444P10: 86 case media::PIXEL_FORMAT_YUV444P10:
87 case media::PIXEL_FORMAT_Y8:
88 case media::PIXEL_FORMAT_Y16:
87 case media::PIXEL_FORMAT_UNKNOWN: 89 case media::PIXEL_FORMAT_UNKNOWN:
88 break; 90 break;
89 } 91 }
90 return VideoFrameExternalResources::NONE; 92 return VideoFrameExternalResources::NONE;
91 } 93 }
92 94
93 class SyncTokenClientImpl : public media::VideoFrame::SyncTokenClient { 95 class SyncTokenClientImpl : public media::VideoFrame::SyncTokenClient {
94 public: 96 public:
95 SyncTokenClientImpl(gpu::gles2::GLES2Interface* gl, 97 SyncTokenClientImpl(gpu::gles2::GLES2Interface* gl,
96 const gpu::SyncToken& sync_token) 98 const gpu::SyncToken& sync_token)
(...skipping 191 matching lines...)
288 coded_size.height()); 290 coded_size.height());
289 return gfx::Size(plane_width, plane_height); 291 return gfx::Size(plane_width, plane_height);
290 } 292 }
291 293
292 VideoFrameExternalResources VideoResourceUpdater::CreateForSoftwarePlanes( 294 VideoFrameExternalResources VideoResourceUpdater::CreateForSoftwarePlanes(
293 scoped_refptr<media::VideoFrame> video_frame) { 295 scoped_refptr<media::VideoFrame> video_frame) {
294 TRACE_EVENT0("cc", "VideoResourceUpdater::CreateForSoftwarePlanes"); 296 TRACE_EVENT0("cc", "VideoResourceUpdater::CreateForSoftwarePlanes");
295 const media::VideoPixelFormat input_frame_format = video_frame->format(); 297 const media::VideoPixelFormat input_frame_format = video_frame->format();
296 298
297 // TODO(hubbe): Make this a video frame method. 299 // TODO(hubbe): Make this a video frame method.
300 // TODO(dshwang): handle YUV4XXPX by GMBs pool code. crbug.com/445071
298 int bits_per_channel = 0; 301 int bits_per_channel = 0;
299 switch (input_frame_format) { 302 switch (input_frame_format) {
300 case media::PIXEL_FORMAT_UNKNOWN: 303 case media::PIXEL_FORMAT_UNKNOWN:
301 NOTREACHED(); 304 NOTREACHED();
302 // Fall through! 305 // Fall through!
303 case media::PIXEL_FORMAT_I420: 306 case media::PIXEL_FORMAT_I420:
304 case media::PIXEL_FORMAT_YV12: 307 case media::PIXEL_FORMAT_YV12:
305 case media::PIXEL_FORMAT_YV16: 308 case media::PIXEL_FORMAT_YV16:
306 case media::PIXEL_FORMAT_YV12A: 309 case media::PIXEL_FORMAT_YV12A:
307 case media::PIXEL_FORMAT_YV24: 310 case media::PIXEL_FORMAT_YV24:
308 case media::PIXEL_FORMAT_NV12: 311 case media::PIXEL_FORMAT_NV12:
309 case media::PIXEL_FORMAT_NV21: 312 case media::PIXEL_FORMAT_NV21:
310 case media::PIXEL_FORMAT_UYVY: 313 case media::PIXEL_FORMAT_UYVY:
311 case media::PIXEL_FORMAT_YUY2: 314 case media::PIXEL_FORMAT_YUY2:
312 case media::PIXEL_FORMAT_ARGB: 315 case media::PIXEL_FORMAT_ARGB:
313 case media::PIXEL_FORMAT_XRGB: 316 case media::PIXEL_FORMAT_XRGB:
314 case media::PIXEL_FORMAT_RGB24: 317 case media::PIXEL_FORMAT_RGB24:
315 case media::PIXEL_FORMAT_RGB32: 318 case media::PIXEL_FORMAT_RGB32:
316 case media::PIXEL_FORMAT_MJPEG: 319 case media::PIXEL_FORMAT_MJPEG:
317 case media::PIXEL_FORMAT_MT21: 320 case media::PIXEL_FORMAT_MT21:
321 case media::PIXEL_FORMAT_Y8:
318 bits_per_channel = 8; 322 bits_per_channel = 8;
319 break; 323 break;
320 case media::PIXEL_FORMAT_YUV420P9: 324 case media::PIXEL_FORMAT_YUV420P9:
321 case media::PIXEL_FORMAT_YUV422P9: 325 case media::PIXEL_FORMAT_YUV422P9:
322 case media::PIXEL_FORMAT_YUV444P9: 326 case media::PIXEL_FORMAT_YUV444P9:
323 bits_per_channel = 9; 327 bits_per_channel = 9;
324 break; 328 break;
325 case media::PIXEL_FORMAT_YUV420P10: 329 case media::PIXEL_FORMAT_YUV420P10:
326 case media::PIXEL_FORMAT_YUV422P10: 330 case media::PIXEL_FORMAT_YUV422P10:
327 case media::PIXEL_FORMAT_YUV444P10: 331 case media::PIXEL_FORMAT_YUV444P10:
328 bits_per_channel = 10; 332 bits_per_channel = 10;
329 break; 333 break;
334 case media::PIXEL_FORMAT_Y16:
335 bits_per_channel = 16;
336 break;
330 } 337 }
331 338
332 // Only YUV software video frames are supported. 339 // Only YUV software video frames are supported.
333 if (!media::IsYuvPlanar(input_frame_format)) { 340 if (!media::IsYuvPlanar(input_frame_format)) {
334 NOTREACHED() << media::VideoPixelFormatToString(input_frame_format); 341 NOTREACHED() << media::VideoPixelFormatToString(input_frame_format);
335 return VideoFrameExternalResources(); 342 return VideoFrameExternalResources();
336 } 343 }
337 344
338 const bool software_compositor = context_provider_ == NULL; 345 const bool software_compositor = context_provider_ == NULL;
339 346
(...skipping 46 matching lines...)
386 ResourceList::iterator resource_it = RecycleOrAllocateResource( 393 ResourceList::iterator resource_it = RecycleOrAllocateResource(
387 output_plane_resource_size, output_resource_format, 394 output_plane_resource_size, output_resource_format,
388 video_frame->ColorSpace(), software_compositor, is_immutable, 395 video_frame->ColorSpace(), software_compositor, is_immutable,
389 video_frame->unique_id(), i); 396 video_frame->unique_id(), i);
390 397
391 resource_it->add_ref(); 398 resource_it->add_ref();
392 plane_resources.push_back(resource_it); 399 plane_resources.push_back(resource_it);
393 } 400 }
394 401
395 VideoFrameExternalResources external_resources; 402 VideoFrameExternalResources external_resources;
396
397 external_resources.bits_per_channel = bits_per_channel; 403 external_resources.bits_per_channel = bits_per_channel;
398 404
399 if (software_compositor || texture_needs_rgb_conversion) { 405 if (software_compositor || texture_needs_rgb_conversion) {
400 DCHECK_EQ(plane_resources.size(), 1u); 406 DCHECK_EQ(plane_resources.size(), 1u);
401 PlaneResource& plane_resource = *plane_resources[0]; 407 PlaneResource& plane_resource = *plane_resources[0];
402 DCHECK_EQ(plane_resource.resource_format(), kRGBResourceFormat); 408 DCHECK_EQ(plane_resource.resource_format(), kRGBResourceFormat);
403 DCHECK_EQ(software_compositor, plane_resource.mailbox().IsZero()); 409 DCHECK_EQ(software_compositor, plane_resource.mailbox().IsZero());
404 410
405 if (!plane_resource.Matches(video_frame->unique_id(), 0)) { 411 if (!plane_resource.Matches(video_frame->unique_id(), 0)) {
406 // We need to transfer data from |video_frame| to the plane resource. 412 // We need to transfer data from |video_frame| to the plane resource.
(...skipping 38 matching lines...)
445 plane_resource.resource_id())); 451 plane_resource.resource_id()));
446 mailbox.set_color_space(video_frame->ColorSpace()); 452 mailbox.set_color_space(video_frame->ColorSpace());
447 external_resources.mailboxes.push_back(mailbox); 453 external_resources.mailboxes.push_back(mailbox);
448 external_resources.release_callbacks.push_back(base::Bind( 454 external_resources.release_callbacks.push_back(base::Bind(
449 &RecycleResource, AsWeakPtr(), plane_resource.resource_id())); 455 &RecycleResource, AsWeakPtr(), plane_resource.resource_id()));
450 external_resources.type = VideoFrameExternalResources::RGBA_RESOURCE; 456 external_resources.type = VideoFrameExternalResources::RGBA_RESOURCE;
451 } 457 }
452 return external_resources; 458 return external_resources;
453 } 459 }
454 460
455 for (size_t i = 0; i < plane_resources.size(); ++i) { 461 bool needs_conversion = false;
456 PlaneResource& plane_resource = *plane_resources[i]; 462 int shift = 0;
463 if (output_resource_format == LUMINANCE_F16) {
464 // LUMINANCE_F16 uses half-floats, so we always need a conversion step.
465 needs_conversion = true;
466 // Note that the current method of converting integers to half-floats
467 // stops working if you have more than 10 bits of data.
468 DCHECK_LE(bits_per_channel, 10);
469 } else if (output_resource_format == RG_88) {
470 // RG_88 can represent 16bit int, so we don't need a conversion step.
471 needs_conversion = false;
472 } else if (bits_per_channel > 8) {
473 // If bits_per_channel > 8 and we can't use RG_88, we need to
474 // shift the data down and create an 8-bit texture.
475 needs_conversion = true;
476 shift = bits_per_channel - 8;
477 external_resources.bits_per_channel = 8;
478 }
479
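A minimal standalone sketch (not part of this patch) of why the RG_88 path above can skip conversion while the 8-bit fallback loses precision. It assumes the plane stores each sample as a 16-bit little-endian word with the value in the low bits, which is how the upload loop below reads it, and that the plane is uploaded byte-for-byte into an RG_88 texture; the shader-side reconstruction is not shown in this file.

// Illustration only: RG_88 carries 16-bit samples losslessly, the 8-bit
// fallback does not. Assumes 16-bit little-endian samples, value in low bits.
#include <cassert>
#include <cstdint>

int main() {
  const uint16_t sample = 0x03A7;  // a 10-bit sample value (935)

  // Uploading the plane bytes unchanged into an RG_88 texture puts the low
  // byte in R and the high byte in G (little-endian memory order).
  const uint8_t r = sample & 0xFF;
  const uint8_t g = sample >> 8;

  // The consumer can reconstruct the original integer exactly, so no
  // per-sample CPU conversion is needed and no precision is lost.
  assert(static_cast<uint16_t>(r | (g << 8)) == sample);

  // The 8-bit fallback path, by contrast, shifts away the low bits.
  const int bits_per_channel = 10;
  const uint8_t truncated = static_cast<uint8_t>(sample >> (bits_per_channel - 8));
  assert(truncated == 233);  // 935 >> 2
  return 0;
}

The LUMINANCE_F16 path, by contrast, rewrites every sample on the CPU and, per the DCHECK above, only covers inputs of up to 10 bits.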
480 for (size_t plane = 0; plane < plane_resources.size(); ++plane) {
481 PlaneResource& plane_resource = *plane_resources[plane];
457 // Update each plane's resource id with its content. 482 // Update each plane's resource id with its content.
458 DCHECK_EQ(plane_resource.resource_format(), 483 DCHECK_EQ(plane_resource.resource_format(), output_resource_format);
459 resource_provider_->YuvResourceFormat(bits_per_channel));
460 484
461 if (!plane_resource.Matches(video_frame->unique_id(), i)) { 485 if (!plane_resource.Matches(video_frame->unique_id(), plane)) {
462 // We need to transfer data from |video_frame| to the plane resource. 486 // We need to transfer data from |video_frame| to the plane resource.
463 // TODO(reveman): Can use GpuMemoryBuffers here to improve performance. 487 // TODO(reveman): Can use GpuMemoryBuffers here to improve performance.
464 488
465 // The |resource_size_pixels| is the size of the resource we want to 489 // The |resource_size_pixels| is the size of the resource we want to
466 // upload to. 490 // upload to.
467 gfx::Size resource_size_pixels = plane_resource.resource_size(); 491 gfx::Size resource_size_pixels = plane_resource.resource_size();
468 // The |video_stride_bytes| is the width of the video frame we are 492 // The |video_stride_bytes| is the width of the video frame we are
469 // uploading (including non-frame data to fill in the stride). 493 // uploading (including non-frame data to fill in the stride).
470 int video_stride_bytes = video_frame->stride(i); 494 int video_stride_bytes = video_frame->stride(plane);
471 495
472 size_t bytes_per_row = ResourceUtil::CheckedWidthInBytes<size_t>( 496 size_t bytes_per_row = ResourceUtil::CheckedWidthInBytes<size_t>(
473 resource_size_pixels.width(), plane_resource.resource_format()); 497 resource_size_pixels.width(), plane_resource.resource_format());
474 // Use 4-byte row alignment (OpenGL default) for upload performance. 498 // Use 4-byte row alignment (OpenGL default) for upload performance.
475 // Assuming that GL_UNPACK_ALIGNMENT has not changed from default. 499 // Assuming that GL_UNPACK_ALIGNMENT has not changed from default.
476 size_t upload_image_stride = 500 size_t upload_image_stride =
477 MathUtil::CheckedRoundUp<size_t>(bytes_per_row, 4u); 501 MathUtil::CheckedRoundUp<size_t>(bytes_per_row, 4u);
478 502
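For illustration, a small sketch with made-up numbers (not from this patch) of the row-alignment logic above: a 33-texel-wide plane in a one-byte-per-texel format needs a 36-byte upload stride, so whenever the frame's own stride differs from that (or a per-sample conversion is needed) each row is repacked into the staging buffer, as the loop below does.

// Example values only: 4-byte row alignment for texture uploads.
#include <cassert>
#include <cstddef>

// Round value up to the next multiple of a power-of-two alignment; this is
// the result MathUtil::CheckedRoundUp produces, minus the overflow checking.
static size_t RoundUp(size_t value, size_t alignment) {
  return (value + alignment - 1) & ~(alignment - 1);
}

int main() {
  const size_t bytes_per_row = 33;  // e.g. a 33-pixel-wide 8-bit Y plane
  const size_t upload_image_stride = RoundUp(bytes_per_row, 4);
  assert(upload_image_stride == 36);  // GL_UNPACK_ALIGNMENT default is 4

  // Decoders commonly over-allocate the per-row stride, so a row-by-row copy
  // into a buffer with the upload stride is often unavoidable.
  const int video_stride_bytes = 64;
  assert(static_cast<size_t>(video_stride_bytes) != upload_image_stride);
  return 0;
}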
479 bool needs_conversion = false;
480 int shift = 0;
481
482 // LUMINANCE_F16 uses half-floats, so we always need a conversion step.
483 if (plane_resource.resource_format() == LUMINANCE_F16) {
484 needs_conversion = true;
485 // Note that the current method of converting integers to half-floats
486 // stops working if you have more than 10 bits of data.
487 DCHECK_LE(bits_per_channel, 10);
488 } else if (bits_per_channel > 8) {
489 // If bits_per_channel > 8 and we can't use LUMINANCE_F16, we need to
490 // shift the data down and create an 8-bit texture.
491 needs_conversion = true;
492 shift = bits_per_channel - 8;
493 }
494 const uint8_t* pixels; 503 const uint8_t* pixels;
495 if (static_cast<int>(upload_image_stride) == video_stride_bytes && 504 if (static_cast<int>(upload_image_stride) == video_stride_bytes &&
496 !needs_conversion) { 505 !needs_conversion) {
497 pixels = video_frame->data(i); 506 pixels = video_frame->data(plane);
498 } else { 507 } else {
499 // Avoid malloc for each frame/plane if possible. 508 // Avoid malloc for each frame/plane if possible.
500 size_t needed_size = 509 size_t needed_size =
501 upload_image_stride * resource_size_pixels.height(); 510 upload_image_stride * resource_size_pixels.height();
502 if (upload_pixels_.size() < needed_size) 511 if (upload_pixels_.size() < needed_size)
503 upload_pixels_.resize(needed_size); 512 upload_pixels_.resize(needed_size);
504 513
505 for (int row = 0; row < resource_size_pixels.height(); ++row) { 514 for (int row = 0; row < resource_size_pixels.height(); ++row) {
506 if (plane_resource.resource_format() == LUMINANCE_F16) { 515 if (plane_resource.resource_format() == LUMINANCE_F16) {
507 uint16_t* dst = reinterpret_cast<uint16_t*>( 516 uint16_t* dst = reinterpret_cast<uint16_t*>(
508 &upload_pixels_[upload_image_stride * row]); 517 &upload_pixels_[upload_image_stride * row]);
509 const uint16_t* src = reinterpret_cast<uint16_t*>( 518 const uint16_t* src = reinterpret_cast<uint16_t*>(
510 video_frame->data(i) + (video_stride_bytes * row)); 519 video_frame->data(plane) + (video_stride_bytes * row));
511 // Micro-benchmarking indicates that the compiler does 520 // Micro-benchmarking indicates that the compiler does
512 // a good enough job of optimizing this loop that trying 521 // a good enough job of optimizing this loop that trying
513 // to manually operate on one uint64 at a time is not 522 // to manually operate on one uint64 at a time is not
514 // actually helpful. 523 // actually helpful.
515 // Note to future optimizers: Benchmark your optimizations! 524 // Note to future optimizers: Benchmark your optimizations!
516 for (size_t i = 0; i < bytes_per_row / 2; i++) 525 for (size_t i = 0; i < bytes_per_row / 2; i++)
517 dst[i] = src[i] | 0x3800; 526 dst[i] = src[i] | 0x3800;
518 } else if (shift != 0) { 527 } else if (shift != 0) {
519 // We have more-than-8-bit input which we need to shift 528 // We have more-than-8-bit input which we need to shift
520 // down to fit it into an 8-bit texture. 529 // down to fit it into an 8-bit texture.
521 uint8_t* dst = &upload_pixels_[upload_image_stride * row]; 530 uint8_t* dst = &upload_pixels_[upload_image_stride * row];
522 const uint16_t* src = reinterpret_cast<uint16_t*>( 531 const uint16_t* src = reinterpret_cast<uint16_t*>(
523 video_frame->data(i) + (video_stride_bytes * row)); 532 video_frame->data(plane) + (video_stride_bytes * row));
524 for (size_t i = 0; i < bytes_per_row; i++) 533 for (size_t i = 0; i < bytes_per_row; i++)
525 dst[i] = src[i] >> shift; 534 dst[i] = src[i] >> shift;
535
526 } else { 536 } else {
527 // Input and output are the same size and format, but 537 // Input and output are the same size and format, but
528 // differ in stride, copy one row at a time. 538 // differ in stride, copy one row at a time.
529 uint8_t* dst = &upload_pixels_[upload_image_stride * row]; 539 uint8_t* dst = &upload_pixels_[upload_image_stride * row];
530 const uint8_t* src = 540 const uint8_t* src =
531 video_frame->data(i) + (video_stride_bytes * row); 541 video_frame->data(plane) + (video_stride_bytes * row);
532 memcpy(dst, src, bytes_per_row); 542 memcpy(dst, src, bytes_per_row);
533 } 543 }
534 } 544 }
535 pixels = &upload_pixels_[0]; 545 pixels = &upload_pixels_[0];
536 } 546 }
537 547
538 resource_provider_->CopyToResource(plane_resource.resource_id(), pixels, 548 resource_provider_->CopyToResource(plane_resource.resource_id(), pixels,
539 resource_size_pixels); 549 resource_size_pixels);
540 plane_resource.SetUniqueId(video_frame->unique_id(), i); 550 plane_resource.SetUniqueId(video_frame->unique_id(), plane);
541 } 551 }
542 552
543 if (plane_resource.resource_format() == LUMINANCE_F16) { 553 if (plane_resource.resource_format() == LUMINANCE_F16) {
544 // By OR-ing with 0x3800, 10-bit numbers become half-floats in the 554 // By OR-ing with 0x3800, 10-bit numbers become half-floats in the
545 // range [0.5..1) and 9-bit numbers get the range [0.5..0.75). 555 // range [0.5..1) and 9-bit numbers get the range [0.5..0.75).
546 // 556 //
547 // Half-floats are evaluated as: 557 // Half-floats are evaluated as:
548 // float value = pow(2.0, exponent - 25) * (0x400 + fraction); 558 // float value = pow(2.0, exponent - 25) * (0x400 + fraction);
549 // 559 //
550 // In our case the exponent is 14 (since we or with 0x3800) and 560 // In our case the exponent is 14 (since we or with 0x3800) and
(...skipping 171 matching lines...)
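A standalone check (not part of the patch) of the arithmetic in the comment above: with the exponent bits forced to 14 by OR-ing in 0x3800, a half-float decodes to pow(2.0, 14 - 25) * (0x400 + fraction), so 10-bit values land in [0.5, 1) and 9-bit values in [0.5, 0.75).

// Verifies the half-float ranges described in the comment above (illustration).
#include <cassert>
#include <cmath>
#include <cstdint>

// Decode a half-float with a normalized exponent, using the formula from the
// comment: value = pow(2.0, exponent - 25) * (0x400 + fraction).
static float DecodeNormalizedHalf(uint16_t h) {
  const int exponent = (h >> 10) & 0x1F;
  const int fraction = h & 0x3FF;
  return std::ldexp(static_cast<float>(0x400 + fraction), exponent - 25);
}

int main() {
  // 10-bit samples 0..1023 map onto [0.5, 1).
  assert(DecodeNormalizedHalf(0 | 0x3800) == 0.5f);
  assert(DecodeNormalizedHalf(1023 | 0x3800) < 1.0f);
  // 9-bit samples 0..511 map onto [0.5, 0.75).
  assert(DecodeNormalizedHalf(511 | 0x3800) < 0.75f);
  return 0;
}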
722 if (lost_resource) { 732 if (lost_resource) {
723 resource_it->clear_refs(); 733 resource_it->clear_refs();
724 updater->DeleteResource(resource_it); 734 updater->DeleteResource(resource_it);
725 return; 735 return;
726 } 736 }
727 737
728 resource_it->remove_ref(); 738 resource_it->remove_ref();
729 } 739 }
730 740
731 } // namespace cc 741 } // namespace cc