Index: cc/resources/video_resource_updater.cc |
diff --git a/cc/resources/video_resource_updater.cc b/cc/resources/video_resource_updater.cc |
index e56c3853fbe383d95bcf46d4f5d6f6615e02e738..69d7f67650eb8a677f80c0d1daf867f7691c501c 100644 |
--- a/cc/resources/video_resource_updater.cc |
+++ b/cc/resources/video_resource_updater.cc |
@@ -156,6 +156,7 @@ void VideoResourceUpdater::PlaneResource::SetUniqueId(int unique_frame_id, |
VideoFrameExternalResources::VideoFrameExternalResources() |
: type(NONE), |
+ format(RGBA_8888), |
read_lock_fences_enabled(false), |
offset(0.0f), |
multiplier(1.0f), |
@@ -319,6 +320,7 @@ VideoFrameExternalResources VideoResourceUpdater::CreateForSoftwarePlanes( |
const media::VideoPixelFormat input_frame_format = video_frame->format(); |
// TODO(hubbe): Make this a video frame method. |
+ // TODO(dshwang): handle YUV4XXPX by GMBs pool code. crbug.com/445071 |
int bits_per_channel = 0; |
switch (input_frame_format) { |
case media::PIXEL_FORMAT_UNKNOWN: |
@@ -362,9 +364,6 @@ VideoFrameExternalResources VideoResourceUpdater::CreateForSoftwarePlanes( |
break; |
} |
- // TODO(dshwang): support PIXEL_FORMAT_Y16. crbug.com/624436 |
- DCHECK_NE(bits_per_channel, 16); |
- |
// Only YUV software video frames are supported. |
if (!media::IsYuvPlanar(input_frame_format)) { |
NOTREACHED() << media::VideoPixelFormatToString(input_frame_format); |
@@ -429,8 +428,8 @@ VideoFrameExternalResources VideoResourceUpdater::CreateForSoftwarePlanes( |
} |
VideoFrameExternalResources external_resources; |
- |
external_resources.bits_per_channel = bits_per_channel; |
+ external_resources.format = output_resource_format; |
if (software_compositor || texture_needs_rgb_conversion) { |
DCHECK_EQ(plane_resources.size(), 1u); |
@@ -488,13 +487,56 @@ VideoFrameExternalResources VideoResourceUpdater::CreateForSoftwarePlanes( |
return external_resources; |
} |
- for (size_t i = 0; i < plane_resources.size(); ++i) { |
- PlaneResource& plane_resource = *plane_resources[i]; |
+ bool needs_conversion = false; |
+ int shift = 0; |
+ if (output_resource_format == LUMINANCE_F16) { |
+ // LUMINANCE_F16 uses half-floats, so we always need a conversion step. |
+ needs_conversion = true; |
+ |
+ // If the input data was 9 or 10 bit, and we output to half-floats, |
+ // then we used the OR path below, which means that we need to |
+ // adjust the resource offset and multiplier accordingly. If the |
+ // input data uses more than 10 bits, it will already be normalized |
+ // to 0.0..1.0, so there is no need to do anything. |
+ if (bits_per_channel <= 10) { |
+ // By OR-ing with 0x3800, 10-bit numbers become half-floats in the |
+ // range [0.5..1) and 9-bit numbers get the range [0.5..0.75). |
+ // |
+ // Half-floats are evaluated as: |
+ // float value = pow(2.0, exponent - 25) * (0x400 + fraction); |
+ // |
+ // In our case the exponent is 14 (since we or with 0x3800) and |
+ // pow(2.0, 14-25) * 0x400 evaluates to 0.5 (our offset) and |
+ // pow(2.0, 14-25) * fraction is [0..0.49951171875] for 10-bit and |
+ // [0..0.24951171875] for 9-bit. |
+ // |
+ // https://en.wikipedia.org/wiki/Half-precision_floating-point_format |
+ // |
+ // PLEASE NOTE: |
+ // All planes are assumed to use the same multiplier/offset. |
+ external_resources.offset = 0.5f; |
+ // Max value from input data. |
+ int max_input_value = (1 << bits_per_channel) - 1; |
+ // 1 << 11 = 2048 would be 1.0 with our exponent. |
+ external_resources.multiplier = 2048.0 / max_input_value; |
+ } |
+ } else if (output_resource_format == RG_88) { |
+ // RG_88 can represent a 16-bit int, so we don't need a conversion step. |
+ needs_conversion = false; |
+ } else if (bits_per_channel > 8) { |
+ // If bits_per_channel > 8 and we can't use RG_88, we need to |
+ // shift the data down and create an 8-bit texture. |
+ needs_conversion = true; |
+ shift = bits_per_channel - 8; |
+ external_resources.bits_per_channel = 8; |
+ } |
+ |
+ for (size_t plane = 0; plane < plane_resources.size(); ++plane) { |
+ PlaneResource& plane_resource = *plane_resources[plane]; |
// Update each plane's resource id with its content. |
- DCHECK_EQ(plane_resource.resource_format(), |
- resource_provider_->YuvResourceFormat(bits_per_channel)); |
+ DCHECK_EQ(plane_resource.resource_format(), output_resource_format); |
- if (!plane_resource.Matches(video_frame->unique_id(), i)) { |
+ if (!plane_resource.Matches(video_frame->unique_id(), plane)) { |
// We need to transfer data from |video_frame| to the plane resource. |
// TODO(reveman): Can use GpuMemoryBuffers here to improve performance. |
@@ -503,7 +545,7 @@ VideoFrameExternalResources VideoResourceUpdater::CreateForSoftwarePlanes( |
gfx::Size resource_size_pixels = plane_resource.resource_size(); |
// The |video_stride_bytes| is the width of the video frame we are |
// uploading (including non-frame data to fill in the stride). |
- int video_stride_bytes = video_frame->stride(i); |
+ int video_stride_bytes = video_frame->stride(plane); |
size_t bytes_per_row = ResourceUtil::CheckedWidthInBytes<size_t>( |
resource_size_pixels.width(), plane_resource.resource_format()); |
@@ -512,50 +554,10 @@ VideoFrameExternalResources VideoResourceUpdater::CreateForSoftwarePlanes( |
size_t upload_image_stride = |
MathUtil::CheckedRoundUp<size_t>(bytes_per_row, 4u); |
- bool needs_conversion = false; |
- int shift = 0; |
- |
- // LUMINANCE_F16 uses half-floats, so we always need a conversion step. |
- if (plane_resource.resource_format() == LUMINANCE_F16) { |
- needs_conversion = true; |
- |
- // If the input data was 9 or 10 bit, and we output to half-floats, |
- // then we used the OR path below, which means that we need to |
- // adjust the resource offset and multiplier accordingly. If the |
- // input data uses more than 10 bits, it will already be normalized |
- // to 0.0..1.0, so there is no need to do anything. |
- if (bits_per_channel <= 10) { |
- // By OR-ing with 0x3800, 10-bit numbers become half-floats in the |
- // range [0.5..1) and 9-bit numbers get the range [0.5..0.75). |
- // |
- // Half-floats are evaluated as: |
- // float value = pow(2.0, exponent - 25) * (0x400 + fraction); |
- // |
- // In our case the exponent is 14 (since we or with 0x3800) and |
- // pow(2.0, 14-25) * 0x400 evaluates to 0.5 (our offset) and |
- // pow(2.0, 14-25) * fraction is [0..0.49951171875] for 10-bit and |
- // [0..0.24951171875] for 9-bit. |
- // |
- // https://en.wikipedia.org/wiki/Half-precision_floating-point_format |
- // |
- // PLEASE NOTE: |
- // All planes are assumed to use the same multiplier/offset. |
- external_resources.offset = 0.5f; |
- // Max value from input data. |
- int max_input_value = (1 << bits_per_channel) - 1; |
- // 2 << 11 = 2048 would be 1.0 with our exponent. |
- external_resources.multiplier = 2048.0 / max_input_value; |
- } |
- } else if (bits_per_channel > 8) { |
- // If bits_per_channel > 8 and we can't use LUMINANCE_F16, we need to |
- // shift the data down and create an 8-bit texture. |
- needs_conversion = true; |
- shift = bits_per_channel - 8; |
- } |
const uint8_t* pixels; |
if (static_cast<int>(upload_image_stride) == video_stride_bytes && |
!needs_conversion) { |
- pixels = video_frame->data(i); |
+ pixels = video_frame->data(plane); |
} else { |
// Avoid malloc for each frame/plane if possible. |
size_t needed_size = |
@@ -568,7 +570,7 @@ VideoFrameExternalResources VideoResourceUpdater::CreateForSoftwarePlanes( |
uint16_t* dst = reinterpret_cast<uint16_t*>( |
&upload_pixels_[upload_image_stride * row]); |
const uint16_t* src = reinterpret_cast<uint16_t*>( |
- video_frame->data(i) + (video_stride_bytes * row)); |
+ video_frame->data(plane) + (video_stride_bytes * row)); |
if (bits_per_channel <= 10) { |
// Micro-benchmarking indicates that the compiler does |
// a good enough job of optimizing this loop that trying |
@@ -585,7 +587,7 @@ VideoFrameExternalResources VideoResourceUpdater::CreateForSoftwarePlanes( |
// down to fit it into an 8-bit texture. |
uint8_t* dst = &upload_pixels_[upload_image_stride * row]; |
const uint16_t* src = reinterpret_cast<uint16_t*>( |
- video_frame->data(i) + (video_stride_bytes * row)); |
+ video_frame->data(plane) + (video_stride_bytes * row)); |
for (size_t i = 0; i < bytes_per_row; i++) |
dst[i] = src[i] >> shift; |
} else { |
@@ -593,7 +595,7 @@ VideoFrameExternalResources VideoResourceUpdater::CreateForSoftwarePlanes( |
// differ in stride, copy one row at a time. |
uint8_t* dst = &upload_pixels_[upload_image_stride * row]; |
const uint8_t* src = |
- video_frame->data(i) + (video_stride_bytes * row); |
+ video_frame->data(plane) + (video_stride_bytes * row); |
memcpy(dst, src, bytes_per_row); |
} |
} |
@@ -602,10 +604,9 @@ VideoFrameExternalResources VideoResourceUpdater::CreateForSoftwarePlanes( |
resource_provider_->CopyToResource(plane_resource.resource_id(), pixels, |
resource_size_pixels); |
- plane_resource.SetUniqueId(video_frame->unique_id(), i); |
+ plane_resource.SetUniqueId(video_frame->unique_id(), plane); |
} |
- |
// VideoResourceUpdater shares a context with the compositor so a |
// sync token is not required. |
TextureMailbox mailbox(plane_resource.mailbox(), gpu::SyncToken(), |