Index: cc/resources/video_resource_updater.cc |
diff --git a/cc/resources/video_resource_updater.cc b/cc/resources/video_resource_updater.cc |
index c35d5d4273db362f72fb00b5fed805019470d5b3..14e9f74b9aa4c1005ae86c4c219e43b3e326f2c2 100644 |
--- a/cc/resources/video_resource_updater.cc |
+++ b/cc/resources/video_resource_updater.cc |
@@ -157,6 +157,7 @@ void VideoResourceUpdater::PlaneResource::SetUniqueId(int unique_frame_id, |
VideoFrameExternalResources::VideoFrameExternalResources() |
: type(NONE), |
+ format(RGBA_8888), |
read_lock_fences_enabled(false), |
offset(0.0f), |
multiplier(1.0f), |
@@ -314,6 +315,7 @@ VideoFrameExternalResources VideoResourceUpdater::CreateForSoftwarePlanes( |
const media::VideoPixelFormat input_frame_format = video_frame->format(); |
// TODO(hubbe): Make this a video frame method. |
+  // TODO(dshwang): handle YUV4XXPX formats via the GpuMemoryBuffer pool code. crbug.com/445071 |
int bits_per_channel = 0; |
switch (input_frame_format) { |
case media::PIXEL_FORMAT_UNKNOWN: |
@@ -357,9 +359,6 @@ VideoFrameExternalResources VideoResourceUpdater::CreateForSoftwarePlanes( |
break; |
} |
- // TODO(dshwang): support PIXEL_FORMAT_Y16. crbug.com/624436 |
- DCHECK_NE(bits_per_channel, 16); |
- |
// Only YUV software video frames are supported. |
if (!media::IsYuvPlanar(input_frame_format)) { |
NOTREACHED() << media::VideoPixelFormatToString(input_frame_format); |
@@ -368,6 +367,9 @@ VideoFrameExternalResources VideoResourceUpdater::CreateForSoftwarePlanes( |
const bool software_compositor = context_provider_ == NULL; |
bool disable_one_component_textures = |
    !software_compositor && |
    context_provider_->ContextCapabilities().disable_one_component_textures; |
ResourceFormat output_resource_format = |
resource_provider_->YuvResourceFormat(bits_per_channel); |
@@ -376,8 +378,7 @@ VideoFrameExternalResources VideoResourceUpdater::CreateForSoftwarePlanes( |
// bug workaround requires that YUV frames must be converted to RGB |
// before texture upload. |
bool texture_needs_rgb_conversion = |
- !software_compositor && |
- output_resource_format == ResourceFormat::RGBA_8888; |
+ !software_compositor && disable_one_component_textures; |
size_t output_plane_count = media::VideoFrame::NumPlanes(input_frame_format); |
// TODO(skaslev): If we're in software compositing mode, we do the YUV -> RGB |
@@ -424,8 +425,8 @@ VideoFrameExternalResources VideoResourceUpdater::CreateForSoftwarePlanes( |
} |
VideoFrameExternalResources external_resources; |
- |
external_resources.bits_per_channel = bits_per_channel; |
+ external_resources.format = output_resource_format; |
if (software_compositor || texture_needs_rgb_conversion) { |
DCHECK_EQ(plane_resources.size(), 1u); |
@@ -483,13 +484,61 @@ VideoFrameExternalResources VideoResourceUpdater::CreateForSoftwarePlanes( |
return external_resources; |
} |
- for (size_t i = 0; i < plane_resources.size(); ++i) { |
- PlaneResource& plane_resource = *plane_resources[i]; |
+ const bool highbit_rg_rgba_fallback = |
+ bits_per_channel > 8 && output_resource_format == RGBA_8888; |
+ bool needs_conversion = false; |
+ int shift = 0; |
+ if (output_resource_format == LUMINANCE_F16) { |
+ // LUMINANCE_F16 uses half-floats, so we always need a conversion step. |
+ needs_conversion = true; |
+ |
+ // If the input data was 9 or 10 bit, and we output to half-floats, |
+ // then we used the OR path below, which means that we need to |
+ // adjust the resource offset and multiplier accordingly. If the |
+ // input data uses more than 10 bits, it will already be normalized |
+ // to 0.0..1.0, so there is no need to do anything. |
+ if (bits_per_channel <= 10) { |
+ // By OR-ing with 0x3800, 10-bit numbers become half-floats in the |
+ // range [0.5..1) and 9-bit numbers get the range [0.5..0.75). |
+ // |
+ // Half-floats are evaluated as: |
+ // float value = pow(2.0, exponent - 25) * (0x400 + fraction); |
+ // |
+ // In our case the exponent is 14 (since we or with 0x3800) and |
+ // pow(2.0, 14-25) * 0x400 evaluates to 0.5 (our offset) and |
+ // pow(2.0, 14-25) * fraction is [0..0.49951171875] for 10-bit and |
+ // [0..0.24951171875] for 9-bit. |
+ // |
+ // https://en.wikipedia.org/wiki/Half-precision_floating-point_format |
+ // |
+ // PLEASE NOTE: |
+ // All planes are assumed to use the same multiplier/offset. |
+ external_resources.offset = 0.5f; |
+ // Max value from input data. |
+ int max_input_value = (1 << bits_per_channel) - 1; |
+ // 2 << 11 = 2048 would be 1.0 with our exponent. |
+ external_resources.multiplier = 2048.0 / max_input_value; |
+ } |
+ } else if (output_resource_format == RG_88) { |
+    // RG_88 can hold a 16-bit integer directly, so no conversion step is needed. |
+ needs_conversion = false; |
+ } else if (highbit_rg_rgba_fallback) { |
+    // The R and G bytes of each RGBA_8888 texel are used to hold the 16-bit value. |
+ needs_conversion = true; |
+ } else if (bits_per_channel > 8) { |
+ // If bits_per_channel > 8 and we can't use RG_88, we need to |
+ // shift the data down and create an 8-bit texture. |
+ needs_conversion = true; |
+ shift = bits_per_channel - 8; |
+ external_resources.bits_per_channel = 8; |
+ } |
+ |
+ for (size_t plane = 0; plane < plane_resources.size(); ++plane) { |
+ PlaneResource& plane_resource = *plane_resources[plane]; |
// Update each plane's resource id with its content. |
- DCHECK_EQ(plane_resource.resource_format(), |
- resource_provider_->YuvResourceFormat(bits_per_channel)); |
+ DCHECK_EQ(plane_resource.resource_format(), output_resource_format); |
- if (!plane_resource.Matches(video_frame->unique_id(), i)) { |
+ if (!plane_resource.Matches(video_frame->unique_id(), plane)) { |
// TODO(hubbe): Move all conversion (and upload?) code to media/. |
// We need to transfer data from |video_frame| to the plane resource. |
// TODO(reveman): Can use GpuMemoryBuffers here to improve performance. |
@@ -499,59 +548,19 @@ VideoFrameExternalResources VideoResourceUpdater::CreateForSoftwarePlanes( |
gfx::Size resource_size_pixels = plane_resource.resource_size(); |
// The |video_stride_bytes| is the width of the video frame we are |
// uploading (including non-frame data to fill in the stride). |
- int video_stride_bytes = video_frame->stride(i); |
+ int video_stride_bytes = video_frame->stride(plane); |
size_t bytes_per_row = ResourceUtil::CheckedWidthInBytes<size_t>( |
- resource_size_pixels.width(), plane_resource.resource_format()); |
+ resource_size_pixels.width(), output_resource_format); |
// Use 4-byte row alignment (OpenGL default) for upload performance. |
// Assuming that GL_UNPACK_ALIGNMENT has not changed from default. |
size_t upload_image_stride = |
MathUtil::CheckedRoundUp<size_t>(bytes_per_row, 4u); |
- bool needs_conversion = false; |
- int shift = 0; |
- |
- // LUMINANCE_F16 uses half-floats, so we always need a conversion step. |
- if (plane_resource.resource_format() == LUMINANCE_F16) { |
- needs_conversion = true; |
- |
- // If the input data was 9 or 10 bit, and we output to half-floats, |
- // then we used the OR path below, which means that we need to |
- // adjust the resource offset and multiplier accordingly. If the |
- // input data uses more than 10 bits, it will already be normalized |
- // to 0.0..1.0, so there is no need to do anything. |
- if (bits_per_channel <= 10) { |
- // By OR-ing with 0x3800, 10-bit numbers become half-floats in the |
- // range [0.5..1) and 9-bit numbers get the range [0.5..0.75). |
- // |
- // Half-floats are evaluated as: |
- // float value = pow(2.0, exponent - 25) * (0x400 + fraction); |
- // |
- // In our case the exponent is 14 (since we or with 0x3800) and |
- // pow(2.0, 14-25) * 0x400 evaluates to 0.5 (our offset) and |
- // pow(2.0, 14-25) * fraction is [0..0.49951171875] for 10-bit and |
- // [0..0.24951171875] for 9-bit. |
- // |
- // https://en.wikipedia.org/wiki/Half-precision_floating-point_format |
- // |
- // PLEASE NOTE: |
- // All planes are assumed to use the same multiplier/offset. |
- external_resources.offset = 0.5f; |
- // Max value from input data. |
- int max_input_value = (1 << bits_per_channel) - 1; |
- // 2 << 11 = 2048 would be 1.0 with our exponent. |
- external_resources.multiplier = 2048.0 / max_input_value; |
- } |
- } else if (bits_per_channel > 8) { |
- // If bits_per_channel > 8 and we can't use LUMINANCE_F16, we need to |
- // shift the data down and create an 8-bit texture. |
- needs_conversion = true; |
- shift = bits_per_channel - 8; |
- } |
const uint8_t* pixels; |
if (static_cast<int>(upload_image_stride) == video_stride_bytes && |
!needs_conversion) { |
- pixels = video_frame->data(i); |
+ pixels = video_frame->data(plane); |
} else { |
// Avoid malloc for each frame/plane if possible. |
size_t needed_size = |
@@ -560,11 +569,11 @@ VideoFrameExternalResources VideoResourceUpdater::CreateForSoftwarePlanes( |
upload_pixels_.resize(needed_size); |
for (int row = 0; row < resource_size_pixels.height(); ++row) { |
- if (plane_resource.resource_format() == LUMINANCE_F16) { |
+ if (output_resource_format == LUMINANCE_F16) { |
uint16_t* dst = reinterpret_cast<uint16_t*>( |
&upload_pixels_[upload_image_stride * row]); |
const uint16_t* src = reinterpret_cast<uint16_t*>( |
- video_frame->data(i) + (video_stride_bytes * row)); |
+ video_frame->data(plane) + (video_stride_bytes * row)); |
if (bits_per_channel <= 10) { |
// Micro-benchmarking indicates that the compiler does |
// a good enough job of optimizing this loop that trying |
@@ -576,12 +585,19 @@ VideoFrameExternalResources VideoResourceUpdater::CreateForSoftwarePlanes( |
} else { |
MakeHalfFloats(src, bits_per_channel, bytes_per_row / 2, dst); |
} |
+ } else if (highbit_rg_rgba_fallback) { |
+ uint32_t* dst = reinterpret_cast<uint32_t*>( |
+ &upload_pixels_[upload_image_stride * row]); |
+ const uint16_t* src = reinterpret_cast<uint16_t*>( |
+ video_frame->data(plane) + (video_stride_bytes * row)); |
+ for (int i = 0; i < resource_size_pixels.width(); i++) |
+ dst[i] = src[i]; |
} else if (shift != 0) { |
// We have more-than-8-bit input which we need to shift |
// down to fit it into an 8-bit texture. |
uint8_t* dst = &upload_pixels_[upload_image_stride * row]; |
const uint16_t* src = reinterpret_cast<uint16_t*>( |
- video_frame->data(i) + (video_stride_bytes * row)); |
+ video_frame->data(plane) + (video_stride_bytes * row)); |
for (size_t i = 0; i < bytes_per_row; i++) |
dst[i] = src[i] >> shift; |
} else { |
@@ -589,7 +605,7 @@ VideoFrameExternalResources VideoResourceUpdater::CreateForSoftwarePlanes( |
// differ in stride, copy one row at a time. |
uint8_t* dst = &upload_pixels_[upload_image_stride * row]; |
const uint8_t* src = |
- video_frame->data(i) + (video_stride_bytes * row); |
+ video_frame->data(plane) + (video_stride_bytes * row); |
memcpy(dst, src, bytes_per_row); |
} |
} |
@@ -598,10 +614,9 @@ VideoFrameExternalResources VideoResourceUpdater::CreateForSoftwarePlanes( |
resource_provider_->CopyToResource(plane_resource.resource_id(), pixels, |
resource_size_pixels); |
- plane_resource.SetUniqueId(video_frame->unique_id(), i); |
+ plane_resource.SetUniqueId(video_frame->unique_id(), plane); |
} |
- |
// VideoResourceUpdater shares a context with the compositor so a |
// sync token is not required. |
TextureMailbox mailbox(plane_resource.mailbox(), gpu::SyncToken(), |