Index: cc/output/gl_renderer.cc |
diff --git a/cc/output/gl_renderer.cc b/cc/output/gl_renderer.cc |
index 59bb170c17de233ad7fd41c17ec0818435a0048d..573d2248dcd613159d9b00922b5f4b6942e5a054 100644 |
--- a/cc/output/gl_renderer.cc |
+++ b/cc/output/gl_renderer.cc |
@@ -2135,18 +2135,22 @@ void ComputeYUVToRGBMatrices(YUVVideoDrawQuad::ColorSpace color_space, |
float* yuv_to_rgb = NULL; |
float* yuv_adjust = NULL; |
+ gfx::ColorSpace gfx_color_space; |
switch (color_space) { |
case YUVVideoDrawQuad::REC_601: |
yuv_to_rgb = yuv_to_rgb_rec601; |
yuv_adjust = yuv_adjust_constrained; |
+ gfx_color_space = gfx::ColorSpace::CreateREC601(); |
break; |
case YUVVideoDrawQuad::REC_709: |
yuv_to_rgb = yuv_to_rgb_rec709; |
yuv_adjust = yuv_adjust_constrained; |
+ gfx_color_space = gfx::ColorSpace::CreateREC709(); |
break; |
case YUVVideoDrawQuad::JPEG: |
yuv_to_rgb = yuv_to_rgb_jpeg; |
yuv_adjust = yuv_adjust_full; |
+ gfx_color_space = gfx::ColorSpace::CreateJpeg(); |
break; |
} |
@@ -2164,6 +2168,60 @@ void ComputeYUVToRGBMatrices(YUVVideoDrawQuad::ColorSpace color_space, |
yuv_adjust[i] * adjustment_multiplier / resource_multiplier - |
resource_offset; |
} |
+ |
+  // TODO(ccameron): Delete the above code, and just use the below code instead. |
+ |
+ // Start with the resource adjust. |
+ SkMatrix44 full_transform; |
hubbe
2017/02/02 04:51:37
I still think GetAffineApproximation() is a better
ccameron
2017/02/02 17:33:52
This patch is solving a different problem than tha
hubbe
2017/02/02 18:35:25
Matching is perhaps not the right goal since the c
|
+ { |
+ full_transform.setScale(resource_multiplier, resource_multiplier, |
+ resource_multiplier); |
+ full_transform.postTranslate(resource_offset, resource_offset, |
+ resource_offset); |
+ } |
hubbe
2017/02/02 04:51:36
What is this {} for?
(Same for the ones below.)
ccameron
2017/02/02 17:33:52
In the instances below, it's to show to the reader
hubbe
2017/02/02 18:35:25
For me the add cognitive load when I read the code
|
+ |
+ // Then apply the range adjust. |
+ { |
+ SkMatrix44 range_adjust; |
+ gfx_color_space.GetRangeAdjustMatrix(&range_adjust); |
+ full_transform.postConcat(range_adjust); |
+ } |
+ |
+  // Then apply the YUV to RGB transform. |
+ { |
+ SkMatrix44 rgb_to_yuv; |
+ SkMatrix44 yuv_to_rgb; |
+ gfx_color_space.GetTransferMatrix(&rgb_to_yuv); |
hubbe
2017/02/02 04:51:37
In one case, this is is not a linear matrix...
ccameron
2017/02/02 17:33:52
How to handle this in the general case is a good q
hubbe
2017/02/02 18:35:25
The problem with the BT2020_CL case is that it dep
|
+ rgb_to_yuv.invert(&yuv_to_rgb); |
+ full_transform.postConcat(yuv_to_rgb); |
+ } |
+ |
+ // For the upcoming DCHECKs, convert from the form |
+ // rgb = A*yuv+b |
+ // to the form |
+ // rgb = A*(yuv+b) |
+ float adjust[4] = {0, 0, 0, 0}; |
+ { |
+ SkMatrix44 full_transform_inverse; |
+ full_transform.invert(&full_transform_inverse); |
+ float adjust_preimage[4] = {full_transform.get(0, 3), |
+ full_transform.get(1, 3), |
+ full_transform.get(2, 3), 0}; |
+ full_transform_inverse.mapScalars(adjust_preimage, adjust); |
+ } |
+ |
+ // TODO(ccameron): The gfx::ColorSpace-based approach produces some pixel |
+ // differences. For the initial checkin, DCHECK that the parameters are |
+ // very close. The subsequent checkins will delete the old path. |
+ const float kEpsilon = 1.f / 255.f; |
+ for (int i = 0; i < 3; ++i) { |
+ for (int j = 0; j < 3; ++j) { |
+ float diff = yuv_to_rgb_multiplied[3 * j + i] - full_transform.get(i, j); |
+ DCHECK_LT(std::abs(diff), kEpsilon); |
+ } |
+ float diff = yuv_adjust_with_offset[i] - adjust[i]; |
+ DCHECK_LT(std::abs(diff), kEpsilon); |
+ } |
} |
void GLRenderer::DrawYUVVideoQuad(const DrawingFrame* frame, |