Chromium Code Reviews

Index: cc/output/gl_renderer.cc
diff --git a/cc/output/gl_renderer.cc b/cc/output/gl_renderer.cc
index 7427d4df54e5947ccbee08c8658d3e9509576d49..aa965eba56e3d864bd435ad4036ec7683cede1c1 100644
--- a/cc/output/gl_renderer.cc
+++ b/cc/output/gl_renderer.cc
@@ -2207,18 +2207,40 @@ void GLRenderer::DrawYUVVideoQuad(const DrawingFrame* frame,
       base::FeatureList::IsEnabled(media::kVideoColorManagement);
   DCHECK(!(use_nv12 && use_alpha_plane));
+  FragmentShaderYUVVideo::HighbitTexture highbit_texture =
+      FragmentShaderYUVVideo::HIGHBIT_Y8;
+  if (quad->bits_per_channel > 8) {
+    // TODO(dshwang): the resource should know its format. crbug.com/624436
+    ResourceFormat resource_format =
+        resource_provider_->GetResourceFormat(quad->y_plane_resource_id());
+    if (resource_format == LUMINANCE_F16) {
+      highbit_texture = FragmentShaderYUVVideo::HIGHBIT_LUMINANCE_F16;
+    } else {
+      DCHECK_EQ(RG_88, resource_format);
+      highbit_texture = FragmentShaderYUVVideo::HIGHBIT_RG88;
+    }
+  }
+  DCHECK_LE(YUVVideoDrawQuad::kMinBitsPerChannel, quad->bits_per_channel);
+  DCHECK_LE(quad->bits_per_channel, YUVVideoDrawQuad::kMaxBitsPerChannel);
+  GLenum filter = highbit_texture == FragmentShaderYUVVideo::HIGHBIT_RG88
+                      ? GL_NEAREST
+                      : GL_LINEAR;
+
+  // The y plane can be an RG texture, so use the manual bilinear shader.
   ResourceProvider::ScopedSamplerGL y_plane_lock(
-      resource_provider_, quad->y_plane_resource_id(), GL_TEXTURE1, GL_LINEAR);
+      resource_provider_, quad->y_plane_resource_id(), GL_TEXTURE1, filter);
   ResourceProvider::ScopedSamplerGL u_plane_lock(
-      resource_provider_, quad->u_plane_resource_id(), GL_TEXTURE2, GL_LINEAR);
+      resource_provider_, quad->u_plane_resource_id(), GL_TEXTURE2, filter);
   DCHECK_EQ(y_plane_lock.target(), u_plane_lock.target());
   // TODO(jbauman): Use base::Optional when available.
   std::unique_ptr<ResourceProvider::ScopedSamplerGL> v_plane_lock;
   if (!use_nv12) {
     v_plane_lock.reset(new ResourceProvider::ScopedSamplerGL(
-        resource_provider_, quad->v_plane_resource_id(), GL_TEXTURE3,
-        GL_LINEAR));
+        resource_provider_, quad->v_plane_resource_id(), GL_TEXTURE3, filter));
     DCHECK_EQ(y_plane_lock.target(), v_plane_lock->target());
+  } else {
+    // |bits_per_channel| of PIXEL_FORMAT_NV12 is 8.
+    DCHECK_EQ(static_cast<GLenum>(GL_LINEAR), filter);
   }
   std::unique_ptr<ResourceProvider::ScopedSamplerGL> a_plane_lock;
   if (use_alpha_plane) {
@@ -2226,6 +2248,8 @@ void GLRenderer::DrawYUVVideoQuad(const DrawingFrame* frame,
         resource_provider_, quad->a_plane_resource_id(), GL_TEXTURE4,
         GL_LINEAR));
     DCHECK_EQ(y_plane_lock.target(), a_plane_lock->target());
+    // |bits_per_channel| of PIXEL_FORMAT_YV12A is 8.
+    DCHECK_EQ(static_cast<GLenum>(GL_LINEAR), filter);
   }
   // All planes must have the same sampler type.
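
Note: why GL_NEAREST for RG_88 planes. When a 9-16 bit plane is uploaded as an
RG_88 texture, the two bytes of each sample land in separate channels, and
hardware GL_LINEAR filtering would interpolate the high and low bytes
independently, producing nonsense values between texels. So the texture is
sampled with GL_NEAREST and the shader filters manually after reconstructing
each sample. A minimal CPU-side sketch of that reconstruction (the helper name
and byte order are assumptions for illustration; the real logic lives in the
GLSL of FragmentShaderYUVVideo):

    #include <cstdint>

    // Hypothetical helper: rebuild a normalized [0, 1] value from one RG_88
    // texel that holds a bits_per_channel-wide sample.
    float UnpackHighbitSample(uint8_t r, uint8_t g, int bits_per_channel) {
      // Assumption: R carries the low byte, G the high byte.
      uint32_t raw = (static_cast<uint32_t>(g) << 8) | r;
      float inverse_max_input_value = 1.f / ((1 << bits_per_channel) - 1);
      return raw * inverse_max_input_value;
    }

Bilinear filtering then has to happen on the reconstructed values, which is
why the shader also needs the plane sizes (ya_size/uv_size) to step between
neighboring texels.
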
@@ -2236,8 +2260,11 @@ void GLRenderer::DrawYUVVideoQuad(const DrawingFrame* frame,
   int ya_tex_offset_location = -1;
   int uv_tex_scale_location = -1;
   int uv_tex_offset_location = -1;
+  int ya_size_location = -1;
+  int uv_size_location = -1;
   int ya_clamp_rect_location = -1;
   int uv_clamp_rect_location = -1;
+  int inverse_max_input_value_location = -1;
   int y_texture_location = -1;
   int u_texture_location = -1;
   int v_texture_location = -1;
@@ -2249,8 +2276,9 @@ void GLRenderer::DrawYUVVideoQuad(const DrawingFrame* frame,
   int alpha_location = -1;
   int resource_multiplier_location = -1;
   int resource_offset_location = -1;
-  const VideoYUVProgram* program = GetVideoYUVProgram(
-      tex_coord_precision, sampler, use_alpha_plane, use_nv12, use_color_lut);
+  const VideoYUVProgram* program =
+      GetVideoYUVProgram(tex_coord_precision, sampler, use_alpha_plane,
+                         use_nv12, use_color_lut, highbit_texture);
   DCHECK(program && (program->initialized() || IsContextLost()));
   SetUseProgram(program->program());
   matrix_location = program->vertex_shader().matrix_location();
@@ -2266,8 +2294,12 @@ void GLRenderer::DrawYUVVideoQuad(const DrawingFrame* frame,
   lut_texture_location = program->fragment_shader().lut_texture_location();
   yuv_matrix_location = program->fragment_shader().yuv_matrix_location();
   yuv_adj_location = program->fragment_shader().yuv_adj_location();
+  ya_size_location = program->fragment_shader().ya_size_location();
+  uv_size_location = program->fragment_shader().uv_size_location();
   ya_clamp_rect_location = program->fragment_shader().ya_clamp_rect_location();
   uv_clamp_rect_location = program->fragment_shader().uv_clamp_rect_location();
+  inverse_max_input_value_location =
+      program->fragment_shader().inverse_max_input_value_location();
   alpha_location = program->fragment_shader().alpha_location();
   resource_multiplier_location =
       program->fragment_shader().resource_multiplier_location();
@@ -2335,83 +2367,105 @@ void GLRenderer::DrawYUVVideoQuad(const DrawingFrame* frame,
   if (use_alpha_plane)
     gl_->Uniform1i(a_texture_location, 4);
-  // These values are magic numbers that are used in the transformation from YUV
-  // to RGB color values. They are taken from the following webpage:
-  // http://www.fourcc.org/fccyvrgb.php
-  float yuv_to_rgb_rec601[9] = {
-      1.164f, 1.164f, 1.164f, 0.0f, -.391f, 2.018f, 1.596f, -.813f, 0.0f,
-  };
-  float yuv_to_rgb_jpeg[9] = {
-      1.f, 1.f, 1.f, 0.0f, -.34414f, 1.772f, 1.402f, -.71414f, 0.0f,
-  };
-  float yuv_to_rgb_rec709[9] = {
-      1.164f, 1.164f, 1.164f, 0.0f, -0.213f, 2.112f, 1.793f, -0.533f, 0.0f,
-  };
-
-  // They are used in the YUV to RGBA conversion formula:
-  //   Y - 16  : Gives 16 values of head and footroom for overshooting
-  //   U - 128 : Turns unsigned U into signed U [-128,127]
-  //   V - 128 : Turns unsigned V into signed V [-128,127]
-  float yuv_adjust_constrained[3] = {
-      -16.f, -128.f, -128.f,
-  };
+  if (use_color_lut) {
    Inline review comments on this line:
    hubbe  2016/09/29 23:36:51
        I don't really like this change, as it requires th…
    dshwang  2016/10/05 15:13:39
        I think gl_renderer is controller of shader.cc. gl…
    enne (OOO)  2016/10/05 18:59:43
        I agree with this. GLRenderer is the single consu…
+    DCHECK_NE(-1, lut_texture_location);
+    unsigned int lut_texture = color_lut_cache_.GetLUT(
+        quad->video_color_space, output_surface_->device_color_space(), 32);
+    gl_->ActiveTexture(GL_TEXTURE5);
+    gl_->BindTexture(GL_TEXTURE_2D, lut_texture);
+    gl_->Uniform1i(lut_texture_location, 5);
+    gl_->ActiveTexture(GL_TEXTURE0);
-  // Same as above, but without the head and footroom.
-  float yuv_adjust_full[3] = {
-      0.0f, -128.f, -128.f,
-  };
+    if (highbit_texture == FragmentShaderYUVVideo::HIGHBIT_LUMINANCE_F16) {
+      DCHECK_NE(-1, resource_multiplier_location);
+      DCHECK_NE(-1, resource_offset_location);
+      gl_->Uniform1f(resource_multiplier_location, quad->resource_multiplier);
+      gl_->Uniform1f(resource_offset_location, quad->resource_offset);
+    }
+  } else {
+    DCHECK_NE(-1, yuv_matrix_location);
+    DCHECK_NE(-1, yuv_adj_location);
+
+    // These values are magic numbers that are used in the transformation from
+    // YUV to RGB color values. They are taken from the following webpage:
+    // http://www.fourcc.org/fccyvrgb.php
+    float yuv_to_rgb_rec601[9] = {
+        1.164f, 1.164f, 1.164f, 0.0f, -.391f, 2.018f, 1.596f, -.813f, 0.0f,
+    };
+    float yuv_to_rgb_jpeg[9] = {
+        1.f, 1.f, 1.f, 0.0f, -.34414f, 1.772f, 1.402f, -.71414f, 0.0f,
+    };
+    float yuv_to_rgb_rec709[9] = {
+        1.164f, 1.164f, 1.164f, 0.0f, -0.213f, 2.112f, 1.793f, -0.533f, 0.0f,
+    };
-  float* yuv_to_rgb = NULL;
-  float* yuv_adjust = NULL;
+    // They are used in the YUV to RGBA conversion formula:
+    //   Y - 16  : Gives 16 values of head and footroom for overshooting
+    //   U - 128 : Turns unsigned U into signed U [-128,127]
+    //   V - 128 : Turns unsigned V into signed V [-128,127]
+    float yuv_adjust_constrained[3] = {
+        -16.f, -128.f, -128.f,
+    };
-  switch (quad->color_space) {
-    case YUVVideoDrawQuad::REC_601:
-      yuv_to_rgb = yuv_to_rgb_rec601;
-      yuv_adjust = yuv_adjust_constrained;
-      break;
-    case YUVVideoDrawQuad::REC_709:
-      yuv_to_rgb = yuv_to_rgb_rec709;
-      yuv_adjust = yuv_adjust_constrained;
-      break;
-    case YUVVideoDrawQuad::JPEG:
-      yuv_to_rgb = yuv_to_rgb_jpeg;
-      yuv_adjust = yuv_adjust_full;
-      break;
-  }
+    // Same as above, but without the head and footroom.
+    float yuv_adjust_full[3] = {
+        0.0f, -128.f, -128.f,
+    };
-  float yuv_to_rgb_multiplied[9];
-  float yuv_adjust_with_offset[3];
+    float* yuv_to_rgb = NULL;
+    float* yuv_adjust = NULL;
-  // Formula according to BT.601-7 section 2.5.3.
-  DCHECK_LE(YUVVideoDrawQuad::kMinBitsPerChannel, quad->bits_per_channel);
-  DCHECK_LE(quad->bits_per_channel, YUVVideoDrawQuad::kMaxBitsPerChannel);
-  float adjustment_multiplier = (1 << (quad->bits_per_channel - 8)) * 1.0f /
-                                ((1 << quad->bits_per_channel) - 1);
+    switch (quad->color_space) {
+      case YUVVideoDrawQuad::REC_601:
+        yuv_to_rgb = yuv_to_rgb_rec601;
+        yuv_adjust = yuv_adjust_constrained;
+        break;
+      case YUVVideoDrawQuad::REC_709:
+        yuv_to_rgb = yuv_to_rgb_rec709;
+        yuv_adjust = yuv_adjust_constrained;
+        break;
+      case YUVVideoDrawQuad::JPEG:
+        yuv_to_rgb = yuv_to_rgb_jpeg;
+        yuv_adjust = yuv_adjust_full;
+        break;
+    }
-  for (int i = 0; i < 9; ++i)
-    yuv_to_rgb_multiplied[i] = yuv_to_rgb[i] * quad->resource_multiplier;
+    float yuv_adjust_with_offset[3];
-  for (int i = 0; i < 3; ++i) {
-    yuv_adjust_with_offset[i] =
-        yuv_adjust[i] * adjustment_multiplier / quad->resource_multiplier -
-        quad->resource_offset;
-  }
+    // Formula according to BT.601-7 section 2.5.3.
+    float adjustment_multiplier = (1 << (quad->bits_per_channel - 8)) * 1.0f /
+                                  ((1 << quad->bits_per_channel) - 1);
-  if (lut_texture_location != -1) {
-    unsigned int lut_texture = color_lut_cache_.GetLUT(
-        quad->video_color_space, output_surface_->device_color_space(), 32);
-    gl_->ActiveTexture(GL_TEXTURE5);
-    gl_->BindTexture(GL_TEXTURE_2D, lut_texture);
-    gl_->Uniform1i(lut_texture_location, 5);
-    gl_->ActiveTexture(GL_TEXTURE0);
-  }
+    if (highbit_texture == FragmentShaderYUVVideo::HIGHBIT_LUMINANCE_F16) {
+      float yuv_to_rgb_multiplied[9];
+      for (int i = 0; i < 9; ++i)
+        yuv_to_rgb_multiplied[i] = yuv_to_rgb[i] * quad->resource_multiplier;
+      for (int i = 0; i < 3; ++i) {
+        yuv_adjust_with_offset[i] =
+            yuv_adjust[i] * adjustment_multiplier / quad->resource_multiplier -
+            quad->resource_offset;
+      }
+      gl_->UniformMatrix3fv(yuv_matrix_location, 1, 0, yuv_to_rgb_multiplied);
+    } else {
+      for (int i = 0; i < 3; ++i) {
+        yuv_adjust_with_offset[i] = yuv_adjust[i] * adjustment_multiplier;
+      }
+      gl_->UniformMatrix3fv(yuv_matrix_location, 1, 0, yuv_to_rgb);
+    }
-  if (resource_multiplier_location != -1) {
-    gl_->Uniform1f(resource_multiplier_location, quad->resource_multiplier);
+    gl_->Uniform3fv(yuv_adj_location, 1, yuv_adjust_with_offset);
   }
-  if (resource_offset_location != -1) {
-    gl_->Uniform1f(resource_offset_location, quad->resource_offset);
+  if (highbit_texture == FragmentShaderYUVVideo::HIGHBIT_RG88) {
+    DCHECK_NE(-1, ya_size_location);
+    DCHECK_NE(-1, uv_size_location);
+    DCHECK_NE(-1, inverse_max_input_value_location);
+    gl_->Uniform2f(ya_size_location, quad->ya_tex_size.width(),
+                   quad->ya_tex_size.height());
+    gl_->Uniform2f(uv_size_location, quad->uv_tex_size.width(),
+                   quad->uv_tex_size.height());
+    gl_->Uniform1f(inverse_max_input_value_location,
+                   1.f / ((1 << quad->bits_per_channel) - 1));
   }
   // The transform and vertex data are used to figure out the extents that the
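
Note: the conversion constants above are defined on an 8-bit scale, and
adjustment_multiplier rescales them to the normalized range of a
bits_per_channel texture: (1 << (bits - 8)) / ((1 << bits) - 1). A small
standalone check of that arithmetic for a hypothetical 10-bit, limited-range
quad (not part of the patch):

    #include <cstdio>

    int main() {
      const int bits_per_channel = 10;
      // Same expression as in DrawYUVVideoQuad above.
      const float adjustment_multiplier =
          (1 << (bits_per_channel - 8)) * 1.0f /
          ((1 << bits_per_channel) - 1);            // 4 / 1023 ~= 0.003910
      // The 8-bit headroom offset of 16 corresponds to the 10-bit code value
      // 64, i.e. 64 / 1023 once the sample is normalized to [0, 1].
      const float y_adjust = -16.f * adjustment_multiplier;  // ~= -0.062561
      std::printf("%f %f\n", adjustment_multiplier, y_adjust);
      return 0;
    }

For LUMINANCE_F16 textures the plane values were additionally scaled by
resource_multiplier and shifted by resource_offset when they were uploaded,
which is why that branch folds quad->resource_multiplier into the matrix and
quad->resource_offset into the adjust vector before setting the uniforms.
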
@@ -2419,13 +2473,6 @@ void GLRenderer::DrawYUVVideoQuad(const DrawingFrame* frame,
   // quad passed in via uniform is the actual geometry that gets used to draw
   // it. This is why this centered rect is used and not the original quad_rect.
   auto tile_rect = gfx::RectF(quad->rect);
-  if (yuv_matrix_location != -1) {
-    gl_->UniformMatrix3fv(yuv_matrix_location, 1, 0, yuv_to_rgb_multiplied);
-  }
-
-  if (yuv_adj_location) {
-    gl_->Uniform3fv(yuv_adj_location, 1, yuv_adjust_with_offset);
-  }
   SetShaderOpacity(quad->shared_quad_state->opacity, alpha_location);
   if (!clip_region) {
@@ -3626,18 +3673,19 @@ const GLRenderer::VideoYUVProgram* GLRenderer::GetVideoYUVProgram(
     SamplerType sampler,
     bool use_alpha_plane,
     bool use_nv12,
-    bool use_color_lut) {
+    bool use_color_lut,
+    FragmentShaderYUVVideo::HighbitTexture highbit_texture) {
   DCHECK_GE(precision, 0);
   DCHECK_LE(precision, LAST_TEX_COORD_PRECISION);
   DCHECK_GE(sampler, 0);
   DCHECK_LE(sampler, LAST_SAMPLER_TYPE);
   VideoYUVProgram* program =
       &video_yuv_program_[precision][sampler][use_alpha_plane][use_nv12]
-                         [use_color_lut];
+                         [use_color_lut][highbit_texture];
   if (!program->initialized()) {
     TRACE_EVENT0("cc", "GLRenderer::videoYUVProgram::initialize");
-    program->mutable_fragment_shader()->SetFeatures(use_alpha_plane, use_nv12,
-                                                    use_color_lut);
+    program->mutable_fragment_shader()->SetFeatures(
+        use_alpha_plane, use_nv12, use_color_lut, highbit_texture);
     program->Initialize(output_surface_->context_provider(), precision,
                         sampler);
   }
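
Note: with highbit_texture added to the cache key, the program cache in
gl_renderer.h presumably grows one more array dimension sized by the enum. A
sketch of what that declaration would look like (assumed; the header is not
part of this diff):

    VideoYUVProgram video_yuv_program_[LAST_TEX_COORD_PRECISION + 1]
                                      [LAST_SAMPLER_TYPE + 1][2][2][2]
                                      [FragmentShaderYUVVideo::
                                           LAST_HIGHBIT_TEXTURE + 1];

CleanupSharedObjects() below then has to iterate over that new dimension as
well, which is what the added |n| loop does.
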
@@ -3682,7 +3730,10 @@ void GLRenderer::CleanupSharedObjects() {
       for (int k = 0; k < 2; k++) {
         for (int l = 0; l < 2; l++) {
           for (int m = 0; m < 2; m++) {
-            video_yuv_program_[i][j][k][l][m].Cleanup(gl_);
+            for (int n = 0; n <= FragmentShaderYUVVideo::LAST_HIGHBIT_TEXTURE;
+                 n++) {
+              video_yuv_program_[i][j][k][l][m][n].Cleanup(gl_);
+            }
           }
         }
| } |