Chromium Code Reviews

Index: include/gpu/GrCaps.h
diff --git a/include/gpu/GrCaps.h b/include/gpu/GrCaps.h
index d698a4142c2b5f783d1753e8678f18b96615b7de..d3faf26c60f02a63a6d37ea47ec0295943dff543 100644
--- a/include/gpu/GrCaps.h
+++ b/include/gpu/GrCaps.h
@@ -76,6 +76,17 @@ public:
     };
 
     /**
+     * Returns the effective precision of a floating point type declared with the given
+     * precision qualifier.
+     *
+     * e.g. if lowp == mediump, then effectiveFloatPrecision(lowp) is mediump. If the platform
+     * doesn't use precision qualifiers, effectiveFloatPrecision() is always highp.
+     */
+    GrSLPrecision effectiveFloatPrecision(GrShaderType shaderType, GrSLPrecision precision) const {
bsalomon
2016/04/06 13:35:56
It still feels odd to me that we have a precooked …
+        return fEffectiveFloatPrecisions[shaderType][precision];
+    }
+
+    /**
     * Is there any difference between the float shader variable precision types? If this is false
     * then, unless the shader type is not supported, any call to getFloatShaderPrecisionInfo()
     * would report the same info for all precisions in all shader types.
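
For context, a caller could consult the new accessor like this. This is a minimal hypothetical sketch, not part of the change; the helper name is made up, and it assumes the kFragment_GrShaderType and k*_GrSLPrecision enum values declared elsewhere in this header:

    // Hypothetical helper: does requesting mediump in a fragment shader buy
    // anything over highp on this device?
    static bool mediumpIsDistinctFromHighp(const GrCaps& caps) {
        GrSLPrecision effective =
            caps.effectiveFloatPrecision(kFragment_GrShaderType, kMedium_GrSLPrecision);
        // If mediump's effective precision is highp, the two qualifiers are
        // indistinguishable on this platform.
        return effective != kHigh_GrSLPrecision;
    }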
@@ -101,6 +112,9 @@ public:
     }
 
 protected:
+    /** Subclasses must call this after filling in the shader precision table. */
+    void initEffectiveFloatPrecisionTable();
+
     /** Subclasses must call this after initialization in order to apply caps overrides requested by
         the client. Note that overrides will only reduce the caps, never expand them. */
     void applyOptionsOverrides(const GrContextOptions& options);
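
One plausible shape for initEffectiveFloatPrecisionTable() is sketched below. It is a guess, not the code under review, and it rests on two assumptions the diff does not show: PrecisionInfo can be compared with operator==, and "effective" means the highest qualifier whose reported info matches the requested one (so lowp maps to mediump when the two are identical, per the new doc comment):

    void GrCaps::initEffectiveFloatPrecisionTable() {
        for (int s = 0; s < kGrShaderTypeCount; ++s) {
            for (int p = 0; p < kGrSLPrecisionCount; ++p) {
                GrSLPrecision effective = static_cast<GrSLPrecision>(p);
                // Adopt the highest qualifier that reports the same precision
                // info as the requested one (assumes PrecisionInfo::operator==).
                for (int q = p + 1; q < kGrSLPrecisionCount; ++q) {
                    if (fFloatPrecisions[s][q] == fFloatPrecisions[s][p]) {
                        effective = static_cast<GrSLPrecision>(q);
                    }
                }
                fEffectiveFloatPrecisions[s][p] = effective;
            }
        }
    }

On a platform that ignores precision qualifiers, every entry would collapse to highp, matching the "always highp" behavior the new doc comment describes.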
@@ -114,6 +128,7 @@ protected:
     bool fShaderPrecisionVaries;
     PrecisionInfo fFloatPrecisions[kGrShaderTypeCount][kGrSLPrecisionCount];
+    GrSLPrecision fEffectiveFloatPrecisions[kGrShaderTypeCount][kGrSLPrecisionCount];
     int fPixelLocalStorageSize;
     bool fPLSPathRenderingSupport;