OLD | NEW |
(Empty) | |
| 1 /* |
| 2 * Copyright 2016 Google Inc. |
| 3 * |
| 4 * Use of this source code is governed by a BSD-style license that can be |
| 5 * found in the LICENSE file. |
| 6 */ |
| 7 |
| 8 #include "GrInstanceProcessor.h" |
| 9 |
| 10 #include "GrContext.h" |
| 11 #include "GrRenderTargetPriv.h" |
| 12 #include "GrResourceCache.h" |
| 13 #include "GrResourceProvider.h" |
| 14 #include "glsl/GrGLSLGeometryProcessor.h" |
| 15 #include "glsl/GrGLSLFragmentShaderBuilder.h" |
| 16 #include "glsl/GrGLSLProgramBuilder.h" |
| 17 #include "glsl/GrGLSLVarying.h" |
| 18 |
| 19 uint32_t GrInstanceProcessor::GetSupportedAAModes(const GrGLSLCaps& glslCaps, const GrCaps& caps) { |
| 20 if (!glslCaps.canUseAnyFunctionInShader() || |
| 21 !glslCaps.flatInterpolationSupport() || |
| 22 !glslCaps.integerSupport() || |
| 23 0 == glslCaps.maxVertexSamplers() || |
| 24 !caps.shaderCaps()->texelBufferSupport() || |
| 25 caps.maxVertexAttributes() < kNumVertexAttribs) { |
| 26 return 0; |
| 27 } |
| 28 uint32_t supportedAAModes = kNone_AntialiasFlag | kCoverage_AntialiasFlag; |
| 29 if (caps.sampleLocationsSupport() && |
| 30 glslCaps.sampleVariablesSupport() && |
| 31 glslCaps.shaderDerivativeSupport()) { |
| 32 supportedAAModes |= kMSAA_AntialiasFlag; |
| 33 if (0 != caps.maxRasterSamples() && |
| 34 glslCaps.sampleMaskOverrideCoverageSupport()) { |
| 35 supportedAAModes |= kMixedSamples_AntialiasFlag; |
| 36 } |
| 37 } |
| 38 return supportedAAModes; |
| 39 } |
| 40 |
| 41 GrInstanceProcessor::GrInstanceProcessor(BatchInfo batchInfo, GrBuffer* paramsBuffer) |
| 42 : fBatchInfo(batchInfo) { |
| 43 this->initClassID<GrInstanceProcessor>(); |
| 44 |
| 45 this->addVertexAttrib(Attribute("shapeCoords", kVec2f_GrVertexAttribType, kHigh_GrSLPrecision)); |
| 46 this->addVertexAttrib(Attribute("vertexAttrs", kInt_GrVertexAttribType)); |
| 47 this->addVertexAttrib(Attribute("instanceInfo", kUint_GrVertexAttribType)); |
| 48 this->addVertexAttrib(Attribute("shapeMatrixX", kVec3f_GrVertexAttribType, |
| 49 kHigh_GrSLPrecision)); |
| 50 this->addVertexAttrib(Attribute("shapeMatrixY", kVec3f_GrVertexAttribType, |
| 51 kHigh_GrSLPrecision)); |
| 52 this->addVertexAttrib(Attribute("color", kVec4f_GrVertexAttribType, kLow_GrSLPrecision)); |
| 53 this->addVertexAttrib(Attribute("localRect", kVec4f_GrVertexAttribType, kHigh_GrSLPrecision)); |
| 54 |
| 55 GR_STATIC_ASSERT(0 == kShapeCoords_AttribIdx); |
| 56 GR_STATIC_ASSERT(1 == kVertexAttrs_AttribIdx); |
| 57 GR_STATIC_ASSERT(2 == kInstanceInfo_AttribIdx); |
| 58 GR_STATIC_ASSERT(3 == kShapeMatrixX_AttribIdx); |
| 59 GR_STATIC_ASSERT(4 == kShapeMatrixY_AttribIdx); |
| 60 GR_STATIC_ASSERT(5 == kColor_AttribIdx); |
| 61 GR_STATIC_ASSERT(6 == kLocalRect_AttribIdx); |
| 62 GR_STATIC_ASSERT(7 == kNumVertexAttribs); |
| 63 |
| 64 if (fBatchInfo.fHasParams) { |
| 65 SkASSERT(paramsBuffer); |
| 66 fParamsAccess.reset(0, kRGBA_float_GrPixelConfig, paramsBuffer, kVertex_GrShaderFlag); |
| 67 this->addBufferAccess(&fParamsAccess); |
| 68 } |
| 69 |
| 70 if (fBatchInfo.fAntialiasMode >= kMSAA_AntialiasMode) { |
| 71 if (!fBatchInfo.isSimpleRects() || |
| 72 kMixedSamples_AntialiasMode == fBatchInfo.fAntialiasMode) { |
| 73 this->setWillUseSampleLocations(); |
| 74 } |
| 75 } |
| 76 } |
| 77 |
| 78 class GrGLSLInstanceProcessor : public GrGLSLGeometryProcessor, private GrInstancedRenderingTypes { |
| 79 public: |
| 80 void onEmitCode(EmitArgs& args, GrGPArgs* gpArgs) override; |
| 81 |
| 82 private: |
| 83 void setData(const GrGLSLProgramDataManager&, const GrPrimitiveProcessor&) override {} |
| 84 |
| 85 class VertexInputs; |
| 86 class Backend; |
| 87 class BackendNonAA; |
| 88 class BackendCoverage; |
| 89 class BackendMultisample; |
| 90 |
| 91 typedef GrGLSLGeometryProcessor INHERITED; |
| 92 }; |
| 93 |
| 94 GrGLSLPrimitiveProcessor* GrInstanceProcessor::createGLSLInstance(const GrGLSLCaps&) const { |
| 95 return new GrGLSLInstanceProcessor(); |
| 96 } |
| 97 |
| 98 class GrGLSLInstanceProcessor::VertexInputs { |
| 99 public: |
| 100 VertexInputs(const GrInstanceProcessor& instProc, GrGLSLVertexBuilder* vertexBuilder) |
| 101 : fInstProc(instProc), |
| 102 fVertexBuilder(vertexBuilder) { |
| 103 } |
| 104 |
| 105 void initParams(const SamplerHandle paramsBuffer) { |
| 106 fParamsBuffer = paramsBuffer; |
| 107 fVertexBuilder->definef("PARAMS_IDX_MASK", "0x%xu", kParamsIdx_InfoMask); |
| 108 fVertexBuilder->appendPrecisionModifier(kHigh_GrSLPrecision); |
| 109 fVertexBuilder->codeAppendf("int paramsIdx = int(%s & PARAMS_IDX_MASK);", |
| 110 this->attr(kInstanceInfo_AttribIdx)); |
| 111 } |
| 112 |
| 113 const char* attr(AttribIdx idx) const { return fInstProc.getAttrib(idx).fName; } |
| 114 |
| 115 void fetchNextParam(GrSLType type = kVec4f_GrSLType) const { |
| 116 SkASSERT(fParamsBuffer.isValid()); |
| 117 if (type != kVec4f_GrSLType) { |
| 118 fVertexBuilder->codeAppendf("%s(", GrGLSLTypeString(type)); |
| 119 } |
| 120 fVertexBuilder->appendTexelFetch(fParamsBuffer, "paramsIdx++"); |
| 121 if (type != kVec4f_GrSLType) { |
| 122 fVertexBuilder->codeAppend(")"); |
| 123 } |
| 124 } |
| 125 |
| 126 void skipParams(unsigned n) const { |
| 127 SkASSERT(fParamsBuffer.isValid()); |
| 128 fVertexBuilder->codeAppendf("paramsIdx += %u;", n); |
| 129 } |
| 130 |
| 131 private: |
| 132 const GrInstanceProcessor& fInstProc; |
| 133 GrGLSLVertexBuilder* fVertexBuilder; |
| 134 SamplerHandle fParamsBuffer; |
| 135 }; |
| 136 |
| 137 class GrGLSLInstanceProcessor::Backend { |
| 138 public: |
| 139 static Backend* SK_WARN_UNUSED_RESULT Create(const GrGLSLProgramBuilder*, BatchInfo, |
| 140 const VertexInputs&); |
| 141 virtual ~Backend() {} |
| 142 |
| 143 void init(GrGLSLVaryingHandler*, GrGLSLVertexBuilder*); |
| 144 virtual void setupRect(GrGLSLVertexBuilder*) = 0; |
| 145 virtual void setupOval(GrGLSLVertexBuilder*) = 0; |
| 146 void setupRRect(GrGLSLVertexBuilder*); |
| 147 |
| 148 void initInnerShape(GrGLSLVaryingHandler*, GrGLSLVertexBuilder*); |
| 149 virtual void setupInnerRect(GrGLSLVertexBuilder*) = 0; |
| 150 virtual void setupInnerOval(GrGLSLVertexBuilder*) = 0; |
| 151 void setupInnerRRect(GrGLSLVertexBuilder*); |
| 152 |
| 153 const char* outShapeCoords() { |
| 154 return fModifiedShapeCoords ? fModifiedShapeCoords : fInputs.attr(kShapeCoords_AttribIdx); |
| 155 } |
| 156 |
| 157 void emitCode(GrGLSLVertexBuilder*, GrGLSLPPFragmentBuilder*, const char* outCoverage, |
| 158 const char* outColor); |
| 159 |
| 160 protected: |
| 161 Backend(BatchInfo batchInfo, const VertexInputs& inputs) |
| 162 : fBatchInfo(batchInfo), |
| 163 fInputs(inputs), |
| 164 fModifiesCoverage(false), |
| 165 fModifiesColor(false), |
| 166 fNeedsNeighborRadii(false), |
| 167 fColor(kVec4f_GrSLType), |
| 168 fTriangleIsArc(kInt_GrSLType), |
| 169 fArcCoords(kVec2f_GrSLType), |
| 170 fInnerShapeCoords(kVec2f_GrSLType), |
| 171 fInnerRRect(kVec4f_GrSLType), |
| 172 fModifiedShapeCoords(nullptr) { |
| 173 if (fBatchInfo.fShapeTypes & kRRect_ShapesMask) { |
| 174 fModifiedShapeCoords = "adjustedShapeCoords"; |
| 175 } |
| 176 } |
| 177 |
| 178 virtual void onInit(GrGLSLVaryingHandler*, GrGLSLVertexBuilder*) = 0; |
| 179 virtual void adjustRRectVertices(GrGLSLVertexBuilder*); |
| 180 virtual void onSetupRRect(GrGLSLVertexBuilder*) {} |
| 181 |
| 182 virtual void onInitInnerShape(GrGLSLVaryingHandler*, GrGLSLVertexBuilder*) = 0; |
| 183 virtual void onSetupInnerRRect(GrGLSLVertexBuilder*) = 0; |
| 184 |
| 185 virtual void onEmitCode(GrGLSLVertexBuilder*, GrGLSLPPFragmentBuilder*, |
| 186 const char* outCoverage, const char* outColor) = 0; |
| 187 |
| 188 void setupSimpleRadii(GrGLSLVertexBuilder*); |
| 189 void setupNinePatchRadii(GrGLSLVertexBuilder*); |
| 190 void setupComplexRadii(GrGLSLVertexBuilder*); |
| 191 |
| 192 const BatchInfo fBatchInfo; |
| 193 const VertexInputs& fInputs; |
| 194 bool fModifiesCoverage; |
| 195 bool fModifiesColor; |
| 196 bool fNeedsNeighborRadii; |
| 197 GrGLSLVertToFrag fColor; |
| 198 GrGLSLVertToFrag fTriangleIsArc; |
| 199 GrGLSLVertToFrag fArcCoords; |
| 200 GrGLSLVertToFrag fInnerShapeCoords; |
| 201 GrGLSLVertToFrag fInnerRRect; |
| 202 const char* fModifiedShapeCoords; |
| 203 }; |
| 204 |
| 205 void GrGLSLInstanceProcessor::onEmitCode(EmitArgs& args, GrGPArgs* gpArgs) { |
| 206 const GrInstanceProcessor& ip = args.fGP.cast<GrInstanceProcessor>(); |
| 207 GrGLSLUniformHandler* uniHandler = args.fUniformHandler; |
| 208 GrGLSLVaryingHandler* varyingHandler = args.fVaryingHandler; |
| 209 GrGLSLVertexBuilder* v = args.fVertBuilder; |
| 210 GrGLSLPPFragmentBuilder* f = args.fFragBuilder; |
| 211 |
| 212 varyingHandler->emitAttributes(ip); |
| 213 |
| 214 VertexInputs inputs(ip, v); |
| 215 if (ip.batchInfo().fHasParams) { |
| 216 SkASSERT(1 == ip.numBuffers()); |
| 217 inputs.initParams(args.fBufferSamplers[0]); |
| 218 } |
| 219 |
| 220 if (!ip.batchInfo().fHasPerspective) { |
| 221 v->codeAppendf("mat2x3 shapeMatrix = mat2x3(%s, %s);", |
| 222 inputs.attr(kShapeMatrixX_AttribIdx), inputs.attr(kShapeMatrixY_AttribIdx)); |
| 223 } else { |
| 224 v->definef("PERSPECTIVE_FLAG", "0x%xu", kPerspective_InfoFlag); |
| 225 v->codeAppendf("mat3 shapeMatrix = mat3(%s, %s, vec3(0, 0, 1));", |
| 226 inputs.attr(kShapeMatrixX_AttribIdx), inputs.attr(kShapeMatrixY_AttribIdx)); |
| 227 v->codeAppendf("if (0u != (%s & PERSPECTIVE_FLAG)) {", |
| 228 inputs.attr(kInstanceInfo_AttribIdx)); |
| 229 v->codeAppend ( "shapeMatrix[2] = "); |
| 230 inputs.fetchNextParam(kVec3f_GrSLType); |
| 231 v->codeAppend ( ";"); |
| 232 v->codeAppend ("}"); |
| 233 } |
| 234 |
| 235 int usedShapeTypes = 0; |
| 236 |
| 237 bool hasSingleShapeType = SkIsPow2(ip.batchInfo().fShapeTypes); |
| 238 if (!hasSingleShapeType) { |
| 239 usedShapeTypes |= ip.batchInfo().fShapeTypes; |
| 240 v->define("SHAPE_TYPE_BIT", kShapeType_InfoBit); |
| 241 v->codeAppendf("uint shapeType = %s >> SHAPE_TYPE_BIT;", |
| 242 inputs.attr(kInstanceInfo_AttribIdx)); |
| 243 } |
| 244 |
| 245 SkAutoTDelete<Backend> backend(Backend::Create(v->getProgramBuilder(), ip.batchInfo(), inputs)); |
| 246 backend->init(varyingHandler, v); |
| 247 |
| 248 if (hasSingleShapeType) { |
| 249 if (kRect_ShapeFlag == ip.batchInfo().fShapeTypes) { |
| 250 backend->setupRect(v); |
| 251 } else if (kOval_ShapeFlag == ip.batchInfo().fShapeTypes) { |
| 252 backend->setupOval(v); |
| 253 } else { |
| 254 backend->setupRRect(v); |
| 255 } |
| 256 } else { |
| 257 v->codeAppend ("switch (shapeType) {"); |
| 258 if (ip.batchInfo().fShapeTypes & kRect_ShapeFlag) { |
| 259 v->codeAppend ("case RECT_SHAPE_TYPE: {"); |
| 260 backend->setupRect(v); |
| 261 v->codeAppend ("} break;"); |
| 262 } |
| 263 if (ip.batchInfo().fShapeTypes & kOval_ShapeFlag) { |
| 264 v->codeAppend ("case OVAL_SHAPE_TYPE: {"); |
| 265 backend->setupOval(v); |
| 266 v->codeAppend ("} break;"); |
| 267 } |
| 268 if (ip.batchInfo().fShapeTypes & kRRect_ShapesMask) { |
| 269 v->codeAppend ("default: {"); |
| 270 backend->setupRRect(v); |
| 271 v->codeAppend ("} break;"); |
| 272 } |
| 273 v->codeAppend ("}"); |
| 274 } |
| 275 |
| 276 if (ip.batchInfo().fInnerShapeTypes) { |
| 277 bool hasSingleInnerShapeType = SkIsPow2(ip.batchInfo().fInnerShapeTypes); |
| 278 if (!hasSingleInnerShapeType) { |
| 279 usedShapeTypes |= ip.batchInfo().fInnerShapeTypes; |
| 280 v->definef("INNER_SHAPE_TYPE_MASK", "0x%xu", kInnerShapeType_InfoMask); |
| 281 v->define("INNER_SHAPE_TYPE_BIT", kInnerShapeType_InfoBit); |
| 282 v->codeAppendf("uint innerShapeType = ((%s & INNER_SHAPE_TYPE_MASK) >> " |
| 283 "INNER_SHAPE_TYPE_BIT);", |
| 284 inputs.attr(kInstanceInfo_AttribIdx)); |
| 285 } |
| 286 // Here we take advantage of the fact that outerRect == localRect in recordDRRect. |
| 287 v->codeAppendf("vec4 outer = %s;", inputs.attr(kLocalRect_AttribIdx)); |
| 288 v->codeAppend ("vec4 inner = "); |
| 289 inputs.fetchNextParam(); |
| 290 v->codeAppend (";"); |
| 291 // innerCoords is a transform from shape coords to inner shape coords: |
| 292 // e.g. innerShapeCoords = shapeCoords * innerCoords.xy + innerCoords.zw |
| 293 v->codeAppend ("vec4 innerCoords = vec4(outer.zw - outer.xy, " |
| 294 "outer.xy + outer.zw - inner.xy - inner.zw) / " |
| 295 "(inner.zw - inner.xy).xyxy;"); |
| 296 v->codeAppendf("vec2 innerShapeCoords = %s * innerCoords.xy + innerCoords.zw;", |
| 297 backend->outShapeCoords()); |
| 298 |
| 299 backend->initInnerShape(varyingHandler, v); |
| 300 |
| 301 if (hasSingleInnerShapeType) { |
| 302 if (kRect_ShapeFlag == ip.batchInfo().fInnerShapeTypes) { |
| 303 backend->setupInnerRect(v); |
| 304 } else if (kOval_ShapeFlag == ip.batchInfo().fInnerShapeTypes) { |
| 305 backend->setupInnerOval(v); |
| 306 } else { |
| 307 backend->setupInnerRRect(v); |
| 308 } |
| 309 } else { |
| 310 v->codeAppend("switch (innerShapeType) {"); |
| 311 if (ip.batchInfo().fInnerShapeTypes & kRect_ShapeFlag) { |
| 312 v->codeAppend("case RECT_SHAPE_TYPE: {"); |
| 313 backend->setupInnerRect(v); |
| 314 v->codeAppend("} break;"); |
| 315 } |
| 316 if (ip.batchInfo().fInnerShapeTypes & kOval_ShapeFlag) { |
| 317 v->codeAppend("case OVAL_SHAPE_TYPE: {"); |
| 318 backend->setupInnerOval(v); |
| 319 v->codeAppend("} break;"); |
| 320 } |
| 321 if (ip.batchInfo().fInnerShapeTypes & kRRect_ShapesMask) { |
| 322 v->codeAppend("default: {"); |
| 323 backend->setupInnerRRect(v); |
| 324 v->codeAppend("} break;"); |
| 325 } |
| 326 v->codeAppend("}"); |
| 327 } |
| 328 } |
| 329 |
| 330 if (usedShapeTypes & kRect_ShapeFlag) { |
| 331 v->definef("RECT_SHAPE_TYPE", "%du", kRect_ShapeType); |
| 332 } |
| 333 if (usedShapeTypes & kOval_ShapeFlag) { |
| 334 v->definef("OVAL_SHAPE_TYPE", "%du", kOval_ShapeType); |
| 335 } |
| 336 |
| 337 backend->emitCode(v, f, args.fOutputCoverage, args.fOutputColor); |
| 338 |
| 339 GrSLType positionType = ip.batchInfo().fHasPerspective ? kVec3f_GrSLType : kVec2f_GrSLType; |
| 340 v->codeAppendf("%s deviceCoords = vec3(%s, 1) * shapeMatrix;", |
| 341 GrGLSLTypeString(positionType), backend->outShapeCoords()); |
| 342 gpArgs->fPositionVar.set(positionType, "deviceCoords"); |
| 343 |
| 344 const char* localCoords = nullptr; |
| 345 if (ip.batchInfo().fUsesLocalCoords) { |
| 346 localCoords = "localCoords"; |
| 347 v->codeAppendf("vec2 t = 0.5 * (%s + vec2(1));", backend->outShapeCoords()); |
| 348 v->codeAppendf("vec2 localCoords = (1.0 - t) * %s.xy + t * %s.zw;", |
| 349 inputs.attr(kLocalRect_AttribIdx), inputs.attr(kLocalRect_AttribIdx)); |
| 350 } |
| 351 |
| 352 this->emitTransforms(v, varyingHandler, uniHandler, gpArgs->fPositionVar, localCoords, |
| 353 args.fTransformsIn, args.fTransformsOut); |
| 354 |
| 355 if (ip.batchInfo().fHasLocalMatrix && ip.batchInfo().fHasParams) { |
| 356 v->definef("LOCAL_MATRIX_FLAG", "0x%xu", kLocalMatrix_InfoFlag); |
| 357 v->codeAppendf("if (0u != (%s & LOCAL_MATRIX_FLAG)) {", |
| 358 inputs.attr(kInstanceInfo_AttribIdx)); |
| 359 if (!ip.batchInfo().fUsesLocalCoords) { |
| 360 inputs.skipParams(2); |
| 361 } else { |
| 362 v->codeAppendf( "mat2x3 localMatrix;"); |
| 363 v->codeAppend ( "localMatrix[0] = "); |
| 364 inputs.fetchNextParam(kVec3f_GrSLType); |
| 365 v->codeAppend ( ";"); |
| 366 v->codeAppend ( "localMatrix[1] = "); |
| 367 inputs.fetchNextParam(kVec3f_GrSLType); |
| 368 v->codeAppend ( ";"); |
| 369 v->codeAppend ( "localCoords = vec3(localCoords, 1) * localMatrix;"); |
| 370 } |
| 371 v->codeAppend("}"); |
| 372 } |
| 373 } |
| 374 |
| 375 //////////////////////////////////////////////////////////////////////////////////////////////////// |
| 376 |
| 377 void GrGLSLInstanceProcessor::Backend::init(GrGLSLVaryingHandler* varyingHandler, |
| 378 GrGLSLVertexBuilder* v) { |
| 379 if (fModifiedShapeCoords) { |
| 380 v->codeAppendf("vec2 %s = %s;", |
| 381 fModifiedShapeCoords, fInputs.attr(kShapeCoords_AttribIdx)); |
| 382 } |
| 383 |
| 384 this->onInit(varyingHandler, v); |
| 385 |
| 386 if (!fColor.vsOut()) { |
| 387 varyingHandler->addFlatVarying("color", &fColor, kLow_GrSLPrecision); |
| 388 v->codeAppendf("%s = %s;", fColor.vsOut(), fInputs.attr(kColor_AttribIdx)); |
| 389 } |
| 390 } |
| 391 |
| 392 void GrGLSLInstanceProcessor::Backend::setupRRect(GrGLSLVertexBuilder* v) { |
| 393 v->codeAppendf("uvec2 corner = uvec2(%s & 1, (%s >> 1) & 1);", |
| 394 fInputs.attr(kVertexAttrs_AttribIdx), fInputs.attr(kVertexAttrs_AttribIdx)); |
| 395 v->codeAppend ("vec2 cornerSign = vec2(corner) * 2.0 - 1.0;"); |
| 396 v->codeAppendf("vec2 radii%s;", fNeedsNeighborRadii ? ", neighborRadii" : ""); |
| 397 v->codeAppend ("mat2 p = "); |
| 398 fInputs.fetchNextParam(kMat22f_GrSLType); |
| 399 v->codeAppend (";"); |
| 400 uint8_t types = fBatchInfo.fShapeTypes & kRRect_ShapesMask; |
| 401 if (0 == (types & (types - 1))) { |
| 402 if (kSimpleRRect_ShapeFlag == types) { |
| 403 this->setupSimpleRadii(v); |
| 404 } else if (kNinePatch_ShapeFlag == types) { |
| 405 this->setupNinePatchRadii(v); |
| 406 } else if (kComplexRRect_ShapeFlag == types) { |
| 407 this->setupComplexRadii(v); |
| 408 } |
| 409 } else { |
| 410 v->codeAppend("switch (shapeType) {"); |
| 411 if (types & kSimpleRRect_ShapeFlag) { |
| 412 v->definef("SIMPLE_R_RECT_SHAPE_TYPE", "%du", kSimpleRRect_ShapeType); |
| 413 v->codeAppend ("case SIMPLE_R_RECT_SHAPE_TYPE: {"); |
| 414 this->setupSimpleRadii(v); |
| 415 v->codeAppend ("} break;"); |
| 416 } |
| 417 if (types & kNinePatch_ShapeFlag) { |
| 418 v->definef("NINE_PATCH_SHAPE_TYPE", "%du", kNinePatch_ShapeType); |
| 419 v->codeAppend ("case NINE_PATCH_SHAPE_TYPE: {"); |
| 420 this->setupNinePatchRadii(v); |
| 421 v->codeAppend ("} break;"); |
| 422 } |
| 423 if (types & kComplexRRect_ShapeFlag) { |
| 424 v->codeAppend ("default: {"); |
| 425 this->setupComplexRadii(v); |
| 426 v->codeAppend ("} break;"); |
| 427 } |
| 428 v->codeAppend("}"); |
| 429 } |
| 430 |
| 431 this->adjustRRectVertices(v); |
| 432 |
| 433 if (fArcCoords.vsOut()) { |
| 434 v->codeAppendf("%s = (cornerSign * %s + radii - vec2(1)) / radii;", |
| 435 fArcCoords.vsOut(), fModifiedShapeCoords); |
| 436 } |
| 437 if (fTriangleIsArc.vsOut()) { |
| 438 v->codeAppendf("%s = int(all(equal(vec2(1), abs(%s))));", |
| 439 fTriangleIsArc.vsOut(), fInputs.attr(kShapeCoords_AttribIdx)); |
| 440 } |
| 441 |
| 442 this->onSetupRRect(v); |
| 443 } |
| 444 |
| 445 void GrGLSLInstanceProcessor::Backend::setupSimpleRadii(GrGLSLVertexBuilder* v) { |
| 446 if (fNeedsNeighborRadii) { |
| 447 v->codeAppend ("neighborRadii = "); |
| 448 } |
| 449 v->codeAppend("radii = p[0] * 2.0 / p[1];"); |
| 450 } |
| 451 |
| 452 void GrGLSLInstanceProcessor::Backend::setupNinePatchRadii(GrGLSLVertexBuilder* v) { |
| 453 v->codeAppend("radii = vec2(p[0][corner.x], p[1][corner.y]);"); |
| 454 if (fNeedsNeighborRadii) { |
| 455 v->codeAppend("neighborRadii = vec2(p[0][1u - corner.x], p[1][1u - corner.y]);"); |
| 456 } |
| 457 } |
| 458 |
| 459 void GrGLSLInstanceProcessor::Backend::setupComplexRadii(GrGLSLVertexBuilder* v) { |
| 460 /** |
| 461 * The x and y radii of each arc are stored in separate vectors, |
| 462 * in the following order: |
| 463 * |
| 464 * __x1 _ _ _ x3__ |
| 465 * |
| 466 * y1 | | y2 |
| 467 * |
| 468 * | | |
| 469 * |
| 470 * y3 |__ _ _ _ __| y4 |
| 471 * x2 x4 |
| 472 * |
| 473 */ |
| 474 v->codeAppend("mat2 p2 = "); |
| 475 fInputs.fetchNextParam(kMat22f_GrSLType); |
| 476 v->codeAppend(";"); |
| 477 v->codeAppend("radii = vec2(p[corner.x][corner.y], p2[corner.y][corner.x]);"); |
| 478 if (fNeedsNeighborRadii) { |
| 479 v->codeAppend("neighborRadii = vec2(p[1u - corner.x][corner.y], " |
| 480 "p2[1u - corner.y][corner.x]);"); |
| 481 } |
| 482 } |
| 483 |
| 484 void GrGLSLInstanceProcessor::Backend::adjustRRectVertices(GrGLSLVertexBuilder* v) { |
| 485 // Resize the 4 triangles that arcs are drawn into so they match their corresponding radii. |
| 486 // 0.5 is a special value that indicates the edge of an arc triangle. |
| 487 v->codeAppendf("if (abs(%s.x) == 0.5)" |
| 488 "%s.x = cornerSign.x * (1.0 - radii.x);", |
| 489 fInputs.attr(kShapeCoords_AttribIdx), fModifiedShapeCoords); |
| 490 v->codeAppendf("if (abs(%s.y) == 0.5) " |
| 491 "%s.y = cornerSign.y * (1.0 - radii.y);", |
| 492 fInputs.attr(kShapeCoords_AttribIdx), fModifiedShapeCoords); |
| 493 } |
| 494 |
| 495 void GrGLSLInstanceProcessor::Backend::initInnerShape(GrGLSLVaryingHandler* varyingHandler, |
| 496 GrGLSLVertexBuilder* v) { |
| 497 SkASSERT(!(fBatchInfo.fInnerShapeTypes & (kNinePatch_ShapeFlag | kComplexRRect_ShapeFlag))); |
| 498 |
| 499 this->onInitInnerShape(varyingHandler, v); |
| 500 |
| 501 if (fInnerShapeCoords.vsOut()) { |
| 502 v->codeAppendf("%s = innerShapeCoords;", fInnerShapeCoords.vsOut()); |
| 503 } |
| 504 } |
| 505 |
| 506 void GrGLSLInstanceProcessor::Backend::setupInnerRRect(GrGLSLVertexBuilder* v) { |
| 507 v->codeAppend("mat2 innerP = "); |
| 508 fInputs.fetchNextParam(kMat22f_GrSLType); |
| 509 v->codeAppend(";"); |
| 510 v->codeAppend("vec2 innerRadii = innerP[0] * 2.0 / innerP[1];"); |
| 511 this->onSetupInnerRRect(v); |
| 512 } |
| 513 |
| 514 void GrGLSLInstanceProcessor::Backend::emitCode(GrGLSLVertexBuilder* v, GrGLSLPPFragmentBuilder* f, |
| 515 const char* outCoverage, const char* outColor) { |
| 516 this->onEmitCode(v, f, fModifiesCoverage ? outCoverage : nullptr, |
| 517 fModifiesColor ? outColor : nullptr); |
| 518 if (!fModifiesCoverage) { |
| 519 // Even though the subclass doesn't use coverage, we are expected to assign some value. |
| 520 f->codeAppendf("%s = vec4(1);", outCoverage); |
| 521 } |
| 522 if (!fModifiesColor) { |
| 523 // The subclass didn't assign a value to the output color. |
| 524 f->codeAppendf("%s = %s;", outColor, fColor.fsIn()); |
| 525 } |
| 526 } |
| 527 |
| 528 //////////////////////////////////////////////////////////////////////////////////////////////////// |
| 529 |
| 530 class GrGLSLInstanceProcessor::BackendNonAA : public Backend { |
| 531 public: |
| 532 BackendNonAA(BatchInfo batchInfo, const VertexInputs& inputs) |
| 533 : INHERITED(batchInfo, inputs) { |
| 534 if (fBatchInfo.fCannotDiscard && !fBatchInfo.isSimpleRects()) { |
| 535 fModifiesColor = !fBatchInfo.fCannotTweakAlphaForCoverage; |
| 536 fModifiesCoverage = !fModifiesColor; |
| 537 } |
| 538 } |
| 539 |
| 540 private: |
| 541 void onInit(GrGLSLVaryingHandler*, GrGLSLVertexBuilder*) override; |
| 542 void setupRect(GrGLSLVertexBuilder*) override; |
| 543 void setupOval(GrGLSLVertexBuilder*) override; |
| 544 |
| 545 void onInitInnerShape(GrGLSLVaryingHandler*, GrGLSLVertexBuilder*) override; |
| 546 void setupInnerRect(GrGLSLVertexBuilder*) override; |
| 547 void setupInnerOval(GrGLSLVertexBuilder*) override; |
| 548 void onSetupInnerRRect(GrGLSLVertexBuilder*) override; |
| 549 |
| 550 void onEmitCode(GrGLSLVertexBuilder*, GrGLSLPPFragmentBuilder*, const char*, |
| 551 const char*) override; |
| 552 |
| 553 typedef Backend INHERITED; |
| 554 }; |
| 555 |
| 556 void GrGLSLInstanceProcessor::BackendNonAA::onInit(GrGLSLVaryingHandler* varyingHandler, |
| 557 GrGLSLVertexBuilder*) { |
| 558 if (kRect_ShapeFlag != fBatchInfo.fShapeTypes) { |
| 559 varyingHandler->addFlatVarying("triangleIsArc", &fTriangleIsArc, kHigh_GrSLPrecision); |
| 560 varyingHandler->addVarying("arcCoords", &fArcCoords, kMedium_GrSLPrecision); |
| 561 } |
| 562 } |
| 563 |
| 564 void GrGLSLInstanceProcessor::BackendNonAA::setupRect(GrGLSLVertexBuilder* v) { |
| 565 if (fTriangleIsArc.vsOut()) { |
| 566 v->codeAppendf("%s = 0;", fTriangleIsArc.vsOut()); |
| 567 } |
| 568 } |
| 569 |
| 570 void GrGLSLInstanceProcessor::BackendNonAA::setupOval(GrGLSLVertexBuilder* v) { |
| 571 SkASSERT(fArcCoords.vsOut()); |
| 572 SkASSERT(fTriangleIsArc.vsOut()); |
| 573 v->codeAppendf("%s = %s;", fArcCoords.vsOut(), this->outShapeCoords()); |
| 574 v->codeAppendf("%s = %s & 1;", fTriangleIsArc.vsOut(), fInputs.attr(kVertexAttrs_AttribIdx)); |
| 575 } |
| 576 |
| 577 void GrGLSLInstanceProcessor::BackendNonAA::onInitInnerShape(GrGLSLVaryingHandler* varyingHandler, |
| 578 GrGLSLVertexBuilder*) { |
| 579 varyingHandler->addVarying("innerShapeCoords", &fInnerShapeCoords, kMedium_GrSLPrecision); |
| 580 if (kRect_ShapeFlag != fBatchInfo.fInnerShapeTypes && |
| 581 kOval_ShapeFlag != fBatchInfo.fInnerShapeTypes) { |
| 582 varyingHandler->addFlatVarying("innerRRect", &fInnerRRect, kMedium_GrSLPrecision); |
| 583 } |
| 584 } |
| 585 |
| 586 void GrGLSLInstanceProcessor::BackendNonAA::setupInnerRect(GrGLSLVertexBuilder* v) { |
| 587 if (fInnerRRect.vsOut()) { |
| 588 v->codeAppendf("%s = vec4(1);", fInnerRRect.vsOut()); |
| 589 } |
| 590 } |
| 591 |
| 592 void GrGLSLInstanceProcessor::BackendNonAA::setupInnerOval(GrGLSLVertexBuilder* v) { |
| 593 if (fInnerRRect.vsOut()) { |
| 594 v->codeAppendf("%s = vec4(0, 0, 1, 1);", fInnerRRect.vsOut()); |
| 595 } |
| 596 } |
| 597 |
| 598 void GrGLSLInstanceProcessor::BackendNonAA::onSetupInnerRRect(GrGLSLVertexBuilder* v) { |
| 599 v->codeAppendf("%s = vec4(1.0 - innerRadii, 1.0 / innerRadii);", fInnerRRect.vsOut()); |
| 600 } |
| 601 |
| 602 void GrGLSLInstanceProcessor::BackendNonAA::onEmitCode(GrGLSLVertexBuilder*, |
| 603 GrGLSLPPFragmentBuilder* f, |
| 604 const char* outCoverage, |
| 605 const char* outColor) { |
| 606 const char* dropFragment = nullptr; |
| 607 if (!fBatchInfo.fCannotDiscard) { |
| 608 dropFragment = "discard"; |
| 609 } else if (fModifiesCoverage) { |
| 610 f->appendPrecisionModifier(kLow_GrSLPrecision); |
| 611 f->codeAppend ("float covered = 1.0;"); |
| 612 dropFragment = "covered = 0.0"; |
| 613 } else if (fModifiesColor) { |
| 614 f->appendPrecisionModifier(kLow_GrSLPrecision); |
| 615 f->codeAppendf("vec4 color = %s;", fColor.fsIn()); |
| 616 dropFragment = "color = vec4(0)"; |
| 617 } |
| 618 if (fTriangleIsArc.fsIn()) { |
| 619 SkASSERT(dropFragment); |
| 620 f->appendPrecisionModifier(kLow_GrSLPrecision); |
| 621 f->codeAppendf("if (%s != 0 && dot(%s, %s) > 1.0) %s;", |
| 622 fTriangleIsArc.fsIn(), fArcCoords.fsIn(), fArcCoords.fsIn(), dropFragment); |
| 623 } |
| 624 if (fBatchInfo.fInnerShapeTypes) { |
| 625 SkASSERT(dropFragment); |
| 626 f->codeAppendf("// Inner shape.\n"); |
| 627 if (kRect_ShapeFlag == fBatchInfo.fInnerShapeTypes) { |
| 628 f->codeAppendf("if (all(lessThanEqual(abs(%s), vec2(1)))) %s;", |
| 629 fInnerShapeCoords.fsIn(), dropFragment); |
| 630 } else if (kOval_ShapeFlag == fBatchInfo.fInnerShapeTypes) { |
| 631 f->codeAppendf("if ((dot(%s, %s) <= 1.0)) %s;", |
| 632 fInnerShapeCoords.fsIn(), fInnerShapeCoords.fsIn(), dropFragment); |
| 633 } else { |
| 634 f->codeAppendf("if (all(lessThan(abs(%s), vec2(1)))) {", fInnerShapeCoords.fsIn()); |
| 635 f->codeAppendf( "vec2 distanceToArcEdge = abs(%s) - %s.xy;", |
| 636 fInnerShapeCoords.fsIn(), fInnerRRect.fsIn()); |
| 637 f->codeAppend ( "if (any(lessThan(distanceToArcEdge, vec2(0)))) {"); |
| 638 f->codeAppendf( "%s;", dropFragment); |
| 639 f->codeAppend ( "} else {"); |
| 640 f->codeAppendf( "vec2 rrectCoords = distanceToArcEdge * %s.zw;", |
| 641 fInnerRRect.fsIn()); |
| 642 f->codeAppend ( "if (dot(rrectCoords, rrectCoords) <= 1.0) {"); |
| 643 f->codeAppendf( "%s;", dropFragment); |
| 644 f->codeAppend ( "}"); |
| 645 f->codeAppend ( "}"); |
| 646 f->codeAppend ("}"); |
| 647 } |
| 648 } |
| 649 if (fModifiesCoverage) { |
| 650 f->codeAppendf("%s = vec4(covered);", outCoverage); |
| 651 } else if (fModifiesColor) { |
| 652 f->codeAppendf("%s = color;", outColor); |
| 653 } |
| 654 } |
| 655 |
| 656 //////////////////////////////////////////////////////////////////////////////////////////////////// |
| 657 |
| 658 class GrGLSLInstanceProcessor::BackendCoverage : public Backend { |
| 659 public: |
| 660 BackendCoverage(BatchInfo batchInfo, const VertexInputs& inputs) |
| 661 : INHERITED(batchInfo, inputs), |
| 662 fColorTimesCoverage(kVec4f_GrSLType), |
| 663 fDistanceToEdge(kFloat_GrSLType), |
| 664 fEllipseCoords(kVec2f_GrSLType), |
| 665 fEllipseName(kVec2f_GrSLType), |
| 666 fBloatedRadius(kFloat_GrSLType), |
| 667 fDistanceToInnerEdge(kVec2f_GrSLType), |
| 668 fInnerShapeBloatedHalfSize(kVec2f_GrSLType), |
| 669 fInnerEllipseCoords(kVec2f_GrSLType), |
| 670 fInnerEllipseName(kVec2f_GrSLType) { |
| 671 fShapeIsCircle = !fBatchInfo.fNonSquare && !(fBatchInfo.fShapeTypes & kRRect_ShapesMask); |
| 672 fTweakAlphaForCoverage = !fBatchInfo.fCannotTweakAlphaForCoverage && |
| 673 !fBatchInfo.fInnerShapeTypes; |
| 674 fModifiesCoverage = !fTweakAlphaForCoverage; |
| 675 fModifiesColor = fTweakAlphaForCoverage; |
| 676 fModifiedShapeCoords = "bloatedShapeCoords"; |
| 677 } |
| 678 |
| 679 private: |
| 680 void onInit(GrGLSLVaryingHandler*, GrGLSLVertexBuilder*) override; |
| 681 void setupRect(GrGLSLVertexBuilder*) override; |
| 682 void setupOval(GrGLSLVertexBuilder*) override; |
| 683 void adjustRRectVertices(GrGLSLVertexBuilder*) override; |
| 684 void onSetupRRect(GrGLSLVertexBuilder*) override; |
| 685 |
| 686 void onInitInnerShape(GrGLSLVaryingHandler*, GrGLSLVertexBuilder*) override; |
| 687 void setupInnerRect(GrGLSLVertexBuilder*) override; |
| 688 void setupInnerOval(GrGLSLVertexBuilder*) override; |
| 689 void onSetupInnerRRect(GrGLSLVertexBuilder*) override; |
| 690 |
| 691 void onEmitCode(GrGLSLVertexBuilder*, GrGLSLPPFragmentBuilder*, const char* outCoverage, |
| 692 const char* outColor) override; |
| 693 |
| 694 void emitRect(GrGLSLPPFragmentBuilder*, const char* outCoverage, const char* outColor); |
| 695 void emitCircle(GrGLSLPPFragmentBuilder*, const char* outCoverage); |
| 696 void emitArc(GrGLSLPPFragmentBuilder* f, const char* ellipseCoords, const char* ellipseName, |
| 697 bool ellipseCoordsNeedClamp, bool ellipseCoordsMayBeNegative, |
| 698 const char* outCoverage); |
| 699 void emitInnerRect(GrGLSLPPFragmentBuilder*, const char* outCoverage); |
| 700 |
| 701 GrGLSLVertToFrag fColorTimesCoverage; |
| 702 GrGLSLVertToFrag fDistanceToEdge; |
| 703 GrGLSLVertToFrag fEllipseCoords; |
| 704 GrGLSLVertToFrag fEllipseName; |
| 705 GrGLSLVertToFrag fBloatedRadius; |
| 706 GrGLSLVertToFrag fDistanceToInnerEdge; |
| 707 GrGLSLVertToFrag fInnerShapeBloatedHalfSize; |
| 708 GrGLSLVertToFrag fInnerEllipseCoords; |
| 709 GrGLSLVertToFrag fInnerEllipseName; |
| 710 bool fShapeIsCircle; |
| 711 bool fTweakAlphaForCoverage; |
| 712 |
| 713 typedef Backend INHERITED; |
| 714 }; |
| 715 |
| 716 void GrGLSLInstanceProcessor::BackendCoverage::onInit(GrGLSLVaryingHandler* varyingHandler, |
| 717 GrGLSLVertexBuilder* v) { |
| 718 v->codeAppend ("mat2 shapeTransposeMatrix = transpose(mat2(shapeMatrix));"); |
| 719 v->codeAppend ("vec2 shapeHalfSize = vec2(length(shapeTransposeMatrix[0]), " |
| 720 "length(shapeTransposeMatrix[1]));"); |
| 721 v->codeAppend ("vec2 bloat = 0.5 / shapeHalfSize;"); |
| 722 v->codeAppendf("bloatedShapeCoords = %s * (1.0 + bloat);", |
| 723 fInputs.attr(kShapeCoords_AttribIdx)); |
| 724 |
| 725 if (kOval_ShapeFlag != fBatchInfo.fShapeTypes) { |
| 726 if (fTweakAlphaForCoverage) { |
| 727 varyingHandler->addVarying("colorTimesCoverage", &fColorTimesCoverage, |
| 728 kLow_GrSLPrecision); |
| 729 if (kRect_ShapeFlag == fBatchInfo.fShapeTypes) { |
| 730 fColor = fColorTimesCoverage; |
| 731 } |
| 732 } else { |
| 733 varyingHandler->addVarying("distanceToEdge", &fDistanceToEdge, kLow_GrSLPrecision); |
| 734 } |
| 735 v->codeAppend("float distanceToEdge = 0.0;"); |
| 736 } |
| 737 if (kRect_ShapeFlag != fBatchInfo.fShapeTypes) { |
| 738 varyingHandler->addFlatVarying("triangleIsArc", &fTriangleIsArc, kHigh_GrSLPrecision); |
| 739 if (!fShapeIsCircle) { |
| 740 varyingHandler->addVarying("ellipseCoords", &fEllipseCoords, kHigh_GrSLPrecision); |
| 741 varyingHandler->addFlatVarying("ellipseName", &fEllipseName, kHigh_GrSLPrecision); |
| 742 } else { |
| 743 varyingHandler->addVarying("circleCoords", &fEllipseCoords, kMedium_GrSLPrecision); |
| 744 varyingHandler->addFlatVarying("bloatedRadius", &fBloatedRadius, kMedium_GrSLPrecision); |
| 745 } |
| 746 } |
| 747 } |
| 748 |
| 749 void GrGLSLInstanceProcessor::BackendCoverage::setupRect(GrGLSLVertexBuilder* v) { |
| 750 // Offset the inner and outer rects by one pixel. Inner vs outer is indicated by coordAttrs. |
| 751 v->codeAppendf("vec2 rectBloat = (%s != 0) ? bloat : -bloat;", |
| 752 fInputs.attr(kVertexAttrs_AttribIdx)); |
| 753 v->codeAppendf("bloatedShapeCoords = %s * max(vec2(1.0 + rectBloat), vec2(0));", |
| 754 fInputs.attr(kShapeCoords_AttribIdx)); |
| 755 |
| 756 // The geometry is laid out in such a way that distanceToEdge will be 0 and 1 on the vertices, |
| 757 // but we still need to recompute this value because when the rect gets thinner than one pixel, |
| 758 // the interior edge of the border will necessarily clamp. |
| 759 v->codeAppend ("vec2 d = shapeHalfSize + 0.5 - abs(bloatedShapeCoords) * shapeHalfSize;"); |
| 760 v->codeAppend ("distanceToEdge = min(d.x, d.y);"); |
| 761 |
| 762 if (fTriangleIsArc.vsOut()) { |
| 763 v->codeAppendf("%s = 0;", fTriangleIsArc.vsOut()); |
| 764 } |
| 765 } |
| 766 |
| 767 void GrGLSLInstanceProcessor::BackendCoverage::setupOval(GrGLSLVertexBuilder* v) { |
| 768 // Offset the inner and outer octagons by one pixel. Inner vs outer is indicated by coordAttrs. |
| 769 v->codeAppendf("vec2 ovalBloat = (%s != 0) ? bloat : -bloat;", |
| 770 fInputs.attr(kVertexAttrs_AttribIdx)); |
| 771 v->codeAppendf("bloatedShapeCoords = %s * max(vec2(1.0 + ovalBloat), vec2(0));", |
| 772 fInputs.attr(kShapeCoords_AttribIdx)); |
| 773 v->codeAppendf("%s = bloatedShapeCoords * shapeHalfSize;", fEllipseCoords.vsOut()); |
| 774 if (fEllipseName.vsOut()) { |
| 775 v->codeAppendf("%s = 1.0 / (shapeHalfSize * shapeHalfSize);", fEllipseName.vsOut()); |
| 776 } |
| 777 if (fBloatedRadius.vsOut()) { |
| 778 SkASSERT(fShapeIsCircle); |
| 779 v->codeAppendf("%s = shapeHalfSize.x + bloat.x;", fBloatedRadius.vsOut()); |
| 780 } |
| 781 if (fTriangleIsArc.vsOut()) { |
| 782 v->codeAppendf("%s = int(%s != 0);", |
| 783 fTriangleIsArc.vsOut(), fInputs.attr(kVertexAttrs_AttribIdx)); |
| 784 } |
| 785 if (fColorTimesCoverage.vsOut() || fDistanceToEdge.vsOut()) { |
| 786 v->codeAppendf("distanceToEdge = 1.0;"); |
| 787 } |
| 788 } |
| 789 |
| 790 void GrGLSLInstanceProcessor::BackendCoverage::adjustRRectVertices(GrGLSLVertexBuilder* v) { |
| 791 // We try to let the AA borders line up with the arc edges on their particular side, but we |
| 792 // can't allow them to get closer than one half pixel to the edge or they might overlap with |
| 793 // their neighboring border. |
| 794 v->codeAppend("vec2 innerEdge = max(1.0 - bloat, vec2(0));"); |
| 795 v->codeAppend ("vec2 borderEdge = cornerSign * clamp(1.0 - radii, -innerEdge, innerEdge);"); |
| 796 // 0.5 is a special value that indicates this vertex is an arc edge. |
| 797 v->codeAppendf("if (abs(%s.x) == 0.5)" |
| 798 "%s.x = borderEdge.x;", |
| 799 fInputs.attr(kShapeCoords_AttribIdx), fModifiedShapeCoords); |
| 800 v->codeAppendf("if (abs(%s.y) == 0.5)" |
| 801 "%s.y = borderEdge.y;", |
| 802 fInputs.attr(kShapeCoords_AttribIdx), fModifiedShapeCoords); |
| 803 |
| 804 // Adjust the interior border vertices to make the border one pixel wide. 0.75 is a special |
| 805 // value to indicate these points. |
| 806 v->codeAppendf("if (abs(%s.x) == 0.75) " |
| 807 "%s.x = cornerSign.x * innerEdge.x;", |
| 808 fInputs.attr(kShapeCoords_AttribIdx), fModifiedShapeCoords); |
| 809 v->codeAppendf("if (abs(%s.y) == 0.75) " |
| 810 "%s.y = cornerSign.y * innerEdge.y;", |
| 811 fInputs.attr(kShapeCoords_AttribIdx), fModifiedShapeCoords); |
| 812 } |
| 813 |
| 814 void GrGLSLInstanceProcessor::BackendCoverage::onSetupRRect(GrGLSLVertexBuilder* v) { |
| 815 // The geometry is laid out in such a way that distanceToEdge will be 0 and 1 on the vertices, |
| 816 // but we still need to recompute this value because when the rrect gets thinner than one pixel, |
| 817 // the interior edge of the border will necessarily clamp. |
| 818 v->codeAppend("vec2 d = shapeHalfSize + 0.5 - abs(bloatedShapeCoords) * shapeHalfSize;"); |
| 819 v->codeAppend("distanceToEdge = min(d.x, d.y);"); |
| 820 |
| 821 SkASSERT(!fShapeIsCircle); |
| 822 // The AA border does not get closer than one half pixel to the edge of the rect, so to get a |
| 823 // smooth transition from flat edge to arc, we don't allow the radii to be smaller than one half |
| 824 // pixel. (We don't worry about the transition on the opposite side when a radius is so large |
| 825 // that the border clamped on that side.) |
| 826 v->codeAppendf("vec2 clampedRadii = max(radii, bloat);"); |
| 827 v->codeAppendf("%s = (cornerSign * %s + clampedRadii - vec2(1)) * shapeHalfSize;", |
| 828 fEllipseCoords.vsOut(), fModifiedShapeCoords); |
| 829 v->codeAppendf("%s = 1.0 / (clampedRadii * clampedRadii * shapeHalfSize * shapeHalfSize);", |
| 830 fEllipseName.vsOut()); |
| 831 } |
| 832 |
| 833 void |
| 834 GrGLSLInstanceProcessor::BackendCoverage::onInitInnerShape(GrGLSLVaryingHandler* varyingHandler, |
| 835 GrGLSLVertexBuilder* v) { |
| 836 v->codeAppend("vec2 innerShapeHalfSize = shapeHalfSize / innerCoords.xy;"); |
| 837 |
| 838 if (kOval_ShapeFlag == fBatchInfo.fInnerShapeTypes) { |
| 839 varyingHandler->addVarying("innerEllipseCoords", &fInnerEllipseCoords, |
| 840 kMedium_GrSLPrecision); |
| 841 varyingHandler->addFlatVarying("innerEllipseName", &fInnerEllipseName, |
| 842 kMedium_GrSLPrecision); |
| 843 } else { |
| 844 varyingHandler->addVarying("distanceToInnerEdge", &fDistanceToInnerEdge, |
| 845 kMedium_GrSLPrecision); |
| 846 varyingHandler->addFlatVarying("innerShapeBloatedHalfSize", &fInnerShapeBloatedHalfSize, |
| 847 kMedium_GrSLPrecision); |
| 848 if (kRect_ShapeFlag != fBatchInfo.fInnerShapeTypes) { |
| 849 varyingHandler->addVarying("innerShapeCoords", &fInnerShapeCoords, kHigh_GrSLPrecision); |
| 850 varyingHandler->addFlatVarying("innerEllipseName", &fInnerEllipseName, |
| 851 kMedium_GrSLPrecision); |
| 852 varyingHandler->addFlatVarying("innerRRect", &fInnerRRect, kHigh_GrSLPrecision); |
| 853 } |
| 854 } |
| 855 } |
| 856 |
| 857 void GrGLSLInstanceProcessor::BackendCoverage::setupInnerRect(GrGLSLVertexBuilder* v) { |
| 858 if (fInnerRRect.vsOut()) { |
| 859 // The fragment shader will generalize every inner shape as a round rect. Since this one |
| 860 // is a rect, we simply emit bogus parameters for the round rect (negative radii) that |
| 861 // ensure the fragment shader always takes the "emitRect" codepath. |
| 862 v->codeAppendf("%s = vec4(2.0 * (inner.zw - inner.xy) / (outer.zw - outer.xy), vec2(0));", |
| 863 fInnerRRect.vsOut()); |
| 864 } |
| 865 } |
| 866 |
| 867 void GrGLSLInstanceProcessor::BackendCoverage::setupInnerOval(GrGLSLVertexBuilder* v) { |
| 868 v->codeAppendf("%s = 1.0 / (innerShapeHalfSize * innerShapeHalfSize);", |
| 869 fInnerEllipseName.vsOut()); |
| 870 if (fInnerEllipseCoords.vsOut()) { |
| 871 v->codeAppendf("%s = innerShapeCoords * innerShapeHalfSize;", fInnerEllipseCoords.vsOut()); |
| 872 } |
| 873 if (fInnerRRect.vsOut()) { |
| 874 v->codeAppendf("%s = vec4(0, 0, innerShapeHalfSize);", fInnerRRect.vsOut()); |
| 875 } |
| 876 } |
| 877 |
| 878 void GrGLSLInstanceProcessor::BackendCoverage::onSetupInnerRRect(GrGLSLVertexBuilder* v) { |
| 879 // The distance to ellipse formula doesn't work well when the radii are less than half a pixel. |
| 880 v->codeAppend ("innerRadii = max(innerRadii, bloat);"); |
| 881 v->codeAppendf("%s = 1.0 / (innerRadii * innerRadii * innerShapeHalfSize * " |
| 882 "innerShapeHalfSize);", |
| 883 fInnerEllipseName.vsOut()); |
| 884 v->codeAppendf("%s = vec4(1.0 - innerRadii, innerShapeHalfSize);", fInnerRRect.vsOut()); |
| 885 } |
| 886 |
| 887 void GrGLSLInstanceProcessor::BackendCoverage::onEmitCode(GrGLSLVertexBuilder* v, |
| 888 GrGLSLPPFragmentBuilder* f, |
| 889 const char* outCoverage, |
| 890 const char* outColor) { |
| 891 if (fColorTimesCoverage.vsOut()) { |
| 892 v->codeAppendf("%s = %s * distanceToEdge;", |
| 893 fColorTimesCoverage.vsOut(), fInputs.attr(kColor_AttribIdx)); |
| 894 } |
| 895 if (fDistanceToEdge.vsOut()) { |
| 896 v->codeAppendf("%s = distanceToEdge;", fDistanceToEdge.vsOut()); |
| 897 } |
| 898 |
| 899 SkString coverage("float coverage"); |
| 900 if (f->getProgramBuilder()->glslCaps()->usesPrecisionModifiers()) { |
| 901 coverage.prependf("lowp "); |
| 902 } |
| 903 if (fBatchInfo.fInnerShapeTypes || (!fTweakAlphaForCoverage && fTriangleIsArc.fsIn())) { |
| 904 f->codeAppendf("%s;", coverage.c_str()); |
| 905 coverage = "coverage"; |
| 906 } |
| 907 if (fTriangleIsArc.fsIn()) { |
| 908 f->codeAppendf("if (%s == 0) {", fTriangleIsArc.fsIn()); |
| 909 this->emitRect(f, coverage.c_str(), outColor); |
| 910 f->codeAppend ("} else {"); |
| 911 if (fShapeIsCircle) { |
| 912 this->emitCircle(f, coverage.c_str()); |
| 913 } else { |
| 914 bool ellipseCoordsMayBeNegative = SkToBool(fBatchInfo.fShapeTypes & kOval_ShapeFlag); |
| 915 this->emitArc(f, fEllipseCoords.fsIn(), fEllipseName.fsIn(), |
| 916 true /*ellipseCoordsNeedClamp*/, ellipseCoordsMayBeNegative, |
| 917 coverage.c_str()); |
| 918 } |
| 919 if (fTweakAlphaForCoverage) { |
| 920 f->codeAppendf("%s = %s * coverage;", outColor, fColor.fsIn()); |
| 921 } |
| 922 f->codeAppend ("}"); |
| 923 } else { |
| 924 this->emitRect(f, coverage.c_str(), outColor); |
| 925 } |
| 926 |
| 927 if (fBatchInfo.fInnerShapeTypes) { |
| 928 f->codeAppendf("// Inner shape.\n"); |
| 929 SkString innerCoverage("float innerCoverage"); |
| 930 if (f->getProgramBuilder()->glslCaps()->usesPrecisionModifiers()) { |
| 931 innerCoverage.prependf("lowp "); |
| 932 } |
| 933 if (kOval_ShapeFlag == fBatchInfo.fInnerShapeTypes) { |
| 934 this->emitArc(f, fInnerEllipseCoords.fsIn(), fInnerEllipseName.fsIn(), |
| 935 true /*ellipseCoordsNeedClamp*/, true /*ellipseCoordsMayBeNegative*/, |
| 936 innerCoverage.c_str()); |
| 937 } else { |
| 938 v->codeAppendf("%s = innerShapeCoords * innerShapeHalfSize;", |
| 939 fDistanceToInnerEdge.vsOut()); |
| 940 v->codeAppendf("%s = innerShapeHalfSize + 0.5;", fInnerShapeBloatedHalfSize.vsOut()); |
| 941 |
| 942 if (kRect_ShapeFlag == fBatchInfo.fInnerShapeTypes) { |
| 943 this->emitInnerRect(f, innerCoverage.c_str()); |
| 944 } else { |
| 945 f->appendPrecisionModifier(kLow_GrSLPrecision); |
| 946 f->codeAppend ("float innerCoverage = 0.0;"); |
| 947 f->codeAppendf("vec2 distanceToArcEdge = abs(%s) - %s.xy;", |
| 948 fInnerShapeCoords.fsIn(), fInnerRRect.fsIn()); |
| 949 f->codeAppend ("if (any(lessThan(distanceToArcEdge, vec2(1e-5)))) {"); |
| 950 this->emitInnerRect(f, "innerCoverage"); |
| 951 f->codeAppend ("} else {"); |
| 952 f->codeAppendf( "vec2 ellipseCoords = distanceToArcEdge * %s.zw;", |
| 953 fInnerRRect.fsIn()); |
| 954 this->emitArc(f, "ellipseCoords", fInnerEllipseName.fsIn(), |
| 955 false /*ellipseCoordsNeedClamp*/, |
| 956 false /*ellipseCoordsMayBeNegative*/, "innerCoverage"); |
| 957 f->codeAppend ("}"); |
| 958 } |
| 959 } |
| 960 f->codeAppendf("%s = vec4(max(coverage - innerCoverage, 0));", outCoverage); |
| 961 } else if (!fTweakAlphaForCoverage) { |
| 962 f->codeAppendf("%s = vec4(coverage);", outCoverage); |
| 963 } |
| 964 } |
| 965 |
| 966 void GrGLSLInstanceProcessor::BackendCoverage::emitRect(GrGLSLPPFragmentBuilder* f, |
| 967 const char* outCoverage, |
| 968 const char* outColor) { |
| 969 if (fColorTimesCoverage.fsIn()) { |
| 970 f->codeAppendf("%s = %s;", outColor, fColorTimesCoverage.fsIn()); |
| 971 } else if (fTweakAlphaForCoverage) { |
| 972 // We are drawing just ovals. The interior rect always has 100% coverage. |
| 973 f->codeAppendf("%s = %s;", outColor, fColor.fsIn()); |
| 974 } else if (fDistanceToEdge.fsIn()) { |
| 975 f->codeAppendf("%s = %s;", outCoverage, fDistanceToEdge.fsIn()); |
| 976 } else { |
| 977 f->codeAppendf("%s = 1.0;", outCoverage); |
| 978 } |
| 979 } |
| 980 |
| 981 void GrGLSLInstanceProcessor::BackendCoverage::emitCircle(GrGLSLPPFragmentBuilder* f, |
| 982 const char* outCoverage) { |
| 983 // TODO: circleCoords = max(circleCoords, 0) if we decide to do this optimization on rrects. |
| 984 SkASSERT(!(kRRect_ShapesMask & fBatchInfo.fShapeTypes)); |
| 985 f->codeAppendf("float distanceToEdge = %s - length(%s);", |
| 986 fBloatedRadius.fsIn(), fEllipseCoords.fsIn()); |
| 987 f->codeAppendf("%s = clamp(distanceToEdge, 0.0, 1.0);", outCoverage); |
| 988 } |
| 989 |
| 990 void GrGLSLInstanceProcessor::BackendCoverage::emitArc(GrGLSLPPFragmentBuilder* f, |
| 991 const char* ellipseCoords, |
| 992 const char* ellipseName, |
| 993 bool ellipseCoordsNeedClamp, |
| 994 bool ellipseCoordsMayBeNegative, |
| 995 const char* outCoverage) { |
| 996 SkASSERT(!ellipseCoordsMayBeNegative || ellipseCoordsNeedClamp); |
| 997 if (ellipseCoordsNeedClamp) { |
| 998 // This serves two purposes: |
| 999 // - To restrict the arcs of rounded rects to their positive quadrants. |
| 1000 // - To avoid inversesqrt(0) in the ellipse formula. |
| 1001 if (ellipseCoordsMayBeNegative) { |
| 1002 f->codeAppendf("vec2 ellipseClampedCoords = max(abs(%s), vec2(1e-4));", ellipseCoords); |
| 1003 } else { |
| 1004 f->codeAppendf("vec2 ellipseClampedCoords = max(%s, vec2(1e-4));", ellipseCoords); |
| 1005 } |
| 1006 ellipseCoords = "ellipseClampedCoords"; |
| 1007 } |
| 1008 // ellipseCoords are in pixel space and ellipseName is 1 / rx^2, 1 / ry^2. |
| 1009 f->codeAppendf("vec2 Z = %s * %s;", ellipseCoords, ellipseName); |
| 1010 // implicit is the evaluation of (x/rx)^2 + (y/ry)^2 - 1. |
| 1011 f->codeAppendf("float implicit = dot(Z, %s) - 1.0;", ellipseCoords); |
| 1012 // gradDot is the squared length of the gradient of the implicit. |
| 1013 f->codeAppendf("float gradDot = 4.0 * dot(Z, Z);"); |
| 1014 f->appendPrecisionModifier(kLow_GrSLPrecision); |
| 1015 f->codeAppend ("float approxDist = implicit * inversesqrt(gradDot);"); |
| 1016 f->codeAppendf("%s = clamp(0.5 - approxDist, 0.0, 1.0);", outCoverage); |
| 1017 } |
| 1018 |
| 1019 void GrGLSLInstanceProcessor::BackendCoverage::emitInnerRect(GrGLSLPPFragmentBuilder* f, |
| 1020 const char* outCoverage) { |
| 1021 f->appendPrecisionModifier(kLow_GrSLPrecision); |
| 1022 f->codeAppendf("vec2 c = %s - abs(%s);", |
| 1023 fInnerShapeBloatedHalfSize.fsIn(), fDistanceToInnerEdge.fsIn()); |
| 1024 f->codeAppendf("%s = clamp(min(c.x, c.y), 0.0, 1.0);", outCoverage); |
| 1025 } |
| 1026 |
| 1027 //////////////////////////////////////////////////////////////////////////////////////////////////// |
| 1028 |
| 1029 class GrGLSLInstanceProcessor::BackendMultisample : public Backend { |
| 1030 public: |
| 1031 BackendMultisample(BatchInfo batchInfo, const VertexInputs& inputs, int effectiveSampleCnt) |
| 1032 : INHERITED(batchInfo, inputs), |
| 1033 fEffectiveSampleCnt(effectiveSampleCnt), |
| 1034 fShapeCoords(kVec2f_GrSLType), |
| 1035 fShapeInverseMatrix(kMat22f_GrSLType), |
| 1036 fFragShapeHalfSpan(kVec2f_GrSLType), |
| 1037 fArcTest(kVec2f_GrSLType), |
| 1038 fArcInverseMatrix(kMat22f_GrSLType), |
| 1039 fFragArcHalfSpan(kVec2f_GrSLType), |
| 1040 fEarlyAccept(kInt_GrSLType), |
| 1041 fInnerShapeInverseMatrix(kMat22f_GrSLType), |
| 1042 fFragInnerShapeHalfSpan(kVec2f_GrSLType) { |
| 1043 fRectTrianglesMaySplit = fBatchInfo.fHasPerspective; |
| 1044 fNeedsNeighborRadii = this->isMixedSampled() && !fBatchInfo.fHasPerspective; |
| 1045 } |
| 1046 |
| 1047 private: |
| 1048 bool isMixedSampled() const { return kMixedSamples_AntialiasMode == fBatchInfo.fAntialiasMode; } |
| 1049 |
| 1050 void onInit(GrGLSLVaryingHandler*, GrGLSLVertexBuilder*) override; |
| 1051 void setupRect(GrGLSLVertexBuilder*) override; |
| 1052 void setupOval(GrGLSLVertexBuilder*) override; |
| 1053 void adjustRRectVertices(GrGLSLVertexBuilder*) override; |
| 1054 void onSetupRRect(GrGLSLVertexBuilder*) override; |
| 1055 |
| 1056 void onInitInnerShape(GrGLSLVaryingHandler*, GrGLSLVertexBuilder*) override; |
| 1057 void setupInnerRect(GrGLSLVertexBuilder*) override; |
| 1058 void setupInnerOval(GrGLSLVertexBuilder*) override; |
| 1059 void onSetupInnerRRect(GrGLSLVertexBuilder*) override; |
| 1060 |
| 1061 void onEmitCode(GrGLSLVertexBuilder*, GrGLSLPPFragmentBuilder*, const char*, |
| 1062 const char*) override; |
| 1063 |
| 1064 struct EmitShapeCoords { |
| 1065 const GrGLSLVarying* fVarying; |
| 1066 const char* fInverseMatrix; |
| 1067 const char* fFragHalfSpan; |
| 1068 }; |
| 1069 |
| 1070 struct EmitShapeOpts { |
| 1071 bool fIsTightGeometry; |
| 1072 bool fResolveMixedSamples; |
| 1073 bool fInvertCoverage; |
| 1074 }; |
| 1075 |
| 1076 void emitRect(GrGLSLPPFragmentBuilder*, const EmitShapeCoords&, const EmitShapeOpts&); |
| 1077 void emitArc(GrGLSLPPFragmentBuilder*, const EmitShapeCoords&, bool coordsMayBeNegative, |
| 1078 bool clampCoords, const EmitShapeOpts&); |
| 1079 void emitSimpleRRect(GrGLSLPPFragmentBuilder*, const EmitShapeCoords&, const char* rrect, |
| 1080 const EmitShapeOpts&); |
| 1081 void interpolateAtSample(GrGLSLPPFragmentBuilder*, const GrGLSLVarying&, const char* sampleIdx, |
| 1082 const char* interpolationMatrix); |
| 1083 void acceptOrRejectWholeFragment(GrGLSLPPFragmentBuilder*, bool inside, const EmitShapeOpts&); |
| 1084 void acceptCoverageMask(GrGLSLPPFragmentBuilder*, const char* shapeMask, const EmitShapeOpts&, |
| 1085 bool maybeSharedEdge = true); |
| 1086 |
| 1087 int fEffectiveSampleCnt; |
| 1088 bool fRectTrianglesMaySplit; |
| 1089 GrGLSLVertToFrag fShapeCoords; |
| 1090 GrGLSLVertToFrag fShapeInverseMatrix; |
| 1091 GrGLSLVertToFrag fFragShapeHalfSpan; |
| 1092 GrGLSLVertToFrag fArcTest; |
| 1093 GrGLSLVertToFrag fArcInverseMatrix; |
| 1094 GrGLSLVertToFrag fFragArcHalfSpan; |
| 1095 GrGLSLVertToFrag fEarlyAccept; |
| 1096 GrGLSLVertToFrag fInnerShapeInverseMatrix; |
| 1097 GrGLSLVertToFrag fFragInnerShapeHalfSpan; |
| 1098 SkString fSquareFun; |
| 1099 |
| 1100 typedef Backend INHERITED; |
| 1101 }; |
| 1102 |
| 1103 void GrGLSLInstanceProcessor::BackendMultisample::onInit(GrGLSLVaryingHandler* varyingHandler, |
| 1104 GrGLSLVertexBuilder* v) { |
| 1105 if (!this->isMixedSampled()) { |
| 1106 if (kRect_ShapeFlag != fBatchInfo.fShapeTypes) { |
| 1107 varyingHandler->addFlatVarying("triangleIsArc", &fTriangleIsArc, |
| 1108 kHigh_GrSLPrecision); |
| 1109 varyingHandler->addVarying("arcCoords", &fArcCoords, kHigh_GrSLPrecision); |
| 1110 if (!fBatchInfo.fHasPerspective) { |
| 1111 varyingHandler->addFlatVarying("arcInverseMatrix", &fArcInverseMatrix, |
| 1112 kHigh_GrSLPrecision); |
| 1113 varyingHandler->addFlatVarying("fragArcHalfSpan", &fFragArcHalfSpan, |
| 1114 kHigh_GrSLPrecision); |
| 1115 } |
| 1116 } else if (!fBatchInfo.fInnerShapeTypes) { |
| 1117 return; |
| 1118 } |
| 1119 } else { |
| 1120 varyingHandler->addVarying("shapeCoords", &fShapeCoords, kHigh_GrSLPrecision); |
| 1121 if (!fBatchInfo.fHasPerspective) { |
| 1122 varyingHandler->addFlatVarying("shapeInverseMatrix", &fShapeInverseMatrix, |
| 1123 kHigh_GrSLPrecision); |
| 1124 varyingHandler->addFlatVarying("fragShapeHalfSpan", &fFragShapeHalfSpan, |
| 1125 kHigh_GrSLPrecision); |
| 1126 } |
| 1127 if (fBatchInfo.fShapeTypes & kRRect_ShapesMask) { |
| 1128 varyingHandler->addVarying("arcCoords", &fArcCoords, kHigh_GrSLPrecision); |
| 1129 varyingHandler->addVarying("arcTest", &fArcTest, kHigh_GrSLPrecision); |
| 1130 if (!fBatchInfo.fHasPerspective) { |
| 1131 varyingHandler->addFlatVarying("arcInverseMatrix", &fArcInverseMatrix, |
| 1132 kHigh_GrSLPrecision); |
| 1133 varyingHandler->addFlatVarying("fragArcHalfSpan", &fFragArcHalfSpan, |
| 1134 kHigh_GrSLPrecision); |
| 1135 } |
| 1136 } else if (fBatchInfo.fShapeTypes & kOval_ShapeFlag) { |
| 1137 fArcCoords = fShapeCoords; |
| 1138 fArcInverseMatrix = fShapeInverseMatrix; |
| 1139 fFragArcHalfSpan = fFragShapeHalfSpan; |
| 1140 if (fBatchInfo.fShapeTypes & kRect_ShapeFlag) { |
| 1141 varyingHandler->addFlatVarying("triangleIsArc", &fTriangleIsArc, |
| 1142 kHigh_GrSLPrecision); |
| 1143 } |
| 1144 } |
| 1145 if (kRect_ShapeFlag != fBatchInfo.fShapeTypes) { |
| 1146 v->definef("SAMPLE_MASK_ALL", "0x%x", (1 << fEffectiveSampleCnt) - 1); |
| 1147 varyingHandler->addFlatVarying("earlyAccept", &fEarlyAccept, kHigh_GrSLPrecision); |
| 1148 } |
| 1149 } |
| 1150 if (!fBatchInfo.fHasPerspective) { |
| 1151 v->codeAppend("mat2 shapeInverseMatrix = inverse(mat2(shapeMatrix));"); |
| 1152 v->codeAppend("vec2 fragShapeSpan = abs(vec4(shapeInverseMatrix).xz) + " |
| 1153 "abs(vec4(shapeInverseMatrix).yw);"); |
| 1154 } |
| 1155 } |
| 1156 |
| 1157 void GrGLSLInstanceProcessor::BackendMultisample::setupRect(GrGLSLVertexBuilder* v) { |
| 1158 if (fShapeCoords.vsOut()) { |
| 1159 v->codeAppendf("%s = %s;", fShapeCoords.vsOut(), this->outShapeCoords()); |
| 1160 } |
| 1161 if (fShapeInverseMatrix.vsOut()) { |
| 1162 v->codeAppendf("%s = shapeInverseMatrix;", fShapeInverseMatrix.vsOut()); |
| 1163 } |
| 1164 if (fFragShapeHalfSpan.vsOut()) { |
| 1165 v->codeAppendf("%s = 0.5 * fragShapeSpan;", fFragShapeHalfSpan.vsOut()); |
| 1166 } |
| 1167 if (fArcTest.vsOut()) { |
| 1168 // Pick a value that is not > 0. |
| 1169 v->codeAppendf("%s = vec2(0);", fArcTest.vsOut()); |
| 1170 } |
| 1171 if (fTriangleIsArc.vsOut()) { |
| 1172 v->codeAppendf("%s = 0;", fTriangleIsArc.vsOut()); |
| 1173 } |
| 1174 if (fEarlyAccept.vsOut()) { |
| 1175 v->codeAppendf("%s = SAMPLE_MASK_ALL;", fEarlyAccept.vsOut()); |
| 1176 } |
| 1177 } |
| 1178 |
| 1179 void GrGLSLInstanceProcessor::BackendMultisample::setupOval(GrGLSLVertexBuilder* v) { |
| 1180 v->codeAppendf("%s = abs(%s);", fArcCoords.vsOut(), this->outShapeCoords()); |
| 1181 if (fArcInverseMatrix.vsOut()) { |
| 1182 v->codeAppendf("vec2 s = sign(%s);", this->outShapeCoords()); |
| 1183 v->codeAppendf("%s = shapeInverseMatrix * mat2(s.x, 0, 0 , s.y);", |
| 1184 fArcInverseMatrix.vsOut()); |
| 1185 } |
| 1186 if (fFragArcHalfSpan.vsOut()) { |
| 1187 v->codeAppendf("%s = 0.5 * fragShapeSpan;", fFragArcHalfSpan.vsOut()); |
| 1188 } |
| 1189 if (fArcTest.vsOut()) { |
| 1190 // Pick a value that is > 0. |
| 1191 v->codeAppendf("%s = vec2(1);", fArcTest.vsOut()); |
| 1192 } |
| 1193 if (fTriangleIsArc.vsOut()) { |
| 1194 if (!this->isMixedSampled()) { |
| 1195 v->codeAppendf("%s = %s & 1;", |
| 1196 fTriangleIsArc.vsOut(), fInputs.attr(kVertexAttrs_AttribIdx)); |
| 1197 } else { |
| 1198 v->codeAppendf("%s = 1;", fTriangleIsArc.vsOut()); |
| 1199 } |
| 1200 } |
| 1201 if (fEarlyAccept.vsOut()) { |
| 1202 v->codeAppendf("%s = ~%s & SAMPLE_MASK_ALL;", |
| 1203 fEarlyAccept.vsOut(), fInputs.attr(kVertexAttrs_AttribIdx)); |
| 1204 } |
| 1205 } |
| 1206 |
| 1207 void GrGLSLInstanceProcessor::BackendMultisample::adjustRRectVertices(GrGLSLVertexBuilder* v) { |
| 1208 if (!this->isMixedSampled()) { |
| 1209 INHERITED::adjustRRectVertices(v); |
| 1210 return; |
| 1211 } |
| 1212 |
| 1213 if (!fBatchInfo.fHasPerspective) { |
| 1214 // For the mixed samples algorithm it's best to bloat the corner triangles a bit so that |
| 1215 // more of the pixels that cross into the arc region are completely inside the shared edges. |
| 1216 // We also snap to a regular rect if the radii shrink smaller than a pixel. |
| 1217 v->codeAppend ("vec2 midpt = 0.5 * (neighborRadii - radii);"); |
| 1218 v->codeAppend ("vec2 cornerSize = any(lessThan(radii, fragShapeSpan)) ? " |
| 1219 "vec2(0) : min(radii + 0.5 * fragShapeSpan, 1.0 - midpt);"); |
| 1220 } else { |
| 1221 // TODO: We could still bloat the corner triangle in the perspective case; we would just |
| 1222 // need to find the screen-space derivative of shape coords at this particular point. |
| 1223 v->codeAppend ("vec2 cornerSize = any(lessThan(radii, vec2(1e-3))) ? vec2(0) : radii;"); |
| 1224 } |
| 1225 |
| 1226 v->codeAppendf("if (abs(%s.x) == 0.5)" |
| 1227 "%s.x = cornerSign.x * (1.0 - cornerSize.x);", |
| 1228 fInputs.attr(kShapeCoords_AttribIdx), fModifiedShapeCoords); |
| 1229 v->codeAppendf("if (abs(%s.y) == 0.5)" |
| 1230 "%s.y = cornerSign.y * (1.0 - cornerSize.y);", |
| 1231 fInputs.attr(kShapeCoords_AttribIdx), fModifiedShapeCoords); |
| 1232 } |
| 1233 |
| 1234 void GrGLSLInstanceProcessor::BackendMultisample::onSetupRRect(GrGLSLVertexBuilder* v) { |
| 1235 if (fShapeCoords.vsOut()) { |
| 1236 v->codeAppendf("%s = %s;", fShapeCoords.vsOut(), this->outShapeCoords()); |
| 1237 } |
| 1238 if (fShapeInverseMatrix.vsOut()) { |
| 1239 v->codeAppendf("%s = shapeInverseMatrix;", fShapeInverseMatrix.vsOut()); |
| 1240 } |
| 1241 if (fFragShapeHalfSpan.vsOut()) { |
| 1242 v->codeAppendf("%s = 0.5 * fragShapeSpan;", fFragShapeHalfSpan.vsOut()); |
| 1243 } |
| 1244 if (fArcInverseMatrix.vsOut()) { |
| 1245 v->codeAppend ("vec2 s = cornerSign / radii;"); |
| 1246 v->codeAppendf("%s = shapeInverseMatrix * mat2(s.x, 0, 0, s.y);", |
| 1247 fArcInverseMatrix.vsOut()); |
| 1248 } |
| 1249 if (fFragArcHalfSpan.vsOut()) { |
| 1250 v->codeAppendf("%s = 0.5 * (abs(vec4(%s).xz) + abs(vec4(%s).yw));", |
| 1251 fFragArcHalfSpan.vsOut(), fArcInverseMatrix.vsOut(), |
| 1252 fArcInverseMatrix.vsOut()); |
| 1253 } |
| 1254 if (fArcTest.vsOut()) { |
| 1255 // The interior triangles are laid out as a fan. fArcTest is both distances from shared |
| 1256 // edges of a fan triangle to a point within that triangle. fArcTest is used to check if a |
| 1257 // fragment is too close to either shared edge, in which case we point sample the shape as a |
| 1258 // rect at that point in order to guarantee the mixed samples discard logic works correctly. |
| 1259 v->codeAppendf("%s = (cornerSize == vec2(0)) ? vec2(0) : " |
| 1260 "cornerSign * %s * mat2(1, cornerSize.x - 1.0, cornerSize
.y - 1.0, 1);", |
| 1261 fArcTest.vsOut(), fModifiedShapeCoords); |
| 1262 if (!fBatchInfo.fHasPerspective) { |
| 1263             // Shift the point at which distances to edges are measured from the center of the pixel
| 1264             // to the corner. This way the sign of fArcTest will quickly tell us whether a pixel
| 1265             // is completely inside the shared edge. Perspective mode will accomplish this same task
| 1266             // by finding the derivatives in the fragment shader.
| 1267             v->codeAppendf("%s -= 0.5 * (fragShapeSpan.yx * abs(radii - 1.0) + fragShapeSpan);",
| 1268 fArcTest.vsOut()); |
| 1269 } |
| 1270 } |
| 1271 if (fEarlyAccept.vsOut()) { |
| 1272 SkASSERT(this->isMixedSampled()); |
| 1273         v->codeAppendf("%s = all(equal(vec2(1), abs(%s))) ? 0 : SAMPLE_MASK_ALL;",
| 1274                        fEarlyAccept.vsOut(), fInputs.attr(kShapeCoords_AttribIdx));
| 1275 } |
| 1276 } |
| 1277 |
| 1278 void |
| 1279 GrGLSLInstanceProcessor::BackendMultisample::onInitInnerShape(GrGLSLVaryingHandler* varyingHandler,
| 1280                                                               GrGLSLVertexBuilder* v) {
| 1281     varyingHandler->addVarying("innerShapeCoords", &fInnerShapeCoords, kHigh_GrSLPrecision);
| 1282 if (kOval_ShapeFlag != fBatchInfo.fInnerShapeTypes && |
| 1283 kRect_ShapeFlag != fBatchInfo.fInnerShapeTypes) { |
| 1284         varyingHandler->addFlatVarying("innerRRect", &fInnerRRect, kHigh_GrSLPrecision);
| 1285 } |
| 1286 if (!fBatchInfo.fHasPerspective) { |
| 1287         varyingHandler->addFlatVarying("innerShapeInverseMatrix", &fInnerShapeInverseMatrix,
| 1288                                        kHigh_GrSLPrecision);
| 1289         v->codeAppendf("%s = shapeInverseMatrix * mat2(innerCoords.x, 0, 0, innerCoords.y);",
| 1290                        fInnerShapeInverseMatrix.vsOut());
| 1291         varyingHandler->addFlatVarying("fragInnerShapeHalfSpan", &fFragInnerShapeHalfSpan,
| 1292                                        kHigh_GrSLPrecision);
| 1293 v->codeAppendf("%s = 0.5 * fragShapeSpan * innerCoords.xy;", |
| 1294 fFragInnerShapeHalfSpan.vsOut()); |
| 1295 } |
| 1296 } |
| 1297 |
| 1298 void GrGLSLInstanceProcessor::BackendMultisample::setupInnerRect(GrGLSLVertexBuilder* v) {
| 1299     if (fInnerRRect.vsOut()) {
| 1300         // The fragment shader will generalize every inner shape as a round rect. Since this one
| 1301         // is a rect, we simply emit bogus parameters for the round rect (negative radii) that
| 1302         // ensure the fragment shader always takes the "sample as rect" codepath.
| 1303         v->codeAppendf("%s = vec4(2.0 * (inner.zw - inner.xy) / (outer.zw - outer.xy), vec2(0));",
| 1304 fInnerRRect.vsOut()); |
| 1305 } |
| 1306 } |
| 1307 |
| 1308 void GrGLSLInstanceProcessor::BackendMultisample::setupInnerOval(GrGLSLVertexBuilder* v) {
| 1309 if (fInnerRRect.vsOut()) { |
| 1310 v->codeAppendf("%s = vec4(0, 0, 1, 1);", fInnerRRect.vsOut()); |
| 1311 } |
| 1312 } |
| 1313 |
| 1314 void GrGLSLInstanceProcessor::BackendMultisample::onSetupInnerRRect(GrGLSLVertexBuilder* v) {
| 1315     // Avoid numeric instability by not allowing the inner radii to get smaller than 1/10th pixel.
| 1316     if (fFragInnerShapeHalfSpan.vsOut()) {
| 1317         v->codeAppendf("innerRadii = max(innerRadii, 2e-1 * %s);", fFragInnerShapeHalfSpan.vsOut());
| 1318     } else {
| 1319         v->codeAppend ("innerRadii = max(innerRadii, vec2(1e-4));");
| 1320     }
| 1321     v->codeAppendf("%s = vec4(1.0 - innerRadii, 1.0 / innerRadii);", fInnerRRect.vsOut());
| 1322 } |
| 1323 |
| 1324 void GrGLSLInstanceProcessor::BackendMultisample::onEmitCode(GrGLSLVertexBuilder*,
| 1325                                                              GrGLSLPPFragmentBuilder* f,
| 1326                                                              const char*, const char*) {
| 1327 f->define("SAMPLE_COUNT", fEffectiveSampleCnt); |
| 1328 if (this->isMixedSampled()) { |
| 1329 f->definef("SAMPLE_MASK_ALL", "0x%x", (1 << fEffectiveSampleCnt) - 1); |
| 1330 f->definef("SAMPLE_MASK_MSB", "0x%x", 1 << (fEffectiveSampleCnt - 1)); |
| 1331 } |
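|              // e.g. for an 8-sample render target these expand to SAMPLE_COUNT == 8,
|              // SAMPLE_MASK_ALL == 0xff, and SAMPLE_MASK_MSB == 0x80.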
| 1332 |
| 1333     if (kRect_ShapeFlag != (fBatchInfo.fShapeTypes | fBatchInfo.fInnerShapeTypes)) {
| 1334         GrGLSLShaderVar x("x", kVec2f_GrSLType, GrGLSLShaderVar::kNonArray, kHigh_GrSLPrecision);
| 1335         f->emitFunction(kFloat_GrSLType, "square", 1, &x, "return dot(x, x);", &fSquareFun);
| 1336 } |
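|          // fSquareFun now holds the name of an emitted GLSL helper along the lines of
|          //     float square(highp vec2 x) { return dot(x, x); }
|          // i.e. the squared length of x, which the code below compares against 1.0 to test points
|          // against the unit circle.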
| 1337 |
| 1338 const char* arcTest = fArcTest.fsIn(); |
| 1339 |
| 1340 if (fBatchInfo.fHasPerspective) { |
| 1341 if (fArcTest.fsIn()) { |
| 1342             f->enableFeature(GrGLSLPPFragmentBuilder::kStandardDerivatives_GLSLFeature);
| 1343 f->appendPrecisionModifier(kHigh_GrSLPrecision); |
| 1344 f->codeAppendf("vec2 arcTest = %s - 0.5 * fwidth(%s);", |
| 1345 fArcTest.fsIn(), fArcTest.fsIn()); |
| 1346 arcTest = "arcTest"; |
| 1347 } |
| 1348 if (fBatchInfo.fInnerShapeTypes) { |
| 1349             // For the inner shape we interpolate between samples without perspective. We take the
| 1350             // derivatives here before any mixed samples discard logic may occur.
| 1351             f->appendPrecisionModifier(kHigh_GrSLPrecision);
| 1352             f->codeAppendf("mat2 innerShapeInverseMatrix = transpose(mat2(dFdx(%s), -dFdy(%s)));",
| 1353 fInnerShapeCoords.fsIn(), fInnerShapeCoords.fsIn()); |
| 1354 f->appendPrecisionModifier(kHigh_GrSLPrecision); |
| 1355 f->codeAppendf("vec2 fragInnerShapeHalfSpan = 0.5 * fwidth(%s);", |
| 1356 fInnerShapeCoords.fsIn()); |
| 1357 } |
| 1358 } |
| 1359 |
| 1360 EmitShapeCoords shapeCoords; |
| 1361 shapeCoords.fVarying = &fShapeCoords; |
| 1362 shapeCoords.fInverseMatrix = fShapeInverseMatrix.fsIn(); |
| 1363 shapeCoords.fFragHalfSpan = fFragShapeHalfSpan.fsIn(); |
| 1364 |
| 1365 EmitShapeCoords arcCoords; |
| 1366 arcCoords.fVarying = &fArcCoords; |
| 1367 arcCoords.fInverseMatrix = fArcInverseMatrix.fsIn(); |
| 1368 arcCoords.fFragHalfSpan = fFragArcHalfSpan.fsIn(); |
| 1369     bool clampArcCoords = this->isMixedSampled() && (fBatchInfo.fShapeTypes & kRRect_ShapesMask);
| 1370 |
| 1371 EmitShapeOpts opts; |
| 1372 opts.fIsTightGeometry = true; |
| 1373 opts.fResolveMixedSamples = this->isMixedSampled(); |
| 1374 opts.fInvertCoverage = false; |
| 1375 |
| 1376 if (!this->isMixedSampled()) { |
| 1377 SkASSERT(!arcTest); |
| 1378 if (fTriangleIsArc.fsIn()) { |
| 1379 f->codeAppendf("if (%s != 0) {", fTriangleIsArc.fsIn()); |
| 1380 if (fBatchInfo.fHasPerspective) { |
| 1381                 // For regular MSAA, we interpolate between arc samples without perspective. Mixed
| 1382                 // samples can't do this optimization because it requires perspective-correct
| 1383                 // interpolation in order for its discard logic to work properly.
| 1384                 f->appendPrecisionModifier(kHigh_GrSLPrecision);
| 1385                 f->codeAppendf("mat2 arcInverseMatrix = transpose(mat2(dFdx(%s), -dFdy(%s)));",
| 1386                                fArcCoords.fsIn(), fArcCoords.fsIn());
| 1387                 f->appendPrecisionModifier(kHigh_GrSLPrecision);
| 1388                 f->codeAppendf("vec2 fragArcHalfSpan = 0.5 * fwidth(%s);", fArcCoords.fsIn());
| 1389 arcCoords.fInverseMatrix = "arcInverseMatrix"; |
| 1390 arcCoords.fFragHalfSpan = "fragArcHalfSpan"; |
| 1391 } |
| 1392 this->emitArc(f, arcCoords, false, clampArcCoords, opts); |
| 1393 |
| 1394 f->codeAppend ("}"); |
| 1395 } |
| 1396 } else { |
| 1397         const char* earlyAccept = fEarlyAccept.fsIn() ? fEarlyAccept.fsIn() : "SAMPLE_MASK_ALL";
| 1398         // If the sample mask is all set at this point it means we are inside an arc triangle.
| 1399 f->codeAppendf("if (gl_SampleMaskIn[0] == %s) {", earlyAccept); |
| 1400 f->overrideSampleCoverage(earlyAccept); |
| 1401 f->codeAppend ("} else {"); |
| 1402 if (arcTest) { |
| 1403 f->codeAppendf("if (gl_SampleMaskIn[0] == SAMPLE_MASK_ALL || " |
| 1404 "all(greaterThan(%s, vec2(0)))) {", arcTest); |
| 1405 this->emitArc(f, arcCoords, false, clampArcCoords, opts); |
| 1406 f->codeAppend ("} else {"); |
| 1407 this->emitRect(f, shapeCoords, opts); |
| 1408 f->codeAppend ("}"); |
| 1409 } else if (fTriangleIsArc.fsIn()) { |
| 1410 f->codeAppendf("if (%s == 0) {", fTriangleIsArc.fsIn()); |
| 1411 this->emitRect(f, shapeCoords, opts); |
| 1412 f->codeAppend ("} else {"); |
| 1413 this->emitArc(f, arcCoords, false, clampArcCoords, opts); |
| 1414 f->codeAppend ("}"); |
| 1415 } else if (fBatchInfo.fShapeTypes == kOval_ShapeFlag) { |
| 1416 this->emitArc(f, arcCoords, false, clampArcCoords, opts); |
| 1417 } else { |
| 1418 SkASSERT(fBatchInfo.fShapeTypes == kRect_ShapeFlag); |
| 1419 this->emitRect(f, shapeCoords, opts); |
| 1420 } |
| 1421 f->codeAppend ("}"); |
| 1422 } |
| 1423 |
| 1424 if (fBatchInfo.fInnerShapeTypes) { |
| 1425 f->codeAppendf("// Inner shape.\n"); |
| 1426 |
| 1427 EmitShapeCoords innerShapeCoords; |
| 1428 innerShapeCoords.fVarying = &fInnerShapeCoords; |
| 1429 if (!fBatchInfo.fHasPerspective) { |
| 1430 innerShapeCoords.fInverseMatrix = fInnerShapeInverseMatrix.fsIn(); |
| 1431 innerShapeCoords.fFragHalfSpan = fFragInnerShapeHalfSpan.fsIn(); |
| 1432 } else { |
| 1433 // These were defined above. |
| 1434 innerShapeCoords.fInverseMatrix = "innerShapeInverseMatrix"; |
| 1435 innerShapeCoords.fFragHalfSpan = "fragInnerShapeHalfSpan"; |
| 1436 } |
| 1437 |
| 1438 EmitShapeOpts innerOpts; |
| 1439 innerOpts.fIsTightGeometry = false; |
| 1440         innerOpts.fResolveMixedSamples = false; // Mixed samples are resolved in the outer shape.
| 1441 innerOpts.fInvertCoverage = true; |
| 1442 |
| 1443 if (kOval_ShapeFlag == fBatchInfo.fInnerShapeTypes) { |
| 1444 this->emitArc(f, innerShapeCoords, true, false, innerOpts); |
| 1445 } else { |
| 1446 f->codeAppendf("if (all(lessThan(abs(%s), 1.0 + %s))) {", |
| 1447                            fInnerShapeCoords.fsIn(), innerShapeCoords.fFragHalfSpan);
| 1448 if (kRect_ShapeFlag == fBatchInfo.fInnerShapeTypes) { |
| 1449 this->emitRect(f, innerShapeCoords, innerOpts); |
| 1450 } else { |
| 1451                 this->emitSimpleRRect(f, innerShapeCoords, fInnerRRect.fsIn(), innerOpts);
| 1452 } |
| 1453 f->codeAppend ("}"); |
| 1454 } |
| 1455 } |
| 1456 } |
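|      // Summary: the fragment code above handles the outer shape first (rect or arc, resolving mixed
|      // samples as needed), then, when an inner shape is present, emits it with inverted coverage so
|      // that the samples it covers are masked out of the outer coverage.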
| 1457 |
| 1458 void GrGLSLInstanceProcessor::BackendMultisample::emitRect(GrGLSLPPFragmentBuilder* f,
| 1459                                                            const EmitShapeCoords& coords,
| 1460                                                            const EmitShapeOpts& opts) {
| 1461 // Full MSAA doesn't need to do anything to draw a rect. |
| 1462 SkASSERT(!opts.fIsTightGeometry || opts.fResolveMixedSamples); |
| 1463 if (coords.fFragHalfSpan) { |
| 1464 f->codeAppendf("if (all(lessThanEqual(abs(%s), 1.0 - %s))) {", |
| 1465 coords.fVarying->fsIn(), coords.fFragHalfSpan); |
| 1466 // The entire pixel is inside the rect. |
| 1467 this->acceptOrRejectWholeFragment(f, true, opts); |
| 1468 f->codeAppend ("} else "); |
| 1469 if (opts.fIsTightGeometry && !fRectTrianglesMaySplit) { |
| 1470 f->codeAppendf("if (any(lessThan(abs(%s), 1.0 - %s))) {", |
| 1471 coords.fVarying->fsIn(), coords.fFragHalfSpan); |
| 1472             // The pixel falls on an edge of the rectangle and is known to not be on a shared edge.
| 1473 this->acceptCoverageMask(f, "gl_SampleMaskIn[0]", opts, false); |
| 1474 f->codeAppend ("} else"); |
| 1475 } |
| 1476 f->codeAppend ("{"); |
| 1477 } |
| 1478 f->codeAppend ("int rectMask = 0;"); |
| 1479 f->codeAppend ("for (int i = 0; i < SAMPLE_COUNT; i++) {"); |
| 1480 f->appendPrecisionModifier(kHigh_GrSLPrecision); |
| 1481 f->codeAppend ( "vec2 pt = "); |
| 1482 this->interpolateAtSample(f, *coords.fVarying, "i", coords.fInverseMatrix); |
| 1483 f->codeAppend ( ";"); |
| 1484     f->codeAppend ( "if (all(lessThan(abs(pt), vec2(1)))) rectMask |= (1 << i);");
| 1485 f->codeAppend ("}"); |
| 1486 this->acceptCoverageMask(f, "rectMask", opts); |
| 1487 if (coords.fFragHalfSpan) { |
| 1488 f->codeAppend ("}"); |
| 1489 } |
| 1490 } |
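|      // The per-sample loop assembled above reads roughly as:
|      //     int rectMask = 0;
|      //     for (int i = 0; i < SAMPLE_COUNT; i++) {
|      //         highp vec2 pt = <sample location mapped into normalized shape space>;
|      //         if (all(lessThan(abs(pt), vec2(1)))) rectMask |= (1 << i);
|      //     }
|      // i.e. a bit is set for every sample that lands inside the unit square.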
| 1491 |
| 1492 void GrGLSLInstanceProcessor::BackendMultisample::emitArc(GrGLSLPPFragmentBuilder* f,
| 1493                                                           const EmitShapeCoords& coords,
| 1494                                                           bool coordsMayBeNegative,
| 1495                                                           bool clampCoords,
| 1496                                                           const EmitShapeOpts& opts) {
| 1497 if (coords.fFragHalfSpan) { |
| 1498 SkString absArcCoords; |
| 1499         absArcCoords.printf(coordsMayBeNegative ? "abs(%s)" : "%s", coords.fVarying->fsIn());
| 1500 if (clampCoords) { |
| 1501 f->codeAppendf("if (%s(max(%s + %s, vec2(0))) < 1.0) {", |
| 1502                            fSquareFun.c_str(), absArcCoords.c_str(), coords.fFragHalfSpan);
| 1503 } else { |
| 1504 f->codeAppendf("if (%s(%s + %s) < 1.0) {", |
| 1505                            fSquareFun.c_str(), absArcCoords.c_str(), coords.fFragHalfSpan);
| 1506 } |
| 1507 // The entire pixel is inside the arc. |
| 1508 this->acceptOrRejectWholeFragment(f, true, opts); |
| 1509 f->codeAppendf("} else if (%s(max(%s - %s, vec2(0))) >= 1.0) {", |
| 1510                        fSquareFun.c_str(), absArcCoords.c_str(), coords.fFragHalfSpan);
| 1511 // The entire pixel is outside the arc. |
| 1512 this->acceptOrRejectWholeFragment(f, false, opts); |
| 1513 f->codeAppend ("} else {"); |
| 1514 } |
| 1515 f->codeAppend ( "int arcMask = 0;"); |
| 1516 f->codeAppend ( "for (int i = 0; i < SAMPLE_COUNT; i++) {"); |
| 1517 f->appendPrecisionModifier(kHigh_GrSLPrecision); |
| 1518 f->codeAppend ( "vec2 pt = "); |
| 1519 this->interpolateAtSample(f, *coords.fVarying, "i", coords.fInverseMatrix); |
| 1520 f->codeAppend ( ";"); |
| 1521 if (clampCoords) { |
| 1522 SkASSERT(!coordsMayBeNegative); |
| 1523 f->codeAppend ( "pt = max(pt, vec2(0));"); |
| 1524 } |
| 1525     f->codeAppendf( "if (%s(pt) < 1.0) arcMask |= (1 << i);", fSquareFun.c_str());
| 1526 f->codeAppend ( "}"); |
| 1527 this->acceptCoverageMask(f, "arcMask", opts); |
| 1528 if (coords.fFragHalfSpan) { |
| 1529 f->codeAppend ("}"); |
| 1530 } |
| 1531 } |
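|      // emitArc builds its mask the same way as emitRect, but tests square(pt) < 1.0 (squared
|      // distance from the center) so a sample only counts when it falls inside the unit circle.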
| 1532 |
| 1533 void GrGLSLInstanceProcessor::BackendMultisample::emitSimpleRRect(GrGLSLPPFragmentBuilder* f,
| 1534                                                                   const EmitShapeCoords& coords,
| 1535                                                                   const char* rrect,
| 1536                                                                   const EmitShapeOpts& opts) {
| 1537 // For now the round rect requires data for nonperspective interpolation. |
| 1538 SkASSERT(coords.fInverseMatrix); |
| 1539 SkASSERT(coords.fFragHalfSpan); |
| 1540 f->appendPrecisionModifier(kHigh_GrSLPrecision); |
| 1541     f->codeAppendf("vec2 distanceToArcEdge = abs(%s) - %s.xy;", coords.fVarying->fsIn(), rrect);
| 1542 f->codeAppend ("if (any(lessThan(distanceToArcEdge, vec2(0)))) {"); |
| 1543 this->emitRect(f, coords, opts); |
| 1544 f->codeAppend ("} else {"); |
| 1545 f->appendPrecisionModifier(kHigh_GrSLPrecision); |
| 1546 f->codeAppendf( "vec2 rrectCoords = distanceToArcEdge * %s.zw;", rrect); |
| 1547 f->appendPrecisionModifier(kHigh_GrSLPrecision); |
| 1548     f->codeAppendf( "vec2 fragRRectHalfSpan = %s * %s.zw;", coords.fFragHalfSpan, rrect);
| 1549     f->codeAppendf( "if (%s(rrectCoords + fragRRectHalfSpan) <= 1.0) {", fSquareFun.c_str());
| 1550 // The entire pixel is inside the round rect. |
| 1551 this->acceptOrRejectWholeFragment(f, true, opts); |
| 1552     f->codeAppendf( "} else if (%s(max(rrectCoords - fragRRectHalfSpan, vec2(0))) >= 1.0) {",
| 1553 fSquareFun.c_str()); |
| 1554 // The entire pixel is outside the round rect. |
| 1555 this->acceptOrRejectWholeFragment(f, false, opts); |
| 1556 f->codeAppend ( "} else {"); |
| 1557 f->appendPrecisionModifier(kHigh_GrSLPrecision); |
| 1558     f->codeAppendf( "vec2 s = %s.zw * sign(%s);", rrect, coords.fVarying->fsIn());
| 1559 f->appendPrecisionModifier(kHigh_GrSLPrecision); |
| 1560     f->codeAppendf( "mat2 innerRRectInverseMatrix = %s * mat2(s.x, 0, 0, s.y);",
| 1561 coords.fInverseMatrix); |
| 1562 f->appendPrecisionModifier(kHigh_GrSLPrecision); |
| 1563 f->codeAppend ( "int rrectMask = 0;"); |
| 1564 f->codeAppend ( "for (int i = 0; i < SAMPLE_COUNT; i++) {"); |
| 1565 f->appendPrecisionModifier(kHigh_GrSLPrecision); |
| 1566 f->codeAppend ( "vec2 pt = rrectCoords + "); |
| 1567     f->appendOffsetToSample("i", GrGLSLFPFragmentBuilder::kSkiaDevice_Coordinates);
| 1568 f->codeAppend ( "* innerRRectInverseMatrix;"); |
| 1569     f->codeAppendf( "if (%s(max(pt, vec2(0))) < 1.0) rrectMask |= (1 << i);",
| 1570 fSquareFun.c_str()); |
| 1571 f->codeAppend ( "}"); |
| 1572 this->acceptCoverageMask(f, "rrectMask", opts); |
| 1573 f->codeAppend ( "}"); |
| 1574 f->codeAppend ("}"); |
| 1575 } |
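|      // Reminder: rrect.xy holds (1 - innerRadii) and rrect.zw holds (1 / innerRadii), as packed in
|      // onSetupInnerRRect above, so rrectCoords re-expresses the fragment in the corner arc's
|      // unit-circle space.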
| 1576 |
| 1577 void GrGLSLInstanceProcessor::BackendMultisample::interpolateAtSample(GrGLSLPPFragmentBuilder* f,
| 1578                                                                       const GrGLSLVarying& varying,
| 1579                                                                       const char* sampleIdx,
| 1580                                                                       const char* interpolationMatrix) {
| 1581 if (interpolationMatrix) { |
| 1582 f->codeAppendf("(%s + ", varying.fsIn()); |
| 1583         f->appendOffsetToSample(sampleIdx, GrGLSLFPFragmentBuilder::kSkiaDevice_Coordinates);
| 1584 f->codeAppendf(" * %s)", interpolationMatrix); |
| 1585 } else { |
| 1586 SkAssertResult( |
| 1587             f->enableFeature(GrGLSLFragmentBuilder::kMultisampleInterpolation_GLSLFeature));
| 1588 f->codeAppendf("interpolateAtOffset(%s, ", varying.fsIn()); |
| 1589         f->appendOffsetToSample(sampleIdx, GrGLSLFPFragmentBuilder::kGLSLWindow_Coordinates);
| 1590 f->codeAppend(")"); |
| 1591 } |
| 1592 } |
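|      // When an inverse matrix is available, the per-sample offset is mapped into shape space
|      // analytically; otherwise we fall back to the GLSL multisample interpolation feature
|      // (interpolateAtOffset) with window-space offsets.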
| 1593 |
| 1594 void |
| 1595 GrGLSLInstanceProcessor::BackendMultisample::acceptOrRejectWholeFragment(GrGLSLPPFragmentBuilder* f,
| 1596                                                                          bool inside,
| 1597                                                                          const EmitShapeOpts& opts) {
| 1598 if (inside != opts.fInvertCoverage) { // Accept the entire fragment. |
| 1599 if (opts.fResolveMixedSamples) { |
| 1600             // This is a mixed sampled fragment in the interior of the shape. Reassign 100% coverage
| 1601             // to one fragment, and drop all other fragments that may fall on this same pixel. Since
| 1602             // our geometry is watertight and non-overlapping, we can take advantage of the
| 1603             // properties that (1) the incoming sample masks will be disjoint across fragments that
| 1604             // fall on a common pixel, and (2) since the entire fragment is inside the shape, each
| 1605             // sample's corresponding bit will be set in the incoming sample mask of exactly one
| 1606 // fragment. |
| 1607 f->codeAppend("if ((gl_SampleMaskIn[0] & SAMPLE_MASK_MSB) == 0) {"); |
| 1608 // Drop this fragment. |
| 1609 if (!fBatchInfo.fCannotDiscard) { |
| 1610 f->codeAppend("discard;"); |
| 1611 } else { |
| 1612 f->overrideSampleCoverage("0"); |
| 1613 } |
| 1614 f->codeAppend("} else {"); |
| 1615 // Override the lone surviving fragment to full coverage. |
| 1616 f->overrideSampleCoverage("-1"); |
| 1617 f->codeAppend("}"); |
| 1618 } |
| 1619 } else { // Reject the entire fragment. |
| 1620 if (!fBatchInfo.fCannotDiscard) { |
| 1621 f->codeAppend("discard;"); |
| 1622 } else if (opts.fResolveMixedSamples) { |
| 1623 f->overrideSampleCoverage("0"); |
| 1624 } else { |
| 1625 f->maskSampleCoverage("0"); |
| 1626 } |
| 1627 } |
| 1628 } |
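|      // Illustrative expansion of the mixed-sample accept path when discard is allowed:
|      //     if ((gl_SampleMaskIn[0] & SAMPLE_MASK_MSB) == 0) { discard; }
|      //     else { <override sample coverage to -1, i.e. all samples> }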
| 1629 |
| 1630 void GrGLSLInstanceProcessor::BackendMultisample::acceptCoverageMask(GrGLSLPPFragmentBuilder* f,
| 1631                                                                      const char* shapeMask,
| 1632                                                                      const EmitShapeOpts& opts,
| 1633                                                                      bool maybeSharedEdge) {
| 1634 if (opts.fResolveMixedSamples) { |
| 1635 if (maybeSharedEdge) { |
| 1636             // This is a mixed sampled fragment, potentially on the outer edge of the shape, with
| 1637             // only partial shape coverage. Override the coverage of one fragment to "shapeMask",
| 1638             // and drop all other fragments that may fall on this same pixel. Since our geometry is
| 1639             // watertight, non-overlapping, and completely contains the shape, this means that each
| 1640             // "on" bit from shapeMask is guaranteed to be set in the incoming sample mask of one,
| 1641 // and only one, fragment that falls on this same pixel. |
| 1642 SkASSERT(!opts.fInvertCoverage); |
| 1643             f->codeAppendf("if ((gl_SampleMaskIn[0] & (1 << findMSB(%s))) == 0) {", shapeMask);
| 1644 // Drop this fragment. |
| 1645 if (!fBatchInfo.fCannotDiscard) { |
| 1646 f->codeAppend ("discard;"); |
| 1647 } else { |
| 1648 f->overrideSampleCoverage("0"); |
| 1649 } |
| 1650 f->codeAppend ("} else {"); |
| 1651             // Override the coverage of the lone surviving fragment to "shapeMask".
| 1652 f->overrideSampleCoverage(shapeMask); |
| 1653 f->codeAppend ("}"); |
| 1654 } else { |
| 1655 f->overrideSampleCoverage(shapeMask); |
| 1656 } |
| 1657 } else { |
| 1658 f->maskSampleCoverage(shapeMask, opts.fInvertCoverage); |
| 1659 } |
| 1660 } |
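|      // The findMSB test keys off the highest set bit of shapeMask: only the fragment whose incoming
|      // sample mask owns that bit survives, and it takes the full shapeMask as its coverage.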
| 1661 |
| 1662 ////////////////////////////////////////////////////////////////////////////////////////////////////
| 1663 |
| 1664 GrGLSLInstanceProcessor::Backend* |
| 1665 GrGLSLInstanceProcessor::Backend::Create(const GrGLSLProgramBuilder* p, BatchInfo batchInfo,
| 1666 const VertexInputs& inputs) { |
| 1667 switch (batchInfo.fAntialiasMode) { |
| 1668 default: |
| 1669 SkFAIL("Unexpected antialias mode."); |
| 1670 case kNone_AntialiasMode: |
| 1671 return new BackendNonAA(batchInfo, inputs); |
| 1672 case kCoverage_AntialiasMode: |
| 1673 return new BackendCoverage(batchInfo, inputs); |
| 1674 case kMSAA_AntialiasMode: |
| 1675 case kMixedSamples_AntialiasMode: { |
| 1676 const GrPipeline& pipeline = p->pipeline(); |
| 1677             const GrRenderTargetPriv& rtp = pipeline.getRenderTarget()->renderTargetPriv();
| 1678             const GrGpu::MultisampleSpecs& specs = rtp.getMultisampleSpecs(pipeline.getStencil());
| 1679             return new BackendMultisample(batchInfo, inputs, specs.fEffectiveSampleCnt);
| 1680 } |
| 1681 } |
| 1682 } |
| 1683 |
| 1684 ////////////////////////////////////////////////////////////////////////////////////////////////////
| 1685 |
| 1686 const GrInstancedRenderingTypes::ShapeVertex kVertexData[] = { |
| 1687 // Rectangle. |
| 1688 {+1, +1, ~0}, /*0*/ |
| 1689 {-1, +1, ~0}, /*1*/ |
| 1690 {-1, -1, ~0}, /*2*/ |
| 1691 {+1, -1, ~0}, /*3*/ |
| 1692 // The next 4 are for the bordered version. |
| 1693 {+1, +1, 0}, /*4*/ |
| 1694 {-1, +1, 0}, /*5*/ |
| 1695 {-1, -1, 0}, /*6*/ |
| 1696 {+1, -1, 0}, /*7*/ |
| 1697 |
| 1698 // Octagon that inscribes the unit circle, cut by an interior unit octagon. |
| 1699 {+1.000000f, 0.000000f, 0}, /* 8*/ |
| 1700 {+1.000000f, +0.414214f, ~0}, /* 9*/ |
| 1701 {+0.707106f, +0.707106f, 0}, /*10*/ |
| 1702 {+0.414214f, +1.000000f, ~0}, /*11*/ |
| 1703 { 0.000000f, +1.000000f, 0}, /*12*/ |
| 1704 {-0.414214f, +1.000000f, ~0}, /*13*/ |
| 1705 {-0.707106f, +0.707106f, 0}, /*14*/ |
| 1706 {-1.000000f, +0.414214f, ~0}, /*15*/ |
| 1707 {-1.000000f, 0.000000f, 0}, /*16*/ |
| 1708 {-1.000000f, -0.414214f, ~0}, /*17*/ |
| 1709 {-0.707106f, -0.707106f, 0}, /*18*/ |
| 1710 {-0.414214f, -1.000000f, ~0}, /*19*/ |
| 1711 { 0.000000f, -1.000000f, 0}, /*20*/ |
| 1712 {+0.414214f, -1.000000f, ~0}, /*21*/ |
| 1713 {+0.707106f, -0.707106f, 0}, /*22*/ |
| 1714 {+1.000000f, -0.414214f, ~0}, /*23*/ |
| 1715 // This vertex is for the fanned versions. |
| 1716 { 0.000000f, 0.000000f, ~0}, /*24*/ |
| 1717 |
| 1718 // Rectangle with disjoint corner segments. |
| 1719 {+1.0, +0.5, 0x3}, /*25*/ |
| 1720 {+1.0, +1.0, 0x3}, /*26*/ |
| 1721 {+0.5, +1.0, 0x3}, /*27*/ |
| 1722 {-0.5, +1.0, 0x2}, /*28*/ |
| 1723 {-1.0, +1.0, 0x2}, /*29*/ |
| 1724 {-1.0, +0.5, 0x2}, /*30*/ |
| 1725 {-1.0, -0.5, 0x0}, /*31*/ |
| 1726 {-1.0, -1.0, 0x0}, /*32*/ |
| 1727 {-0.5, -1.0, 0x0}, /*33*/ |
| 1728 {+0.5, -1.0, 0x1}, /*34*/ |
| 1729 {+1.0, -1.0, 0x1}, /*35*/ |
| 1730 {+1.0, -0.5, 0x1}, /*36*/ |
| 1731 // The next 4 are for the fanned version. |
| 1732 { 0.0, 0.0, 0x3}, /*37*/ |
| 1733 { 0.0, 0.0, 0x2}, /*38*/ |
| 1734 { 0.0, 0.0, 0x0}, /*39*/ |
| 1735 { 0.0, 0.0, 0x1}, /*40*/ |
| 1736 // The next 8 are for the bordered version. |
| 1737 {+0.75, +0.50, 0x3}, /*41*/ |
| 1738 {+0.50, +0.75, 0x3}, /*42*/ |
| 1739 {-0.50, +0.75, 0x2}, /*43*/ |
| 1740 {-0.75, +0.50, 0x2}, /*44*/ |
| 1741 {-0.75, -0.50, 0x0}, /*45*/ |
| 1742 {-0.50, -0.75, 0x0}, /*46*/ |
| 1743 {+0.50, -0.75, 0x1}, /*47*/ |
| 1744 {+0.75, -0.50, 0x1}, /*48*/ |
| 1745 |
| 1746 // 16-gon that inscribes the unit circle, cut by an interior unit 16-gon. |
| 1747 {+1.000000f, +0.000000f, 0}, /*49*/ |
| 1748 {+1.000000f, +0.198913f, ~0}, /*50*/ |
| 1749 {+0.923879f, +0.382683f, 0}, /*51*/ |
| 1750 {+0.847760f, +0.566455f, ~0}, /*52*/ |
| 1751 {+0.707106f, +0.707106f, 0}, /*53*/ |
| 1752 {+0.566455f, +0.847760f, ~0}, /*54*/ |
| 1753 {+0.382683f, +0.923879f, 0}, /*55*/ |
| 1754 {+0.198913f, +1.000000f, ~0}, /*56*/ |
| 1755 {+0.000000f, +1.000000f, 0}, /*57*/ |
| 1756 {-0.198913f, +1.000000f, ~0}, /*58*/ |
| 1757 {-0.382683f, +0.923879f, 0}, /*59*/ |
| 1758 {-0.566455f, +0.847760f, ~0}, /*60*/ |
| 1759 {-0.707106f, +0.707106f, 0}, /*61*/ |
| 1760 {-0.847760f, +0.566455f, ~0}, /*62*/ |
| 1761 {-0.923879f, +0.382683f, 0}, /*63*/ |
| 1762 {-1.000000f, +0.198913f, ~0}, /*64*/ |
| 1763 {-1.000000f, +0.000000f, 0}, /*65*/ |
| 1764 {-1.000000f, -0.198913f, ~0}, /*66*/ |
| 1765 {-0.923879f, -0.382683f, 0}, /*67*/ |
| 1766 {-0.847760f, -0.566455f, ~0}, /*68*/ |
| 1767 {-0.707106f, -0.707106f, 0}, /*69*/ |
| 1768 {-0.566455f, -0.847760f, ~0}, /*70*/ |
| 1769 {-0.382683f, -0.923879f, 0}, /*71*/ |
| 1770 {-0.198913f, -1.000000f, ~0}, /*72*/ |
| 1771 {-0.000000f, -1.000000f, 0}, /*73*/ |
| 1772 {+0.198913f, -1.000000f, ~0}, /*74*/ |
| 1773 {+0.382683f, -0.923879f, 0}, /*75*/ |
| 1774 {+0.566455f, -0.847760f, ~0}, /*76*/ |
| 1775 {+0.707106f, -0.707106f, 0}, /*77*/ |
| 1776 {+0.847760f, -0.566455f, ~0}, /*78*/ |
| 1777 {+0.923879f, -0.382683f, 0}, /*79*/ |
| 1778 {+1.000000f, -0.198913f, ~0}, /*80*/ |
| 1779 }; |
| 1780 |
| 1781 const uint8_t kIndexData[] = { |
| 1782 // Rectangle. |
| 1783 0, 1, 2, |
| 1784 0, 2, 3, |
| 1785 |
| 1786 // Rectangle with a border. |
| 1787 0, 1, 5, |
| 1788 5, 4, 0, |
| 1789 1, 2, 6, |
| 1790 6, 5, 1, |
| 1791 2, 3, 7, |
| 1792 7, 6, 2, |
| 1793 3, 0, 4, |
| 1794 4, 7, 3, |
| 1795 4, 5, 6, |
| 1796 6, 7, 4, |
| 1797 |
| 1798 // Octagon that inscribes the unit circle, cut by an interior unit octagon. |
| 1799 10, 8, 9, |
| 1800 12, 10, 11, |
| 1801 14, 12, 13, |
| 1802 16, 14, 15, |
| 1803 18, 16, 17, |
| 1804 20, 18, 19, |
| 1805 22, 20, 21, |
| 1806 8, 22, 23, |
| 1807 8, 10, 12, |
| 1808 12, 14, 16, |
| 1809 16, 18, 20, |
| 1810 20, 22, 8, |
| 1811 8, 12, 16, |
| 1812 16, 20, 8, |
| 1813 |
| 1814     // Same octagons, but with the interior arranged as a fan. Used by mixed samples.
| 1815 10, 8, 9, |
| 1816 12, 10, 11, |
| 1817 14, 12, 13, |
| 1818 16, 14, 15, |
| 1819 18, 16, 17, |
| 1820 20, 18, 19, |
| 1821 22, 20, 21, |
| 1822 8, 22, 23, |
| 1823 24, 8, 10, |
| 1824 12, 24, 10, |
| 1825 24, 12, 14, |
| 1826 16, 24, 14, |
| 1827 24, 16, 18, |
| 1828 20, 24, 18, |
| 1829 24, 20, 22, |
| 1830 8, 24, 22, |
| 1831 |
| 1832     // Same octagons, but with the inner and outer disjoint. Used by coverage AA.
| 1833 8, 22, 23, |
| 1834 9, 8, 23, |
| 1835 10, 8, 9, |
| 1836 11, 10, 9, |
| 1837 12, 10, 11, |
| 1838 13, 12, 11, |
| 1839 14, 12, 13, |
| 1840 15, 14, 13, |
| 1841 16, 14, 15, |
| 1842 17, 16, 15, |
| 1843 18, 16, 17, |
| 1844 19, 18, 17, |
| 1845 20, 18, 19, |
| 1846 21, 20, 19, |
| 1847 22, 20, 21, |
| 1848 23, 22, 21, |
| 1849 22, 8, 10, |
| 1850 10, 12, 14, |
| 1851 14, 16, 18, |
| 1852 18, 20, 22, |
| 1853 22, 10, 14, |
| 1854 14, 18, 22, |
| 1855 |
| 1856 // Rectangle with disjoint corner segments. |
| 1857 27, 25, 26, |
| 1858 30, 28, 29, |
| 1859 33, 31, 32, |
| 1860 36, 34, 35, |
| 1861 25, 27, 28, |
| 1862 28, 30, 31, |
| 1863 31, 33, 34, |
| 1864 34, 36, 25, |
| 1865 25, 28, 31, |
| 1866 31, 34, 25, |
| 1867 |
| 1868     // Same rectangle with disjoint corners, but with the interior arranged as a fan. Used by
| 1869 // mixed samples. |
| 1870 27, 25, 26, |
| 1871 30, 28, 29, |
| 1872 33, 31, 32, |
| 1873 36, 34, 35, |
| 1874 27, 37, 25, |
| 1875 28, 37, 27, |
| 1876 30, 38, 28, |
| 1877 31, 38, 30, |
| 1878 33, 39, 31, |
| 1879 34, 39, 33, |
| 1880 36, 40, 34, |
| 1881 25, 40, 36, |
| 1882 |
| 1883     // Same rectangle with disjoint corners, with a border as well. Used by coverage AA.
| 1884 41, 25, 26, |
| 1885 42, 41, 26, |
| 1886 27, 42, 26, |
| 1887 43, 28, 29, |
| 1888 44, 43, 29, |
| 1889 30, 44, 29, |
| 1890 45, 31, 32, |
| 1891 46, 45, 32, |
| 1892 33, 46, 32, |
| 1893 47, 34, 35, |
| 1894 48, 47, 35, |
| 1895 36, 48, 35, |
| 1896 27, 28, 42, |
| 1897 42, 28, 43, |
| 1898 30, 31, 44, |
| 1899 44, 31, 45, |
| 1900 33, 34, 46, |
| 1901 46, 34, 47, |
| 1902 36, 25, 48, |
| 1903 48, 25, 41, |
| 1904 41, 42, 43, |
| 1905 43, 44, 45, |
| 1906 45, 46, 47, |
| 1907 47, 48, 41, |
| 1908 41, 43, 45, |
| 1909 45, 47, 41, |
| 1910 |
| 1911     // Same as the disjoint octagons, but with 16-gons instead. Used by coverage AA when the oval is
| 1912 // sufficiently large. |
| 1913 49, 79, 80, |
| 1914 50, 49, 80, |
| 1915 51, 49, 50, |
| 1916 52, 51, 50, |
| 1917 53, 51, 52, |
| 1918 54, 53, 52, |
| 1919 55, 53, 54, |
| 1920 56, 55, 54, |
| 1921 57, 55, 56, |
| 1922 58, 57, 56, |
| 1923 59, 57, 58, |
| 1924 60, 59, 58, |
| 1925 61, 59, 60, |
| 1926 62, 61, 60, |
| 1927 63, 61, 62, |
| 1928 64, 63, 62, |
| 1929 65, 63, 64, |
| 1930 66, 65, 64, |
| 1931 67, 65, 66, |
| 1932 68, 67, 66, |
| 1933 69, 67, 68, |
| 1934 70, 69, 68, |
| 1935 71, 69, 70, |
| 1936 72, 71, 70, |
| 1937 73, 71, 72, |
| 1938 74, 73, 72, |
| 1939 75, 73, 74, |
| 1940 76, 75, 74, |
| 1941 77, 75, 76, |
| 1942 78, 77, 76, |
| 1943 79, 77, 78, |
| 1944 80, 79, 78, |
| 1945 49, 51, 53, |
| 1946 53, 55, 57, |
| 1947 57, 59, 61, |
| 1948 61, 63, 65, |
| 1949 65, 67, 69, |
| 1950 69, 71, 73, |
| 1951 73, 75, 77, |
| 1952 77, 79, 49, |
| 1953 49, 53, 57, |
| 1954 57, 61, 65, |
| 1955 65, 69, 73, |
| 1956 73, 77, 49, |
| 1957 49, 57, 65, |
| 1958 65, 73, 49, |
| 1959 }; |
| 1960 |
| 1961 enum { |
| 1962 kRect_FirstIndex = 0, |
| 1963 kRect_TriCount = 2, |
| 1964 |
| 1965 kFramedRect_FirstIndex = 6, |
| 1966 kFramedRect_TriCount = 10, |
| 1967 |
| 1968 kOctagons_FirstIndex = 36, |
| 1969 kOctagons_TriCount = 14, |
| 1970 |
| 1971 kOctagonsFanned_FirstIndex = 78, |
| 1972 kOctagonsFanned_TriCount = 16, |
| 1973 |
| 1974 kDisjointOctagons_FirstIndex = 126, |
| 1975 kDisjointOctagons_TriCount = 22, |
| 1976 |
| 1977 kCorneredRect_FirstIndex = 192, |
| 1978 kCorneredRect_TriCount = 10, |
| 1979 |
| 1980 kCorneredRectFanned_FirstIndex = 222, |
| 1981 kCorneredRectFanned_TriCount = 12, |
| 1982 |
| 1983 kCorneredFramedRect_FirstIndex = 258, |
| 1984 kCorneredFramedRect_TriCount = 26, |
| 1985 |
| 1986 kDisjoint16Gons_FirstIndex = 336, |
| 1987 kDisjoint16Gons_TriCount = 46, |
| 1988 }; |
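|      // Each *_FirstIndex above is the previous FirstIndex plus 3 * the previous TriCount, e.g.
|      // kOctagons_FirstIndex = 6 + 3 * 10 = 36 and kDisjoint16Gons_FirstIndex = 258 + 3 * 26 = 336.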
| 1989 |
| 1990 static const GrUniqueKey::Domain kShapeBufferDomain = GrUniqueKey::GenerateDomain();
| 1991 |
| 1992 template<GrBufferType Type> static const GrUniqueKey& get_shape_buffer_key() { |
| 1993 static GrUniqueKey* kKey; |
| 1994 if (!kKey) { |
| 1995 kKey = new GrUniqueKey; |
| 1996 GrUniqueKey::Builder builder(kKey, kShapeBufferDomain, 1); |
| 1997 builder[0] = Type; |
| 1998 } |
| 1999 return *kKey; |
| 2000 } |
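|      // Each instantiation of this template (vertex vs. index) lazily builds its own GrUniqueKey in
|      // kShapeBufferDomain, using the buffer type as the key's single data word; the key object is
|      // never freed.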
| 2001 |
| 2002 const GrBuffer* GrInstanceProcessor::FindOrCreateVertexBuffer(GrGpu* gpu) { |
| 2003 GrResourceCache* cache = gpu->getContext()->getResourceCache(); |
| 2004 const GrUniqueKey& key = get_shape_buffer_key<kVertex_GrBufferType>(); |
| 2005 if (GrGpuResource* cached = cache->findAndRefUniqueResource(key)) { |
| 2006 return static_cast<GrBuffer*>(cached); |
| 2007 } |
| 2008     if (GrBuffer* buffer = gpu->createBuffer(sizeof(kVertexData), kVertex_GrBufferType,
| 2009                                              kStatic_GrAccessPattern, kVertexData)) {
| 2010 buffer->resourcePriv().setUniqueKey(key); |
| 2011 return buffer; |
| 2012 } |
| 2013 return nullptr; |
| 2014 } |
| 2015 |
| 2016 const GrBuffer* GrInstanceProcessor::FindOrCreateIndex8Buffer(GrGpu* gpu) { |
| 2017 GrResourceCache* cache = gpu->getContext()->getResourceCache(); |
| 2018 const GrUniqueKey& key = get_shape_buffer_key<kIndex_GrBufferType>(); |
| 2019 if (GrGpuResource* cached = cache->findAndRefUniqueResource(key)) { |
| 2020 return static_cast<GrBuffer*>(cached); |
| 2021 } |
| 2022     if (GrBuffer* buffer = gpu->createBuffer(sizeof(kIndexData), kIndex_GrBufferType,
| 2023                                              kStatic_GrAccessPattern, kIndexData)) {
| 2024 buffer->resourcePriv().setUniqueKey(key); |
| 2025 return buffer; |
| 2026 } |
| 2027 return nullptr; |
| 2028 } |
| 2029 |
| 2030 void GrInstanceProcessor::GetIndexRangeForRect(AntialiasMode aa, uint32_t* firstIndex,
| 2031 uint32_t* count) { |
| 2032 static const uint32_t kRectRanges[] = { |
| 2033 kRect_FirstIndex, 3 * kRect_TriCount, // kNone |
| 2034 kFramedRect_FirstIndex, 3 * kFramedRect_TriCount, // kCoverage |
| 2035 kRect_FirstIndex, 3 * kRect_TriCount, // kMSAA |
| 2036 kRect_FirstIndex, 3 * kRect_TriCount // kMixedSamples |
| 2037 }; |
| 2038 |
| 2039 SkASSERT(aa >= 0 && aa <= kLast_AntialiasMode); |
| 2040 *firstIndex = kRectRanges[2 * aa]; |
| 2041 *count = kRectRanges[2 * aa + 1]; |
| 2042 } |
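|      // e.g. kCoverage_AntialiasMode selects the framed-rect geometry: *firstIndex = 6 and
|      // *count = 30 (3 * kFramedRect_TriCount), per the enum above.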
| 2043 |
| 2044 void GrInstanceProcessor::GetIndexRangeForOval(AntialiasMode aa, const SkRect& devBounds,
| 2045                                                uint32_t* firstIndex, uint32_t* count) {
| 2046     if (kCoverage_AntialiasMode == aa && devBounds.height() * devBounds.width() >= 256 * 256) {
| 2047 // This threshold was chosen quasi-scientifically on Tegra X1. |
| 2048 *firstIndex = kDisjoint16Gons_FirstIndex; |
| 2049 *count = 3 * kDisjoint16Gons_TriCount; |
| 2050 return; |
| 2051 } |
| 2052 |
| 2053 static const uint32_t kRRectRanges[] = { |
| 2054         kOctagons_FirstIndex,         3 * kOctagons_TriCount,         // kNone
| 2055         kDisjointOctagons_FirstIndex, 3 * kDisjointOctagons_TriCount, // kCoverage
| 2056         kOctagons_FirstIndex,         3 * kOctagons_TriCount,         // kMSAA
| 2057         kOctagonsFanned_FirstIndex,   3 * kOctagonsFanned_TriCount    // kMixedSamples
| 2058 }; |
| 2059 |
| 2060 SkASSERT(aa >= 0 && aa <= kLast_AntialiasMode); |
| 2061 *firstIndex = kRRectRanges[2 * aa]; |
| 2062 *count = kRRectRanges[2 * aa + 1]; |
| 2063 } |
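|      // The 256 * 256 cutoff means that, under coverage AA, ovals covering roughly 64K or more device
|      // pixels use the finer disjoint 16-gon geometry; everything else uses the octagon ranges above.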
| 2064 |
| 2065 void GrInstanceProcessor::GetIndexRangeForRRect(AntialiasMode aa, uint32_t* firstIndex,
| 2066 uint32_t* count) { |
| 2067 static const uint32_t kRRectRanges[] = { |
| 2068         kCorneredRect_FirstIndex,       3 * kCorneredRect_TriCount,       // kNone
| 2069         kCorneredFramedRect_FirstIndex, 3 * kCorneredFramedRect_TriCount, // kCoverage
| 2070         kCorneredRect_FirstIndex,       3 * kCorneredRect_TriCount,       // kMSAA
| 2071         kCorneredRectFanned_FirstIndex, 3 * kCorneredRectFanned_TriCount  // kMixedSamples
| 2072 }; |
| 2073 |
| 2074 SkASSERT(aa >= 0 && aa <= kLast_AntialiasMode); |
| 2075 *firstIndex = kRRectRanges[2 * aa]; |
| 2076 *count = kRRectRanges[2 * aa + 1]; |
| 2077 } |