/*
 * Copyright 2016 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "InstanceProcessor.h"

#include "GrContext.h"
#include "GrRenderTargetPriv.h"
#include "GrResourceCache.h"
#include "GrResourceProvider.h"
#include "glsl/GrGLSLGeometryProcessor.h"
#include "glsl/GrGLSLFragmentShaderBuilder.h"
#include "glsl/GrGLSLProgramBuilder.h"
#include "glsl/GrGLSLVarying.h"

namespace gr_instanced {

bool InstanceProcessor::IsSupported(const GrGLSLCaps& glslCaps, const GrCaps& caps,
                                    AntialiasMode* lastSupportedAAMode) {
    if (!glslCaps.canUseAnyFunctionInShader() ||
        !glslCaps.flatInterpolationSupport() ||
        !glslCaps.integerSupport() ||
        0 == glslCaps.maxVertexSamplers() ||
        !caps.shaderCaps()->texelBufferSupport() ||
        caps.maxVertexAttributes() < kNumAttribs) {
        return false;
    }
    if (caps.sampleLocationsSupport() &&
        glslCaps.sampleVariablesSupport() &&
        glslCaps.shaderDerivativeSupport()) {
        if (0 != caps.maxRasterSamples() &&
            glslCaps.sampleMaskOverrideCoverageSupport()) {
            *lastSupportedAAMode = AntialiasMode::kMixedSamples;
        } else {
            *lastSupportedAAMode = AntialiasMode::kMSAA;
        }
    } else {
        *lastSupportedAAMode = AntialiasMode::kCoverage;
    }
    return true;
}

InstanceProcessor::InstanceProcessor(BatchInfo batchInfo, GrBuffer* paramsBuffer)
    : fBatchInfo(batchInfo) {
    this->initClassID<InstanceProcessor>();

    this->addVertexAttrib(Attribute("shapeCoords", kVec2f_GrVertexAttribType, kHigh_GrSLPrecision));
    this->addVertexAttrib(Attribute("vertexAttrs", kInt_GrVertexAttribType));
    this->addVertexAttrib(Attribute("instanceInfo", kUint_GrVertexAttribType));
    this->addVertexAttrib(Attribute("shapeMatrixX", kVec3f_GrVertexAttribType,
                                    kHigh_GrSLPrecision));
    this->addVertexAttrib(Attribute("shapeMatrixY", kVec3f_GrVertexAttribType,
                                    kHigh_GrSLPrecision));
    this->addVertexAttrib(Attribute("color", kVec4f_GrVertexAttribType, kLow_GrSLPrecision));
    this->addVertexAttrib(Attribute("localRect", kVec4f_GrVertexAttribType, kHigh_GrSLPrecision));

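    // The asserts below pin the attribute order to the Attrib enum, which is used to index
    // getAttrib() via VertexInputs::attr().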
    GR_STATIC_ASSERT(0 == (int)Attrib::kShapeCoords);
    GR_STATIC_ASSERT(1 == (int)Attrib::kVertexAttrs);
    GR_STATIC_ASSERT(2 == (int)Attrib::kInstanceInfo);
    GR_STATIC_ASSERT(3 == (int)Attrib::kShapeMatrixX);
    GR_STATIC_ASSERT(4 == (int)Attrib::kShapeMatrixY);
    GR_STATIC_ASSERT(5 == (int)Attrib::kColor);
    GR_STATIC_ASSERT(6 == (int)Attrib::kLocalRect);
    GR_STATIC_ASSERT(7 == kNumAttribs);

    if (fBatchInfo.fHasParams) {
        SkASSERT(paramsBuffer);
        fParamsAccess.reset(kRGBA_float_GrPixelConfig, paramsBuffer, kVertex_GrShaderFlag);
        this->addBufferAccess(&fParamsAccess);
    }

    if (fBatchInfo.fAntialiasMode >= AntialiasMode::kMSAA) {
        if (!fBatchInfo.isSimpleRects() ||
            AntialiasMode::kMixedSamples == fBatchInfo.fAntialiasMode) {
            this->setWillUseSampleLocations();
        }
    }
}

class GLSLInstanceProcessor : public GrGLSLGeometryProcessor {
public:
    void onEmitCode(EmitArgs& args, GrGPArgs* gpArgs) override;

private:
    void setData(const GrGLSLProgramDataManager&, const GrPrimitiveProcessor&) override {}

    class VertexInputs;
    class Backend;
    class BackendNonAA;
    class BackendCoverage;
    class BackendMultisample;

    typedef GrGLSLGeometryProcessor INHERITED;
};

GrGLSLPrimitiveProcessor* InstanceProcessor::createGLSLInstance(const GrGLSLCaps&) const {
    return new GLSLInstanceProcessor();
}

class GLSLInstanceProcessor::VertexInputs {
public:
    VertexInputs(const InstanceProcessor& instProc, GrGLSLVertexBuilder* vertexBuilder)
        : fInstProc(instProc),
          fVertexBuilder(vertexBuilder) {
    }

    void initParams(const SamplerHandle paramsBuffer) {
        fParamsBuffer = paramsBuffer;
        fVertexBuilder->definef("PARAMS_IDX_MASK", "0x%xu", kParamsIdx_InfoMask);
        fVertexBuilder->appendPrecisionModifier(kHigh_GrSLPrecision);
        fVertexBuilder->codeAppendf("int paramsIdx = int(%s & PARAMS_IDX_MASK);",
                                    this->attr(Attrib::kInstanceInfo));
    }

    const char* attr(Attrib attr) const { return fInstProc.getAttrib((int)attr).fName; }

    void fetchNextParam(GrSLType type = kVec4f_GrSLType) const {
        SkASSERT(fParamsBuffer.isValid());
        if (type != kVec4f_GrSLType) {
            fVertexBuilder->codeAppendf("%s(", GrGLSLTypeString(type));
        }
        fVertexBuilder->appendTexelFetch(fParamsBuffer, "paramsIdx++");
        if (type != kVec4f_GrSLType) {
            fVertexBuilder->codeAppend(")");
        }
    }

    void skipParams(unsigned n) const {
        SkASSERT(fParamsBuffer.isValid());
        fVertexBuilder->codeAppendf("paramsIdx += %u;", n);
    }

private:
    const InstanceProcessor& fInstProc;
    GrGLSLVertexBuilder* fVertexBuilder;
    SamplerHandle fParamsBuffer;
};

class GLSLInstanceProcessor::Backend {
public:
    static Backend* SK_WARN_UNUSED_RESULT Create(const GrGLSLProgramBuilder*, BatchInfo,
                                                 const VertexInputs&);
    virtual ~Backend() {}

    void init(GrGLSLVaryingHandler*, GrGLSLVertexBuilder*);
    virtual void setupRect(GrGLSLVertexBuilder*) = 0;
    virtual void setupOval(GrGLSLVertexBuilder*) = 0;
    void setupRRect(GrGLSLVertexBuilder*);

    void initInnerShape(GrGLSLVaryingHandler*, GrGLSLVertexBuilder*);
    virtual void setupInnerRect(GrGLSLVertexBuilder*) = 0;
    virtual void setupInnerOval(GrGLSLVertexBuilder*) = 0;
    void setupInnerRRect(GrGLSLVertexBuilder*);

    const char* outShapeCoords() {
        return fModifiedShapeCoords ? fModifiedShapeCoords : fInputs.attr(Attrib::kShapeCoords);
    }

    void emitCode(GrGLSLVertexBuilder*, GrGLSLPPFragmentBuilder*, const char* outCoverage,
                  const char* outColor);

protected:
    Backend(BatchInfo batchInfo, const VertexInputs& inputs)
        : fBatchInfo(batchInfo),
          fInputs(inputs),
          fModifiesCoverage(false),
          fModifiesColor(false),
          fNeedsNeighborRadii(false),
          fColor(kVec4f_GrSLType),
          fTriangleIsArc(kInt_GrSLType),
          fArcCoords(kVec2f_GrSLType),
          fInnerShapeCoords(kVec2f_GrSLType),
          fInnerRRect(kVec4f_GrSLType),
          fModifiedShapeCoords(nullptr) {
        if (fBatchInfo.fShapeTypes & kRRect_ShapesMask) {
            fModifiedShapeCoords = "adjustedShapeCoords";
        }
    }

    virtual void onInit(GrGLSLVaryingHandler*, GrGLSLVertexBuilder*) = 0;
    virtual void adjustRRectVertices(GrGLSLVertexBuilder*);
    virtual void onSetupRRect(GrGLSLVertexBuilder*) {}

    virtual void onInitInnerShape(GrGLSLVaryingHandler*, GrGLSLVertexBuilder*) = 0;
    virtual void onSetupInnerRRect(GrGLSLVertexBuilder*) = 0;

    virtual void onEmitCode(GrGLSLVertexBuilder*, GrGLSLPPFragmentBuilder*,
                            const char* outCoverage, const char* outColor) = 0;

    void setupSimpleRadii(GrGLSLVertexBuilder*);
    void setupNinePatchRadii(GrGLSLVertexBuilder*);
    void setupComplexRadii(GrGLSLVertexBuilder*);

    const BatchInfo fBatchInfo;
    const VertexInputs& fInputs;
    bool fModifiesCoverage;
    bool fModifiesColor;
    bool fNeedsNeighborRadii;
    GrGLSLVertToFrag fColor;
    GrGLSLVertToFrag fTriangleIsArc;
    GrGLSLVertToFrag fArcCoords;
    GrGLSLVertToFrag fInnerShapeCoords;
    GrGLSLVertToFrag fInnerRRect;
    const char* fModifiedShapeCoords;
};

void GLSLInstanceProcessor::onEmitCode(EmitArgs& args, GrGPArgs* gpArgs) {
    const InstanceProcessor& ip = args.fGP.cast<InstanceProcessor>();
    GrGLSLUniformHandler* uniHandler = args.fUniformHandler;
    GrGLSLVaryingHandler* varyingHandler = args.fVaryingHandler;
    GrGLSLVertexBuilder* v = args.fVertBuilder;
    GrGLSLPPFragmentBuilder* f = args.fFragBuilder;

    varyingHandler->emitAttributes(ip);

    VertexInputs inputs(ip, v);
    if (ip.batchInfo().fHasParams) {
        SkASSERT(1 == ip.numBuffers());
        inputs.initParams(args.fBufferSamplers[0]);
    }

    if (!ip.batchInfo().fHasPerspective) {
        v->codeAppendf("mat2x3 shapeMatrix = mat2x3(%s, %s);",
                       inputs.attr(Attrib::kShapeMatrixX), inputs.attr(Attrib::kShapeMatrixY));
    } else {
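        // Perspective batches start from an affine matrix with an identity third row; the real
        // third row is only fetched from the params buffer when this instance's PERSPECTIVE_FLAG
        // bit is set.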
        v->definef("PERSPECTIVE_FLAG", "0x%xu", kPerspective_InfoFlag);
        v->codeAppendf("mat3 shapeMatrix = mat3(%s, %s, vec3(0, 0, 1));",
                       inputs.attr(Attrib::kShapeMatrixX), inputs.attr(Attrib::kShapeMatrixY));
        v->codeAppendf("if (0u != (%s & PERSPECTIVE_FLAG)) {",
                       inputs.attr(Attrib::kInstanceInfo));
        v->codeAppend (    "shapeMatrix[2] = ");
        inputs.fetchNextParam(kVec3f_GrSLType);
        v->codeAppend (    ";");
        v->codeAppend ("}");
    }

    int usedShapeTypes = 0;

    bool hasSingleShapeType = SkIsPow2(ip.batchInfo().fShapeTypes);
    if (!hasSingleShapeType) {
        usedShapeTypes |= ip.batchInfo().fShapeTypes;
        v->define("SHAPE_TYPE_BIT", kShapeType_InfoBit);
        v->codeAppendf("uint shapeType = %s >> SHAPE_TYPE_BIT;",
                       inputs.attr(Attrib::kInstanceInfo));
    }

    SkAutoTDelete<Backend> backend(Backend::Create(v->getProgramBuilder(), ip.batchInfo(), inputs));
    backend->init(varyingHandler, v);

    if (hasSingleShapeType) {
        if (kRect_ShapeFlag == ip.batchInfo().fShapeTypes) {
            backend->setupRect(v);
        } else if (kOval_ShapeFlag == ip.batchInfo().fShapeTypes) {
            backend->setupOval(v);
        } else {
            backend->setupRRect(v);
        }
    } else {
        v->codeAppend ("switch (shapeType) {");
        if (ip.batchInfo().fShapeTypes & kRect_ShapeFlag) {
            v->codeAppend ("case RECT_SHAPE_TYPE: {");
            backend->setupRect(v);
            v->codeAppend ("} break;");
        }
        if (ip.batchInfo().fShapeTypes & kOval_ShapeFlag) {
            v->codeAppend ("case OVAL_SHAPE_TYPE: {");
            backend->setupOval(v);
            v->codeAppend ("} break;");
        }
        if (ip.batchInfo().fShapeTypes & kRRect_ShapesMask) {
            v->codeAppend ("default: {");
            backend->setupRRect(v);
            v->codeAppend ("} break;");
        }
        v->codeAppend ("}");
    }

    if (ip.batchInfo().fInnerShapeTypes) {
        bool hasSingleInnerShapeType = SkIsPow2(ip.batchInfo().fInnerShapeTypes);
        if (!hasSingleInnerShapeType) {
            usedShapeTypes |= ip.batchInfo().fInnerShapeTypes;
            v->definef("INNER_SHAPE_TYPE_MASK", "0x%xu", kInnerShapeType_InfoMask);
            v->define("INNER_SHAPE_TYPE_BIT", kInnerShapeType_InfoBit);
            v->codeAppendf("uint innerShapeType = ((%s & INNER_SHAPE_TYPE_MASK) >> "
                                                  "INNER_SHAPE_TYPE_BIT);",
                           inputs.attr(Attrib::kInstanceInfo));
        }
        // Here we take advantage of the fact that outerRect == localRect in recordDRRect.
        v->codeAppendf("vec4 outer = %s;", inputs.attr(Attrib::kLocalRect));
        v->codeAppend ("vec4 inner = ");
        inputs.fetchNextParam();
        v->codeAppend (";");
        // outer2Inner is a transform from shape coords to inner shape coords:
        // e.g. innerShapeCoords = shapeCoords * outer2Inner.xy + outer2Inner.zw
        v->codeAppend ("vec4 outer2Inner = vec4(outer.zw - outer.xy, "
                                               "outer.xy + outer.zw - inner.xy - inner.zw) / "
                                          "(inner.zw - inner.xy).xyxy;");
        v->codeAppendf("vec2 innerShapeCoords = %s * outer2Inner.xy + outer2Inner.zw;",
                       backend->outShapeCoords());

        backend->initInnerShape(varyingHandler, v);

        if (hasSingleInnerShapeType) {
            if (kRect_ShapeFlag == ip.batchInfo().fInnerShapeTypes) {
                backend->setupInnerRect(v);
            } else if (kOval_ShapeFlag == ip.batchInfo().fInnerShapeTypes) {
                backend->setupInnerOval(v);
            } else {
                backend->setupInnerRRect(v);
            }
        } else {
            v->codeAppend("switch (innerShapeType) {");
            if (ip.batchInfo().fInnerShapeTypes & kRect_ShapeFlag) {
                v->codeAppend("case RECT_SHAPE_TYPE: {");
                backend->setupInnerRect(v);
                v->codeAppend("} break;");
            }
            if (ip.batchInfo().fInnerShapeTypes & kOval_ShapeFlag) {
                v->codeAppend("case OVAL_SHAPE_TYPE: {");
                backend->setupInnerOval(v);
                v->codeAppend("} break;");
            }
            if (ip.batchInfo().fInnerShapeTypes & kRRect_ShapesMask) {
                v->codeAppend("default: {");
                backend->setupInnerRRect(v);
                v->codeAppend("} break;");
            }
            v->codeAppend("}");
        }
    }

    if (usedShapeTypes & kRect_ShapeFlag) {
        v->definef("RECT_SHAPE_TYPE", "%du", (int)ShapeType::kRect);
    }
    if (usedShapeTypes & kOval_ShapeFlag) {
        v->definef("OVAL_SHAPE_TYPE", "%du", (int)ShapeType::kOval);
    }

    backend->emitCode(v, f, args.fOutputCoverage, args.fOutputColor);

    const char* localCoords = nullptr;
    if (ip.batchInfo().fUsesLocalCoords) {
        localCoords = "localCoords";
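        // Map shape coords from [-1,+1] to t in [0,1], then interpolate between the localRect's
        // xy and zw corners.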
        v->codeAppendf("vec2 t = 0.5 * (%s + vec2(1));", backend->outShapeCoords());
        v->codeAppendf("vec2 localCoords = (1.0 - t) * %s.xy + t * %s.zw;",
                       inputs.attr(Attrib::kLocalRect), inputs.attr(Attrib::kLocalRect));
    }
    if (ip.batchInfo().fHasLocalMatrix && ip.batchInfo().fHasParams) {
        v->definef("LOCAL_MATRIX_FLAG", "0x%xu", kLocalMatrix_InfoFlag);
        v->codeAppendf("if (0u != (%s & LOCAL_MATRIX_FLAG)) {",
                       inputs.attr(Attrib::kInstanceInfo));
        if (!ip.batchInfo().fUsesLocalCoords) {
            inputs.skipParams(2);
        } else {
            v->codeAppendf(    "mat2x3 localMatrix;");
            v->codeAppend (    "localMatrix[0] = ");
            inputs.fetchNextParam(kVec3f_GrSLType);
            v->codeAppend (    ";");
            v->codeAppend (    "localMatrix[1] = ");
            inputs.fetchNextParam(kVec3f_GrSLType);
            v->codeAppend (    ";");
            v->codeAppend (    "localCoords = (vec3(localCoords, 1) * localMatrix).xy;");
        }
        v->codeAppend("}");
    }

    GrSLType positionType = ip.batchInfo().fHasPerspective ? kVec3f_GrSLType : kVec2f_GrSLType;
    v->codeAppendf("%s deviceCoords = vec3(%s, 1) * shapeMatrix;",
                   GrGLSLTypeString(positionType), backend->outShapeCoords());
    gpArgs->fPositionVar.set(positionType, "deviceCoords");

    this->emitTransforms(v, varyingHandler, uniHandler, gpArgs->fPositionVar, localCoords,
                         args.fTransformsIn, args.fTransformsOut);
}

////////////////////////////////////////////////////////////////////////////////////////////////////

void GLSLInstanceProcessor::Backend::init(GrGLSLVaryingHandler* varyingHandler,
                                          GrGLSLVertexBuilder* v) {
    if (fModifiedShapeCoords) {
        v->codeAppendf("vec2 %s = %s;", fModifiedShapeCoords, fInputs.attr(Attrib::kShapeCoords));
    }

    this->onInit(varyingHandler, v);

    if (!fColor.vsOut()) {
        varyingHandler->addFlatVarying("color", &fColor, kLow_GrSLPrecision);
        v->codeAppendf("%s = %s;", fColor.vsOut(), fInputs.attr(Attrib::kColor));
    }
}

void GLSLInstanceProcessor::Backend::setupRRect(GrGLSLVertexBuilder* v) {
    v->codeAppendf("uvec2 corner = uvec2(%s & 1, (%s >> 1) & 1);",
                   fInputs.attr(Attrib::kVertexAttrs), fInputs.attr(Attrib::kVertexAttrs));
    v->codeAppend ("vec2 cornerSign = vec2(corner) * 2.0 - 1.0;");
    v->codeAppendf("vec2 radii%s;", fNeedsNeighborRadii ? ", neighborRadii" : "");
    v->codeAppend ("mat2 p = ");
    fInputs.fetchNextParam(kMat22f_GrSLType);
    v->codeAppend (";");
    uint8_t types = fBatchInfo.fShapeTypes & kRRect_ShapesMask;
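    // (types & (types - 1)) == 0 means only one rrect variant is present in this batch, so the
    // radii can be set up directly without emitting a runtime switch on shapeType.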
    if (0 == (types & (types - 1))) {
        if (kSimpleRRect_ShapeFlag == types) {
            this->setupSimpleRadii(v);
        } else if (kNinePatch_ShapeFlag == types) {
            this->setupNinePatchRadii(v);
        } else if (kComplexRRect_ShapeFlag == types) {
            this->setupComplexRadii(v);
        }
    } else {
        v->codeAppend("switch (shapeType) {");
        if (types & kSimpleRRect_ShapeFlag) {
            v->definef("SIMPLE_R_RECT_SHAPE_TYPE", "%du", (int)ShapeType::kSimpleRRect);
            v->codeAppend ("case SIMPLE_R_RECT_SHAPE_TYPE: {");
            this->setupSimpleRadii(v);
            v->codeAppend ("} break;");
        }
        if (types & kNinePatch_ShapeFlag) {
            v->definef("NINE_PATCH_SHAPE_TYPE", "%du", (int)ShapeType::kNinePatch);
            v->codeAppend ("case NINE_PATCH_SHAPE_TYPE: {");
            this->setupNinePatchRadii(v);
            v->codeAppend ("} break;");
        }
        if (types & kComplexRRect_ShapeFlag) {
            v->codeAppend ("default: {");
            this->setupComplexRadii(v);
            v->codeAppend ("} break;");
        }
        v->codeAppend("}");
    }

    this->adjustRRectVertices(v);

    if (fArcCoords.vsOut()) {
        v->codeAppendf("%s = (cornerSign * %s + radii - vec2(1)) / radii;",
                       fArcCoords.vsOut(), fModifiedShapeCoords);
    }
    if (fTriangleIsArc.vsOut()) {
        v->codeAppendf("%s = int(all(equal(vec2(1), abs(%s))));",
                       fTriangleIsArc.vsOut(), fInputs.attr(Attrib::kShapeCoords));
    }

    this->onSetupRRect(v);
}

void GLSLInstanceProcessor::Backend::setupSimpleRadii(GrGLSLVertexBuilder* v) {
    if (fNeedsNeighborRadii) {
        v->codeAppend ("neighborRadii = ");
    }
    v->codeAppend("radii = p[0] * 2.0 / p[1];");
}

void GLSLInstanceProcessor::Backend::setupNinePatchRadii(GrGLSLVertexBuilder* v) {
    v->codeAppend("radii = vec2(p[0][corner.x], p[1][corner.y]);");
    if (fNeedsNeighborRadii) {
        v->codeAppend("neighborRadii = vec2(p[0][1u - corner.x], p[1][1u - corner.y]);");
    }
}

void GLSLInstanceProcessor::Backend::setupComplexRadii(GrGLSLVertexBuilder* v) {
    /**
     * The x and y radii of each arc are stored in separate vectors,
     * in the following order:
     *
     *        __x1 _ _ _ x3__
     *
     *    y1 |               | y2
     *
     *       |               |
     *
     *    y3 |__ _ _ _ __| y4
     *           x2     x4
     *
     */
    v->codeAppend("mat2 p2 = ");
    fInputs.fetchNextParam(kMat22f_GrSLType);
    v->codeAppend(";");
    v->codeAppend("radii = vec2(p[corner.x][corner.y], p2[corner.y][corner.x]);");
    if (fNeedsNeighborRadii) {
        v->codeAppend("neighborRadii = vec2(p[1u - corner.x][corner.y], "
                                           "p2[1u - corner.y][corner.x]);");
    }
}

void GLSLInstanceProcessor::Backend::adjustRRectVertices(GrGLSLVertexBuilder* v) {
    // Resize the 4 triangles that arcs are drawn into so they match their corresponding radii.
    // 0.5 is a special value that indicates the edge of an arc triangle.
    v->codeAppendf("if (abs(%s.x) == 0.5)"
                       "%s.x = cornerSign.x * (1.0 - radii.x);",
                   fInputs.attr(Attrib::kShapeCoords), fModifiedShapeCoords);
    v->codeAppendf("if (abs(%s.y) == 0.5) "
                       "%s.y = cornerSign.y * (1.0 - radii.y);",
                   fInputs.attr(Attrib::kShapeCoords), fModifiedShapeCoords);
}

void GLSLInstanceProcessor::Backend::initInnerShape(GrGLSLVaryingHandler* varyingHandler,
                                                    GrGLSLVertexBuilder* v) {
    SkASSERT(!(fBatchInfo.fInnerShapeTypes & (kNinePatch_ShapeFlag | kComplexRRect_ShapeFlag)));

    this->onInitInnerShape(varyingHandler, v);

    if (fInnerShapeCoords.vsOut()) {
        v->codeAppendf("%s = innerShapeCoords;", fInnerShapeCoords.vsOut());
    }
}

void GLSLInstanceProcessor::Backend::setupInnerRRect(GrGLSLVertexBuilder* v) {
    v->codeAppend("mat2 innerP = ");
    fInputs.fetchNextParam(kMat22f_GrSLType);
    v->codeAppend(";");
    v->codeAppend("vec2 innerRadii = innerP[0] * 2.0 / innerP[1];");
    this->onSetupInnerRRect(v);
}

void GLSLInstanceProcessor::Backend::emitCode(GrGLSLVertexBuilder* v, GrGLSLPPFragmentBuilder* f,
                                              const char* outCoverage, const char* outColor) {
    this->onEmitCode(v, f, fModifiesCoverage ? outCoverage : nullptr,
                     fModifiesColor ? outColor : nullptr);
    if (!fModifiesCoverage) {
        // Even though the subclass doesn't use coverage, we are expected to assign some value.
        f->codeAppendf("%s = vec4(1);", outCoverage);
    }
    if (!fModifiesColor) {
        // The subclass didn't assign a value to the output color.
        f->codeAppendf("%s = %s;", outColor, fColor.fsIn());
    }
}

////////////////////////////////////////////////////////////////////////////////////////////////////

class GLSLInstanceProcessor::BackendNonAA : public Backend {
public:
    BackendNonAA(BatchInfo batchInfo, const VertexInputs& inputs)
        : INHERITED(batchInfo, inputs) {
        if (fBatchInfo.fCannotDiscard && !fBatchInfo.isSimpleRects()) {
            fModifiesColor = !fBatchInfo.fCannotTweakAlphaForCoverage;
            fModifiesCoverage = !fModifiesColor;
        }
    }

private:
    void onInit(GrGLSLVaryingHandler*, GrGLSLVertexBuilder*) override;
    void setupRect(GrGLSLVertexBuilder*) override;
    void setupOval(GrGLSLVertexBuilder*) override;

    void onInitInnerShape(GrGLSLVaryingHandler*, GrGLSLVertexBuilder*) override;
    void setupInnerRect(GrGLSLVertexBuilder*) override;
    void setupInnerOval(GrGLSLVertexBuilder*) override;
    void onSetupInnerRRect(GrGLSLVertexBuilder*) override;

    void onEmitCode(GrGLSLVertexBuilder*, GrGLSLPPFragmentBuilder*, const char*,
                    const char*) override;

    typedef Backend INHERITED;
};

void GLSLInstanceProcessor::BackendNonAA::onInit(GrGLSLVaryingHandler* varyingHandler,
                                                 GrGLSLVertexBuilder*) {
    if (kRect_ShapeFlag != fBatchInfo.fShapeTypes) {
        varyingHandler->addFlatVarying("triangleIsArc", &fTriangleIsArc, kHigh_GrSLPrecision);
        varyingHandler->addVarying("arcCoords", &fArcCoords, kMedium_GrSLPrecision);
    }
}

void GLSLInstanceProcessor::BackendNonAA::setupRect(GrGLSLVertexBuilder* v) {
    if (fTriangleIsArc.vsOut()) {
        v->codeAppendf("%s = 0;", fTriangleIsArc.vsOut());
    }
}

void GLSLInstanceProcessor::BackendNonAA::setupOval(GrGLSLVertexBuilder* v) {
    SkASSERT(fArcCoords.vsOut());
    SkASSERT(fTriangleIsArc.vsOut());
    v->codeAppendf("%s = %s;", fArcCoords.vsOut(), this->outShapeCoords());
    v->codeAppendf("%s = %s & 1;", fTriangleIsArc.vsOut(), fInputs.attr(Attrib::kVertexAttrs));
}

void GLSLInstanceProcessor::BackendNonAA::onInitInnerShape(GrGLSLVaryingHandler* varyingHandler,
                                                           GrGLSLVertexBuilder*) {
    varyingHandler->addVarying("innerShapeCoords", &fInnerShapeCoords, kMedium_GrSLPrecision);
    if (kRect_ShapeFlag != fBatchInfo.fInnerShapeTypes &&
        kOval_ShapeFlag != fBatchInfo.fInnerShapeTypes) {
        varyingHandler->addFlatVarying("innerRRect", &fInnerRRect, kMedium_GrSLPrecision);
    }
}

void GLSLInstanceProcessor::BackendNonAA::setupInnerRect(GrGLSLVertexBuilder* v) {
    if (fInnerRRect.vsOut()) {
        v->codeAppendf("%s = vec4(1);", fInnerRRect.vsOut());
    }
}

void GLSLInstanceProcessor::BackendNonAA::setupInnerOval(GrGLSLVertexBuilder* v) {
    if (fInnerRRect.vsOut()) {
        v->codeAppendf("%s = vec4(0, 0, 1, 1);", fInnerRRect.vsOut());
    }
}

void GLSLInstanceProcessor::BackendNonAA::onSetupInnerRRect(GrGLSLVertexBuilder* v) {
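    // innerRRect.xy holds where the arc begins (1 - innerRadii); .zw rescales distances past
    // that point so the arc test in the fragment shader reduces to a unit-circle check.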
    v->codeAppendf("%s = vec4(1.0 - innerRadii, 1.0 / innerRadii);", fInnerRRect.vsOut());
}

void GLSLInstanceProcessor::BackendNonAA::onEmitCode(GrGLSLVertexBuilder*,
                                                     GrGLSLPPFragmentBuilder* f,
                                                     const char* outCoverage,
                                                     const char* outColor) {
    const char* dropFragment = nullptr;
    if (!fBatchInfo.fCannotDiscard) {
        dropFragment = "discard";
    } else if (fModifiesCoverage) {
        f->appendPrecisionModifier(kLow_GrSLPrecision);
        f->codeAppend ("float covered = 1.0;");
        dropFragment = "covered = 0.0";
    } else if (fModifiesColor) {
        f->appendPrecisionModifier(kLow_GrSLPrecision);
        f->codeAppendf("vec4 color = %s;", fColor.fsIn());
        dropFragment = "color = vec4(0)";
    }
    if (fTriangleIsArc.fsIn()) {
        SkASSERT(dropFragment);
        f->codeAppendf("if (%s != 0 && dot(%s, %s) > 1.0) %s;",
                       fTriangleIsArc.fsIn(), fArcCoords.fsIn(), fArcCoords.fsIn(), dropFragment);
    }
    if (fBatchInfo.fInnerShapeTypes) {
        SkASSERT(dropFragment);
        f->codeAppendf("// Inner shape.\n");
        if (kRect_ShapeFlag == fBatchInfo.fInnerShapeTypes) {
            f->codeAppendf("if (all(lessThanEqual(abs(%s), vec2(1)))) %s;",
                           fInnerShapeCoords.fsIn(), dropFragment);
        } else if (kOval_ShapeFlag == fBatchInfo.fInnerShapeTypes) {
            f->codeAppendf("if ((dot(%s, %s) <= 1.0)) %s;",
                           fInnerShapeCoords.fsIn(), fInnerShapeCoords.fsIn(), dropFragment);
        } else {
            f->codeAppendf("if (all(lessThan(abs(%s), vec2(1)))) {", fInnerShapeCoords.fsIn());
            f->codeAppendf(    "vec2 distanceToArcEdge = abs(%s) - %s.xy;",
                               fInnerShapeCoords.fsIn(), fInnerRRect.fsIn());
            f->codeAppend (    "if (any(lessThan(distanceToArcEdge, vec2(0)))) {");
            f->codeAppendf(        "%s;", dropFragment);
            f->codeAppend (    "} else {");
            f->codeAppendf(        "vec2 rrectCoords = distanceToArcEdge * %s.zw;",
                                   fInnerRRect.fsIn());
            f->codeAppend (        "if (dot(rrectCoords, rrectCoords) <= 1.0) {");
            f->codeAppendf(            "%s;", dropFragment);
            f->codeAppend (        "}");
            f->codeAppend (    "}");
            f->codeAppend ("}");
        }
    }
    if (fModifiesCoverage) {
        f->codeAppendf("%s = vec4(covered);", outCoverage);
    } else if (fModifiesColor) {
        f->codeAppendf("%s = color;", outColor);
    }
}

////////////////////////////////////////////////////////////////////////////////////////////////////

class GLSLInstanceProcessor::BackendCoverage : public Backend {
public:
    BackendCoverage(BatchInfo batchInfo, const VertexInputs& inputs)
        : INHERITED(batchInfo, inputs),
          fColorTimesRectCoverage(kVec4f_GrSLType),
          fRectCoverage(kFloat_GrSLType),
          fEllipseCoords(kVec2f_GrSLType),
          fEllipseName(kVec2f_GrSLType),
          fBloatedRadius(kFloat_GrSLType),
          fDistanceToInnerEdge(kVec2f_GrSLType),
          fInnerShapeBloatedHalfSize(kVec2f_GrSLType),
          fInnerEllipseCoords(kVec2f_GrSLType),
          fInnerEllipseName(kVec2f_GrSLType) {
        fShapeIsCircle = !fBatchInfo.fNonSquare && !(fBatchInfo.fShapeTypes & kRRect_ShapesMask);
        fTweakAlphaForCoverage = !fBatchInfo.fCannotTweakAlphaForCoverage &&
                                 !fBatchInfo.fInnerShapeTypes;
        fModifiesCoverage = !fTweakAlphaForCoverage;
        fModifiesColor = fTweakAlphaForCoverage;
        fModifiedShapeCoords = "bloatedShapeCoords";
    }

private:
    void onInit(GrGLSLVaryingHandler*, GrGLSLVertexBuilder*) override;
    void setupRect(GrGLSLVertexBuilder*) override;
    void setupOval(GrGLSLVertexBuilder*) override;
    void adjustRRectVertices(GrGLSLVertexBuilder*) override;
    void onSetupRRect(GrGLSLVertexBuilder*) override;

    void onInitInnerShape(GrGLSLVaryingHandler*, GrGLSLVertexBuilder*) override;
    void setupInnerRect(GrGLSLVertexBuilder*) override;
    void setupInnerOval(GrGLSLVertexBuilder*) override;
    void onSetupInnerRRect(GrGLSLVertexBuilder*) override;

    void onEmitCode(GrGLSLVertexBuilder*, GrGLSLPPFragmentBuilder*, const char* outCoverage,
                    const char* outColor) override;

    void emitRect(GrGLSLPPFragmentBuilder*, const char* outCoverage, const char* outColor);
    void emitCircle(GrGLSLPPFragmentBuilder*, const char* outCoverage);
    void emitArc(GrGLSLPPFragmentBuilder* f, const char* ellipseCoords, const char* ellipseName,
                 bool ellipseCoordsNeedClamp, bool ellipseCoordsMayBeNegative,
                 const char* outCoverage);
    void emitInnerRect(GrGLSLPPFragmentBuilder*, const char* outCoverage);

    GrGLSLVertToFrag fColorTimesRectCoverage;
    GrGLSLVertToFrag fRectCoverage;
    GrGLSLVertToFrag fEllipseCoords;
    GrGLSLVertToFrag fEllipseName;
    GrGLSLVertToFrag fBloatedRadius;
    GrGLSLVertToFrag fDistanceToInnerEdge;
    GrGLSLVertToFrag fInnerShapeBloatedHalfSize;
    GrGLSLVertToFrag fInnerEllipseCoords;
    GrGLSLVertToFrag fInnerEllipseName;
    bool fShapeIsCircle;
    bool fTweakAlphaForCoverage;

    typedef Backend INHERITED;
};

void GLSLInstanceProcessor::BackendCoverage::onInit(GrGLSLVaryingHandler* varyingHandler,
                                                    GrGLSLVertexBuilder* v) {
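    // shapeHalfSize below is the shape's half-extents in device pixels, so 'bloat' works out to
    // half a pixel expressed in normalized shape coords; it is used to build one-pixel AA ramps.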
    v->codeAppend ("mat2 shapeTransposeMatrix = transpose(mat2(shapeMatrix));");
    v->codeAppend ("vec2 shapeHalfSize = vec2(length(shapeTransposeMatrix[0]), "
                                             "length(shapeTransposeMatrix[1]));");
    v->codeAppend ("vec2 bloat = 0.5 / shapeHalfSize;");
    v->codeAppendf("bloatedShapeCoords = %s * (1.0 + bloat);", fInputs.attr(Attrib::kShapeCoords));

    if (kOval_ShapeFlag != fBatchInfo.fShapeTypes) {
        if (fTweakAlphaForCoverage) {
            varyingHandler->addVarying("colorTimesRectCoverage", &fColorTimesRectCoverage,
                                       kLow_GrSLPrecision);
            if (kRect_ShapeFlag == fBatchInfo.fShapeTypes) {
                fColor = fColorTimesRectCoverage;
            }
        } else {
            varyingHandler->addVarying("rectCoverage", &fRectCoverage, kLow_GrSLPrecision);
        }
        v->codeAppend("float rectCoverage = 0.0;");
    }
    if (kRect_ShapeFlag != fBatchInfo.fShapeTypes) {
        varyingHandler->addFlatVarying("triangleIsArc", &fTriangleIsArc, kHigh_GrSLPrecision);
        if (!fShapeIsCircle) {
            varyingHandler->addVarying("ellipseCoords", &fEllipseCoords, kHigh_GrSLPrecision);
            varyingHandler->addFlatVarying("ellipseName", &fEllipseName, kHigh_GrSLPrecision);
        } else {
            varyingHandler->addVarying("circleCoords", &fEllipseCoords, kMedium_GrSLPrecision);
            varyingHandler->addFlatVarying("bloatedRadius", &fBloatedRadius, kMedium_GrSLPrecision);
        }
    }
}

void GLSLInstanceProcessor::BackendCoverage::setupRect(GrGLSLVertexBuilder* v) {
    // Make the border one pixel wide. Inner vs outer is indicated by coordAttrs.
    v->codeAppendf("vec2 rectBloat = (%s != 0) ? bloat : -bloat;",
                   fInputs.attr(Attrib::kVertexAttrs));
    // Here we use the absolute value, because when the rect is thinner than a pixel, this makes it
    // mark the spot where pixel center is within half a pixel of the *opposite* edge. This,
    // combined with the "maxCoverage" logic below gives us mathematically correct coverage even for
    // subpixel rectangles.
    v->codeAppendf("bloatedShapeCoords = %s * abs(vec2(1.0 + rectBloat));",
                   fInputs.attr(Attrib::kShapeCoords));

    // Determine coverage at the vertex. Coverage naturally ramps from 0 to 1 unless the rect is
    // narrower than a pixel.
    v->codeAppend ("float maxCoverage = 4.0 * min(0.5, shapeHalfSize.x) *"
                                             "min(0.5, shapeHalfSize.y);");
    v->codeAppendf("rectCoverage = (%s != 0) ? 0.0 : maxCoverage;",
                   fInputs.attr(Attrib::kVertexAttrs));

    if (fTriangleIsArc.vsOut()) {
        v->codeAppendf("%s = 0;", fTriangleIsArc.vsOut());
    }
}

void GLSLInstanceProcessor::BackendCoverage::setupOval(GrGLSLVertexBuilder* v) {
    // Offset the inner and outer octagons by one pixel. Inner vs outer is indicated by coordAttrs.
    v->codeAppendf("vec2 ovalBloat = (%s != 0) ? bloat : -bloat;",
                   fInputs.attr(Attrib::kVertexAttrs));
    v->codeAppendf("bloatedShapeCoords = %s * max(vec2(1.0 + ovalBloat), vec2(0));",
                   fInputs.attr(Attrib::kShapeCoords));
    v->codeAppendf("%s = bloatedShapeCoords * shapeHalfSize;", fEllipseCoords.vsOut());
    if (fEllipseName.vsOut()) {
        v->codeAppendf("%s = 1.0 / (shapeHalfSize * shapeHalfSize);", fEllipseName.vsOut());
    }
    if (fBloatedRadius.vsOut()) {
        SkASSERT(fShapeIsCircle);
        v->codeAppendf("%s = shapeHalfSize.x + 0.5;", fBloatedRadius.vsOut());
    }
    if (fTriangleIsArc.vsOut()) {
        v->codeAppendf("%s = int(%s != 0);",
                       fTriangleIsArc.vsOut(), fInputs.attr(Attrib::kVertexAttrs));
    }
    if (fColorTimesRectCoverage.vsOut() || fRectCoverage.vsOut()) {
        v->codeAppendf("rectCoverage = 1.0;");
    }
}

void GLSLInstanceProcessor::BackendCoverage::adjustRRectVertices(GrGLSLVertexBuilder* v) {
    // We try to let the AA borders line up with the arc edges on their particular side, but we
    // can't allow them to get closer than one half pixel to the edge or they might overlap with
    // their neighboring border.
    v->codeAppend("vec2 innerEdge = max(1.0 - bloat, vec2(0));");
    v->codeAppend ("vec2 borderEdge = cornerSign * clamp(1.0 - radii, -innerEdge, innerEdge);");
    // 0.5 is a special value that indicates this vertex is an arc edge.
    v->codeAppendf("if (abs(%s.x) == 0.5)"
                       "bloatedShapeCoords.x = borderEdge.x;", fInputs.attr(Attrib::kShapeCoords));
    v->codeAppendf("if (abs(%s.y) == 0.5)"
                       "bloatedShapeCoords.y = borderEdge.y;", fInputs.attr(Attrib::kShapeCoords));

    // Adjust the interior border vertices to make the border one pixel wide. 0.75 is a special
    // value to indicate these points.
    v->codeAppendf("if (abs(%s.x) == 0.75) "
                       "bloatedShapeCoords.x = cornerSign.x * innerEdge.x;",
                   fInputs.attr(Attrib::kShapeCoords));
    v->codeAppendf("if (abs(%s.y) == 0.75) "
                       "bloatedShapeCoords.y = cornerSign.y * innerEdge.y;",
                   fInputs.attr(Attrib::kShapeCoords));
}

void GLSLInstanceProcessor::BackendCoverage::onSetupRRect(GrGLSLVertexBuilder* v) {
    // The geometry is laid out in such a way that rectCoverage will be 0 and 1 on the vertices, but
    // we still need to recompute this value because when the rrect gets thinner than one pixel, the
    // interior edge of the border will necessarily clamp, and we need to match the AA behavior of
    // the arc segments (i.e. distance from bloated edge only; ignoring the fact that the pixel
    // actully has less coverage because it's not completely inside the opposite edge.)
    v->codeAppend("vec2 d = shapeHalfSize + 0.5 - abs(bloatedShapeCoords) * shapeHalfSize;");
    v->codeAppend("rectCoverage = min(d.x, d.y);");

    SkASSERT(!fShapeIsCircle);
    // The AA border does not get closer than one half pixel to the edge of the rect, so to get a
    // smooth transition from flat edge to arc, we don't allow the radii to be smaller than one half
    // pixel. (We don't worry about the transition on the opposite side when a radius is so large
    // that the border clamped on that side.)
    v->codeAppendf("vec2 clampedRadii = max(radii, bloat);");
    v->codeAppendf("%s = (cornerSign * bloatedShapeCoords + clampedRadii - vec2(1)) * "
                       "shapeHalfSize;", fEllipseCoords.vsOut());
    v->codeAppendf("%s = 1.0 / (clampedRadii * clampedRadii * shapeHalfSize * shapeHalfSize);",
                   fEllipseName.vsOut());
}

void GLSLInstanceProcessor::BackendCoverage::onInitInnerShape(GrGLSLVaryingHandler* varyingHandler,
                                                              GrGLSLVertexBuilder* v) {
    v->codeAppend("vec2 innerShapeHalfSize = shapeHalfSize / outer2Inner.xy;");

    if (kOval_ShapeFlag == fBatchInfo.fInnerShapeTypes) {
        varyingHandler->addVarying("innerEllipseCoords", &fInnerEllipseCoords,
                                   kMedium_GrSLPrecision);
        varyingHandler->addFlatVarying("innerEllipseName", &fInnerEllipseName,
                                       kMedium_GrSLPrecision);
    } else {
        varyingHandler->addVarying("distanceToInnerEdge", &fDistanceToInnerEdge,
                                   kMedium_GrSLPrecision);
        varyingHandler->addFlatVarying("innerShapeBloatedHalfSize", &fInnerShapeBloatedHalfSize,
                                       kMedium_GrSLPrecision);
        if (kRect_ShapeFlag != fBatchInfo.fInnerShapeTypes) {
            varyingHandler->addVarying("innerShapeCoords", &fInnerShapeCoords, kHigh_GrSLPrecision);
            varyingHandler->addFlatVarying("innerEllipseName", &fInnerEllipseName,
                                           kMedium_GrSLPrecision);
            varyingHandler->addFlatVarying("innerRRect", &fInnerRRect, kHigh_GrSLPrecision);
        }
    }
}

void GLSLInstanceProcessor::BackendCoverage::setupInnerRect(GrGLSLVertexBuilder* v) {
    if (fInnerRRect.vsOut()) {
        // The fragment shader will generalize every inner shape as a round rect. Since this one
        // is a rect, we simply emit bogus parameters for the round rect (effectively negative
        // radii) that ensure the fragment shader always takes the "emitRect" codepath.
        v->codeAppendf("%s.xy = abs(outer2Inner.xy) * (1.0 + bloat) + abs(outer2Inner.zw);",
                       fInnerRRect.vsOut());
    }
}

void GLSLInstanceProcessor::BackendCoverage::setupInnerOval(GrGLSLVertexBuilder* v) {
    v->codeAppendf("%s = 1.0 / (innerShapeHalfSize * innerShapeHalfSize);",
                   fInnerEllipseName.vsOut());
    if (fInnerEllipseCoords.vsOut()) {
        v->codeAppendf("%s = innerShapeCoords * innerShapeHalfSize;", fInnerEllipseCoords.vsOut());
    }
    if (fInnerRRect.vsOut()) {
        v->codeAppendf("%s = vec4(0, 0, innerShapeHalfSize);", fInnerRRect.vsOut());
    }
}

void GLSLInstanceProcessor::BackendCoverage::onSetupInnerRRect(GrGLSLVertexBuilder* v) {
    // The distance to ellipse formula doesn't work well when the radii are less than half a pixel.
    v->codeAppend ("innerRadii = max(innerRadii, bloat);");
    v->codeAppendf("%s = 1.0 / (innerRadii * innerRadii * innerShapeHalfSize * "
                       "innerShapeHalfSize);",
                   fInnerEllipseName.vsOut());
    v->codeAppendf("%s = vec4(1.0 - innerRadii, innerShapeHalfSize);", fInnerRRect.vsOut());
}

void GLSLInstanceProcessor::BackendCoverage::onEmitCode(GrGLSLVertexBuilder* v,
                                                        GrGLSLPPFragmentBuilder* f,
                                                        const char* outCoverage,
                                                        const char* outColor) {
    if (fColorTimesRectCoverage.vsOut()) {
        SkASSERT(!fRectCoverage.vsOut());
        v->codeAppendf("%s = %s * rectCoverage;",
                       fColorTimesRectCoverage.vsOut(), fInputs.attr(Attrib::kColor));
    }
    if (fRectCoverage.vsOut()) {
        SkASSERT(!fColorTimesRectCoverage.vsOut());
        v->codeAppendf("%s = rectCoverage;", fRectCoverage.vsOut());
    }

    SkString coverage("float coverage");
    if (f->getProgramBuilder()->glslCaps()->usesPrecisionModifiers()) {
        coverage.prependf("lowp ");
    }
    if (fBatchInfo.fInnerShapeTypes || (!fTweakAlphaForCoverage && fTriangleIsArc.fsIn())) {
        f->codeAppendf("%s;", coverage.c_str());
        coverage = "coverage";
    }
    if (fTriangleIsArc.fsIn()) {
        f->codeAppendf("if (%s == 0) {", fTriangleIsArc.fsIn());
        this->emitRect(f, coverage.c_str(), outColor);
        f->codeAppend ("} else {");
        if (fShapeIsCircle) {
            this->emitCircle(f, coverage.c_str());
        } else {
            bool ellipseCoordsMayBeNegative = SkToBool(fBatchInfo.fShapeTypes & kOval_ShapeFlag);
            this->emitArc(f, fEllipseCoords.fsIn(), fEllipseName.fsIn(),
                          true /*ellipseCoordsNeedClamp*/, ellipseCoordsMayBeNegative,
                          coverage.c_str());
        }
        if (fTweakAlphaForCoverage) {
            f->codeAppendf("%s = %s * coverage;", outColor, fColor.fsIn());
        }
        f->codeAppend ("}");
    } else {
        this->emitRect(f, coverage.c_str(), outColor);
    }

    if (fBatchInfo.fInnerShapeTypes) {
        f->codeAppendf("// Inner shape.\n");
        SkString innerCoverageDecl("float innerCoverage");
        if (f->getProgramBuilder()->glslCaps()->usesPrecisionModifiers()) {
            innerCoverageDecl.prependf("lowp ");
        }
        if (kOval_ShapeFlag == fBatchInfo.fInnerShapeTypes) {
            this->emitArc(f, fInnerEllipseCoords.fsIn(), fInnerEllipseName.fsIn(),
                          true /*ellipseCoordsNeedClamp*/, true /*ellipseCoordsMayBeNegative*/,
                          innerCoverageDecl.c_str());
        } else {
            v->codeAppendf("%s = innerShapeCoords * innerShapeHalfSize;",
                           fDistanceToInnerEdge.vsOut());
            v->codeAppendf("%s = innerShapeHalfSize + 0.5;", fInnerShapeBloatedHalfSize.vsOut());

            if (kRect_ShapeFlag == fBatchInfo.fInnerShapeTypes) {
                this->emitInnerRect(f, innerCoverageDecl.c_str());
            } else {
                f->codeAppendf("%s = 0.0;", innerCoverageDecl.c_str());
                f->codeAppendf("vec2 distanceToArcEdge = abs(%s) - %s.xy;",
                               fInnerShapeCoords.fsIn(), fInnerRRect.fsIn());
                f->codeAppend ("if (any(lessThan(distanceToArcEdge, vec2(1e-5)))) {");
                this->emitInnerRect(f, "innerCoverage");
                f->codeAppend ("} else {");
                f->codeAppendf(    "vec2 ellipseCoords = distanceToArcEdge * %s.zw;",
                                   fInnerRRect.fsIn());
                this->emitArc(f, "ellipseCoords", fInnerEllipseName.fsIn(),
                              false /*ellipseCoordsNeedClamp*/,
                              false /*ellipseCoordsMayBeNegative*/, "innerCoverage");
                f->codeAppend ("}");
            }
        }
        f->codeAppendf("%s = vec4(max(coverage - innerCoverage, 0.0));", outCoverage);
    } else if (!fTweakAlphaForCoverage) {
        f->codeAppendf("%s = vec4(coverage);", outCoverage);
    }
}

void GLSLInstanceProcessor::BackendCoverage::emitRect(GrGLSLPPFragmentBuilder* f,
                                                      const char* outCoverage,
                                                      const char* outColor) {
    if (fColorTimesRectCoverage.fsIn()) {
        f->codeAppendf("%s = %s;", outColor, fColorTimesRectCoverage.fsIn());
    } else if (fTweakAlphaForCoverage) {
        // We are drawing just ovals. The interior rect always has 100% coverage.
        f->codeAppendf("%s = %s;", outColor, fColor.fsIn());
    } else if (fRectCoverage.fsIn()) {
        f->codeAppendf("%s = %s;", outCoverage, fRectCoverage.fsIn());
    } else {
        f->codeAppendf("%s = 1.0;", outCoverage);
    }
}

void GLSLInstanceProcessor::BackendCoverage::emitCircle(GrGLSLPPFragmentBuilder* f,
                                                        const char* outCoverage) {
    // TODO: circleCoords = max(circleCoords, 0) if we decide to do this optimization on rrects.
    SkASSERT(!(kRRect_ShapesMask & fBatchInfo.fShapeTypes));
    f->codeAppendf("float distanceToEdge = %s - length(%s);",
                   fBloatedRadius.fsIn(), fEllipseCoords.fsIn());
    f->codeAppendf("%s = clamp(distanceToEdge, 0.0, 1.0);", outCoverage);
}

void GLSLInstanceProcessor::BackendCoverage::emitArc(GrGLSLPPFragmentBuilder* f,
                                                     const char* ellipseCoords,
                                                     const char* ellipseName,
                                                     bool ellipseCoordsNeedClamp,
                                                     bool ellipseCoordsMayBeNegative,
                                                     const char* outCoverage) {
    SkASSERT(!ellipseCoordsMayBeNegative || ellipseCoordsNeedClamp);
    if (ellipseCoordsNeedClamp) {
        // This serves two purposes:
        //  - To restrict the arcs of rounded rects to their positive quadrants.
        //  - To avoid inversesqrt(0) in the ellipse formula.
        if (ellipseCoordsMayBeNegative) {
            f->codeAppendf("vec2 ellipseClampedCoords = max(abs(%s), vec2(1e-4));", ellipseCoords);
        } else {
            f->codeAppendf("vec2 ellipseClampedCoords = max(%s, vec2(1e-4));", ellipseCoords);
        }
        ellipseCoords = "ellipseClampedCoords";
    }
    // ellipseCoords are in pixel space and ellipseName is 1 / rx^2, 1 / ry^2.
    f->codeAppendf("vec2 Z = %s * %s;", ellipseCoords, ellipseName);
    // implicit is the evaluation of (x/rx)^2 + (y/ry)^2 - 1.
    f->codeAppendf("float implicit = dot(Z, %s) - 1.0;", ellipseCoords);
    // gradDot is the squared length of the gradient of the implicit.
    f->codeAppendf("float gradDot = 4.0 * dot(Z, Z);");
    f->appendPrecisionModifier(kLow_GrSLPrecision);
    f->codeAppend ("float approxDist = implicit * inversesqrt(gradDot);");
    f->codeAppendf("%s = clamp(0.5 - approxDist, 0.0, 1.0);", outCoverage);
}

void GLSLInstanceProcessor::BackendCoverage::emitInnerRect(GrGLSLPPFragmentBuilder* f,
                                                           const char* outCoverage) {
    f->appendPrecisionModifier(kLow_GrSLPrecision);
    f->codeAppendf("vec2 c = %s - abs(%s);",
                   fInnerShapeBloatedHalfSize.fsIn(), fDistanceToInnerEdge.fsIn());
    f->codeAppendf("%s = clamp(min(c.x, c.y), 0.0, 1.0);", outCoverage);
}

////////////////////////////////////////////////////////////////////////////////////////////////////

class GLSLInstanceProcessor::BackendMultisample : public Backend {
public:
    BackendMultisample(BatchInfo batchInfo, const VertexInputs& inputs, int effectiveSampleCnt)
        : INHERITED(batchInfo, inputs),
          fEffectiveSampleCnt(effectiveSampleCnt),
          fShapeCoords(kVec2f_GrSLType),
          fShapeInverseMatrix(kMat22f_GrSLType),
          fFragShapeHalfSpan(kVec2f_GrSLType),
          fArcTest(kVec2f_GrSLType),
          fArcInverseMatrix(kMat22f_GrSLType),
          fFragArcHalfSpan(kVec2f_GrSLType),
          fEarlyAccept(kInt_GrSLType),
          fInnerShapeInverseMatrix(kMat22f_GrSLType),
          fFragInnerShapeHalfSpan(kVec2f_GrSLType) {
        fRectTrianglesMaySplit = fBatchInfo.fHasPerspective;
        fNeedsNeighborRadii = this->isMixedSampled() && !fBatchInfo.fHasPerspective;
    }

private:
    bool isMixedSampled() const { return AntialiasMode::kMixedSamples == fBatchInfo.fAntialiasMode; }

    void onInit(GrGLSLVaryingHandler*, GrGLSLVertexBuilder*) override;
    void setupRect(GrGLSLVertexBuilder*) override;
    void setupOval(GrGLSLVertexBuilder*) override;
    void adjustRRectVertices(GrGLSLVertexBuilder*) override;
    void onSetupRRect(GrGLSLVertexBuilder*) override;

    void onInitInnerShape(GrGLSLVaryingHandler*, GrGLSLVertexBuilder*) override;
    void setupInnerRect(GrGLSLVertexBuilder*) override;
    void setupInnerOval(GrGLSLVertexBuilder*) override;
    void onSetupInnerRRect(GrGLSLVertexBuilder*) override;

    void onEmitCode(GrGLSLVertexBuilder*, GrGLSLPPFragmentBuilder*, const char*,
                    const char*) override;

    struct EmitShapeCoords {
        const GrGLSLVarying* fVarying;
        const char* fInverseMatrix;
        const char* fFragHalfSpan;
    };

    struct EmitShapeOpts {
        bool fIsTightGeometry;
        bool fResolveMixedSamples;
        bool fInvertCoverage;
    };

    void emitRect(GrGLSLPPFragmentBuilder*, const EmitShapeCoords&, const EmitShapeOpts&);
    void emitArc(GrGLSLPPFragmentBuilder*, const EmitShapeCoords&, bool coordsMayBeNegative,
                 bool clampCoords, const EmitShapeOpts&);
    void emitSimpleRRect(GrGLSLPPFragmentBuilder*, const EmitShapeCoords&, const char* rrect,
                         const EmitShapeOpts&);
    void interpolateAtSample(GrGLSLPPFragmentBuilder*, const GrGLSLVarying&, const char* sampleIdx,
                             const char* interpolationMatrix);
    void acceptOrRejectWholeFragment(GrGLSLPPFragmentBuilder*, bool inside, const EmitShapeOpts&);
    void acceptCoverageMask(GrGLSLPPFragmentBuilder*, const char* shapeMask, const EmitShapeOpts&,
                            bool maybeSharedEdge = true);

    int fEffectiveSampleCnt;
    bool fRectTrianglesMaySplit;
    GrGLSLVertToFrag fShapeCoords;
    GrGLSLVertToFrag fShapeInverseMatrix;
    GrGLSLVertToFrag fFragShapeHalfSpan;
    GrGLSLVertToFrag fArcTest;
    GrGLSLVertToFrag fArcInverseMatrix;
    GrGLSLVertToFrag fFragArcHalfSpan;
    GrGLSLVertToFrag fEarlyAccept;
    GrGLSLVertToFrag fInnerShapeInverseMatrix;
    GrGLSLVertToFrag fFragInnerShapeHalfSpan;
    SkString fSquareFun;

    typedef Backend INHERITED;
};

void GLSLInstanceProcessor::BackendMultisample::onInit(GrGLSLVaryingHandler* varyingHandler,
                                                       GrGLSLVertexBuilder* v) {
    if (!this->isMixedSampled()) {
        if (kRect_ShapeFlag != fBatchInfo.fShapeTypes) {
            varyingHandler->addFlatVarying("triangleIsArc", &fTriangleIsArc,
                                           kHigh_GrSLPrecision);
            varyingHandler->addVarying("arcCoords", &fArcCoords, kHigh_GrSLPrecision);
            if (!fBatchInfo.fHasPerspective) {
                varyingHandler->addFlatVarying("arcInverseMatrix", &fArcInverseMatrix,
                                               kHigh_GrSLPrecision);
                varyingHandler->addFlatVarying("fragArcHalfSpan", &fFragArcHalfSpan,
                                               kHigh_GrSLPrecision);
            }
        } else if (!fBatchInfo.fInnerShapeTypes) {
            return;
        }
    } else {
        varyingHandler->addVarying("shapeCoords", &fShapeCoords, kHigh_GrSLPrecision);
        if (!fBatchInfo.fHasPerspective) {
            varyingHandler->addFlatVarying("shapeInverseMatrix", &fShapeInverseMatrix,
                                           kHigh_GrSLPrecision);
            varyingHandler->addFlatVarying("fragShapeHalfSpan", &fFragShapeHalfSpan,
                                           kHigh_GrSLPrecision);
        }
        if (fBatchInfo.fShapeTypes & kRRect_ShapesMask) {
            varyingHandler->addVarying("arcCoords", &fArcCoords, kHigh_GrSLPrecision);
            varyingHandler->addVarying("arcTest", &fArcTest, kHigh_GrSLPrecision);
            if (!fBatchInfo.fHasPerspective) {
                varyingHandler->addFlatVarying("arcInverseMatrix", &fArcInverseMatrix,
                                               kHigh_GrSLPrecision);
                varyingHandler->addFlatVarying("fragArcHalfSpan", &fFragArcHalfSpan,
                                               kHigh_GrSLPrecision);
            }
        } else if (fBatchInfo.fShapeTypes & kOval_ShapeFlag) {
            fArcCoords = fShapeCoords;
            fArcInverseMatrix = fShapeInverseMatrix;
            fFragArcHalfSpan = fFragShapeHalfSpan;
            if (fBatchInfo.fShapeTypes & kRect_ShapeFlag) {
                varyingHandler->addFlatVarying("triangleIsArc", &fTriangleIsArc,
                                               kHigh_GrSLPrecision);
            }
        }
        if (kRect_ShapeFlag != fBatchInfo.fShapeTypes) {
            v->definef("SAMPLE_MASK_ALL", "0x%x", (1 << fEffectiveSampleCnt) - 1);
            varyingHandler->addFlatVarying("earlyAccept", &fEarlyAccept, kHigh_GrSLPrecision);
        }
    }
    if (!fBatchInfo.fHasPerspective) {
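        // fragShapeSpan approximates the extent of one fragment measured in normalized shape
        // coordinates, derived from the inverse of the shape matrix.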
1157 v->codeAppend("mat2 shapeInverseMatrix = inverse(mat2(shapeMatrix));"); | |
1158 v->codeAppend("vec2 fragShapeSpan = abs(vec4(shapeInverseMatrix).xz) + " | |
1159 "abs(vec4(shapeInverseMatrix).yw);"); | |
1160 } | |
1161 } | |
1162 | |
1163 void GLSLInstanceProcessor::BackendMultisample::setupRect(GrGLSLVertexBuilder* v
) { | |
1164 if (fShapeCoords.vsOut()) { | |
1165 v->codeAppendf("%s = %s;", fShapeCoords.vsOut(), this->outShapeCoords())
; | |
1166 } | |
1167 if (fShapeInverseMatrix.vsOut()) { | |
1168 v->codeAppendf("%s = shapeInverseMatrix;", fShapeInverseMatrix.vsOut()); | |
1169 } | |
1170 if (fFragShapeHalfSpan.vsOut()) { | |
1171 v->codeAppendf("%s = 0.5 * fragShapeSpan;", fFragShapeHalfSpan.vsOut()); | |
1172 } | |
1173 if (fArcTest.vsOut()) { | |
1174 // Pick a value that is not > 0. | |
1175 v->codeAppendf("%s = vec2(0);", fArcTest.vsOut()); | |
1176 } | |
1177 if (fTriangleIsArc.vsOut()) { | |
1178 v->codeAppendf("%s = 0;", fTriangleIsArc.vsOut()); | |
1179 } | |
1180 if (fEarlyAccept.vsOut()) { | |
1181 v->codeAppendf("%s = SAMPLE_MASK_ALL;", fEarlyAccept.vsOut()); | |
1182 } | |
1183 } | |
1184 | |
1185 void GLSLInstanceProcessor::BackendMultisample::setupOval(GrGLSLVertexBuilder* v
) { | |
1186 v->codeAppendf("%s = abs(%s);", fArcCoords.vsOut(), this->outShapeCoords()); | |
1187 if (fArcInverseMatrix.vsOut()) { | |
1188 v->codeAppendf("vec2 s = sign(%s);", this->outShapeCoords()); | |
1189 v->codeAppendf("%s = shapeInverseMatrix * mat2(s.x, 0, 0 , s.y);", | |
1190 fArcInverseMatrix.vsOut()); | |
1191 } | |
1192 if (fFragArcHalfSpan.vsOut()) { | |
1193 v->codeAppendf("%s = 0.5 * fragShapeSpan;", fFragArcHalfSpan.vsOut()); | |
1194 } | |
1195 if (fArcTest.vsOut()) { | |
1196 // Pick a value that is > 0. | |
1197 v->codeAppendf("%s = vec2(1);", fArcTest.vsOut()); | |
1198 } | |
1199 if (fTriangleIsArc.vsOut()) { | |
1200 if (!this->isMixedSampled()) { | |
1201 v->codeAppendf("%s = %s & 1;", | |
1202 fTriangleIsArc.vsOut(), fInputs.attr(Attrib::kVertexA
ttrs)); | |
1203 } else { | |
1204 v->codeAppendf("%s = 1;", fTriangleIsArc.vsOut()); | |
1205 } | |
1206 } | |
1207 if (fEarlyAccept.vsOut()) { | |
1208 v->codeAppendf("%s = ~%s & SAMPLE_MASK_ALL;", | |
1209 fEarlyAccept.vsOut(), fInputs.attr(Attrib::kVertexAttrs))
; | |
1210 } | |
1211 } | |
1212 | |
1213 void GLSLInstanceProcessor::BackendMultisample::adjustRRectVertices(GrGLSLVertex
Builder* v) { | |
1214 if (!this->isMixedSampled()) { | |
1215 INHERITED::adjustRRectVertices(v); | |
1216 return; | |
1217 } | |
1218 | |
1219 if (!fBatchInfo.fHasPerspective) { | |
1220 // For the mixed samples algorithm it's best to bloat the corner triangl
es a bit so that | |
1221 // more of the pixels that cross into the arc region are completely insi
de the shared edges. | |
1222 // We also snap to a regular rect if the radii shrink smaller than a pix
el. | |
1223 v->codeAppend ("vec2 midpt = 0.5 * (neighborRadii - radii);"); | |
1224 v->codeAppend ("vec2 cornerSize = any(lessThan(radii, fragShapeSpan)) ?
" | |
1225 "vec2(0) : min(radii + 0.5 * fragShapeSpan, 1.0 - mid
pt);"); | |
1226 } else { | |
1227 // TODO: We could still bloat the corner triangle in the perspective cas
e; we would just | |
1228 // need to find the screen-space derivative of shape coords at this part
icular point. | |
1229 v->codeAppend ("vec2 cornerSize = any(lessThan(radii, vec2(1e-3))) ? vec
2(0) : radii;"); | |
1230 } | |
1231 | |
1232 v->codeAppendf("if (abs(%s.x) == 0.5)" | |
1233 "%s.x = cornerSign.x * (1.0 - cornerSize.x);", | |
1234 fInputs.attr(Attrib::kShapeCoords), fModifiedShapeCoords)
; | |
1235 v->codeAppendf("if (abs(%s.y) == 0.5)" | |
1236 "%s.y = cornerSign.y * (1.0 - cornerSize.y);", | |
1237 fInputs.attr(Attrib::kShapeCoords), fModifiedShapeCoords)
; | |
1238 } | |
1239 | |
1240 void GLSLInstanceProcessor::BackendMultisample::onSetupRRect(GrGLSLVertexBuilder
* v) { | |
1241 if (fShapeCoords.vsOut()) { | |
1242 v->codeAppendf("%s = %s;", fShapeCoords.vsOut(), this->outShapeCoords())
; | |
1243 } | |
1244 if (fShapeInverseMatrix.vsOut()) { | |
1245 v->codeAppendf("%s = shapeInverseMatrix;", fShapeInverseMatrix.vsOut()); | |
1246 } | |
1247 if (fFragShapeHalfSpan.vsOut()) { | |
1248 v->codeAppendf("%s = 0.5 * fragShapeSpan;", fFragShapeHalfSpan.vsOut()); | |
1249 } | |
1250 if (fArcInverseMatrix.vsOut()) { | |
1251 v->codeAppend ("vec2 s = cornerSign / radii;"); | |
1252 v->codeAppendf("%s = shapeInverseMatrix * mat2(s.x, 0, 0, s.y);", | |
1253 fArcInverseMatrix.vsOut()); | |
1254 } | |
1255 if (fFragArcHalfSpan.vsOut()) { | |
1256 v->codeAppendf("%s = 0.5 * (abs(vec4(%s).xz) + abs(vec4(%s).yw));", | |
1257 fFragArcHalfSpan.vsOut(), fArcInverseMatrix.vsOut(), | |
1258 fArcInverseMatrix.vsOut()); | |
1259 } | |
1260 if (fArcTest.vsOut()) { | |
1261         // The interior triangles are laid out as a fan. fArcTest holds the two | |
1262         // distances from a point within a fan triangle to that triangle's shared edges. fArcTest is used to check if a | |
1263 // fragment is too close to either shared edge, in which case we point s
ample the shape as a | |
1264 // rect at that point in order to guarantee the mixed samples discard lo
gic works correctly. | |
1265 v->codeAppendf("%s = (cornerSize == vec2(0)) ? vec2(0) : " | |
1266 "cornerSign * %s * mat2(1, cornerSize.x - 1.0, cornerSize
.y - 1.0, 1);", | |
1267 fArcTest.vsOut(), fModifiedShapeCoords); | |
1268 if (!fBatchInfo.fHasPerspective) { | |
1269 // Shift the point at which distances to edges are measured from the
center of the pixel | |
1270 // to the corner. This way the sign of fArcTest will quickly tell us
whether a pixel | |
1271 // is completely inside the shared edge. Perspective mode will accom
plish this same task | |
1272 // by finding the derivatives in the fragment shader. | |
1273 v->codeAppendf("%s -= 0.5 * (fragShapeSpan.yx * abs(radii - 1.0) + f
ragShapeSpan);", | |
1274 fArcTest.vsOut()); | |
1275 } | |
1276 } | |
1277 if (fEarlyAccept.vsOut()) { | |
1278 SkASSERT(this->isMixedSampled()); | |
1279 v->codeAppendf("%s = all(equal(vec2(1), abs(%s))) ? 0 : SAMPLE_MASK_ALL;
", | |
1280 fEarlyAccept.vsOut(), fInputs.attr(Attrib::kShapeCoords))
; | |
1281 } | |
1282 } | |
1283 | |
1284 void | |
1285 GLSLInstanceProcessor::BackendMultisample::onInitInnerShape(GrGLSLVaryingHandler
* varyingHandler, | |
1286 GrGLSLVertexBuilder*
v) { | |
1287 varyingHandler->addVarying("innerShapeCoords", &fInnerShapeCoords, kHigh_GrS
LPrecision); | |
1288 if (kOval_ShapeFlag != fBatchInfo.fInnerShapeTypes && | |
1289 kRect_ShapeFlag != fBatchInfo.fInnerShapeTypes) { | |
1290 varyingHandler->addFlatVarying("innerRRect", &fInnerRRect, kHigh_GrSLPre
cision); | |
1291 } | |
1292 if (!fBatchInfo.fHasPerspective) { | |
1293 varyingHandler->addFlatVarying("innerShapeInverseMatrix", &fInnerShapeIn
verseMatrix, | |
1294 kHigh_GrSLPrecision); | |
1295 v->codeAppendf("%s = shapeInverseMatrix * mat2(outer2Inner.x, 0, 0, oute
r2Inner.y);", | |
1296 fInnerShapeInverseMatrix.vsOut()); | |
1297 varyingHandler->addFlatVarying("fragInnerShapeHalfSpan", &fFragInnerShap
eHalfSpan, | |
1298 kHigh_GrSLPrecision); | |
1299 v->codeAppendf("%s = 0.5 * fragShapeSpan * outer2Inner.xy;", | |
1300 fFragInnerShapeHalfSpan.vsOut()); | |
1301 } | |
1302 } | |
1303 | |
1304 void GLSLInstanceProcessor::BackendMultisample::setupInnerRect(GrGLSLVertexBuild
er* v) { | |
1305 if (fInnerRRect.vsOut()) { | |
1306 // The fragment shader will generalize every inner shape as a round rect
. Since this one | |
1307 // is a rect, we simply emit bogus parameters for the round rect (negati
ve radii) that | |
1308 // ensure the fragment shader always takes the "sample as rect" codepath
. | |
1309 v->codeAppendf("%s = vec4(2.0 * (inner.zw - inner.xy) / (outer.zw - oute
r.xy), vec2(0));", | |
1310 fInnerRRect.vsOut()); | |
1311 } | |
1312 } | |
1313 | |
1314 void GLSLInstanceProcessor::BackendMultisample::setupInnerOval(GrGLSLVertexBuild
er* v) { | |
1315 if (fInnerRRect.vsOut()) { | |
1316 v->codeAppendf("%s = vec4(0, 0, 1, 1);", fInnerRRect.vsOut()); | |
1317 } | |
1318 } | |
1319 | |
1320 void GLSLInstanceProcessor::BackendMultisample::onSetupInnerRRect(GrGLSLVertexBu
ilder* v) { | |
1321 // Avoid numeric instability by not allowing the inner radii to get smaller
than 1/10th pixel. | |
1322 if (fFragInnerShapeHalfSpan.vsOut()) { | |
1323 v->codeAppendf("innerRadii = max(innerRadii, 2e-1 * %s);", fFragInnerSha
peHalfSpan.vsOut()); | |
1324 } else { | |
1325 v->codeAppend ("innerRadii = max(innerRadii, vec2(1e-4));"); | |
1326 } | |
1327 v->codeAppendf("%s = vec4(1.0 - innerRadii, 1.0 / innerRadii);", fInnerRRect
.vsOut()); | |
1328 } | |
1329 | |
1330 void GLSLInstanceProcessor::BackendMultisample::onEmitCode(GrGLSLVertexBuilder*, | |
1331 GrGLSLPPFragmentBuild
er* f, | |
1332 const char*, const ch
ar*) { | |
1333 f->define("SAMPLE_COUNT", fEffectiveSampleCnt); | |
1334 if (this->isMixedSampled()) { | |
1335 f->definef("SAMPLE_MASK_ALL", "0x%x", (1 << fEffectiveSampleCnt) - 1); | |
1336 f->definef("SAMPLE_MASK_MSB", "0x%x", 1 << (fEffectiveSampleCnt - 1)); | |
1337 } | |
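    // (With 8 effective samples, for example, the defines above become SAMPLE_MASK_ALL == 0xff
    // and SAMPLE_MASK_MSB == 0x80.)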
1338 | |
1339 if (kRect_ShapeFlag != (fBatchInfo.fShapeTypes | fBatchInfo.fInnerShapeTypes
)) { | |
1340 GrGLSLShaderVar x("x", kVec2f_GrSLType, GrGLSLShaderVar::kNonArray, kHig
h_GrSLPrecision); | |
1341 f->emitFunction(kFloat_GrSLType, "square", 1, &x, "return dot(x, x);", &
fSquareFun); | |
1342 } | |
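    // square(pt) expands to dot(pt, pt), i.e. the squared length of pt, so the
    // "square(...) < 1.0" comparisons below test containment in the unit circle.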
1343 | |
1344 EmitShapeCoords shapeCoords; | |
1345 shapeCoords.fVarying = &fShapeCoords; | |
1346 shapeCoords.fInverseMatrix = fShapeInverseMatrix.fsIn(); | |
1347 shapeCoords.fFragHalfSpan = fFragShapeHalfSpan.fsIn(); | |
1348 | |
1349 EmitShapeCoords arcCoords; | |
1350 arcCoords.fVarying = &fArcCoords; | |
1351 arcCoords.fInverseMatrix = fArcInverseMatrix.fsIn(); | |
1352 arcCoords.fFragHalfSpan = fFragArcHalfSpan.fsIn(); | |
1353 bool clampArcCoords = this->isMixedSampled() && (fBatchInfo.fShapeTypes & kR
Rect_ShapesMask); | |
1354 | |
1355 EmitShapeOpts opts; | |
1356 opts.fIsTightGeometry = true; | |
1357 opts.fResolveMixedSamples = this->isMixedSampled(); | |
1358 opts.fInvertCoverage = false; | |
1359 | |
1360 if (fBatchInfo.fHasPerspective && fBatchInfo.fInnerShapeTypes) { | |
1361 // This determines if the fragment should consider the inner shape in it
s sample mask. | |
1362 // We take the derivative early in case discards may occur before we get
to the inner shape. | |
1363 f->appendPrecisionModifier(kHigh_GrSLPrecision); | |
1364 f->codeAppendf("vec2 fragInnerShapeApproxHalfSpan = 0.5 * fwidth(%s);", | |
1365 fInnerShapeCoords.fsIn()); | |
1366 } | |
1367 | |
1368 if (!this->isMixedSampled()) { | |
1369 SkASSERT(!fArcTest.fsIn()); | |
1370 if (fTriangleIsArc.fsIn()) { | |
1371 f->codeAppendf("if (%s != 0) {", fTriangleIsArc.fsIn()); | |
1372 this->emitArc(f, arcCoords, false, clampArcCoords, opts); | |
1373 | |
1374 f->codeAppend ("}"); | |
1375 } | |
1376 } else { | |
1377 const char* arcTest = fArcTest.fsIn(); | |
1378 SkASSERT(arcTest); | |
1379 if (fBatchInfo.fHasPerspective) { | |
1380             // The non-perspective version accounts for fwidth() in the vertex shader. | |
1381 // We make sure to take the derivative here, before a neighbor pixel
may early accept. | |
1382 f->enableFeature(GrGLSLPPFragmentBuilder::kStandardDerivatives_GLSLF
eature); | |
1383 f->appendPrecisionModifier(kHigh_GrSLPrecision); | |
1384 f->codeAppendf("vec2 arcTest = %s - 0.5 * fwidth(%s);", | |
1385 fArcTest.fsIn(), fArcTest.fsIn()); | |
1386 arcTest = "arcTest"; | |
1387 } | |
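        // Early accept: if this fragment's incoming sample mask already equals the expected
        // mask, the pixel is fully covered by interior geometry, so resolve its coverage
        // directly and skip the arc evaluation below.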
1388 const char* earlyAccept = fEarlyAccept.fsIn() ? fEarlyAccept.fsIn() : "S
AMPLE_MASK_ALL"; | |
1389 f->codeAppendf("if (gl_SampleMaskIn[0] == %s) {", earlyAccept); | |
1390 f->overrideSampleCoverage(earlyAccept); | |
1391 f->codeAppend ("} else {"); | |
1392 if (arcTest) { | |
1393             // At this point, if the sample mask is all set, it means we are inside an arc triangle. | |
1394 f->codeAppendf("if (gl_SampleMaskIn[0] == SAMPLE_MASK_ALL || " | |
1395 "all(greaterThan(%s, vec2(0)))) {", arcTest); | |
1396 this->emitArc(f, arcCoords, false, clampArcCoords, opts); | |
1397 f->codeAppend ("} else {"); | |
1398 this->emitRect(f, shapeCoords, opts); | |
1399 f->codeAppend ("}"); | |
1400 } else if (fTriangleIsArc.fsIn()) { | |
1401 f->codeAppendf("if (%s == 0) {", fTriangleIsArc.fsIn()); | |
1402 this->emitRect(f, shapeCoords, opts); | |
1403 f->codeAppend ("} else {"); | |
1404 this->emitArc(f, arcCoords, false, clampArcCoords, opts); | |
1405 f->codeAppend ("}"); | |
1406 } else if (fBatchInfo.fShapeTypes == kOval_ShapeFlag) { | |
1407 this->emitArc(f, arcCoords, false, clampArcCoords, opts); | |
1408 } else { | |
1409 SkASSERT(fBatchInfo.fShapeTypes == kRect_ShapeFlag); | |
1410 this->emitRect(f, shapeCoords, opts); | |
1411 } | |
1412 f->codeAppend ("}"); | |
1413 } | |
1414 | |
1415 if (fBatchInfo.fInnerShapeTypes) { | |
1416 f->codeAppendf("// Inner shape.\n"); | |
1417 | |
1418 EmitShapeCoords innerShapeCoords; | |
1419 innerShapeCoords.fVarying = &fInnerShapeCoords; | |
1420 if (!fBatchInfo.fHasPerspective) { | |
1421 innerShapeCoords.fInverseMatrix = fInnerShapeInverseMatrix.fsIn(); | |
1422 innerShapeCoords.fFragHalfSpan = fFragInnerShapeHalfSpan.fsIn(); | |
1423 } | |
1424 | |
1425 EmitShapeOpts innerOpts; | |
1426 innerOpts.fIsTightGeometry = false; | |
1427 innerOpts.fResolveMixedSamples = false; // Mixed samples are resolved in
the outer shape. | |
1428 innerOpts.fInvertCoverage = true; | |
1429 | |
1430 if (kOval_ShapeFlag == fBatchInfo.fInnerShapeTypes) { | |
1431 this->emitArc(f, innerShapeCoords, true, false, innerOpts); | |
1432 } else { | |
1433 f->codeAppendf("if (all(lessThan(abs(%s), 1.0 + %s))) {", fInnerShap
eCoords.fsIn(), | |
1434 !fBatchInfo.fHasPerspective ? innerShapeCoords.fFragH
alfSpan | |
1435 : "fragInnerShapeApproxHa
lfSpan"); // Above. | |
1436 if (kRect_ShapeFlag == fBatchInfo.fInnerShapeTypes) { | |
1437 this->emitRect(f, innerShapeCoords, innerOpts); | |
1438 } else { | |
1439 this->emitSimpleRRect(f, innerShapeCoords, fInnerRRect.fsIn(), i
nnerOpts); | |
1440 } | |
1441 f->codeAppend ("}"); | |
1442 } | |
1443 } | |
1444 } | |
1445 | |
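// For reference, the per-sample loop that emitRect() builds below emits fragment code roughly
// like the following sketch (the interpolation expression shown is illustrative; the real one
// depends on whether an inverse matrix and fragment half-span are available):
//
//     int rectMask = 0;
//     for (int i = 0; i < SAMPLE_COUNT; i++) {
//         highp vec2 pt = <shape coords interpolated at sample i>;
//         if (all(lessThan(abs(pt), vec2(1)))) rectMask |= (1 << i);
//     }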
1446 void GLSLInstanceProcessor::BackendMultisample::emitRect(GrGLSLPPFragmentBuilder
* f, | |
1447 const EmitShapeCoords&
coords, | |
1448 const EmitShapeOpts& op
ts) { | |
1449 // Full MSAA doesn't need to do anything to draw a rect. | |
1450 SkASSERT(!opts.fIsTightGeometry || opts.fResolveMixedSamples); | |
1451 if (coords.fFragHalfSpan) { | |
1452 f->codeAppendf("if (all(lessThanEqual(abs(%s), 1.0 - %s))) {", | |
1453 coords.fVarying->fsIn(), coords.fFragHalfSpan); | |
1454 // The entire pixel is inside the rect. | |
1455 this->acceptOrRejectWholeFragment(f, true, opts); | |
1456 f->codeAppend ("} else "); | |
1457 if (opts.fIsTightGeometry && !fRectTrianglesMaySplit) { | |
1458 f->codeAppendf("if (any(lessThan(abs(%s), 1.0 - %s))) {", | |
1459 coords.fVarying->fsIn(), coords.fFragHalfSpan); | |
1460 // The pixel falls on an edge of the rectangle and is known to not b
e on a shared edge. | |
1461 this->acceptCoverageMask(f, "gl_SampleMaskIn[0]", opts, false); | |
1462 f->codeAppend ("} else"); | |
1463 } | |
1464 f->codeAppend ("{"); | |
1465 } | |
1466 f->codeAppend ("int rectMask = 0;"); | |
1467 f->codeAppend ("for (int i = 0; i < SAMPLE_COUNT; i++) {"); | |
1468 f->appendPrecisionModifier(kHigh_GrSLPrecision); | |
1469 f->codeAppend ( "vec2 pt = "); | |
1470 this->interpolateAtSample(f, *coords.fVarying, "i", coords.fInverseMatrix); | |
1471 f->codeAppend ( ";"); | |
1472 f->codeAppend ( "if (all(lessThan(abs(pt), vec2(1)))) rectMask |= (1 << i
);"); | |
1473 f->codeAppend ("}"); | |
1474 this->acceptCoverageMask(f, "rectMask", opts); | |
1475 if (coords.fFragHalfSpan) { | |
1476 f->codeAppend ("}"); | |
1477 } | |
1478 } | |
1479 | |
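// emitArc() classifies each pixel conservatively first: if |coords| +/- the fragment half-span
// places the whole pixel inside (or outside) the unit circle, the entire fragment is accepted
// (or rejected) at once; only ambiguous pixels fall through to the per-sample loop.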
1480 void GLSLInstanceProcessor::BackendMultisample::emitArc(GrGLSLPPFragmentBuilder*
f, | |
1481 const EmitShapeCoords& c
oords, | |
1482 bool coordsMayBeNegative
, bool clampCoords, | |
1483 const EmitShapeOpts& opt
s) { | |
1484 if (coords.fFragHalfSpan) { | |
1485 SkString absArcCoords; | |
1486 absArcCoords.printf(coordsMayBeNegative ? "abs(%s)" : "%s", coords.fVary
ing->fsIn()); | |
1487 if (clampCoords) { | |
1488 f->codeAppendf("if (%s(max(%s + %s, vec2(0))) < 1.0) {", | |
1489 fSquareFun.c_str(), absArcCoords.c_str(), coords.fFra
gHalfSpan); | |
1490 } else { | |
1491 f->codeAppendf("if (%s(%s + %s) < 1.0) {", | |
1492 fSquareFun.c_str(), absArcCoords.c_str(), coords.fFra
gHalfSpan); | |
1493 } | |
1494 // The entire pixel is inside the arc. | |
1495 this->acceptOrRejectWholeFragment(f, true, opts); | |
1496 f->codeAppendf("} else if (%s(max(%s - %s, vec2(0))) >= 1.0) {", | |
1497 fSquareFun.c_str(), absArcCoords.c_str(), coords.fFragHal
fSpan); | |
1498 // The entire pixel is outside the arc. | |
1499 this->acceptOrRejectWholeFragment(f, false, opts); | |
1500 f->codeAppend ("} else {"); | |
1501 } | |
1502 f->codeAppend ( "int arcMask = 0;"); | |
1503 f->codeAppend ( "for (int i = 0; i < SAMPLE_COUNT; i++) {"); | |
1504 f->appendPrecisionModifier(kHigh_GrSLPrecision); | |
1505 f->codeAppend ( "vec2 pt = "); | |
1506 this->interpolateAtSample(f, *coords.fVarying, "i", coords.fInverseMatrix); | |
1507 f->codeAppend ( ";"); | |
1508 if (clampCoords) { | |
1509 SkASSERT(!coordsMayBeNegative); | |
1510 f->codeAppend ( "pt = max(pt, vec2(0));"); | |
1511 } | |
1512 f->codeAppendf( "if (%s(pt) < 1.0) arcMask |= (1 << i);", fSquareFun.
c_str()); | |
1513 f->codeAppend ( "}"); | |
1514 this->acceptCoverageMask(f, "arcMask", opts); | |
1515 if (coords.fFragHalfSpan) { | |
1516 f->codeAppend ("}"); | |
1517 } | |
1518 } | |
1519 | |
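// emitSimpleRRect() splits the round rect into two regimes. Away from the corners, where
// abs(coords) falls within rrect.xy on some axis, the shape is sampled as a rect; corner
// samples are rescaled by rrect.zw and tested against the unit circle like an arc. (For a
// true inner round rect, rrect is vec4(1 - innerRadii, 1 / innerRadii); see onSetupInnerRRect.)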
1520 void GLSLInstanceProcessor::BackendMultisample::emitSimpleRRect(GrGLSLPPFragment
Builder* f, | |
1521 const EmitShapeC
oords& coords, | |
1522 const char* rrec
t, | |
1523 const EmitShapeO
pts& opts) { | |
1524 f->appendPrecisionModifier(kHigh_GrSLPrecision); | |
1525 f->codeAppendf("vec2 distanceToArcEdge = abs(%s) - %s.xy;", coords.fVarying-
>fsIn(), rrect); | |
1526 f->codeAppend ("if (any(lessThan(distanceToArcEdge, vec2(0)))) {"); | |
1527 this->emitRect(f, coords, opts); | |
1528 f->codeAppend ("} else {"); | |
1529 if (coords.fInverseMatrix && coords.fFragHalfSpan) { | |
1530 f->appendPrecisionModifier(kHigh_GrSLPrecision); | |
1531 f->codeAppendf("vec2 rrectCoords = distanceToArcEdge * %s.zw;", rrect); | |
1532 f->appendPrecisionModifier(kHigh_GrSLPrecision); | |
1533 f->codeAppendf("vec2 fragRRectHalfSpan = %s * %s.zw;", coords.fFragHalfS
pan, rrect); | |
1534 f->codeAppendf("if (%s(rrectCoords + fragRRectHalfSpan) <= 1.0) {", fSqu
areFun.c_str()); | |
1535 // The entire pixel is inside the round rect. | |
1536 this->acceptOrRejectWholeFragment(f, true, opts); | |
1537 f->codeAppendf("} else if (%s(max(rrectCoords - fragRRectHalfSpan, vec2(
0))) >= 1.0) {", | |
1538 fSquareFun.c_str()); | |
1539 // The entire pixel is outside the round rect. | |
1540 this->acceptOrRejectWholeFragment(f, false, opts); | |
1541 f->codeAppend ("} else {"); | |
1542 f->appendPrecisionModifier(kHigh_GrSLPrecision); | |
1543 f->codeAppendf( "vec2 s = %s.zw * sign(%s);", rrect, coords.fVarying-
>fsIn()); | |
1544 f->appendPrecisionModifier(kHigh_GrSLPrecision); | |
1545 f->codeAppendf( "mat2 innerRRectInverseMatrix = %s * mat2(s.x, 0, 0,
s.y);", | |
1546 coords.fInverseMatrix); | |
1547 f->appendPrecisionModifier(kHigh_GrSLPrecision); | |
1548 f->codeAppend ( "int rrectMask = 0;"); | |
1549 f->codeAppend ( "for (int i = 0; i < SAMPLE_COUNT; i++) {"); | |
1550 f->appendPrecisionModifier(kHigh_GrSLPrecision); | |
1551 f->codeAppend ( "vec2 pt = rrectCoords + "); | |
1552 f->appendOffsetToSample("i", GrGLSLFPFragmentBuilder::kSkiaDevice_Coordi
nates); | |
1553 f->codeAppend ( "* innerRRectInverseMatrix;"); | |
1554 f->codeAppendf( "if (%s(max(pt, vec2(0))) < 1.0) rrectMask |= (1
<< i);", | |
1555 fSquareFun.c_str()); | |
1556 f->codeAppend ( "}"); | |
1557 this->acceptCoverageMask(f, "rrectMask", opts); | |
1558 f->codeAppend ("}"); | |
1559 } else { | |
1560 f->codeAppend ("int rrectMask = 0;"); | |
1561 f->codeAppend ("for (int i = 0; i < SAMPLE_COUNT; i++) {"); | |
1562 f->appendPrecisionModifier(kHigh_GrSLPrecision); | |
1563 f->codeAppend ( "vec2 shapePt = "); | |
1564 this->interpolateAtSample(f, *coords.fVarying, "i", nullptr); | |
1565 f->codeAppend ( ";"); | |
1566 f->appendPrecisionModifier(kHigh_GrSLPrecision); | |
1567 f->codeAppendf( "vec2 rrectPt = max(abs(shapePt) - %s.xy, vec2(0)) *
%s.zw;", | |
1568 rrect, rrect); | |
1569 f->codeAppendf( "if (%s(rrectPt) < 1.0) rrectMask |= (1 << i);", fSqu
areFun.c_str()); | |
1570 f->codeAppend ("}"); | |
1571 this->acceptCoverageMask(f, "rrectMask", opts); | |
1572 } | |
1573 f->codeAppend ("}"); | |
1574 } | |
1575 | |
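// interpolateAtSample() emits one of two forms: given an interpolation matrix it offsets the
// varying manually using device-space sample offsets, otherwise it relies on the GLSL
// multisample-interpolation extension and interpolateAtOffset() with window-space offsets.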
1576 void GLSLInstanceProcessor::BackendMultisample::interpolateAtSample(GrGLSLPPFrag
mentBuilder* f, | |
1577 const GrGLSLVa
rying& varying, | |
1578 const char* sa
mpleIdx, | |
1579 const char* in
terpolationMatrix) { | |
1580 if (interpolationMatrix) { | |
1581 f->codeAppendf("(%s + ", varying.fsIn()); | |
1582 f->appendOffsetToSample(sampleIdx, GrGLSLFPFragmentBuilder::kSkiaDevice_
Coordinates); | |
1583 f->codeAppendf(" * %s)", interpolationMatrix); | |
1584 } else { | |
1585 SkAssertResult( | |
1586 f->enableFeature(GrGLSLFragmentBuilder::kMultisampleInterpolation_GL
SLFeature)); | |
1587 f->codeAppendf("interpolateAtOffset(%s, ", varying.fsIn()); | |
1588 f->appendOffsetToSample(sampleIdx, GrGLSLFPFragmentBuilder::kGLSLWindow_
Coordinates); | |
1589 f->codeAppend(")"); | |
1590 } | |
1591 } | |
1592 | |
1593 void | |
1594 GLSLInstanceProcessor::BackendMultisample::acceptOrRejectWholeFragment(GrGLSLPPF
ragmentBuilder* f, | |
1595 bool insi
de, | |
1596 const Emi
tShapeOpts& opts) { | |
1597 if (inside != opts.fInvertCoverage) { // Accept the entire fragment. | |
1598 if (opts.fResolveMixedSamples) { | |
1599 // This is a mixed sampled fragment in the interior of the shape. Re
assign 100% coverage | |
1600 // to one fragment, and drop all other fragments that may fall on th
is same pixel. Since | |
1601             // our geometry is watertight and non-overlapping, we can take advantage of the | |
1602 // properties that (1) the incoming sample masks will be disjoint ac
ross fragments that | |
1603 // fall on a common pixel, and (2) since the entire fragment is insi
de the shape, each | |
1604 // sample's corresponding bit will be set in the incoming sample mas
k of exactly one | |
1605 // fragment. | |
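            // (With 8 samples, for instance, only the fragment whose incoming mask contains
            // the 0x80 bit survives, and overrideSampleCoverage("-1") then assigns it all
            // eight sample bits.)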
1606 f->codeAppend("if ((gl_SampleMaskIn[0] & SAMPLE_MASK_MSB) == 0) {"); | |
1607 // Drop this fragment. | |
1608 if (!fBatchInfo.fCannotDiscard) { | |
1609 f->codeAppend("discard;"); | |
1610 } else { | |
1611 f->overrideSampleCoverage("0"); | |
1612 } | |
1613 f->codeAppend("} else {"); | |
1614 // Override the lone surviving fragment to full coverage. | |
1615 f->overrideSampleCoverage("-1"); | |
1616 f->codeAppend("}"); | |
1617 } | |
1618 } else { // Reject the entire fragment. | |
1619 if (!fBatchInfo.fCannotDiscard) { | |
1620 f->codeAppend("discard;"); | |
1621 } else if (opts.fResolveMixedSamples) { | |
1622 f->overrideSampleCoverage("0"); | |
1623 } else { | |
1624 f->maskSampleCoverage("0"); | |
1625 } | |
1626 } | |
1627 } | |
1628 | |
1629 void GLSLInstanceProcessor::BackendMultisample::acceptCoverageMask(GrGLSLPPFragm
entBuilder* f, | |
1630 const char* s
hapeMask, | |
1631 const EmitSha
peOpts& opts, | |
1632 bool maybeSha
redEdge) { | |
1633 if (opts.fResolveMixedSamples) { | |
1634 if (maybeSharedEdge) { | |
1635 // This is a mixed sampled fragment, potentially on the outer edge o
f the shape, with | |
1636 // only partial shape coverage. Override the coverage of one fragmen
t to "shapeMask", | |
1637 // and drop all other fragments that may fall on this same pixel. Si
nce our geometry is | |
1638             // watertight, non-overlapping, and completely contains the shape, this means that each | |
1639 // "on" bit from shapeMask is guaranteed to be set in the incoming s
ample mask of one, | |
1640 // and only one, fragment that falls on this same pixel. | |
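            // (For example, if shapeMask == 0x0f and this fragment's incoming mask is 0xf0,
            // findMSB(shapeMask) == 3, bit 3 is absent from gl_SampleMaskIn[0], and this
            // fragment is dropped in favor of the fragment that owns sample 3.)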
1641 SkASSERT(!opts.fInvertCoverage); | |
1642 f->codeAppendf("if ((gl_SampleMaskIn[0] & (1 << findMSB(%s))) == 0)
{", shapeMask); | |
1643 // Drop this fragment. | |
1644 if (!fBatchInfo.fCannotDiscard) { | |
1645 f->codeAppend ("discard;"); | |
1646 } else { | |
1647 f->overrideSampleCoverage("0"); | |
1648 } | |
1649 f->codeAppend ("} else {"); | |
1650 // Override the coverage of the lone surviving fragment to "shapeMas
k". | |
1651 f->overrideSampleCoverage(shapeMask); | |
1652 f->codeAppend ("}"); | |
1653 } else { | |
1654 f->overrideSampleCoverage(shapeMask); | |
1655 } | |
1656 } else { | |
1657 f->maskSampleCoverage(shapeMask, opts.fInvertCoverage); | |
1658 } | |
1659 } | |
1660 | |
1661 ////////////////////////////////////////////////////////////////////////////////
//////////////////// | |
1662 | |
1663 GLSLInstanceProcessor::Backend* | |
1664 GLSLInstanceProcessor::Backend::Create(const GrGLSLProgramBuilder* p, BatchInfo
batchInfo, | |
1665 const VertexInputs& inputs) { | |
1666 switch (batchInfo.fAntialiasMode) { | |
1667 default: | |
1668 SkFAIL("Unexpected antialias mode."); | |
1669 case AntialiasMode::kNone: | |
1670 return new BackendNonAA(batchInfo, inputs); | |
1671 case AntialiasMode::kCoverage: | |
1672 return new BackendCoverage(batchInfo, inputs); | |
1673 case AntialiasMode::kMSAA: | |
1674 case AntialiasMode::kMixedSamples: { | |
1675 const GrPipeline& pipeline = p->pipeline(); | |
1676 const GrRenderTargetPriv& rtp = pipeline.getRenderTarget()->renderTa
rgetPriv(); | |
1677 const GrGpu::MultisampleSpecs& specs = rtp.getMultisampleSpecs(pipel
ine.getStencil()); | |
1678 return new BackendMultisample(batchInfo, inputs, specs.fEffectiveSam
pleCnt); | |
1679 } | |
1680 } | |
1681 } | |
1682 | |
1683 ////////////////////////////////////////////////////////////////////////////////
//////////////////// | |
1684 | |
1685 const ShapeVertex kVertexData[] = { | |
1686 // Rectangle. | |
1687 {+1, +1, ~0}, /*0*/ | |
1688 {-1, +1, ~0}, /*1*/ | |
1689 {-1, -1, ~0}, /*2*/ | |
1690 {+1, -1, ~0}, /*3*/ | |
1691 // The next 4 are for the bordered version. | |
1692 {+1, +1, 0}, /*4*/ | |
1693 {-1, +1, 0}, /*5*/ | |
1694 {-1, -1, 0}, /*6*/ | |
1695 {+1, -1, 0}, /*7*/ | |
1696 | |
1697 // Octagon that inscribes the unit circle, cut by an interior unit octagon. | |
1698 {+1.000000f, 0.000000f, 0}, /* 8*/ | |
1699 {+1.000000f, +0.414214f, ~0}, /* 9*/ | |
1700 {+0.707106f, +0.707106f, 0}, /*10*/ | |
1701 {+0.414214f, +1.000000f, ~0}, /*11*/ | |
1702 { 0.000000f, +1.000000f, 0}, /*12*/ | |
1703 {-0.414214f, +1.000000f, ~0}, /*13*/ | |
1704 {-0.707106f, +0.707106f, 0}, /*14*/ | |
1705 {-1.000000f, +0.414214f, ~0}, /*15*/ | |
1706 {-1.000000f, 0.000000f, 0}, /*16*/ | |
1707 {-1.000000f, -0.414214f, ~0}, /*17*/ | |
1708 {-0.707106f, -0.707106f, 0}, /*18*/ | |
1709 {-0.414214f, -1.000000f, ~0}, /*19*/ | |
1710 { 0.000000f, -1.000000f, 0}, /*20*/ | |
1711 {+0.414214f, -1.000000f, ~0}, /*21*/ | |
1712 {+0.707106f, -0.707106f, 0}, /*22*/ | |
1713 {+1.000000f, -0.414214f, ~0}, /*23*/ | |
1714 // This vertex is for the fanned versions. | |
1715 { 0.000000f, 0.000000f, ~0}, /*24*/ | |
1716 | |
1717 // Rectangle with disjoint corner segments. | |
1718 {+1.0, +0.5, 0x3}, /*25*/ | |
1719 {+1.0, +1.0, 0x3}, /*26*/ | |
1720 {+0.5, +1.0, 0x3}, /*27*/ | |
1721 {-0.5, +1.0, 0x2}, /*28*/ | |
1722 {-1.0, +1.0, 0x2}, /*29*/ | |
1723 {-1.0, +0.5, 0x2}, /*30*/ | |
1724 {-1.0, -0.5, 0x0}, /*31*/ | |
1725 {-1.0, -1.0, 0x0}, /*32*/ | |
1726 {-0.5, -1.0, 0x0}, /*33*/ | |
1727 {+0.5, -1.0, 0x1}, /*34*/ | |
1728 {+1.0, -1.0, 0x1}, /*35*/ | |
1729 {+1.0, -0.5, 0x1}, /*36*/ | |
1730 // The next 4 are for the fanned version. | |
1731 { 0.0, 0.0, 0x3}, /*37*/ | |
1732 { 0.0, 0.0, 0x2}, /*38*/ | |
1733 { 0.0, 0.0, 0x0}, /*39*/ | |
1734 { 0.0, 0.0, 0x1}, /*40*/ | |
1735 // The next 8 are for the bordered version. | |
1736 {+0.75, +0.50, 0x3}, /*41*/ | |
1737 {+0.50, +0.75, 0x3}, /*42*/ | |
1738 {-0.50, +0.75, 0x2}, /*43*/ | |
1739 {-0.75, +0.50, 0x2}, /*44*/ | |
1740 {-0.75, -0.50, 0x0}, /*45*/ | |
1741 {-0.50, -0.75, 0x0}, /*46*/ | |
1742 {+0.50, -0.75, 0x1}, /*47*/ | |
1743 {+0.75, -0.50, 0x1}, /*48*/ | |
1744 | |
1745 // 16-gon that inscribes the unit circle, cut by an interior unit 16-gon. | |
1746 {+1.000000f, +0.000000f, 0}, /*49*/ | |
1747 {+1.000000f, +0.198913f, ~0}, /*50*/ | |
1748 {+0.923879f, +0.382683f, 0}, /*51*/ | |
1749 {+0.847760f, +0.566455f, ~0}, /*52*/ | |
1750 {+0.707106f, +0.707106f, 0}, /*53*/ | |
1751 {+0.566455f, +0.847760f, ~0}, /*54*/ | |
1752 {+0.382683f, +0.923879f, 0}, /*55*/ | |
1753 {+0.198913f, +1.000000f, ~0}, /*56*/ | |
1754 {+0.000000f, +1.000000f, 0}, /*57*/ | |
1755 {-0.198913f, +1.000000f, ~0}, /*58*/ | |
1756 {-0.382683f, +0.923879f, 0}, /*59*/ | |
1757 {-0.566455f, +0.847760f, ~0}, /*60*/ | |
1758 {-0.707106f, +0.707106f, 0}, /*61*/ | |
1759 {-0.847760f, +0.566455f, ~0}, /*62*/ | |
1760 {-0.923879f, +0.382683f, 0}, /*63*/ | |
1761 {-1.000000f, +0.198913f, ~0}, /*64*/ | |
1762 {-1.000000f, +0.000000f, 0}, /*65*/ | |
1763 {-1.000000f, -0.198913f, ~0}, /*66*/ | |
1764 {-0.923879f, -0.382683f, 0}, /*67*/ | |
1765 {-0.847760f, -0.566455f, ~0}, /*68*/ | |
1766 {-0.707106f, -0.707106f, 0}, /*69*/ | |
1767 {-0.566455f, -0.847760f, ~0}, /*70*/ | |
1768 {-0.382683f, -0.923879f, 0}, /*71*/ | |
1769 {-0.198913f, -1.000000f, ~0}, /*72*/ | |
1770 {-0.000000f, -1.000000f, 0}, /*73*/ | |
1771 {+0.198913f, -1.000000f, ~0}, /*74*/ | |
1772 {+0.382683f, -0.923879f, 0}, /*75*/ | |
1773 {+0.566455f, -0.847760f, ~0}, /*76*/ | |
1774 {+0.707106f, -0.707106f, 0}, /*77*/ | |
1775 {+0.847760f, -0.566455f, ~0}, /*78*/ | |
1776 {+0.923879f, -0.382683f, 0}, /*79*/ | |
1777 {+1.000000f, -0.198913f, ~0}, /*80*/ | |
1778 }; | |
1779 | |
1780 const uint8_t kIndexData[] = { | |
1781 // Rectangle. | |
1782 0, 1, 2, | |
1783 0, 2, 3, | |
1784 | |
1785 // Rectangle with a border. | |
1786 0, 1, 5, | |
1787 5, 4, 0, | |
1788 1, 2, 6, | |
1789 6, 5, 1, | |
1790 2, 3, 7, | |
1791 7, 6, 2, | |
1792 3, 0, 4, | |
1793 4, 7, 3, | |
1794 4, 5, 6, | |
1795 6, 7, 4, | |
1796 | |
1797 // Octagon that inscribes the unit circle, cut by an interior unit octagon. | |
1798 10, 8, 9, | |
1799 12, 10, 11, | |
1800 14, 12, 13, | |
1801 16, 14, 15, | |
1802 18, 16, 17, | |
1803 20, 18, 19, | |
1804 22, 20, 21, | |
1805 8, 22, 23, | |
1806 8, 10, 12, | |
1807 12, 14, 16, | |
1808 16, 18, 20, | |
1809 20, 22, 8, | |
1810 8, 12, 16, | |
1811 16, 20, 8, | |
1812 | |
1813 // Same octagons, but with the interior arranged as a fan. Used by mixed sam
ples. | |
1814 10, 8, 9, | |
1815 12, 10, 11, | |
1816 14, 12, 13, | |
1817 16, 14, 15, | |
1818 18, 16, 17, | |
1819 20, 18, 19, | |
1820 22, 20, 21, | |
1821 8, 22, 23, | |
1822 24, 8, 10, | |
1823 12, 24, 10, | |
1824 24, 12, 14, | |
1825 16, 24, 14, | |
1826 24, 16, 18, | |
1827 20, 24, 18, | |
1828 24, 20, 22, | |
1829 8, 24, 22, | |
1830 | |
1831 // Same octagons, but with the inner and outer disjoint. Used by coverage AA
. | |
1832 8, 22, 23, | |
1833 9, 8, 23, | |
1834 10, 8, 9, | |
1835 11, 10, 9, | |
1836 12, 10, 11, | |
1837 13, 12, 11, | |
1838 14, 12, 13, | |
1839 15, 14, 13, | |
1840 16, 14, 15, | |
1841 17, 16, 15, | |
1842 18, 16, 17, | |
1843 19, 18, 17, | |
1844 20, 18, 19, | |
1845 21, 20, 19, | |
1846 22, 20, 21, | |
1847 23, 22, 21, | |
1848 22, 8, 10, | |
1849 10, 12, 14, | |
1850 14, 16, 18, | |
1851 18, 20, 22, | |
1852 22, 10, 14, | |
1853 14, 18, 22, | |
1854 | |
1855 // Rectangle with disjoint corner segments. | |
1856 27, 25, 26, | |
1857 30, 28, 29, | |
1858 33, 31, 32, | |
1859 36, 34, 35, | |
1860 25, 27, 28, | |
1861 28, 30, 31, | |
1862 31, 33, 34, | |
1863 34, 36, 25, | |
1864 25, 28, 31, | |
1865 31, 34, 25, | |
1866 | |
1867 // Same rectangle with disjoint corners, but with the interior arranged as a
fan. Used by | |
1868 // mixed samples. | |
1869 27, 25, 26, | |
1870 30, 28, 29, | |
1871 33, 31, 32, | |
1872 36, 34, 35, | |
1873 27, 37, 25, | |
1874 28, 37, 27, | |
1875 30, 38, 28, | |
1876 31, 38, 30, | |
1877 33, 39, 31, | |
1878 34, 39, 33, | |
1879 36, 40, 34, | |
1880 25, 40, 36, | |
1881 | |
1882 // Same rectangle with disjoint corners, with a border as well. Used by cove
rage AA. | |
1883 41, 25, 26, | |
1884 42, 41, 26, | |
1885 27, 42, 26, | |
1886 43, 28, 29, | |
1887 44, 43, 29, | |
1888 30, 44, 29, | |
1889 45, 31, 32, | |
1890 46, 45, 32, | |
1891 33, 46, 32, | |
1892 47, 34, 35, | |
1893 48, 47, 35, | |
1894 36, 48, 35, | |
1895 27, 28, 42, | |
1896 42, 28, 43, | |
1897 30, 31, 44, | |
1898 44, 31, 45, | |
1899 33, 34, 46, | |
1900 46, 34, 47, | |
1901 36, 25, 48, | |
1902 48, 25, 41, | |
1903 41, 42, 43, | |
1904 43, 44, 45, | |
1905 45, 46, 47, | |
1906 47, 48, 41, | |
1907 41, 43, 45, | |
1908 45, 47, 41, | |
1909 | |
1910 // Same as the disjoint octagons, but with 16-gons instead. Used by coverage
AA when the oval is | |
1911 // sufficiently large. | |
1912 49, 79, 80, | |
1913 50, 49, 80, | |
1914 51, 49, 50, | |
1915 52, 51, 50, | |
1916 53, 51, 52, | |
1917 54, 53, 52, | |
1918 55, 53, 54, | |
1919 56, 55, 54, | |
1920 57, 55, 56, | |
1921 58, 57, 56, | |
1922 59, 57, 58, | |
1923 60, 59, 58, | |
1924 61, 59, 60, | |
1925 62, 61, 60, | |
1926 63, 61, 62, | |
1927 64, 63, 62, | |
1928 65, 63, 64, | |
1929 66, 65, 64, | |
1930 67, 65, 66, | |
1931 68, 67, 66, | |
1932 69, 67, 68, | |
1933 70, 69, 68, | |
1934 71, 69, 70, | |
1935 72, 71, 70, | |
1936 73, 71, 72, | |
1937 74, 73, 72, | |
1938 75, 73, 74, | |
1939 76, 75, 74, | |
1940 77, 75, 76, | |
1941 78, 77, 76, | |
1942 79, 77, 78, | |
1943 80, 79, 78, | |
1944 49, 51, 53, | |
1945 53, 55, 57, | |
1946 57, 59, 61, | |
1947 61, 63, 65, | |
1948 65, 67, 69, | |
1949 69, 71, 73, | |
1950 73, 75, 77, | |
1951 77, 79, 49, | |
1952 49, 53, 57, | |
1953 57, 61, 65, | |
1954 65, 69, 73, | |
1955 73, 77, 49, | |
1956 49, 57, 65, | |
1957 65, 73, 49, | |
1958 }; | |
1959 | |
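// Each *_FirstIndex below equals the previous range's first index plus 3 * its triangle count
// into kIndexData above (e.g. kOctagons_FirstIndex == 6 + 3 * kFramedRect_TriCount == 36).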
1960 enum { | |
1961 kRect_FirstIndex = 0, | |
1962 kRect_TriCount = 2, | |
1963 | |
1964 kFramedRect_FirstIndex = 6, | |
1965 kFramedRect_TriCount = 10, | |
1966 | |
1967 kOctagons_FirstIndex = 36, | |
1968 kOctagons_TriCount = 14, | |
1969 | |
1970 kOctagonsFanned_FirstIndex = 78, | |
1971 kOctagonsFanned_TriCount = 16, | |
1972 | |
1973 kDisjointOctagons_FirstIndex = 126, | |
1974 kDisjointOctagons_TriCount = 22, | |
1975 | |
1976 kCorneredRect_FirstIndex = 192, | |
1977 kCorneredRect_TriCount = 10, | |
1978 | |
1979 kCorneredRectFanned_FirstIndex = 222, | |
1980 kCorneredRectFanned_TriCount = 12, | |
1981 | |
1982 kCorneredFramedRect_FirstIndex = 258, | |
1983 kCorneredFramedRect_TriCount = 26, | |
1984 | |
1985 kDisjoint16Gons_FirstIndex = 336, | |
1986 kDisjoint16Gons_TriCount = 46, | |
1987 }; | |
1988 | |
1989 static const GrUniqueKey::Domain kShapeBufferDomain = GrUniqueKey::GenerateDomai
n(); | |
1990 | |
1991 template<GrBufferType Type> static const GrUniqueKey& get_shape_buffer_key() { | |
1992 static GrUniqueKey* kKey; | |
1993 if (!kKey) { | |
1994 kKey = new GrUniqueKey; | |
1995 GrUniqueKey::Builder builder(kKey, kShapeBufferDomain, 1); | |
1996 builder[0] = Type; | |
1997 } | |
1998 return *kKey; | |
1999 } | |
2000 | |
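// Both helpers below look the shape data up in the resource cache by a unique key and lazily
// create and cache the corresponding GPU buffer on first use.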
2001 const GrBuffer* InstanceProcessor::FindOrCreateVertexBuffer(GrGpu* gpu) { | |
2002 GrResourceCache* cache = gpu->getContext()->getResourceCache(); | |
2003 const GrUniqueKey& key = get_shape_buffer_key<kVertex_GrBufferType>(); | |
2004 if (GrGpuResource* cached = cache->findAndRefUniqueResource(key)) { | |
2005 return static_cast<GrBuffer*>(cached); | |
2006 } | |
2007 if (GrBuffer* buffer = gpu->createBuffer(sizeof(kVertexData), kVertex_GrBuff
erType, | |
2008 kStatic_GrAccessPattern, kVertexDat
a)) { | |
2009 buffer->resourcePriv().setUniqueKey(key); | |
2010 return buffer; | |
2011 } | |
2012 return nullptr; | |
2013 } | |
2014 | |
2015 const GrBuffer* InstanceProcessor::FindOrCreateIndex8Buffer(GrGpu* gpu) { | |
2016 GrResourceCache* cache = gpu->getContext()->getResourceCache(); | |
2017 const GrUniqueKey& key = get_shape_buffer_key<kIndex_GrBufferType>(); | |
2018 if (GrGpuResource* cached = cache->findAndRefUniqueResource(key)) { | |
2019 return static_cast<GrBuffer*>(cached); | |
2020 } | |
2021 if (GrBuffer* buffer = gpu->createBuffer(sizeof(kIndexData), kIndex_GrBuffer
Type, | |
2022 kStatic_GrAccessPattern, kIndexData
)) { | |
2023 buffer->resourcePriv().setUniqueKey(key); | |
2024 return buffer; | |
2025 } | |
2026 return nullptr; | |
2027 } | |
2028 | |
2029 IndexRange InstanceProcessor::GetIndexRangeForRect(AntialiasMode aa) { | |
2030 static constexpr IndexRange kRectRanges[kNumAntialiasModes] = { | |
2031 {kRect_FirstIndex, 3 * kRect_TriCount}, // kNone | |
2032 {kFramedRect_FirstIndex, 3 * kFramedRect_TriCount}, // kCoverage | |
2033 {kRect_FirstIndex, 3 * kRect_TriCount}, // kMSAA | |
2034 {kRect_FirstIndex, 3 * kRect_TriCount} // kMixedSamples | |
2035 }; | |
2036 | |
2037 SkASSERT(aa >= AntialiasMode::kNone && aa <= AntialiasMode::kMixedSamples); | |
2038 return kRectRanges[(int)aa]; | |
2039 | |
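    // The static asserts below are compile-time checks that document the table ordering;
    // being compile-time, their position after the return is harmless.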
2040 GR_STATIC_ASSERT(0 == (int)AntialiasMode::kNone); | |
2041 GR_STATIC_ASSERT(1 == (int)AntialiasMode::kCoverage); | |
2042 GR_STATIC_ASSERT(2 == (int)AntialiasMode::kMSAA); | |
2043 GR_STATIC_ASSERT(3 == (int)AntialiasMode::kMixedSamples); | |
2044 } | |
2045 | |
2046 IndexRange InstanceProcessor::GetIndexRangeForOval(AntialiasMode aa, const SkRec
t& devBounds) { | |
2047 if (AntialiasMode::kCoverage == aa && devBounds.height() * devBounds.width()
>= 256 * 256) { | |
2048 // This threshold was chosen quasi-scientifically on Tegra X1. | |
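        // (256 * 256 == 65536 device pixels; ovals at least that large get the finer 16-gon.)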
2049 return {kDisjoint16Gons_FirstIndex, 3 * kDisjoint16Gons_TriCount}; | |
2050 } | |
2051 | |
2052 static constexpr IndexRange kOvalRanges[kNumAntialiasModes] = { | |
2053 {kOctagons_FirstIndex, 3 * kOctagons_TriCount}, // kNo
ne | |
2054 {kDisjointOctagons_FirstIndex, 3 * kDisjointOctagons_TriCount}, // kCo
verage | |
2055 {kOctagons_FirstIndex, 3 * kOctagons_TriCount}, // kMS
AA | |
2056 {kOctagonsFanned_FirstIndex, 3 * kOctagonsFanned_TriCount} // kMi
xedSamples | |
2057 }; | |
2058 | |
2059 SkASSERT(aa >= AntialiasMode::kNone && aa <= AntialiasMode::kMixedSamples); | |
2060 return kOvalRanges[(int)aa]; | |
2061 | |
2062 GR_STATIC_ASSERT(0 == (int)AntialiasMode::kNone); | |
2063 GR_STATIC_ASSERT(1 == (int)AntialiasMode::kCoverage); | |
2064 GR_STATIC_ASSERT(2 == (int)AntialiasMode::kMSAA); | |
2065 GR_STATIC_ASSERT(3 == (int)AntialiasMode::kMixedSamples); | |
2066 } | |
2067 | |
2068 IndexRange InstanceProcessor::GetIndexRangeForRRect(AntialiasMode aa) { | |
2069 static constexpr IndexRange kRRectRanges[kNumAntialiasModes] = { | |
2070 {kCorneredRect_FirstIndex, 3 * kCorneredRect_TriCount}, //
kNone | |
2071 {kCorneredFramedRect_FirstIndex, 3 * kCorneredFramedRect_TriCount}, //
kCoverage | |
2072 {kCorneredRect_FirstIndex, 3 * kCorneredRect_TriCount}, //
kMSAA | |
2073 {kCorneredRectFanned_FirstIndex, 3 * kCorneredRectFanned_TriCount} //
kMixedSamples | |
2074 }; | |
2075 | |
2076 SkASSERT(aa >= AntialiasMode::kNone && aa <= AntialiasMode::kMixedSamples); | |
2077 return kRRectRanges[(int)aa]; | |
2078 | |
2079 GR_STATIC_ASSERT(0 == (int)AntialiasMode::kNone); | |
2080 GR_STATIC_ASSERT(1 == (int)AntialiasMode::kCoverage); | |
2081 GR_STATIC_ASSERT(2 == (int)AntialiasMode::kMSAA); | |
2082 GR_STATIC_ASSERT(3 == (int)AntialiasMode::kMixedSamples); | |
2083 } | |
2084 | |
2085 const char* InstanceProcessor::GetNameOfIndexRange(IndexRange range) { | |
2086 switch (range.fStart) { | |
2087 case kRect_FirstIndex: return "basic_rect"; | |
2088 case kFramedRect_FirstIndex: return "coverage_rect"; | |
2089 | |
2090 case kOctagons_FirstIndex: return "basic_oval"; | |
2091 case kDisjointOctagons_FirstIndex: return "coverage_oval"; | |
2092 case kOctagonsFanned_FirstIndex: return "mixed_samples_oval"; | |
2093 | |
2094 case kCorneredRect_FirstIndex: return "basic_round_rect"; | |
2095 case kCorneredFramedRect_FirstIndex: return "coverage_round_rect"; | |
2096 case kCorneredRectFanned_FirstIndex: return "mixed_samples_round_rect"; | |
2097 | |
2098 default: return "unknown"; | |
2099 } | |
2100 } | |
2101 | |
2102 }  // namespace gr_instanced | |