/*
 * Copyright 2016 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "InstancedRendering.h"

#include "GrBatchFlushState.h"
#include "GrPipeline.h"
#include "GrResourceProvider.h"
#include "instanced/InstanceProcessor.h"

namespace gr_instanced {

InstancedRendering::InstancedRendering(GrGpu* gpu, AntialiasMode lastSupportedAAMode,
                                       bool canRenderToFloat)
    : fGpu(SkRef(gpu)),
      fLastSupportedAAMode(lastSupportedAAMode),
      fCanRenderToFloat(canRenderToFloat),
      fState(State::kRecordingDraws),
      fDrawPool(1024 * sizeof(Batch::Draw), 1024 * sizeof(Batch::Draw)) {
}

GrDrawBatch* InstancedRendering::recordRect(const SkRect& rect, const SkMatrix& viewMatrix,
                                            GrColor color, bool antialias,
                                            const GrInstancedPipelineInfo& info, bool* useHWAA) {
    return this->recordShape(ShapeType::kRect, rect, viewMatrix, color, rect, antialias, info,
                             useHWAA);
}

GrDrawBatch* InstancedRendering::recordRect(const SkRect& rect, const SkMatrix& viewMatrix,
                                            GrColor color, const SkRect& localRect, bool antialias,
                                            const GrInstancedPipelineInfo& info, bool* useHWAA) {
    return this->recordShape(ShapeType::kRect, rect, viewMatrix, color, localRect, antialias, info,
                             useHWAA);
}

GrDrawBatch* InstancedRendering::recordRect(const SkRect& rect, const SkMatrix& viewMatrix,
                                            GrColor color, const SkMatrix& localMatrix,
                                            bool antialias, const GrInstancedPipelineInfo& info,
                                            bool* useHWAA) {
    if (localMatrix.hasPerspective()) {
        return nullptr; // Perspective is not yet supported in the local matrix.
    }
    if (Batch* batch = this->recordShape(ShapeType::kRect, rect, viewMatrix, color, rect, antialias,
                                         info, useHWAA)) {
        batch->getSingleInstance().fInfo |= kLocalMatrix_InfoFlag;
        batch->appendParamsTexel(localMatrix.getScaleX(), localMatrix.getSkewX(),
                                 localMatrix.getTranslateX());
        batch->appendParamsTexel(localMatrix.getSkewY(), localMatrix.getScaleY(),
                                 localMatrix.getTranslateY());
        batch->fInfo.fHasLocalMatrix = true;
        return batch;
    }
    return nullptr;
}

GrDrawBatch* InstancedRendering::recordOval(const SkRect& oval, const SkMatrix& viewMatrix,
                                            GrColor color, bool antialias,
                                            const GrInstancedPipelineInfo& info, bool* useHWAA) {
    return this->recordShape(ShapeType::kOval, oval, viewMatrix, color, oval, antialias, info,
                             useHWAA);
}

GrDrawBatch* InstancedRendering::recordRRect(const SkRRect& rrect, const SkMatrix& viewMatrix,
                                             GrColor color, bool antialias,
                                             const GrInstancedPipelineInfo& info, bool* useHWAA) {
    if (Batch* batch = this->recordShape(GetRRectShapeType(rrect), rrect.rect(), viewMatrix, color,
                                         rrect.rect(), antialias, info, useHWAA)) {
        batch->appendRRectParams(rrect);
        return batch;
    }
    return nullptr;
}

GrDrawBatch* InstancedRendering::recordDRRect(const SkRRect& outer, const SkRRect& inner,
                                              const SkMatrix& viewMatrix, GrColor color,
                                              bool antialias, const GrInstancedPipelineInfo& info,
                                              bool* useHWAA) {
    if (inner.getType() > SkRRect::kSimple_Type) {
        return nullptr; // Complex inner round rects are not yet supported.
    }
    if (SkRRect::kEmpty_Type == inner.getType()) {
        return this->recordRRect(outer, viewMatrix, color, antialias, info, useHWAA);
    }
    if (Batch* batch = this->recordShape(GetRRectShapeType(outer), outer.rect(), viewMatrix, color,
                                         outer.rect(), antialias, info, useHWAA)) {
        batch->appendRRectParams(outer);
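        // Fold the inner round rect into the same instance: encode its shape type in the
        // instance's info bits and append its rect and radii as params texels.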
        ShapeType innerShapeType = GetRRectShapeType(inner);
        batch->fInfo.fInnerShapeTypes |= GetShapeFlag(innerShapeType);
        batch->getSingleInstance().fInfo |= ((int)innerShapeType << kInnerShapeType_InfoBit);
        batch->appendParamsTexel(inner.rect().asScalars(), 4);
        batch->appendRRectParams(inner);
        return batch;
    }
    return nullptr;
}

InstancedRendering::Batch* InstancedRendering::recordShape(ShapeType type, const SkRect& bounds,
                                                           const SkMatrix& viewMatrix,
                                                           GrColor color, const SkRect& localRect,
                                                           bool antialias,
                                                           const GrInstancedPipelineInfo& info,
                                                           bool* useHWAA) {
    SkASSERT(State::kRecordingDraws == fState);

    if (info.fIsRenderingToFloat && !fCanRenderToFloat) {
        return nullptr;
    }

    AntialiasMode antialiasMode;
    if (!this->selectAntialiasMode(viewMatrix, antialias, info, useHWAA, &antialiasMode)) {
        return nullptr;
    }

    Batch* batch = this->createBatch();
    batch->fInfo.fAntialiasMode = antialiasMode;
    batch->fInfo.fShapeTypes = GetShapeFlag(type);
    batch->fInfo.fCannotDiscard = !info.fCanDiscard;

    Instance& instance = batch->getSingleInstance();
    instance.fInfo = (int)type << kShapeType_InfoBit;

    // The instanced shape renderer draws rectangles of [-1, -1, +1, +1], so we find the matrix that
    // will map this rectangle to the same device coordinates as "viewMatrix * bounds".
    float sx = 0.5f * bounds.width();
    float sy = 0.5f * bounds.height();
    float tx = sx + bounds.fLeft;
    float ty = sy + bounds.fTop;
    if (!viewMatrix.hasPerspective()) {
        float* m = instance.fShapeMatrix2x3;
        m[0] = viewMatrix.getScaleX() * sx;
        m[1] = viewMatrix.getSkewX() * sy;
        m[2] = viewMatrix.getTranslateX() +
               viewMatrix.getScaleX() * tx + viewMatrix.getSkewX() * ty;

        m[3] = viewMatrix.getSkewY() * sx;
        m[4] = viewMatrix.getScaleY() * sy;
        m[5] = viewMatrix.getTranslateY() +
               viewMatrix.getSkewY() * tx + viewMatrix.getScaleY() * ty;

        // Since 'm' is a 2x3 matrix that maps the rect [-1, +1] into the shape's device-space quad,
        // it's quite simple to find the bounding rectangle:
        float devBoundsHalfWidth = fabsf(m[0]) + fabsf(m[1]);
        float devBoundsHalfHeight = fabsf(m[3]) + fabsf(m[4]);
        batch->fBounds.fLeft = m[2] - devBoundsHalfWidth;
        batch->fBounds.fRight = m[2] + devBoundsHalfWidth;
        batch->fBounds.fTop = m[5] - devBoundsHalfHeight;
        batch->fBounds.fBottom = m[5] + devBoundsHalfHeight;

        // TODO: Is this worth the CPU overhead?
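        // (The mapped shape stays square only if m's rows (m[0], m[1]) and (m[3], m[4]) are
        // perpendicular and equally long; the bounds comparison is just a cheap early out, since
        // a square shape always has a square axis-aligned bounding box.)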
        batch->fInfo.fNonSquare =
            fabsf(devBoundsHalfHeight - devBoundsHalfWidth) > 0.5f || // Early out.
            fabs(m[0] * m[3] + m[1] * m[4]) > 1e-3f || // Skew?
            fabs(m[0] * m[0] + m[1] * m[1] - m[3] * m[3] - m[4] * m[4]) > 1e-2f; // Diff. lengths?
    } else {
        SkMatrix shapeMatrix(viewMatrix);
        shapeMatrix.preTranslate(tx, ty);
        shapeMatrix.preScale(sx, sy);
        instance.fInfo |= kPerspective_InfoFlag;

        float* m = instance.fShapeMatrix2x3;
        m[0] = SkScalarToFloat(shapeMatrix.getScaleX());
        m[1] = SkScalarToFloat(shapeMatrix.getSkewX());
        m[2] = SkScalarToFloat(shapeMatrix.getTranslateX());
        m[3] = SkScalarToFloat(shapeMatrix.getSkewY());
        m[4] = SkScalarToFloat(shapeMatrix.getScaleY());
        m[5] = SkScalarToFloat(shapeMatrix.getTranslateY());

        // Send the perspective column as a param.
        batch->appendParamsTexel(shapeMatrix[SkMatrix::kMPersp0], shapeMatrix[SkMatrix::kMPersp1],
                                 shapeMatrix[SkMatrix::kMPersp2]);
        batch->fInfo.fHasPerspective = true;

        viewMatrix.mapRect(&batch->fBounds, bounds);

        batch->fInfo.fNonSquare = true;
    }

    instance.fColor = color;

    const float* rectAsFloats = localRect.asScalars(); // Ensure SkScalar == float.
    memcpy(&instance.fLocalRect, rectAsFloats, 4 * sizeof(float));

    return batch;
}

inline bool InstancedRendering::selectAntialiasMode(const SkMatrix& viewMatrix, bool antialias,
                                                    const GrInstancedPipelineInfo& info,
                                                    bool* useHWAA, AntialiasMode* antialiasMode) {
    SkASSERT(!info.fColorDisabled || info.fDrawingShapeToStencil);
    SkASSERT(!info.fIsMixedSampled || info.fIsMultisampled);
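    // The comparisons below rely on AntialiasMode being ordered by capability;
    // fLastSupportedAAMode acts as a ceiling on the modes this GPU supports.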

    if (!info.fIsMultisampled || fGpu->caps()->multisampleDisableSupport()) {
        SkASSERT(fLastSupportedAAMode >= AntialiasMode::kCoverage);
        if (!antialias) {
            if (info.fDrawingShapeToStencil && !info.fCanDiscard) {
                // We can't draw to the stencil buffer without discard (or sample mask if MSAA).
                return false;
            }
            *antialiasMode = AntialiasMode::kNone;
            *useHWAA = false;
            return true;
        }

        if (info.canUseCoverageAA() && viewMatrix.preservesRightAngles()) {
            *antialiasMode = AntialiasMode::kCoverage;
            *useHWAA = false;
            return true;
        }
    }

    if (info.fIsMultisampled && fLastSupportedAAMode >= AntialiasMode::kMSAA) {
        if (!info.fIsMixedSampled || info.fColorDisabled) {
            *antialiasMode = AntialiasMode::kMSAA;
            *useHWAA = true;
            return true;
        }
        if (fLastSupportedAAMode >= AntialiasMode::kMixedSamples) {
            *antialiasMode = AntialiasMode::kMixedSamples;
            *useHWAA = true;
            return true;
        }
    }

    return false;
}

InstancedRendering::Batch::Batch(uint32_t classID, InstancedRendering* ir)
    : INHERITED(classID),
      fInstancedRendering(ir),
      fIsTracked(false),
      fNumDraws(1),
      fNumChangesInGeometry(0) {
    fHeadDraw = fTailDraw = (Draw*)fInstancedRendering->fDrawPool.allocate(sizeof(Draw));
#ifdef SK_DEBUG
    fHeadDraw->fGeometry = {-1, 0};
#endif
    fHeadDraw->fNext = nullptr;
}

InstancedRendering::Batch::~Batch() {
    if (fIsTracked) {
        fInstancedRendering->fTrackedBatches.remove(this);
    }

    Draw* draw = fHeadDraw;
    while (draw) {
        Draw* next = draw->fNext;
        fInstancedRendering->fDrawPool.release(draw);
        draw = next;
    }
}

void InstancedRendering::Batch::appendRRectParams(const SkRRect& rrect) {
    SkASSERT(!fIsTracked);
    switch (rrect.getType()) {
        case SkRRect::kSimple_Type: {
            const SkVector& radii = rrect.getSimpleRadii();
            this->appendParamsTexel(radii.x(), radii.y(), rrect.width(), rrect.height());
            return;
        }
        case SkRRect::kNinePatch_Type: {
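            // Express the radii as fractions of the rect's half width and half height, i.e. in
            // the shape's [-1, +1] space (hence the 2/width and 2/height factors).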
            float twoOverW = 2 / rrect.width();
            float twoOverH = 2 / rrect.height();
            const SkVector& radiiTL = rrect.radii(SkRRect::kUpperLeft_Corner);
            const SkVector& radiiBR = rrect.radii(SkRRect::kLowerRight_Corner);
            this->appendParamsTexel(radiiTL.x() * twoOverW, radiiBR.x() * twoOverW,
                                    radiiTL.y() * twoOverH, radiiBR.y() * twoOverH);
            return;
        }
        case SkRRect::kComplex_Type: {
            /**
             * The x and y radii of each arc are stored in separate vectors,
             * in the following order:
             *
             *        __x1 _ _ _ x3__
             *    y1 |               | y2
             *
             *       |               |
             *
             *    y3 |__   _ _ _   __| y4
             *          x2       x4
             *
             */
            float twoOverW = 2 / rrect.width();
            float twoOverH = 2 / rrect.height();
            const SkVector& radiiTL = rrect.radii(SkRRect::kUpperLeft_Corner);
            const SkVector& radiiTR = rrect.radii(SkRRect::kUpperRight_Corner);
            const SkVector& radiiBR = rrect.radii(SkRRect::kLowerRight_Corner);
            const SkVector& radiiBL = rrect.radii(SkRRect::kLowerLeft_Corner);
            this->appendParamsTexel(radiiTL.x() * twoOverW, radiiBL.x() * twoOverW,
                                    radiiTR.x() * twoOverW, radiiBR.x() * twoOverW);
            this->appendParamsTexel(radiiTL.y() * twoOverH, radiiTR.y() * twoOverH,
                                    radiiBL.y() * twoOverH, radiiBR.y() * twoOverH);
            return;
        }
        default: return;
    }
}

void InstancedRendering::Batch::appendParamsTexel(const SkScalar* vals, int count) {
    SkASSERT(!fIsTracked);
    SkASSERT(count <= 4 && count >= 0);
    const float* valsAsFloats = vals; // Ensure SkScalar == float.
    memcpy(&fParams.push_back(), valsAsFloats, count * sizeof(float));
    fInfo.fHasParams = true;
}

void InstancedRendering::Batch::appendParamsTexel(SkScalar x, SkScalar y, SkScalar z, SkScalar w) {
    SkASSERT(!fIsTracked);
    ParamsTexel& texel = fParams.push_back();
    texel.fX = SkScalarToFloat(x);
    texel.fY = SkScalarToFloat(y);
    texel.fZ = SkScalarToFloat(z);
    texel.fW = SkScalarToFloat(w);
    fInfo.fHasParams = true;
}

void InstancedRendering::Batch::appendParamsTexel(SkScalar x, SkScalar y, SkScalar z) {
    SkASSERT(!fIsTracked);
    ParamsTexel& texel = fParams.push_back();
    texel.fX = SkScalarToFloat(x);
    texel.fY = SkScalarToFloat(y);
    texel.fZ = SkScalarToFloat(z);
    fInfo.fHasParams = true;
}

void InstancedRendering::Batch::initBatchTracker(const GrXPOverridesForBatch& overrides) {
    Draw& draw = this->getSingleDraw(); // This will assert if we have > 1 command.
    SkASSERT(draw.fGeometry.isEmpty());
    SkASSERT(SkIsPow2(fInfo.fShapeTypes));
    SkASSERT(!fIsTracked);

    if (kRect_ShapeFlag == fInfo.fShapeTypes) {
        draw.fGeometry = InstanceProcessor::GetIndexRangeForRect(fInfo.fAntialiasMode);
    } else if (kOval_ShapeFlag == fInfo.fShapeTypes) {
        draw.fGeometry = InstanceProcessor::GetIndexRangeForOval(fInfo.fAntialiasMode, fBounds);
    } else {
        draw.fGeometry = InstanceProcessor::GetIndexRangeForRRect(fInfo.fAntialiasMode);
    }

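    // Append this batch's params to the shared array, packing the index of its first ParamsTexel
    // into the instance's info bits (bounded by kParamsIdx_InfoMask).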
    if (!fParams.empty()) {
        SkASSERT(fInstancedRendering->fParams.count() < (int)kParamsIdx_InfoMask); // TODO: cleaner.
        this->getSingleInstance().fInfo |= fInstancedRendering->fParams.count();
        fInstancedRendering->fParams.push_back_n(fParams.count(), fParams.begin());
    }

    GrColor overrideColor;
    if (overrides.getOverrideColorIfSet(&overrideColor)) {
        SkASSERT(State::kRecordingDraws == fInstancedRendering->fState);
        this->getSingleInstance().fColor = overrideColor;
    }
    fInfo.fUsesLocalCoords = overrides.readsLocalCoords();
    fInfo.fCannotTweakAlphaForCoverage = !overrides.canTweakAlphaForCoverage();

    fInstancedRendering->fTrackedBatches.addToTail(this);
    fIsTracked = true;
}

bool InstancedRendering::Batch::onCombineIfPossible(GrBatch* other, const GrCaps& caps) {
    Batch* that = static_cast<Batch*>(other);
    SkASSERT(fInstancedRendering == that->fInstancedRendering);
    SkASSERT(fTailDraw);
    SkASSERT(that->fTailDraw);

    if (!fInfo.canJoin(that->fInfo) ||
        !GrPipeline::CanCombine(*this->pipeline(), this->bounds(),
                                *that->pipeline(), that->bounds(), caps)) {
        return false;
    }

    fBounds.join(that->fBounds);
    fInfo.join(that->fInfo);

    // Adopt the other batch's draws.
    fNumDraws += that->fNumDraws;
    fNumChangesInGeometry += that->fNumChangesInGeometry;
    if (fTailDraw->fGeometry != that->fHeadDraw->fGeometry) {
        ++fNumChangesInGeometry;
    }
    fTailDraw->fNext = that->fHeadDraw;
    fTailDraw = that->fTailDraw;

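    // The adopted Draws are pool-allocated and now live on this batch's list; clearing the other
    // batch's head and tail keeps its destructor from releasing them back to the pool.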
    that->fHeadDraw = that->fTailDraw = nullptr;

    return true;
}

void InstancedRendering::Batch::computePipelineOptimizations(GrInitInvariantOutput* color,
                                                             GrInitInvariantOutput* coverage,
                                                             GrBatchToXPOverrides* overrides) const {
    // We need to be careful about fInfo here and consider how it might change as batches combine.
    // e.g. We can't make an assumption based on fInfo.isSimpleRects() because the batch might
    // later combine with a non-rect.
    color->setUnknownFourComponents();
    if (fInfo.fAntialiasMode >= AntialiasMode::kMSAA) {
        coverage->setKnownSingleComponent(255);
    } else if (AntialiasMode::kNone == fInfo.fAntialiasMode && !fInfo.fCannotDiscard) {
        coverage->setKnownSingleComponent(255);
    } else {
        coverage->setUnknownSingleComponent();
    }
}

void InstancedRendering::beginFlush(GrResourceProvider* rp) {
    SkASSERT(State::kRecordingDraws == fState);
    fState = State::kFlushing;

    if (fTrackedBatches.isEmpty()) {
        return;
    }

    if (!fVertexBuffer) {
        fVertexBuffer.reset(InstanceProcessor::FindOrCreateVertexBuffer(fGpu));
        if (!fVertexBuffer) {
            return;
        }
    }

    if (!fIndexBuffer) {
        fIndexBuffer.reset(InstanceProcessor::FindOrCreateIndex8Buffer(fGpu));
        if (!fIndexBuffer) {
            return;
        }
    }

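    // Upload the accumulated ParamsTexels into a texel buffer for the InstanceProcessor to read.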
    if (!fParams.empty()) {
        fParamsBuffer.reset(rp->createBuffer(fParams.count() * sizeof(ParamsTexel),
                                             kTexel_GrBufferType, kDynamic_GrAccessPattern,
                                             GrResourceProvider::kNoPendingIO_Flag,
                                             fParams.begin()));
        if (!fParamsBuffer) {
            return;
        }
    }

    this->onBeginFlush(rp);
}

void InstancedRendering::Batch::onDraw(GrBatchFlushState* state) {
    SkASSERT(State::kFlushing == fInstancedRendering->fState);
    SkASSERT(state->gpu() == fInstancedRendering->gpu());

    state->gpu()->handleDirtyContext();
    if (GrXferBarrierType barrierType = this->pipeline()->xferBarrierType(*state->gpu()->caps())) {
        state->gpu()->xferBarrier(this->pipeline()->getRenderTarget(), barrierType);
    }

    InstanceProcessor instProc(fInfo, fInstancedRendering->fParamsBuffer);
    fInstancedRendering->onDraw(*this->pipeline(), instProc, this);
}

void InstancedRendering::endFlush() {
    // The caller is expected to delete all tracked batches (i.e. batches whose initBatchTracker
    // method has been called) before ending the flush.
    SkASSERT(fTrackedBatches.isEmpty());
    fParams.reset();
    fParamsBuffer.reset();
    this->onEndFlush();
    fState = State::kRecordingDraws;
    // Hold on to the shape coords and index buffers.
}

void InstancedRendering::resetGpuResources(ResetType resetType) {
    fVertexBuffer.reset();
    fIndexBuffer.reset();
    fParamsBuffer.reset();
    this->onResetGpuResources(resetType);
}

}