Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(131)

Side by Side Diff: src/gpu/GrInstancedRendering.cpp

Issue 1897203002: Implement instanced rendering for simple shapes (Closed) Base URL: https://skia.googlesource.com/skia.git@upload2_requireHWAA
Patch Set: rebase Created 4 years, 8 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
(Empty)
1 /*
2 * Copyright 2016 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
#include "GrInstancedRendering.h"

#include "GrBatchFlushState.h"
#include "GrPipeline.h"
#include "GrResourceProvider.h"
#include "effects/GrInstanceProcessor.h"

#include <cstring>
#include <type_traits>
14
// Takes a ref on 'gpu'. 'sizeofBatchClass' is the size of the concrete Batch subclass; it sizes
// the entries of fBatchAllocator so batches can be constructed in place (see recordShape()).
GrInstancedRendering::GrInstancedRendering(GrGpu* gpu, uint32_t supportedAAModes,
                                           size_t sizeofBatchClass)
    : fGpu(SkRef(gpu)),
      fSupportedAAModes(supportedAAModes),
      fState(State::kRecordingShapes),
      fBatchAllocator(sizeofBatchClass) {
    SkDEBUGCODE(fInUseBatchCount = 0;)
}
23
// Records a rect draw with no explicit local geometry: the rect serves as both the device-space
// shape bounds and its own local rect. Returns nullptr if the draw cannot be handled here.
GrDrawBatch* GrInstancedRendering::recordRect(const SkRect& rect, const SkMatrix& viewMatrix,
                                              GrColor color, bool antialias, uint32_t flags,
                                              bool* useHWAA) {
    return this->recordShape(kRect_ShapeType, rect, viewMatrix, color, rect, antialias, flags,
                             useHWAA);
}
30
// Records a rect draw with an explicit local rect for texture/shader coordinates.
// Returns nullptr if the draw cannot be handled here.
GrDrawBatch* GrInstancedRendering::recordRect(const SkRect& rect, const SkMatrix& viewMatrix,
                                              GrColor color, const SkRect& localRect,
                                              bool antialias, uint32_t flags, bool* useHWAA) {
    return this->recordShape(kRect_ShapeType, rect, viewMatrix, color, localRect, antialias, flags,
                             useHWAA);
}
37
38 GrDrawBatch* GrInstancedRendering::recordRect(const SkRect& rect, const SkMatrix & viewMatrix,
39 GrColor color, const SkMatrix& loc alMatrix,
40 bool antialias, uint32_t flags, bo ol* useHWAA) {
41 if (localMatrix.hasPerspective()) {
42 return nullptr; // Perspective is not yet supported in the local matrix.
43 }
44 if (Batch* batch = this->recordShape(kRect_ShapeType, rect, viewMatrix, colo r, rect, antialias,
45 flags, useHWAA)) {
46 fInstances.back().fInfo |= kLocalMatrix_InfoFlag;
47 this->appendParamsTexel(localMatrix.getScaleX(), localMatrix.getSkewX(),
48 localMatrix.getTranslateX());
49 this->appendParamsTexel(localMatrix.getSkewY(), localMatrix.getScaleY(),
50 localMatrix.getTranslateY());
51 batch->fTracker.fHasLocalMatrix = true;
52 batch->fTracker.fHasParams = true;
53 return batch;
54 }
55 return nullptr;
56 }
57
// Records an oval draw; the oval's bounding rect doubles as its local rect.
// Returns nullptr if the draw cannot be handled here.
GrDrawBatch* GrInstancedRendering::recordOval(const SkRect& oval, const SkMatrix& viewMatrix,
                                              GrColor color, bool antialias, uint32_t flags,
                                              bool* useHWAA) {
    return this->recordShape(kOval_ShapeType, oval, viewMatrix, color, oval, antialias, flags,
                             useHWAA);
}
64
65 GrDrawBatch* GrInstancedRendering::recordRRect(const SkRRect& rrect, const SkMat rix& viewMatrix,
66 GrColor color, bool antialias, ui nt32_t flags,
67 bool* useHWAA) {
68 if (Batch* batch = this->recordShape(RRectShapeType(rrect), rrect.rect(), vi ewMatrix, color,
69 rrect.rect(), antialias, flags, useHWAA )) {
70 this->appendRRectParams(rrect, &batch->fTracker);
71 return batch;
72 }
73 return nullptr;
74 }
75
76 GrDrawBatch* GrInstancedRendering::recordDRRect(const SkRRect& outer, const SkRR ect& inner,
77 const SkMatrix& viewMatrix, GrCo lor color,
78 bool antialias, uint32_t flags, bool* useHWAA) {
79 if (inner.getType() > SkRRect::kSimple_Type) {
80 return nullptr; // Complex inner round rects are not yet supported.
81 }
82 if (SkRRect::kEmpty_Type == inner.getType()) {
83 return this->recordRRect(outer, viewMatrix, color, antialias, flags, use HWAA);
84 }
85 if (Batch* batch = this->recordShape(RRectShapeType(outer), outer.rect(), vi ewMatrix, color,
86 outer.rect(), antialias, flags, useHWAA )) {
87 this->appendRRectParams(outer, &batch->fTracker);
88 ShapeType innerShapeType = RRectShapeType(inner);
89 batch->fTracker.fInnerShapeTypes |= (1 << innerShapeType);
90 fInstances.back().fInfo |= (innerShapeType << kInnerShapeType_InfoBit);
91 this->appendParamsTexel(inner.rect().asScalars(), 4);
92 this->appendRRectParams(inner, &batch->fTracker);
93 batch->fTracker.fHasParams = true;
94 return batch;
95 }
96 return nullptr;
97 }
98
99 GrInstancedRendering::Batch* GrInstancedRendering::recordShape(ShapeType type, c onst SkRect& bounds,
100 const SkMatrix& v iewMatrix,
101 GrColor color,
102 const SkRect& loc alRect,
103 bool antialias, u int32_t flags,
104 bool* useHWAA) {
105 SkASSERT(State::kRecordingShapes == fState);
106
107 uint32_t paramsIdx = fParams.count();
108 if (paramsIdx > kParamsIdx_InfoMask) {
109 return nullptr; // paramsIdx is too large for its allotted space.
110 }
111
112 AntialiasMode aa;
113 if (!this->selectAntialiasMode(viewMatrix, antialias, flags, &aa, useHWAA)) {
114 return nullptr;
115 }
116
117 Batch* batch = this->constructBatch(fBatchAllocator.push_back(), aa, fInstan ces.count());
118 SkASSERT(batch == fBatchAllocator.back()); // We rely on batch ptr equality with the allocator.
119 SkDEBUGCODE(++fInUseBatchCount;)
120 batch->fTracker.fShapeTypes |= (1 << type);
121 batch->fTracker.fCannotDiscard = !(flags & kUseDiscard_Flag);
122
123 Instance& instance = fInstances.push_back();
124 instance.fInfo = (type << kShapeType_InfoBit) | paramsIdx;
125
126 // The instanced shape renderer draws rectangles of [-1, -1, +1, +1], so we find the matrix that
127 // will map this rectangle to the same device coordinates as "viewMatrix * b ounds".
128 float sx = 0.5f * bounds.width();
129 float sy = 0.5f * bounds.height();
130 float tx = sx + bounds.fLeft;
131 float ty = sy + bounds.fTop;
132 if (!viewMatrix.hasPerspective()) {
133 float* m = instance.fShapeMatrix2x3;
134 m[0] = viewMatrix.getScaleX() * sx;
135 m[1] = viewMatrix.getSkewX() * sy;
136 m[2] = viewMatrix.getTranslateX() +
137 viewMatrix.getScaleX() * tx + viewMatrix.getSkewX() * ty;
138
139 m[3] = viewMatrix.getSkewY() * sx;
140 m[4] = viewMatrix.getScaleY() * sy;
141 m[5] = viewMatrix.getTranslateY() +
142 viewMatrix.getSkewY() * tx + viewMatrix.getScaleY() * ty;
143
144 // Since 'm' is a 2x3 matrix that maps the rect [-1, +1] into the shape' s device-space quad,
145 // it's quite simple to find the bounding rectangle:
146 float devBoundsHalfWidth = fabsf(m[0]) + fabsf(m[1]);
147 float devBoundsHalfHeight = fabsf(m[3]) + fabsf(m[4]);
148 batch->fBounds.fLeft = m[2] - devBoundsHalfWidth;
149 batch->fBounds.fRight = m[2] + devBoundsHalfWidth;
150 batch->fBounds.fTop = m[5] - devBoundsHalfHeight;
151 batch->fBounds.fBottom = m[5] + devBoundsHalfHeight;
152
153 // TODO: Is this worth the CPU overhead?
154 batch->fTracker.fNonSquare =
155 fabsf(devBoundsHalfHeight - devBoundsHalfWidth) > 0.5f || // Early o ut.
156 fabs(m[0] * m[3] + m[1] * m[4]) > 1e-3f || // Skew?
157 fabs(m[0] * m[0] + m[1] * m[1] - m[3] * m[3] - m[4] * m[4]) > 1e-2f; // Diff. lengths?
158 } else {
159 SkMatrix shapeMatrix(viewMatrix);
160 shapeMatrix.preTranslate(tx, ty);
161 shapeMatrix.preScale(sx, sy);
162 instance.fInfo |= kPerspective_InfoFlag;
163
164 float* m = instance.fShapeMatrix2x3;
165 m[0] = SkScalarToFloat(shapeMatrix.getScaleX());
166 m[1] = SkScalarToFloat(shapeMatrix.getSkewX());
167 m[2] = SkScalarToFloat(shapeMatrix.getTranslateX());
168 m[3] = SkScalarToFloat(shapeMatrix.getSkewY());
169 m[4] = SkScalarToFloat(shapeMatrix.getScaleY());
170 m[5] = SkScalarToFloat(shapeMatrix.getTranslateY());
171
172 // Send the perspective column as a param.
173 this->appendParamsTexel(shapeMatrix[SkMatrix::kMPersp0], shapeMatrix[SkM atrix::kMPersp1],
174 shapeMatrix[SkMatrix::kMPersp2]);
175 batch->fTracker.fHasPerspective = true;
176 batch->fTracker.fHasParams = true;
177
178 viewMatrix.mapRect(&batch->fBounds, bounds);
179
180 batch->fTracker.fNonSquare = true;
181 }
182
183 instance.fColor = color;
184 #if SK_SCALAR_IS_FLOAT
bsalomon 2016/04/25 13:22:38 I'd just static assert that SkScalar is float here
Chris Dalton 2016/04/26 19:18:07 Done. (SkScalar == double isn't on the table?)
bsalomon 2016/04/27 14:10:32 It's on the table but there is no plan to tackle i
185 const float* rectAsFloats = localRect.asScalars();
186 memcpy(&instance.fLocalRect, rectAsFloats, 4 * sizeof(float));
187 #else
188 instance.fLocalRect[0] = SkScalarToFloat(localRect.left());
189 instance.fLocalRect[1] = SkScalarToFloat(localRect.top());
190 instance.fLocalRect[2] = SkScalarToFloat(localRect.right());
191 instance.fLocalRect[3] = SkScalarToFloat(localRect.bottom());
192 #endif
193
194 return batch;
195 }
196
// Chooses an antialias mode for the draw described by 'viewMatrix'/'antialias'/'flags'.
// On success, writes the chosen mode to *antialiasMode and whether hardware MSAA must be enabled
// to *useHWAA, and returns true. Returns false if no supported mode can satisfy the request.
inline bool GrInstancedRendering::selectAntialiasMode(const SkMatrix& viewMatrix, bool antialias,
                                                      uint32_t flags, AntialiasMode* antialiasMode,
                                                      bool* useHWAA) {
    // The draw must write color and/or stencil, and a multisampled color buffer implies a
    // multisampled stencil buffer.
    SkASSERT(flags & (kColorWrite_Flag | kStencilWrite_Flag));
    SkASSERT((flags & (kColorBufferMSAA_Flag | kStencilBufferMSAA_Flag)) != kColorBufferMSAA_Flag);

    if (!antialias) {
        if (!(fSupportedAAModes & kNone_AntialiasFlag)) {
            return false;
        }
        // Stencil write without discard support cannot be non-AA:
        if ((flags & (kStencilWrite_Flag | kUseDiscard_Flag)) == kStencilWrite_Flag) {
            // We can only subtract coverage from the stencil output via discard when no MSAA.
            return false;
        }
        *antialiasMode = kNone_AntialiasMode;
        *useHWAA = false;
        return true;
    }

    // Analytic coverage AA: only valid when not writing stencil, the color buffer is not
    // multisampled, and the view matrix preserves right angles.
    if (!(flags & (kColorBufferMSAA_Flag | kStencilWrite_Flag)) &&
        viewMatrix.preservesRightAngles()) {
        SkASSERT(fSupportedAAModes & kCoverage_AntialiasFlag);
        *antialiasMode = kCoverage_AntialiasMode;
        *useHWAA = false;
        return true;
    }

    if ((fSupportedAAModes & kMSAA_AntialiasFlag) && (flags & kStencilBufferMSAA_Flag)) {
        // The xor test below is true when color is not written OR the color buffer is MSAA.
        if ((flags ^ kColorWrite_Flag) & (kColorWrite_Flag | kColorBufferMSAA_Flag)) {
            // We either do not write color, or the color buffer is multisampled.
            *antialiasMode = kMSAA_AntialiasMode;
            *useHWAA = true;
            return true;
        }
        // Color buffer is single-sampled but the stencil buffer is multisampled: mixed samples.
        if (fSupportedAAModes & kMixedSamples_AntialiasFlag) {
            *antialiasMode = kMixedSamples_AntialiasMode;
            *useHWAA = true;
            return true;
        }
    }

    return false;
}
240
// Appends the corner-radius parameters for 'rrect' to fParams, normalized by the rrect's own
// width/height, and marks the tracker as having params. The number and layout of texels depends
// on the rrect type; empty/rect/oval types append nothing. The append order here must match the
// order the instance processor reads them back.
void GrInstancedRendering::appendRRectParams(const SkRRect& rrect, BatchTracker* tracker) {
    switch (rrect.getType()) {
        case SkRRect::kSimple_Type: {
            // All four corners share one radius pair; one texel suffices.
            const SkVector& radii = rrect.getSimpleRadii();
            this->appendParamsTexel(radii.x(), radii.y(), rrect.width(), rrect.height());
            tracker->fHasParams = true;
            return;
        }
        case SkRRect::kNinePatch_Type: {
            // Left/right columns and top/bottom rows each share radii; normalize to [-1,+1] space.
            float twoOverW = 2 / rrect.width();
            float twoOverH = 2 / rrect.height();
            const SkVector& radiiTL = rrect.radii(SkRRect::kUpperLeft_Corner);
            const SkVector& radiiBR = rrect.radii(SkRRect::kLowerRight_Corner);
            this->appendParamsTexel(radiiTL.x() * twoOverW, radiiBR.x() * twoOverW,
                                    radiiTL.y() * twoOverH, radiiBR.y() * twoOverH);
            tracker->fHasParams = true;
            return;
        }
        case SkRRect::kComplex_Type: {
            /**
             * The x and y radii of each arc are stored in separate vectors,
             * in the following order:
             *
             *        __x1 _ _ _ x3__
             *     y1 |              | y2
             *
             *        |              |
             *
             *     y3 |__ _ _ _ __|    y4
             *          x2       x4
             *
             */
            float twoOverW = 2 / rrect.width();
            float twoOverH = 2 / rrect.height();
            const SkVector& radiiTL = rrect.radii(SkRRect::kUpperLeft_Corner);
            const SkVector& radiiTR = rrect.radii(SkRRect::kUpperRight_Corner);
            const SkVector& radiiBR = rrect.radii(SkRRect::kLowerRight_Corner);
            const SkVector& radiiBL = rrect.radii(SkRRect::kLowerLeft_Corner);
            // First texel: x radii; second texel: y radii.
            this->appendParamsTexel(radiiTL.x() * twoOverW, radiiBL.x() * twoOverW,
                                    radiiTR.x() * twoOverW, radiiBR.x() * twoOverW);
            this->appendParamsTexel(radiiTL.y() * twoOverH, radiiTR.y() * twoOverH,
                                    radiiBL.y() * twoOverH, radiiBR.y() * twoOverH);
            tracker->fHasParams = true;
            return;
        }
        default: return;
    }
}
289
290 void GrInstancedRendering::appendParamsTexel(const SkScalar* vals, int count) {
291 SkASSERT(count <= 4 && count >= 0);
292 #if SK_SCALAR_IS_FLOAT
bsalomon 2016/04/25 13:22:38 ditto
Chris Dalton 2016/04/26 19:18:07 Done.
293 const float* valsAsFloats = vals;
294 memcpy(&fParams.push_back(), valsAsFloats, count * sizeof(float));
295 #else
296 float* params = fParams.push_back().fValues;
297 for (int i = 0; i < count; i++) {
298 params[i] = SkScalarToFloat(vals[i]);
299 }
300 #endif
301 }
302
303 void GrInstancedRendering::appendParamsTexel(SkScalar x, SkScalar y, SkScalar z, SkScalar w) {
304 ParamsTexel& texel = fParams.push_back();
305 texel.fX = SkScalarToFloat(x);
306 texel.fY = SkScalarToFloat(y);
307 texel.fZ = SkScalarToFloat(z);
308 texel.fW = SkScalarToFloat(w);
309 }
310
311 void GrInstancedRendering::appendParamsTexel(SkScalar x, SkScalar y, SkScalar z) {
312 ParamsTexel& texel = fParams.push_back();
313 texel.fX = SkScalarToFloat(x);
314 texel.fY = SkScalarToFloat(y);
315 texel.fZ = SkScalarToFloat(z);
316 }
317
void GrInstancedRendering::Batch::computePipelineOptimizations(GrInitInvariantOutput* color,
                                                               GrInitInvariantOutput* coverage,
                                                               GrBatchToXPOverrides* overrides) const {
    // We can't generally rely on fTracker here: even though this runs before batching, other
    // draws may later combine into this batch and mutate the tracker's fields, so only fields
    // that are stable across combining (like fCannotDiscard, below) may be consulted.
    color->setUnknownFourComponents();
    if (fAntialiasMode >= kMSAA_AntialiasMode) {
        // MSAA modes resolve coverage in hardware; shader coverage is full.
        coverage->setKnownSingleComponent(255);
    } else if (kNone_AntialiasMode == fAntialiasMode && !fTracker.fCannotDiscard) {
        // We can rely on fCannotDiscard because this particular field does not change.
        coverage->setKnownSingleComponent(255);
    } else {
        coverage->setUnknownSingleComponent();
    }
}
332
333 void GrInstancedRendering::Batch::initBatchTracker(const GrXPOverridesForBatch& overrides) {
334 fTracker.fUsesColor = overrides.readsColor();
335 fTracker.fUsesCoverage = overrides.readsCoverage();
336 fTracker.fUsesLocalCoords = overrides.readsLocalCoords();
337 fTracker.fCannotTweakAlphaForCoverage = !overrides.canTweakAlphaForCoverage( );
338
339 GrColor overrideColor;
340 if (overrides.getOverrideColorIfSet(&overrideColor)) {
341 SkASSERT(State::kRecordingShapes == fInstancedRendering->fState);
342 SkASSERT(!fIsCombined);
343 fInstancedRendering->fInstances[fFirstInstanceIdx].fColor = overrideColo r;
344 }
345 }
346
// Transitions from recording to drawing and uploads the recorded instances (and params, if any)
// to GPU buffers. Any buffer-creation failure silently aborts the commit; subclass setup runs in
// onCommitToGpu() only if every buffer was created.
void GrInstancedRendering::commitToGpu(GrResourceProvider* rp) {
    SkASSERT(State::kRecordingShapes == fState);
    fState = State::kDrawingBatches;

    if (fInstances.empty()) {
        return;
    }

    // The vertex and index buffers are shared/cached; create them only if we don't already
    // hold refs from a previous commit.
    if (!fVertexBuffer) {
        fVertexBuffer.reset(GrInstanceProcessor::FindOrCreateVertexBuffer(fGpu));
        if (!fVertexBuffer) {
            return;
        }
    }

    if (!fIndexBuffer) {
        fIndexBuffer.reset(GrInstanceProcessor::FindOrCreateIndex8Buffer(fGpu));
        if (!fIndexBuffer) {
            return;
        }
    }

    // The instance buffer is rebuilt every commit from the recorded instance array.
    fInstanceBuffer.reset(rp->createBuffer(fInstances.count() * sizeof(Instance),
                                           kVertex_GrBufferType, kDynamic_GrAccessPattern,
                                           GrResourceProvider::kNoPendingIO_Flag,
                                           fInstances.begin()));
    if (!fInstanceBuffer) {
        return;
    }

    if (!fParams.empty()) {
        fParamsBuffer.reset(rp->createBuffer(fParams.count() * sizeof(ParamsTexel),
                                             kTexel_GrBufferType, kDynamic_GrAccessPattern,
                                             GrResourceProvider::kNoPendingIO_Flag,
                                             fParams.begin()));
        if (!fParamsBuffer) {
            return;
        }
    }

    this->onCommitToGpu(rp);
}
389
390 void GrInstancedRendering::Batch::onDraw(GrBatchFlushState* state) {
391 SkASSERT(State::kDrawingBatches == fInstancedRendering->fState);
392 SkASSERT(state->gpu() == fInstancedRendering->gpu());
393
394 GrInstanceProcessor instProc(fTracker, fInstancedRendering->fParamsBuffer, f AntialiasMode);
395 fInstancedRendering->onDraw(*this->pipeline(), instProc, this);
396 }
397
// Batches live in the owning GrInstancedRendering's block allocator rather than on the heap, so
// deletion runs the destructor manually. If this batch happens to be the most recently allocated
// entry its storage is returned immediately; otherwise the memory is reclaimed in bulk when the
// allocator is reset by restart().
void GrInstancedRendering::Batch::onDelete() const {
    this->~Batch();
    SkDEBUGCODE(--fInstancedRendering->fInUseBatchCount);
    if (this == fInstancedRendering->fBatchAllocator.back()) {
        fInstancedRendering->fBatchAllocator.pop_back();
    }
}
405
// Resets per-frame recording state so a new set of shapes can be recorded. All batches must have
// been deleted first (fInUseBatchCount == 0).
void GrInstancedRendering::restart() {
    SkASSERT(0 == fInUseBatchCount);
    fBatchAllocator.reset();
    // Hold on to the shape coords and index buffers (fVertexBuffer/fIndexBuffer are reusable
    // across frames); only the per-frame instance and params data are dropped.
    fInstances.reset();
    fParams.reset();
    fInstanceBuffer.reset();
    fParamsBuffer.reset();
    this->onRestart();
    fState = State::kRecordingShapes;
}
417
// Releases all GPU buffer refs (including the reusable vertex/index buffers that restart()
// preserves) and forwards to the subclass hook.
void GrInstancedRendering::clearGpuResources(ClearType clearType) {
    fVertexBuffer.reset();
    fIndexBuffer.reset();
    fInstanceBuffer.reset();
    fParamsBuffer.reset();
    this->onClearGpuResources(clearType);
}
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698