Chromium Code Reviews

Side by Side Diff: src/gpu/gl/GrGLInstancedRendering.cpp

Issue 1897203002: Implement instanced rendering for simple shapes (Closed) Base URL: https://skia.googlesource.com/skia.git@upload2_requireHWAA
Patch Set: Created 4 years, 8 months ago
/*
 * Copyright 2016 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrGLInstancedRendering.h"

#include "GrGLBuffer.h"
#include "GrGLGpu.h"
#include "GrResourceProvider.h"
#include "effects/GrInstanceProcessor.h"

#define GL_CALL(X) GR_GL_CALL(this->glGpu()->glInterface(), X)

class GrGLInstancedRendering::GLBatch : public GrInstancedRendering::Batch {
public:
    DEFINE_BATCH_CLASS_ID

    GLBatch(GrGLInstancedRendering* instRendering, AntialiasMode aa, uint32_t flags,
            int instanceIdx)
        : INHERITED(ClassID(), instRendering, aa, flags, instanceIdx) {
    }

    void initBatchTracker(const GrXPOverridesForBatch&) override;
    bool onCombineIfPossible(GrBatch* other, const GrCaps& caps) override;

private:
    GrGLInstancedRendering* glInstancedRendering() const {
        return static_cast<GrGLInstancedRendering*>(fInstancedRendering);
    }

    SkSTArray<4, GrGLDrawElementsIndirectCommand, true>   fDrawCmds;
    GrGLDrawElementsIndirectCommand*                      fDrawCmdsOffsetInBuffer;

    friend class GrGLInstancedRendering;

    typedef Batch INHERITED;
};

GrGLInstancedRendering* GrGLInstancedRendering::CreateIfSupported(GrGLGpu* gpu) {
    const GrGLCaps& caps = gpu->glCaps();
    if (!caps.vertexArrayObjectSupport() ||
        !caps.drawIndirectSupport() ||
        !caps.baseInstanceSupport()) {
        return nullptr;
    }
    uint32_t supportedAAModes = GrInstanceProcessor::GetSupportedAAModes(*caps.glslCaps(), caps);
    if (!caps.multisampleDisableSupport()) {
        // The non-AA shaders require MSAA to be disabled.
        supportedAAModes &= ~kNone_AntialiasFlag;
    }
    if (!supportedAAModes) {
        return nullptr;
    }
    return new GrGLInstancedRendering(gpu, supportedAAModes);
}

GrGLInstancedRendering::GrGLInstancedRendering(GrGLGpu* gpu, uint32_t supportedAAModes)
    : INHERITED(gpu, supportedAAModes, sizeof(GLBatch)),
      fVertexArrayID(0),
      fInstanceBufferInVertexArrayID(SK_InvalidUniqueID),
      fTotalDrawCmdCount(0) {
}

GrGLInstancedRendering::~GrGLInstancedRendering() {
    if (fVertexArrayID) {
        GL_CALL(DeleteVertexArrays(1, &fVertexArrayID));
        this->glGpu()->notifyVertexArrayDelete(fVertexArrayID);
    }
}

inline GrGLGpu* GrGLInstancedRendering::glGpu() const {
    return static_cast<GrGLGpu*>(this->gpu());
}

GrInstancedRendering::Batch* GrGLInstancedRendering::constructBatch(void* storage, AntialiasMode aa,
                                                                    uint32_t flags,
                                                                    int instanceIdx) {
    return new (storage) GLBatch(this, aa, flags, instanceIdx);
}

void GrGLInstancedRendering::GLBatch::initBatchTracker(const GrXPOverridesForBatch& overrides) {
    SkASSERT(!fIsCombined);
    SkASSERT(SkIsPow2(fTracker.fShapeTypes)); // There should only be one bit set at this point.

    INHERITED::initBatchTracker(overrides);

    GrGLDrawElementsIndirectCommand& cmd = fDrawCmds.push_back();
    cmd.fBaseInstance = fFirstInstanceIdx;
    cmd.fInstanceCount = 1;
    if (kRect_ShapeFlag == fTracker.fShapeTypes) {
        GrInstanceProcessor::GetIndexRangeForRect(fAntialiasMode, &cmd.fFirstIndex, &cmd.fCount);
    } else if (kOval_ShapeFlag == fTracker.fShapeTypes) {
        GrInstanceProcessor::GetIndexRangeForOval(fAntialiasMode, fBounds, &cmd.fFirstIndex,
                                                  &cmd.fCount);
    } else {
        GrInstanceProcessor::GetIndexRangeForRRect(fAntialiasMode, &cmd.fFirstIndex, &cmd.fCount);
    }
    cmd.fBaseVertex = 0;

    ++this->glInstancedRendering()->fTotalDrawCmdCount;
}
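
For reference, the command fields written above follow the packed layout OpenGL expects for an indexed indirect draw (the final field requires base-instance support, which CreateIfSupported() gates on). The sketch below is not part of this CL; the field-to-field mapping onto cmd.* is inferred from the usage above rather than taken from the GrGLDrawElementsIndirectCommand definition.

#include <cstdint>

// OpenGL's packed layout for one indexed indirect draw command.
struct DrawElementsIndirectCommand {
    uint32_t count;          // index count              -> cmd.fCount
    uint32_t instanceCount;  // instances to draw        -> cmd.fInstanceCount
    uint32_t firstIndex;     // offset into index data   -> cmd.fFirstIndex
    int32_t  baseVertex;     // added to each index      -> cmd.fBaseVertex (always 0 here)
    uint32_t baseInstance;   // first per-instance datum -> cmd.fBaseInstance
};

static_assert(20 == sizeof(DrawElementsIndirectCommand), "commands are tightly packed");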

bool GrGLInstancedRendering::GLBatch::onCombineIfPossible(GrBatch* other, const GrCaps& caps) {
    GLBatch* that = other->cast<GLBatch>();

    SkASSERT(fInstancedRendering == that->fInstancedRendering);
    SkASSERT(fDrawCmds.count());
    SkASSERT(that->fDrawCmds.count());

    if (fFlags != that->fFlags ||
        fAntialiasMode != that->fAntialiasMode ||
        !GrPipeline::CanCombine(*this->pipeline(), this->bounds(),
                                *that->pipeline(), that->bounds(), caps)) {
        return false;
    }

    fBounds.join(that->fBounds);
    fTracker.join(that->fTracker);

    // Join the draw commands.
    int i = 0;
    if (fDrawCmds.back().fBaseInstance + fDrawCmds.back().fInstanceCount ==
        that->fDrawCmds.front().fBaseInstance &&
        fDrawCmds.back().fFirstIndex == that->fDrawCmds.front().fFirstIndex) {
        SkASSERT(fDrawCmds.back().fCount == that->fDrawCmds.front().fCount);
        SkASSERT(0 == (fDrawCmds.back().fBaseVertex | that->fDrawCmds.back().fBaseVertex));
        fDrawCmds.back().fInstanceCount += that->fDrawCmds.front().fInstanceCount;
        ++i;
        --this->glInstancedRendering()->fTotalDrawCmdCount;
    }
    if (i < that->fDrawCmds.count()) {
        fDrawCmds.push_back_n(that->fDrawCmds.count() - i, &that->fDrawCmds[i]);
    }

    return true;
}
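
A small self-contained illustration of the merge above, using a stripped-down command struct and made-up numbers rather than Skia types: when the trailing command of one batch and the leading command of the next draw the same index range and cover back-to-back instance ranges, they collapse into a single command.

#include <cassert>
#include <cstdint>

// Stand-in for an indirect command; not a Skia type.
struct Cmd { uint32_t fFirstIndex, fInstanceCount, fBaseInstance; };

int main() {
    Cmd back  = {/*firstIndex=*/0, /*instanceCount=*/3, /*baseInstance=*/5};  // instances [5..8)
    Cmd front = {/*firstIndex=*/0, /*instanceCount=*/2, /*baseInstance=*/8};  // instances [8..10)
    if (back.fBaseInstance + back.fInstanceCount == front.fBaseInstance &&
        back.fFirstIndex == front.fFirstIndex) {
        // Same geometry, contiguous instances: fold the two commands into one.
        back.fInstanceCount += front.fInstanceCount;
    }
    assert(5 == back.fBaseInstance && 5 == back.fInstanceCount);  // now draws [5..10)
    return 0;
}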

void GrGLInstancedRendering::onCommitToGpu(GrResourceProvider* rp) {
    SkASSERT(fTotalDrawCmdCount);
    SkASSERT(!fDrawIndirectBuffer);

    if (!fVertexArrayID) {
        GL_CALL(GenVertexArrays(1, &fVertexArrayID));
        if (!fVertexArrayID) {
            return;
        }
        this->glGpu()->bindVertexArray(fVertexArrayID);

        // Attach our index buffer to the vertex array.
        GL_CALL(BindBuffer(GR_GL_ELEMENT_ARRAY_BUFFER,
                           static_cast<const GrGLBuffer*>(this->indexBuffer())->bufferID()));

        // Set up the non-instanced attribs.
        this->glGpu()->bindBuffer(kVertex_GrBufferType,
                                  static_cast<const GrGLBuffer*>(this->vertexBuffer()));
        GL_CALL(EnableVertexAttribArray(kShapeCoords_AttribIdx));
        GL_CALL(VertexAttribPointer(kShapeCoords_AttribIdx, 2, GR_GL_FLOAT, GR_GL_FALSE,
                                    sizeof(ShapeVertex), (void*) offsetof(ShapeVertex, fX)));
        GL_CALL(EnableVertexAttribArray(kVertexAttrs_AttribIdx));
        GL_CALL(VertexAttribIPointer(kVertexAttrs_AttribIdx, 1, GR_GL_INT, sizeof(ShapeVertex),
                                     (void*) offsetof(ShapeVertex, fAttrs)));

        SkASSERT(SK_InvalidUniqueID == fInstanceBufferInVertexArrayID);
    }

    fDrawIndirectBuffer.reset(rp->createBuffer(sizeof(GrGLDrawElementsIndirectCommand) *
                                               fTotalDrawCmdCount, kDrawIndirect_GrBufferType,
                                               kDynamic_GrAccessPattern,
                                               GrResourceProvider::kNoPendingIO_Flag));
    if (!fDrawIndirectBuffer) {
        return;
    }

    // Generate a draw indirect buffer based on the instanced batches in existence.
    int idx = 0;
    auto* mappedCmds = static_cast<GrGLDrawElementsIndirectCommand*>(fDrawIndirectBuffer->map());
    SkDEBUGCODE(int inUseBatchCount = 0;)
    for (BatchAllocator::Iter iter(this->batchAllocator()); iter.next();) {
        GLBatch* batch = static_cast<GLBatch*>(iter.get());
        if (!batch->fInUse) {
            continue;
        }
        memcpy(&mappedCmds[idx], batch->fDrawCmds.begin(),
               batch->fDrawCmds.count() * sizeof(GrGLDrawElementsIndirectCommand));
        batch->fDrawCmdsOffsetInBuffer = (GrGLDrawElementsIndirectCommand*) nullptr + idx;
        idx += batch->fDrawCmds.count();
        SkDEBUGCODE(++inUseBatchCount;)
    }
    SkASSERT(fTotalDrawCmdCount == idx);
    SkASSERT(inUseBatchCount == fInUseBatchCount);
    fDrawIndirectBuffer->unmap();
}
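
One subtlety above: fDrawCmdsOffsetInBuffer is never dereferenced. Because the buffer is bound as the draw-indirect buffer at draw time, DrawElementsIndirect treats its last argument as a byte offset into that buffer, so the code stores idx commands' worth of bytes disguised as a pointer. A minimal sketch of the same arithmetic follows; the struct and helper are hypothetical stand-ins, not Skia types.

#include <cassert>
#include <cstdint>

// 20-byte stand-in for one tightly packed indirect command.
struct Cmd20 { uint32_t fFields[5]; };

// The "pointer" handed to DrawElementsIndirect is really idx commands' worth
// of bytes from the start of the bound indirect buffer.
const void* commandOffsetInBuffer(int idx) {
    return reinterpret_cast<const void*>(static_cast<uintptr_t>(idx) * sizeof(Cmd20));
}

int main() {
    assert(60 == reinterpret_cast<uintptr_t>(commandOffsetInBuffer(3)));
    return 0;
}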

void GrGLInstancedRendering::onDraw(const GrPipeline& pipeline, const GrInstanceProcessor& instProc,
                                    const Batch* baseBatch) {
    if (!fDrawIndirectBuffer) {
        return; // commitToGpu was not successful.
    }
    if (!this->glGpu()->flushGLState(pipeline, instProc)) {
        return;
    }
    this->flushAttribArrays();
    this->glGpu()->bindBuffer(kDrawIndirect_GrBufferType,
                              static_cast<GrGLBuffer*>(fDrawIndirectBuffer.get()));

    const GLBatch* batch = static_cast<const GLBatch*>(baseBatch);
    int numCommands = batch->fDrawCmds.count();

    if (1 == numCommands || !this->glGpu()->glCaps().multiDrawIndirectSupport()) {
        for (int i = 0; i < numCommands; ++i) {
            GL_CALL(DrawElementsIndirect(GR_GL_TRIANGLES, GR_GL_UNSIGNED_BYTE,
                                         batch->fDrawCmdsOffsetInBuffer + i));
        }
    } else {
        GL_CALL(MultiDrawElementsIndirect(GR_GL_TRIANGLES, GR_GL_UNSIGNED_BYTE,
                                          batch->fDrawCmdsOffsetInBuffer, numCommands, 0));
    }
}
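
For readers less familiar with indirect draws, here is a rough model of the dispatch choice above. It is only a sketch: the GL entry points are passed in as plain function pointers so the snippet does not depend on a particular loader, the constants are the standard GL enum values, and the 20-byte step matches a tightly packed indexed indirect command.

// Standard GL enum values used above.
constexpr unsigned kGL_TRIANGLES     = 0x0004;
constexpr unsigned kGL_UNSIGNED_BYTE = 0x1401;

using PFNDrawElementsIndirect      = void (*)(unsigned mode, unsigned type, const void* indirect);
using PFNMultiDrawElementsIndirect = void (*)(unsigned mode, unsigned type, const void* indirect,
                                              int drawcount, int stride);

// Dispatch model: one multi-draw call over tightly packed commands (stride 0),
// or a loop that steps one 20-byte command at a time when multi-draw is
// unavailable.
void drawIndirectCommands(PFNDrawElementsIndirect drawElementsIndirect,
                          PFNMultiDrawElementsIndirect multiDrawElementsIndirect,
                          const void* firstCmdOffset, int numCommands) {
    if (multiDrawElementsIndirect && numCommands > 1) {
        multiDrawElementsIndirect(kGL_TRIANGLES, kGL_UNSIGNED_BYTE, firstCmdOffset,
                                  numCommands, /*stride=*/0);
    } else {
        const char* offset = static_cast<const char*>(firstCmdOffset);
        for (int i = 0; i < numCommands; ++i) {
            drawElementsIndirect(kGL_TRIANGLES, kGL_UNSIGNED_BYTE, offset + i * 20);
        }
    }
}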

void GrGLInstancedRendering::flushAttribArrays() {
    SkASSERT(fVertexArrayID);
    this->glGpu()->bindVertexArray(fVertexArrayID);

    if (fInstanceBufferInVertexArrayID != this->instanceBuffer()->getUniqueID()) {
        this->glGpu()->bindBuffer(kVertex_GrBufferType,
                                  static_cast<const GrGLBuffer*>(this->instanceBuffer()));

        // Info attrib.
        GL_CALL(EnableVertexAttribArray(kInstanceInfo_AttribIdx));
        GL_CALL(VertexAttribIPointer(kInstanceInfo_AttribIdx, 1, GR_GL_UNSIGNED_INT,
                                     sizeof(Instance), (void*) offsetof(Instance, fInfo)));
        GL_CALL(VertexAttribDivisor(kInstanceInfo_AttribIdx, 1));

        // Shape matrix attrib.
        GL_CALL(EnableVertexAttribArray(kShapeMatrixX_AttribIdx));
        GL_CALL(EnableVertexAttribArray(kShapeMatrixY_AttribIdx));
        GL_CALL(VertexAttribPointer(kShapeMatrixX_AttribIdx, 3, GR_GL_FLOAT, GR_GL_FALSE,
                                    sizeof(Instance),
                                    (void*) offsetof(Instance, fShapeMatrix2x3[0])));
        GL_CALL(VertexAttribPointer(kShapeMatrixY_AttribIdx, 3, GR_GL_FLOAT, GR_GL_FALSE,
                                    sizeof(Instance),
                                    (void*) offsetof(Instance, fShapeMatrix2x3[3])));
        GL_CALL(VertexAttribDivisor(kShapeMatrixX_AttribIdx, 1));
        GL_CALL(VertexAttribDivisor(kShapeMatrixY_AttribIdx, 1));

        // Color attrib.
        GL_CALL(EnableVertexAttribArray(kColor_AttribIdx));
        GL_CALL(VertexAttribPointer(kColor_AttribIdx, 4, GR_GL_UNSIGNED_BYTE, GR_GL_TRUE,
                                    sizeof(Instance), (void*) offsetof(Instance, fColor)));
        GL_CALL(VertexAttribDivisor(kColor_AttribIdx, 1));

        // Local rect attrib.
        GL_CALL(EnableVertexAttribArray(kLocalRect_AttribIdx));
        GL_CALL(VertexAttribPointer(kLocalRect_AttribIdx, 4, GR_GL_FLOAT, GR_GL_FALSE,
                                    sizeof(Instance), (void*) offsetof(Instance, fLocalRect)));
        GL_CALL(VertexAttribDivisor(kLocalRect_AttribIdx, 1));

        fInstanceBufferInVertexArrayID = this->instanceBuffer()->getUniqueID();
    }
}
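
The VertexAttribDivisor(..., 1) calls are what make the attributes above advance per instance rather than per vertex, so instance i reads from byte offset i * sizeof(Instance) in the instance buffer. A tiny model of GL's divisor rule, with base instance ignored for simplicity and a made-up helper name:

#include <cassert>
#include <cstddef>

// Divisor 0: attribute advances per vertex; divisor N: once every N instances.
size_t attribSourceElement(size_t vertexIndex, size_t instanceIndex, unsigned divisor) {
    return 0 == divisor ? vertexIndex : instanceIndex / divisor;
}

int main() {
    assert(7 == attribSourceElement(7, 3, /*divisor=*/0));  // per-vertex attrib
    assert(3 == attribSourceElement(7, 3, /*divisor=*/1));  // per-instance attrib, as above
    return 0;
}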

void GrGLInstancedRendering::onRestart() {
    fTotalDrawCmdCount = 0;
    fDrawIndirectBuffer.reset();
}

void GrGLInstancedRendering::onClearGpuResources(ClearType clearType) {
    if (fVertexArrayID && ClearType::kDestroy == clearType) {
        GL_CALL(DeleteVertexArrays(1, &fVertexArrayID));
        this->glGpu()->notifyVertexArrayDelete(fVertexArrayID);
    }
    fVertexArrayID = 0;
    fInstanceBufferInVertexArrayID = SK_InvalidUniqueID;
    fDrawIndirectBuffer.reset();
}