/*
 * Copyright 2016 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GLInstancedRendering.h"

#include "GrResourceProvider.h"
#include "gl/GrGLGpu.h"
#include "instanced/InstanceProcessor.h"

#define GL_CALL(X) GR_GL_CALL(this->glGpu()->glInterface(), X)

namespace gr_instanced {
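
// Each GLBatch flattens into one GL indirect draw command per contiguous run
// of draws that share the same index-range geometry, so numGLCommands() is
// 1 + fNumChangesInGeometry. fGLDrawCmdsIdx records where this batch's
// commands begin in the flush-wide draw-indirect buffer; fEmulatedBaseInstance
// stands in for glCmd.fBaseInstance on drivers without base-instance support.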
class GLInstancedRendering::GLBatch : public InstancedRendering::Batch {
public:
    DEFINE_BATCH_CLASS_ID

    GLBatch(GLInstancedRendering* instRendering) : INHERITED(ClassID(), instRendering) {}
    int numGLCommands() const { return 1 + fNumChangesInGeometry; }

private:
    int fEmulatedBaseInstance;
    int fGLDrawCmdsIdx;

    friend class GLInstancedRendering;

    typedef Batch INHERITED;
};
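
// Instanced rendering requires vertex array object support (for the attrib
// layout set up in onBeginFlush), indirect draw support (for the command
// buffer), and an InstanceProcessor that works with the current caps.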
GLInstancedRendering* GLInstancedRendering::CreateIfSupported(GrGLGpu* gpu) {
#ifndef SK_BUILD_FOR_MAC
    // Only whitelisting on Mac for now. Once we've been able to work through the various issues on
    // other platforms we can enable more generally.
    return nullptr;
#endif
    const GrGLCaps& glCaps = gpu->glCaps();
    AntialiasMode lastSupportedAAMode;
    if (!glCaps.vertexArrayObjectSupport() ||
        !glCaps.drawIndirectSupport() ||
        !InstanceProcessor::IsSupported(*glCaps.glslCaps(), glCaps, &lastSupportedAAMode)) {
        return nullptr;
    }
    return new GLInstancedRendering(gpu, lastSupportedAAMode);
}

GLInstancedRendering::GLInstancedRendering(GrGLGpu* gpu, AntialiasMode lastSupportedAAMode)
    : INHERITED(gpu, lastSupportedAAMode, gpu->glCaps().canDrawIndirectToFloat()),
      fVertexArrayID(0),
      fGLDrawCmdsInfo(0),
      fInstanceAttribsBufferUniqueId(SK_InvalidUniqueID) {
}

GLInstancedRendering::~GLInstancedRendering() {
    if (fVertexArrayID) {
        GL_CALL(DeleteVertexArrays(1, &fVertexArrayID));
        this->glGpu()->notifyVertexArrayDelete(fVertexArrayID);
    }
}

inline GrGLGpu* GLInstancedRendering::glGpu() const {
    return static_cast<GrGLGpu*>(this->gpu());
}

InstancedRendering::Batch* GLInstancedRendering::createBatch() {
    return new GLBatch(this);
}
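
// Builds the GPU-side buffers for this flush: a first pass over the tracked
// batches counts instances and indirect commands, then the instance and
// draw-indirect buffers are created, mapped, filled, and unmapped.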
void GLInstancedRendering::onBeginFlush(GrResourceProvider* rp) {
    // Count what there is to draw.
    BatchList::Iter iter;
    iter.init(this->trackedBatches(), BatchList::Iter::kHead_IterStart);
    int numGLInstances = 0;
    int numGLDrawCmds = 0;
    while (Batch* b = iter.get()) {
        GLBatch* batch = static_cast<GLBatch*>(b);
        iter.next();

        numGLInstances += batch->fNumDraws;
        numGLDrawCmds += batch->numGLCommands();
    }
    if (!numGLDrawCmds) {
        return;
    }
    SkASSERT(numGLInstances);

    // Lazily create a vertex array object.
    if (!fVertexArrayID) {
        GL_CALL(GenVertexArrays(1, &fVertexArrayID));
        if (!fVertexArrayID) {
            return;
        }
        this->glGpu()->bindVertexArray(fVertexArrayID);

        // Attach our index buffer to the vertex array.
        GL_CALL(BindBuffer(GR_GL_ELEMENT_ARRAY_BUFFER,
                           static_cast<const GrGLBuffer*>(this->indexBuffer())->bufferID()));

        // Set up the non-instanced attribs.
        this->glGpu()->bindBuffer(kVertex_GrBufferType,
                                  static_cast<const GrGLBuffer*>(this->vertexBuffer()));
        GL_CALL(EnableVertexAttribArray((int)Attrib::kShapeCoords));
        GL_CALL(VertexAttribPointer((int)Attrib::kShapeCoords, 2, GR_GL_FLOAT, GR_GL_FALSE,
                                    sizeof(ShapeVertex), (void*) offsetof(ShapeVertex, fX)));
        GL_CALL(EnableVertexAttribArray((int)Attrib::kVertexAttrs));
        GL_CALL(VertexAttribIPointer((int)Attrib::kVertexAttrs, 1, GR_GL_INT, sizeof(ShapeVertex),
                                     (void*) offsetof(ShapeVertex, fAttrs)));

        SkASSERT(SK_InvalidUniqueID == fInstanceAttribsBufferUniqueId);
    }
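
    // The instanced attribs are deliberately not set up in the VAO here; they
    // are bound (and re-pointed whenever the instance buffer or emulated base
    // instance changes) in flushInstanceAttribs().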

    // Create and map instance and draw-indirect buffers.
    SkASSERT(!fInstanceBuffer);
    fInstanceBuffer.reset(static_cast<GrGLBuffer*>(
        rp->createBuffer(sizeof(Instance) * numGLInstances, kVertex_GrBufferType,
                         kDynamic_GrAccessPattern, GrResourceProvider::kNoPendingIO_Flag)));
    if (!fInstanceBuffer) {
        return;
    }

    SkASSERT(!fDrawIndirectBuffer);
    fDrawIndirectBuffer.reset(static_cast<GrGLBuffer*>(
        rp->createBuffer(sizeof(GrGLDrawElementsIndirectCommand) * numGLDrawCmds,
                         kDrawIndirect_GrBufferType, kDynamic_GrAccessPattern,
                         GrResourceProvider::kNoPendingIO_Flag)));
    if (!fDrawIndirectBuffer) {
        return;
    }

    Instance* glMappedInstances = static_cast<Instance*>(fInstanceBuffer->map());
    int glInstancesIdx = 0;

    auto* glMappedCmds = static_cast<GrGLDrawElementsIndirectCommand*>(fDrawIndirectBuffer->map());
    int glDrawCmdsIdx = 0;

    bool baseInstanceSupport = this->glGpu()->glCaps().baseInstanceSupport();
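
    // Keep a CPU-side copy of the per-command info when it will be needed
    // later, either for logging or for advancing the emulated base instance on
    // drivers without base-instance support.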
    if (GR_GL_LOG_INSTANCED_BATCHES || !baseInstanceSupport) {
        fGLDrawCmdsInfo.reset(numGLDrawCmds);
    }

    // Generate the instance and draw-indirect buffer contents based on the tracked batches.
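    // Consecutive draws that share the same IndexRange collapse into a single
    // indirect command whose fInstanceCount spans the run; e.g. draws with
    // geometries [A, A, B] produce two commands: {A, instanceCount = 2} and
    // {B, instanceCount = 1}.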
    iter.init(this->trackedBatches(), BatchList::Iter::kHead_IterStart);
    while (Batch* b = iter.get()) {
        GLBatch* batch = static_cast<GLBatch*>(b);
        iter.next();

        batch->fEmulatedBaseInstance = baseInstanceSupport ? 0 : glInstancesIdx;
        batch->fGLDrawCmdsIdx = glDrawCmdsIdx;

        const Batch::Draw* draw = batch->fHeadDraw;
        SkASSERT(draw);
        do {
            int instanceCount = 0;
            IndexRange geometry = draw->fGeometry;
            SkASSERT(!geometry.isEmpty());

            do {
                glMappedInstances[glInstancesIdx + instanceCount++] = draw->fInstance;
                draw = draw->fNext;
            } while (draw && draw->fGeometry == geometry);

            GrGLDrawElementsIndirectCommand& glCmd = glMappedCmds[glDrawCmdsIdx];
            glCmd.fCount = geometry.fCount;
            glCmd.fInstanceCount = instanceCount;
            glCmd.fFirstIndex = geometry.fStart;
            glCmd.fBaseVertex = 0;
            glCmd.fBaseInstance = baseInstanceSupport ? glInstancesIdx : 0;

            if (GR_GL_LOG_INSTANCED_BATCHES || !baseInstanceSupport) {
                fGLDrawCmdsInfo[glDrawCmdsIdx].fInstanceCount = instanceCount;
#if GR_GL_LOG_INSTANCED_BATCHES
                fGLDrawCmdsInfo[glDrawCmdsIdx].fGeometry = geometry;
#endif
            }

            glInstancesIdx += instanceCount;
            ++glDrawCmdsIdx;
        } while (draw);
    }

    SkASSERT(glDrawCmdsIdx == numGLDrawCmds);
    fDrawIndirectBuffer->unmap();

    SkASSERT(glInstancesIdx == numGLInstances);
    fInstanceBuffer->unmap();
}

void GLInstancedRendering::onDraw(const GrPipeline& pipeline, const InstanceProcessor& instProc,
                                  const Batch* baseBatch) {
    if (!fDrawIndirectBuffer) {
        return; // beginFlush was not successful.
    }
    if (!this->glGpu()->flushGLState(pipeline, instProc)) {
        return;
    }

    this->glGpu()->bindBuffer(kDrawIndirect_GrBufferType, fDrawIndirectBuffer.get());

    const GrGLCaps& glCaps = this->glGpu()->glCaps();
    const GLBatch* batch = static_cast<const GLBatch*>(baseBatch);
    int numCommands = batch->numGLCommands();

#if GR_GL_LOG_INSTANCED_BATCHES
    SkASSERT(fGLDrawCmdsInfo);
    SkDebugf("Instanced batch: [");
    for (int i = 0; i < numCommands; ++i) {
        int glCmdIdx = batch->fGLDrawCmdsIdx + i;
        SkDebugf("%s%i * %s", (i ? ", " : ""), fGLDrawCmdsInfo[glCmdIdx].fInstanceCount,
                 InstanceProcessor::GetNameOfIndexRange(fGLDrawCmdsInfo[glCmdIdx].fGeometry));
    }
    SkDebugf("]\n");
#else
    SkASSERT(SkToBool(fGLDrawCmdsInfo) == !glCaps.baseInstanceSupport());
#endif
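
    // The "pointer" passed to the indirect draw calls below is really a byte
    // offset into the bound GR_GL_DRAW_INDIRECT_BUFFER, computed via pointer
    // arithmetic on null. A single multi-draw is used when the driver supports
    // both base instance and multi-draw indirect; otherwise the commands are
    // issued one at a time, re-pointing the instance attribs between draws to
    // emulate the base instance.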
    if (1 == numCommands || !glCaps.baseInstanceSupport() || !glCaps.multiDrawIndirectSupport()) {
        int emulatedBaseInstance = batch->fEmulatedBaseInstance;
        for (int i = 0; i < numCommands; ++i) {
            int glCmdIdx = batch->fGLDrawCmdsIdx + i;
            this->flushInstanceAttribs(emulatedBaseInstance);
            GL_CALL(DrawElementsIndirect(GR_GL_TRIANGLES, GR_GL_UNSIGNED_BYTE,
                                         (GrGLDrawElementsIndirectCommand*) nullptr + glCmdIdx));
            if (!glCaps.baseInstanceSupport()) {
                emulatedBaseInstance += fGLDrawCmdsInfo[glCmdIdx].fInstanceCount;
            }
        }
    } else {
        int glCmdsIdx = batch->fGLDrawCmdsIdx;
        this->flushInstanceAttribs(batch->fEmulatedBaseInstance);
        GL_CALL(MultiDrawElementsIndirect(GR_GL_TRIANGLES, GR_GL_UNSIGNED_BYTE,
                                          (GrGLDrawElementsIndirectCommand*) nullptr + glCmdsIdx,
                                          numCommands, 0));
    }
}
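
// Points the instanced attrib arrays at fInstanceBuffer, offset by baseInstance
// records so that instance 0 of a draw reads the intended Instance struct.
// Rebinding is skipped when the same buffer and base instance are already
// attached to the VAO.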
void GLInstancedRendering::flushInstanceAttribs(int baseInstance) {
    SkASSERT(fVertexArrayID);
    this->glGpu()->bindVertexArray(fVertexArrayID);

    SkASSERT(fInstanceBuffer);
    if (fInstanceAttribsBufferUniqueId != fInstanceBuffer->getUniqueID() ||
        fInstanceAttribsBaseInstance != baseInstance) {
        Instance* offsetInBuffer = (Instance*) nullptr + baseInstance;

        this->glGpu()->bindBuffer(kVertex_GrBufferType, fInstanceBuffer.get());

        // Info attrib.
        GL_CALL(EnableVertexAttribArray((int)Attrib::kInstanceInfo));
        GL_CALL(VertexAttribIPointer((int)Attrib::kInstanceInfo, 1, GR_GL_UNSIGNED_INT,
                                     sizeof(Instance), &offsetInBuffer->fInfo));
        GL_CALL(VertexAttribDivisor((int)Attrib::kInstanceInfo, 1));

        // Shape matrix attrib.
        GL_CALL(EnableVertexAttribArray((int)Attrib::kShapeMatrixX));
        GL_CALL(EnableVertexAttribArray((int)Attrib::kShapeMatrixY));
        GL_CALL(VertexAttribPointer((int)Attrib::kShapeMatrixX, 3, GR_GL_FLOAT, GR_GL_FALSE,
                                    sizeof(Instance), &offsetInBuffer->fShapeMatrix2x3[0]));
        GL_CALL(VertexAttribPointer((int)Attrib::kShapeMatrixY, 3, GR_GL_FLOAT, GR_GL_FALSE,
                                    sizeof(Instance), &offsetInBuffer->fShapeMatrix2x3[3]));
        GL_CALL(VertexAttribDivisor((int)Attrib::kShapeMatrixX, 1));
        GL_CALL(VertexAttribDivisor((int)Attrib::kShapeMatrixY, 1));

        // Color attrib.
        GL_CALL(EnableVertexAttribArray((int)Attrib::kColor));
        GL_CALL(VertexAttribPointer((int)Attrib::kColor, 4, GR_GL_UNSIGNED_BYTE, GR_GL_TRUE,
                                    sizeof(Instance), &offsetInBuffer->fColor));
        GL_CALL(VertexAttribDivisor((int)Attrib::kColor, 1));

        // Local rect attrib.
        GL_CALL(EnableVertexAttribArray((int)Attrib::kLocalRect));
        GL_CALL(VertexAttribPointer((int)Attrib::kLocalRect, 4, GR_GL_FLOAT, GR_GL_FALSE,
                                    sizeof(Instance), &offsetInBuffer->fLocalRect));
        GL_CALL(VertexAttribDivisor((int)Attrib::kLocalRect, 1));

        fInstanceAttribsBufferUniqueId = fInstanceBuffer->getUniqueID();
        fInstanceAttribsBaseInstance = baseInstance;
    }
}
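
// The per-flush buffers are released after each flush; the vertex array object
// itself persists and is reused by subsequent flushes.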
void GLInstancedRendering::onEndFlush() {
    fInstanceBuffer.reset();
    fDrawIndirectBuffer.reset();
    fGLDrawCmdsInfo.reset(0);
}
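
// On ResetType::kDestroy the GL objects are deleted explicitly; for other
// reset types (e.g. an abandoned context, where the objects are presumed
// already gone) only the CPU-side handles and buffers are dropped.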
void GLInstancedRendering::onResetGpuResources(ResetType resetType) {
    if (fVertexArrayID && ResetType::kDestroy == resetType) {
        GL_CALL(DeleteVertexArrays(1, &fVertexArrayID));
        this->glGpu()->notifyVertexArrayDelete(fVertexArrayID);
    }
    fVertexArrayID = 0;
    fInstanceBuffer.reset();
    fDrawIndirectBuffer.reset();
    fInstanceAttribsBufferUniqueId = SK_InvalidUniqueID;
}

}  // namespace gr_instanced