Chromium Code Reviews

Unified Diff: src/gpu/instanced/GLInstancedRendering.cpp

Issue 2193303002: Add ES 3.0 fallback for instanced rendering (Closed) Base URL: https://skia.googlesource.com/skia.git@master
Patch Set: Created 4 years, 4 months ago
 /*
  * Copyright 2016 Google Inc.
  *
  * Use of this source code is governed by a BSD-style license that can be
  * found in the LICENSE file.
  */

 #include "GLInstancedRendering.h"

 #include "GrResourceProvider.h"
(...skipping 16 matching lines...)
     int fGLDrawCmdsIdx;

     friend class GLInstancedRendering;

     typedef Batch INHERITED;
 };

 GrCaps::InstancedSupport GLInstancedRendering::CheckSupport(const GrGLCaps& glCaps) {
     // This method is only intended to be used for initializing fInstancedSupport in the caps.
     SkASSERT(GrCaps::InstancedSupport::kNone == glCaps.instancedSupport());
-    if (!glCaps.vertexArrayObjectSupport() || !glCaps.drawIndirectSupport()) {
+    if (!glCaps.vertexArrayObjectSupport() ||
+        (!glCaps.drawIndirectSupport() && !glCaps.drawInstancedSupport())) {
         return GrCaps::InstancedSupport::kNone;
     }
     return InstanceProcessor::CheckSupport(*glCaps.glslCaps(), glCaps);
 }

 GLInstancedRendering::GLInstancedRendering(GrGLGpu* gpu)
     : INHERITED(gpu),
       fVertexArrayID(0),
       fGLDrawCmdsInfo(0),
       fInstanceAttribsBufferUniqueId(SK_InvalidUniqueID) {
(...skipping 63 matching lines...)
     fInstanceBuffer.reset(
         rp->createBuffer(sizeof(Instance) * numGLInstances, kVertex_GrBufferType,
                          kDynamic_GrAccessPattern,
                          GrResourceProvider::kNoPendingIO_Flag |
                          GrResourceProvider::kRequireGpuMemory_Flag));
     if (!fInstanceBuffer) {
         return;
     }

     SkASSERT(!fDrawIndirectBuffer);
-    fDrawIndirectBuffer.reset(
-        rp->createBuffer(sizeof(GrGLDrawElementsIndirectCommand) * numGLDrawCmds,
-                         kDrawIndirect_GrBufferType, kDynamic_GrAccessPattern,
-                         GrResourceProvider::kNoPendingIO_Flag |
-                         GrResourceProvider::kRequireGpuMemory_Flag));
-    if (!fDrawIndirectBuffer) {
-        return;
+    if (this->glGpu()->glCaps().drawIndirectSupport()) {
+        fDrawIndirectBuffer.reset(
+            rp->createBuffer(sizeof(GrGLDrawElementsIndirectCommand) * numGLDrawCmds,
+                             kDrawIndirect_GrBufferType, kDynamic_GrAccessPattern,
+                             GrResourceProvider::kNoPendingIO_Flag |
+                             GrResourceProvider::kRequireGpuMemory_Flag));
+        if (!fDrawIndirectBuffer) {
+            return;
+        }
     }

     Instance* glMappedInstances = static_cast<Instance*>(fInstanceBuffer->map());
+    SkASSERT(glMappedInstances);
     int glInstancesIdx = 0;

-    auto* glMappedCmds = static_cast<GrGLDrawElementsIndirectCommand*>(fDrawIndirectBuffer->map());
+    GrGLDrawElementsIndirectCommand* glMappedCmds = nullptr;
     int glDrawCmdsIdx = 0;
+    if (fDrawIndirectBuffer) {
+        glMappedCmds = static_cast<GrGLDrawElementsIndirectCommand*>(fDrawIndirectBuffer->map());
+        SkASSERT(glMappedCmds);
+    }

     bool baseInstanceSupport = this->glGpu()->glCaps().baseInstanceSupport();
+    SkASSERT(!baseInstanceSupport || fDrawIndirectBuffer);

+    SkASSERT(!fGLDrawCmdsInfo);
     if (GR_GL_LOG_INSTANCED_BATCHES || !baseInstanceSupport) {
         fGLDrawCmdsInfo.reset(numGLDrawCmds);
     }

     // Generate the instance and draw-indirect buffer contents based on the tracked batches.
     iter.init(this->trackedBatches(), BatchList::Iter::kHead_IterStart);
     while (Batch* b = iter.get()) {
         GLBatch* batch = static_cast<GLBatch*>(b);
         iter.next();

         batch->fEmulatedBaseInstance = baseInstanceSupport ? 0 : glInstancesIdx;
         batch->fGLDrawCmdsIdx = glDrawCmdsIdx;

         const Batch::Draw* draw = batch->fHeadDraw;
         SkASSERT(draw);
         do {
             int instanceCount = 0;
             IndexRange geometry = draw->fGeometry;
             SkASSERT(!geometry.isEmpty());

             do {
                 glMappedInstances[glInstancesIdx + instanceCount++] = draw->fInstance;
                 draw = draw->fNext;
             } while (draw && draw->fGeometry == geometry);

-            GrGLDrawElementsIndirectCommand& glCmd = glMappedCmds[glDrawCmdsIdx];
-            glCmd.fCount = geometry.fCount;
-            glCmd.fInstanceCount = instanceCount;
-            glCmd.fFirstIndex = geometry.fStart;
-            glCmd.fBaseVertex = 0;
-            glCmd.fBaseInstance = baseInstanceSupport ? glInstancesIdx : 0;
+            if (fDrawIndirectBuffer) {
+                GrGLDrawElementsIndirectCommand& glCmd = glMappedCmds[glDrawCmdsIdx];
+                glCmd.fCount = geometry.fCount;
+                glCmd.fInstanceCount = instanceCount;
+                glCmd.fFirstIndex = geometry.fStart;
+                glCmd.fBaseVertex = 0;
+                glCmd.fBaseInstance = baseInstanceSupport ? glInstancesIdx : 0;
+            }

             if (GR_GL_LOG_INSTANCED_BATCHES || !baseInstanceSupport) {
-                fGLDrawCmdsInfo[glDrawCmdsIdx].fInstanceCount = instanceCount;
-#if GR_GL_LOG_INSTANCED_BATCHES
-                fGLDrawCmdsInfo[glDrawCmdsIdx].fGeometry = geometry;
-#endif
+                GLDrawCmdInfo& cmdInfo = fGLDrawCmdsInfo[glDrawCmdsIdx];
+                cmdInfo.fGeometry = geometry;
+                cmdInfo.fInstanceCount = instanceCount;
             }

             glInstancesIdx += instanceCount;
             ++glDrawCmdsIdx;
         } while (draw);
     }

     SkASSERT(glDrawCmdsIdx == numGLDrawCmds);
-    fDrawIndirectBuffer->unmap();
+    if (fDrawIndirectBuffer) {
+        fDrawIndirectBuffer->unmap();
+    }

     SkASSERT(glInstancesIdx == numGLInstances);
     fInstanceBuffer->unmap();
 }

 void GLInstancedRendering::onDraw(const GrPipeline& pipeline, const InstanceProcessor& instProc,
                                   const Batch* baseBatch) {
-    if (!fDrawIndirectBuffer) {
+    if (!fDrawIndirectBuffer && !fGLDrawCmdsInfo) {
         return; // beginFlush was not successful.
     }
     if (!this->glGpu()->flushGLState(pipeline, instProc)) {
         return;
     }

-    this->glGpu()->bindBuffer(kDrawIndirect_GrBufferType, fDrawIndirectBuffer.get());
+    if (fDrawIndirectBuffer) {
+        this->glGpu()->bindBuffer(kDrawIndirect_GrBufferType, fDrawIndirectBuffer.get());
+    }

     const GrGLCaps& glCaps = this->glGpu()->glCaps();
     const GLBatch* batch = static_cast<const GLBatch*>(baseBatch);
     int numCommands = batch->numGLCommands();

 #if GR_GL_LOG_INSTANCED_BATCHES
     SkASSERT(fGLDrawCmdsInfo);
     SkDebugf("Instanced batch: [");
     for (int i = 0; i < numCommands; ++i) {
         int glCmdIdx = batch->fGLDrawCmdsIdx + i;
         SkDebugf("%s%i * %s", (i ? ", " : ""), fGLDrawCmdsInfo[glCmdIdx].fInstanceCount,
                  InstanceProcessor::GetNameOfIndexRange(fGLDrawCmdsInfo[glCmdIdx].fGeometry));
     }
     SkDebugf("]\n");
 #else
     SkASSERT(SkToBool(fGLDrawCmdsInfo) == !glCaps.baseInstanceSupport());
 #endif

-    if (1 == numCommands || !glCaps.baseInstanceSupport() || !glCaps.multiDrawIndirectSupport()) {
-        int emulatedBaseInstance = batch->fEmulatedBaseInstance;
-        for (int i = 0; i < numCommands; ++i) {
-            int glCmdIdx = batch->fGLDrawCmdsIdx + i;
-            this->flushInstanceAttribs(emulatedBaseInstance);
-            GL_CALL(DrawElementsIndirect(GR_GL_TRIANGLES, GR_GL_UNSIGNED_BYTE,
-                                         (GrGLDrawElementsIndirectCommand*) nullptr + glCmdIdx));
-            if (!glCaps.baseInstanceSupport()) {
-                emulatedBaseInstance += fGLDrawCmdsInfo[glCmdIdx].fInstanceCount;
-            }
-        }
-    } else {
+    if (numCommands > 1 && glCaps.multiDrawIndirectSupport() && glCaps.baseInstanceSupport()) {
+        SkASSERT(fDrawIndirectBuffer);
         int glCmdsIdx = batch->fGLDrawCmdsIdx;
         this->flushInstanceAttribs(batch->fEmulatedBaseInstance);
         GL_CALL(MultiDrawElementsIndirect(GR_GL_TRIANGLES, GR_GL_UNSIGNED_BYTE,
                                           (GrGLDrawElementsIndirectCommand*) nullptr + glCmdsIdx,
                                           numCommands, 0));
+        return;
+    }
+
+    int emulatedBaseInstance = batch->fEmulatedBaseInstance;
+    for (int i = 0; i < numCommands; ++i) {
+        int glCmdIdx = batch->fGLDrawCmdsIdx + i;
+        const GLDrawCmdInfo& cmdInfo = fGLDrawCmdsInfo[glCmdIdx];
+        this->flushInstanceAttribs(emulatedBaseInstance);
+        if (fDrawIndirectBuffer) {
+            GL_CALL(DrawElementsIndirect(GR_GL_TRIANGLES, GR_GL_UNSIGNED_BYTE,
+                                         (GrGLDrawElementsIndirectCommand*) nullptr + glCmdIdx));
+        } else {
+            GL_CALL(DrawElementsInstanced(GR_GL_TRIANGLES, cmdInfo.fGeometry.fCount,
+                                          GR_GL_UNSIGNED_BYTE,
+                                          (GrGLubyte*) nullptr + cmdInfo.fGeometry.fStart,
+                                          cmdInfo.fInstanceCount));
+        }
+        if (!glCaps.baseInstanceSupport()) {
+            emulatedBaseInstance += cmdInfo.fInstanceCount;
+        }
     }
 }

 void GLInstancedRendering::flushInstanceAttribs(int baseInstance) {
     SkASSERT(fVertexArrayID);
     this->glGpu()->bindVertexArray(fVertexArrayID);

     SkASSERT(fInstanceBuffer);
     if (fInstanceAttribsBufferUniqueId != fInstanceBuffer->getUniqueID() ||
         fInstanceAttribsBaseInstance != baseInstance) {
(...skipping 45 matching lines...)
         GL_CALL(DeleteVertexArrays(1, &fVertexArrayID));
         this->glGpu()->notifyVertexArrayDelete(fVertexArrayID);
     }
     fVertexArrayID = 0;
     fInstanceBuffer.reset();
     fDrawIndirectBuffer.reset();
     fInstanceAttribsBufferUniqueId = SK_InvalidUniqueID;
 }

 }
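
For context on what the new fallback does at the GL level, here is a minimal standalone sketch (not Skia code and not part of this patch): when draw-indirect support is absent, the per-command data that would have been packed into the GL_DRAW_INDIRECT_BUFFER is kept CPU-side and replayed with glDrawElementsInstanced. The DrawCmd struct, the cmds/numCmds/indirectCmdStride parameters, and the setInstanceAttribPointers helper below are illustrative stand-ins, not Skia APIs.

// Sketch of the ES 3.0 fallback vs. the draw-indirect path, assuming
// GL_UNSIGNED_BYTE indices (as in the patch), so the byte offset into the
// index buffer equals the first index. The indirect path needs ES 3.1.
#include <GLES3/gl31.h>
#include <cstdint>

struct DrawCmd {              // CPU-side copy of one draw command
    GLsizei indexCount;       // indices per instance
    GLsizei instanceCount;    // instances drawn by this command
    GLint   firstIndex;       // offset into the bound index buffer
};

// Hypothetical helper: rebinds the instanced vertex attribs at the given
// instance offset, emulating a base instance on ES 3.0.
void setInstanceAttribPointers(GLuint emulatedBaseInstance);

void replayCmds(const DrawCmd* cmds, int numCmds, bool hasDrawIndirect,
                size_t indirectCmdStride) {
    GLuint emulatedBaseInstance = 0;
    for (int i = 0; i < numCmds; ++i) {
        setInstanceAttribPointers(emulatedBaseInstance);
        if (hasDrawIndirect) {
            // Commands were pre-packed into the bound GL_DRAW_INDIRECT_BUFFER;
            // pass a byte offset into that buffer.
            glDrawElementsIndirect(GL_TRIANGLES, GL_UNSIGNED_BYTE,
                                   (const void*)(i * indirectCmdStride));
        } else {
            // ES 3.0 fallback: issue the equivalent draw from CPU-side data.
            glDrawElementsInstanced(GL_TRIANGLES, cmds[i].indexCount, GL_UNSIGNED_BYTE,
                                    (const void*)(uintptr_t)cmds[i].firstIndex,
                                    cmds[i].instanceCount);
        }
        emulatedBaseInstance += cmds[i].instanceCount;
    }
}

In the patch itself the equivalent per-command data lives in fGLDrawCmdsInfo, and the attrib rebinding is handled by flushInstanceAttribs().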