/*
 * Copyright 2016 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GLInstancedRendering.h"

#include "GrResourceProvider.h"
#include "gl/GrGLGpu.h"
#include "instanced/InstanceProcessor.h"

#define GL_CALL(X) GR_GL_CALL(this->glGpu()->glInterface(), X)

namespace gr_instanced {

class GLInstancedRendering::GLBatch : public InstancedRendering::Batch {
public:
    DEFINE_BATCH_CLASS_ID

    GLBatch(GLInstancedRendering* instRendering, int instanceIdx)
        : INHERITED(ClassID(), instRendering, instanceIdx) {
    }

private:
    int fEmulatedBaseInstance;
    int fGLDrawCmdsIdx;
    int fNumGLDrawCmds;

    friend class GLInstancedRendering;

    typedef Batch INHERITED;
};
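
// The three fields above are filled in by onBeginFlush() and consumed by onDraw():
// fGLDrawCmdsIdx/fNumGLDrawCmds locate this batch's commands within the shared
// draw-indirect buffer, and fEmulatedBaseInstance records where its instances start
// when base-instance draws are unavailable. A rough picture of the layout
// (illustrative values, not taken from real data):
//
//   draw-indirect buffer: [cmd0 cmd1 | cmd2 cmd3 cmd4 | ...]
//                          batch A     batch B
//   batch B: fGLDrawCmdsIdx = 2, fNumGLDrawCmds = 3,
//            fEmulatedBaseInstance = sum of batch A's instance counts.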

GLInstancedRendering* GLInstancedRendering::CreateIfSupported(GrGLGpu* gpu) {
    const GrGLCaps& caps = gpu->glCaps();
    AntialiasMode lastSupportedAAMode;
    if (!caps.vertexArrayObjectSupport() ||
        !caps.drawIndirectSupport() ||
        !InstanceProcessor::IsSupported(*caps.glslCaps(), caps, &lastSupportedAAMode)) {
        return nullptr;
    }
    return new GLInstancedRendering(gpu, lastSupportedAAMode);
}
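
// Note: the renderer hard-requires vertex array objects and indirect draws; in core GL
// terms that roughly means GL 4.0 / ARB_draw_indirect (or ES 3.1), though the exact
// gating lives in GrGLCaps. Base-instance and multi-draw-indirect support are NOT
// required here; onDraw() falls back to emulating them when absent.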

GLInstancedRendering::GLInstancedRendering(GrGLGpu* gpu, AntialiasMode lastSupportedAAMode)
    : INHERITED(gpu, lastSupportedAAMode),
      fVertexArrayID(0),
      fInstanceAttribsBufferUniqueId(SK_InvalidUniqueID) {
}

GLInstancedRendering::~GLInstancedRendering() {
    if (fVertexArrayID) {
        GL_CALL(DeleteVertexArrays(1, &fVertexArrayID));
        this->glGpu()->notifyVertexArrayDelete(fVertexArrayID);
    }
}

inline GrGLGpu* GLInstancedRendering::glGpu() const {
    return static_cast<GrGLGpu*>(this->gpu());
}

InstancedRendering::Batch* GLInstancedRendering::createBatch(int instanceIdx) {
    return new GLBatch(this, instanceIdx);
}

void GLInstancedRendering::onBeginFlush(GrResourceProvider* rp) {
    // Count what there is to draw.
    BatchList::Iter iter;
    iter.init(this->batchList(), BatchList::Iter::kHead_IterStart);
    int numGLInstances = 0;
    int numGLDrawCmds = 0;
    while (Batch* b = iter.get()) {
        GLBatch* batch = static_cast<GLBatch*>(b);
        iter.next();

        const auto& drawCmds = batch->fDrawCmds;
        for (int c = 0; c < drawCmds.count(); ++c) {
            numGLInstances += drawCmds[c].fInstanceRange.fCount;
            if (c > 0 && drawCmds[c].fGeometry == drawCmds[c - 1].fGeometry) {
                // When two adjacent draw commands have the same geometry, we will rearrange the
                // instances for the GL buffer and combine them into a single command.
                continue;
            }
            ++numGLDrawCmds;
        }
    }
    if (!numGLDrawCmds) {
        return;
    }
    SkASSERT(numGLInstances);
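
    // To make the merge above concrete: if a batch's commands reference geometries
    // [rect, rect, oval] with instance counts [10, 5, 3], the two adjacent rect
    // commands collapse into one GL command, so this batch contributes
    // numGLInstances += 18 but numGLDrawCmds += 2. (Illustrative numbers only.)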

    // Lazily create a vertex array object.
    if (!fVertexArrayID) {
        GL_CALL(GenVertexArrays(1, &fVertexArrayID));
        if (!fVertexArrayID) {
            return;
        }
        this->glGpu()->bindVertexArray(fVertexArrayID);

        // Attach our index buffer to the vertex array.
        GL_CALL(BindBuffer(GR_GL_ELEMENT_ARRAY_BUFFER,
                           static_cast<const GrGLBuffer*>(this->indexBuffer())->bufferID()));

        // Set up the non-instanced attribs.
        this->glGpu()->bindBuffer(kVertex_GrBufferType,
                                  static_cast<const GrGLBuffer*>(this->vertexBuffer()));
        GL_CALL(EnableVertexAttribArray(kShapeCoords_AttribIdx));
        GL_CALL(VertexAttribPointer(kShapeCoords_AttribIdx, 2, GR_GL_FLOAT, GR_GL_FALSE,
                                    sizeof(ShapeVertex), (void*) offsetof(ShapeVertex, fX)));
        GL_CALL(EnableVertexAttribArray(kVertexAttrs_AttribIdx));
        GL_CALL(VertexAttribIPointer(kVertexAttrs_AttribIdx, 1, GR_GL_INT, sizeof(ShapeVertex),
                                     (void*) offsetof(ShapeVertex, fAttrs)));

        SkASSERT(SK_InvalidUniqueID == fInstanceAttribsBufferUniqueId);
    }
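
    // For reference, the attrib pointers above imply a vertex layout along the lines of
    // (see ShapeVertex in the instanced-rendering headers for the real definition):
    //
    //   struct ShapeVertex { float fX, fY; int32_t fAttrs; };
    //
    // i.e. two floats of shape-space position followed by one integer attribute word,
    // which is why fAttrs goes through VertexAttribIPointer rather than the
    // float-converting VertexAttribPointer.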

    // Create and map instance and draw-indirect buffers.
    SkASSERT(!fInstanceBuffer);
    fInstanceBuffer.reset(static_cast<GrGLBuffer*>(
        rp->createBuffer(sizeof(Instance) * numGLInstances, kVertex_GrBufferType,
                         kDynamic_GrAccessPattern, GrResourceProvider::kNoPendingIO_Flag)));
    if (!fInstanceBuffer) {
        return;
    }

    SkASSERT(!fDrawIndirectBuffer);
    fDrawIndirectBuffer.reset(static_cast<GrGLBuffer*>(
        rp->createBuffer(sizeof(GrGLDrawElementsIndirectCommand) * numGLDrawCmds,
                         kDrawIndirect_GrBufferType, kDynamic_GrAccessPattern,
                         GrResourceProvider::kNoPendingIO_Flag)));
    if (!fDrawIndirectBuffer) {
        return;
    }
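
    // GrGLDrawElementsIndirectCommand mirrors the five 32-bit fields that
    // glDrawElementsIndirect reads from the bound GR_GL_DRAW_INDIRECT buffer --
    // roughly (per the GL man pages):
    //
    //   struct DrawElementsIndirectCommand {
    //       GLuint count;          // index count for the draw
    //       GLuint instanceCount;  // number of instances
    //       GLuint firstIndex;     // offset into the element array buffer
    //       GLuint baseVertex;
    //       GLuint baseInstance;   // honored only with base-instance support
    //   };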

    Instance* glMappedInstances = static_cast<Instance*>(fInstanceBuffer->map());
    int glInstancesIdx = 0;

    auto* glMappedCmds = static_cast<GrGLDrawElementsIndirectCommand*>(fDrawIndirectBuffer->map());
    int glDrawCmdsIdx = 0;

    bool baseInstanceSupport = this->glGpu()->glCaps().baseInstanceSupport();

    if (!baseInstanceSupport) {
        fGLDrawCmdsInfo.reset(numGLDrawCmds);
    }
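
    // Without base-instance support the fBaseInstance field of each indirect command
    // must be left at zero, so we additionally record each command's instance count in
    // fGLDrawCmdsInfo. onDraw() walks that side array to compute a running "emulated"
    // base instance and re-points the instanced vertex attribs before every command.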

    // Generate the instance and draw-indirect buffer contents based on the batches in
    // existence.
    iter.init(this->batchList(), BatchList::Iter::kHead_IterStart);
    while (Batch* b = iter.get()) {
        GLBatch* batch = static_cast<GLBatch*>(b);
        iter.next();

        batch->fEmulatedBaseInstance = baseInstanceSupport ? 0 : glInstancesIdx;
        batch->fGLDrawCmdsIdx = glDrawCmdsIdx;
        batch->fNumGLDrawCmds = 0;

        const auto& drawCmds = batch->fDrawCmds;
        int cidx = 0;
        SkASSERT(!drawCmds.empty());
        do {
            IndexRange geometry = drawCmds[cidx].fGeometry;
            int instanceCount = 0;

            do {
                SkASSERT(drawCmds[cidx].isValid());
                InstanceRange range = drawCmds[cidx].fInstanceRange;
                memcpy(&glMappedInstances[glInstancesIdx + instanceCount],
                       &this->instance(range.fStart), range.fCount * sizeof(Instance));
                instanceCount += range.fCount;
            } while (++cidx < drawCmds.count() && drawCmds[cidx].fGeometry == geometry);

            GrGLDrawElementsIndirectCommand& glCmd = glMappedCmds[glDrawCmdsIdx];
            glCmd.fCount = geometry.fCount;
            glCmd.fInstanceCount = instanceCount;
            glCmd.fFirstIndex = geometry.fStart;
            glCmd.fBaseVertex = 0;
            glCmd.fBaseInstance = baseInstanceSupport ? glInstancesIdx : 0;

            if (!baseInstanceSupport) {
                fGLDrawCmdsInfo[glDrawCmdsIdx].fInstanceCount = instanceCount;
            }

            glInstancesIdx += instanceCount;
            ++glDrawCmdsIdx;
            ++batch->fNumGLDrawCmds;
        } while (cidx < drawCmds.count());
    }
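
    // Walking the example from above: commands [rect x10, rect x5, oval x3] emit
    // glCmd0 = {fCount = rect indices, fInstanceCount = 15, fBaseInstance = 0} and
    // glCmd1 = {fCount = oval indices, fInstanceCount = 3,  fBaseInstance = 15},
    // with the 18 Instance structs copied contiguously into fInstanceBuffer in that
    // same order. (Again, illustrative numbers.)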

    SkASSERT(glDrawCmdsIdx == numGLDrawCmds);
    fDrawIndirectBuffer->unmap();

    SkASSERT(glInstancesIdx == numGLInstances);
    fInstanceBuffer->unmap();
}

void GLInstancedRendering::onDraw(const GrPipeline& pipeline, const InstanceProcessor& instProc,
                                  const Batch* baseBatch) {
    if (!fDrawIndirectBuffer) {
        return; // beginFlush was not successful.
    }
    if (!this->glGpu()->flushGLState(pipeline, instProc)) {
        return;
    }

    this->glGpu()->bindBuffer(kDrawIndirect_GrBufferType, fDrawIndirectBuffer.get());

    const GrGLCaps& glCaps = this->glGpu()->glCaps();
    const GLBatch* batch = static_cast<const GLBatch*>(baseBatch);
    int numCommands = batch->fNumGLDrawCmds;

#if 0
    SkDebugf("Instanced batch: [");
    for (int i = 0; i < numCommands; ++i) {
        SkDebugf("%s%i * %s", (i ? ", " : ""), batch->fDrawCmds[i].fInstanceRange.fCount,
                 InstanceProcessor::GetNameOfIndexRange(batch->fDrawCmds[i].fGeometry));
    }
    SkDebugf("]\n");
#endif

    if (1 == numCommands || !glCaps.baseInstanceSupport() || !glCaps.multiDrawIndirectSupport()) {
        int emulatedBaseInstance = batch->fEmulatedBaseInstance;
        for (int i = 0; i < numCommands; ++i) {
            int glCmdIdx = batch->fGLDrawCmdsIdx + i;
            this->flushInstanceAttribs(emulatedBaseInstance);
            GL_CALL(DrawElementsIndirect(GR_GL_TRIANGLES, GR_GL_UNSIGNED_BYTE,
                                         (GrGLDrawElementsIndirectCommand*) nullptr + glCmdIdx));
            if (!glCaps.baseInstanceSupport()) {
                emulatedBaseInstance += fGLDrawCmdsInfo[glCmdIdx].fInstanceCount;
            }
        }
    } else {
        int glCmdsIdx = batch->fGLDrawCmdsIdx;
        this->flushInstanceAttribs(batch->fEmulatedBaseInstance);
        GL_CALL(MultiDrawElementsIndirect(GR_GL_TRIANGLES, GR_GL_UNSIGNED_BYTE,
                                          (GrGLDrawElementsIndirectCommand*) nullptr + glCmdsIdx,
                                          numCommands, 0));
    }
}
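
// The two paths above dispatch the same commands differently. With full support, one
// glMultiDrawElementsIndirect consumes all of the batch's commands in place; otherwise
// we loop, re-flushing the instance attribs per command. In both cases the expression
// "(GrGLDrawElementsIndirectCommand*) nullptr + glCmdIdx" is just pointer arithmetic
// producing the byte offset glCmdIdx * sizeof(GrGLDrawElementsIndirectCommand) into the
// bound draw-indirect buffer, since the indirect-draw entry points take an offset
// disguised as a pointer. The final 0 passed to MultiDrawElementsIndirect is the
// stride, meaning "tightly packed".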

void GLInstancedRendering::flushInstanceAttribs(int baseInstance) {
    SkASSERT(fVertexArrayID);
    this->glGpu()->bindVertexArray(fVertexArrayID);

    SkASSERT(fInstanceBuffer);
    if (fInstanceAttribsBufferUniqueId != fInstanceBuffer->getUniqueID() ||
        fInstanceAttribsBaseInstance != baseInstance) {
        Instance* offsetInBuffer = (Instance*) nullptr + baseInstance;

        this->glGpu()->bindBuffer(kVertex_GrBufferType, fInstanceBuffer.get());

        // Info attrib.
        GL_CALL(EnableVertexAttribArray(kInstanceInfo_AttribIdx));
        GL_CALL(VertexAttribIPointer(kInstanceInfo_AttribIdx, 1, GR_GL_UNSIGNED_INT,
                                     sizeof(Instance), &offsetInBuffer->fInfo));
        GL_CALL(VertexAttribDivisor(kInstanceInfo_AttribIdx, 1));

        // Shape matrix attrib.
        GL_CALL(EnableVertexAttribArray(kShapeMatrixX_AttribIdx));
        GL_CALL(EnableVertexAttribArray(kShapeMatrixY_AttribIdx));
        GL_CALL(VertexAttribPointer(kShapeMatrixX_AttribIdx, 3, GR_GL_FLOAT, GR_GL_FALSE,
                                    sizeof(Instance), &offsetInBuffer->fShapeMatrix2x3[0]));
        GL_CALL(VertexAttribPointer(kShapeMatrixY_AttribIdx, 3, GR_GL_FLOAT, GR_GL_FALSE,
                                    sizeof(Instance), &offsetInBuffer->fShapeMatrix2x3[3]));
        GL_CALL(VertexAttribDivisor(kShapeMatrixX_AttribIdx, 1));
        GL_CALL(VertexAttribDivisor(kShapeMatrixY_AttribIdx, 1));

        // Color attrib.
        GL_CALL(EnableVertexAttribArray(kColor_AttribIdx));
        GL_CALL(VertexAttribPointer(kColor_AttribIdx, 4, GR_GL_UNSIGNED_BYTE, GR_GL_TRUE,
                                    sizeof(Instance), &offsetInBuffer->fColor));
        GL_CALL(VertexAttribDivisor(kColor_AttribIdx, 1));

        // Local rect attrib.
        GL_CALL(EnableVertexAttribArray(kLocalRect_AttribIdx));
        GL_CALL(VertexAttribPointer(kLocalRect_AttribIdx, 4, GR_GL_FLOAT, GR_GL_FALSE,
                                    sizeof(Instance), &offsetInBuffer->fLocalRect));
        GL_CALL(VertexAttribDivisor(kLocalRect_AttribIdx, 1));

        fInstanceAttribsBufferUniqueId = fInstanceBuffer->getUniqueID();
        fInstanceAttribsBaseInstance = baseInstance;
    }
}
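
// flushInstanceAttribs() is how the missing base instance gets emulated: rebinding
// every instanced attrib with a pointer offset of baseInstance * sizeof(Instance)
// shifts which Instance struct maps to gl_InstanceID == 0, equivalent in effect to
// the baseInstance parameter of glDrawElementsInstancedBaseInstance. The
// uniqueID/baseInstance check on entry makes the rebind a no-op when nothing has
// changed, e.g. across consecutive commands that share a base instance.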

void GLInstancedRendering::onEndFlush() {
    fInstanceBuffer.reset();
    fDrawIndirectBuffer.reset();
    fGLDrawCmdsInfo.reset(0);
}

void GLInstancedRendering::onResetGpuResources(ResetType resetType) {
    if (fVertexArrayID && ResetType::kDestroy == resetType) {
        GL_CALL(DeleteVertexArrays(1, &fVertexArrayID));
        this->glGpu()->notifyVertexArrayDelete(fVertexArrayID);
    }
    fVertexArrayID = 0;
    fInstanceBuffer.reset();
    fDrawIndirectBuffer.reset();
    fInstanceAttribsBufferUniqueId = SK_InvalidUniqueID;
}
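
// Note the asymmetry above: GL objects are deleted only for ResetType::kDestroy. For
// an abandoned context the GL handles are presumed invalid and are simply dropped
// without making GL calls, while the CPU-side buffers and cached state are cleared
// either way.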

}  // namespace gr_instanced