OLD | NEW |
(Empty) | |
| 1 /* |
| 2 * Copyright 2016 Google Inc. |
| 3 * |
| 4 * Use of this source code is governed by a BSD-style license that can be |
| 5 * found in the LICENSE file. |
| 6 */ |
| 7 |
| 8 #include "GrGLInstancedRendering.h" |
| 9 |
| 10 #include "GrGLBuffer.h" |
| 11 #include "GrGLGpu.h" |
| 12 #include "GrResourceProvider.h" |
| 13 #include "effects/GrInstanceProcessor.h" |
| 14 |
| 15 #ifdef SK_DEBUG |
| 16 #define DEBUG_PRINT(...) //SkDebugf(__VA_ARGS__) |
| 17 #else |
| 18 #define DEBUG_PRINT(...) |
| 19 #endif |
| 20 |
| 21 #define GL_CALL(X) GR_GL_CALL(this->glGpu()->glInterface(), X) |
| 22 |
/**
 * One instanced-rendering batch, specialized for GL. On top of the base Batch
 * state it accumulates the indirect draw commands that get uploaded to the
 * flush-wide draw-indirect buffer in onBeginFlush().
 */
class GrGLInstancedRendering::GLBatch : public GrInstancedRendering::Batch {
public:
    DEFINE_BATCH_CLASS_ID

    GLBatch(GrGLInstancedRendering* instRendering, int instanceIdx)
        : INHERITED(ClassID(), instRendering, instanceIdx) {
    }

    void initBatchTracker(const GrXPOverridesForBatch&) override;
    bool onCombineIfPossible(GrBatch* other, const GrCaps& caps) override;

private:
    // Downcast of the owning renderer. GLBatches are created only by
    // GrGLInstancedRendering::constructBatch, so the cast is safe.
    GrGLInstancedRendering* glInstancedRendering() const {
        return static_cast<GrGLInstancedRendering*>(fInstancedRendering);
    }

    // Indirect draw commands this batch contributes; grows when batches are
    // combined in onCombineIfPossible.
    SkSTArray<4, GrGLDrawElementsIndirectCommand, true> fDrawCmds;
    // This batch's offset into the draw-indirect buffer, encoded as a pointer
    // for the "indirect" argument of glDrawElementsIndirect. Assigned during
    // GrGLInstancedRendering::onBeginFlush.
    GrGLDrawElementsIndirectCommand* fDrawCmdsOffsetInBuffer;

    friend class GrGLInstancedRendering;

    typedef Batch INHERITED;
};
| 46 |
| 47 GrGLInstancedRendering* GrGLInstancedRendering::CreateIfSupported(GrGLGpu* gpu)
{ |
| 48 const GrGLCaps& caps = gpu->glCaps(); |
| 49 if (!caps.vertexArrayObjectSupport() || |
| 50 !caps.drawIndirectSupport() || |
| 51 !caps.baseInstanceSupport()) { |
| 52 return nullptr; |
| 53 } |
| 54 uint32_t supportedAAModes = GrInstanceProcessor::GetSupportedAAModes(*caps.g
lslCaps(), caps); |
| 55 if (!caps.multisampleDisableSupport()) { |
| 56 // The non-AA shaders require MSAA to be disabled. |
| 57 supportedAAModes &= ~kNone_AntialiasFlag; |
| 58 } |
| 59 if (!supportedAAModes) { |
| 60 return nullptr; |
| 61 } |
| 62 return new GrGLInstancedRendering(gpu, supportedAAModes); |
| 63 } |
| 64 |
// fVertexArrayID of 0 means "no VAO created yet"; fTotalDrawCmdCount counts
// indirect draw commands across all live batches and sizes the draw-indirect
// buffer created in onBeginFlush.
GrGLInstancedRendering::GrGLInstancedRendering(GrGLGpu* gpu, uint32_t supportedAAModes)
    : INHERITED(gpu, supportedAAModes, sizeof(GLBatch)),
      fVertexArrayID(0),
      fInstanceBufferInVertexArrayID(SK_InvalidUniqueID),
      fTotalDrawCmdCount(0) {
}
| 71 |
GrGLInstancedRendering::~GrGLInstancedRendering() {
    // Delete our vertex array (if one was ever created) and tell the gpu so
    // it can drop any cached binding of that VAO.
    if (fVertexArrayID) {
        GL_CALL(DeleteVertexArrays(1, &fVertexArrayID));
        this->glGpu()->notifyVertexArrayDelete(fVertexArrayID);
    }
}
| 78 |
// The owning gpu, downcast to GL. Safe: instances are only created via
// CreateIfSupported, which takes a GrGLGpu.
inline GrGLGpu* GrGLInstancedRendering::glGpu() const {
    return static_cast<GrGLGpu*>(this->gpu());
}
| 82 |
// Placement-constructs a GLBatch in storage handed out by the base class's
// batch allocator (which was sized with sizeof(GLBatch) in our constructor).
GrInstancedRendering::Batch* GrGLInstancedRendering::constructBatch(void* storage, int instIdx) {
    return new (storage) GLBatch(this, instIdx);
}
| 86 |
| 87 void GrGLInstancedRendering::GLBatch::initBatchTracker(const GrXPOverridesForBat
ch& overrides) { |
| 88 SkASSERT(!fIsCombined); |
| 89 SkASSERT(SkIsPow2(fInfo.fShapeTypes)); // There should only be one bit set a
t this point. |
| 90 |
| 91 INHERITED::initBatchTracker(overrides); |
| 92 |
| 93 GrGLDrawElementsIndirectCommand& cmd = fDrawCmds.push_back(); |
| 94 cmd.fBaseInstance = fFirstInstanceIdx; |
| 95 cmd.fInstanceCount = 1; |
| 96 if (kRect_ShapeFlag == fInfo.fShapeTypes) { |
| 97 GrInstanceProcessor::GetIndexRangeForRect(fInfo.fAntialiasMode, |
| 98 &cmd.fFirstIndex, &cmd.fCount)
; |
| 99 } else if (kOval_ShapeFlag == fInfo.fShapeTypes) { |
| 100 GrInstanceProcessor::GetIndexRangeForOval(fInfo.fAntialiasMode, fBounds, |
| 101 &cmd.fFirstIndex, &cmd.fCount)
; |
| 102 } else { |
| 103 GrInstanceProcessor::GetIndexRangeForRRect(fInfo.fAntialiasMode, |
| 104 &cmd.fFirstIndex, &cmd.fCount
); |
| 105 } |
| 106 cmd.fBaseVertex = 0; |
| 107 |
| 108 ++this->glInstancedRendering()->fTotalDrawCmdCount; |
| 109 } |
| 110 |
| 111 bool GrGLInstancedRendering::GLBatch::onCombineIfPossible(GrBatch* other, const
GrCaps& caps) { |
| 112 GLBatch* that = other->cast<GLBatch>(); |
| 113 |
| 114 SkASSERT(fInstancedRendering == that->fInstancedRendering); |
| 115 SkASSERT(fDrawCmds.count()); |
| 116 SkASSERT(that->fDrawCmds.count()); |
| 117 |
| 118 if (!fInfo.canJoin(that->fInfo) || |
| 119 !GrPipeline::CanCombine(*this->pipeline(), this->bounds(), |
| 120 *that->pipeline(), that->bounds(), caps)) { |
| 121 return false; |
| 122 } |
| 123 |
| 124 fBounds.join(that->fBounds); |
| 125 fInfo.join(that->fInfo); |
| 126 |
| 127 // Join the draw commands. |
| 128 int i = 0; |
| 129 if (fDrawCmds.back().fBaseInstance + fDrawCmds.back().fInstanceCount == |
| 130 that->fDrawCmds.front().fBaseInstance && |
| 131 fDrawCmds.back().fFirstIndex == that->fDrawCmds.front().fFirstIndex) { |
| 132 SkASSERT(fDrawCmds.back().fCount == that->fDrawCmds.front().fCount); |
| 133 SkASSERT(0 == (fDrawCmds.back().fBaseVertex | that->fDrawCmds.back().fBa
seVertex)); |
| 134 fDrawCmds.back().fInstanceCount += that->fDrawCmds.front().fInstanceCoun
t; |
| 135 ++i; |
| 136 --this->glInstancedRendering()->fTotalDrawCmdCount; |
| 137 } |
| 138 if (i < that->fDrawCmds.count()) { |
| 139 fDrawCmds.push_back_n(that->fDrawCmds.count() - i, &that->fDrawCmds[i]); |
| 140 } |
| 141 |
| 142 return true; |
| 143 } |
| 144 |
| 145 void GrGLInstancedRendering::onBeginFlush(GrResourceProvider* rp) { |
| 146 SkASSERT(!fDrawIndirectBuffer); |
| 147 |
| 148 if (!fTotalDrawCmdCount) { |
| 149 return; // All batches ended up getting culled. |
| 150 } |
| 151 |
| 152 if (!fVertexArrayID) { |
| 153 GL_CALL(GenVertexArrays(1, &fVertexArrayID)); |
| 154 if (!fVertexArrayID) { |
| 155 return; |
| 156 } |
| 157 this->glGpu()->bindVertexArray(fVertexArrayID); |
| 158 |
| 159 // Attach our index buffer to the vertex array. |
| 160 GL_CALL(BindBuffer(GR_GL_ELEMENT_ARRAY_BUFFER, |
| 161 static_cast<const GrGLBuffer*>(this->indexBuffer())->
bufferID())); |
| 162 |
| 163 // Set up the non-instanced attribs. |
| 164 this->glGpu()->bindBuffer(kVertex_GrBufferType, |
| 165 static_cast<const GrGLBuffer*>(this->vertexBuf
fer())); |
| 166 GL_CALL(EnableVertexAttribArray(kShapeCoords_AttribIdx)); |
| 167 GL_CALL(VertexAttribPointer(kShapeCoords_AttribIdx, 2, GR_GL_FLOAT, GR_G
L_FALSE, |
| 168 sizeof(ShapeVertex), (void*) offsetof(ShapeV
ertex, fX))); |
| 169 GL_CALL(EnableVertexAttribArray(kVertexAttrs_AttribIdx)); |
| 170 GL_CALL(VertexAttribIPointer(kVertexAttrs_AttribIdx, 1, GR_GL_INT, sizeo
f(ShapeVertex), |
| 171 (void*) offsetof(ShapeVertex, fAttrs))); |
| 172 |
| 173 SkASSERT(SK_InvalidUniqueID == fInstanceBufferInVertexArrayID); |
| 174 } |
| 175 |
| 176 fDrawIndirectBuffer.reset(rp->createBuffer(sizeof(GrGLDrawElementsIndirectCo
mmand) * |
| 177 fTotalDrawCmdCount, kDrawIndirect
_GrBufferType, |
| 178 kDynamic_GrAccessPattern, |
| 179 GrResourceProvider::kNoPendingIO_
Flag)); |
| 180 if (!fDrawIndirectBuffer) { |
| 181 return; |
| 182 } |
| 183 |
| 184 // Generate a draw indirect buffer based on the instanced batches in existen
ce. |
| 185 int idx = 0; |
| 186 auto* mappedCmds = static_cast<GrGLDrawElementsIndirectCommand*>(fDrawIndire
ctBuffer->map()); |
| 187 SkDEBUGCODE(int inUseBatchCount = 0;) |
| 188 for (BatchAllocator::Iter iter(this->batchAllocator()); iter.next();) { |
| 189 GLBatch* batch = static_cast<GLBatch*>(iter.get()); |
| 190 if (!batch->fInUse) { |
| 191 continue; |
| 192 } |
| 193 memcpy(&mappedCmds[idx], batch->fDrawCmds.begin(), |
| 194 batch->fDrawCmds.count() * sizeof(GrGLDrawElementsIndirectCommand
)); |
| 195 batch->fDrawCmdsOffsetInBuffer = (GrGLDrawElementsIndirectCommand*) null
ptr + idx; |
| 196 idx += batch->fDrawCmds.count(); |
| 197 SkDEBUGCODE(++inUseBatchCount;) |
| 198 } |
| 199 SkASSERT(fTotalDrawCmdCount == idx); |
| 200 SkASSERT(inUseBatchCount == fInUseBatchCount); |
| 201 fDrawIndirectBuffer->unmap(); |
| 202 } |
| 203 |
// Issues the GL draws for one batch. Requires that onBeginFlush succeeded
// (fDrawIndirectBuffer is set). Uses a single MultiDrawElementsIndirect when
// the batch has several commands and the context supports it; otherwise loops
// over DrawElementsIndirect.
void GrGLInstancedRendering::onDraw(const GrPipeline& pipeline, const GrInstanceProcessor& instProc,
                                    const Batch* baseBatch) {
    if (!fDrawIndirectBuffer) {
        return; // beginFlush was not successful.
    }
    if (!this->glGpu()->flushGLState(pipeline, instProc)) {
        return;
    }
    // Bind the VAO (and re-point the instanced attribs if the instance buffer
    // changed), then bind the indirect command buffer built in onBeginFlush.
    this->flushAttribArrays();
    this->glGpu()->bindBuffer(kDrawIndirect_GrBufferType,
                              static_cast<GrGLBuffer*>(fDrawIndirectBuffer.get()));

    const GLBatch* batch = static_cast<const GLBatch*>(baseBatch);
    int numCommands = batch->fDrawCmds.count();

    if (1 == numCommands || !this->glGpu()->glCaps().multiDrawIndirectSupport()) {
        // One GL call per command; fDrawCmdsOffsetInBuffer + i addresses the
        // i'th command within the bound indirect buffer.
        for (int i = 0; i < numCommands; ++i) {
            DEBUG_PRINT("DrawIndirect: [%u @ %u]\n",
                        batch->fDrawCmds[i].fInstanceCount, batch->fDrawCmds[i].fBaseInstance);
            GL_CALL(DrawElementsIndirect(GR_GL_TRIANGLES, GR_GL_UNSIGNED_BYTE,
                                         batch->fDrawCmdsOffsetInBuffer + i));
        }
    } else {
#ifdef SK_DEBUG
        DEBUG_PRINT("MultiDrawIndirect:");
        for (int i = 0; i < batch->fDrawCmds.count(); i++) {
            DEBUG_PRINT(" [%u @ %u]", batch->fDrawCmds[i].fInstanceCount,
                        batch->fDrawCmds[i].fBaseInstance);
        }
        DEBUG_PRINT("\n");
#endif
        // stride 0 => commands are tightly packed in the indirect buffer.
        GL_CALL(MultiDrawElementsIndirect(GR_GL_TRIANGLES, GR_GL_UNSIGNED_BYTE,
                                          batch->fDrawCmdsOffsetInBuffer, numCommands, 0));
    }
}
| 239 |
// Binds our VAO and, if the instance buffer has changed since the instanced
// attribs were last configured (tracked via the buffer's unique ID), points
// all per-instance attribs at the current instance buffer. Every instanced
// attrib uses divisor 1, i.e. it advances once per instance.
void GrGLInstancedRendering::flushAttribArrays() {
    SkASSERT(fVertexArrayID);
    this->glGpu()->bindVertexArray(fVertexArrayID);

    if (fInstanceBufferInVertexArrayID != this->instanceBuffer()->getUniqueID()) {
        this->glGpu()->bindBuffer(kVertex_GrBufferType,
                                  static_cast<const GrGLBuffer*>(this->instanceBuffer()));

        // Info attrib.
        GL_CALL(EnableVertexAttribArray(kInstanceInfo_AttribIdx));
        GL_CALL(VertexAttribIPointer(kInstanceInfo_AttribIdx, 1, GR_GL_UNSIGNED_INT,
                                     sizeof(Instance), (void*) offsetof(Instance, fInfo)));
        GL_CALL(VertexAttribDivisor(kInstanceInfo_AttribIdx, 1));

        // Shape matrix attrib: the 2x3 matrix is split across two vec3
        // attribs (row X at elements [0..2], row Y at elements [3..5]).
        GL_CALL(EnableVertexAttribArray(kShapeMatrixX_AttribIdx));
        GL_CALL(EnableVertexAttribArray(kShapeMatrixY_AttribIdx));
        GL_CALL(VertexAttribPointer(kShapeMatrixX_AttribIdx, 3, GR_GL_FLOAT, GR_GL_FALSE,
                                    sizeof(Instance),
                                    (void*) offsetof(Instance, fShapeMatrix2x3[0])));
        GL_CALL(VertexAttribPointer(kShapeMatrixY_AttribIdx, 3, GR_GL_FLOAT, GR_GL_FALSE,
                                    sizeof(Instance),
                                    (void*) offsetof(Instance, fShapeMatrix2x3[3])));
        GL_CALL(VertexAttribDivisor(kShapeMatrixX_AttribIdx, 1));
        GL_CALL(VertexAttribDivisor(kShapeMatrixY_AttribIdx, 1));

        // Color attrib: 4 normalized unsigned bytes.
        GL_CALL(EnableVertexAttribArray(kColor_AttribIdx));
        GL_CALL(VertexAttribPointer(kColor_AttribIdx, 4, GR_GL_UNSIGNED_BYTE, GR_GL_TRUE,
                                    sizeof(Instance), (void*) offsetof(Instance, fColor)));
        GL_CALL(VertexAttribDivisor(kColor_AttribIdx, 1));

        // Local rect attrib.
        GL_CALL(EnableVertexAttribArray(kLocalRect_AttribIdx));
        GL_CALL(VertexAttribPointer(kLocalRect_AttribIdx, 4, GR_GL_FLOAT, GR_GL_FALSE,
                                    sizeof(Instance), (void*) offsetof(Instance, fLocalRect)));
        GL_CALL(VertexAttribDivisor(kLocalRect_AttribIdx, 1));

        // Remember which instance buffer this VAO is now configured for.
        fInstanceBufferInVertexArrayID = this->instanceBuffer()->getUniqueID();
    }
}
| 281 |
| 282 void GrGLInstancedRendering::onEndFlush() { |
| 283 fTotalDrawCmdCount = 0; |
| 284 fDrawIndirectBuffer.reset(); |
| 285 } |
| 286 |
| 287 void GrGLInstancedRendering::onResetGpuResources(ResetType resetType) { |
| 288 if (fVertexArrayID && ResetType::kDestroy == resetType) { |
| 289 GL_CALL(DeleteVertexArrays(1, &fVertexArrayID)); |
| 290 this->glGpu()->notifyVertexArrayDelete(fVertexArrayID); |
| 291 } |
| 292 fVertexArrayID = 0; |
| 293 fInstanceBufferInVertexArrayID = SK_InvalidUniqueID; |
| 294 fDrawIndirectBuffer.reset(); |
| 295 } |
OLD | NEW |