Chromium Code Reviews

Unified Diff: src/gpu/gl/GrGLInstancedRendering.cpp

Issue 1897203002: Implement instanced rendering for simple shapes (Closed)
Base URL: https://skia.googlesource.com/skia.git@upload2_requireHWAA
Patch Set: comments (created 4 years, 8 months ago)
Index: src/gpu/gl/GrGLInstancedRendering.cpp
diff --git a/src/gpu/gl/GrGLInstancedRendering.cpp b/src/gpu/gl/GrGLInstancedRendering.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..eb2ab9f36aff04788612020c2ffbb2b502c5a6a1
--- /dev/null
+++ b/src/gpu/gl/GrGLInstancedRendering.cpp
@@ -0,0 +1,295 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrGLInstancedRendering.h"
+
+#include "GrGLBuffer.h"
+#include "GrGLGpu.h"
+#include "GrResourceProvider.h"
+#include "effects/GrInstanceProcessor.h"
+
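+// DEBUG_PRINT compiles away entirely by default; uncommenting the SkDebugf below makes debug
+// builds log each indirect draw that gets issued.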
+#ifdef SK_DEBUG
+#define DEBUG_PRINT(...) //SkDebugf(__VA_ARGS__)
+#else
+#define DEBUG_PRINT(...)
+#endif
+
+#define GL_CALL(X) GR_GL_CALL(this->glGpu()->glInterface(), X)
+
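+// GLBatch augments the backend-agnostic Batch with the GL draw-indirect commands that describe
+// its instanced draws. The commands are copied into a single draw-indirect buffer at flush time
+// (see onBeginFlush below).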
+class GrGLInstancedRendering::GLBatch : public GrInstancedRendering::Batch {
+public:
+    DEFINE_BATCH_CLASS_ID
+
+    GLBatch(GrGLInstancedRendering* instRendering, int instanceIdx)
+        : INHERITED(ClassID(), instRendering, instanceIdx) {
+    }
+
+    void initBatchTracker(const GrXPOverridesForBatch&) override;
+    bool onCombineIfPossible(GrBatch* other, const GrCaps& caps) override;
+
+private:
+    GrGLInstancedRendering* glInstancedRendering() const {
+        return static_cast<GrGLInstancedRendering*>(fInstancedRendering);
+    }
+
+    SkSTArray<4, GrGLDrawElementsIndirectCommand, true> fDrawCmds;
+    GrGLDrawElementsIndirectCommand* fDrawCmdsOffsetInBuffer;
+
+    friend class GrGLInstancedRendering;
+
+    typedef Batch INHERITED;
+};
+
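+// Instanced rendering requires vertex array objects, indirect draws, and base-instance support;
+// if any of these is missing there is no fallback and the factory returns null.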
+GrGLInstancedRendering* GrGLInstancedRendering::CreateIfSupported(GrGLGpu* gpu) {
+    const GrGLCaps& caps = gpu->glCaps();
+    if (!caps.vertexArrayObjectSupport() ||
+        !caps.drawIndirectSupport() ||
+        !caps.baseInstanceSupport()) {
+        return nullptr;
+    }
+    uint32_t supportedAAModes = GrInstanceProcessor::GetSupportedAAModes(*caps.glslCaps(), caps);
+    if (!caps.multisampleDisableSupport()) {
+        // The non-AA shaders require MSAA to be disabled.
+        supportedAAModes &= ~kNone_AntialiasFlag;
+    }
+    if (!supportedAAModes) {
+        return nullptr;
+    }
+    return new GrGLInstancedRendering(gpu, supportedAAModes);
+}
+
+GrGLInstancedRendering::GrGLInstancedRendering(GrGLGpu* gpu, uint32_t supportedAAModes)
+    : INHERITED(gpu, supportedAAModes, sizeof(GLBatch)),
+      fVertexArrayID(0),
+      fInstanceBufferInVertexArrayID(SK_InvalidUniqueID),
+      fTotalDrawCmdCount(0) {
+}
+
+GrGLInstancedRendering::~GrGLInstancedRendering() {
+    if (fVertexArrayID) {
+        GL_CALL(DeleteVertexArrays(1, &fVertexArrayID));
+        this->glGpu()->notifyVertexArrayDelete(fVertexArrayID);
+    }
+}
+
+inline GrGLGpu* GrGLInstancedRendering::glGpu() const {
+    return static_cast<GrGLGpu*>(this->gpu());
+}
+
+GrInstancedRendering::Batch* GrGLInstancedRendering::constructBatch(void* storage, int instIdx) {
+    return new (storage) GLBatch(this, instIdx);
+}
+
+void GrGLInstancedRendering::GLBatch::initBatchTracker(const GrXPOverridesForBatch& overrides) {
+    SkASSERT(!fIsCombined);
+    SkASSERT(SkIsPow2(fInfo.fShapeTypes)); // There should only be one bit set at this point.
+
+    INHERITED::initBatchTracker(overrides);
+
+    GrGLDrawElementsIndirectCommand& cmd = fDrawCmds.push_back();
+    cmd.fBaseInstance = fFirstInstanceIdx;
+    cmd.fInstanceCount = 1;
+    if (kRect_ShapeFlag == fInfo.fShapeTypes) {
+        GrInstanceProcessor::GetIndexRangeForRect(fInfo.fAntialiasMode,
+                                                  &cmd.fFirstIndex, &cmd.fCount);
+    } else if (kOval_ShapeFlag == fInfo.fShapeTypes) {
+        GrInstanceProcessor::GetIndexRangeForOval(fInfo.fAntialiasMode, fBounds,
+                                                  &cmd.fFirstIndex, &cmd.fCount);
+    } else {
+        GrInstanceProcessor::GetIndexRangeForRRect(fInfo.fAntialiasMode,
+                                                   &cmd.fFirstIndex, &cmd.fCount);
+    }
+    cmd.fBaseVertex = 0;
+
+    ++this->glInstancedRendering()->fTotalDrawCmdCount;
+}
+
+bool GrGLInstancedRendering::GLBatch::onCombineIfPossible(GrBatch* other, const GrCaps& caps) {
+    GLBatch* that = other->cast<GLBatch>();
+
+    SkASSERT(fInstancedRendering == that->fInstancedRendering);
+    SkASSERT(fDrawCmds.count());
+    SkASSERT(that->fDrawCmds.count());
+
+    if (!fInfo.canJoin(that->fInfo) ||
+        !GrPipeline::CanCombine(*this->pipeline(), this->bounds(),
+                                *that->pipeline(), that->bounds(), caps)) {
+        return false;
+    }
+
+    fBounds.join(that->fBounds);
+    fInfo.join(that->fInfo);
+
+    // Join the draw commands.
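+    // If the last command of this batch and the first command of 'that' reference contiguous
+    // instances of the same geometry, merge them into a single command rather than appending.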
+    int i = 0;
+    if (fDrawCmds.back().fBaseInstance + fDrawCmds.back().fInstanceCount ==
+        that->fDrawCmds.front().fBaseInstance &&
+        fDrawCmds.back().fFirstIndex == that->fDrawCmds.front().fFirstIndex) {
+        SkASSERT(fDrawCmds.back().fCount == that->fDrawCmds.front().fCount);
+        SkASSERT(0 == (fDrawCmds.back().fBaseVertex | that->fDrawCmds.back().fBaseVertex));
+        fDrawCmds.back().fInstanceCount += that->fDrawCmds.front().fInstanceCount;
+        ++i;
+        --this->glInstancedRendering()->fTotalDrawCmdCount;
+    }
+    if (i < that->fDrawCmds.count()) {
+        fDrawCmds.push_back_n(that->fDrawCmds.count() - i, &that->fDrawCmds[i]);
+    }
+
+    return true;
+}
+
+void GrGLInstancedRendering::onBeginFlush(GrResourceProvider* rp) {
+    SkASSERT(!fDrawIndirectBuffer);
+
+    if (!fTotalDrawCmdCount) {
+        return; // All batches ended up getting culled.
+    }
+
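+    // Create the vertex array lazily on first flush. The index buffer and the non-instanced
+    // attribs never change, so they are attached here once; the instanced attribs are attached
+    // in flushAttribArrays once the instance buffer is known.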
+    if (!fVertexArrayID) {
+        GL_CALL(GenVertexArrays(1, &fVertexArrayID));
+        if (!fVertexArrayID) {
+            return;
+        }
+        this->glGpu()->bindVertexArray(fVertexArrayID);
+
+        // Attach our index buffer to the vertex array.
+        GL_CALL(BindBuffer(GR_GL_ELEMENT_ARRAY_BUFFER,
+                           static_cast<const GrGLBuffer*>(this->indexBuffer())->bufferID()));
+
+        // Set up the non-instanced attribs.
+        this->glGpu()->bindBuffer(kVertex_GrBufferType,
+                                  static_cast<const GrGLBuffer*>(this->vertexBuffer()));
+        GL_CALL(EnableVertexAttribArray(kShapeCoords_AttribIdx));
+        GL_CALL(VertexAttribPointer(kShapeCoords_AttribIdx, 2, GR_GL_FLOAT, GR_GL_FALSE,
+                                    sizeof(ShapeVertex), (void*) offsetof(ShapeVertex, fX)));
+        GL_CALL(EnableVertexAttribArray(kVertexAttrs_AttribIdx));
+        GL_CALL(VertexAttribIPointer(kVertexAttrs_AttribIdx, 1, GR_GL_INT, sizeof(ShapeVertex),
+                                     (void*) offsetof(ShapeVertex, fAttrs)));
+
+        SkASSERT(SK_InvalidUniqueID == fInstanceBufferInVertexArrayID);
+    }
+
+    fDrawIndirectBuffer.reset(rp->createBuffer(sizeof(GrGLDrawElementsIndirectCommand) *
+                                               fTotalDrawCmdCount, kDrawIndirect_GrBufferType,
+                                               kDynamic_GrAccessPattern,
+                                               GrResourceProvider::kNoPendingIO_Flag));
+    if (!fDrawIndirectBuffer) {
+        return;
+    }
+
+    // Generate a draw indirect buffer based on the instanced batches in existence.
+    int idx = 0;
+    auto* mappedCmds = static_cast<GrGLDrawElementsIndirectCommand*>(fDrawIndirectBuffer->map());
+    SkDEBUGCODE(int inUseBatchCount = 0;)
+    for (BatchAllocator::Iter iter(this->batchAllocator()); iter.next();) {
+        GLBatch* batch = static_cast<GLBatch*>(iter.get());
+        if (!batch->fInUse) {
+            continue;
+        }
+        memcpy(&mappedCmds[idx], batch->fDrawCmds.begin(),
+               batch->fDrawCmds.count() * sizeof(GrGLDrawElementsIndirectCommand));
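+        // Record where this batch's commands live in the indirect buffer as a pointer-typed
+        // byte offset; that is the form {Multi}DrawElementsIndirect expects while a
+        // draw-indirect buffer is bound.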
+        batch->fDrawCmdsOffsetInBuffer = (GrGLDrawElementsIndirectCommand*) nullptr + idx;
+        idx += batch->fDrawCmds.count();
+        SkDEBUGCODE(++inUseBatchCount;)
+    }
+    SkASSERT(fTotalDrawCmdCount == idx);
+    SkASSERT(inUseBatchCount == fInUseBatchCount);
+    fDrawIndirectBuffer->unmap();
+}
+
+void GrGLInstancedRendering::onDraw(const GrPipeline& pipeline, const GrInstanceProcessor& instProc,
+                                    const Batch* baseBatch) {
+    if (!fDrawIndirectBuffer) {
+        return; // beginFlush was not successful.
+    }
+    if (!this->glGpu()->flushGLState(pipeline, instProc)) {
+        return;
+    }
+    this->flushAttribArrays();
+    this->glGpu()->bindBuffer(kDrawIndirect_GrBufferType,
+                              static_cast<GrGLBuffer*>(fDrawIndirectBuffer.get()));
+
+    const GLBatch* batch = static_cast<const GLBatch*>(baseBatch);
+    int numCommands = batch->fDrawCmds.count();
+
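+    // Issue the commands one at a time when there is only a single command or the driver lacks
+    // MultiDrawIndirect support; otherwise submit the batch's whole command range in one call.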
+    if (1 == numCommands || !this->glGpu()->glCaps().multiDrawIndirectSupport()) {
+        for (int i = 0; i < numCommands; ++i) {
+            DEBUG_PRINT("DrawIndirect: [%u @ %u]\n",
+                        batch->fDrawCmds[i].fInstanceCount, batch->fDrawCmds[i].fBaseInstance);
+            GL_CALL(DrawElementsIndirect(GR_GL_TRIANGLES, GR_GL_UNSIGNED_BYTE,
+                                         batch->fDrawCmdsOffsetInBuffer + i));
+        }
+    } else {
+#ifdef SK_DEBUG
+        DEBUG_PRINT("MultiDrawIndirect:");
+        for (int i = 0; i < batch->fDrawCmds.count(); i++) {
+            DEBUG_PRINT(" [%u @ %u]", batch->fDrawCmds[i].fInstanceCount,
+                        batch->fDrawCmds[i].fBaseInstance);
+        }
+        DEBUG_PRINT("\n");
+#endif
+        GL_CALL(MultiDrawElementsIndirect(GR_GL_TRIANGLES, GR_GL_UNSIGNED_BYTE,
+                                          batch->fDrawCmdsOffsetInBuffer, numCommands, 0));
+    }
+}
+
+void GrGLInstancedRendering::flushAttribArrays() {
+    SkASSERT(fVertexArrayID);
+    this->glGpu()->bindVertexArray(fVertexArrayID);
+
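+    // The instanced attrib pointers only need to be respecified when a different instance
+    // buffer is being attached to the vertex array.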
+    if (fInstanceBufferInVertexArrayID != this->instanceBuffer()->getUniqueID()) {
+        this->glGpu()->bindBuffer(kVertex_GrBufferType,
+                                  static_cast<const GrGLBuffer*>(this->instanceBuffer()));
+
+        // Info attrib.
+        GL_CALL(EnableVertexAttribArray(kInstanceInfo_AttribIdx));
+        GL_CALL(VertexAttribIPointer(kInstanceInfo_AttribIdx, 1, GR_GL_UNSIGNED_INT,
+                                     sizeof(Instance), (void*) offsetof(Instance, fInfo)));
+        GL_CALL(VertexAttribDivisor(kInstanceInfo_AttribIdx, 1));
+
+        // Shape matrix attrib.
+        GL_CALL(EnableVertexAttribArray(kShapeMatrixX_AttribIdx));
+        GL_CALL(EnableVertexAttribArray(kShapeMatrixY_AttribIdx));
+        GL_CALL(VertexAttribPointer(kShapeMatrixX_AttribIdx, 3, GR_GL_FLOAT, GR_GL_FALSE,
+                                    sizeof(Instance),
+                                    (void*) offsetof(Instance, fShapeMatrix2x3[0])));
+        GL_CALL(VertexAttribPointer(kShapeMatrixY_AttribIdx, 3, GR_GL_FLOAT, GR_GL_FALSE,
+                                    sizeof(Instance),
+                                    (void*) offsetof(Instance, fShapeMatrix2x3[3])));
+        GL_CALL(VertexAttribDivisor(kShapeMatrixX_AttribIdx, 1));
+        GL_CALL(VertexAttribDivisor(kShapeMatrixY_AttribIdx, 1));
+
+        // Color attrib.
+        GL_CALL(EnableVertexAttribArray(kColor_AttribIdx));
+        GL_CALL(VertexAttribPointer(kColor_AttribIdx, 4, GR_GL_UNSIGNED_BYTE, GR_GL_TRUE,
+                                    sizeof(Instance), (void*) offsetof(Instance, fColor)));
+        GL_CALL(VertexAttribDivisor(kColor_AttribIdx, 1));
+
+        // Local rect attrib.
+        GL_CALL(EnableVertexAttribArray(kLocalRect_AttribIdx));
+        GL_CALL(VertexAttribPointer(kLocalRect_AttribIdx, 4, GR_GL_FLOAT, GR_GL_FALSE,
+                                    sizeof(Instance), (void*) offsetof(Instance, fLocalRect)));
+        GL_CALL(VertexAttribDivisor(kLocalRect_AttribIdx, 1));
+
+        fInstanceBufferInVertexArrayID = this->instanceBuffer()->getUniqueID();
+    }
+}
+
+void GrGLInstancedRendering::onEndFlush() {
+    fTotalDrawCmdCount = 0;
+    fDrawIndirectBuffer.reset();
+}
+
+void GrGLInstancedRendering::onResetGpuResources(ResetType resetType) {
+    if (fVertexArrayID && ResetType::kDestroy == resetType) {
+        GL_CALL(DeleteVertexArrays(1, &fVertexArrayID));
+        this->glGpu()->notifyVertexArrayDelete(fVertexArrayID);
+    }
+    fVertexArrayID = 0;
+    fInstanceBufferInVertexArrayID = SK_InvalidUniqueID;
+    fDrawIndirectBuffer.reset();
+}