| Index: src/gpu/instanced/GLInstancedRendering.cpp
|
| diff --git a/src/gpu/instanced/GLInstancedRendering.cpp b/src/gpu/instanced/GLInstancedRendering.cpp
|
| new file mode 100644
|
| index 0000000000000000000000000000000000000000..1cb640cd5c2bb82793af9afe3004d645e0317d78
|
| --- /dev/null
|
| +++ b/src/gpu/instanced/GLInstancedRendering.cpp
|
| @@ -0,0 +1,303 @@
|
| +/*
|
| + * Copyright 2016 Google Inc.
|
| + *
|
| + * Use of this source code is governed by a BSD-style license that can be
|
| + * found in the LICENSE file.
|
| + */
|
| +
|
| +#include "GLInstancedRendering.h"
|
| +
|
| +#include "GrResourceProvider.h"
|
| +#include "gl/GrGLGpu.h"
|
| +#include "instanced/InstanceProcessor.h"
|
| +
|
| +#define GL_CALL(X) GR_GL_CALL(this->glGpu()->glInterface(), X)
|
| +
|
| +namespace gr_instanced {
|
| +
|
/**
 * GL-specific batch record. After onBeginFlush() runs, it remembers where this
 * batch's commands landed in the flushed draw-indirect buffer, plus the base
 * instance to emulate when the driver lacks base-instance support.
 */
class GLInstancedRendering::GLBatch : public InstancedRendering::Batch {
public:
    DEFINE_BATCH_CLASS_ID

    GLBatch(GLInstancedRendering* instRendering, int instanceIdx)
        : INHERITED(ClassID(), instRendering, instanceIdx) {
    }

private:
    // Index of this batch's first instance in the flushed instance buffer when
    // the driver has no base-instance support; 0 otherwise. Set in onBeginFlush().
    int fEmulatedBaseInstance;
    // Index of this batch's first command in the flushed draw-indirect buffer.
    int fGLDrawCmdsIdx;
    // Number of GL draw commands this batch contributes (adjacent commands that
    // share geometry are merged into one).
    int fNumGLDrawCmds;

    friend class GLInstancedRendering;

    typedef Batch INHERITED;
};
|
| +
|
| +GLInstancedRendering* GLInstancedRendering::CreateIfSupported(GrGLGpu* gpu) {
|
| + const GrGLCaps& caps = gpu->glCaps();
|
| + AntialiasMode lastSupportedAAMode;
|
| + if (!caps.vertexArrayObjectSupport() ||
|
| + !caps.drawIndirectSupport() ||
|
| + !InstanceProcessor::IsSupported(*caps.glslCaps(), caps, &lastSupportedAAMode)) {
|
| + return nullptr;
|
| + }
|
| + return new GLInstancedRendering(gpu, lastSupportedAAMode);
|
| +}
|
| +
|
// The VAO and instance-attrib state are created lazily at first flush, so start
// with a zero VAO id and an invalid attribs-buffer id.
GLInstancedRendering::GLInstancedRendering(GrGLGpu* gpu, AntialiasMode lastSupportedAAMode)
    : INHERITED(gpu, lastSupportedAAMode),
      fVertexArrayID(0),
      fInstanceAttribsBufferUniqueId(SK_InvalidUniqueID) {
}
|
| +
|
GLInstancedRendering::~GLInstancedRendering() {
    if (fVertexArrayID) {
        GL_CALL(DeleteVertexArrays(1, &fVertexArrayID));
        // Tell the gpu object its cached VAO binding may now be stale.
        this->glGpu()->notifyVertexArrayDelete(fVertexArrayID);
    }
}
|
| +
|
// Downcast convenience accessor: this renderer is only ever created with a
// GrGLGpu (see CreateIfSupported), so the static_cast is safe.
inline GrGLGpu* GLInstancedRendering::glGpu() const {
    return static_cast<GrGLGpu*>(this->gpu());
}
|
| +
|
// Factory override: batches for this renderer are always GLBatch instances.
InstancedRendering::Batch* GLInstancedRendering::createBatch(int instanceIdx) {
    return new GLBatch(this, instanceIdx);
}
|
| +
|
| +void GLInstancedRendering::onBeginFlush(GrResourceProvider* rp) {
|
| + // Count what there is to draw.
|
| + BatchList::Iter iter;
|
| + iter.init(this->batchList(), BatchList::Iter::kHead_IterStart);
|
| + int numGLInstances = 0;
|
| + int numGLDrawCmds = 0;
|
| + while (Batch* b = iter.get()) {
|
| + GLBatch* batch = static_cast<GLBatch*>(b);
|
| + iter.next();
|
| +
|
| + const auto& drawCmds = batch->fDrawCmds;
|
| + for (int c = 0; c < drawCmds.count(); ++c) {
|
| + numGLInstances += drawCmds[c].fInstanceRange.fCount;
|
| + if (c > 0 && drawCmds[c].fGeometry == drawCmds[c - 1].fGeometry) {
|
| + // When two adjacent draw commands have the same geometry, we will rearrange the
|
| + // instances for the GL buffer and combine them into a single command.
|
| + continue;
|
| + }
|
| + ++numGLDrawCmds;
|
| + }
|
| + }
|
| + if (!numGLDrawCmds) {
|
| + return;
|
| + }
|
| + SkASSERT(numGLInstances);
|
| +
|
| + // Lazily create a vertex array object.
|
| + if (!fVertexArrayID) {
|
| + GL_CALL(GenVertexArrays(1, &fVertexArrayID));
|
| + if (!fVertexArrayID) {
|
| + return;
|
| + }
|
| + this->glGpu()->bindVertexArray(fVertexArrayID);
|
| +
|
| + // Attach our index buffer to the vertex array.
|
| + GL_CALL(BindBuffer(GR_GL_ELEMENT_ARRAY_BUFFER,
|
| + static_cast<const GrGLBuffer*>(this->indexBuffer())->bufferID()));
|
| +
|
| + // Set up the non-instanced attribs.
|
| + this->glGpu()->bindBuffer(kVertex_GrBufferType,
|
| + static_cast<const GrGLBuffer*>(this->vertexBuffer()));
|
| + GL_CALL(EnableVertexAttribArray(kShapeCoords_AttribIdx));
|
| + GL_CALL(VertexAttribPointer(kShapeCoords_AttribIdx, 2, GR_GL_FLOAT, GR_GL_FALSE,
|
| + sizeof(ShapeVertex), (void*) offsetof(ShapeVertex, fX)));
|
| + GL_CALL(EnableVertexAttribArray(kVertexAttrs_AttribIdx));
|
| + GL_CALL(VertexAttribIPointer(kVertexAttrs_AttribIdx, 1, GR_GL_INT, sizeof(ShapeVertex),
|
| + (void*) offsetof(ShapeVertex, fAttrs)));
|
| +
|
| + SkASSERT(SK_InvalidUniqueID == fInstanceAttribsBufferUniqueId);
|
| + }
|
| +
|
| + // Create and map instance and draw-indirect buffers.
|
| + SkASSERT(!fInstanceBuffer);
|
| + fInstanceBuffer.reset(static_cast<GrGLBuffer*>(
|
| + rp->createBuffer(sizeof(Instance) * numGLInstances, kVertex_GrBufferType,
|
| + kDynamic_GrAccessPattern, GrResourceProvider::kNoPendingIO_Flag)));
|
| + if (!fInstanceBuffer) {
|
| + return;
|
| + }
|
| +
|
| + SkASSERT(!fDrawIndirectBuffer);
|
| + fDrawIndirectBuffer.reset(static_cast<GrGLBuffer*>(
|
| + rp->createBuffer(sizeof(GrGLDrawElementsIndirectCommand) * numGLDrawCmds,
|
| + kDrawIndirect_GrBufferType, kDynamic_GrAccessPattern,
|
| + GrResourceProvider::kNoPendingIO_Flag)));
|
| + if (!fDrawIndirectBuffer) {
|
| + return;
|
| + }
|
| +
|
| + Instance* glMappedInstances = static_cast<Instance*>(fInstanceBuffer->map());
|
| + int glInstancesIdx = 0;
|
| +
|
| + auto* glMappedCmds = static_cast<GrGLDrawElementsIndirectCommand*>(fDrawIndirectBuffer->map());
|
| + int glDrawCmdsIdx = 0;
|
| +
|
| + bool baseInstanceSupport = this->glGpu()->glCaps().baseInstanceSupport();
|
| +
|
| + if (!baseInstanceSupport) {
|
| + fGLDrawCmdsInfo.reset(numGLDrawCmds);
|
| + }
|
| +
|
| + // Generate the instance and draw-indirect buffer contents based on the batches in existence.
|
| + iter.init(this->batchList(), BatchList::Iter::kHead_IterStart);
|
| + while (Batch* b = iter.get()) {
|
| + GLBatch* batch = static_cast<GLBatch*>(b);
|
| + iter.next();
|
| +
|
| + batch->fEmulatedBaseInstance = baseInstanceSupport ? 0 : glInstancesIdx;
|
| + batch->fGLDrawCmdsIdx = glDrawCmdsIdx;
|
| + batch->fNumGLDrawCmds = 0;
|
| +
|
| + const auto& drawCmds = batch->fDrawCmds;
|
| + int cidx = 0;
|
| + SkASSERT(!drawCmds.empty());
|
| + do {
|
| + IndexRange geometry = drawCmds[cidx].fGeometry;
|
| + int instanceCount = 0;
|
| +
|
| + do {
|
| + SkASSERT(drawCmds[cidx].isValid());
|
| + InstanceRange range = drawCmds[cidx].fInstanceRange;
|
| + memcpy(&glMappedInstances[glInstancesIdx + instanceCount],
|
| + &this->instance(range.fStart), range.fCount * sizeof(Instance));
|
| + instanceCount += range.fCount;
|
| + } while (++cidx < drawCmds.count() && drawCmds[cidx].fGeometry == geometry);
|
| +
|
| + GrGLDrawElementsIndirectCommand& glCmd = glMappedCmds[glDrawCmdsIdx];
|
| + glCmd.fCount = geometry.fCount;
|
| + glCmd.fInstanceCount = instanceCount;
|
| + glCmd.fFirstIndex = geometry.fStart;
|
| + glCmd.fBaseVertex = 0;
|
| + glCmd.fBaseInstance = baseInstanceSupport ? glInstancesIdx : 0;
|
| +
|
| + if (!baseInstanceSupport) {
|
| + fGLDrawCmdsInfo[glDrawCmdsIdx].fInstanceCount = instanceCount;
|
| + }
|
| +
|
| + glInstancesIdx += instanceCount;
|
| + ++glDrawCmdsIdx;
|
| + ++batch->fNumGLDrawCmds;
|
| + } while (cidx < drawCmds.count());
|
| + }
|
| +
|
| + SkASSERT(glDrawCmdsIdx == numGLDrawCmds);
|
| + fDrawIndirectBuffer->unmap();
|
| +
|
| + SkASSERT(glInstancesIdx == numGLInstances);
|
| + fInstanceBuffer->unmap();
|
| +}
|
| +
|
/**
 * Issues the indirect draw(s) for one batch using the buffers built in
 * onBeginFlush(). Uses a single MultiDrawElementsIndirect when the driver
 * supports both multi-draw-indirect and base instances; otherwise loops,
 * rebinding the instance attribs per command to emulate a base instance.
 */
void GLInstancedRendering::onDraw(const GrPipeline& pipeline, const InstanceProcessor& instProc,
                                  const Batch* baseBatch) {
    if (!fDrawIndirectBuffer) {
        return; // beginFlush was not successful.
    }
    if (!this->glGpu()->flushGLState(pipeline, instProc)) {
        return;
    }

    this->glGpu()->bindBuffer(kDrawIndirect_GrBufferType, fDrawIndirectBuffer.get());

    const GrGLCaps& glCaps = this->glGpu()->glCaps();
    const GLBatch* batch = static_cast<const GLBatch*>(baseBatch);
    int numCommands = batch->fNumGLDrawCmds;

    // Debugging aid: dump this batch's command list. Intentionally compiled out.
#if 0
    SkDebugf("Instanced batch: [");
    for (int i = 0; i < numCommands; ++i) {
        SkDebugf("%s%i * %s", (i ? ", " : ""), batch->fDrawCmds[i].fInstanceRange.fCount,
                 InstanceProcessor::GetNameOfIndexRange(batch->fDrawCmds[i].fGeometry));
    }
    SkDebugf("]\n");
#endif

    if (1 == numCommands || !glCaps.baseInstanceSupport() || !glCaps.multiDrawIndirectSupport()) {
        // One DrawElementsIndirect per command. Without base-instance support,
        // flushInstanceAttribs() repoints the instance attribs at the running
        // emulated base instance before each draw.
        int emulatedBaseInstance = batch->fEmulatedBaseInstance;
        for (int i = 0; i < numCommands; ++i) {
            int glCmdIdx = batch->fGLDrawCmdsIdx + i;
            this->flushInstanceAttribs(emulatedBaseInstance);
            // Pointer arithmetic from null yields the byte offset into the
            // bound GR_GL_DRAW_INDIRECT buffer, per GL convention.
            GL_CALL(DrawElementsIndirect(GR_GL_TRIANGLES, GR_GL_UNSIGNED_BYTE,
                                         (GrGLDrawElementsIndirectCommand*) nullptr + glCmdIdx));
            if (!glCaps.baseInstanceSupport()) {
                emulatedBaseInstance += fGLDrawCmdsInfo[glCmdIdx].fInstanceCount;
            }
        }
    } else {
        // Fast path: all of this batch's commands in one multi-draw call.
        int glCmdsIdx = batch->fGLDrawCmdsIdx;
        this->flushInstanceAttribs(batch->fEmulatedBaseInstance);
        GL_CALL(MultiDrawElementsIndirect(GR_GL_TRIANGLES, GR_GL_UNSIGNED_BYTE,
                                          (GrGLDrawElementsIndirectCommand*) nullptr + glCmdsIdx,
                                          numCommands, 0));
    }
}
|
| +
|
/**
 * Binds the VAO and (re)points the instanced vertex attribs at fInstanceBuffer,
 * offset by baseInstance. The attrib state is cached: if the same buffer and
 * base instance are already bound, this is a no-op beyond the VAO bind.
 * A nonzero baseInstance is how base-instance draws are emulated on drivers
 * without ARB_base_instance.
 */
void GLInstancedRendering::flushInstanceAttribs(int baseInstance) {
    SkASSERT(fVertexArrayID);
    this->glGpu()->bindVertexArray(fVertexArrayID);

    SkASSERT(fInstanceBuffer);
    if (fInstanceAttribsBufferUniqueId != fInstanceBuffer->getUniqueID() ||
        fInstanceAttribsBaseInstance != baseInstance) {
        // Null-based pointer arithmetic produces the byte offset of instance
        // [baseInstance] within the bound vertex buffer.
        Instance* offsetInBuffer = (Instance*) nullptr + baseInstance;

        this->glGpu()->bindBuffer(kVertex_GrBufferType, fInstanceBuffer.get());

        // Info attrib.
        GL_CALL(EnableVertexAttribArray(kInstanceInfo_AttribIdx));
        GL_CALL(VertexAttribIPointer(kInstanceInfo_AttribIdx, 1, GR_GL_UNSIGNED_INT,
                                     sizeof(Instance), &offsetInBuffer->fInfo));
        GL_CALL(VertexAttribDivisor(kInstanceInfo_AttribIdx, 1));

        // Shape matrix attrib (a 2x3 matrix split across two vec3 attribs).
        GL_CALL(EnableVertexAttribArray(kShapeMatrixX_AttribIdx));
        GL_CALL(EnableVertexAttribArray(kShapeMatrixY_AttribIdx));
        GL_CALL(VertexAttribPointer(kShapeMatrixX_AttribIdx, 3, GR_GL_FLOAT, GR_GL_FALSE,
                                    sizeof(Instance), &offsetInBuffer->fShapeMatrix2x3[0]));
        GL_CALL(VertexAttribPointer(kShapeMatrixY_AttribIdx, 3, GR_GL_FLOAT, GR_GL_FALSE,
                                    sizeof(Instance), &offsetInBuffer->fShapeMatrix2x3[3]));
        GL_CALL(VertexAttribDivisor(kShapeMatrixX_AttribIdx, 1));
        GL_CALL(VertexAttribDivisor(kShapeMatrixY_AttribIdx, 1));

        // Color attrib (normalized bytes).
        GL_CALL(EnableVertexAttribArray(kColor_AttribIdx));
        GL_CALL(VertexAttribPointer(kColor_AttribIdx, 4, GR_GL_UNSIGNED_BYTE, GR_GL_TRUE,
                                    sizeof(Instance), &offsetInBuffer->fColor));
        GL_CALL(VertexAttribDivisor(kColor_AttribIdx, 1));

        // Local rect attrib.
        GL_CALL(EnableVertexAttribArray(kLocalRect_AttribIdx));
        GL_CALL(VertexAttribPointer(kLocalRect_AttribIdx, 4, GR_GL_FLOAT, GR_GL_FALSE,
                                    sizeof(Instance), &offsetInBuffer->fLocalRect));
        GL_CALL(VertexAttribDivisor(kLocalRect_AttribIdx, 1));

        // Remember what is bound so redundant rebinds are skipped next time.
        fInstanceAttribsBufferUniqueId = fInstanceBuffer->getUniqueID();
        fInstanceAttribsBaseInstance = baseInstance;
    }
}
|
| +
|
| +void GLInstancedRendering::onEndFlush() {
|
| + fInstanceBuffer.reset();
|
| + fDrawIndirectBuffer.reset();
|
| + fGLDrawCmdsInfo.reset(0);
|
| +}
|
| +
|
/**
 * Drops all GPU-side state. The VAO is deleted through GL only on kDestroy;
 * on other reset types (e.g. an abandoned context) no GL calls may be made,
 * so the id is simply forgotten.
 */
void GLInstancedRendering::onResetGpuResources(ResetType resetType) {
    if (fVertexArrayID && ResetType::kDestroy == resetType) {
        GL_CALL(DeleteVertexArrays(1, &fVertexArrayID));
        this->glGpu()->notifyVertexArrayDelete(fVertexArrayID);
    }
    // In all cases the id is invalid from here on; attrib cache goes with it.
    fVertexArrayID = 0;
    fInstanceBuffer.reset();
    fDrawIndirectBuffer.reset();
    fInstanceAttribsBufferUniqueId = SK_InvalidUniqueID;
}
|
| +
|
| +}
|
|
|