Index: src/gpu/instanced/InstancedRendering.h
diff --git a/src/gpu/instanced/InstancedRendering.h b/src/gpu/instanced/InstancedRendering.h
new file mode 100644
index 0000000000000000000000000000000000000000..c0a300f09c3e4dc3a082ff2acb5594f33d3474cb
--- /dev/null
+++ b/src/gpu/instanced/InstancedRendering.h
@@ -0,0 +1,196 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef gr_instanced_InstancedRendering_DEFINED
+#define gr_instanced_InstancedRendering_DEFINED
+
+#include "GrAllocator.h"
+#include "SkTInternalLList.h"
+#include "batches/GrDrawBatch.h"
+#include "instanced/InstancedRenderingTypes.h"
+#include "../private/GrInstancedPipelineInfo.h"
+
+class GrResourceProvider;
+
+namespace gr_instanced {
+
+class InstanceProcessor;
+
+/**
+ * This class serves as a centralized clearinghouse for instanced rendering. It accumulates data for
+ * instanced draws into one location, and creates special batches that pull from this data. The
+ * nature of instanced rendering allows these batches to combine well and render efficiently.
+ *
+ * During a flush, this class assembles the accumulated draw data into a single vertex and texel
+ * buffer, and its subclass draws the batches using backend-specific instanced rendering APIs.
+ *
+ * This class is responsible for the CPU side of instanced rendering. Shaders are implemented by
+ * InstanceProcessor.
+ */
+class InstancedRendering : public SkNoncopyable {
+public:
+    virtual ~InstancedRendering() { SkASSERT(State::kRecordingDraws == fState); }
+
+    GrGpu* gpu() const { return fGpu; }
+
+    /**
+     * These methods make a new record internally for an instanced draw, and return a batch that is
+     * effectively just an index to that record. The returned batch is not self-contained, but
+     * rather relies on this class to handle the rendering. The client must call beginFlush() on
+     * this class before attempting to flush batches returned by it. It is invalid to record new
+     * draws between beginFlush() and endFlush().
+     */
+    GrDrawBatch* SK_WARN_UNUSED_RESULT recordRect(const SkRect&, const SkMatrix&, GrColor,
+                                                  bool antialias, const GrInstancedPipelineInfo&,
+                                                  bool* useHWAA);
+
+    GrDrawBatch* SK_WARN_UNUSED_RESULT recordRect(const SkRect&, const SkMatrix&, GrColor,
+                                                  const SkRect& localRect, bool antialias,
+                                                  const GrInstancedPipelineInfo&, bool* useHWAA);
+
+    GrDrawBatch* SK_WARN_UNUSED_RESULT recordRect(const SkRect&, const SkMatrix&, GrColor,
+                                                  const SkMatrix& localMatrix, bool antialias,
+                                                  const GrInstancedPipelineInfo&, bool* useHWAA);
+
+    GrDrawBatch* SK_WARN_UNUSED_RESULT recordOval(const SkRect&, const SkMatrix&, GrColor,
+                                                  bool antialias, const GrInstancedPipelineInfo&,
+                                                  bool* useHWAA);
+
+    GrDrawBatch* SK_WARN_UNUSED_RESULT recordRRect(const SkRRect&, const SkMatrix&, GrColor,
+                                                   bool antialias, const GrInstancedPipelineInfo&,
+                                                   bool* useHWAA);
+
+    GrDrawBatch* SK_WARN_UNUSED_RESULT recordDRRect(const SkRRect& outer, const SkRRect& inner,
+                                                    const SkMatrix&, GrColor, bool antialias,
+                                                    const GrInstancedPipelineInfo&, bool* useHWAA);
+
+    /**
+     * Compiles all recorded draws into GPU buffers and allows the client to begin flushing the
+     * batches created by this class.
+     */
+    void beginFlush(GrResourceProvider*);
+
+    /**
+     * Called once the batches created previously by this class have all been released. Allows the
+     * client to begin recording draws again.
+     */
+    void endFlush();
+
+    enum class ResetType : bool {
+        kDestroy,
+        kAbandon
+    };
+
+    /**
+     * Resets all GPU resources, including those that are held long term. They will be lazily
+     * reinitialized if the class begins to be used again.
+     */
+    void resetGpuResources(ResetType);
+
+protected:
+    class Batch : public GrDrawBatch {
+    public:
+        SK_DECLARE_INTERNAL_LLIST_INTERFACE(Batch);
+
+        const char* name() const override { return "Instanced Batch"; }
+        ~Batch() override { fInstancedRendering->fBatchList.remove(this); }
+
+    protected:
+        Batch(uint32_t classID, InstancedRendering* ir, int instanceIdx);
+
+        void initBatchTracker(const GrXPOverridesForBatch&) override;
+        bool onCombineIfPossible(GrBatch* other, const GrCaps& caps) override;
+
+        void computePipelineOptimizations(GrInitInvariantOutput* color,
+                                          GrInitInvariantOutput* coverage,
+                                          GrBatchToXPOverrides*) const override;
+
+        void onPrepare(GrBatchFlushState*) override {}
+        void onDraw(GrBatchFlushState*) override;
+
+        struct DrawCmd {
+#ifdef SK_DEBUG
+            DrawCmd() : fGeometry{-1, 0}, fInstanceRange{-1, 0} {}
+
+            bool isValid() const {
+                return fGeometry.fStart >= 0 && fGeometry.fCount > 0 &&
+                       fInstanceRange.fStart >= 0 && fInstanceRange.fCount > 0;
+            }
+#endif
+
+            int getSingleInstanceIdx() const {
+                SkASSERT(1 == fInstanceRange.fCount);
+                return fInstanceRange.fStart;
+            }
+
+            IndexRange    fGeometry;
+            InstanceRange fInstanceRange;
+        };
+
+        DrawCmd& getSingleDrawCmd() {
+            SkASSERT(1 == fDrawCmds.count());
+            return fDrawCmds.front();
+        }
+
+        InstancedRendering* const    fInstancedRendering;
+        SkSTArray<4, DrawCmd, false> fDrawCmds;
+        BatchInfo                    fInfo;
+
+        typedef GrDrawBatch INHERITED;
+
+        friend class InstancedRendering;
+    };
+
+    typedef SkTInternalLList<Batch> BatchList;
+
+    InstancedRendering(GrGpu* gpu, AntialiasMode lastSupportedAAMode);
+
+    const Instance& instance(int idx) const { return fInstances[idx]; }
+    const BatchList& batchList() const { return fBatchList; }
+    const GrBuffer* vertexBuffer() const { SkASSERT(fVertexBuffer); return fVertexBuffer; }
+    const GrBuffer* indexBuffer() const { SkASSERT(fIndexBuffer); return fIndexBuffer; }
+
+    virtual void onBeginFlush(GrResourceProvider*) = 0;
+    virtual void onDraw(const GrPipeline&, const InstanceProcessor&, const Batch*) = 0;
+    virtual void onEndFlush() = 0;
+    virtual void onResetGpuResources(ResetType) = 0;
+
+private:
+    enum class State : bool {
+        kRecordingDraws,
+        kFlushing
+    };
+
+    Batch* SK_WARN_UNUSED_RESULT recordShape(ShapeType, const SkRect& bounds,
+                                             const SkMatrix& viewMatrix, GrColor,
+                                             const SkRect& localRect, bool antialias,
+                                             const GrInstancedPipelineInfo&, bool* requireHWAA);
+
+    bool selectAntialiasMode(const SkMatrix& viewMatrix, bool antialias,
+                             const GrInstancedPipelineInfo&, bool* useHWAA, AntialiasMode*);
+
+    void appendRRectParams(const SkRRect&, BatchInfo*);
+    void appendParamsTexel(const SkScalar* vals, int count);
+    void appendParamsTexel(SkScalar x, SkScalar y, SkScalar z, SkScalar w);
+    void appendParamsTexel(SkScalar x, SkScalar y, SkScalar z);
+
+    virtual Batch* createBatch(int instanceIdx) = 0;
+
+    const SkAutoTUnref<GrGpu>          fGpu;
+    const AntialiasMode                fLastSupportedAAMode;
+    State                              fState;
+    SkSTArray<1024, Instance, true>    fInstances;
+    SkSTArray<1024, ParamsTexel, true> fParams;
+    BatchList                          fBatchList;
+    SkAutoTUnref<const GrBuffer>       fVertexBuffer;
+    SkAutoTUnref<const GrBuffer>       fIndexBuffer;
+    SkAutoTUnref<GrBuffer>             fParamsBuffer;
+};
+
+}
+
+#endif
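
Usage note (not part of the patch): the class comment describes a record-then-flush protocol. Below is a minimal sketch of that flow, assuming a concrete backend subclass of InstancedRendering already exists; the drawTwoRects() wrapper and flushBatch() helper are invented for illustration, and batch ref-counting and null checks are elided.

    void drawTwoRects(gr_instanced::InstancedRendering* ir,
                      GrResourceProvider* resourceProvider,
                      const GrInstancedPipelineInfo& info) {
        bool useHWAA;

        // Record draws. Each call returns a lightweight batch that merely
        // indexes the instance data accumulated inside InstancedRendering.
        // A real caller would check for null (a draw the instanced path
        // cannot handle).
        GrDrawBatch* b0 = ir->recordRect(SkRect::MakeWH(10, 10), SkMatrix::I(),
                                         0xFF00FF00, /*antialias=*/true, info, &useHWAA);
        GrDrawBatch* b1 = ir->recordRect(SkRect::MakeWH(20, 20), SkMatrix::I(),
                                         0xFFFF0000, /*antialias=*/true, info, &useHWAA);

        // Compile all recorded instance data into GPU buffers. Recording new
        // draws is invalid from here until endFlush().
        ir->beginFlush(resourceProvider);

        // Flush the batches through the caller's normal pipeline. flushBatch()
        // is a hypothetical helper; the batches rely on `ir` for the actual
        // draws.
        flushBatch(b0);
        flushBatch(b1);

        // Once every batch recorded above has been released, recording may
        // resume.
        ir->endFlush();
    }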
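Subclass note: onBeginFlush(), onDraw(), onEndFlush(), onResetGpuResources(), and createBatch() are the backend hooks. A greatly simplified sketch of a hypothetical GL backend follows, assuming the pure virtuals declared here are the only outstanding ones and ignoring batch class-ID registration.

    namespace gr_instanced {

    class GLInstancedRendering final : public InstancedRendering {
    public:
        GLInstancedRendering(GrGpu* gpu, AntialiasMode lastSupportedAAMode)
            : InstancedRendering(gpu, lastSupportedAAMode) {}

    private:
        class GLBatch final : public Batch {
        public:
            GLBatch(uint32_t classID, GLInstancedRendering* ir, int instanceIdx)
                : Batch(classID, ir, instanceIdx) {}
        };

        Batch* createBatch(int instanceIdx) override {
            // A real backend would pass its registered batch class ID here.
            return new GLBatch(0, this, instanceIdx);
        }

        void onBeginFlush(GrResourceProvider*) override {
            // Upload the accumulated instance and params data into GL buffers
            // and set up vertex arrays.
        }

        void onDraw(const GrPipeline&, const InstanceProcessor&, const Batch* batch) override {
            // Issue an instanced draw call per DrawCmd recorded in the batch.
        }

        void onEndFlush() override {
            // Release per-flush scratch resources.
        }

        void onResetGpuResources(ResetType type) override {
            // kDestroy: delete GL objects; kAbandon: drop handles without
            // touching the (lost) GL context.
        }
    };

    }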
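Reset note: ResetType follows Skia's usual destroy/abandon split. A sketch of how an owner might route the two teardown paths; both function names are hypothetical, and `ir` stands in for the InstancedRendering instance the owner holds.

    void destroyContextResources(gr_instanced::InstancedRendering* ir) {
        // The 3D API is still usable; GPU objects can be freed normally.
        ir->resetGpuResources(gr_instanced::InstancedRendering::ResetType::kDestroy);
    }

    void abandonContextResources(gr_instanced::InstancedRendering* ir) {
        // The context is lost; drop handles without making 3D API calls.
        ir->resetGpuResources(gr_instanced::InstancedRendering::ResetType::kAbandon);
    }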