Index: src/gpu/GrInstancedRendering.h
diff --git a/src/gpu/GrInstancedRendering.h b/src/gpu/GrInstancedRendering.h
new file mode 100644
index 0000000000000000000000000000000000000000..367342f89dbfde7b7545d6799eb4c81d225cbc15
--- /dev/null
+++ b/src/gpu/GrInstancedRendering.h
@@ -0,0 +1,216 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrInstancedRendering_DEFINED
+#define GrInstancedRendering_DEFINED
+
+#include "GrAllocator.h"
+#include "GrInstancedRenderingTypes.h"
+#include "batches/GrDrawBatch.h"
+
+class GrInstanceProcessor;
+class GrResourceProvider;
+
+/**
+ * This class serves as a centralized clearinghouse for instanced rendering. It accumulates data for
+ * instanced draws into one location, and creates special batches that pull from this data. The
+ * nature of instanced rendering allows these batches to combine well and render efficiently.
+ *
+ * During a flush, this class assembles the accumulated draw data into a single vertex and texel
+ * buffer, and its subclass draws the batches using backend-specific instanced rendering APIs.
+ *
+ * This class is responsible for the CPU side of instanced rendering. Shaders are implemented by
+ * GrInstanceProcessor.
+ */
+class GrInstancedRendering : public SkNoncopyable, protected GrInstancedRenderingTypes {
+public:
+    virtual ~GrInstancedRendering() { SkASSERT(State::kRecordingDraws == fState); }
+
+    GrGpu* gpu() const { return fGpu; }
+
+    /**
+     * Flags that describe relevant external pipeline conditions. These are used to select
+     * appropriate antialias modes, shader strategies, etc.
+     */
+    enum Flags {
+        kStencilWrite_Flag      = (1 << 0),
+        kStencilBufferMSAA_Flag = (1 << 1),
+        kColorWrite_Flag        = (1 << 2),
+        kColorBufferMSAA_Flag   = (1 << 3),
+        /**
+         * This should not be set if the fragment shader uses derivatives, automatic mipmap LOD, or
+         * other features that depend on neighboring pixels.
+         */
+        kUseDiscard_Flag        = (1 << 4)
+    };
+
+    /**
+     * Each of these methods makes an internal record of an instanced draw and returns a batch
+     * that is effectively just an index into that record. The returned batch is not
+     * self-contained; it relies on this class to handle the rendering. The client must call
+     * beginFlush() on this class before attempting to flush batches returned by it, and it is
+     * invalid to record new draws between beginFlush() and endFlush().
+     */
+    GrDrawBatch* SK_WARN_UNUSED_RESULT recordRect(const SkRect&, const SkMatrix&, GrColor,
+                                                  bool antialias, uint32_t flags, bool* useHWAA);
+
+    GrDrawBatch* SK_WARN_UNUSED_RESULT recordRect(const SkRect&, const SkMatrix&, GrColor,
+                                                  const SkRect& localRect, bool antialias,
+                                                  uint32_t flags, bool* useHWAA);
+
+    GrDrawBatch* SK_WARN_UNUSED_RESULT recordRect(const SkRect&, const SkMatrix&, GrColor,
+                                                  const SkMatrix& localMatrix, bool antialias,
+                                                  uint32_t flags, bool* useHWAA);
+
+    GrDrawBatch* SK_WARN_UNUSED_RESULT recordOval(const SkRect&, const SkMatrix&, GrColor,
+                                                  bool antialias, uint32_t flags, bool* useHWAA);
+
+    GrDrawBatch* SK_WARN_UNUSED_RESULT recordRRect(const SkRRect&, const SkMatrix&, GrColor,
+                                                   bool antialias, uint32_t flags, bool* useHWAA);
+
+    GrDrawBatch* SK_WARN_UNUSED_RESULT recordDRRect(const SkRRect& outer, const SkRRect& inner,
+                                                    const SkMatrix&, GrColor, bool antialias,
+                                                    uint32_t flags, bool* useHWAA);
+
+    /**
+     * Compiles all recorded draws into GPU buffers and allows the client to begin flushing the
+     * batches created by this class.
+     */
+    void beginFlush(GrResourceProvider*);
+
+    /**
+     * Called once the batches created previously by this class have all been released. Allows the
+     * client to begin recording draws again.
+     */
+    void endFlush();
+
+    enum class ResetType : bool {
+        kDestroy,
+        kAbandon
+    };
+
+    /**
+     * Resets all GPU resources, including those that are held long term. They will be lazily
+     * reinitialized if the class begins to be used again.
+     */
+    void resetGpuResources(ResetType);
+
+protected:
+    class Batch : public GrDrawBatch {
+    public:
+        virtual ~Batch() { fInUse = false; }  // fInUse is read after destruction; our storage lives on in the BatchAllocator.
+
+        const char* name() const override { return "Instanced Batch"; }
+
+        void computePipelineOptimizations(GrInitInvariantOutput* color,
+                                          GrInitInvariantOutput* coverage,
+                                          GrBatchToXPOverrides*) const override;
+
+    protected:
+        Batch(uint32_t classID, GrInstancedRendering* ir, int instanceIdx)
+            : INHERITED(classID),
+              fInstancedRendering(ir),
+              fFirstInstanceIdx(instanceIdx),
+              fInUse(true) {
+#ifdef SK_DEBUG
+            fIsCombined = false;
+#endif
+        }
+
+        void initBatchTracker(const GrXPOverridesForBatch&) override;
+
+        void onPrepare(GrBatchFlushState*) override {}
+        void onDraw(GrBatchFlushState*) override;
+        void onDelete() const override;
+
+        GrInstancedRendering* const fInstancedRendering;
+        const int                   fFirstInstanceIdx;
+        BatchInfo                   fInfo;
+        bool                        fInUse;
+#ifdef SK_DEBUG
+        bool                        fIsCombined;
+#endif
+
+        typedef GrDrawBatch INHERITED;
+
+        friend class GrInstancedRendering;
+    };
+
+    /**
+     * We allocate our own batches. This allows us to iterate through them immediately before a
+     * flush in order to compile draw buffers.
+     */
+    class BatchAllocator : public GrAllocator {
+    public:
+        BatchAllocator(size_t sizeofBatchClass)
+            : INHERITED(sizeofBatchClass, kBatchesPerBlock, nullptr) {
+            fFirstBlock = sk_malloc_throw(kBatchesPerBlock * sizeofBatchClass);
+            this->setInitialBlock(fFirstBlock);
+        }
+
+        ~BatchAllocator() {
+            sk_free(fFirstBlock);
+        }
+
+    private:
+        enum { kBatchesPerBlock = 128 };
+
+        void* fFirstBlock;
+
+        typedef GrAllocator INHERITED;
+    };
+
+    GrInstancedRendering(GrGpu* gpu, uint32_t supportedAAModes, size_t sizeofBatchClass);
+
+    const GrBuffer* vertexBuffer() const { SkASSERT(fVertexBuffer); return fVertexBuffer; }
+    const GrBuffer* indexBuffer() const { SkASSERT(fIndexBuffer); return fIndexBuffer; }
+    const GrBuffer* instanceBuffer() const { SkASSERT(fInstanceBuffer); return fInstanceBuffer; }
+    const BatchAllocator* batchAllocator() const { return &fBatchAllocator; }
+
+    virtual void onBeginFlush(GrResourceProvider*) = 0;
+    virtual void onDraw(const GrPipeline&, const GrInstanceProcessor&, const Batch*) = 0;
+    virtual void onEndFlush() = 0;
+    virtual void onResetGpuResources(ResetType) = 0;
+
+#ifdef SK_DEBUG
+    int fInUseBatchCount;
+#endif
+
+private:
+    enum class State : bool {
+        kRecordingDraws,
+        kFlushing
+    };
+
+    Batch* SK_WARN_UNUSED_RESULT recordShape(ShapeType, const SkRect& bounds,
+                                             const SkMatrix& viewMatrix, GrColor,
+                                             const SkRect& localRect, bool antialias,
+                                             uint32_t flags, bool* requireHWAA);
+
+    bool selectAntialiasMode(const SkMatrix& viewMatrix, bool antialias, uint32_t flags,
+                             AntialiasMode*, bool* requireHWAA);
+
+    void appendRRectParams(const SkRRect&, BatchInfo*);
+    void appendParamsTexel(const SkScalar* vals, int count);
+    void appendParamsTexel(SkScalar x, SkScalar y, SkScalar z, SkScalar w);
+    void appendParamsTexel(SkScalar x, SkScalar y, SkScalar z);
+
+    virtual Batch* constructBatch(void* storage, int instanceIdx) = 0;
+
+    const SkAutoTUnref<GrGpu>          fGpu;
+    const uint32_t                     fSupportedAAModes;
+    State                              fState;
+    SkSTArray<1024, Instance, true>    fInstances;
+    SkSTArray<1024, ParamsTexel, true> fParams;
+    BatchAllocator                     fBatchAllocator;
+    SkAutoTUnref<const GrBuffer>       fVertexBuffer;
+    SkAutoTUnref<const GrBuffer>       fIndexBuffer;
+    SkAutoTUnref<const GrBuffer>       fInstanceBuffer;
+    SkAutoTUnref<GrBuffer>             fParamsBuffer;
+};
+
+#endif
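
Note (not part of the patch): a minimal sketch of the record/flush lifecycle this header describes. The function, the 'ir' and 'rp' arguments, and the chosen flag are illustrative assumptions; only the GrInstancedRendering and GrDrawBatch calls come from the file above.

    // Hedged usage sketch. 'ir' is assumed to point at some backend subclass of
    // GrInstancedRendering; 'rp' is the GrResourceProvider used to build the buffers.
    void sketchRecordAndFlush(GrInstancedRendering* ir, GrResourceProvider* rp) {
        bool useHWAA;
        // Record a draw. The returned batch is effectively an index into the
        // instance data accumulated inside 'ir', so 'ir' must outlive it.
        SkAutoTUnref<GrDrawBatch> batch(ir->recordRect(SkRect::MakeWH(10, 10), SkMatrix::I(),
                                                       GrColor_WHITE, /*antialias=*/true,
                                                       GrInstancedRendering::kColorWrite_Flag,
                                                       &useHWAA));
        // Compile all recorded draws into GPU buffers; recording new draws is
        // now illegal until endFlush().
        ir->beginFlush(rp);
        // ... the draw target would flush 'batch' here ...
        batch.reset(nullptr);  // every batch must be released before endFlush()
        ir->endFlush();        // recording may begin again
    }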
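
Likewise, the pairing of BatchAllocator with the pure virtual constructBatch(void* storage, int instanceIdx) suggests that subclasses placement-new their concrete Batch type into allocator-owned storage sized by the sizeofBatchClass constructor argument. Below is a sketch under that assumption: the subclass is hypothetical, and everything except the overridden signatures and the DEFINE_BATCH_CLASS_ID macro (from GrBatch.h) is invented for illustration.

    #include <new>  // placement new

    // Hypothetical backend subclass showing only the allocation path. The other
    // pure virtuals (onBeginFlush, onDraw, onEndFlush, onResetGpuResources) are
    // omitted, so the class stays abstract as written.
    class SketchInstancedRendering : public GrInstancedRendering {
    private:
        class Batch : public GrInstancedRendering::Batch {
        public:
            DEFINE_BATCH_CLASS_ID
            Batch(GrInstancedRendering* ir, int instanceIdx)
                : INHERITED(ClassID(), ir, instanceIdx) {}
        private:
            typedef GrInstancedRendering::Batch INHERITED;
        };

        SketchInstancedRendering(GrGpu* gpu, uint32_t supportedAAModes)
            : INHERITED(gpu, supportedAAModes, sizeof(Batch)) {}  // one allocator slot per Batch

        Batch* constructBatch(void* storage, int instanceIdx) override {
            // 'storage' is a slot carved out of the base class's BatchAllocator,
            // so the concrete batch is placement-new'd rather than heap-allocated.
            return new (storage) Batch(this, instanceIdx);
        }

        typedef GrInstancedRendering INHERITED;
    };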