| Index: src/gpu/GrInstancedRendering.h |
| diff --git a/src/gpu/GrInstancedRendering.h b/src/gpu/GrInstancedRendering.h |
| new file mode 100644 |
| index 0000000000000000000000000000000000000000..6d0a52bbc24cc33e539295fdda507e0e4238a2e7 |
| --- /dev/null |
| +++ b/src/gpu/GrInstancedRendering.h |
| @@ -0,0 +1,208 @@ |
| +/* |
| + * Copyright 2016 Google Inc. |
| + * |
| + * Use of this source code is governed by a BSD-style license that can be |
| + * found in the LICENSE file. |
| + */ |
| + |
| +#ifndef GrInstancedRendering_DEFINED |
| +#define GrInstancedRendering_DEFINED |
| + |
| +#include "GrAllocator.h" |
| +#include "GrInstancedRenderingTypes.h" |
| +#include "batches/GrDrawBatch.h" |
| + |
| +class GrInstanceProcessor; |
| +class GrResourceProvider; |
| + |
| +/** |
| + * This class serves as a centralized clearinghouse for instanced rendering. It accumulates data for |
|
bsalomon 2016/04/25 13:22:38: If we had a completely different instanced primiti…
Chris Dalton 2016/04/25 17:01:19: My vision for this class is yes! The basic geometr…
bsalomon 2016/04/27 14:10:32: Ok, let's keep the name then
|
| + * instanced draws into one location, and creates special batches that pull from this data. The |
| + * nature of instanced rendering allows these batches to combine well and render efficiently. |
| + * |
| + * During a flush, this class assembles the accumulated draw data into a single vertex and texel |
| + * buffer, and its subclass draws the batches using backend-specific instanced rendering APIs. |
| + * |
| + * This class is responsible for the CPU side of instanced rendering. Shaders are implemented by |
| + * GrInstanceProcessor. |
| + */ |
| +class GrInstancedRendering : public SkNoncopyable, protected GrInstancedRenderingTypes { |
| +public: |
| + virtual ~GrInstancedRendering() { SkASSERT(State::kRecordingShapes == fState); } |
| + |
| + GrGpu* gpu() const { return fGpu; } |
| + |
|
bsalomon 2016/04/25 13:22:38: I think these could use a comment
Chris Dalton 2016/04/26 19:18:07: Done.
|
| + enum Flags { |
| + kStencilWrite_Flag = (1 << 0), |
| + kStencilBufferMSAA_Flag = (1 << 1), |
| + kColorWrite_Flag = (1 << 2), |
| + kColorBufferMSAA_Flag = (1 << 3), |
| + /** |
| + * This should not be set if the fragment shader uses derivatives, automatic mipmap LOD, or |
| + * other features that depend on neighboring pixels. |
| + */ |
| + kUseDiscard_Flag = (1 << 4) |
| + }; |
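|
The flags above form a bitmask. As a minimal sketch (the variable is illustrative, not from this patch), a caller drawing antialiased color into a multisampled target might combine them like so:

    // Color draw into an MSAA render target.
    uint32_t flags = GrInstancedRendering::kColorWrite_Flag |
                     GrInstancedRendering::kColorBufferMSAA_Flag;
    // kUseDiscard_Flag is deliberately omitted: per the comment above, discard
    // is unsafe when the fragment shader uses derivatives or mipmap LOD.
|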
| + |
| + /** |
| + * These methods record a new instanced draw and return a batch that can render it. The client |
| + * must call commitToGpu() before attempting to draw batches returned by this class. After |
|
bsalomon 2016/04/25 13:22:38: Maybe before attempting to flush batches returned…
Chris Dalton 2016/04/26 19:18:07: Done.
|
| + * commitToGpu(), it becomes invalid to record new draws until a subsequent call to restart(). |
| + */ |
| + GrDrawBatch* SK_WARN_UNUSED_RESULT recordRect(const SkRect&, const SkMatrix&, GrColor, |
|
bsalomon 2016/04/25 13:22:38: It seems to me like the "recording" aspect of thes…
Chris Dalton 2016/04/25 17:01:19: When naming these methods I did think this through…
Chris Dalton 2016/04/26 19:18:07: Updated the comment.
bsalomon 2016/04/27 14:10:32: Ok, that makes sense to me.
|
| + bool antialias, uint32_t flags, bool* useHWAA); |
| + |
| + GrDrawBatch* SK_WARN_UNUSED_RESULT recordRect(const SkRect&, const SkMatrix&, GrColor, |
| + const SkRect& localRect, bool antialias, |
| + uint32_t flags, bool* useHWAA); |
| + |
| + GrDrawBatch* SK_WARN_UNUSED_RESULT recordRect(const SkRect&, const SkMatrix&, GrColor, |
| + const SkMatrix& localMatrix, bool antialias, |
| + uint32_t flags, bool* useHWAA); |
| + |
| + GrDrawBatch* SK_WARN_UNUSED_RESULT recordOval(const SkRect&, const SkMatrix&, GrColor, |
| + bool antialias, uint32_t flags, bool* useHWAA); |
| + |
| + GrDrawBatch* SK_WARN_UNUSED_RESULT recordRRect(const SkRRect&, const SkMatrix&, GrColor, |
| + bool antialias, uint32_t flags, bool* useHWAA); |
| + |
| + GrDrawBatch* SK_WARN_UNUSED_RESULT recordDRRect(const SkRRect& outer, const SkRRect& inner, |
| + const SkMatrix&, GrColor, bool antialias, |
| + uint32_t flags, bool* useHWAA); |
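|
The three recordRect() overloads differ only in how local coordinates are sourced. A hedged illustration, assuming `ir` points at a concrete GrInstancedRendering and that rect, viewMatrix, color, srcRect, localMatrix, and flags are caller-owned values:

    bool useHWAA;
    // Plain fill: local coords implicitly match the rect itself.
    GrDrawBatch* fill = ir->recordRect(rect, viewMatrix, color,
                                       /*antialias=*/true, flags, &useHWAA);
    // Texture-backed draw: sample from an explicit local sub-rect.
    GrDrawBatch* tex = ir->recordRect(rect, viewMatrix, color, srcRect,
                                      /*antialias=*/true, flags, &useHWAA);
    // Arbitrary local-coord mapping via a matrix.
    GrDrawBatch* warped = ir->recordRect(rect, viewMatrix, color, localMatrix,
                                         /*antialias=*/true, flags, &useHWAA);
|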
| + |
| + /** |
| + * Commits all recorded draws to GPU memory and allows the client to begin drawing the batches |
| + * created by this class. |
| + */ |
| + void commitToGpu(GrResourceProvider*); |
|
bsalomon 2016/04/25 13:22:38: Wonder if something like prepareToFlush or willFlu…
Chris Dalton 2016/04/26 19:18:08: Done.
|
| + |
| + /** |
| + * Called once the batches created previously by this class have all been released. Allows the |
| + * client to begin recording draws again. |
| + */ |
| + void restart(); |
|
bsalomon 2016/04/25 13:22:38: Minor but I think we more commonly use reset()
Chris Dalton 2016/04/25 17:01:19: I also thought this one through. I decided to go w…
Chris Dalton 2016/04/26 19:18:07: How's endFlush()?
bsalomon 2016/04/27 14:10:32: sgtm
|
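|
Putting the contract together, one flush cycle from the client's side might look like the sketch below; drawTarget, pipelineBuilder, and resourceProvider are assumed surrounding objects, and drawBatch() is a hypothetical call on the caller's draw target:

    bool useHWAA;
    GrDrawBatch* batch = ir->recordRect(bounds, viewMatrix, color,
                                        /*antialias=*/true, flags, &useHWAA);
    if (batch) {
        drawTarget->drawBatch(pipelineBuilder, batch);  // hypothetical call
        batch->unref();
    }
    ir->commitToGpu(resourceProvider);  // uploads the accumulated instance data
    // ... the backend flushes and executes the instanced batches ...
    ir->restart();                      // all batches released; recording resumes
|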
| + |
| + enum class ClearType { |
| + kDestroy, |
| + kAbandon |
| + }; |
| + |
| + /** |
| + * Clears all GPU resources, including those that are held long term. They will be lazily |
| + * reinitialized if the class begins to be used again. |
| + */ |
| + void clearGpuResources(ClearType); |
|
bsalomon 2016/04/25 13:22:38: freeGpuResources? again just trying to mentally ma…
Chris Dalton 2016/04/25 17:01:19: clearGpuResources(ClearType::kDestroy) would be th…
Chris Dalton 2016/04/26 19:18:07: Done.
|
| + |
| +protected: |
| + class Batch : public GrDrawBatch { |
| + public: |
| + virtual ~Batch() { fInUse = false; } // fInUse will continue to be accessed. |
| + |
| + const char* name() const override { return "Instanced Batch"; } |
| + |
| + void computePipelineOptimizations(GrInitInvariantOutput* color, |
| + GrInitInvariantOutput* coverage, |
| + GrBatchToXPOverrides*) const override; |
| + |
| + protected: |
| + Batch(uint32_t classID, GrInstancedRendering* ir, AntialiasMode aa, int instanceIdx) |
| + : INHERITED(classID), |
| + fInstancedRendering(ir), |
| + fAntialiasMode(aa), |
| + fFirstInstanceIdx(instanceIdx), |
| + fInUse(true) { |
| +#ifdef SK_DEBUG |
| + fIsCombined = false; |
| +#endif |
| + } |
| + |
| + void initBatchTracker(const GrXPOverridesForBatch&) override; |
| + |
| + void onPrepare(GrBatchFlushState*) override {} |
| + void onDraw(GrBatchFlushState*) override; |
| + void onDelete() const override; |
| + |
| + GrInstancedRendering* const fInstancedRendering; |
| + const AntialiasMode fAntialiasMode; |
| + const int fFirstInstanceIdx; |
| + BatchTracker fTracker; |
| + bool fInUse; |
| +#ifdef SK_DEBUG |
| + bool fIsCombined; |
| +#endif |
| + |
| + typedef GrDrawBatch INHERITED; |
| + |
| + friend class GrInstancedRendering; |
| + }; |
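|
The destructor's note about fInUse is subtle: Batch storage comes from the BatchAllocator below rather than from the heap, so destroying a batch does not release its memory, and the owning GrInstancedRendering can keep reading fInUse afterward to learn when every outstanding batch is gone. Under that assumption, onDelete() presumably just runs the destructor in place (the real definition is in the .cpp, not shown in this patch):

    void GrInstancedRendering::Batch::onDelete() const {
        // Destroy in place; the BatchAllocator owns the storage and recycles
        // it on restart(), so no delete/sk_free here.
        this->~Batch();
    }
|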
| + |
| + class BatchAllocator : public GrAllocator { |
| + public: |
| + BatchAllocator(size_t sizeofBatchClass) |
| + : INHERITED(sizeofBatchClass, kBatchesPerBlock, nullptr) { |
| + fFirstBlock = sk_malloc_throw(kBatchesPerBlock * sizeofBatchClass); |
| + this->setInitialBlock(fFirstBlock); |
| + } |
| + |
| + ~BatchAllocator() { |
| + sk_free(fFirstBlock); |
| + } |
| + |
| + private: |
| + enum { kBatchesPerBlock = 128 }; |
| + |
| + void* fFirstBlock; |
| + |
| + typedef GrAllocator INHERITED; |
| + }; |
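|
Because the allocator hands out raw blocks of sizeofBatchClass bytes, the constructBatch() hook declared further down is presumably implemented with placement new in each backend. A sketch under that assumption, with GrGLInstancedRendering and GLBatch as stand-in names not found in this patch:

    #include <new>  // placement new

    GrInstancedRendering::Batch* GrGLInstancedRendering::constructBatch(
            void* storage, AntialiasMode aa, int instanceIdx) {
        // Construct the concrete batch directly in allocator-provided storage.
        return new (storage) GLBatch(this, aa, instanceIdx);
    }
|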
| + |
| + GrInstancedRendering(GrGpu* gpu, uint32_t supportedAAModes, size_t sizeofBatchClass); |
| + |
| + const GrBuffer* vertexBuffer() const { SkASSERT(fVertexBuffer); return fVertexBuffer; } |
| + const GrBuffer* indexBuffer() const { SkASSERT(fIndexBuffer); return fIndexBuffer; } |
| + const GrBuffer* instanceBuffer() const { SkASSERT(fInstanceBuffer); return fInstanceBuffer; } |
| + const BatchAllocator* batchAllocator() const { return &fBatchAllocator; } |
| + |
| + virtual void onCommitToGpu(GrResourceProvider*) = 0; |
| + virtual void onDraw(const GrPipeline&, const GrInstanceProcessor&, const Batch*) = 0; |
| + virtual void onRestart() = 0; |
| + virtual void onClearGpuResources(ClearType) = 0; |
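|
Taken together, a backend implements these four hooks plus constructBatch(). A skeletal sketch of what a GL subclass might look like (every name below is an assumption; no such class appears in this file):

    class GrGLInstancedRendering final : public GrInstancedRendering {
    private:
        // Constructor and concrete Batch subclass omitted for brevity; the
        // ctor would forward (gpu, supportedAAModes, sizeof(Batch subclass)).
        void onCommitToGpu(GrResourceProvider*) override {
            // Create/upload backend buffers and build draw commands for this flush.
        }
        void onDraw(const GrPipeline&, const GrInstanceProcessor&,
                    const Batch*) override {
            // Issue the instanced draw, e.g. glDrawElementsInstanced.
        }
        void onRestart() override {
            // Drop per-flush state so recording can begin again.
        }
        void onClearGpuResources(ClearType type) override {
            // kAbandon: forget GPU object handles without deleting them
            // (e.g. after context loss); kDestroy: delete them as well.
        }
        Batch* constructBatch(void*, AntialiasMode, int) override;
    };
|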
| + |
| +#ifdef SK_DEBUG |
| + int fInUseBatchCount; |
| +#endif |
| + |
| +private: |
| + enum class State { |
| + kRecordingShapes, |
| + kDrawingBatches |
| + }; |
| + |
| + Batch* SK_WARN_UNUSED_RESULT recordShape(ShapeType, const SkRect& bounds, |
| + const SkMatrix& viewMatrix, GrColor, |
| + const SkRect& localRect, bool antialias, |
| + uint32_t flags, bool* requireHWAA); |
| + |
| + bool selectAntialiasMode(const SkMatrix& viewMatrix, bool antialias, uint32_t flags, |
| + AntialiasMode*, bool* requireHWAA); |
| + |
| + void appendRRectParams(const SkRRect&, BatchTracker*); |
| + void appendParamsTexel(const SkScalar* vals, int count); |
| + void appendParamsTexel(SkScalar x, SkScalar y, SkScalar z, SkScalar w); |
| + void appendParamsTexel(SkScalar x, SkScalar y, SkScalar z); |
| + |
| + virtual Batch* constructBatch(void* storage, AntialiasMode, int instanceIdx) = 0; |
| + |
| + const SkAutoTUnref<GrGpu> fGpu; |
| + const uint32_t fSupportedAAModes; |
| + State fState; |
| + SkSTArray<1024, Instance, true> fInstances; |
| + SkSTArray<1024, ParamsTexel, true> fParams; |
| + BatchAllocator fBatchAllocator; |
| + SkAutoTUnref<const GrBuffer> fVertexBuffer; |
| + SkAutoTUnref<const GrBuffer> fIndexBuffer; |
| + SkAutoTUnref<const GrBuffer> fInstanceBuffer; |
| + SkAutoTUnref<GrBuffer> fParamsBuffer; |
| +}; |
| + |
| +#endif |