| Index: src/gpu/batches/GrBatch.h
|
| diff --git a/src/gpu/batches/GrBatch.h b/src/gpu/batches/GrBatch.h
|
| index 35844cd99a533d6e6a4e5ab56dd93ac0948d23f0..ef8f56c4b9daa1a5241abbd91947878c437ac59b 100644
|
| --- a/src/gpu/batches/GrBatch.h
|
| +++ b/src/gpu/batches/GrBatch.h
|
| @@ -21,7 +21,7 @@ class GrPipeline;
|
|
|
| struct GrInitInvariantOutput;
|
|
|
| -/*
|
| +/**
|
| * GrBatch is the base class for all Ganesh deferred geometry generators. To facilitate
|
| * reorderable batching, Ganesh does not generate geometry inline with draw calls. Instead, it
|
| * captures the arguments to the draw and then generates the geometry on demand. This gives GrBatch
|
| @@ -51,8 +51,6 @@ public:
|
| ~GrBatch() override;
|
|
|
| virtual const char* name() const = 0;
|
| - virtual void getInvariantOutputColor(GrInitInvariantOutput* out) const = 0;
|
| - virtual void getInvariantOutputCoverage(GrInitInvariantOutput* out) const = 0;
|
|
|
| bool combineIfPossible(GrBatch* that, const GrCaps& caps) {
|
| if (this->classID() != that->classID()) {
|
| @@ -62,14 +60,8 @@ public:
|
| return this->onCombineIfPossible(that, caps);
|
| }
|
|
|
| - virtual void generateGeometry(GrBatchTarget*) = 0;
|
| -
|
| const SkRect& bounds() const { return fBounds; }
|
|
|
| - // TODO this goes away when batches are everywhere
|
| - void setNumberOfDraws(int numberOfDraws) { fNumberOfDraws = numberOfDraws; }
|
| - int numberOfDraws() const { return fNumberOfDraws; }
|
| -
|
| void* operator new(size_t size);
|
| void operator delete(void* target);
|
|
|
| @@ -81,28 +73,17 @@ public:
|
| }
|
|
|
| /**
|
| - * Helper for down-casting to a GrBatch subclass
|
| - */
|
| + * Helper for down-casting to a GrBatch subclass
|
| + */
|
| template <typename T> const T& cast() const { return *static_cast<const T*>(this); }
|
| template <typename T> T* cast() { return static_cast<T*>(this); }
|
|
|
| uint32_t classID() const { SkASSERT(kIllegalBatchID != fClassID); return fClassID; }
|
|
|
| - // TODO no GrPrimitiveProcessors yet read fragment position
|
| - bool willReadFragmentPosition() const { return false; }
|
| -
|
| - SkDEBUGCODE(bool isUsed() const { return fUsed; })
|
| -
|
| - const GrPipeline* pipeline() const {
|
| - SkASSERT(fPipelineInstalled);
|
| - return reinterpret_cast<const GrPipeline*>(fPipelineStorage.get());
|
| - }
|
| -
|
| - bool installPipeline(const GrPipeline::CreateArgs&);
|
| -
|
| #if GR_BATCH_SPEW
|
| uint32_t uniqueID() const { return fUniqueID; }
|
| #endif
|
| + SkDEBUGCODE(bool isUsed() const { return fUsed; })
|
|
|
| protected:
|
| template <typename PROC_SUBCLASS> void initClassID() {
|
| @@ -118,8 +99,86 @@ protected:
|
| return fBounds.joinPossiblyEmptyRect(otherBounds);
|
| }
|
|
|
| + SkRect fBounds;
|
| +
|
| +private:
|
| + virtual bool onCombineIfPossible(GrBatch*, const GrCaps& caps) = 0;
|
| +
|
| + static uint32_t GenID(int32_t* idCounter) {
|
| + // idCounter has been initialized to kIllegalBatchID (0). The
|
| + // atomic inc returns the old value, not the incremented value, so we add
|
| + // 1 to the returned value.
|
| + uint32_t id = static_cast<uint32_t>(sk_atomic_inc(idCounter)) + 1;
|
| + if (!id) {
|
| + SkFAIL("This should never wrap as it should only be called once for each GrBatch "
|
| + "subclass.");
|
| + }
|
| + return id;
|
| + }
|
| +
|
| + enum {
|
| + kIllegalBatchID = 0,
|
| + };
|
| +
|
| + uint32_t fClassID;
|
| + SkDEBUGCODE(bool fUsed;)
|
| +#if GR_BATCH_SPEW
|
| + uint32_t fUniqueID;
|
| + static int32_t gCurrBatchUniqueID;
|
| +#endif
|
| + static int32_t gCurrBatchClassID;
|
| + typedef GrNonAtomicRef INHERITED;
|
| +};
|
| +
|
| +/**
|
| + * Base class for GrBatches that draw. These batches have a GrPipeline installed by GrDrawTarget.
|
| + */
|
| +class GrDrawBatch : public GrBatch {
|
| +public:
|
| + GrDrawBatch();
|
| + ~GrDrawBatch() override;
|
| +
|
| + virtual void getInvariantOutputColor(GrInitInvariantOutput* out) const = 0;
|
| + virtual void getInvariantOutputCoverage(GrInitInvariantOutput* out) const = 0;
|
| +
|
| + const GrPipeline* pipeline() const {
|
| + SkASSERT(fPipelineInstalled);
|
| + return reinterpret_cast<const GrPipeline*>(fPipelineStorage.get());
|
| + }
|
| +
|
| + bool installPipeline(const GrPipeline::CreateArgs&);
|
| +
|
| + // TODO no GrPrimitiveProcessors yet read fragment position
|
| + bool willReadFragmentPosition() const { return false; }
|
| +
|
| +private:
|
| + /**
|
| + * initBatchTracker is a hook for some additional overrides / optimization possibilities
|
| + * from the GrXferProcessor.
|
| + */
|
| + virtual void initBatchTracker(const GrPipelineOptimizations&) = 0;
|
| +
|
| + SkAlignedSTStorage<1, GrPipeline> fPipelineStorage;
|
| + bool fPipelineInstalled;
|
| + typedef GrBatch INHERITED;
|
| +};
|
| +
|
| +/**
|
| + * Base class for vertex-based GrBatches.
|
| + */
|
| +class GrVertexBatch : public GrDrawBatch {
|
| +public:
|
| + GrVertexBatch();
|
| +
|
| + virtual void generateGeometry(GrBatchTarget*) = 0;
|
| +
|
| + // TODO this goes away when batches are everywhere
|
| + void setNumberOfDraws(int numberOfDraws) { fNumberOfDraws = numberOfDraws; }
|
| + int numberOfDraws() const { return fNumberOfDraws; }
|
| +
|
| +protected:
|
| /** Helper for rendering instances using an instanced index index buffer. This class creates the
|
| - space for the vertices and flushes the draws to the batch target.*/
|
| + space for the vertices and flushes the draws to the batch target. */
|
| class InstancedHelper {
|
| public:
|
| InstancedHelper() {}
|
| @@ -156,44 +215,9 @@ protected:
|
| typedef InstancedHelper INHERITED;
|
| };
|
|
|
| - uint32_t fClassID;
|
| - SkRect fBounds;
|
| -
|
| private:
|
| - virtual bool onCombineIfPossible(GrBatch*, const GrCaps& caps) = 0;
|
| -
|
| - /*
|
| - * initBatchTracker is a hook for the some additional overrides / optimization possibilities
|
| - * from the GrXferProcessor.
|
| - */
|
| - virtual void initBatchTracker(const GrPipelineOptimizations&) = 0;
|
| -
|
| -
|
| - static uint32_t GenID(int32_t* idCounter) {
|
| - // fCurrProcessorClassID has been initialized to kIllegalProcessorClassID. The
|
| - // atomic inc returns the old value not the incremented value. So we add
|
| - // 1 to the returned value.
|
| - uint32_t id = static_cast<uint32_t>(sk_atomic_inc(idCounter)) + 1;
|
| - if (!id) {
|
| - SkFAIL("This should never wrap as it should only be called once for each GrBatch "
|
| - "subclass.");
|
| - }
|
| - return id;
|
| - }
|
| -
|
| - enum {
|
| - kIllegalBatchID = 0,
|
| - };
|
| - SkAlignedSTStorage<1, GrPipeline> fPipelineStorage;
|
| int fNumberOfDraws;
|
| - SkDEBUGCODE(bool fUsed;)
|
| - bool fPipelineInstalled;
|
| -#if GR_BATCH_SPEW
|
| - uint32_t fUniqueID;
|
| - static int32_t gCurrBatchUniqueID;
|
| -#endif
|
| - static int32_t gCurrBatchClassID;
|
| - typedef SkRefCnt INHERITED;
|
| + typedef GrDrawBatch INHERITED;
|
| };
|
|
|
| #endif
|
|
|