Index: src/gpu/batches/GrBatch.h
diff --git a/src/gpu/batches/GrBatch.h b/src/gpu/batches/GrBatch.h
index b6eec1f969ea1e25de8eb4c86946882a8efa2ba4..2be17eb70cead1bfb3553d6b9a303b465051ffa2 100644
--- a/src/gpu/batches/GrBatch.h
+++ b/src/gpu/batches/GrBatch.h
@@ -41,9 +41,16 @@ class GrBatchFlushState;
 #define GrBATCH_INFO(...)
 #endif
 
+// A helper macro to generate a class static id
+#define DEFINE_BATCH_CLASS_ID \
+    static uint32_t ClassID() { \
+        static uint32_t kClassID = GenID(); \
+        return kClassID; \
+    }
+
 class GrBatch : public GrNonAtomicRef {
 public:
-    GrBatch();
+    GrBatch(uint32_t classID);
     ~GrBatch() override;
 
     virtual const char* name() const = 0;
@@ -69,10 +76,17 @@ public:
     }
 
     /**
-     * Helper for down-casting to a GrBatch subclass
+     * Helper for safely down-casting to a GrBatch subclass
      */
-    template <typename T> const T& cast() const { return *static_cast<const T*>(this); }
-    template <typename T> T* cast() { return static_cast<T*>(this); }
+    template <typename T> const T& cast() const {
+        SkASSERT(T::ClassID() == this->classID());
+        return *static_cast<const T*>(this);
+    }
+
+    template <typename T> T* cast() {
+        SkASSERT(T::ClassID() == this->classID());
+        return static_cast<T*>(this);
+    }
 
     uint32_t classID() const { SkASSERT(kIllegalBatchID != fClassID); return fClassID; }
 
@@ -96,11 +110,6 @@ public:
     virtual SkString dumpInfo() const = 0;
 
 protected:
-    template <typename PROC_SUBCLASS> void initClassID() {
-        static uint32_t kClassID = GenID(&gCurrBatchClassID);
-        fClassID = kClassID;
-    }
-
     // NOTE, compute some bounds, even if extremely conservative. Do *NOT* setLargest on the bounds
     // rect because we outset it for dst copy textures
     void setBounds(const SkRect& newBounds) { fBounds = newBounds; }
@@ -109,19 +118,10 @@ protected:
         return fBounds.joinPossiblyEmptyRect(otherBounds);
     }
 
-    SkRect fBounds;
-
-private:
-    virtual bool onCombineIfPossible(GrBatch*, const GrCaps& caps) = 0;
-
-    virtual void onPrepare(GrBatchFlushState*) = 0;
-    virtual void onDraw(GrBatchFlushState*) = 0;
-
-    static uint32_t GenID(int32_t* idCounter) {
-        // fCurrProcessorClassID has been initialized to kIllegalProcessorClassID. The
-        // atomic inc returns the old value not the incremented value. So we add
+    static uint32_t GenID() {
+        // The atomic inc returns the old value not the incremented value. So we add
         // 1 to the returned value.
-        uint32_t id = static_cast<uint32_t>(sk_atomic_inc(idCounter)) + 1;
+        uint32_t id = static_cast<uint32_t>(sk_atomic_inc(&gCurrBatchClassID)) + 1;
         if (!id) {
             SkFAIL("This should never wrap as it should only be called once for each GrBatch "
                    "subclass.");
@@ -129,17 +129,25 @@ private:
         return id;
     }
 
+    SkRect fBounds;
+
+private:
+    virtual bool onCombineIfPossible(GrBatch*, const GrCaps& caps) = 0;
+
+    virtual void onPrepare(GrBatchFlushState*) = 0;
+    virtual void onDraw(GrBatchFlushState*) = 0;
+
     enum {
         kIllegalBatchID = 0,
     };
 
-    uint32_t fClassID;
     SkDEBUGCODE(bool fUsed;)
 #if GR_BATCH_SPEW
     uint32_t fUniqueID;
     static int32_t gCurrBatchUniqueID;
 #endif
-    static int32_t gCurrBatchClassID;
+    const uint32_t fClassID;
+    static int32_t gCurrBatchClassID;
     typedef GrNonAtomicRef INHERITED;
 };
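For context, the usage pattern this change establishes is roughly: a concrete batch invokes DEFINE_BATCH_CLASS_ID to get a static ClassID(), forwards that id to the new GrBatch(uint32_t classID) constructor, and cast<T>() then asserts that the stored id matches T::ClassID() before down-casting. The sketch below is illustrative only; the subclass name and its members (HypotheticalRectBatch, the rect argument) are assumptions and not part of this patch, and real Skia batches derive through intermediate classes with more state.

// A minimal sketch, assuming a hypothetical direct GrBatch subclass.
#include "GrBatch.h"

class HypotheticalRectBatch : public GrBatch {
public:
    DEFINE_BATCH_CLASS_ID   // expands to: static uint32_t ClassID() { ... }

    // The class id is now passed up to the GrBatch(uint32_t classID) constructor
    // instead of being installed after construction via initClassID<>().
    HypotheticalRectBatch(const SkRect& rect) : INHERITED(ClassID()) {
        this->setBounds(rect);
    }

    const char* name() const override { return "HypotheticalRectBatch"; }
    SkString dumpInfo() const override { return SkString("HypotheticalRectBatch"); }

private:
    bool onCombineIfPossible(GrBatch* that, const GrCaps&) override {
        // classID() gives a cheap reject before the checked down-cast; cast<T>()
        // now asserts T::ClassID() == this->classID() in debug builds.
        if (this->classID() != that->classID()) {
            return false;
        }
        const HypotheticalRectBatch* other = that->cast<HypotheticalRectBatch>();
        (void)other;            // a real batch would compare and merge state here
        return false;
    }
    void onPrepare(GrBatchFlushState*) override {}
    void onDraw(GrBatchFlushState*) override {}

    typedef GrBatch INHERITED;
};

Because fClassID is now const and set in the constructor, a fully constructed batch can never carry kIllegalBatchID, so the assertions in classID() and cast<>() hold for every subclass that adopts this pattern.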