Chromium Code Reviews| Index: src/gpu/GrBatch.h |
| diff --git a/src/gpu/GrBatch.h b/src/gpu/GrBatch.h |
| new file mode 100644 |
| index 0000000000000000000000000000000000000000..29df2cd264ff413316df861bd0ea58c9272cd367 |
| --- /dev/null |
| +++ b/src/gpu/GrBatch.h |
| @@ -0,0 +1,105 @@ |
| +/* |
| + * Copyright 2015 Google Inc. |
| + * |
| + * Use of this source code is governed by a BSD-style license that can be |
| + * found in the LICENSE file. |
| + */ |
| + |
| +#ifndef GrBatch_DEFINED |
| +#define GrBatch_DEFINED |
| + |
| +#include <new> |
| +// TODO remove this header when we move entirely to batch |
| +#include "GrGeometryProcessor.h" |
| +#include "SkThread.h" |
| +#include "SkTypes.h" |
| + |
| +class GrGpu; |
| +class GrIndexBufferAllocPool; |
| +class GrInitInvariantOutput; |
| +class GrOptDrawState; |
| +class GrVertexBufferAllocPool; |
| + |
/**
 * Optimization flags that are computed for a batch before its batch tracker is
 * initialized. Currently this only records whether the pipeline allows the
 * batch to fold coverage into the color's alpha channel.
 */
struct GrBatchOpt {
    bool fCanTweakAlphaForCoverage;
};
| + |
| +class GrBatch : public SkNoncopyable { |
|
bsalomon
2015/01/20 16:14:02
needs a block comment explaining what this is/does
|
| +public: |
| + virtual ~GrBatch() {} |
| + virtual const char* name() const = 0; |
| + virtual void getInvariantOutputColor(GrInitInvariantOutput* out, |
| + const GrBatchOpt&) const = 0; |
| + virtual void getInvariantOutputCoverage(GrInitInvariantOutput* out, |
| + const GrBatchOpt&) const = 0; |
| + |
| + virtual void initBatchOpt(const GrBatchOpt&) = 0; |
| + virtual void initBatchTracker(const GrGeometryProcessor::InitBT& init) = 0; |
| + |
| + bool canMakeEqual(const GrBatch& that) const { |
| + if (this->classID() != that.classID()) { |
| + return false; |
| + } |
| + |
| + return onCanMakeEqual(that); |
| + } |
| + |
| + virtual bool onCanMakeEqual(const GrBatch&) const = 0; |
| + virtual void makeEqual(GrBatch*) = 0; |
| + |
| + virtual void generateGeometry(GrGpu* gpu, |
| + GrVertexBufferAllocPool* vpool, |
| + GrIndexBufferAllocPool* ipool, |
| + GrOptDrawState* optState) = 0; |
| + virtual void draw(GrGpu* gpu, const GrOptDrawState* optState) = 0; |
| + |
| + void* operator new(size_t size); |
| + void operator delete(void* target); |
| + |
| + void* operator new(size_t size, void* placement) { |
| + return ::operator new(size, placement); |
| + } |
| + void operator delete(void* target, void* placement) { |
| + ::operator delete(target, placement); |
| + } |
| + |
| + /** |
| + * Helper for down-casting to a GrBatch subclass |
| + */ |
| + template <typename T> const T& cast() const { return *static_cast<const T*>(this); } |
| + template <typename T> T* cast() { return static_cast<T*>(this); } |
| + |
| + uint32_t classID() const { SkASSERT(kIllegalBatchClassID != fClassID); return fClassID; } |
| + |
| + // TODO no GrPrimitiveProcessors yet read fragment position |
| + bool willReadFragmentPosition() const { return false; } |
| + |
| +protected: |
| + template <typename PROC_SUBCLASS> void initClassID() { |
| + static uint32_t kClassID = GenClassID(); |
| + fClassID = kClassID; |
| + } |
| + |
| + uint32_t fClassID; |
| + |
| +private: |
| + static uint32_t GenClassID() { |
| + // fCurrProcessorClassID has been initialized to kIllegalProcessorClassID. The |
| + // atomic inc returns the old value not the incremented value. So we add |
| + // 1 to the returned value. |
| + uint32_t id = static_cast<uint32_t>(sk_atomic_inc(&gCurrBatchClassID)) + 1; |
| + if (!id) { |
| + SkFAIL("This should never wrap as it should only be called once for each GrProcessor " |
|
bsalomon
2015/01/20 16:14:02
each GrBatch subclass
|
| + "subclass."); |
| + } |
| + return id; |
| + } |
| + |
| + enum { |
| + kIllegalBatchClassID = 0, |
| + }; |
| + static int32_t gCurrBatchClassID; |
| + |
| +}; |
| + |
| +#endif |