Index: src/gpu/GrBatch.h
diff --git a/src/gpu/GrBatch.h b/src/gpu/GrBatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..b1837abd7d4115f04345b8f2853a6f973484a338
--- /dev/null
+++ b/src/gpu/GrBatch.h
@@ -0,0 +1,129 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrBatch_DEFINED
+#define GrBatch_DEFINED
+
+#include <new>
+// TODO remove this header when we move entirely to batch
+#include "GrGeometryProcessor.h"
+#include "SkRefCnt.h"
+#include "SkThread.h"
+#include "SkTypes.h"
+
+class GrBatchBuffer;
+class GrGpu;
+class GrIndexBufferAllocPool;
+class GrInitInvariantOutput;
+class GrOptDrawState;
+class GrVertexBufferAllocPool;
+
+/*
+ * GrBatch is the base class for all Ganesh deferred geometry generators. To facilitate
+ * reorderable batching, Ganesh does not generate geometry inline with draw calls. Instead, it
+ * captures the arguments to the draw and then generates the geometry on demand. This gives GrBatch
+ * subclasses complete freedom to decide how / what they can batch.
+ *
+ * Batches are created generally by renderers and cannot be reused; once passed into BatchDraw,
+ * a batch is owned by a GrDrawTarget subclass. Two GrBatches of the same subclass may be combined
+ * into one. The combineIfPossible / onCombineIfPossible calls below determine whether two batches
+ * can be merged; note that merging may be destructive to the batch being combined in.
+ *
+ * GrBatches are always drawn with a GrOptDrawState. If the GrOptDrawState determines that any
+ * pipeline optimizations are possible, it communicates this information to the GrBatch through
+ * GrBatchOpt.
+ *
+ * Finally, generateGeometry is called with the GrOptDrawState and a GrBatchBuffer. The
+ * GrBatchBuffer can be treated somewhat like a GPU: it guarantees in-order playback to the real
+ * GPU, but it may delay flushing to the GPU for performance reasons.
+ */
+
+struct GrBatchOpt {
+    bool fCanTweakAlphaForCoverage;
+};
+
+class GrBatch : public SkRefCnt {
+public:
+    SK_DECLARE_INST_COUNT(GrBatch)
+    GrBatch() { SkDEBUGCODE(fUsed = false;) }
+    virtual ~GrBatch() {}
+    virtual const char* name() const = 0;
+    virtual void getInvariantOutputColor(GrInitInvariantOutput* out,
+                                         const GrBatchOpt&) const = 0;
+    virtual void getInvariantOutputCoverage(GrInitInvariantOutput* out,
+                                            const GrBatchOpt&) const = 0;
+
+    virtual void initBatchOpt(const GrBatchOpt&) = 0;
+    virtual void initBatchTracker(const GrGeometryProcessor::InitBT& init) = 0;
+
+    bool combineIfPossible(GrBatch* that) {
+        if (this->classID() != that->classID()) {
+            return false;
+        }
+
+        return onCombineIfPossible(that);
+    }
+
+    virtual bool onCombineIfPossible(GrBatch*) = 0;
+
+    virtual void generateGeometry(GrBatchBuffer*, const GrOptDrawState*) = 0;
+
+    void* operator new(size_t size);
+    void operator delete(void* target);
+
+    void* operator new(size_t size, void* placement) {
+        return ::operator new(size, placement);
+    }
+    void operator delete(void* target, void* placement) {
+        ::operator delete(target, placement);
+    }
+
+    /**
+     * Helper for down-casting to a GrBatch subclass
+     */
+    template <typename T> const T& cast() const { return *static_cast<const T*>(this); }
+    template <typename T> T* cast() { return static_cast<T*>(this); }
+
+    uint32_t classID() const { SkASSERT(kIllegalBatchClassID != fClassID); return fClassID; }
+
+    // TODO no GrPrimitiveProcessors yet read fragment position
+    bool willReadFragmentPosition() const { return false; }
+
+    SkDEBUGCODE(bool isUsed() const { return fUsed; })
+
+protected:
+    template <typename PROC_SUBCLASS> void initClassID() {
+        static uint32_t kClassID = GenClassID();
+        fClassID = kClassID;
+    }
+
+    uint32_t fClassID;
+
+private:
+    static uint32_t GenClassID() {
+        // gCurrBatchClassID has been initialized to kIllegalBatchClassID. The
+        // atomic inc returns the old value, not the incremented value, so we add
+        // 1 to the returned value.
+        uint32_t id = static_cast<uint32_t>(sk_atomic_inc(&gCurrBatchClassID)) + 1;
+        if (!id) {
+            SkFAIL("This should never wrap as it should only be called once for each GrBatch "
+                   "subclass.");
+        }
+        return id;
+    }
+
+    enum {
+        kIllegalBatchClassID = 0,
+    };
+    static int32_t gCurrBatchClassID;
+
+    SkDEBUGCODE(bool fUsed;)
+
+    typedef SkRefCnt INHERITED;
+};
+
+#endif
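
For concreteness, here is a minimal sketch of how a subclass might plug into the API above. GrRectBatch, its members, and the GrBatchBuffer usage are hypothetical, invented for illustration; they are not part of this change:

// Hypothetical sketch of a GrBatch subclass, assuming the API in this CL.
// GrRectBatch and its fields are invented for illustration only.
#include "GrBatch.h"
#include "GrColor.h"
#include "SkRect.h"
#include "SkTDArray.h"

class GrRectBatch : public GrBatch {
public:
    GrRectBatch(GrColor color, const SkRect& rect) : fColor(color) {
        this->initClassID<GrRectBatch>();  // per-subclass ID; gates combineIfPossible()
        *fRects.append() = rect;
    }

    const char* name() const SK_OVERRIDE { return "RectBatch"; }

    bool onCombineIfPossible(GrBatch* t) SK_OVERRIDE {
        // Safe: combineIfPossible() has already matched our classIDs.
        GrRectBatch* that = t->cast<GrRectBatch>();
        if (fColor != that->fColor) {
            return false;
        }
        // Destructive merge: absorb the other batch's rects.
        fRects.append(that->fRects.count(), that->fRects.begin());
        return true;
    }

    void generateGeometry(GrBatchBuffer* buffer, const GrOptDrawState* ods) SK_OVERRIDE {
        // Emit one quad per accumulated rect into the GrBatchBuffer here; the
        // buffer replays to the GPU in order but may defer the actual flush.
    }

    // Remaining pure virtuals (getInvariantOutputColor/Coverage, initBatchOpt,
    // initBatchTracker) elided from this sketch.

private:
    GrColor           fColor;
    SkTDArray<SkRect> fRects;
};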
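
The per-subclass class ID works because each template instantiation of initClassID() owns its own function-local static, so GenClassID() runs exactly once per subclass and every instance of that subclass shares the resulting ID. A distilled, standalone illustration of that mechanism (using std::atomic in place of SkThread's sk_atomic_inc; all names here are invented):

#include <atomic>
#include <cstdint>
#include <cstdio>

// 0 plays the role of kIllegalBatchClassID; the first generated ID is 1.
static std::atomic<uint32_t> gCurrClassID{0};

struct Base {
    uint32_t fClassID = 0;
protected:
    template <typename SUBCLASS> void initClassID() {
        static uint32_t kClassID = ++gCurrClassID;  // evaluated once per SUBCLASS
        fClassID = kClassID;
    }
};

struct RectBatch : Base { RectBatch() { this->initClassID<RectBatch>(); } };
struct OvalBatch : Base { OvalBatch() { this->initClassID<OvalBatch>(); } };

int main() {
    RectBatch r1, r2;
    OvalBatch o;
    // Same subclass -> same ID; different subclasses -> different IDs.
    std::printf("rect: %u %u, oval: %u\n", r1.fClassID, r2.fClassID, o.fClassID);
    return 0;
}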