/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrBatch_DEFINED
#define GrBatch_DEFINED

#include <new>
#include "GrBatchTarget.h"
#include "GrGeometryProcessor.h"
#include "GrNonAtomicRef.h"
#include "GrVertices.h"
#include "SkAtomics.h"
#include "SkTypes.h"

class GrGpu;
class GrPipeline;

struct GrInitInvariantOutput;

/*
 * GrBatch is the base class for all Ganesh deferred geometry generators. To facilitate
 * reorderable batching, Ganesh does not generate geometry inline with draw calls. Instead, it
 * captures the arguments to the draw and then generates the geometry on demand. This gives GrBatch
 * subclasses complete freedom to decide how / what they can batch.
 *
 * Batches are created when GrContext processes a draw call. Batches of the same subclass may be
 * merged using combineIfPossible. When two batches merge, one takes on the union of the data
 * and the other is left empty. The merged batch becomes responsible for drawing the data from
 * both of the original batches.
 *
 * If an optimization requires knowing more about the full state of the draw (e.g. whether or not
 * the GrBatch is allowed to tweak alpha for coverage), then this information is communicated to
 * the GrBatch prior to geometry generation.
 */
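
// For illustration only: a minimal, hypothetical subclass sketching the pattern described above.
// SampleRectBatch, fRects, and the geometry details are invented names; only initClassID<>,
// setBounds, onCombineIfPossible, and generateGeometry come from the GrBatch interface.
//
//   class SampleRectBatch : public GrBatch {
//   public:
//       SampleRectBatch(const SkRect& rect) {
//           this->initClassID<SampleRectBatch>();  // per-subclass ID checked by combineIfPossible
//           fRects.push_back(rect);
//           this->setBounds(rect);                 // conservative bounds are required
//       }
//
//       const char* name() const override { return "SampleRectBatch"; }
//
//       bool onCombineIfPossible(GrBatch* t) override {
//           // See the sketch after onCombineIfPossible() below.
//           return false;
//       }
//
//       void generateGeometry(GrBatchTarget* batchTarget) override {
//           // Emit vertices for fRects here, e.g. with QuadHelper (see the sketch after it).
//       }
//
//       // (getInvariantOutputColor/Coverage and initBatchTracker overrides elided.)
//
//   private:
//       SkSTArray<1, SkRect, true> fRects;
//   };
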
#define GR_BATCH_SPEW 0
#if GR_BATCH_SPEW
    #define GrBATCH_INFO(...) SkDebugf(__VA_ARGS__)
    #define GrBATCH_SPEW(code) code
#else
    #define GrBATCH_SPEW(code)
    #define GrBATCH_INFO(...)
#endif

class GrBatch : public GrNonAtomicRef {
public:
    GrBatch()
        : fClassID(kIllegalBatchID)
        , fNumberOfDraws(0)
#if GR_BATCH_SPEW
        , fUniqueID(GenID(&gCurrBatchUniqueID))
#endif
    { SkDEBUGCODE(fUsed = false;) }
    virtual ~GrBatch() {}

    virtual const char* name() const = 0;
    virtual void getInvariantOutputColor(GrInitInvariantOutput* out) const = 0;
    virtual void getInvariantOutputCoverage(GrInitInvariantOutput* out) const = 0;

    /*
     * initBatchTracker is a hook for additional overrides / optimization possibilities coming
     * from the GrXferProcessor.
     */
    virtual void initBatchTracker(const GrPipelineInfo& init) = 0;

    bool combineIfPossible(GrBatch* that) {
        if (this->classID() != that->classID()) {
            return false;
        }

        return this->onCombineIfPossible(that);
    }

    virtual bool onCombineIfPossible(GrBatch*) = 0;
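
    // A typical onCombineIfPossible() override might look like the sketch below (hypothetical;
    // MyBatch and fGeoData are invented names, and a real batch would also check that state such
    // as the pipeline is compatible before merging):
    //
    //   bool onCombineIfPossible(GrBatch* t) override {
    //       MyBatch* that = t->cast<MyBatch>();   // classIDs already matched in combineIfPossible
    //       fGeoData.push_back_n(that->fGeoData.count(), that->fGeoData.begin());
    //       this->joinBounds(that->bounds());     // this batch now draws both sets of data
    //       return true;                          // 'that' is left empty and will not be drawn
    //   }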

    virtual void generateGeometry(GrBatchTarget*) = 0;

    const SkRect& bounds() const { return fBounds; }

    // TODO this goes away when batches are everywhere
    void setNumberOfDraws(int numberOfDraws) { fNumberOfDraws = numberOfDraws; }
    int numberOfDraws() const { return fNumberOfDraws; }

    void* operator new(size_t size);
    void operator delete(void* target);

    void* operator new(size_t size, void* placement) {
        return ::operator new(size, placement);
    }
    void operator delete(void* target, void* placement) {
        ::operator delete(target, placement);
    }

    /**
     * Helper for down-casting to a GrBatch subclass
     */
    template <typename T> const T& cast() const { return *static_cast<const T*>(this); }
    template <typename T> T* cast() { return static_cast<T*>(this); }

    uint32_t classID() const { SkASSERT(kIllegalBatchID != fClassID); return fClassID; }

    // TODO no GrPrimitiveProcessors yet read fragment position
    bool willReadFragmentPosition() const { return false; }

    SkDEBUGCODE(bool isUsed() const { return fUsed; })

    const GrPipeline* pipeline() const { return fPipeline; }
    void setPipeline(const GrPipeline* pipeline) { fPipeline.reset(SkRef(pipeline)); }

#if GR_BATCH_SPEW
    uint32_t uniqueID() const { return fUniqueID; }
#endif

protected:
    template <typename PROC_SUBCLASS> void initClassID() {
        static uint32_t kClassID = GenID(&gCurrBatchClassID);
        fClassID = kClassID;
    }

    // NOTE, compute some bounds, even if extremely conservative. Do *NOT* setLargest on the
    // bounds rect because we outset it for dst copy textures
    void setBounds(const SkRect& newBounds) { fBounds = newBounds; }

    void joinBounds(const SkRect& otherBounds) {
        return fBounds.joinPossiblyEmptyRect(otherBounds);
    }

    /** Helper for rendering instances using an instanced index buffer. This class creates the
        space for the vertices and flushes the draws to the batch target. */
    class InstancedHelper {
    public:
        InstancedHelper() {}
        /** Returns the allocated storage for the vertices. The caller should populate the
            vertices before calling issueDraw(). */
        void* init(GrBatchTarget* batchTarget, GrPrimitiveType, size_t vertexStride,
                   const GrIndexBuffer*, int verticesPerInstance, int indicesPerInstance,
                   int instancesToDraw);

        /** Call after init() to issue draws to the batch target. */
        void issueDraw(GrBatchTarget* batchTarget) {
            SkASSERT(fVertices.instanceCount());
            batchTarget->draw(fVertices);
        }
    private:
        GrVertices fVertices;
    };

    static const int kVerticesPerQuad = 4;
    static const int kIndicesPerQuad = 6;

    /** A specialization of InstancedHelper for quad rendering. */
    class QuadHelper : private InstancedHelper {
    public:
        QuadHelper() : INHERITED() {}
        /** Finds the cached quad index buffer and reserves vertex space. Returns NULL on failure
            and on success a pointer to the vertex data that the caller should populate before
            calling issueDraw(). */
        void* init(GrBatchTarget* batchTarget, size_t vertexStride, int quadsToDraw);

        using InstancedHelper::issueDraw;

    private:
        typedef InstancedHelper INHERITED;
    };
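
    // Sketch of how a subclass's generateGeometry() might use QuadHelper. The names MyVertex and
    // quadCount are hypothetical, and a real implementation would also set up its geometry
    // processor / pipeline on the batch target before issuing the draw:
    //
    //   void generateGeometry(GrBatchTarget* batchTarget) override {
    //       QuadHelper helper;
    //       MyVertex* verts = reinterpret_cast<MyVertex*>(
    //               helper.init(batchTarget, sizeof(MyVertex), quadCount));
    //       if (!verts) {
    //           return;  // init() returns NULL if the index buffer or vertex space is unavailable
    //       }
    //       // ... write kVerticesPerQuad vertices for each quad into 'verts' ...
    //       helper.issueDraw(batchTarget);
    //   }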

    uint32_t fClassID;
    SkRect fBounds;

private:
    static uint32_t GenID(int32_t* idCounter) {
        // The id counter has been initialized to kIllegalBatchID. The atomic inc returns the old
        // value, not the incremented value, so we add 1 to the returned value.
        uint32_t id = static_cast<uint32_t>(sk_atomic_inc(idCounter)) + 1;
        if (!id) {
            SkFAIL("This should never wrap as it should only be called once for each GrBatch "
                   "subclass.");
        }
        return id;
    }

    enum {
        kIllegalBatchID = 0,
    };
    SkAutoTUnref<const GrPipeline> fPipeline;
    static int32_t gCurrBatchClassID;
    int fNumberOfDraws;
    SkDEBUGCODE(bool fUsed;)
#if GR_BATCH_SPEW
    static int32_t gCurrBatchUniqueID;
    uint32_t fUniqueID;
#endif

    typedef GrNonAtomicRef INHERITED;
};

#endif