OLD | NEW |
1 /* | 1 /* |
2 * Copyright 2015 Google Inc. | 2 * Copyright 2015 Google Inc. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
6 */ | 6 */ |
7 | 7 |
8 #ifndef GrBatch_DEFINED | 8 #ifndef GrBatch_DEFINED |
9 #define GrBatch_DEFINED | 9 #define GrBatch_DEFINED |
10 | 10 |
11 #include <new> | 11 #include <new> |
12 #include "GrBatchTarget.h" | 12 #include "GrBatchTarget.h" |
13 #include "GrGeometryProcessor.h" | 13 #include "GrGeometryProcessor.h" |
14 #include "GrNonAtomicRef.h" | |
15 #include "GrVertices.h" | 14 #include "GrVertices.h" |
16 #include "SkAtomics.h" | 15 #include "SkAtomics.h" |
| 16 #include "SkRefCnt.h" |
17 #include "SkTypes.h" | 17 #include "SkTypes.h" |
18 | 18 |
19 class GrGpu; | 19 class GrGpu; |
20 class GrPipeline; | 20 class GrPipeline; |
21 | 21 |
22 struct GrInitInvariantOutput; | 22 struct GrInitInvariantOutput; |
23 | 23 |
24 /* | 24 /* |
25 * GrBatch is the base class for all Ganesh deferred geometry generators. To facilitate | 25 * GrBatch is the base class for all Ganesh deferred geometry generators. To facilitate |
26 * reorderable batching, Ganesh does not generate geometry inline with draw calls. Instead, it | 26 * reorderable batching, Ganesh does not generate geometry inline with draw calls. Instead, it |
27 * captures the arguments to the draw and then generates the geometry on demand. This gives GrBatch | 27 * captures the arguments to the draw and then generates the geometry on demand. This gives GrBatch |
28 * subclasses complete freedom to decide how / what they can batch. | 28 * subclasses complete freedom to decide how / what they can batch. |
29 * | 29 * |
30 * Batches are created when GrContext processes a draw call. Batches of the same subclass may be | 30 * Batches are created when GrContext processes a draw call. Batches of the same subclass may be |
31 * merged using combineIfPossible. When two batches merge, one takes on the union of the data | 31 * merged using combineIfPossible. When two batches merge, one takes on the union of the data |
32 * and the other is left empty. The merged batch becomes responsible for drawing the data from both | 32 * and the other is left empty. The merged batch becomes responsible for drawing the data from both |
33 * the original batches. | 33 * the original batches. |
34 * | 34 * |
35 * If there are any possible optimizations which might require knowing more about the full state of | 35 * If there are any possible optimizations which might require knowing more about the full state of |
36 * the draw, i.e. whether or not the GrBatch is allowed to tweak alpha for coverage, then this | 36 * the draw, i.e. whether or not the GrBatch is allowed to tweak alpha for coverage, then this |
37 * information will be communicated to the GrBatch prior to geometry generation. | 37 * information will be communicated to the GrBatch prior to geometry generation. |
38 */ | 38 */ |
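The capture-then-generate pattern described above is easiest to see in a subclass. Below is a minimal sketch of a hypothetical RectBatch that batches colored rects; the Geometry struct and all names are invented for illustration and are not part of this CL, while the virtuals and helpers come from this header:

    // Hypothetical subclass sketching the deferred-geometry pattern.
    class RectBatch : public GrBatch {
    public:
        struct Geometry {               // captured draw arguments
            SkRect  fRect;
            GrColor fColor;
        };

        RectBatch(const Geometry& geo) {
            this->initClassID<RectBatch>();     // one ID shared by all RectBatches
            fGeoData.push_back(geo);
            this->setBounds(geo.fRect);         // conservative, never setLargest
        }

        const char* name() const override { return "RectBatch"; }
        void getInvariantOutputColor(GrInitInvariantOutput*) const override {}
        void getInvariantOutputCoverage(GrInitInvariantOutput*) const override {}
        void initBatchTracker(const GrPipelineInfo&) override {}

        bool onCombineIfPossible(GrBatch* t) override {
            // combineIfPossible() already matched class IDs, so the cast is safe.
            RectBatch* that = t->cast<RectBatch>();
            fGeoData.push_back_n(that->fGeoData.count(), that->fGeoData.begin());
            this->joinBounds(that->bounds());   // now responsible for both draws
            return true;
        }

        void generateGeometry(GrBatchTarget*, const GrPipeline*) override {
            // vertices for every captured Geometry are emitted here, on demand
        }

    private:
        SkSTArray<1, Geometry, true> fGeoData;
    };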
39 | 39 |
40 class GrBatch : public GrNonAtomicRef { | 40 class GrBatch : public SkRefCnt { |
41 public: | 41 public: |
| 42 |
42 GrBatch() : fClassID(kIllegalBatchClassID), fNumberOfDraws(0) { SkDEBUGCODE(fUsed = false;) } | 43 GrBatch() : fClassID(kIllegalBatchClassID), fNumberOfDraws(0) { SkDEBUGCODE(fUsed = false;) } |
43 virtual ~GrBatch() {} | 44 virtual ~GrBatch() {} |
44 | 45 |
45 virtual const char* name() const = 0; | 46 virtual const char* name() const = 0; |
46 virtual void getInvariantOutputColor(GrInitInvariantOutput* out) const = 0; | 47 virtual void getInvariantOutputColor(GrInitInvariantOutput* out) const = 0; |
47 virtual void getInvariantOutputCoverage(GrInitInvariantOutput* out) const = 0; | 48 virtual void getInvariantOutputCoverage(GrInitInvariantOutput* out) const = 0; |
48 | 49 |
49 /* | 50 /* |
50 * initBatchTracker is a hook for some additional overrides / optimization possibilities | 51 * initBatchTracker is a hook for some additional overrides / optimization possibilities |
51 * from the GrXferProcessor. | 52 * from the GrXferProcessor. |
52 */ | 53 */ |
53 virtual void initBatchTracker(const GrPipelineInfo& init) = 0; | 54 virtual void initBatchTracker(const GrPipelineInfo& init) = 0; |
54 | 55 |
55 bool combineIfPossible(GrBatch* that) { | 56 bool combineIfPossible(GrBatch* that) { |
56 if (this->classID() != that->classID()) { | 57 if (this->classID() != that->classID()) { |
57 return false; | 58 return false; |
58 } | 59 } |
59 | 60 |
60 if (!this->pipeline()->isEqual(*that->pipeline())) { | |
61 return false; | |
62 } | |
63 | |
64 return this->onCombineIfPossible(that); | 61 return this->onCombineIfPossible(that); |
65 } | 62 } |
66 | 63 |
67 virtual bool onCombineIfPossible(GrBatch*) = 0; | 64 virtual bool onCombineIfPossible(GrBatch*) = 0; |
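Note that with the pipeline equality check removed from combineIfPossible() in this change, compatibility of render state has to be established by the caller before merging. A rough sketch of what that caller-side gate might look like; try_merge and the externally tracked pipelines are hypothetical:

    // Hypothetical caller-side merge now that GrBatch no longer owns a pipeline.
    static bool try_merge(GrBatch* a, const GrPipeline* aPipe,
                          GrBatch* b, const GrPipeline* bPipe) {
        if (!aPipe->isEqual(*bPipe)) {
            return false;               // incompatible render state; cannot batch
        }
        return a->combineIfPossible(b); // class ID + subclass-specific checks
    }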
68 | 65 |
69 virtual void generateGeometry(GrBatchTarget*, const GrPipeline*) = 0; | 66 virtual void generateGeometry(GrBatchTarget*, const GrPipeline*) = 0; |
70 | 67 |
71 const SkRect& bounds() const { return fBounds; } | 68 const SkRect& bounds() const { return fBounds; } |
72 | 69 |
73 // TODO this goes away when batches are everywhere | 70 // TODO this goes away when batches are everywhere |
(...skipping 16 matching lines...)
90 template <typename T> const T& cast() const { return *static_cast<const T*>(this); } | 87 template <typename T> const T& cast() const { return *static_cast<const T*>(this); } |
91 template <typename T> T* cast() { return static_cast<T*>(this); } | 88 template <typename T> T* cast() { return static_cast<T*>(this); } |
92 | 89 |
93 uint32_t classID() const { SkASSERT(kIllegalBatchClassID != fClassID); return fClassID; } | 90 uint32_t classID() const { SkASSERT(kIllegalBatchClassID != fClassID); return fClassID; } |
94 | 91 |
95 // TODO no GrPrimitiveProcessors yet read fragment position | 92 // TODO no GrPrimitiveProcessors yet read fragment position |
96 bool willReadFragmentPosition() const { return false; } | 93 bool willReadFragmentPosition() const { return false; } |
97 | 94 |
98 SkDEBUGCODE(bool isUsed() const { return fUsed; }) | 95 SkDEBUGCODE(bool isUsed() const { return fUsed; }) |
99 | 96 |
100 void setPipeline(const GrPipeline* pipeline) { fPipeline.reset(SkRef(pipeline)); } | |
102 protected: | 97 protected: |
103 template <typename PROC_SUBCLASS> void initClassID() { | 98 template <typename PROC_SUBCLASS> void initClassID() { |
104 static uint32_t kClassID = GenClassID(); | 99 static uint32_t kClassID = GenClassID(); |
105 fClassID = kClassID; | 100 fClassID = kClassID; |
106 } | 101 } |
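The function-local static above means GenClassID() runs once per template instantiation, i.e. once per GrBatch subclass, so every instance of that subclass is stamped with the same ID. A subclass simply calls it from its constructor; MyBatch is a hypothetical name:

    MyBatch::MyBatch() {
        this->initClassID<MyBatch>();   // every MyBatch shares one class ID
    }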
107 | 102 |
| 103 uint32_t fClassID; |
| 104 |
108 // NOTE, compute some bounds, even if extremely conservative. Do *NOT* setLargest on the bounds | 105 // NOTE, compute some bounds, even if extremely conservative. Do *NOT* setLargest on the bounds |
109 // rect because we outset it for dst copy textures | 106 // rect because we outset it for dst copy textures |
110 void setBounds(const SkRect& newBounds) { fBounds = newBounds; } | 107 void setBounds(const SkRect& newBounds) { fBounds = newBounds; } |
111 | 108 |
112 void joinBounds(const SkRect& otherBounds) { | 109 void joinBounds(const SkRect& otherBounds) { |
113 return fBounds.joinPossiblyEmptyRect(otherBounds); | 110 return fBounds.joinPossiblyEmptyRect(otherBounds); |
114 } | 111 } |
115 | 112 |
116 const GrPipeline* pipeline() const { return fPipeline; } | |
117 | |
118 /** Helper for rendering instances using an instanced index buffer. This class creates the | 113 /** Helper for rendering instances using an instanced index buffer. This class creates the |
119 space for the vertices and flushes the draws to the batch target.*/ | 114 space for the vertices and flushes the draws to the batch target.*/ |
120 class InstancedHelper { | 115 class InstancedHelper { |
121 public: | 116 public: |
122 InstancedHelper() {} | 117 InstancedHelper() {} |
123 /** Returns the allocated storage for the vertices. The caller should populate the | 118 /** Returns the allocated storage for the vertices. The caller should populate the |
124 vertices before calling issueDraws(). */ | 119 vertices before calling issueDraws(). */ |
125 void* init(GrBatchTarget* batchTarget, GrPrimitiveType, size_t vertexStride, | 120 void* init(GrBatchTarget* batchTarget, GrPrimitiveType, size_t vertexStride, |
126            const GrIndexBuffer*, int verticesPerInstance, int indicesPerInstance, | 121            const GrIndexBuffer*, int verticesPerInstance, int indicesPerInstance, |
127 int instancesToDraw); | 122 int instancesToDraw); |
(...skipping 18 matching lines...)
146 and on success a pointer to the vertex data that the caller should populate before | 141 and on success a pointer to the vertex data that the caller should populate before |
147 calling issueDraws(). */ | 142 calling issueDraws(). */ |
148 void* init(GrBatchTarget* batchTarget, size_t vertexStride, int quadsToDraw); | 143 void* init(GrBatchTarget* batchTarget, size_t vertexStride, int quadsToDraw); |
149 | 144 |
150 using InstancedHelper::issueDraw; | 145 using InstancedHelper::issueDraw; |
151 | 146 |
152 private: | 147 private: |
153 typedef InstancedHelper INHERITED; | 148 typedef InstancedHelper INHERITED; |
154 }; | 149 }; |
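In practice a subclass drives these helpers from generateGeometry(): request storage, fill it, then issue the draw. A sketch using QuadHelper, assuming position-only vertices and the issueDraw(GrBatchTarget*) entry point implied by the using-declaration above; MyQuadBatch and fQuadCount are hypothetical:

    void MyQuadBatch::generateGeometry(GrBatchTarget* batchTarget,
                                       const GrPipeline* pipeline) {
        QuadHelper helper;
        size_t vertexStride = sizeof(SkPoint);              // position-only layout
        SkPoint* verts = reinterpret_cast<SkPoint*>(
                helper.init(batchTarget, vertexStride, fQuadCount));
        if (!verts) {
            return;                                         // allocation failed
        }
        // ... write 4 corner positions per quad into verts ...
        helper.issueDraw(batchTarget);                      // flush to the target
    }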
155 | 150 |
156 uint32_t fClassID; | |
157 SkRect fBounds; | 151 SkRect fBounds; |
158 | 152 |
159 private: | 153 private: |
160 static uint32_t GenClassID() { | 154 static uint32_t GenClassID() { |
161 // gCurrBatchClassID has been initialized to kIllegalBatchClassID. The | 155 // gCurrBatchClassID has been initialized to kIllegalBatchClassID. The |
162 // atomic inc returns the old value not the incremented value. So we add | 156 // atomic inc returns the old value not the incremented value. So we add |
163 // 1 to the returned value. | 157 // 1 to the returned value. |
164 uint32_t id = static_cast<uint32_t>(sk_atomic_inc(&gCurrBatchClassID)) + 1; | 158 uint32_t id = static_cast<uint32_t>(sk_atomic_inc(&gCurrBatchClassID)) + 1; |
165 if (!id) { | 159 if (!id) { |
166 SkFAIL("This should never wrap as it should only be called once for each GrBatch " | 160 SkFAIL("This should never wrap as it should only be called once for each GrBatch " |
167 "subclass."); | 161 "subclass."); |
168 } | 162 } |
169 return id; | 163 return id; |
170 } | 164 } |
171 | 165 |
172 enum { | 166 enum { |
173 kIllegalBatchClassID = 0, | 167 kIllegalBatchClassID = 0, |
174 }; | 168 }; |
175 SkAutoTUnref<const GrPipeline> fPipeline; | |
176 static int32_t gCurrBatchClassID; | 169 static int32_t gCurrBatchClassID; |
| 170 |
| 171 SkDEBUGCODE(bool fUsed;) |
| 172 |
177 int fNumberOfDraws; | 173 int fNumberOfDraws; |
178 SkDEBUGCODE(bool fUsed;) | |
179 | 174 |
180 typedef SkRefCnt INHERITED; | 175 typedef SkRefCnt INHERITED; |
181 }; | 176 }; |
182 | 177 |
183 #endif | 178 #endif |