| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright 2015 Google Inc. | 2 * Copyright 2015 Google Inc. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
| 5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
| 6 */ | 6 */ |
| 7 | 7 |
| 8 #ifndef GrBatch_DEFINED | 8 #ifndef GrBatch_DEFINED |
| 9 #define GrBatch_DEFINED | 9 #define GrBatch_DEFINED |
| 10 | 10 |
| 11 #include <new> | 11 #include <new> |
| 12 #include "GrBatchTarget.h" | 12 #include "GrBatchTarget.h" |
| 13 #include "GrGeometryProcessor.h" | 13 #include "GrGeometryProcessor.h" |
| 14 #include "GrNonAtomicRef.h" | 14 #include "GrNonAtomicRef.h" |
| 15 #include "GrVertices.h" | 15 #include "GrVertices.h" |
| 16 #include "SkAtomics.h" | 16 #include "SkAtomics.h" |
| 17 #include "SkTypes.h" | 17 #include "SkTypes.h" |
| 18 | 18 |
| 19 class GrGpu; | 19 class GrGpu; |
| 20 class GrPipeline; | 20 class GrPipeline; |
| 21 | 21 |
| 22 struct GrInitInvariantOutput; | 22 struct GrInitInvariantOutput; |
| 23 | 23 |
| 24 /* | 24 /** |
| 25 * GrBatch is the base class for all Ganesh deferred geometry generators. To facilitate | 25 * GrBatch is the base class for all Ganesh deferred geometry generators. To facilitate |
| 26 * reorderable batching, Ganesh does not generate geometry inline with draw calls. Instead, it | 26 * reorderable batching, Ganesh does not generate geometry inline with draw calls. Instead, it |
| 27 * captures the arguments to the draw and then generates the geometry on demand. This gives GrBatch | 27 * captures the arguments to the draw and then generates the geometry on demand. This gives GrBatch |
| 28 * subclasses complete freedom to decide how / what they can batch. | 28 * subclasses complete freedom to decide how / what they can batch. |
| 29 * | 29 * |
| 30 * Batches are created when GrContext processes a draw call. Batches of the same subclass may be | 30 * Batches are created when GrContext processes a draw call. Batches of the same subclass may be |
| 31 * merged using combineIfPossible. When two batches merge, one takes on the union of the data | 31 * merged using combineIfPossible. When two batches merge, one takes on the union of the data |
| 32 * and the other is left empty. The merged batch becomes responsible for drawing the data from both | 32 * and the other is left empty. The merged batch becomes responsible for drawing the data from both |
| 33 * the original batches. | 33 * the original batches. |
| 34 * | 34 * |
| 35 * If there are any possible optimizations which might require knowing more about the full state of | 35 * If there are any possible optimizations which might require knowing more about the full state of |
| 36 * the draw, ie whether or not the GrBatch is allowed to tweak alpha for coverage, then this | 36 * the draw, ie whether or not the GrBatch is allowed to tweak alpha for coverage, then this |
| 37 * information will be communicated to the GrBatch prior to geometry generation. | 37 * information will be communicated to the GrBatch prior to geometry generation. |
| 38 */ | 38 */ |
| 39 #define GR_BATCH_SPEW 0 | 39 #define GR_BATCH_SPEW 0 |
| 40 #if GR_BATCH_SPEW | 40 #if GR_BATCH_SPEW |
| 41 #define GrBATCH_INFO(...) SkDebugf(__VA_ARGS__) | 41 #define GrBATCH_INFO(...) SkDebugf(__VA_ARGS__) |
| 42 #define GrBATCH_SPEW(code) code | 42 #define GrBATCH_SPEW(code) code |
| 43 #else | 43 #else |
| 44 #define GrBATCH_SPEW(code) | 44 #define GrBATCH_SPEW(code) |
| 45 #define GrBATCH_INFO(...) | 45 #define GrBATCH_INFO(...) |
| 46 #endif | 46 #endif |
| 47 | 47 |
| 48 class GrBatch : public GrNonAtomicRef { | 48 class GrBatch : public GrNonAtomicRef { |
| 49 public: | 49 public: |
| 50 GrBatch(); | 50 GrBatch(); |
| 51 ~GrBatch() override; | 51 ~GrBatch() override; |
| 52 | 52 |
| 53 virtual const char* name() const = 0; | 53 virtual const char* name() const = 0; |
| 54 virtual void getInvariantOutputColor(GrInitInvariantOutput* out) const = 0; | |
| 55 virtual void getInvariantOutputCoverage(GrInitInvariantOutput* out) const = 0; | |
| 56 | 54 |
| 57 bool combineIfPossible(GrBatch* that, const GrCaps& caps) { | 55 bool combineIfPossible(GrBatch* that, const GrCaps& caps) { |
| 58 if (this->classID() != that->classID()) { | 56 if (this->classID() != that->classID()) { |
| 59 return false; | 57 return false; |
| 60 } | 58 } |
| 61 | 59 |
| 62 return this->onCombineIfPossible(that, caps); | 60 return this->onCombineIfPossible(that, caps); |
| 63 } | 61 } |
| 64 | 62 |
| 65 virtual void generateGeometry(GrBatchTarget*) = 0; | |
| 66 | |
| 67 const SkRect& bounds() const { return fBounds; } | 63 const SkRect& bounds() const { return fBounds; } |
| 68 | 64 |
| 69 // TODO this goes away when batches are everywhere | |
| 70 void setNumberOfDraws(int numberOfDraws) { fNumberOfDraws = numberOfDraws; } | |
| 71 int numberOfDraws() const { return fNumberOfDraws; } | |
| 72 | |
| 73 void* operator new(size_t size); | 65 void* operator new(size_t size); |
| 74 void operator delete(void* target); | 66 void operator delete(void* target); |
| 75 | 67 |
| 76 void* operator new(size_t size, void* placement) { | 68 void* operator new(size_t size, void* placement) { |
| 77 return ::operator new(size, placement); | 69 return ::operator new(size, placement); |
| 78 } | 70 } |
| 79 void operator delete(void* target, void* placement) { | 71 void operator delete(void* target, void* placement) { |
| 80 ::operator delete(target, placement); | 72 ::operator delete(target, placement); |
| 81 } | 73 } |
| 82 | 74 |
| 83 /** | 75 /** |
| 84 * Helper for down-casting to a GrBatch subclass | 76 * Helper for down-casting to a GrBatch subclass |
| 85 */ | 77 */ |
| 86 template <typename T> const T& cast() const { return *static_cast<const T*>(this); } | 78 template <typename T> const T& cast() const { return *static_cast<const T*>(this); } |
| 87 template <typename T> T* cast() { return static_cast<T*>(this); } | 79 template <typename T> T* cast() { return static_cast<T*>(this); } |
| 88 | 80 |
| 89 uint32_t classID() const { SkASSERT(kIllegalBatchID != fClassID); return fClassID; } | 81 uint32_t classID() const { SkASSERT(kIllegalBatchID != fClassID); return fClassID; } |
| 90 | 82 |
| 91 // TODO no GrPrimitiveProcessors yet read fragment position | |
| 92 bool willReadFragmentPosition() const { return false; } | |
| 93 | |
| 94 SkDEBUGCODE(bool isUsed() const { return fUsed; }) | |
| 95 | |
| 96 const GrPipeline* pipeline() const { | |
| 97 SkASSERT(fPipelineInstalled); | |
| 98 return reinterpret_cast<const GrPipeline*>(fPipelineStorage.get()); | |
| 99 } | |
| 100 | |
| 101 bool installPipeline(const GrPipeline::CreateArgs&); | |
| 102 | |
| 103 #if GR_BATCH_SPEW | 83 #if GR_BATCH_SPEW |
| 104 uint32_t uniqueID() const { return fUniqueID; } | 84 uint32_t uniqueID() const { return fUniqueID; } |
| 105 #endif | 85 #endif |
| 86 SkDEBUGCODE(bool isUsed() const { return fUsed; }) |
| 106 | 87 |
| 107 protected: | 88 protected: |
| 108 template <typename PROC_SUBCLASS> void initClassID() { | 89 template <typename PROC_SUBCLASS> void initClassID() { |
| 109 static uint32_t kClassID = GenID(&gCurrBatchClassID); | 90 static uint32_t kClassID = GenID(&gCurrBatchClassID); |
| 110 fClassID = kClassID; | 91 fClassID = kClassID; |
| 111 } | 92 } |
| 112 | 93 |
| 113 // NOTE, compute some bounds, even if extremely conservative. Do *NOT* setLargest on the bounds | 94 // NOTE, compute some bounds, even if extremely conservative. Do *NOT* setLargest on the bounds |
| 114 // rect because we outset it for dst copy textures | 95 // rect because we outset it for dst copy textures |
| 115 void setBounds(const SkRect& newBounds) { fBounds = newBounds; } | 96 void setBounds(const SkRect& newBounds) { fBounds = newBounds; } |
| 116 | 97 |
| 117 void joinBounds(const SkRect& otherBounds) { | 98 void joinBounds(const SkRect& otherBounds) { |
| 118 return fBounds.joinPossiblyEmptyRect(otherBounds); | 99 return fBounds.joinPossiblyEmptyRect(otherBounds); |
| 119 } | 100 } |
| 120 | 101 |
| 102 SkRect fBounds; |
| 103 |
| 104 private: |
| 105 virtual bool onCombineIfPossible(GrBatch*, const GrCaps& caps) = 0; |
| 106 |
| 107 static uint32_t GenID(int32_t* idCounter) { |
| 108 // The counter is initialized to kIllegalBatchID (0). The |
| 109 // atomic inc returns the old value not the incremented value. So we add |
| 110 // 1 to the returned value. |
| 111 uint32_t id = static_cast<uint32_t>(sk_atomic_inc(idCounter)) + 1; |
| 112 if (!id) { |
| 113 SkFAIL("This should never wrap as it should only be called once for
each GrBatch " |
| 114 "subclass."); |
| 115 } |
| 116 return id; |
| 117 } |
| 118 |
| 119 enum { |
| 120 kIllegalBatchID = 0, |
| 121 }; |
| 122 |
| 123 uint32_t fClassID; |
| 124 SkDEBUGCODE(bool fUsed;) |
| 125 #if GR_BATCH_SPEW |
| 126 uint32_t fUniqueID; |
| 127 static int32_t gCurrBatchUniqueID; |
| 128 #endif |
| 129 static int32_t gCurrBatchClassID; |
| 130 typedef GrNonAtomicRef INHERITED; |
| 131 }; |
| 132 |
| 133 /** |
| 134 * Base class for GrBatches that draw. These batches have a GrPipeline installed by GrDrawTarget. |
| 135 */ |
| 136 class GrDrawBatch : public GrBatch { |
| 137 public: |
| 138 GrDrawBatch(); |
| 139 ~GrDrawBatch() override; |
| 140 |
| 141 virtual void getInvariantOutputColor(GrInitInvariantOutput* out) const = 0; |
| 142 virtual void getInvariantOutputCoverage(GrInitInvariantOutput* out) const = 0; |
| 143 |
| 144 const GrPipeline* pipeline() const { |
| 145 SkASSERT(fPipelineInstalled); |
| 146 return reinterpret_cast<const GrPipeline*>(fPipelineStorage.get()); |
| 147 } |
| 148 |
| 149 bool installPipeline(const GrPipeline::CreateArgs&); |
| 150 |
| 151 // TODO no GrPrimitiveProcessors yet read fragment position |
| 152 bool willReadFragmentPosition() const { return false; } |
| 153 |
| 154 private: |
| 155 /** |
| 156 * initBatchTracker is a hook for some additional overrides / optimization possibilities |
| 157 * from the GrXferProcessor. |
| 158 */ |
| 159 virtual void initBatchTracker(const GrPipelineOptimizations&) = 0; |
| 160 |
| 161 SkAlignedSTStorage<1, GrPipeline> fPipelineStorage; |
| 162 bool fPipelineInstalled; |
| 163 typedef GrBatch INHERITED; |
| 164 }; |
| 165 |
| 166 /** |
| 167 * Base class for vertex-based GrBatches. |
| 168 */ |
| 169 class GrVertexBatch : public GrDrawBatch { |
| 170 public: |
| 171 GrVertexBatch(); |
| 172 |
| 173 virtual void generateGeometry(GrBatchTarget*) = 0; |
| 174 |
| 175 // TODO this goes away when batches are everywhere |
| 176 void setNumberOfDraws(int numberOfDraws) { fNumberOfDraws = numberOfDraws; } |
| 177 int numberOfDraws() const { return fNumberOfDraws; } |
| 178 |
| 179 protected: |
| 121 /** Helper for rendering instances using an instanced index buffer. This class creates the | 180 /** Helper for rendering instances using an instanced index buffer. This class creates the |
| 122 space for the vertices and flushes the draws to the batch target.*/ | 181 space for the vertices and flushes the draws to the batch target. */ |
| 123 class InstancedHelper { | 182 class InstancedHelper { |
| 124 public: | 183 public: |
| 125 InstancedHelper() {} | 184 InstancedHelper() {} |
| 126 /** Returns the allocated storage for the vertices. The caller should populate the | 185 /** Returns the allocated storage for the vertices. The caller should populate the |
| 127 vertices before calling issueDraw(). */ | 186 vertices before calling issueDraw(). */ |
| 128 void* init(GrBatchTarget* batchTarget, GrPrimitiveType, size_t vertexStride, | 187 void* init(GrBatchTarget* batchTarget, GrPrimitiveType, size_t vertexStride, |
| 129 const GrIndexBuffer*, int verticesPerInstance, int indicesPerInstance, | 188 const GrIndexBuffer*, int verticesPerInstance, int indicesPerInstance, |
| 130 int instancesToDraw); | 189 int instancesToDraw); |
| 131 | 190 |
| 132 /** Call after init() to issue draws to the batch target.*/ | 191 /** Call after init() to issue draws to the batch target.*/ |
| (...skipping 16 matching lines...) |
| 149 and on success a pointer to the vertex data that the caller should populate before | 208 and on success a pointer to the vertex data that the caller should populate before |
| 150 calling issueDraw(). */ | 209 calling issueDraw(). */ |
| 151 void* init(GrBatchTarget* batchTarget, size_t vertexStride, int quadsToDraw); | 210 void* init(GrBatchTarget* batchTarget, size_t vertexStride, int quadsToDraw); |
| 152 | 211 |
| 153 using InstancedHelper::issueDraw; | 212 using InstancedHelper::issueDraw; |
| 154 | 213 |
| 155 private: | 214 private: |
| 156 typedef InstancedHelper INHERITED; | 215 typedef InstancedHelper INHERITED; |
| 157 }; | 216 }; |
| 158 | 217 |
| 159 uint32_t fClassID; | |
| 160 SkRect fBounds; | |
| 161 | |
| 162 private: | 218 private: |
| 163 virtual bool onCombineIfPossible(GrBatch*, const GrCaps& caps) = 0; | |
| 164 | |
| 165 /* | |
| 166 * initBatchTracker is a hook for some additional overrides / optimization possibilities | |
| 167 * from the GrXferProcessor. | |
| 168 */ | |
| 169 virtual void initBatchTracker(const GrPipelineOptimizations&) = 0; | |
| 170 | |
| 171 | |
| 172 static uint32_t GenID(int32_t* idCounter) { | |
| 173 // The counter is initialized to kIllegalBatchID (0). The | |
| 174 // atomic inc returns the old value not the incremented value. So we add | |
| 175 // 1 to the returned value. | |
| 176 uint32_t id = static_cast<uint32_t>(sk_atomic_inc(idCounter)) + 1; | |
| 177 if (!id) { | |
| 178 SkFAIL("This should never wrap as it should only be called once for
each GrBatch " | |
| 179 "subclass."); | |
| 180 } | |
| 181 return id; | |
| 182 } | |
| 183 | |
| 184 enum { | |
| 185 kIllegalBatchID = 0, | |
| 186 }; | |
| 187 SkAlignedSTStorage<1, GrPipeline> fPipelineStorage; | |
| 188 int fNumberOfDraws; | 219 int fNumberOfDraws; |
| 189 SkDEBUGCODE(bool fUsed;) | 220 typedef GrDrawBatch INHERITED; |
| 190 bool fPipelineInstalled; | |
| 191 #if GR_BATCH_SPEW | |
| 192 uint32_t fUniqueID; | |
| 193 static int32_t gCurrBatchUniqueID; | |
| 194 #endif | |
| 195 static int32_t gCurrBatchClassID; | |
| 196 typedef SkRefCnt INHERITED; | |
| 197 }; | 221 }; |
| 198 | 222 |
| 199 #endif | 223 #endif |
| OLD | NEW |
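
To make the batching contract in the header comment concrete, here is a minimal sketch of a subclass as it might look on top of this CL. QuadBatch, Vertex, fColor, and fQuads are invented names for illustration; initClassID<>, cast<>(), setBounds(), joinBounds(), and onCombineIfPossible() are the hooks from the header above, and the invariant-output setters are assumed from GrInvariantOutput.h.

#include "GrBatch.h"            // this header, as of this CL
#include "GrColor.h"
#include "GrInvariantOutput.h"
#include "SkTArray.h"

// Hypothetical batch of solid-color quads; illustration only.
class QuadBatch : public GrVertexBatch {
public:
    QuadBatch(const SkRect& rect, GrColor color) : fColor(color) {
        this->initClassID<QuadBatch>();  // one static class ID per subclass
        fQuads.push_back(rect);
        this->setBounds(rect);           // always compute bounds; never setLargest()
    }

    const char* name() const override { return "QuadBatch"; }

    void getInvariantOutputColor(GrInitInvariantOutput* out) const override {
        out->setKnownFourComponents(fColor);
    }
    void getInvariantOutputCoverage(GrInitInvariantOutput* out) const override {
        out->setKnownSingleComponent(0xff);
    }

    void generateGeometry(GrBatchTarget* batchTarget) override;  // sketched below

private:
    void initBatchTracker(const GrPipelineOptimizations&) override {}

    bool onCombineIfPossible(GrBatch* t, const GrCaps&) override {
        // combineIfPossible() has already matched classID(), so cast<>() is safe.
        QuadBatch* that = t->cast<QuadBatch>();
        if (fColor != that->fColor) {
            return false;  // only draws with identical state can merge
        }
        // Take the union of the data; 'that' is left empty and draws nothing.
        for (int i = 0; i < that->fQuads.count(); ++i) {
            fQuads.push_back(that->fQuads[i]);
        }
        this->joinBounds(that->bounds());
        return true;
    }

    GrColor          fColor;
    SkTArray<SkRect> fQuads;
};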
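
And a sketch of the generateGeometry() side, showing how the QuadHelper kept in GrVertexBatch is meant to be driven: the helper owns the index-buffer bookkeeping, so the subclass only fills in raw vertex memory. The Vertex layout is hypothetical and would have to match the GrGeometryProcessor installed for the batch (not shown); QuadHelper::init() is from this header, while issueDraw()'s exact signature sits in the lines elided from this diff, so the single-argument call is an assumption.

// generateGeometry() for the hypothetical QuadBatch above.
void QuadBatch::generateGeometry(GrBatchTarget* batchTarget) {
    // Invented vertex layout; sizeof(Vertex) must equal the vertex stride of
    // the installed geometry processor.
    struct Vertex {
        SkPoint fPosition;
        GrColor fColor;
    };
    QuadHelper helper;
    // init() reserves 4 vertices per quad against the shared quad index buffer
    // and returns NULL if the vertex space could not be allocated.
    Vertex* verts = reinterpret_cast<Vertex*>(
            helper.init(batchTarget, sizeof(Vertex), fQuads.count()));
    if (!verts) {
        return;
    }
    for (int i = 0; i < fQuads.count(); ++i) {
        // Write the four corners in the order the shared index buffer expects.
        verts[0].fPosition.set(fQuads[i].fLeft,  fQuads[i].fTop);
        verts[1].fPosition.set(fQuads[i].fRight, fQuads[i].fTop);
        verts[2].fPosition.set(fQuads[i].fRight, fQuads[i].fBottom);
        verts[3].fPosition.set(fQuads[i].fLeft,  fQuads[i].fBottom);
        verts[0].fColor = verts[1].fColor = verts[2].fColor = verts[3].fColor = fColor;
        verts += 4;
    }
    helper.issueDraw(batchTarget);  // assumed single-argument signature
}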