| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright 2015 Google Inc. | 2 * Copyright 2015 Google Inc. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
| 5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
| 6 */ | 6 */ |
| 7 | 7 |
| 8 #ifndef GrBatch_DEFINED | 8 #ifndef GrBatch_DEFINED |
| 9 #define GrBatch_DEFINED | 9 #define GrBatch_DEFINED |
| 10 | 10 |
| 11 #include <new> | 11 #include <new> |
| 12 #include "GrBatchTarget.h" | 12 #include "GrBatchTarget.h" |
| 13 #include "GrGeometryProcessor.h" | 13 #include "GrGeometryProcessor.h" |
| 14 #include "GrNonAtomicRef.h" |
| 14 #include "GrVertices.h" | 15 #include "GrVertices.h" |
| 15 #include "SkAtomics.h" | 16 #include "SkAtomics.h" |
| 16 #include "SkRefCnt.h" | |
| 17 #include "SkTypes.h" | 17 #include "SkTypes.h" |
| 18 | 18 |
| 19 class GrGpu; | 19 class GrGpu; |
| 20 class GrPipeline; | 20 class GrPipeline; |
| 21 | 21 |
| 22 struct GrInitInvariantOutput; | 22 struct GrInitInvariantOutput; |
| 23 | 23 |
| 24 /* | 24 /* |
| 25 * GrBatch is the base class for all Ganesh deferred geometry generators. To facilitate | 25 * GrBatch is the base class for all Ganesh deferred geometry generators. To facilitate |
| 26 * reorderable batching, Ganesh does not generate geometry inline with draw calls. Instead, it | 26 * reorderable batching, Ganesh does not generate geometry inline with draw calls. Instead, it |
| 27 * captures the arguments to the draw and then generates the geometry on demand. This gives GrBatch | 27 * captures the arguments to the draw and then generates the geometry on demand. This gives GrBatch |
| 28 * subclasses complete freedom to decide how / what they can batch. | 28 * subclasses complete freedom to decide how / what they can batch. |
| 29 * | 29 * |
| 30 * Batches are created when GrContext processes a draw call. Batches of the same subclass may be | 30 * Batches are created when GrContext processes a draw call. Batches of the same subclass may be |
| 31 * merged using combineIfPossible. When two batches merge, one takes on the union of the data | 31 * merged using combineIfPossible. When two batches merge, one takes on the union of the data |
| 32 * and the other is left empty. The merged batch becomes responsible for drawing the data from both | 32 * and the other is left empty. The merged batch becomes responsible for drawing the data from both |
| 33 * the original batches. | 33 * the original batches. |
| 34 * | 34 * |
| 35 * If there are any possible optimizations which might require knowing more about the full state of | 35 * If there are any possible optimizations which might require knowing more about the full state of |
| 36 * the draw, i.e. whether or not the GrBatch is allowed to tweak alpha for coverage, then this | 36 * the draw, i.e. whether or not the GrBatch is allowed to tweak alpha for coverage, then this |
| 37 * information will be communicated to the GrBatch prior to geometry generation. | 37 * information will be communicated to the GrBatch prior to geometry generation. |
| 38 */ | 38 */ |
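
To make the merge flow described above concrete, here is a minimal caller-side sketch. Only combineIfPossible() and the ref-counting come from this header; the pending-list management, the SkTDArray usage, and the enqueueBatch name are illustrative assumptions, not part of this CL.

    // Hypothetical caller-side batching loop: fold a new batch into the
    // tail of a pending list when the two batches can merge.
    #include "SkTDArray.h"

    void enqueueBatch(SkTDArray<GrBatch*>* pending, GrBatch* batch) {
        if (!pending->isEmpty()) {
            GrBatch* tail = (*pending)[pending->count() - 1];
            if (tail->combineIfPossible(batch)) {
                // 'tail' now owns the union of both draws; 'batch' is left
                // empty, so we drop our ref and never draw it.
                batch->unref();
                return;
            }
        }
        *pending->append() = SkRef(batch);
    }
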
| 39 | 39 |
| 40 class GrBatch : public SkRefCnt { | 40 class GrBatch : public GrNonAtomicRef { |
| 41 public: | 41 public: |
| 42 | |
| 43 GrBatch() : fClassID(kIllegalBatchClassID), fNumberOfDraws(0) { SkDEBUGCODE(fUsed = false;) } | 42 GrBatch() : fClassID(kIllegalBatchClassID), fNumberOfDraws(0) { SkDEBUGCODE(fUsed = false;) } |
| 44 virtual ~GrBatch() {} | 43 virtual ~GrBatch() {} |
| 45 | 44 |
| 46 virtual const char* name() const = 0; | 45 virtual const char* name() const = 0; |
| 47 virtual void getInvariantOutputColor(GrInitInvariantOutput* out) const = 0; | 46 virtual void getInvariantOutputColor(GrInitInvariantOutput* out) const = 0; |
| 48 virtual void getInvariantOutputCoverage(GrInitInvariantOutput* out) const = 0; | 47 virtual void getInvariantOutputCoverage(GrInitInvariantOutput* out) const = 0; |
| 49 | 48 |
| 50 /* | 49 /* |
| 51 * initBatchTracker is a hook for the some additional overrides / optimizati
on possibilities | 50 * initBatchTracker is a hook for the some additional overrides / optimizati
on possibilities |
| 52 * from the GrXferProcessor. | 51 * from the GrXferProcessor. |
| 53 */ | 52 */ |
| 54 virtual void initBatchTracker(const GrPipelineInfo& init) = 0; | 53 virtual void initBatchTracker(const GrPipelineInfo& init) = 0; |
| 55 | 54 |
| 56 bool combineIfPossible(GrBatch* that) { | 55 bool combineIfPossible(GrBatch* that) { |
| 57 if (this->classID() != that->classID()) { | 56 if (this->classID() != that->classID()) { |
| 58 return false; | 57 return false; |
| 59 } | 58 } |
| 60 | 59 |
| 60 if (!this->pipeline()->isEqual(*that->pipeline())) { |
| 61 return false; |
| 62 } |
| 63 |
| 61 return this->onCombineIfPossible(that); | 64 return this->onCombineIfPossible(that); |
| 62 } | 65 } |
| 63 | 66 |
| 64 virtual bool onCombineIfPossible(GrBatch*) = 0; | 67 virtual bool onCombineIfPossible(GrBatch*) = 0; |
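
For reference, a subclass's onCombineIfPossible() might look like the sketch below; classID and pipeline equality are already checked by combineIfPossible(), so the override only compares batch-specific state. RectBatch, fGeoData, and usesLocalCoords() are placeholder names.

    // Hypothetical override: append the other batch's geometry and grow our
    // bounds, leaving 'that' empty per the contract in the class comment.
    bool RectBatch::onCombineIfPossible(GrBatch* t) {
        RectBatch* that = t->cast<RectBatch>();
        if (this->usesLocalCoords() != that->usesLocalCoords()) {
            return false;  // invented compatibility check for illustration
        }
        fGeoData.push_back_n(that->fGeoData.count(), that->fGeoData.begin());
        this->joinBounds(that->bounds());
        return true;
    }
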
| 65 | 68 |
| 66 virtual void generateGeometry(GrBatchTarget*, const GrPipeline*) = 0; | 69 virtual void generateGeometry(GrBatchTarget*, const GrPipeline*) = 0; |
| 67 | 70 |
| 68 const SkRect& bounds() const { return fBounds; } | 71 const SkRect& bounds() const { return fBounds; } |
| 69 | 72 |
| 70 // TODO this goes away when batches are everywhere | 73 // TODO this goes away when batches are everywhere |
| (...skipping 16 matching lines...) |
| 87 template <typename T> const T& cast() const { return *static_cast<const T*>(this); } | 90 template <typename T> const T& cast() const { return *static_cast<const T*>(this); } |
| 88 template <typename T> T* cast() { return static_cast<T*>(this); } | 91 template <typename T> T* cast() { return static_cast<T*>(this); } |
| 89 | 92 |
| 90 uint32_t classID() const { SkASSERT(kIllegalBatchClassID != fClassID); return fClassID; } | 93 uint32_t classID() const { SkASSERT(kIllegalBatchClassID != fClassID); return fClassID; } |
| 91 | 94 |
| 92 // TODO no GrPrimitiveProcessors yet read fragment position | 95 // TODO no GrPrimitiveProcessors yet read fragment position |
| 93 bool willReadFragmentPosition() const { return false; } | 96 bool willReadFragmentPosition() const { return false; } |
| 94 | 97 |
| 95 SkDEBUGCODE(bool isUsed() const { return fUsed; }) | 98 SkDEBUGCODE(bool isUsed() const { return fUsed; }) |
| 96 | 99 |
| 100 void setPipeline(const GrPipeline* pipeline) { fPipeline.reset(SkRef(pipeline)); } |
| 101 |
| 97 protected: | 102 protected: |
| 98 template <typename PROC_SUBCLASS> void initClassID() { | 103 template <typename PROC_SUBCLASS> void initClassID() { |
| 99 static uint32_t kClassID = GenClassID(); | 104 static uint32_t kClassID = GenClassID(); |
| 100 fClassID = kClassID; | 105 fClassID = kClassID; |
| 101 } | 106 } |
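
Expected usage of initClassID(), mirroring the GrProcessor pattern, is a one-line call in each concrete subclass's constructor; the constructor body shown here is illustrative.

    // Hypothetical subclass constructor: claim this subclass's unique ID
    // once, then record geometry and conservative bounds.
    RectBatch::RectBatch(const Geometry& geo) {  // Geometry is a placeholder type
        this->initClassID<RectBatch>();
        fGeoData.push_back(geo);
        this->setBounds(geo.fRect);              // placeholder field
    }
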
| 102 | 107 |
| 103 uint32_t fClassID; | |
| 104 | |
| 105 // NOTE, compute some bounds, even if extremely conservative. Do *NOT* setLargest on the bounds | 108 // NOTE, compute some bounds, even if extremely conservative. Do *NOT* setLargest on the bounds |
| 106 // rect because we outset it for dst copy textures | 109 // rect because we outset it for dst copy textures |
| 107 void setBounds(const SkRect& newBounds) { fBounds = newBounds; } | 110 void setBounds(const SkRect& newBounds) { fBounds = newBounds; } |
| 108 | 111 |
| 109 void joinBounds(const SkRect& otherBounds) { | 112 void joinBounds(const SkRect& otherBounds) { |
| 110 return fBounds.joinPossiblyEmptyRect(otherBounds); | 113 return fBounds.joinPossiblyEmptyRect(otherBounds); |
| 111 } | 114 } |
| 112 | 115 |
| 116 const GrPipeline* pipeline() const { return fPipeline; } |
| 117 |
| 113 /** Helper for rendering instances using an instanced index buffer. This class creates the | 118 /** Helper for rendering instances using an instanced index buffer. This class creates the |
| 114 space for the vertices and flushes the draws to the batch target.*/ | 119 space for the vertices and flushes the draws to the batch target.*/ |
| 115 class InstancedHelper { | 120 class InstancedHelper { |
| 116 public: | 121 public: |
| 117 InstancedHelper() {} | 122 InstancedHelper() {} |
| 118 /** Returns the allocated storage for the vertices. The caller should populate the | 123 /** Returns the allocated storage for the vertices. The caller should populate the |
| 119 vertices before calling issueDraw(). */ | 124 vertices before calling issueDraw(). */ |
| 120 void* init(GrBatchTarget* batchTarget, GrPrimitiveType, size_t vertexStride, | 125 void* init(GrBatchTarget* batchTarget, GrPrimitiveType, size_t vertexStride, |
| 121 const GrIndexBuffer*, int verticesPerInstance, int indicesPerInstance, | 126 const GrIndexBuffer*, int verticesPerInstance, int indicesPerInstance, |
| 122 int instancesToDraw); | 127 int instancesToDraw); |
| (...skipping 18 matching lines...) |
| 141 and on success a pointer to the vertex data that the caller should populate before | 146 and on success a pointer to the vertex data that the caller should populate before |
| 142 calling issueDraw(). */ | 147 calling issueDraw(). */ |
| 143 void* init(GrBatchTarget* batchTarget, size_t vertexStride, int quadsToDraw); | 148 void* init(GrBatchTarget* batchTarget, size_t vertexStride, int quadsToDraw); |
| 144 | 149 |
| 145 using InstancedHelper::issueDraw; | 150 using InstancedHelper::issueDraw; |
| 146 | 151 |
| 147 private: | 152 private: |
| 148 typedef InstancedHelper INHERITED; | 153 typedef InstancedHelper INHERITED; |
| 149 }; | 154 }; |
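
To show how these helpers are meant to be driven, here is a sketch of a generateGeometry() that uses QuadHelper. The geometry-processor factory and the vertex writing are elided or assumed, and the GrBatchTarget::initDraw() call reflects this era's API as I understand it; only init() and issueDraw() are the helper entry points declared above.

    // Hypothetical generateGeometry(): one quad per geometry record.
    void RectBatch::generateGeometry(GrBatchTarget* batchTarget, const GrPipeline* pipeline) {
        SkAutoTUnref<const GrGeometryProcessor> gp(CreateRectGP());  // assumed factory
        batchTarget->initDraw(gp, pipeline);
        QuadHelper helper;
        void* verts = helper.init(batchTarget, gp->getVertexStride(), fGeoData.count());
        if (!verts) {
            return;  // vertex space allocation failed; drop the draw
        }
        // ... write 4 vertices per geometry record into 'verts' ...
        helper.issueDraw(batchTarget);
    }
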
| 150 | 155 |
| 156 uint32_t fClassID; |
| 151 SkRect fBounds; | 157 SkRect fBounds; |
| 152 | 158 |
| 153 private: | 159 private: |
| 154 static uint32_t GenClassID() { | 160 static uint32_t GenClassID() { |
| 155 // gCurrBatchClassID has been initialized to kIllegalBatchClassID. The | 161 // gCurrBatchClassID has been initialized to kIllegalBatchClassID. The |
| 156 // atomic inc returns the old value not the incremented value. So we add | 162 // atomic inc returns the old value not the incremented value. So we add |
| 157 // 1 to the returned value. | 163 // 1 to the returned value. |
| 158 uint32_t id = static_cast<uint32_t>(sk_atomic_inc(&gCurrBatchClassID)) + 1; | 164 uint32_t id = static_cast<uint32_t>(sk_atomic_inc(&gCurrBatchClassID)) + 1; |
| 159 if (!id) { | 165 if (!id) { |
| 160 SkFAIL("This should never wrap as it should only be called once for
each GrBatch " | 166 SkFAIL("This should never wrap as it should only be called once for
each GrBatch " |
| 161 "subclass."); | 167 "subclass."); |
| 162 } | 168 } |
| 163 return id; | 169 return id; |
| 164 } | 170 } |
| 165 | 171 |
| 166 enum { | 172 enum { |
| 167 kIllegalBatchClassID = 0, | 173 kIllegalBatchClassID = 0, |
| 168 }; | 174 }; |
| 175 SkAutoTUnref<const GrPipeline> fPipeline; |
| 169 static int32_t gCurrBatchClassID; | 176 static int32_t gCurrBatchClassID; |
| 170 | 177 int fNumberOfDraws; |
| 171 SkDEBUGCODE(bool fUsed;) | 178 SkDEBUGCODE(bool fUsed;) |
| 172 | 179 |
| 173 int fNumberOfDraws; | |
| 174 | |
| 175 typedef SkRefCnt INHERITED; | 180 typedef GrNonAtomicRef INHERITED; |
| 176 }; | 181 }; |
| 177 | 182 |
| 178 #endif | 183 #endif |