| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright 2015 Google Inc. | 2 * Copyright 2015 Google Inc. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
| 5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
| 6 */ | 6 */ |
| 7 | 7 |
| 8 #ifndef GrBatch_DEFINED | 8 #ifndef GrBatch_DEFINED |
| 9 #define GrBatch_DEFINED | 9 #define GrBatch_DEFINED |
| 10 | 10 |
| 11 #include <new> | 11 #include <new> |
| 12 // TODO remove this header when we move entirely to batch | 12 // TODO remove this header when we move entirely to batch |
| 13 #include "GrDrawTarget.h" | 13 #include "GrDrawTarget.h" |
| 14 #include "GrBatchTarget.h" |
| 14 #include "GrGeometryProcessor.h" | 15 #include "GrGeometryProcessor.h" |
| 15 #include "SkRefCnt.h" | 16 #include "SkRefCnt.h" |
| 16 #include "SkThread.h" | 17 #include "SkThread.h" |
| 17 #include "SkTypes.h" | 18 #include "SkTypes.h" |
| 18 | 19 |
| 19 class GrBatchTarget; | |
| 20 class GrGpu; | 20 class GrGpu; |
| 21 class GrIndexBufferAllocPool; | 21 class GrIndexBufferAllocPool; |
| 22 class GrPipeline; | 22 class GrPipeline; |
| 23 class GrVertexBufferAllocPool; | 23 class GrVertexBufferAllocPool; |
| 24 | 24 |
| 25 struct GrInitInvariantOutput; | 25 struct GrInitInvariantOutput; |
| 26 | 26 |
| 27 /* | 27 /* |
| 28 * GrBatch is the base class for all Ganesh deferred geometry generators. To fa
cilitate | 28 * GrBatch is the base class for all Ganesh deferred geometry generators. To fa
cilitate |
| 29 * reorderable batching, Ganesh does not generate geometry inline with draw call
s. Instead, it | 29 * reorderable batching, Ganesh does not generate geometry inline with draw call
s. Instead, it |
| (...skipping 76 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 106 uint32_t fClassID; | 106 uint32_t fClassID; |
| 107 | 107 |
| 108 // NOTE, compute some bounds, even if extremely conservative. Do *NOT* setL
argest on the bounds | 108 // NOTE, compute some bounds, even if extremely conservative. Do *NOT* setL
argest on the bounds |
| 109 // rect because we outset it for dst copy textures | 109 // rect because we outset it for dst copy textures |
| 110 void setBounds(const SkRect& newBounds) { fBounds = newBounds; } | 110 void setBounds(const SkRect& newBounds) { fBounds = newBounds; } |
| 111 | 111 |
| 112 void joinBounds(const SkRect& otherBounds) { | 112 void joinBounds(const SkRect& otherBounds) { |
| 113 return fBounds.joinPossiblyEmptyRect(otherBounds); | 113 return fBounds.joinPossiblyEmptyRect(otherBounds); |
| 114 } | 114 } |
| 115 | 115 |
| 116 /** Helper for rendering instances using an instanced index buffer. Th
is class creates the |
| 117 space for the vertices and flushes the draws to the batch target.*/ |
| 118 class InstancedHelper { |
| 119 public: |
| 120 InstancedHelper() : fInstancesRemaining(0) {} |
| 121 /** Returns the allocated storage for the vertices. The caller should po
pulate the |
| 122 vertices before calling issueDraws(). */ |
| 123 void* init(GrBatchTarget* batchTarget, GrPrimitiveType, size_t vertexStr
ide, |
| 124 const GrIndexBuffer*, int verticesPerInstance, int indicesPer
Instance, |
| 125 int instancesToDraw); |
| 126 |
| 127 /** Call after init() to issue draws to the batch target.*/ |
| 128 void issueDraws(GrBatchTarget* batchTarget) { |
| 129 SkASSERT(fDrawInfo.instanceCount()); |
| 130 do { |
| 131 batchTarget->draw(fDrawInfo); |
| 132 } while (fDrawInfo.nextInstances(&fInstancesRemaining, fMaxInstances
PerDraw)); |
| 133 } |
| 134 private: |
| 135 int fInstancesRemaining; |
| 136 int fMaxInstancesPerDraw; |
| 137 GrDrawTarget::DrawInfo fDrawInfo; |
| 138 }; |
| 139 |
| 140 static const int kVerticesPerQuad = 4; |
| 141 static const int kIndicesPerQuad = 6; |
| 142 |
| 143 /** A specialization of InstancedHelper for quad rendering. */ |
| 144 class QuadHelper : private InstancedHelper { |
| 145 public: |
| 146 QuadHelper() : INHERITED() {} |
| 147 /** Finds the cached quad index buffer and reserves vertex space. Return
s NULL on failure |
| 148 and on success a pointer to the vertex data that the caller should po
pulate before |
| 149 calling issueDraws(). */ |
| 150 void* init(GrBatchTarget* batchTarget, size_t vertexStride, int quadsToD
raw); |
| 151 |
| 152 using InstancedHelper::issueDraws; |
| 153 |
| 154 private: |
| 155 typedef InstancedHelper INHERITED; |
| 156 }; |
| 157 |
| 116 SkRect fBounds; | 158 SkRect fBounds; |
| 117 | 159 |
| 118 private: | 160 private: |
| 119 static uint32_t GenClassID() { | 161 static uint32_t GenClassID() { |
| 120 // fCurrProcessorClassID has been initialized to kIllegalProcessorClassI
D. The | 162 // fCurrProcessorClassID has been initialized to kIllegalProcessorClassI
D. The |
| 121 // atomic inc returns the old value not the incremented value. So we add | 163 // atomic inc returns the old value not the incremented value. So we add |
| 122 // 1 to the returned value. | 164 // 1 to the returned value. |
| 123 uint32_t id = static_cast<uint32_t>(sk_atomic_inc(&gCurrBatchClassID)) +
1; | 165 uint32_t id = static_cast<uint32_t>(sk_atomic_inc(&gCurrBatchClassID)) +
1; |
| 124 if (!id) { | 166 if (!id) { |
| 125 SkFAIL("This should never wrap as it should only be called once for
each GrBatch " | 167 SkFAIL("This should never wrap as it should only be called once for
each GrBatch " |
| 126 "subclass."); | 168 "subclass."); |
| 127 } | 169 } |
| 128 return id; | 170 return id; |
| 129 } | 171 } |
| 130 | 172 |
| 131 enum { | 173 enum { |
| 132 kIllegalBatchClassID = 0, | 174 kIllegalBatchClassID = 0, |
| 133 }; | 175 }; |
| 134 static int32_t gCurrBatchClassID; | 176 static int32_t gCurrBatchClassID; |
| 135 | 177 |
| 136 SkDEBUGCODE(bool fUsed;) | 178 SkDEBUGCODE(bool fUsed;) |
| 137 | 179 |
| 138 int fNumberOfDraws; | 180 int fNumberOfDraws; |
| 139 | 181 |
| 140 typedef SkRefCnt INHERITED; | 182 typedef SkRefCnt INHERITED; |
| 141 }; | 183 }; |
| 142 | 184 |
| 143 #endif | 185 #endif |
| OLD | NEW |