| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright 2015 Google Inc. | 2 * Copyright 2015 Google Inc. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
| 5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
| 6 */ | 6 */ |
| 7 | 7 |
| 8 #ifndef GrBatch_DEFINED | 8 #ifndef GrBatch_DEFINED |
| 9 #define GrBatch_DEFINED | 9 #define GrBatch_DEFINED |
| 10 | 10 |
| (...skipping 76 matching lines...) |
| 87 template <typename T> T* cast() { return static_cast<T*>(this); } | 87 template <typename T> T* cast() { return static_cast<T*>(this); } |
| 88 | 88 |
| 89 uint32_t classID() const { SkASSERT(kIllegalBatchClassID != fClassID); return fClassID; } | 89 uint32_t classID() const { SkASSERT(kIllegalBatchClassID != fClassID); return fClassID; } |
| 90 | 90 |
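
Side note on the two accessors above: classID() is the cheap same-type check that guards the unchecked static_cast inside cast<T>(). A hypothetical caller-side sketch (MyBatch and CanMerge are illustrative names, not part of this header):

    // Only attempt to merge batches of the same concrete type; the
    // classID() comparison is what makes the unchecked downcast in
    // cast<T>() safe.
    bool CanMerge(GrBatch* a, GrBatch* b) {
        if (a->classID() != b->classID()) {
            return false;  // different subclasses never merge
        }
        MyBatch* left = a->cast<MyBatch>();   // safe: same classID
        MyBatch* right = b->cast<MyBatch>();
        // ... plus whatever subclass-specific compatibility checks apply
        return left->pipeline() == right->pipeline();
    }
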
| 91 // TODO no GrPrimitiveProcessors yet read fragment position | 91 // TODO no GrPrimitiveProcessors yet read fragment position |
| 92 bool willReadFragmentPosition() const { return false; } | 92 bool willReadFragmentPosition() const { return false; } |
| 93 | 93 |
| 94 SkDEBUGCODE(bool isUsed() const { return fUsed; }) | 94 SkDEBUGCODE(bool isUsed() const { return fUsed; }) |
| 95 | 95 |
| 96 const GrPipeline* pipeline() const { return fPipeline; } | 96 const GrPipeline* pipeline() const { return fPipeline; } |
| 97 void setPipeline(const GrPipeline* pipeline) { fPipeline.reset(SkRef(pipeline)); } | 97 void setPipeline(const GrPipeline* pipeline) { |
| 98 fPipeline.reset(SkRef(pipeline)); |
| 99 this->onSetPipeline(); |
| 100 } |
| 98 | 101 |
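
This is the substance of the change: setPipeline() keeps its bookkeeping but now notifies the concrete batch through a protected virtual hook, declared further down with an empty default body so existing subclasses compile unchanged. A minimal standalone sketch of the pattern, with stand-in types rather than Skia's:

    #include <cstdio>

    class Batch {
    public:
        virtual ~Batch() {}
        // Non-virtual setter does the bookkeeping, then fires the hook.
        void setPipeline(const char* pipeline) {
            fPipeline = pipeline;        // stands in for fPipeline.reset(SkRef(...))
            this->onSetPipeline();       // subclass notification added by this CL
        }
    protected:
        virtual void onSetPipeline() {}  // default: do nothing
        const char* fPipeline = nullptr;
    };

    class QuadBatch : public Batch {
    protected:
        void onSetPipeline() override {
            std::printf("pipeline attached: %s\n", fPipeline);
        }
    };

    int main() {
        QuadBatch b;
        b.setPipeline("opaque-rect-pipeline");  // prints via the hook
        return 0;
    }
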
| 99 protected: | 102 protected: |
| 100 template <typename PROC_SUBCLASS> void initClassID() { | 103 template <typename PROC_SUBCLASS> void initClassID() { |
| 101 static uint32_t kClassID = GenClassID(); | 104 static uint32_t kClassID = GenClassID(); |
| 102 fClassID = kClassID; | 105 fClassID = kClassID; |
| 103 } | 106 } |
| 104 | 107 |
| 105 // NOTE, compute some bounds, even if extremely conservative. Do *NOT* setLargest on the bounds | 108 // NOTE, compute some bounds, even if extremely conservative. Do *NOT* setLargest on the bounds |
| 106 // rect because we outset it for dst copy textures | 109 // rect because we outset it for dst copy textures |
| 107 void setBounds(const SkRect& newBounds) { fBounds = newBounds; } | 110 void setBounds(const SkRect& newBounds) { fBounds = newBounds; } |
| (...skipping 49 matching lines...) |
| 157 // atomic inc returns the old value not the incremented value. So we add | 160 // atomic inc returns the old value not the incremented value. So we add |
| 158 // 1 to the returned value. | 161 // 1 to the returned value. |
| 159 uint32_t id = static_cast<uint32_t>(sk_atomic_inc(&gCurrBatchClassID)) + 1; | 162 uint32_t id = static_cast<uint32_t>(sk_atomic_inc(&gCurrBatchClassID)) + 1; |
| 160 if (!id) { | 163 if (!id) { |
| 161 SkFAIL("This should never wrap as it should only be called once for
each GrBatch " | 164 SkFAIL("This should never wrap as it should only be called once for
each GrBatch " |
| 162 "subclass."); | 165 "subclass."); |
| 163 } | 166 } |
| 164 return id; | 167 return id; |
| 165 } | 168 } |
| 166 | 169 |
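
For context, the ID scheme works because initClassID<T>() (earlier in the header) instantiates one function-local static per subclass T, so GenClassID() runs exactly once per subclass. A standalone sketch of the mechanism, with std::atomic standing in for sk_atomic_inc:

    #include <atomic>
    #include <cassert>
    #include <cstdint>

    static std::atomic<int32_t> gCurrBatchClassID(0);

    static uint32_t GenClassID() {
        // fetch_add returns the pre-increment value, so add 1; ID 0 stays
        // reserved as the illegal sentinel.
        uint32_t id = static_cast<uint32_t>(gCurrBatchClassID.fetch_add(1)) + 1;
        assert(id != 0);  // would only wrap after 2^32 subclasses
        return id;
    }

    struct Batch {
        uint32_t fClassID = 0;
        template <typename T> void initClassID() {
            static uint32_t kClassID = GenClassID();  // runs once per distinct T
            fClassID = kClassID;
        }
    };

    struct RectBatch : Batch { RectBatch() { initClassID<RectBatch>(); } };
    struct OvalBatch : Batch { OvalBatch() { initClassID<OvalBatch>(); } };
    // All RectBatch instances share one ID; RectBatch and OvalBatch differ.
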
| 170 virtual void onSetPipeline() {} |
| 171 |
| 167 enum { | 172 enum { |
| 168 kIllegalBatchClassID = 0, | 173 kIllegalBatchClassID = 0, |
| 169 }; | 174 }; |
| 170 SkAutoTUnref<const GrPipeline> fPipeline; | 175 SkAutoTUnref<const GrPipeline> fPipeline; |
| 171 static int32_t gCurrBatchClassID; | 176 static int32_t gCurrBatchClassID; |
| 172 int fNumberOfDraws; | 177 int fNumberOfDraws; |
| 173 SkDEBUGCODE(bool fUsed;) | 178 SkDEBUGCODE(bool fUsed;) |
| 174 | 179 |
| 175 typedef SkRefCnt INHERITED; | 180 typedef SkRefCnt INHERITED; |
| 176 }; | 181 }; |
| 177 | 182 |
| 178 #endif | 183 #endif |