| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright 2016 Google Inc. | 2 * Copyright 2016 Google Inc. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
| 5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
| 6 */ | 6 */ |
| 7 | 7 |
| 8 #ifndef GrAuditTrail_DEFINED | 8 #ifndef GrAuditTrail_DEFINED |
| 9 #define GrAuditTrail_DEFINED | 9 #define GrAuditTrail_DEFINED |
| 10 | 10 |
| (...skipping 61 matching lines...) |
| 72 fAuditTrail->setClientID(clientID); | 72 fAuditTrail->setClientID(clientID); |
| 73 } | 73 } |
| 74 | 74 |
| 75 ~AutoCollectBatches() { fAuditTrail->setClientID(kGrAuditTrailInvalidID); } | 75 ~AutoCollectBatches() { fAuditTrail->setClientID(kGrAuditTrailInvalidID); } |
| 76 | 76 |
| 77 private: | 77 private: |
| 78 AutoEnable fAutoEnable; | 78 AutoEnable fAutoEnable; |
| 79 GrAuditTrail* fAuditTrail; | 79 GrAuditTrail* fAuditTrail; |
| 80 }; | 80 }; |
| 81 | 81 |
| 82 void addBatch(const char* name, const SkRect& bounds) { | 82 void addBatch(const char* name, const SkRect& bounds); |
| 83 SkASSERT(fEnabled); | |
| 84 Batch* batch = new Batch; | |
| 85 fBatchPool.emplace_back(batch); | |
| 86 batch->fName = name; | |
| 87 batch->fBounds = bounds; | |
| 88 batch->fClientID = kGrAuditTrailInvalidID; | |
| 89 batch->fBatchListID = kGrAuditTrailInvalidID; | |
| 90 batch->fChildID = kGrAuditTrailInvalidID; | |
| 91 fCurrentBatch = batch; | |
| 92 | |
| 93 if (fClientID != kGrAuditTrailInvalidID) { | |
| 94 batch->fClientID = fClientID; | |
| 95 Batches** batchesLookup = fClientIDLookup.find(fClientID); | |
| 96 Batches* batches = nullptr; | |
| 97 if (!batchesLookup) { | |
| 98 batches = new Batches; | |
| 99 fClientIDLookup.set(fClientID, batches); | |
| 100 } else { | |
| 101 batches = *batchesLookup; | |
| 102 } | |
| 103 | |
| 104 batches->push_back(fCurrentBatch); | |
| 105 } | |
| 106 } | |
| 107 | 83 |
| 108 void batchingResultCombined(GrBatch* combiner); | 84 void batchingResultCombined(GrBatch* combiner); |
| 109 | 85 |
| 110 void batchingResultNew(GrBatch* batch); | 86 void batchingResultNew(GrBatch* batch); |
| 111 | 87 |
| 112 // Because batching is heavily dependent on sequence of draw calls, these calls will only | 88 // Because batching is heavily dependent on sequence of draw calls, these calls will only |
| 113 // produce valid information for the given draw sequence which preceeded them. | 89 // produce valid information for the given draw sequence which preceeded them. |
| 114 // Specifically, future draw calls may change the batching and thus would invalidate | 90 // Specifically, future draw calls may change the batching and thus would invalidate |
| 115 // the json. What this means is that for some sequence of draw calls N, the below toJson | 91 // the json. What this means is that for some sequence of draw calls N, the below toJson |
| 116 // calls will only produce JSON which reflects N draw calls. This JSON may or may not be | 92 // calls will only produce JSON which reflects N draw calls. This JSON may or may not be |
| (...skipping 15 matching lines...) |
| 132 uint32_t fRenderTargetUniqueID; | 108 uint32_t fRenderTargetUniqueID; |
| 133 struct Batch { | 109 struct Batch { |
| 134 int fClientID; | 110 int fClientID; |
| 135 SkRect fBounds; | 111 SkRect fBounds; |
| 136 }; | 112 }; |
| 137 SkTArray<Batch> fBatches; | 113 SkTArray<Batch> fBatches; |
| 138 }; | 114 }; |
| 139 | 115 |
| 140 void getBoundsByClientID(SkTArray<BatchInfo>* outInfo, int clientID); | 116 void getBoundsByClientID(SkTArray<BatchInfo>* outInfo, int clientID); |
| 141 | 117 |
| 142 void fullReset() { | 118 void fullReset(); |
| 143 SkASSERT(fEnabled); | |
| 144 fBatchList.reset(); | |
| 145 fIDLookup.reset(); | |
| 146 // free all client batches | |
| 147 fClientIDLookup.foreach([](const int&, Batches** batches) { delete *batc
hes; }); | |
| 148 fClientIDLookup.reset(); | |
| 149 fBatchPool.reset(); // must be last, frees all of the memory | |
| 150 } | |
| 151 | 119 |
| 152 static const int kGrAuditTrailInvalidID; | 120 static const int kGrAuditTrailInvalidID; |
| 153 | 121 |
| 154 private: | 122 private: |
| 155 // TODO if performance becomes an issue, we can move to using SkVarAlloc | 123 // TODO if performance becomes an issue, we can move to using SkVarAlloc |
| 156 struct Batch { | 124 struct Batch { |
| 157 SkString toJson() const; | 125 SkString toJson() const; |
| 158 SkString fName; | 126 SkString fName; |
| 159 SkRect fBounds; | 127 SkRect fBounds; |
| 160 int fClientID; | 128 int fClientID; |
| (...skipping 42 matching lines...) |
| 203 #define GR_AUDIT_TRAIL_ADDBATCH(audit_trail, batchname, bounds) \ | 171 #define GR_AUDIT_TRAIL_ADDBATCH(audit_trail, batchname, bounds) \ |
| 204 GR_AUDIT_TRAIL_INVOKE_GUARD(audit_trail, addBatch, batchname, bounds); | 172 GR_AUDIT_TRAIL_INVOKE_GUARD(audit_trail, addBatch, batchname, bounds); |
| 205 | 173 |
| 206 #define GR_AUDIT_TRAIL_BATCHING_RESULT_COMBINED(audit_trail, combiner) \ | 174 #define GR_AUDIT_TRAIL_BATCHING_RESULT_COMBINED(audit_trail, combiner) \ |
| 207 GR_AUDIT_TRAIL_INVOKE_GUARD(audit_trail, batchingResultCombined, combiner); | 175 GR_AUDIT_TRAIL_INVOKE_GUARD(audit_trail, batchingResultCombined, combiner); |
| 208 | 176 |
| 209 #define GR_AUDIT_TRAIL_BATCHING_RESULT_NEW(audit_trail, batch) \ | 177 #define GR_AUDIT_TRAIL_BATCHING_RESULT_NEW(audit_trail, batch) \ |
| 210 GR_AUDIT_TRAIL_INVOKE_GUARD(audit_trail, batchingResultNew, batch); | 178 GR_AUDIT_TRAIL_INVOKE_GUARD(audit_trail, batchingResultNew, batch); |
| 211 | 179 |
| 212 #endif | 180 #endif |
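The bodies of addBatch() and fullReset() are dropped from the header in favor of plain declarations. Since the logic itself is not touched by this diff, the definitions presumably move verbatim into GrAuditTrail.cpp as out-of-line methods. A sketch of what those definitions would look like, reconstructed from the OLD column above (the GrAuditTrail:: qualification, the include, and the extra comments are the only additions; the actual .cpp in this change may differ):

```cpp
// Sketch only: reconstructed from the header bodies removed above.
#include "GrAuditTrail.h"

void GrAuditTrail::addBatch(const char* name, const SkRect& bounds) {
    SkASSERT(fEnabled);
    Batch* batch = new Batch;
    fBatchPool.emplace_back(batch);
    batch->fName = name;
    batch->fBounds = bounds;
    batch->fClientID = kGrAuditTrailInvalidID;
    batch->fBatchListID = kGrAuditTrailInvalidID;
    batch->fChildID = kGrAuditTrailInvalidID;
    fCurrentBatch = batch;

    // When a client ID is active (set by AutoCollectBatches), also file the
    // batch under that ID so getBoundsByClientID() can retrieve it later.
    if (fClientID != kGrAuditTrailInvalidID) {
        batch->fClientID = fClientID;
        Batches** batchesLookup = fClientIDLookup.find(fClientID);
        Batches* batches = nullptr;
        if (!batchesLookup) {
            batches = new Batches;
            fClientIDLookup.set(fClientID, batches);
        } else {
            batches = *batchesLookup;
        }

        batches->push_back(fCurrentBatch);
    }
}

void GrAuditTrail::fullReset() {
    SkASSERT(fEnabled);
    fBatchList.reset();
    fIDLookup.reset();
    // Free each per-client batch list before clearing the lookup table.
    fClientIDLookup.foreach([](const int&, Batches** batches) { delete *batches; });
    fClientIDLookup.reset();
    fBatchPool.reset();  // must be last, frees all of the memory
}
```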
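For context on how the declarations and the GR_AUDIT_TRAIL_* macros are meant to be driven: GR_AUDIT_TRAIL_INVOKE_GUARD is defined in one of the skipped regions and presumably no-ops when the audit trail is disabled. A hypothetical call site follows; recordDraw, auditTrail, clientID, batch, and combiner are illustrative names rather than identifiers from this change, and the AutoCollectBatches constructor signature is assumed from its visible body (it stores the audit trail and calls setClientID(clientID)).

```cpp
// Hypothetical usage sketch; none of these names come from the CL itself.
#include "GrAuditTrail.h"

void recordDraw(GrAuditTrail* auditTrail, int clientID, GrBatch* batch, GrBatch* combiner) {
    // Attribute every batch recorded in this scope to clientID; the destructor
    // shown above resets the client ID to kGrAuditTrailInvalidID.
    GrAuditTrail::AutoCollectBatches acb(auditTrail, clientID);

    // Record the batch. The name and bounds are placeholders; the macro
    // forwards to GrAuditTrail::addBatch() via GR_AUDIT_TRAIL_INVOKE_GUARD.
    GR_AUDIT_TRAIL_ADDBATCH(auditTrail, "HypotheticalBatch", SkRect::MakeWH(100, 100));

    // Report how batching turned out for this draw: folded into an existing
    // batch, or left as a brand-new one.
    if (combiner) {
        GR_AUDIT_TRAIL_BATCHING_RESULT_COMBINED(auditTrail, combiner);
    } else {
        GR_AUDIT_TRAIL_BATCHING_RESULT_NEW(auditTrail, batch);
    }
}
```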