Chromium Code Reviews

Diff: src/gpu/GrLayerCache.h

Issue 753253002: Use variable length key (rather than accumulated matrix) as save layer hoisting key (Closed)
Base URL: https://skia.googlesource.com/skia.git@master
Patch Set: more cleanup (created 6 years ago)
 /*
  * Copyright 2014 Google Inc.
  *
  * Use of this source code is governed by a BSD-style license that can be
  * found in the LICENSE file.
  */

 #ifndef GrLayerCache_DEFINED
 #define GrLayerCache_DEFINED

(... skipping 58 matching lines ...)
 // get a ref to the GrTexture in which they reside. In both cases 'fRect'
 // contains the layer's extent in its texture.
 // Atlased layers also get a pointer to the plot in which they reside.
 // For non-atlased layers, the lock field just corresponds to locking in
 // the resource cache. For atlased layers, it implements an additional level
 // of locking to allow atlased layers to be reused multiple times.
 struct GrCachedLayer {
 public:
     // For SkTDynamicHash
     struct Key {
-        Key(uint32_t pictureID, int start, const SkIRect& bounds, const SkMatrix& ctm)
-            : fPictureID(pictureID)
-            , fStart(start)
-            , fBounds(bounds)
-            , fCTM(ctm) {
-            fCTM.getType(); // force initialization of type so hashes match
-
-            // Key needs to be tightly packed.
-            GR_STATIC_ASSERT(sizeof(Key) == sizeof(uint32_t) +          // picture ID
-                                            sizeof(int) +               // start index
-                                            4 * sizeof(uint32_t) +      // bounds
-                                            9 * sizeof(SkScalar) + sizeof(uint32_t)); // matrix
-        }
+        Key(uint32_t pictureID, const SkMatrix& initialMat,
+            const int* key, int keySize, bool copyKey = false)
+            : fKeySize(keySize)
+            , fFreeKey(copyKey) {
+            fIDMatrix.fPictureID = pictureID;
+            fIDMatrix.fInitialMat = initialMat;
+            fIDMatrix.fInitialMat.getType(); // force initialization of type so hashes match
+
+            if (copyKey) {
+                int* tempKey = SkNEW_ARRAY(int, keySize);
+                memcpy(tempKey, key, keySize*sizeof(int));
+                fKey = tempKey;
+            } else {
+                fKey = key;
+            }
+
+            // The pictureID/matrix portion needs to be tightly packed.
+            GR_STATIC_ASSERT(sizeof(IDMatrix) == sizeof(uint32_t) +                        // pictureID
+                                                 9 * sizeof(SkScalar) + sizeof(uint32_t)); // matrix
+        }
+
+        ~Key() {
+            if (fFreeKey) {
+                SkDELETE_ARRAY(fKey);
+            }
+        }

bsalomon 2014/12/01 16:40:47 (on the new Key constructor):
    Is the not copy optimization worth the risk of get…

robertphillips 2014/12/01 16:59:08:
    The not-copy option is used in the lookup case in…
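A rough sketch of the two construction modes being weighed above; the picture ID, matrix, and index values are invented for illustration, and only GrCachedLayer::Key itself comes from this patch:

    uint32_t pictureID = 1;                 // hypothetical picture ID
    SkMatrix initialMat = SkMatrix::I();    // initial matrix passed into drawPicture
    int indices[] = { 0, 7, 12 };           // hypothetical variable-length hoisting key

    // Lookup case (copyKey defaults to false): the Key merely points at the caller-owned
    // array -- no allocation, but it must not outlive 'indices' (the risk raised above).
    GrCachedLayer::Key lookupKey(pictureID, initialMat, indices, 3);

    // Storage case (copyKey = true): the Key clones the array with SkNEW_ARRAY and frees
    // it in ~Key(); this is the mode GrCachedLayer uses for the key it retains.
    GrCachedLayer::Key storedKey(pictureID, initialMat, indices, 3, true);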
         bool operator==(const Key& other) const {
-            return fPictureID == other.fPictureID &&
-                   fStart == other.fStart &&
-                   fBounds == other.fBounds &&
-                   fCTM.cheapEqualTo(other.fCTM);
+            if (fKeySize != other.fKeySize) {
+                return false;
+            }
+            return fIDMatrix.fPictureID == other.fIDMatrix.fPictureID &&
+                   fIDMatrix.fInitialMat.cheapEqualTo(other.fIDMatrix.fInitialMat) &&
+                   !memcmp(fKey, other.fKey, fKeySize * sizeof(int));
         }

-        uint32_t pictureID() const { return fPictureID; }
-        int start() const { return fStart; }
-        const SkIRect& bound() const { return fBounds; }
+        uint32_t pictureID() const { return fIDMatrix.fPictureID; }
+
+        // TODO: remove these when GrCachedLayer & ReplacementInfo fuse
+        const int* key() const { SkASSERT(fFreeKey); return fKey; }
+        int keySize() const { SkASSERT(fFreeKey); return fKeySize; }
+
+        uint32_t hash() const {
+            uint32_t hash = SkChecksum::Murmur3(reinterpret_cast<const uint32_t*>(fKey),
+                                                fKeySize * sizeof(int));
+            return SkChecksum::Murmur3(reinterpret_cast<const uint32_t*>(&fIDMatrix),
+                                       sizeof(IDMatrix), hash);
+        }

     private:
-        // ID of the picture of which this layer is a part
-        const uint32_t fPictureID;
-        // The the index of the saveLayer command in the picture
-        const int      fStart;
-        // The bounds of the layer. The TL corner is its offset.
-        const SkIRect  fBounds;
-        // The 2x2 portion of the CTM applied to this layer in the picture
-        SkMatrix       fCTM;
+        struct IDMatrix {
+            // ID of the picture of which this layer is a part
+            uint32_t fPictureID;
+            // The initial matrix passed into drawPicture
+            SkMatrix fInitialMat;
+        }              fIDMatrix;
+
+        const int*     fKey;
+        const int      fKeySize;
+        bool           fFreeKey;
     };

     static const Key& GetKey(const GrCachedLayer& layer) { return layer.fKey; }
-    static uint32_t Hash(const Key& key) {
-        return SkChecksum::Murmur3(reinterpret_cast<const uint32_t*>(&key), sizeof(Key));
-    }
+    static uint32_t Hash(const Key& key) { return key.hash(); }

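Because the Key now holds a pointer to out-of-line, variable-length data, hashing sizeof(Key) raw bytes (the old Hash) would mix in the pointer value rather than the key contents, so Hash now delegates to key.hash(), which walks the int array and then the tightly packed IDMatrix. A small illustrative check of what that buys -- not code from this patch:

    // Two keys with equal contents but different backing arrays compare and hash equal,
    // which hashing the raw Key bytes (they include the fKey pointer) could not guarantee.
    int a[] = { 4, 9 };
    int b[] = { 4, 9 };
    GrCachedLayer::Key k1(1, SkMatrix::I(), a, 2);
    GrCachedLayer::Key k2(1, SkMatrix::I(), b, 2);
    SkASSERT(k1 == k2);
    SkASSERT(k1.hash() == k2.hash());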
     // GrCachedLayer proper
     GrCachedLayer(uint32_t pictureID, int start, int stop,
                   const SkIRect& bounds, const SkMatrix& ctm,
+                  const int* key, int keySize,
                   const SkPaint* paint)
-        : fKey(pictureID, start, bounds, ctm)
+        : fKey(pictureID, ctm, key, keySize, true)
+        , fStart(start)
         , fStop(stop)
+        , fBounds(bounds)
         , fPaint(paint ? SkNEW_ARGS(SkPaint, (*paint)) : NULL)
         , fTexture(NULL)
         , fRect(GrIRect16::MakeEmpty())
         , fPlot(NULL)
         , fUses(0)
         , fLocked(false) {
         SkASSERT(SK_InvalidGenID != pictureID && start >= 0 && stop >= 0);
     }

     ~GrCachedLayer() {
         SkSafeUnref(fTexture);
         SkDELETE(fPaint);
     }

     uint32_t pictureID() const { return fKey.pictureID(); }
-    int start() const { return fKey.start(); }
-    const SkIRect& bound() const { return fKey.bound(); }
+    // TODO: remove these when GrCachedLayer & ReplacementInfo fuse
+    const int* key() const { return fKey.key(); }
+    int keySize() const { return fKey.keySize(); }

+    int start() const { return fStart; }
+    // TODO: make bound debug only
+    const SkIRect& bound() const { return fBounds; }
     int stop() const { return fStop; }
     void setTexture(GrTexture* texture, const GrIRect16& rect) {
         SkRefCnt_SafeAssign(fTexture, texture);
         fRect = rect;
     }
     GrTexture* texture() { return fTexture; }
     const SkPaint* paint() const { return fPaint; }
     const GrIRect16& rect() const { return fRect; }

     void setPlot(GrPlot* plot) {
         SkASSERT(NULL == plot || NULL == fPlot);
         fPlot = plot;
     }
     GrPlot* plot() { return fPlot; }

     bool isAtlased() const { return SkToBool(fPlot); }

     void setLocked(bool locked) { fLocked = locked; }
     bool locked() const { return fLocked; }

     SkDEBUGCODE(const GrPlot* plot() const { return fPlot; })
     SkDEBUGCODE(void validate(const GrTexture* backingTexture) const;)

 private:
     const Key       fKey;

+    // The "saveLayer" operation index of the cached layer
+    const int       fStart;
     // The final "restore" operation index of the cached layer
     const int       fStop;

+    const SkIRect   fBounds;
+
     // The paint used when dropping the layer down into the owning canvas.
     // Can be NULL. This class makes a copy for itself.
     const SkPaint*  fPaint;

     // fTexture is a ref on the atlasing texture for atlased layers and a
     // ref on a GrTexture for non-atlased textures.
     GrTexture*      fTexture;

     // For both atlased and non-atlased layers 'fRect' contains the bound of
     // the layer in whichever texture it resides. It is empty when 'fTexture'

(... skipping 34 matching lines ...)
 // classes.
 class GrLayerCache {
 public:
     GrLayerCache(GrContext*);
     ~GrLayerCache();

     // As a cache, the GrLayerCache can be ordered to free up all its cached
     // elements by the GrContext
     void freeAll();

-    GrCachedLayer* findLayer(uint32_t pictureID, int start,
-                             const SkIRect& bounds, const SkMatrix& ctm);
     GrCachedLayer* findLayerOrCreate(uint32_t pictureID,
                                      int start, int stop,
                                      const SkIRect& bounds,
-                                     const SkMatrix& ctm,
+                                     const SkMatrix& initialMat,
+                                     const int* key, int keySize,
                                      const SkPaint* paint);

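Since GrCachedLayer constructs its stored fKey with copyKey = true, the key array handed to findLayerOrCreate only has to stay alive for the duration of the call. A hedged sketch of a call site follows; 'cache', 'picture', 'startOp', 'stopOp', 'layerBounds', 'initialMat', 'paint', and the key values are placeholders rather than code from this CL:

    int key[] = { 2, 5 };   // hypothetical variable-length hoisting key
    GrCachedLayer* layer = cache->findLayerOrCreate(picture->uniqueID(),
                                                    startOp, stopOp,
                                                    layerBounds, initialMat,
                                                    key, 2,
                                                    paint);
    // 'key' may be a stack array: the cache copies it into the stored GrCachedLayer::Key.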
     // Attempt to place 'layer' in the atlas. Return true on success; false on failure.
     // When true is returned, 'needsRendering' will indicate if the layer must be (re)drawn.
     // Additionally, the GPU resources will be locked.
     bool tryToAtlas(GrCachedLayer* layer, const GrSurfaceDesc& desc, bool* needsRendering);

     // Attempt to lock the GPU resources required for a layer. Return true on success;
     // false on failure. When true is returned 'needsRendering' will indicate if the
     // layer must be (re)drawn.

(... skipping 60 matching lines ...)

     // needed for the current rendering) in a plot increments the plot lock
     // count for that plot. Similarly, once a rendering is complete all the
     // layers used in it decrement the lock count for the used plots.
     // Plots with a 0 lock count are open for recycling/purging.
     int fPlotLocks[kNumPlotsX * kNumPlotsY];

     // Inform the cache that layer's cached image is not currently required
     void unlock(GrCachedLayer* layer);

     void initAtlas();
     GrCachedLayer* createLayer(uint32_t pictureID, int start, int stop,
-                               const SkIRect& bounds, const SkMatrix& ctm,
+                               const SkIRect& bounds, const SkMatrix& initialMat,
+                               const int* key, int keySize,
                                const SkPaint* paint);

     // Remove all the layers (and unlock any resources) associated with 'pictureID'
     void purge(uint32_t pictureID);

     void purgePlot(GrPlot* plot);

     // Try to find a purgeable plot and clear it out. Return true if a plot
     // was purged; false otherwise.
     bool purgePlot();

     void incPlotLock(int plotIdx) { ++fPlotLocks[plotIdx]; }
     void decPlotLock(int plotIdx) {
         SkASSERT(fPlotLocks[plotIdx] > 0);
         --fPlotLocks[plotIdx];
     }

     // for testing
     friend class TestingAccess;
     int numLayers() const { return fLayerHash.count(); }
+    GrCachedLayer* findLayer(uint32_t pictureID, const SkMatrix& ctm,
+                             const int* key, int keySize);
 };

 #endif