OLD | NEW |
1 /* | 1 /* |
2 * Copyright 2014 Google Inc. | 2 * Copyright 2014 Google Inc. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
6 */ | 6 */ |
7 | 7 |
8 #ifndef GrLayerCache_DEFINED | 8 #ifndef GrLayerCache_DEFINED |
9 #define GrLayerCache_DEFINED | 9 #define GrLayerCache_DEFINED |
10 | 10 |
(...skipping 33 matching lines...)
44 // get a ref to the GrTexture in which they reside. In both cases 'fRect' | 44 // get a ref to the GrTexture in which they reside. In both cases 'fRect' |
45 // contains the layer's extent in its texture. | 45 // contains the layer's extent in its texture. |
46 // Atlased layers also get a pointer to the plot in which they reside. | 46 // Atlased layers also get a pointer to the plot in which they reside. |
47 // For non-atlased layers, the lock field just corresponds to locking in | 47 // For non-atlased layers, the lock field just corresponds to locking in |
48 // the resource cache. For atlased layers, it implements an additional level | 48 // the resource cache. For atlased layers, it implements an additional level |
49 // of locking to allow atlased layers to be reused multiple times. | 49 // of locking to allow atlased layers to be reused multiple times. |
50 struct GrCachedLayer { | 50 struct GrCachedLayer { |
51 public: | 51 public: |
52 // For SkTDynamicHash | 52 // For SkTDynamicHash |
53 struct Key { | 53 struct Key { |
54 // TODO: the key needs to include the clip | 54 Key(uint32_t pictureID, int start, const SkIRect& bounds, const SkMatrix& ctm) |
55 Key(uint32_t pictureID, int start, const SkMatrix& ctm) | |
56 : fPictureID(pictureID) | 55 : fPictureID(pictureID) |
57 , fStart(start) { | 56 , fStart(start) |
58 fCTM[0] = ctm.getScaleX(); | 57 , fBounds(bounds) |
59 fCTM[1] = ctm.getSkewX(); | 58 , fCTM(ctm) { |
60 fCTM[2] = ctm.getSkewY(); | 59 fCTM.getType(); // force initialization of type so hashes match |
61 fCTM[3] = ctm.getScaleY(); | 60 |
62 // Key needs to be tightly packed. | 61 // Key needs to be tightly packed. |
63 GR_STATIC_ASSERT(sizeof(Key) == sizeof(uint32_t) + // picture ID | 62 GR_STATIC_ASSERT(sizeof(Key) == sizeof(uint32_t) + // picture ID |
64 sizeof(int) + // start index | 63 sizeof(int) + // start index |
65 4 * sizeof(SkScalar)); // 2x2 from CTM | 64 4 * sizeof(uint32_t) + // bounds |
| 65 9 * sizeof(SkScalar) + sizeof(uint32_t)); // matrix |
66 } | 66 } |
67 | 67 |
68 bool operator==(const Key& other) const { | 68 bool operator==(const Key& other) const { |
69 return fPictureID == other.fPictureID && | 69 return fPictureID == other.fPictureID && |
70 fStart == other.fStart && | 70 fStart == other.fStart && |
71 0 == memcmp(fCTM, other.fCTM, sizeof(fCTM)); | 71 fBounds == other.fBounds && |
| 72 fCTM.cheapEqualTo(other.fCTM); |
72 } | 73 } |
73 | 74 |
74 uint32_t pictureID() const { return fPictureID; } | 75 uint32_t pictureID() const { return fPictureID; } |
75 int start() const { return fStart; } | 76 int start() const { return fStart; } |
| 77 const SkIRect& bound() const { return fBounds; } |
| 78 const SkMatrix& ctm() const { return fCTM; } |
76 | 79 |
77 private: | 80 private: |
78 // ID of the picture of which this layer is a part | 81 // ID of the picture of which this layer is a part |
79 const uint32_t fPictureID; | 82 const uint32_t fPictureID; |
80 // The index of the saveLayer command in the picture | 83 // The index of the saveLayer command in the picture |
81 const int fStart; | 84 const int fStart; |
| 85 // The bounds of the layer. The TL corner is its offset. |
| 86 const SkIRect fBounds; |
82 // The 2x2 portion of the CTM applied to this layer in the picture | 87 // The 2x2 portion of the CTM applied to this layer in the picture |
83 SkScalar fCTM[4]; | 88 SkMatrix fCTM; |
84 }; | 89 }; |
85 | 90 |
86 static const Key& GetKey(const GrCachedLayer& layer) { return layer.fKey; } | 91 static const Key& GetKey(const GrCachedLayer& layer) { return layer.fKey; } |
87 static uint32_t Hash(const Key& key) { | 92 static uint32_t Hash(const Key& key) { |
88 return SkChecksum::Murmur3(reinterpret_cast<const uint32_t*>(&key), sizeof(Key)); | 93 return SkChecksum::Murmur3(reinterpret_cast<const uint32_t*>(&key), sizeof(Key)); |
89 } | 94 } |
90 | 95 |
91 // GrCachedLayer proper | 96 // GrCachedLayer proper |
92 GrCachedLayer(uint32_t pictureID, int start, int stop, | 97 GrCachedLayer(uint32_t pictureID, int start, int stop, |
93 const SkMatrix& ctm, const SkPaint* paint) | 98 const SkIRect& bounds, const SkMatrix& ctm, |
94 : fKey(pictureID, start, ctm) | 99 const SkPaint* paint) |
| 100 : fKey(pictureID, start, bounds, ctm) |
95 , fStop(stop) | 101 , fStop(stop) |
96 , fPaint(paint ? SkNEW_ARGS(SkPaint, (*paint)) : NULL) | 102 , fPaint(paint ? SkNEW_ARGS(SkPaint, (*paint)) : NULL) |
97 , fTexture(NULL) | 103 , fTexture(NULL) |
98 , fRect(GrIRect16::MakeEmpty()) | 104 , fRect(GrIRect16::MakeEmpty()) |
99 , fPlot(NULL) | 105 , fPlot(NULL) |
100 , fUses(0) | 106 , fUses(0) |
101 , fLocked(false) { | 107 , fLocked(false) { |
102 SkASSERT(SK_InvalidGenID != pictureID && start >= 0 && stop >= 0); | 108 SkASSERT(SK_InvalidGenID != pictureID && start >= 0 && stop >= 0); |
103 } | 109 } |
104 | 110 |
105 ~GrCachedLayer() { | 111 ~GrCachedLayer() { |
106 SkSafeUnref(fTexture); | 112 SkSafeUnref(fTexture); |
107 SkDELETE(fPaint); | 113 SkDELETE(fPaint); |
108 } | 114 } |
109 | 115 |
110 uint32_t pictureID() const { return fKey.pictureID(); } | 116 uint32_t pictureID() const { return fKey.pictureID(); } |
111 int start() const { return fKey.start(); } | 117 int start() const { return fKey.start(); } |
| 118 const SkIRect& bound() const { return fKey.bound(); } |
112 | 119 |
113 int stop() const { return fStop; } | 120 int stop() const { return fStop; } |
114 void setTexture(GrTexture* texture, const GrIRect16& rect) { | 121 void setTexture(GrTexture* texture, const GrIRect16& rect) { |
115 SkRefCnt_SafeAssign(fTexture, texture); | 122 SkRefCnt_SafeAssign(fTexture, texture); |
116 fRect = rect; | 123 fRect = rect; |
117 } | 124 } |
118 GrTexture* texture() { return fTexture; } | 125 GrTexture* texture() { return fTexture; } |
119 const SkPaint* paint() const { return fPaint; } | 126 const SkPaint* paint() const { return fPaint; } |
120 const GrIRect16& rect() const { return fRect; } | 127 const GrIRect16& rect() const { return fRect; } |
121 | 128 |
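Editor's note: the new Key constructor calls fCTM.getType() because Hash() runs Murmur3 over the raw bytes of the key, so the matrix's lazily computed type mask has to be in a deterministic state before hashing (and the struct must stay tightly packed, which the GR_STATIC_ASSERT checks). A minimal sketch of the intent follows; the picture ID, start index, bounds, and CTM values are made up for illustration and assume GrLayerCache.h is included.

    // Illustrative only: keys built from equal inputs should compare equal
    // and hash identically, since the hash is byte-wise over the packed struct.
    SkMatrix ctm;
    ctm.setTranslate(10.0f, 20.0f);                    // hypothetical CTM
    SkIRect bounds = SkIRect::MakeWH(128, 128);        // hypothetical layer bounds

    GrCachedLayer::Key a(/*pictureID=*/1, /*start=*/5, bounds, ctm);
    GrCachedLayer::Key b(/*pictureID=*/1, /*start=*/5, bounds, ctm);

    SkASSERT(a == b);
    SkASSERT(GrCachedLayer::Hash(a) == GrCachedLayer::Hash(b));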
(...skipping 64 matching lines...)
186 // classes. | 193 // classes. |
187 class GrLayerCache { | 194 class GrLayerCache { |
188 public: | 195 public: |
189 GrLayerCache(GrContext*); | 196 GrLayerCache(GrContext*); |
190 ~GrLayerCache(); | 197 ~GrLayerCache(); |
191 | 198 |
192 // As a cache, the GrLayerCache can be ordered to free up all its cached | 199 // As a cache, the GrLayerCache can be ordered to free up all its cached |
193 // elements by the GrContext | 200 // elements by the GrContext |
194 void freeAll(); | 201 void freeAll(); |
195 | 202 |
196 GrCachedLayer* findLayer(uint32_t pictureID, int start, const SkMatrix& ctm); | 203 GrCachedLayer* findLayer(uint32_t pictureID, int start, |
| 204 const SkIRect& bounds, const SkMatrix& ctm); |
197 GrCachedLayer* findLayerOrCreate(uint32_t pictureID, | 205 GrCachedLayer* findLayerOrCreate(uint32_t pictureID, |
198 int start, int stop, | 206 int start, int stop, |
| 207 const SkIRect& bounds, |
199 const SkMatrix& ctm, | 208 const SkMatrix& ctm, |
200 const SkPaint* paint); | 209 const SkPaint* paint); |
201 | 210 |
202 // Inform the cache that layer's cached image is now required. | 211 // Inform the cache that layer's cached image is now required. |
203 // Return true if the layer must be re-rendered. Return false if the | 212 // Return true if the layer must be re-rendered. Return false if the |
204 // layer was found in the cache and can be reused. | 213 // layer was found in the cache and can be reused. |
205 bool lock(GrCachedLayer* layer, const GrTextureDesc& desc, bool dontAtlas); | 214 bool lock(GrCachedLayer* layer, const GrTextureDesc& desc, bool dontAtlas); |
206 | 215 |
207 // addUse is just here to keep the API symmetric | 216 // addUse is just here to keep the API symmetric |
208 void addUse(GrCachedLayer* layer) { layer->addUse(); } | 217 void addUse(GrCachedLayer* layer) { layer->addUse(); } |
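Editor's note: the lock()/addUse() comments above describe the call pattern for a client about to draw a hoisted layer. The following sketch only illustrates that pattern; layerCache, pictureID, start, stop, bounds, ctm, and paint are assumed to be in scope, and renderLayerToTexture() plus the abbreviated GrTextureDesc setup are hypothetical stand-ins, not code from this patch.

    // Find (or create) the cached layer for this saveLayer span.
    GrCachedLayer* layer = layerCache->findLayerOrCreate(pictureID, start, stop,
                                                         bounds, ctm, paint);

    GrTextureDesc desc;                 // hypothetical, abbreviated setup
    desc.fWidth  = bounds.width();
    desc.fHeight = bounds.height();

    if (layerCache->lock(layer, desc, /*dontAtlas=*/false)) {
        // lock() returned true: no usable cached image, so the layer's commands
        // must be re-rendered into layer->texture() at layer->rect().
        renderLayerToTexture(layer);    // hypothetical helper
    }
    layerCache->addUse(layer);          // the upcoming rendering consumes this layer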
(...skipping 49 matching lines...)
258 // count for that plot. Similarly, once a rendering is complete all the | 267 // count for that plot. Similarly, once a rendering is complete all the |
259 // layers used in it decrement the lock count for the used plots. | 268 // layers used in it decrement the lock count for the used plots. |
260 // Plots with a 0 lock count are open for recycling/purging. | 269 // Plots with a 0 lock count are open for recycling/purging. |
261 int fPlotLocks[kNumPlotsX * kNumPlotsY]; | 270 int fPlotLocks[kNumPlotsX * kNumPlotsY]; |
262 | 271 |
263 // Inform the cache that layer's cached image is not currently required | 272 // Inform the cache that layer's cached image is not currently required |
264 void unlock(GrCachedLayer* layer); | 273 void unlock(GrCachedLayer* layer); |
265 | 274 |
266 void initAtlas(); | 275 void initAtlas(); |
267 GrCachedLayer* createLayer(uint32_t pictureID, int start, int stop, | 276 GrCachedLayer* createLayer(uint32_t pictureID, int start, int stop, |
268 const SkMatrix& ctm, const SkPaint* paint); | 277 const SkIRect& bounds, const SkMatrix& ctm, |
| 278 const SkPaint* paint); |
269 | 279 |
270 void purgeAll(); | 280 void purgeAll(); |
271 | 281 |
272 // Remove all the layers (and unlock any resources) associated with 'pictureID' | 282 // Remove all the layers (and unlock any resources) associated with 'pictureID' |
273 void purge(uint32_t pictureID); | 283 void purge(uint32_t pictureID); |
274 | 284 |
275 static bool PlausiblyAtlasable(int width, int height) { | 285 static bool PlausiblyAtlasable(int width, int height) { |
276 return width <= kPlotWidth && height <= kPlotHeight; | 286 return width <= kPlotWidth && height <= kPlotHeight; |
277 } | 287 } |
278 | 288 |
279 void purgePlot(GrPlot* plot); | 289 void purgePlot(GrPlot* plot); |
280 | 290 |
281 // Try to find a purgeable plot and clear it out. Return true if a plot | 291 // Try to find a purgeable plot and clear it out. Return true if a plot |
282 // was purged; false otherwise. | 292 // was purged; false otherwise. |
283 bool purgePlot(); | 293 bool purgePlot(); |
284 | 294 |
285 void incPlotLock(int plotIdx) { ++fPlotLocks[plotIdx]; } | 295 void incPlotLock(int plotIdx) { ++fPlotLocks[plotIdx]; } |
286 void decPlotLock(int plotIdx) { | 296 void decPlotLock(int plotIdx) { |
287 SkASSERT(fPlotLocks[plotIdx] > 0); | 297 SkASSERT(fPlotLocks[plotIdx] > 0); |
288 --fPlotLocks[plotIdx]; | 298 --fPlotLocks[plotIdx]; |
289 } | 299 } |
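Editor's note: the fPlotLocks comment earlier spells out the protocol these two helpers implement: each atlased layer participating in a rendering pins its plot, the pin is released once that rendering completes, and only zero-count plots are candidates for purgePlot(). The sketch below is a hypothetical member written solely to make that lifecycle concrete; it is not part of the patch, and plotIdx stands for the index of the GrPlot an atlased layer lives in.

    void GrLayerCache::sketchPlotLockLifecycle(int plotIdx) {  // hypothetical member
        this->incPlotLock(plotIdx);   // layer scheduled into a rendering: pin its plot
        // ... the rendering that samples the atlased layer happens here ...
        this->decPlotLock(plotIdx);   // rendering finished: plot may drop to 0 locks
        // Only plots whose fPlotLocks entry is 0 may be recycled by purgePlot().
    }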
290 | 300 |
291 // for testing | 301 // for testing |
292 friend class TestingAccess; | 302 friend class TestingAccess; |
293 int numLayers() const { return fLayerHash.count(); } | 303 int numLayers() const { return fLayerHash.count(); } |
294 }; | 304 }; |
295 | 305 |
296 #endif | 306 #endif |