| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright 2014 Google Inc. | 2 * Copyright 2014 Google Inc. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
| 5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
| 6 */ | 6 */ |
| 7 | 7 |
| 8 #ifndef GrLayerCache_DEFINED | 8 #ifndef GrLayerCache_DEFINED |
| 9 #define GrLayerCache_DEFINED | 9 #define GrLayerCache_DEFINED |
| 10 | 10 |
| (...skipping 13 matching lines...) |
| 24 // plot may be used to store layers from multiple pictures. | 24 // plot may be used to store layers from multiple pictures. |
| 25 struct GrPictureInfo { | 25 struct GrPictureInfo { |
| 26 public: | 26 public: |
| 27 static const int kNumPlots = 4; | 27 static const int kNumPlots = 4; |
| 28 | 28 |
| 29 // for SkTDynamicHash - just use the pictureID as the hash key | 29 // for SkTDynamicHash - just use the pictureID as the hash key |
| 30 static const uint32_t& GetKey(const GrPictureInfo& pictInfo) { return pictInfo.fPictureID; } | 30 static const uint32_t& GetKey(const GrPictureInfo& pictInfo) { return pictInfo.fPictureID; } |
| 31 static uint32_t Hash(const uint32_t& key) { return SkChecksum::Mix(key); } | 31 static uint32_t Hash(const uint32_t& key) { return SkChecksum::Mix(key); } |
| 32 | 32 |
| 33 // GrPictureInfo proper | 33 // GrPictureInfo proper |
| 34 GrPictureInfo(uint32_t pictureID) : fPictureID(pictureID) { | 34 GrPictureInfo(uint32_t pictureID) : fPictureID(pictureID) { |
| 35 #if !GR_CACHE_HOISTED_LAYERS | 35 #if !GR_CACHE_HOISTED_LAYERS |
| 36 memset(fPlotUses, 0, sizeof(fPlotUses)); | 36 memset(fPlotUses, 0, sizeof(fPlotUses)); |
| 37 #endif | 37 #endif |
| 38 } | 38 } |
| 39 | 39 |
| 40 #if !GR_CACHE_HOISTED_LAYERS | 40 #if !GR_CACHE_HOISTED_LAYERS |
| 41 void incPlotUsage(int plotID) { | 41 void incPlotUsage(int plotID) { |
| 42 SkASSERT(plotID < kNumPlots); | 42 SkASSERT(plotID < kNumPlots); |
| 43 fPlotUses[plotID]++; | 43 fPlotUses[plotID]++; |
| 44 } | 44 } |
| 45 | 45 |
| 46 void decPlotUsage(int plotID) { | 46 void decPlotUsage(int plotID) { |
| 47 SkASSERT(plotID < kNumPlots); | 47 SkASSERT(plotID < kNumPlots); |
| 48 SkASSERT(fPlotUses[plotID] > 0); | 48 SkASSERT(fPlotUses[plotID] > 0); |
| 49 fPlotUses[plotID]--; | 49 fPlotUses[plotID]--; |
| 50 } | 50 } |
| 51 | 51 |
| 52 int plotUsage(int plotID) const { | 52 int plotUsage(int plotID) const { |
| 53 SkASSERT(plotID < kNumPlots); | 53 SkASSERT(plotID < kNumPlots); |
| 54 return fPlotUses[plotID]; | 54 return fPlotUses[plotID]; |
| 55 } | 55 } |
| 56 #endif | 56 #endif |
| 57 | 57 |
| 58 const uint32_t fPictureID; | 58 const uint32_t fPictureID; |
| 59 GrAtlas::ClientPlotUsage fPlotUsage; | 59 GrAtlas::ClientPlotUsage fPlotUsage; |
| 60 | 60 |
| 61 #if !GR_CACHE_HOISTED_LAYERS | 61 #if !GR_CACHE_HOISTED_LAYERS |
| 62 private: | 62 private: |
| 63 int fPlotUses[kNumPlots]; | 63 int fPlotUses[kNumPlots]; |
| 64 #endif | 64 #endif |
| 65 }; | 65 }; |
| 66 | 66 |
| 67 // GrCachedLayer encapsulates the caching information for a single saveLayer. | 67 // GrCachedLayer encapsulates the caching information for a single saveLayer. |
| 68 // | 68 // |
| 69 // Atlased layers get a ref to the backing GrTexture while non-atlased layers | 69 // Atlased layers get a ref to the backing GrTexture while non-atlased layers |
| 70 // get a ref to the GrTexture in which they reside. In both cases 'fRect' | 70 // get a ref to the GrTexture in which they reside. In both cases 'fRect' |
| 71 // contains the layer's extent in its texture. | 71 // contains the layer's extent in its texture. |
| 72 // Atlased layers also get a pointer to the plot in which they reside. | 72 // Atlased layers also get a pointer to the plot in which they reside. |
| 73 // For non-atlased layers, the lock field just corresponds to locking in | 73 // For non-atlased layers, the lock field just corresponds to locking in |
| 74 // the resource cache. For atlased layers, it implements an additional level | 74 // the resource cache. For atlased layers, it implements an additional level |
| 75 // of locking to allow atlased layers to be reused multiple times. | 75 // of locking to allow atlased layers to be reused multiple times. |
| 76 struct GrCachedLayer { | 76 struct GrCachedLayer { |
| 77 public: | 77 public: |
| 78 // For SkTDynamicHash | 78 // For SkTDynamicHash |
| 79 struct Key { | 79 struct Key { |
| 80 Key(uint32_t pictureID, const SkMatrix& initialMat, | 80 Key(uint32_t pictureID, const SkMatrix& initialMat, |
| 81 const unsigned* key, int keySize, bool copyKey = false) | 81 const int* key, int keySize, bool copyKey = false) |
| 82 : fKeySize(keySize) | 82 : fKeySize(keySize) |
| 83 , fFreeKey(copyKey) { | 83 , fFreeKey(copyKey) { |
| 84 fIDMatrix.fPictureID = pictureID; | 84 fIDMatrix.fPictureID = pictureID; |
| 85 fIDMatrix.fInitialMat = initialMat; | 85 fIDMatrix.fInitialMat = initialMat; |
| 86 fIDMatrix.fInitialMat.getType(); // force initialization of type so hashes match | 86 fIDMatrix.fInitialMat.getType(); // force initialization of type so hashes match |
| 87 | 87 |
| 88 if (copyKey) { | 88 if (copyKey) { |
| 89 unsigned* tempKey = SkNEW_ARRAY(unsigned, keySize); | 89 int* tempKey = SkNEW_ARRAY(int, keySize); |
| 90 memcpy(tempKey, key, keySize*sizeof(unsigned)); | 90 memcpy(tempKey, key, keySize*sizeof(int)); |
| 91 fKey = tempKey; | 91 fKey = tempKey; |
| 92 } else { | 92 } else { |
| 93 fKey = key; | 93 fKey = key; |
| 94 } | 94 } |
| 95 | 95 |
| 96 // The pictureID/matrix portion needs to be tightly packed. | 96 // The pictureID/matrix portion needs to be tightly packed. |
| 97 GR_STATIC_ASSERT(sizeof(IDMatrix) == sizeof(uint32_t)+ // pictureID | 97 GR_STATIC_ASSERT(sizeof(IDMatrix) == sizeof(uint32_t)+ // pictureID |
| 98 9 * sizeof(SkScalar) + sizeof(uint32_t)); // matrix | 98 9 * sizeof(SkScalar) + sizeof(uint32_t)); // matrix |
| 99 } | 99 } |
| 100 | 100 |
| 101 ~Key() { | 101 ~Key() { |
| 102 if (fFreeKey) { | 102 if (fFreeKey) { |
| 103 SkDELETE_ARRAY(fKey); | 103 SkDELETE_ARRAY(fKey); |
| 104 } | 104 } |
| 105 } | 105 } |
| 106 | 106 |
| 107 bool operator==(const Key& other) const { | 107 bool operator==(const Key& other) const { |
| 108 if (fKeySize != other.fKeySize) { | 108 if (fKeySize != other.fKeySize) { |
| 109 return false; | 109 return false; |
| 110 } | 110 } |
| 111 return fIDMatrix.fPictureID == other.fIDMatrix.fPictureID && | 111 return fIDMatrix.fPictureID == other.fIDMatrix.fPictureID && |
| 112 fIDMatrix.fInitialMat.cheapEqualTo(other.fIDMatrix.fInitialMat) && | 112 fIDMatrix.fInitialMat.cheapEqualTo(other.fIDMatrix.fInitialMat) && |
| 113 !memcmp(fKey, other.fKey, fKeySize * sizeof(int)); | 113 !memcmp(fKey, other.fKey, fKeySize * sizeof(int)); |
| 114 } | 114 } |
| 115 | 115 |
| 116 uint32_t pictureID() const { return fIDMatrix.fPictureID; } | 116 uint32_t pictureID() const { return fIDMatrix.fPictureID; } |
| 117 | 117 |
| 118 // TODO: remove these when GrCachedLayer & ReplacementInfo fuse | 118 // TODO: remove these when GrCachedLayer & ReplacementInfo fuse |
| 119 const unsigned* key() const { SkASSERT(fFreeKey); return fKey; } | 119 const int* key() const { SkASSERT(fFreeKey); return fKey; } |
| 120 int keySize() const { SkASSERT(fFreeKey); return fKeySize; } | 120 int keySize() const { SkASSERT(fFreeKey); return fKeySize; } |
| 121 | 121 |
| 122 uint32_t hash() const { | 122 uint32_t hash() const { |
| 123 uint32_t hash = SkChecksum::Murmur3(reinterpret_cast<const uint32_t*>(fKey), | 123 uint32_t hash = SkChecksum::Murmur3(reinterpret_cast<const uint32_t*>(fKey), |
| 124 fKeySize * sizeof(int)); | 124 fKeySize * sizeof(int)); |
| 125 return SkChecksum::Murmur3(reinterpret_cast<const uint32_t*>(&fIDMatrix), | 125 return SkChecksum::Murmur3(reinterpret_cast<const uint32_t*>(&fIDMatrix), |
| 126 sizeof(IDMatrix), hash); | 126 sizeof(IDMatrix), hash); |
| 127 } | 127 } |
| 128 | 128 |
| 129 private: | 129 private: |
| 130 struct IDMatrix { | 130 struct IDMatrix { |
| 131 // ID of the picture of which this layer is a part | 131 // ID of the picture of which this layer is a part |
| 132 uint32_t fPictureID; | 132 uint32_t fPictureID; |
| 133 // The initial matrix passed into drawPicture | 133 // The initial matrix passed into drawPicture |
| 134 SkMatrix fInitialMat; | 134 SkMatrix fInitialMat; |
| 135 } fIDMatrix; | 135 } fIDMatrix; |
| 136 | 136 |
| 137 const unsigned* fKey; | 137 const int* fKey; |
| 138 const int fKeySize; | 138 const int fKeySize; |
| 139 bool fFreeKey; | 139 bool fFreeKey; |
| 140 }; | 140 }; |
| 141 | 141 |
| 142 static const Key& GetKey(const GrCachedLayer& layer) { return layer.fKey; } | 142 static const Key& GetKey(const GrCachedLayer& layer) { return layer.fKey; } |
| 143 static uint32_t Hash(const Key& key) { return key.hash(); } | 143 static uint32_t Hash(const Key& key) { return key.hash(); } |
| 144 | 144 |
| 145 // GrCachedLayer proper | 145 // GrCachedLayer proper |
| 146 GrCachedLayer(uint32_t pictureID, unsigned start, unsigned stop, | 146 GrCachedLayer(uint32_t pictureID, int start, int stop, |
| 147 const SkIRect& srcIR, const SkIRect& dstIR, | 147 const SkIRect& srcIR, const SkIRect& dstIR, |
| 148 const SkMatrix& ctm, | 148 const SkMatrix& ctm, |
| 149 const unsigned* key, int keySize, | 149 const int* key, int keySize, |
| 150 const SkPaint* paint) | 150 const SkPaint* paint) |
| 151 : fKey(pictureID, ctm, key, keySize, true) | 151 : fKey(pictureID, ctm, key, keySize, true) |
| 152 , fStart(start) | 152 , fStart(start) |
| 153 , fStop(stop) | 153 , fStop(stop) |
| 154 , fSrcIR(srcIR) | 154 , fSrcIR(srcIR) |
| 155 , fDstIR(dstIR) | 155 , fDstIR(dstIR) |
| 156 , fOffset(SkIPoint::Make(0, 0)) | 156 , fOffset(SkIPoint::Make(0, 0)) |
| 157 , fPaint(paint ? SkNEW_ARGS(SkPaint, (*paint)) : NULL) | 157 , fPaint(paint ? SkNEW_ARGS(SkPaint, (*paint)) : NULL) |
| 158 , fFilter(NULL) | 158 , fFilter(NULL) |
| 159 , fTexture(NULL) | 159 , fTexture(NULL) |
| (...skipping 12 matching lines...) |
| 172 } | 172 } |
| 173 | 173 |
| 174 ~GrCachedLayer() { | 174 ~GrCachedLayer() { |
| 175 SkSafeUnref(fTexture); | 175 SkSafeUnref(fTexture); |
| 176 SkSafeUnref(fFilter); | 176 SkSafeUnref(fFilter); |
| 177 SkDELETE(fPaint); | 177 SkDELETE(fPaint); |
| 178 } | 178 } |
| 179 | 179 |
| 180 uint32_t pictureID() const { return fKey.pictureID(); } | 180 uint32_t pictureID() const { return fKey.pictureID(); } |
| 181 // TODO: remove these when GrCachedLayer & ReplacementInfo fuse | 181 // TODO: remove these when GrCachedLayer & ReplacementInfo fuse |
| 182 const unsigned* key() const { return fKey.key(); } | 182 const int* key() const { return fKey.key(); } |
| 183 int keySize() const { return fKey.keySize(); } | 183 int keySize() const { return fKey.keySize(); } |
| 184 | 184 |
| 185 unsigned start() const { return fStart; } | 185 int start() const { return fStart; } |
| 186 // TODO: make bound debug only | 186 // TODO: make bound debug only |
| 187 const SkIRect& srcIR() const { return fSrcIR; } | 187 const SkIRect& srcIR() const { return fSrcIR; } |
| 188 const SkIRect& dstIR() const { return fDstIR; } | 188 const SkIRect& dstIR() const { return fDstIR; } |
| 189 unsigned stop() const { return fStop; } | 189 int stop() const { return fStop; } |
| 190 void setTexture(GrTexture* texture, const SkIRect& rect) { | 190 void setTexture(GrTexture* texture, const SkIRect& rect) { |
| 191 SkRefCnt_SafeAssign(fTexture, texture); | 191 SkRefCnt_SafeAssign(fTexture, texture); |
| 192 fRect = rect; | 192 fRect = rect; |
| 193 if (!fTexture) { | 193 if (!fTexture) { |
| 194 fLocked = false; | 194 fLocked = false; |
| 195 } | 195 } |
| 196 } | 196 } |
| 197 GrTexture* texture() { return fTexture; } | 197 GrTexture* texture() { return fTexture; } |
| 198 const SkPaint* paint() const { return fPaint; } | 198 const SkPaint* paint() const { return fPaint; } |
| 199 const SkImageFilter* filter() const { return fFilter; } | 199 const SkImageFilter* filter() const { return fFilter; } |
| (...skipping 13 matching lines...) |
| 213 void setLocked(bool locked) { fLocked = locked; } | 213 void setLocked(bool locked) { fLocked = locked; } |
| 214 bool locked() const { return fLocked; } | 214 bool locked() const { return fLocked; } |
| 215 | 215 |
| 216 SkDEBUGCODE(const GrPlot* plot() const { return fPlot; }) | 216 SkDEBUGCODE(const GrPlot* plot() const { return fPlot; }) |
| 217 SkDEBUGCODE(void validate(const GrTexture* backingTexture) const;) | 217 SkDEBUGCODE(void validate(const GrTexture* backingTexture) const;) |
| 218 | 218 |
| 219 private: | 219 private: |
| 220 const Key fKey; | 220 const Key fKey; |
| 221 | 221 |
| 222 // The "saveLayer" operation index of the cached layer | 222 // The "saveLayer" operation index of the cached layer |
| 223 const unsigned fStart; | 223 const int fStart; |
| 224 // The final "restore" operation index of the cached layer | 224 // The final "restore" operation index of the cached layer |
| 225 const unsigned fStop; | 225 const int fStop; |
| 226 | 226 |
| 227 // The layer's src rect (i.e., the portion of the source scene required | 227 // The layer's src rect (i.e., the portion of the source scene required |
| 228 // for filtering). | 228 // for filtering). |
| 229 const SkIRect fSrcIR; | 229 const SkIRect fSrcIR; |
| 230 // The layer's dest rect (i.e., where it will land in device space) | 230 // The layer's dest rect (i.e., where it will land in device space) |
| 231 const SkIRect fDstIR; | 231 const SkIRect fDstIR; |
| 232 // Offset sometimes required by image filters | 232 // Offset sometimes required by image filters |
| 233 SkIPoint fOffset; | 233 SkIPoint fOffset; |
| 234 | 234 |
| 235 // The paint used when dropping the layer down into the owning canvas. | 235 // The paint used when dropping the layer down into the owning canvas. |
| (...skipping 48 matching lines...) |
| 284 class GrLayerCache { | 284 class GrLayerCache { |
| 285 public: | 285 public: |
| 286 GrLayerCache(GrContext*); | 286 GrLayerCache(GrContext*); |
| 287 ~GrLayerCache(); | 287 ~GrLayerCache(); |
| 288 | 288 |
| 289 // As a cache, the GrLayerCache can be ordered to free up all its cached | 289 // As a cache, the GrLayerCache can be ordered to free up all its cached |
| 290 // elements by the GrContext | 290 // elements by the GrContext |
| 291 void freeAll(); | 291 void freeAll(); |
| 292 | 292 |
| 293 GrCachedLayer* findLayer(uint32_t pictureID, const SkMatrix& ctm, | 293 GrCachedLayer* findLayer(uint32_t pictureID, const SkMatrix& ctm, |
| 294 const unsigned* key, int keySize); | 294 const int* key, int keySize); |
| 295 GrCachedLayer* findLayerOrCreate(uint32_t pictureID, | 295 GrCachedLayer* findLayerOrCreate(uint32_t pictureID, |
| 296 int start, int stop, | 296 int start, int stop, |
| 297 const SkIRect& srcIR, | 297 const SkIRect& srcIR, |
| 298 const SkIRect& dstIR, | 298 const SkIRect& dstIR, |
| 299 const SkMatrix& initialMat, | 299 const SkMatrix& initialMat, |
| 300 const unsigned* key, int keySize, | 300 const int* key, int keySize, |
| 301 const SkPaint* paint); | 301 const SkPaint* paint); |
| 302 | 302 |
| 303 // Attempt to place 'layer' in the atlas. Return true on success; false on failure. | 303 // Attempt to place 'layer' in the atlas. Return true on success; false on failure. |
| 304 // When true is returned, 'needsRendering' will indicate if the layer must be (re)drawn. | 304 // When true is returned, 'needsRendering' will indicate if the layer must be (re)drawn. |
| 305 // Additionally, the GPU resources will be locked. | 305 // Additionally, the GPU resources will be locked. |
| 306 bool tryToAtlas(GrCachedLayer* layer, const GrSurfaceDesc& desc, bool* needsRendering); | 306 bool tryToAtlas(GrCachedLayer* layer, const GrSurfaceDesc& desc, bool* needsRendering); |
| 307 | 307 |
| 308 // Attempt to lock the GPU resources required for a layer. Return true on success; | 308 // Attempt to lock the GPU resources required for a layer. Return true on success; |
| 309 // false on failure. When true is returned 'needsRendering' will indicate if the | 309 // false on failure. When true is returned 'needsRendering' will indicate if the |
| 310 // layer must be (re)drawn. | 310 // layer must be (re)drawn. |
| (...skipping 39 matching lines...) |
| 350 static const int kNumPlotsY = 2; | 350 static const int kNumPlotsY = 2; |
| 351 | 351 |
| 352 static const int kPlotWidth = kAtlasTextureWidth / kNumPlotsX; | 352 static const int kPlotWidth = kAtlasTextureWidth / kNumPlotsX; |
| 353 static const int kPlotHeight = kAtlasTextureHeight / kNumPlotsY; | 353 static const int kPlotHeight = kAtlasTextureHeight / kNumPlotsY; |
| 354 | 354 |
| 355 GrContext* fContext; // pointer back to owning context | 355 GrContext* fContext; // pointer back to owning context |
| 356 SkAutoTDelete<GrAtlas> fAtlas; // TODO: could lazily allocate | 356 SkAutoTDelete<GrAtlas> fAtlas; // TODO: could lazily allocate |
| 357 | 357 |
| 358 // We cache this information here (rather than, say, on the owning picture) | 358 // We cache this information here (rather than, say, on the owning picture) |
| 359 // because we want to be able to clean it up as needed (e.g., if a picture | 359 // because we want to be able to clean it up as needed (e.g., if a picture |
| 360 // is leaked and never cleans itself up we still want to be able to | 360 // is leaked and never cleans itself up we still want to be able to |
| 361 // remove the GrPictureInfo once its layers are purged from all the atlas | 361 // remove the GrPictureInfo once its layers are purged from all the atlas |
| 362 // plots). | 362 // plots). |
| 363 SkTDynamicHash<GrPictureInfo, uint32_t> fPictureHash; | 363 SkTDynamicHash<GrPictureInfo, uint32_t> fPictureHash; |
| 364 | 364 |
| 365 SkTDynamicHash<GrCachedLayer, GrCachedLayer::Key> fLayerHash; | 365 SkTDynamicHash<GrCachedLayer, GrCachedLayer::Key> fLayerHash; |
| 366 | 366 |
| 367 SkMessageBus<SkPicture::DeletionMessage>::Inbox fPictDeletionInbox; | 367 SkMessageBus<SkPicture::DeletionMessage>::Inbox fPictDeletionInbox; |
| 368 | 368 |
| 369 // This implements a plot-centric locking mechanism (since the atlas | 369 // This implements a plot-centric locking mechanism (since the atlas |
| 370 // backing texture is always locked). Each layer that is locked (i.e., | 370 // backing texture is always locked). Each layer that is locked (i.e., |
| 371 // needed for the current rendering) in a plot increments the plot lock | 371 // needed for the current rendering) in a plot increments the plot lock |
| 372 // count for that plot. Similarly, once a rendering is complete all the | 372 // count for that plot. Similarly, once a rendering is complete all the |
| 373 // layers used in it decrement the lock count for the used plots. | 373 // layers used in it decrement the lock count for the used plots. |
| 374 // Plots with a 0 lock count are open for recycling/purging. | 374 // Plots with a 0 lock count are open for recycling/purging. |
| 375 int fPlotLocks[kNumPlotsX * kNumPlotsY]; | 375 int fPlotLocks[kNumPlotsX * kNumPlotsY]; |
| 376 | 376 |
| 377 // Inform the cache that layer's cached image is not currently required | 377 // Inform the cache that layer's cached image is not currently required |
| 378 void unlock(GrCachedLayer* layer); | 378 void unlock(GrCachedLayer* layer); |
| 379 | 379 |
| 380 void initAtlas(); | 380 void initAtlas(); |
| 381 GrCachedLayer* createLayer(uint32_t pictureID, int start, int stop, | 381 GrCachedLayer* createLayer(uint32_t pictureID, int start, int stop, |
| 382 const SkIRect& srcIR, const SkIRect& dstIR, | 382 const SkIRect& srcIR, const SkIRect& dstIR, |
| 383 const SkMatrix& initialMat, | 383 const SkMatrix& initialMat, |
| 384 const unsigned* key, int keySize, | 384 const int* key, int keySize, |
| 385 const SkPaint* paint); | 385 const SkPaint* paint); |
| 386 | 386 |
| 387 // Remove all the layers (and unlock any resources) associated with 'pictureID' | 387 // Remove all the layers (and unlock any resources) associated with 'pictureID' |
| 388 void purge(uint32_t pictureID); | 388 void purge(uint32_t pictureID); |
| 389 | 389 |
| 390 void purgePlot(GrPlot* plot); | 390 void purgePlot(GrPlot* plot); |
| 391 | 391 |
| 392 // Try to find a purgeable plot and clear it out. Return true if a plot | 392 // Try to find a purgeable plot and clear it out. Return true if a plot |
| 393 // was purged; false otherwise. | 393 // was purged; false otherwise. |
| 394 bool purgePlot(); | 394 bool purgePlot(); |
| 395 | 395 |
| 396 void incPlotLock(int plotIdx) { ++fPlotLocks[plotIdx]; } | 396 void incPlotLock(int plotIdx) { ++fPlotLocks[plotIdx]; } |
| 397 void decPlotLock(int plotIdx) { | 397 void decPlotLock(int plotIdx) { |
| 398 SkASSERT(fPlotLocks[plotIdx] > 0); | 398 SkASSERT(fPlotLocks[plotIdx] > 0); |
| 399 --fPlotLocks[plotIdx]; | 399 --fPlotLocks[plotIdx]; |
| 400 } | 400 } |
| 401 | 401 |
| 402 // for testing | 402 // for testing |
| 403 friend class TestingAccess; | 403 friend class TestingAccess; |
| 404 int numLayers() const { return fLayerHash.count(); } | 404 int numLayers() const { return fLayerHash.count(); } |
| 405 }; | 405 }; |
| 406 | 406 |
| 407 #endif | 407 #endif |
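Note on the Key packing above: Key::hash() (source lines 122-126) feeds the raw bytes of IDMatrix to SkChecksum::Murmur3, so any compiler-inserted padding or an uninitialized SkMatrix type mask would make logically equal keys hash differently. That is why the constructor calls getType() before hashing and why the GR_STATIC_ASSERT at source lines 96-98 pins the struct to sizeof(uint32_t) + 9 * sizeof(SkScalar) + sizeof(uint32_t), i.e. 4 + 36 + 4 = 44 bytes when SkScalar is a 32-bit float. The sketch below is a self-contained illustration of that invariant, not Skia source; the struct name, the FNV-1a hash, and the field layout are stand-ins for IDMatrix and SkChecksum::Murmur3.

    // Illustration only -- shows why a byte-hashed key struct must be tightly
    // packed and have every byte defined before hashing.
    #include <cstdint>
    #include <cstring>
    #include <cstdio>

    // Stand-in for GrCachedLayer::Key::IDMatrix: a 4-byte picture ID, the nine
    // SkScalar matrix entries, and the matrix's cached 4-byte type mask.
    struct IDMatrixSketch {
        uint32_t fPictureID;
        float    fMat[9];    // 9 * sizeof(SkScalar), assuming SkScalar == float
        uint32_t fTypeMask;  // must be computed up front (cf. getType()) before hashing
    };

    // Mirrors the GR_STATIC_ASSERT above: 4 + 9*4 + 4 = 44 bytes, no padding.
    static_assert(sizeof(IDMatrixSketch) == 44, "IDMatrixSketch must be tightly packed");

    // Byte-wise hash (FNV-1a here; the real code uses SkChecksum::Murmur3).
    // Equal keys hash equally only because every byte of the struct is defined.
    static uint32_t HashBytes(const void* data, size_t len) {
        const uint8_t* p = static_cast<const uint8_t*>(data);
        uint32_t h = 2166136261u;
        for (size_t i = 0; i < len; ++i) {
            h = (h ^ p[i]) * 16777619u;
        }
        return h;
    }

    int main() {
        IDMatrixSketch a;
        memset(&a, 0, sizeof(a));                  // every byte defined
        a.fPictureID = 7;
        a.fMat[0] = a.fMat[4] = a.fMat[8] = 1.0f;  // identity matrix
        a.fTypeMask = 0;                           // "type" computed before hashing

        IDMatrixSketch b = a;                      // a logically equal key
        printf("equal hashes: %d\n",
               HashBytes(&a, sizeof(a)) == HashBytes(&b, sizeof(b)));
        return 0;
    }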
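Both GrPictureInfo and GrCachedLayer expose static GetKey()/Hash() members because SkTDynamicHash resolves its key extraction and hashing through those statics rather than through std-style functors. The full SkTDynamicHash interface is not visible in this header, so the container below is a deliberately tiny stand-in (separate chaining, fixed bucket count, the hypothetical name TinyTraitsHash) that only demonstrates how a traits-driven lookup like the ones behind fPictureHash and fLayerHash resolves GetKey and Hash at compile time; it is not Skia's implementation.

    // Illustration only -- a minimal stand-in for a traits-driven hash container
    // in the style of SkTDynamicHash<T, Key>. T must provide:
    //     static const Key& GetKey(const T&);
    //     static uint32_t   Hash(const Key&);
    // exactly as GrPictureInfo and GrCachedLayer do in the header above.
    #include <cstdint>
    #include <vector>
    #include <cstdio>

    template <typename T, typename Key, int kBuckets = 64>
    class TinyTraitsHash {            // hypothetical name, not a Skia class
    public:
        void add(T* entry) {
            fBuckets[T::Hash(T::GetKey(*entry)) % kBuckets].push_back(entry);
        }
        T* find(const Key& key) const {
            const auto& bucket = fBuckets[T::Hash(key) % kBuckets];
            for (T* entry : bucket) {
                if (T::GetKey(*entry) == key) {  // Key must be equality-comparable
                    return entry;
                }
            }
            return nullptr;
        }
    private:
        std::vector<T*> fBuckets[kBuckets];
    };

    // A toy element following the same convention as GrPictureInfo: the
    // pictureID doubles as the hash key.
    struct ToyPictureInfo {
        static const uint32_t& GetKey(const ToyPictureInfo& p) { return p.fPictureID; }
        static uint32_t Hash(const uint32_t& key) { return key * 2654435761u; } // stand-in mix
        uint32_t fPictureID;
    };

    int main() {
        ToyPictureInfo info = { 42 };
        TinyTraitsHash<ToyPictureInfo, uint32_t> hash;
        hash.add(&info);
        printf("found: %d\n", hash.find(42u) != nullptr);
        return 0;
    }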