Chromium Code Reviews

| Index: src/core/SkGlyphCache.cpp |
| diff --git a/src/core/SkGlyphCache.cpp b/src/core/SkGlyphCache.cpp |
| index 9b0199f6cf9f3f23b253aa77ab56276376d13055..6447f1a8d76333fdbbde6fa7e7d3dcd092feee28 100755 |
| --- a/src/core/SkGlyphCache.cpp |
| +++ b/src/core/SkGlyphCache.cpp |
| @@ -5,6 +5,7 @@ |
| * found in the LICENSE file. |
| */ |
| +#include "SkOnce.h" |
| #include "SkGlyphCache.h" |
| #include "SkGlyphCache_Globals.h" |
| #include "SkGraphics.h" |
| @@ -38,20 +39,19 @@ static SkGlyphCache_Globals& get_globals() { |
| #define kMinAllocAmount ((sizeof(SkGlyph) + kMinGlyphImageSize) * kMinGlyphCount) |
| SkGlyphCache::SkGlyphCache(SkTypeface* typeface, const SkDescriptor* desc, SkScalerContext* ctx) |
| - : fDesc(desc->copy()) |
| + : fNext(NULL) |
| + , fPrev(NULL) |
| + , fDesc(desc->copy()) |
| + , refCount(0) |
| + , fGlyphAlloc(kMinAllocAmount) |
| + , fMemoryUsed(sizeof(*this)) |
| , fScalerContext(ctx) |
| - , fGlyphAlloc(kMinAllocAmount) { |
| + , fAuxProcList(NULL) { |
| SkASSERT(typeface); |
| SkASSERT(desc); |
| SkASSERT(ctx); |
| - fPrev = fNext = NULL; |
| - |
| fScalerContext->getFontMetrics(&fFontMetrics); |
| - |
| - fMemoryUsed = sizeof(*this); |
| - |
| - fAuxProcList = NULL; |
| } |
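
The initializer list above appears to be reordered to match the members' declaration order. That matters because C++ initializes members in declaration order regardless of how the list is written; a mismatched list can read an uninitialized member. A minimal illustration (hypothetical struct, not from this CL):

    struct Example {
        int fA;
        int fB;
        // fA is initialized first because it is declared first, so fA(fB + 1)
        // reads fB while fB is still indeterminate. -Wreorder flags this.
        Example() : fB(1), fA(fB + 1) {}
    };
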
| SkGlyphCache::~SkGlyphCache() { |
| @@ -65,19 +65,59 @@ SkGlyphCache::~SkGlyphCache() { |
| this->invokeAndRemoveAuxProcs(); |
| } |
| +void SkGlyphCache::increaseMemoryUsed(size_t used) { |
| + fMemoryUsed += used; |
| + get_globals().increaseTotalMemoryUsed(used); |
| +} |
| + |
| +SkGlyphCache::CharGlyphRec SkGlyphCache::PackedUnicharIDtoCharGlyphRec(PackedUnicharID packedUnicharID) { |
| + SkFixed x = SkGlyph::SubToFixed(SkGlyph::ID2SubX(packedUnicharID)); |
| + SkFixed y = SkGlyph::SubToFixed(SkGlyph::ID2SubY(packedUnicharID)); |
| + SkUnichar unichar = SkGlyph::ID2Code(packedUnicharID); |
| + |
| + SkAutoMutexAcquire lock(fScalerMutex); |
| + PackedGlyphID packedGlyphID = SkGlyph::MakeID(fScalerContext->charToGlyphID(unichar), x, y); |
| + |
| + return {packedUnicharID, packedGlyphID}; |
| +} |
| + |
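
For orientation: SkGlyph::MakeID packs a character code and a subpixel position into a single 32-bit key, and ID2Code/ID2SubX/ID2SubY unpack it again, which is the round trip the helper above performs. The field layout below is an illustrative assumption, not Skia's exact encoding:

    #include <cstdint>

    // code in the low 16 bits, two assumed 2-bit subpixel fields above it
    static inline uint32_t makeID(uint16_t code, uint32_t subX, uint32_t subY) {
        return (subY << 18) | (subX << 16) | code;
    }
    static inline uint16_t id2Code(uint32_t id) { return id & 0xFFFF; }
    static inline uint32_t id2SubX(uint32_t id) { return (id >> 16) & 0x3; }
    static inline uint32_t id2SubY(uint32_t id) { return (id >> 18) & 0x3; }
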
| SkGlyphCache::CharGlyphRec* SkGlyphCache::getCharGlyphRec(PackedUnicharID packedUnicharID) { |
| if (NULL == fPackedUnicharIDToPackedGlyphID.get()) { |
| - // Allocate the array. |
| - fPackedUnicharIDToPackedGlyphID.reset(kHashCount); |
| - // Initialize array to map character and position with the impossible glyph ID. This |
| - // represents no mapping. |
| - for (int i = 0; i < kHashCount; ++i) { |
| - fPackedUnicharIDToPackedGlyphID[i].fPackedUnicharID = SkGlyph::kImpossibleID; |
| - fPackedUnicharIDToPackedGlyphID[i].fPackedGlyphID = 0; |
| + fMu.releaseShared(); |
|
mtklein_C
2015/08/14 15:31:37
Let's add assertShared() / assertExclusive() methods.
|
| + |
| + // Add the map only if there is a call for char -> glyph mapping. |
| + { |
| + SkAutoTAcquire<SkSharedMutex> lock(fMu); |
| + |
| + // Now that the cache is locked exclusively, make sure no one added this array while unlocked. |
| + if (NULL == fPackedUnicharIDToPackedGlyphID.get()) { |
| + // Allocate the array. |
| + fPackedUnicharIDToPackedGlyphID.reset(SkNEW(PackedUnicharIDToPackedGlyphIDMap)); |
| + } |
| + |
| + fPackedUnicharIDToPackedGlyphID->set(PackedUnicharIDtoCharGlyphRec(packedUnicharID)); |
| + } |
| + fMu.acquireShared(); |
| + |
| + return fPackedUnicharIDToPackedGlyphID->find(packedUnicharID); |
| + } |
| + |
| + CharGlyphRec* answer = fPackedUnicharIDToPackedGlyphID->find(packedUnicharID); |
| + if (NULL == answer) { |
| + fMu.releaseShared(); |
| + // Add a new char -> glyph mapping. |
| + { |
| + SkAutoTAcquire<SkSharedMutex> lock(fMu); |
| + answer = fPackedUnicharIDToPackedGlyphID->find(packedUnicharID); |
| + if (NULL == answer) { |
| + fPackedUnicharIDToPackedGlyphID->set(PackedUnicharIDtoCharGlyphRec(packedUnicharID)); |
| + } |
| } |
| + fMu.acquireShared(); |
| + return fPackedUnicharIDToPackedGlyphID->find(packedUnicharID); |
| } |
| - return &fPackedUnicharIDToPackedGlyphID[SkChecksum::CheapMix(packedUnicharID) & kHashMask]; |
| + return answer; |
| } |
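
getCharGlyphRec() is double-checked locking on a reader/writer lock: callers hold fMu shared; on a miss the thread drops the shared lock, takes fMu exclusively, re-checks (another thread may have filled the slot in the unlocked window), inserts, then downgrades back to shared before returning. A self-contained sketch of the same dance, with std::shared_mutex standing in for SkSharedMutex:

    #include <cstdint>
    #include <mutex>
    #include <shared_mutex>
    #include <unordered_map>

    class SharedCache {
    public:
        int lookup(uint32_t key) {
            fMu.lock_shared();
            const int* hit = this->find(key);
            if (nullptr == hit) {
                fMu.unlock_shared();
                {
                    std::unique_lock<std::shared_mutex> lock(fMu);
                    // Re-check: someone may have inserted while we were unlocked.
                    if (nullptr == this->find(key)) {
                        fMap[key] = compute(key);
                    }
                }
                fMu.lock_shared();
                hit = this->find(key);
            }
            int value = *hit;
            fMu.unlock_shared();
            return value;
        }

    private:
        const int* find(uint32_t key) const {
            auto it = fMap.find(key);
            return it == fMap.end() ? nullptr : &it->second;
        }
        static int compute(uint32_t key) { return static_cast<int>(key * 2u); }

        std::shared_mutex fMu;
        std::unordered_map<uint32_t, int> fMap;
    };

Unlike the sketch, getCharGlyphRec() can return a pointer into the map because its caller still holds fMu shared when the function returns.
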
| /////////////////////////////////////////////////////////////////////////////// |
| @@ -92,16 +132,14 @@ uint16_t SkGlyphCache::unicharToGlyph(SkUnichar charCode) { |
| VALIDATE(); |
| PackedUnicharID packedUnicharID = SkGlyph::MakeID(charCode); |
| const CharGlyphRec& rec = *this->getCharGlyphRec(packedUnicharID); |
| - |
| - if (rec.fPackedUnicharID == packedUnicharID) { |
| - return SkGlyph::ID2Code(rec.fPackedGlyphID); |
| - } else { |
| - return fScalerContext->charToGlyphID(charCode); |
| - } |
| + return SkGlyph::ID2Code(rec.fPackedGlyphID); |
| } |
| SkUnichar SkGlyphCache::glyphToUnichar(uint16_t glyphID) { |
| - return fScalerContext->glyphIDToChar(glyphID); |
| + SkUnichar answer; |
| + SkAutoMutexAcquire lock(fScalerMutex); |
|
mtklein_C
2015/08/14 15:31:37
Is this different from
SkAutoMutexAcquire lock(fScalerMutex); return fScalerContext->glyphIDToChar(glyphID);?
|
| + answer = fScalerContext->glyphIDToChar(glyphID); |
| + return answer; |
| } |
| unsigned SkGlyphCache::getGlyphCount() { |
| @@ -146,18 +184,11 @@ const SkGlyph& SkGlyphCache::getGlyphIDMetrics(uint16_t glyphID, SkFixed x, SkFi |
| } |
| SkGlyph* SkGlyphCache::lookupByChar(SkUnichar charCode, MetricsType type, SkFixed x, SkFixed y) { |
| - PackedUnicharID id = SkGlyph::MakeID(charCode, x, y); |
| - CharGlyphRec* rec = this->getCharGlyphRec(id); |
| - if (rec->fPackedUnicharID != id) { |
| - // this ID is based on the UniChar |
| - rec->fPackedUnicharID = id; |
| - // this ID is based on the glyph index |
| - PackedGlyphID combinedID = SkGlyph::MakeID(fScalerContext->charToGlyphID(charCode), x, y); |
| - rec->fPackedGlyphID = combinedID; |
| - return this->lookupByPackedGlyphID(combinedID, type); |
| - } else { |
| - return this->lookupByPackedGlyphID(rec->fPackedGlyphID, type); |
| - } |
| + PackedUnicharID targetUnicharID = SkGlyph::MakeID(charCode, x, y); |
| + CharGlyphRec* rec = this->getCharGlyphRec(targetUnicharID); |
| + PackedGlyphID packedGlyphID = rec->fPackedGlyphID; |
| + |
| + return this->lookupByPackedGlyphID(packedGlyphID, type); |
| } |
| SkGlyph* SkGlyphCache::lookupByPackedGlyphID(PackedGlyphID packedGlyphID, MetricsType type) { |
| @@ -167,63 +198,91 @@ SkGlyph* SkGlyphCache::lookupByPackedGlyphID(PackedGlyphID packedGlyphID, Metric |
| glyph = this->allocateNewGlyph(packedGlyphID, type); |
| } else { |
| if (type == kFull_MetricsType && glyph->isJustAdvance()) { |
| - fScalerContext->getMetrics(glyph); |
| + addFullMetrics(glyph); |
|
mtklein_C
2015/08/14 15:31:37
this->
|
| } |
| } |
| return glyph; |
| } |
| SkGlyph* SkGlyphCache::allocateNewGlyph(PackedGlyphID packedGlyphID, MetricsType mtype) { |
| - fMemoryUsed += sizeof(SkGlyph); |
| SkGlyph* glyphPtr; |
| { |
| - SkGlyph glyph; |
| - glyph.initGlyphFromCombinedID(packedGlyphID); |
| - glyphPtr = fGlyphMap.set(glyph); |
| - } |
| - |
| - if (kJustAdvance_MetricsType == mtype) { |
| - fScalerContext->getAdvance(glyphPtr); |
| - } else { |
| - SkASSERT(kFull_MetricsType == mtype); |
| - fScalerContext->getMetrics(glyphPtr); |
| + fMu.releaseShared(); |
| + { |
| + SkAutoTAcquire<SkSharedMutex> mapLock(fMu); |
|
mtklein_C
2015/08/14 15:31:37
Let's find some common naming scheme between fMu and fScalerMutex.
|
| + glyphPtr = fGlyphMap.find(packedGlyphID); |
| + if (NULL == glyphPtr) { |
| + SkGlyph glyph; |
| + glyph.initGlyphFromCombinedID(packedGlyphID); |
| + { |
| + SkAutoMutexAcquire lock(fScalerMutex); |
| + if (kJustAdvance_MetricsType == mtype) { |
| + fScalerContext->getAdvance(&glyph); |
| + } else { |
| + SkASSERT(kFull_MetricsType == mtype); |
| + fScalerContext->getMetrics(&glyph); |
| + } |
| + increaseMemoryUsed(sizeof(SkGlyph)); |
|
mtklein_C
2015/08/14 15:31:37
this->
|
| + } // drop scaler lock |
| + glyphPtr = fGlyphMap.set(glyph); |
| + |
| + } else if (kFull_MetricsType == mtype && glyphPtr->isJustAdvance()) { |
| + // Some other thread added the glyph, but only calculated just-advance metrics. |
| + SkAutoMutexAcquire lock(fScalerMutex); |
| + fScalerContext->getMetrics(glyphPtr); |
| + } |
| + } // drop map lock |
| + fMu.acquireShared(); |
| + glyphPtr = fGlyphMap.find(packedGlyphID); |
| } |
| SkASSERT(glyphPtr->fID != SkGlyph::kImpossibleID); |
| return glyphPtr; |
| } |
| -const void* SkGlyphCache::findImage(const SkGlyph& glyph) { |
| - if (glyph.fWidth > 0 && glyph.fWidth < kMaxGlyphWidth) { |
| - if (NULL == glyph.fImage) { |
| - size_t size = glyph.computeImageSize(); |
| - const_cast<SkGlyph&>(glyph).fImage = fGlyphAlloc.alloc(size, |
| - SkChunkAlloc::kReturnNil_AllocFailType); |
| - // check that alloc() actually succeeded |
| - if (glyph.fImage) { |
| - fScalerContext->getImage(glyph); |
| - // TODO: the scaler may have changed the maskformat during |
| - // getImage (e.g. from AA or LCD to BW) which means we may have |
| - // overallocated the buffer. Check if the new computedImageSize |
| - // is smaller, and if so, shrink the alloc size in fImageAlloc. |
| - fMemoryUsed += size; |
| - } |
| - } |
| +void SkGlyphCache::addFullMetrics(SkGlyph* glyph) { |
| + // Now that the cache is exclusively owned, check again. |
| + SkAutoMutexAcquire lock(fScalerMutex); |
| + if (glyph->isJustAdvance()) { |
| + fScalerContext->getMetrics(glyph); |
| } |
| - return glyph.fImage; |
| } |
| -const SkPath* SkGlyphCache::findPath(const SkGlyph& glyph) { |
| - if (glyph.fWidth) { |
| - if (glyph.fPath == NULL) { |
| - const_cast<SkGlyph&>(glyph).fPath = SkNEW(SkPath); |
| - fScalerContext->getPath(glyph, glyph.fPath); |
| - fMemoryUsed += sizeof(SkPath) + |
| - glyph.fPath->countPoints() * sizeof(SkPoint); |
| +void SkGlyphCache::onceFillInImage(GlyphAndCache gc) { |
|
mtklein_C
2015/08/14 15:31:37
It'd be nice to remind ourselves we're holding the scaler mutex in these once-functions.
|
| + SkGlyphCache* cache = gc.cache; |
| + const SkGlyph* glyph = gc.glyph; |
| + if (glyph->fWidth > 0 && glyph->fWidth < kMaxGlyphWidth) { |
| + size_t size = glyph->computeImageSize(); |
| + sk_atomic_store(&const_cast<SkGlyph*>(glyph)->fImage, |
|
mtklein_C
2015/08/14 15:31:37
This shouldn't need to be atomic under this scheme
|
| + cache->fGlyphAlloc.alloc(size, SkChunkAlloc::kReturnNil_AllocFailType), |
| + sk_memory_order_relaxed); |
| + if (glyph->fImage != NULL) { |
| + cache->fScalerContext->getImage(*glyph); |
| + cache->increaseMemoryUsed(size); |
| } |
| } |
| - return glyph.fPath; |
| +} |
| + |
| +const void* SkGlyphCache::findImage(const SkGlyph& glyph) { |
| + SkOnce(&glyph.fImageIsSet, &fScalerMutex, &SkGlyphCache::onceFillInImage, {this, &glyph}); |
| + return sk_atomic_load(&glyph.fImage, sk_memory_order_relaxed); |
|
mtklein_C
2015/08/14 15:31:37
Again, I think these do not need to be atomic.
herb_g
2015/08/21 19:16:50
I don't think this is correct. I think that making these loads and stores non-atomic would be a data race.
|
| +} |
| + |
| +void SkGlyphCache::onceFillInPath(GlyphAndCache gc) { |
| + SkGlyphCache* cache = gc.cache; |
| + const SkGlyph* glyph = gc.glyph; |
| + if (glyph->fWidth > 0) { |
| + sk_atomic_store(&const_cast<SkGlyph*>(glyph)->fPath, SkNEW(SkPath), sk_memory_order_relaxed); |
|
mtklein_C
2015/08/14 15:31:37
ditto
|
| + cache->fScalerContext->getPath(*glyph, glyph->fPath); |
| + size_t size = sizeof(SkPath) + glyph->fPath->countPoints() * sizeof(SkPoint); |
| + cache->increaseMemoryUsed(size); |
| + } |
| +} |
| + |
| +const SkPath* SkGlyphCache::findPath(const SkGlyph& glyph) { |
| + SkOnce(&glyph.fPathIsSet, &fScalerMutex, &SkGlyphCache::onceFillInPath, {this, &glyph}); |
| + return sk_atomic_load(&glyph.fPath, sk_memory_order_relaxed); |
|
mtklein_C
2015/08/14 15:31:37
ditto
|
| } |
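
findImage() and findPath() now funnel the lazy, expensive fill-in through SkOnce: exactly one thread runs the once-function (under fScalerMutex) while later callers skip straight to the already-published field. A rough analogue using std::call_once, with std::once_flag standing in for the fImageIsSet/fPathIsSet flags:

    #include <mutex>

    struct Glyph {
        std::once_flag fImageOnce;
        const void*    fImage = nullptr;
    };

    const void* findImage(Glyph* glyph) {
        std::call_once(glyph->fImageOnce, [glyph] {
            // In the CL this is onceFillInImage() running under fScalerMutex;
            // here we just fake the allocation and rasterization.
            glyph->fImage = new char[64];
        });
        return glyph->fImage;
    }

std::call_once gives the release/acquire pairing that makes the plain read of fImage safe afterward; whether SkOnce plus relaxed atomics provides the same guarantee is the open question in the review thread above.
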
| void SkGlyphCache::dump() const { |
| @@ -248,6 +307,7 @@ void SkGlyphCache::dump() const { |
| /////////////////////////////////////////////////////////////////////////////// |
| bool SkGlyphCache::getAuxProcData(void (*proc)(void*), void** dataPtr) const { |
| + SkAutoMutexAcquire lock(fScalerMutex); |
|
mtklein_C
2015/08/14 15:31:37
It's not obvious why these hold fScalerMutex?
|
| const AuxProcRec* rec = fAuxProcList; |
| while (rec) { |
| if (rec->fProc == proc) { |
| @@ -266,6 +326,7 @@ void SkGlyphCache::setAuxProc(void (*proc)(void*), void* data) { |
| return; |
| } |
| + SkAutoMutexAcquire lock(fScalerMutex); |
| AuxProcRec* rec = fAuxProcList; |
| while (rec) { |
| if (rec->fProc == proc) { |
| @@ -295,14 +356,7 @@ void SkGlyphCache::invokeAndRemoveAuxProcs() { |
| /////////////////////////////////////////////////////////////////////////////// |
| /////////////////////////////////////////////////////////////////////////////// |
| - |
| -class AutoAcquire { |
| -public: |
| - AutoAcquire(SkSpinlock& lock) : fLock(lock) { fLock.acquire(); } |
| - ~AutoAcquire() { fLock.release(); } |
| -private: |
| - SkSpinlock& fLock; |
| -}; |
| +typedef SkAutoTAcquire<SkSpinlock> AutoAcquire; |
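
The hand-rolled AutoAcquire is replaced by the generic SkAutoTAcquire<T>, which should work for any lock type exposing acquire()/release(). A minimal sketch of such a template (interface assumed from the typedef):

    template <typename Lock>
    class AutoTAcquire {
    public:
        explicit AutoTAcquire(Lock& lock) : fLock(lock) { fLock.acquire(); }
        ~AutoTAcquire() { fLock.release(); }

        // Non-copyable: the guard owns the acquisition for its whole scope.
        AutoTAcquire(const AutoTAcquire&) = delete;
        AutoTAcquire& operator=(const AutoTAcquire&) = delete;

    private:
        Lock& fLock;
    };
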
| size_t SkGlyphCache_Globals::setCacheSizeLimit(size_t newLimit) { |
| static const size_t minLimit = 256 * 1024; |
| @@ -333,7 +387,7 @@ int SkGlyphCache_Globals::setCacheCountLimit(int newCount) { |
| void SkGlyphCache_Globals::purgeAll() { |
| AutoAcquire ac(fLock); |
| - this->internalPurge(fTotalMemoryUsed); |
| + this->internalPurge(fTotalMemoryUsed.load()); |
| } |
| /* This guy calls the visitor from within the mutex lock, so the visitor |
| @@ -342,10 +396,7 @@ void SkGlyphCache_Globals::purgeAll() { |
| - try to acquire the mutex again |
| - call a fontscaler (which might call into the cache) |
| */ |
| -SkGlyphCache* SkGlyphCache::VisitCache(SkTypeface* typeface, |
| - const SkDescriptor* desc, |
| - bool (*proc)(const SkGlyphCache*, void*), |
| - void* context) { |
| +SkGlyphCache* SkGlyphCache::VisitCache(SkTypeface* typeface, const SkDescriptor* desc, VisitProc proc, void* context) { |
| if (!typeface) { |
| typeface = SkTypeface::GetDefaultTypeface(); |
| } |
| @@ -361,11 +412,13 @@ SkGlyphCache* SkGlyphCache::VisitCache(SkTypeface* typeface, |
| for (cache = globals.internalGetHead(); cache != NULL; cache = cache->fNext) { |
| if (cache->fDesc->equals(*desc)) { |
| - globals.internalDetachCache(cache); |
| + globals.internalMoveToHead(cache); |
| + cache->fMu.acquireShared(); |
| if (!proc(cache, context)) { |
| - globals.internalAttachCacheToHead(cache); |
| - cache = NULL; |
| + cache->fMu.releaseShared(); |
| + return NULL; |
| } |
| + cache->refCount += 1; |
|
mtklein_C
2015/08/14 15:31:37
This seems odd. Are all these cache->refCounts always updated under fLock?
|
| return cache; |
| } |
| } |
| @@ -378,28 +431,39 @@ SkGlyphCache* SkGlyphCache::VisitCache(SkTypeface* typeface, |
| // pass true the first time, to notice if the scalercontext failed, |
| // so we can try the purge. |
| SkScalerContext* ctx = typeface->createScalerContext(desc, true); |
| - if (!ctx) { |
| + if (NULL == ctx) { |
| get_globals().purgeAll(); |
| ctx = typeface->createScalerContext(desc, false); |
| SkASSERT(ctx); |
| } |
| cache = SkNEW_ARGS(SkGlyphCache, (typeface, desc, ctx)); |
| + |
| + globals.attachCacheToHead(cache); |
| } |
| AutoValidate av(cache); |
| + AutoAcquire ac(globals.fLock); |
| + cache->fMu.acquireShared(); |
| if (!proc(cache, context)) { // need to reattach |
| - globals.attachCacheToHead(cache); |
| - cache = NULL; |
| + cache->fMu.releaseShared(); |
| + return NULL; |
| } |
| + cache->refCount += 1; |
| return cache; |
| } |
| void SkGlyphCache::AttachCache(SkGlyphCache* cache) { |
| SkASSERT(cache); |
| - SkASSERT(cache->fNext == NULL); |
| - |
| - get_globals().attachCacheToHead(cache); |
| + cache->fMu.releaseShared(); |
| + SkGlyphCache_Globals& globals = get_globals(); |
| + AutoAcquire ac(globals.fLock); |
| + globals.validate(); |
| + cache->validate(); |
| + if (cache->refCount == 0) { |
| + delete cache; |
| + } |
| + globals.internalPurge(); |
| } |
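
The lifetime rule the diff is building toward: the global list holds one reference to each cache and every VisitCache() caller holds another, so whichever side drops refCount to zero (under fLock) deletes the cache. AttachCache as shown tests refCount without an explicit decrement, so the sketch below is one plausible reading of the intended protocol rather than a transcription:

    #include <mutex>

    struct Cache { int refCount = 0; };  // guarded by gLock, like fLock in the CL

    static std::mutex gLock;

    // Either side -- the purger detaching from the list, or a user returning a
    // cache via AttachCache -- drops its reference; the last one out deletes.
    void dropRef(Cache* cache) {
        std::lock_guard<std::mutex> lock(gLock);
        cache->refCount -= 1;
        if (0 == cache->refCount) {
            delete cache;
        }
    }
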
| void SkGlyphCache::Dump() { |
| @@ -422,10 +486,16 @@ void SkGlyphCache::Dump() { |
| void SkGlyphCache_Globals::attachCacheToHead(SkGlyphCache* cache) { |
| AutoAcquire ac(fLock); |
| + fCacheCount += 1; |
| + cache->refCount += 1; |
| + // Access to cache->fMemoryUsed is single threaded until internalMoveToHead. |
| + fTotalMemoryUsed.fetch_add(cache->fMemoryUsed); |
| + |
| + this->internalMoveToHead(cache); |
| + |
| this->validate(); |
| cache->validate(); |
| - this->internalAttachCacheToHead(cache); |
| this->internalPurge(); |
| } |
| @@ -443,13 +513,13 @@ size_t SkGlyphCache_Globals::internalPurge(size_t minBytesNeeded) { |
| this->validate(); |
| size_t bytesNeeded = 0; |
| - if (fTotalMemoryUsed > fCacheSizeLimit) { |
| - bytesNeeded = fTotalMemoryUsed - fCacheSizeLimit; |
| + if (fTotalMemoryUsed.load() > fCacheSizeLimit) { |
| + bytesNeeded = fTotalMemoryUsed.load() - fCacheSizeLimit; |
| } |
| bytesNeeded = SkTMax(bytesNeeded, minBytesNeeded); |
| if (bytesNeeded) { |
| // no small purges! |
| - bytesNeeded = SkTMax(bytesNeeded, fTotalMemoryUsed >> 2); |
| + bytesNeeded = SkTMax(bytesNeeded, fTotalMemoryUsed.load() >> 2); |
| } |
| int countNeeded = 0; |
| @@ -477,7 +547,9 @@ size_t SkGlyphCache_Globals::internalPurge(size_t minBytesNeeded) { |
| countFreed += 1; |
| this->internalDetachCache(cache); |
| - SkDELETE(cache); |
| + if (0 == cache->refCount) { |
| + SkDELETE(cache); |
| + } |
| cache = prev; |
| } |
| @@ -493,22 +565,27 @@ size_t SkGlyphCache_Globals::internalPurge(size_t minBytesNeeded) { |
| return bytesFreed; |
| } |
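
fTotalMemoryUsed becomes an atomic so increaseMemoryUsed() can bump the global total without taking fLock, while the purge logic reads it with load(). The equivalent with std::atomic (assuming Skia's SkAtomic behaves the same way):

    #include <atomic>
    #include <cstddef>

    class MemoryTotal {
    public:
        void   add(size_t bytes)    { fTotal.fetch_add(bytes, std::memory_order_relaxed); }
        void   remove(size_t bytes) { fTotal.fetch_sub(bytes, std::memory_order_relaxed); }
        size_t load() const         { return fTotal.load(std::memory_order_relaxed); }

    private:
        std::atomic<size_t> fTotal{0};
    };

Relaxed ordering is enough for a counter that only feeds the purge heuristic; no other data is published through it.
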
| -void SkGlyphCache_Globals::internalAttachCacheToHead(SkGlyphCache* cache) { |
| - SkASSERT(NULL == cache->fPrev && NULL == cache->fNext); |
| - if (fHead) { |
| - fHead->fPrev = cache; |
| - cache->fNext = fHead; |
| +void SkGlyphCache_Globals::internalMoveToHead(SkGlyphCache *cache) { |
| + if (cache != fHead) { |
| + if (cache->fPrev) { |
| + cache->fPrev->fNext = cache->fNext; |
| + } |
| + if (cache->fNext) { |
| + cache->fNext->fPrev = cache->fPrev; |
| + } |
| + cache->fNext = NULL; |
| + cache->fPrev = NULL; |
| + if (fHead) { |
| + fHead->fPrev = cache; |
| + cache->fNext = fHead; |
| + } |
| + fHead = cache; |
| } |
| - fHead = cache; |
| - |
| - fCacheCount += 1; |
| - fTotalMemoryUsed += cache->fMemoryUsed; |
| } |
| void SkGlyphCache_Globals::internalDetachCache(SkGlyphCache* cache) { |
| - SkASSERT(fCacheCount > 0); |
| fCacheCount -= 1; |
| - fTotalMemoryUsed -= cache->fMemoryUsed; |
| + fTotalMemoryUsed.fetch_add(-cache->fMemoryUsed); |
| if (cache->fPrev) { |
| cache->fPrev->fNext = cache->fNext; |
| @@ -519,8 +596,11 @@ void SkGlyphCache_Globals::internalDetachCache(SkGlyphCache* cache) { |
| cache->fNext->fPrev = cache->fPrev; |
| } |
| cache->fPrev = cache->fNext = NULL; |
| + cache->refCount -= 1; |
| + |
| } |
| + |
| /////////////////////////////////////////////////////////////////////////////// |
| #ifdef SK_DEBUG |
| @@ -539,20 +619,16 @@ void SkGlyphCache::validate() const { |
| } |
| void SkGlyphCache_Globals::validate() const { |
| - size_t computedBytes = 0; |
| int computedCount = 0; |
| - const SkGlyphCache* head = fHead; |
| + SkGlyphCache* head = fHead; |
| while (head != NULL) { |
| - computedBytes += head->fMemoryUsed; |
| computedCount += 1; |
| head = head->fNext; |
| } |
| SkASSERTF(fCacheCount == computedCount, "fCacheCount: %d, computedCount: %d", fCacheCount, |
| computedCount); |
| - SkASSERTF(fTotalMemoryUsed == computedBytes, "fTotalMemoryUsed: %d, computedBytes: %d", |
| - fTotalMemoryUsed, computedBytes); |
| } |
| #endif |