| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright 2014 Google Inc. | 2 * Copyright 2014 Google Inc. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
| 5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
| 6 */ | 6 */ |
| 7 | 7 |
| 8 | 8 |
| 9 #include "GrResourceCache.h" | 9 #include "GrResourceCache.h" |
| 10 | 10 |
| (...skipping 39 matching lines...) | |
| 50 | 50 |
| 51 class GrResourceCache::AutoValidate : ::SkNoncopyable { | 51 class GrResourceCache::AutoValidate : ::SkNoncopyable { |
| 52 public: | 52 public: |
| 53 AutoValidate(GrResourceCache* cache) : fCache(cache) { cache->validate(); } | 53 AutoValidate(GrResourceCache* cache) : fCache(cache) { cache->validate(); } |
| 54 ~AutoValidate() { fCache->validate(); } | 54 ~AutoValidate() { fCache->validate(); } |
| 55 private: | 55 private: |
| 56 GrResourceCache* fCache; | 56 GrResourceCache* fCache; |
| 57 }; | 57 }; |
| 58 | 58 |
| 59 ////////////////////////////////////////////////////////////////////////////// | 59 ////////////////////////////////////////////////////////////////////////////// |
| 60 constexpr int GrResourceCache::kStrategyScoreMin; | 60 |
| 61 constexpr int GrResourceCache::kStrategyScoreMax; | |
| 62 constexpr int GrResourceCache::kInitialStrategyScore; | |
| 63 | 61 |
| 64 GrResourceCache::GrResourceCache(const GrCaps* caps) | 62 GrResourceCache::GrResourceCache(const GrCaps* caps) |
| 65 : fTimestamp(0) | 63 : fTimestamp(0) |
| 66 , fMaxCount(kDefaultMaxCount) | 64 , fMaxCount(kDefaultMaxCount) |
| 67 , fMaxBytes(kDefaultMaxSize) | 65 , fMaxBytes(kDefaultMaxSize) |
| 68 , fMaxUnusedFlushes(kDefaultMaxUnusedFlushes) | 66 , fMaxUnusedFlushes(kDefaultMaxUnusedFlushes) |
| 69 , fStrategy(ReplacementStrategy::kLRU) | |
| 70 , fStrategyScore(kInitialStrategyScore) | |
| 71 , fTotalMissesThisFlush(0) | |
| 72 , fMissesThisFlushPurgedRecently(0) | |
| 73 , fUniqueKeysPurgedThisFlushStorage {new SkChunkAlloc(8*sizeof(GrUniqueKey)), | |
| 74 new SkChunkAlloc(8*sizeof(GrUniqueKey))} | |
| 75 , fFlushParity(0) | |
| 76 #if GR_CACHE_STATS | 67 #if GR_CACHE_STATS |
| 77 , fHighWaterCount(0) | 68 , fHighWaterCount(0) |
| 78 , fHighWaterBytes(0) | 69 , fHighWaterBytes(0) |
| 79 , fBudgetedHighWaterCount(0) | 70 , fBudgetedHighWaterCount(0) |
| 80 , fBudgetedHighWaterBytes(0) | 71 , fBudgetedHighWaterBytes(0) |
| 81 #endif | 72 #endif |
| 82 , fBytes(0) | 73 , fBytes(0) |
| 83 , fBudgetedCount(0) | 74 , fBudgetedCount(0) |
| 84 , fBudgetedBytes(0) | 75 , fBudgetedBytes(0) |
| 76 , fRequestFlush(false) |
| 85 , fExternalFlushCnt(0) | 77 , fExternalFlushCnt(0) |
| 86 , fIsPurging(false) | |
| 87 , fPreferVRAMUseOverFlushes(caps->preferVRAMUseOverFlushes()) { | 78 , fPreferVRAMUseOverFlushes(caps->preferVRAMUseOverFlushes()) { |
| 88 SkDEBUGCODE(fCount = 0;) | 79 SkDEBUGCODE(fCount = 0;) |
| 89 SkDEBUGCODE(fNewlyPurgeableResourceForValidation = nullptr;) | 80 SkDEBUGCODE(fNewlyPurgeableResourceForValidation = nullptr;) |
| 90 } | 81 } |
| 91 | 82 |
| 92 GrResourceCache::~GrResourceCache() { | 83 GrResourceCache::~GrResourceCache() { |
| 93 this->releaseAll(); | 84 this->releaseAll(); |
| 94 delete fUniqueKeysPurgedThisFlushStorage[0]; | |
| 95 delete fUniqueKeysPurgedThisFlushStorage[1]; | |
| 96 } | 85 } |
| 97 | 86 |
| 98 void GrResourceCache::setLimits(int count, size_t bytes, int maxUnusedFlushes) { | 87 void GrResourceCache::setLimits(int count, size_t bytes, int maxUnusedFlushes) { |
| 99 fMaxCount = count; | 88 fMaxCount = count; |
| 100 fMaxBytes = bytes; | 89 fMaxBytes = bytes; |
| 101 fMaxUnusedFlushes = maxUnusedFlushes; | 90 fMaxUnusedFlushes = maxUnusedFlushes; |
| 102 this->purgeAsNeeded(); | 91 this->purgeAsNeeded(); |
| 103 } | 92 } |
| 104 | 93 |
| 105 void GrResourceCache::insertResource(GrGpuResource* resource) { | 94 void GrResourceCache::insertResource(GrGpuResource* resource) { |
| (...skipping 125 matching lines...) | |
| 231 return !fRejectPendingIO || !resource->internalHasPendingIO(); | 220 return !fRejectPendingIO || !resource->internalHasPendingIO(); |
| 232 } | 221 } |
| 233 | 222 |
| 234 private: | 223 private: |
| 235 bool fRejectPendingIO; | 224 bool fRejectPendingIO; |
| 236 }; | 225 }; |
| 237 | 226 |
| 238 GrGpuResource* GrResourceCache::findAndRefScratchResource(const GrScratchKey& scratchKey, | 227 GrGpuResource* GrResourceCache::findAndRefScratchResource(const GrScratchKey& scratchKey, |
| 239 size_t resourceSize, | 228 size_t resourceSize, |
| 240 uint32_t flags) { | 229 uint32_t flags) { |
| 241 // We don't currently track misses for scratch resources for selecting the replacement policy. | |
| 242 // The reason is that it is common to look for a scratch resource before creating a texture | |
| 243 // that will immediately become uniquely keyed. | |
| 244 SkASSERT(scratchKey.isValid()); | 230 SkASSERT(scratchKey.isValid()); |
| 231 |
| 245 GrGpuResource* resource; | 232 GrGpuResource* resource; |
| 246 if (flags & (kPreferNoPendingIO_ScratchFlag | kRequireNoPendingIO_ScratchFlag)) { | 233 if (flags & (kPreferNoPendingIO_ScratchFlag | kRequireNoPendingIO_ScratchFlag)) { |
| 247 resource = fScratchMap.find(scratchKey, AvailableForScratchUse(true)); | 234 resource = fScratchMap.find(scratchKey, AvailableForScratchUse(true)); |
| 248 if (resource) { | 235 if (resource) { |
| 249 this->refAndMakeResourceMRU(resource); | 236 this->refAndMakeResourceMRU(resource); |
| 250 this->validate(); | 237 this->validate(); |
| 251 return resource; | 238 return resource; |
| 252 } else if (flags & kRequireNoPendingIO_ScratchFlag) { | 239 } else if (flags & kRequireNoPendingIO_ScratchFlag) { |
| 253 return nullptr; | 240 return nullptr; |
| 254 } | 241 } |
| 255 // We would prefer to consume more available VRAM rather than flushing | 242 // We would prefer to consume more available VRAM rather than flushing |
| 256 // immediately, but on ANGLE this can lead to starving of the GPU. | 243 // immediately, but on ANGLE this can lead to starving of the GPU. |
| 257 if (fPreferVRAMUseOverFlushes && this->wouldFit(resourceSize)) { | 244 if (fPreferVRAMUseOverFlushes && this->wouldFit(resourceSize)) { |
| 258 // kPrefer is specified, we didn't find a resource without pending io, | 245 // kPrefer is specified, we didn't find a resource without pending io, |
| 259 // but there is still space in our budget for the resource so force | 246 // but there is still space in our budget for the resource so force |
| 260 // the caller to allocate a new resource. | 247 // the caller to allocate a new resource. |
| 261 return nullptr; | 248 return nullptr; |
| 262 } | 249 } |
| 263 } | 250 } |
| 264 resource = fScratchMap.find(scratchKey, AvailableForScratchUse(false)); | 251 resource = fScratchMap.find(scratchKey, AvailableForScratchUse(false)); |
| 265 if (resource) { | 252 if (resource) { |
| 266 this->refAndMakeResourceMRU(resource); | 253 this->refAndMakeResourceMRU(resource); |
| 267 this->validate(); | 254 this->validate(); |
| 268 } | 255 } |
| 269 return resource; | 256 return resource; |
| 270 } | 257 } |
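
The flag handling above is a small decision tree: a kRequireNoPendingIO caller either gets an idle resource or nothing, while a kPrefer caller falls back to a pending-IO resource unless the budget still has room for a fresh allocation. A minimal caller-side sketch of that flow, assuming the flag and method names shown in this diff (the acquireScratch helper itself is hypothetical):

    // Hypothetical helper showing the lookup order the flags above imply.
    GrGpuResource* acquireScratch(GrResourceCache* cache,
                                  const GrScratchKey& key, size_t size) {
        // Prefer an idle resource, but accept one with pending IO rather than
        // forcing a flush, unless there is still VRAM headroom for a new one.
        uint32_t flags = GrResourceCache::kPreferNoPendingIO_ScratchFlag;  // assumed accessible here
        if (GrGpuResource* r = cache->findAndRefScratchResource(key, size, flags)) {
            return r;        // reuse from the cache
        }
        return nullptr;      // caller allocates a new resource instead
    }
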
| 271 | 258 |
| 272 GrGpuResource* GrResourceCache::findAndRefUniqueResource(const GrUniqueKey& key) { | |
| 273 GrGpuResource* resource = fUniqueHash.find(key); | |
| 274 if (resource) { | |
| 275 this->refAndMakeResourceMRU(resource); | |
| 276 } else { | |
| 277 this->recordKeyMiss(key); | |
| 278 } | |
| 279 return resource; | |
| 280 } | |
| 281 | |
| 282 void GrResourceCache::recordKeyMiss(const GrUniqueKey& key) { | |
| 283 // If a resource with this key was purged either this flush or the previous flush, consider it | |
| 284 // a recent purge. | |
| 285 if (fUniqueKeysPurgedThisFlush[0].find(key) || fUniqueKeysPurgedThisFlush[1].find(key)) { | |
| 286 ++fMissesThisFlushPurgedRecently; | |
| 287 } | |
| 288 ++fTotalMissesThisFlush; | |
| 289 } | |
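
The removed miss accounting keys off two purged-key sets indexed by a flush parity bit, so "purged recently" means "purged during this flush or the previous one". A standalone sketch of that bookkeeping with ordinary containers in place of the Skia hash table and chunk allocator (all names below are illustrative, not Skia API):

    #include <string>
    #include <unordered_set>

    struct RecentPurgeTracker {
        std::unordered_set<std::string> purged[2];  // [parity] = keys purged in that flush
        int parity = 0;
        int missesPurgedRecently = 0;
        int totalMisses = 0;

        void recordPurge(const std::string& key) { purged[parity].insert(key); }

        void recordMiss(const std::string& key) {
            if (purged[0].count(key) || purged[1].count(key)) {
                ++missesPurgedRecently;   // we threw this away within the last two flushes
            }
            ++totalMisses;
        }

        void onExternalFlush() {
            parity ^= 1;                  // the other set now holds "previous flush"
            purged[parity].clear();       // start collecting for the new current flush
            missesPurgedRecently = totalMisses = 0;
        }
    };
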
| 290 | |
| 291 void GrResourceCache::willRemoveScratchKey(const GrGpuResource* resource) { | 259 void GrResourceCache::willRemoveScratchKey(const GrGpuResource* resource) { |
| 292 SkASSERT(resource->resourcePriv().getScratchKey().isValid()); | 260 SkASSERT(resource->resourcePriv().getScratchKey().isValid()); |
| 293 if (!resource->getUniqueKey().isValid()) { | 261 if (!resource->getUniqueKey().isValid()) { |
| 294 fScratchMap.remove(resource->resourcePriv().getScratchKey(), resource); | 262 fScratchMap.remove(resource->resourcePriv().getScratchKey(), resource); |
| 295 } | 263 } |
| 296 } | 264 } |
| 297 | 265 |
| 298 void GrResourceCache::removeUniqueKey(GrGpuResource* resource) { | 266 void GrResourceCache::removeUniqueKey(GrGpuResource* resource) { |
| 299 // Someone has a ref to this resource in order to have removed the key. When the ref count | 267 // Someone has a ref to this resource in order to have removed the key. When the ref count |
| 300 // reaches zero we will get a ref cnt notification and figure out what to do with it. | 268 // reaches zero we will get a ref cnt notification and figure out what to do with it. |
| (...skipping 104 matching lines...) | |
| 405 // We won't purge an existing resource to make room for this one. | 373 // We won't purge an existing resource to make room for this one. |
| 406 if (fBudgetedCount < fMaxCount && | 374 if (fBudgetedCount < fMaxCount && |
| 407 fBudgetedBytes + resource->gpuMemorySize() <= fMaxBytes) { | 375 fBudgetedBytes + resource->gpuMemorySize() <= fMaxBytes) { |
| 408 resource->resourcePriv().makeBudgeted(); | 376 resource->resourcePriv().makeBudgeted(); |
| 409 return; | 377 return; |
| 410 } | 378 } |
| 411 } | 379 } |
| 412 } else { | 380 } else { |
| 413 // Purge the resource immediately if we're over budget | 381 // Purge the resource immediately if we're over budget |
| 414 // Also purge if the resource has neither a valid scratch key nor a unique key. | 382 // Also purge if the resource has neither a valid scratch key nor a unique key. |
| 415 bool hasKey = resource->resourcePriv().getScratchKey().isValid() || | 383 bool noKey = !resource->resourcePriv().getScratchKey().isValid() && |
| 416 resource->getUniqueKey().isValid(); | 384 !resource->getUniqueKey().isValid(); |
| 417 if (hasKey) { | 385 if (!this->overBudget() && !noKey) { |
| 418 if (this->overBudget()) { | |
| 419 this->purgeAsNeeded(); | |
| 420 } | |
| 421 return; | 386 return; |
| 422 } | 387 } |
| 423 } | 388 } |
| 424 | 389 |
| 425 SkDEBUGCODE(int beforeCount = this->getResourceCount();) | 390 SkDEBUGCODE(int beforeCount = this->getResourceCount();) |
| 426 resource->cacheAccess().release(); | 391 resource->cacheAccess().release(); |
| 427 // We should at least free this resource, perhaps dependent resources as well. | 392 // We should at least free this resource, perhaps dependent resources as well. |
| 428 SkASSERT(this->getResourceCount() < beforeCount); | 393 SkASSERT(this->getResourceCount() < beforeCount); |
| 429 this->validate(); | 394 this->validate(); |
| 430 } | 395 } |
| (...skipping 39 matching lines...) | |
| 470 } else { | 435 } else { |
| 471 --fBudgetedCount; | 436 --fBudgetedCount; |
| 472 fBudgetedBytes -= size; | 437 fBudgetedBytes -= size; |
| 473 } | 438 } |
| 474 TRACE_COUNTER2(TRACE_DISABLED_BY_DEFAULT("skia.gpu.cache"), "skia budget", "used", | 439 TRACE_COUNTER2(TRACE_DISABLED_BY_DEFAULT("skia.gpu.cache"), "skia budget", "used", |
| 475 fBudgetedBytes, "free", fMaxBytes - fBudgetedBytes); | 440 fBudgetedBytes, "free", fMaxBytes - fBudgetedBytes); |
| 476 | 441 |
| 477 this->validate(); | 442 this->validate(); |
| 478 } | 443 } |
| 479 | 444 |
| 480 void GrResourceCache::recordPurgedKey(GrGpuResource* resource) { | 445 void GrResourceCache::purgeAsNeeded() { |
| 481 // This maximum exists to avoid allocating too much space for key tracking. | |
| 482 static constexpr int kMaxTrackedKeys = 256; | |
| 483 if (fUniqueKeysPurgedThisFlush[fFlushParity].count() >= kMaxTrackedKeys) { | |
| 484 return; | |
| 485 } | |
| 486 if (resource->getUniqueKey().isValid() && | |
| 487 !fUniqueKeysPurgedThisFlush[fFlushParity].find(resource->getUniqueKey())) { | |
| 488 void* p = fUniqueKeysPurgedThisFlushStorage[fFlushParity]->allocThrow(sizeof(GrUniqueKey)); | |
| 489 GrUniqueKey* copy = new (p) GrUniqueKey; | |
| 490 *copy = resource->getUniqueKey(); | |
| 491 fUniqueKeysPurgedThisFlush[fFlushParity].add(copy); | |
| 492 } | |
| 493 } | |
| 494 | |
| 495 GrGpuResource* GrResourceCache::selectResourceUsingStrategy() { | |
| 496 switch (fStrategy) { | |
| 497 case ReplacementStrategy::kLRU: | |
| 498 return fPurgeableQueue.peek(); | |
| 499 case ReplacementStrategy::kRandom: | |
| 500 return fPurgeableQueue.at(fRandom.nextULessThan(fPurgeableQueue.count())); | |
| 501 } | |
| 502 return nullptr; | |
| 503 } | |
| 504 | |
| 505 void GrResourceCache::internalPurgeAsNeeded(bool fromFlushNotification) { | |
| 506 if (fIsPurging) { | |
| 507 return; | |
| 508 } | |
| 509 fIsPurging = true; | |
| 510 SkTArray<GrUniqueKeyInvalidatedMessage> invalidKeyMsgs; | 446 SkTArray<GrUniqueKeyInvalidatedMessage> invalidKeyMsgs; |
| 511 fInvalidUniqueKeyInbox.poll(&invalidKeyMsgs); | 447 fInvalidUniqueKeyInbox.poll(&invalidKeyMsgs); |
| 512 if (invalidKeyMsgs.count()) { | 448 if (invalidKeyMsgs.count()) { |
| 513 this->processInvalidUniqueKeys(invalidKeyMsgs); | 449 this->processInvalidUniqueKeys(invalidKeyMsgs); |
| 514 } | 450 } |
| 515 | 451 |
| 516 if (fMaxUnusedFlushes > 0) { | 452 if (fMaxUnusedFlushes > 0) { |
| 517 // We want to know how many complete flushes have occurred without the resource being used. | 453 // We want to know how many complete flushes have occurred without the resource being used. |
| 518 // If the resource was tagged when fExternalFlushCnt was N then this means it became | 454 // If the resource was tagged when fExternalFlushCnt was N then this means it became |
| 519 // purgeable during activity that became the N+1th flush. So when the flush count is N+2 | 455 // purgeable during activity that became the N+1th flush. So when the flush count is N+2 |
| 520 // it has sat in the purgeable queue for one entire flush. | 456 // it has sat in the purgeable queue for one entire flush. |
| 521 uint32_t oldestAllowedFlushCnt = fExternalFlushCnt - fMaxUnusedFlushes - 1; | 457 uint32_t oldestAllowedFlushCnt = fExternalFlushCnt - fMaxUnusedFlushes - 1; |
| 522 // check for underflow | 458 // check for underflow |
| 523 if (oldestAllowedFlushCnt < fExternalFlushCnt) { | 459 if (oldestAllowedFlushCnt < fExternalFlushCnt) { |
| 524 while (fPurgeableQueue.count()) { | 460 while (fPurgeableQueue.count()) { |
| 525 uint32_t flushWhenResourceBecamePurgeable = | 461 uint32_t flushWhenResourceBecamePurgeable = |
| 526 fPurgeableQueue.peek()->cacheAccess().flushCntWhenResourceBecamePurgeable(); | 462 fPurgeableQueue.peek()->cacheAccess().flushCntWhenResourceBecamePurgeable(); |
| 527 if (oldestAllowedFlushCnt < flushWhenResourceBecamePurgeable) { | 463 if (oldestAllowedFlushCnt < flushWhenResourceBecamePurgeable) { |
| 528 // Resources were given both LRU timestamps and tagged with a flush cnt when | 464 // Resources were given both LRU timestamps and tagged with a flush cnt when |
| 529 // they first became purgeable. The LRU timestamp won't change again until the | 465 // they first became purgeable. The LRU timestamp won't change again until the |
| 530 // resource is made non-purgeable again. So, at this point all the remaining | 466 // resource is made non-purgeable again. So, at this point all the remaining |
| 531 // resources in the timestamp-sorted queue will have a flush count >= to this | 467 // resources in the timestamp-sorted queue will have a flush count >= to this |
| 532 // one. | 468 // one. |
| 533 break; | 469 break; |
| 534 } | 470 } |
| 535 GrGpuResource* resource = fPurgeableQueue.peek(); | 471 GrGpuResource* resource = fPurgeableQueue.peek(); |
| 536 SkASSERT(resource->isPurgeable()); | 472 SkASSERT(resource->isPurgeable()); |
| 537 this->recordPurgedKey(resource); | |
| 538 resource->cacheAccess().release(); | 473 resource->cacheAccess().release(); |
| 539 } | 474 } |
| 540 } | 475 } |
| 541 } | 476 } |
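
A worked example of the window computed above, assuming illustrative values fMaxUnusedFlushes = 2 and fExternalFlushCnt = 10:

    // oldestAllowedFlushCnt = 10 - 2 - 1 = 7
    // A resource tagged with flush cnt 7 became purgeable during the activity that
    // became flush 8; by the time the external flush count reaches 10 it has sat
    // unused through two complete flushes, so the loop releases it. A resource
    // tagged with 8 or later is kept (the loop breaks because 7 < 8). The
    // "check for underflow" guard skips this pass entirely while fExternalFlushCnt
    // is still small enough that the unsigned subtraction would wrap around.
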
| 542 | 477 |
| 543 if (ReplacementStrategy::kRandom == fStrategy && !fromFlushNotification) { | |
| 544 // Wait until after the requested flush when all the pending IO resources will be eligible | |
| 545 // for the draft. | |
| 546 SkASSERT(!this->overBudget() || this->requestsFlush()); | |
| 547 fIsPurging = false; | |
| 548 return; | |
| 549 } | |
| 550 | |
| 551 bool stillOverbudget = this->overBudget(); | 478 bool stillOverbudget = this->overBudget(); |
| 552 while (stillOverbudget && fPurgeableQueue.count()) { | 479 while (stillOverbudget && fPurgeableQueue.count()) { |
| 553 GrGpuResource* resource = this->selectResourceUsingStrategy(); | 480 GrGpuResource* resource = fPurgeableQueue.peek(); |
| 554 SkASSERT(resource->isPurgeable()); | 481 SkASSERT(resource->isPurgeable()); |
| 555 this->recordPurgedKey(resource); | |
| 556 resource->cacheAccess().release(); | 482 resource->cacheAccess().release(); |
| 557 stillOverbudget = this->overBudget(); | 483 stillOverbudget = this->overBudget(); |
| 558 } | 484 } |
| 559 | 485 |
| 560 this->validate(); | 486 this->validate(); |
| 561 fIsPurging = false; | 487 |
| 488 if (stillOverbudget) { |
| | 489 // Set this so that GrDrawingManager will issue a flush to free up resources with pending |
| 490 // IO that we were unable to purge in this pass. |
| 491 fRequestFlush = true; |
| 492 } |
| 562 } | 493 } |
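
The new control flow replaces strategy-driven victim selection with a plain LRU sweep plus a deferred flush request: release from the front of the purgeable queue until under budget, and if the purgeable resources run out first, set fRequestFlush so GrDrawingManager issues a flush and pending-IO resources can be purged on a later pass. A condensed, standalone sketch of that loop (the cache interface here is a stand-in, not the Skia API):

    template <typename Cache>
    void purgeUntilUnderBudget(Cache* cache) {
        bool stillOverbudget = cache->overBudget();
        while (stillOverbudget && cache->purgeableCount() > 0) {
            cache->releaseLruPurgeable();     // fPurgeableQueue.peek() + release() in the new code
            stillOverbudget = cache->overBudget();
        }
        if (stillOverbudget) {
            cache->requestFlush();            // corresponds to fRequestFlush = true
        }
    }
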
| 563 | 494 |
| 564 void GrResourceCache::purgeAllUnlocked() { | 495 void GrResourceCache::purgeAllUnlocked() { |
| 565 // We could disable maintaining the heap property here, but it would add a lot of complexity. | 496 // We could disable maintaining the heap property here, but it would add a lot of complexity. |
| 566 // Moreover, this is rarely called. | 497 // Moreover, this is rarely called. |
| 567 while (fPurgeableQueue.count()) { | 498 while (fPurgeableQueue.count()) { |
| 568 GrGpuResource* resource = fPurgeableQueue.peek(); | 499 GrGpuResource* resource = fPurgeableQueue.peek(); |
| 569 SkASSERT(resource->isPurgeable()); | 500 SkASSERT(resource->isPurgeable()); |
| 570 resource->cacheAccess().release(); | 501 resource->cacheAccess().release(); |
| 571 } | 502 } |
| (...skipping 39 matching lines...) | |
| 611 // Reset all the timestamps. We sort the resources by timestamp and then assign | 542 // Reset all the timestamps. We sort the resources by timestamp and then assign |
| 612 // sequential timestamps beginning with 0. This is O(n*lg(n)) but it should be extremely | 543 // sequential timestamps beginning with 0. This is O(n*lg(n)) but it should be extremely |
| 613 // rare. | 544 // rare. |
| 614 SkTDArray<GrGpuResource*> sortedPurgeableResources; | 545 SkTDArray<GrGpuResource*> sortedPurgeableResources; |
| 615 sortedPurgeableResources.setReserve(fPurgeableQueue.count()); | 546 sortedPurgeableResources.setReserve(fPurgeableQueue.count()); |
| 616 | 547 |
| 617 while (fPurgeableQueue.count()) { | 548 while (fPurgeableQueue.count()) { |
| 618 *sortedPurgeableResources.append() = fPurgeableQueue.peek(); | 549 *sortedPurgeableResources.append() = fPurgeableQueue.peek(); |
| 619 fPurgeableQueue.pop(); | 550 fPurgeableQueue.pop(); |
| 620 } | 551 } |
| 552 |
| 621 SkTQSort(fNonpurgeableResources.begin(), fNonpurgeableResources.end() - 1, | 553 SkTQSort(fNonpurgeableResources.begin(), fNonpurgeableResources.end() - 1, |
| 622 CompareTimestamp); | 554 CompareTimestamp); |
| 623 | 555 |
| 624 // Pick resources out of the purgeable and non-purgeable arrays based on lowest | 556 // Pick resources out of the purgeable and non-purgeable arrays based on lowest |
| 625 // timestamp and assign new timestamps. | 557 // timestamp and assign new timestamps. |
| 626 int currP = 0; | 558 int currP = 0; |
| 627 int currNP = 0; | 559 int currNP = 0; |
| 628 while (currP < sortedPurgeableResources.count() && | 560 while (currP < sortedPurgeableResources.count() && |
| 629 currNP < fNonpurgeableResources.count()) { | 561 currNP < fNonpurgeableResources.count()) { |
| 630 uint32_t tsP = sortedPurgeableResources[currP]->cacheAccess().timestamp(); | 562 uint32_t tsP = sortedPurgeableResources[currP]->cacheAccess().timestamp(); |
| (...skipping 30 matching lines...) | |
| 661 } | 593 } |
| 662 } | 594 } |
| 663 return fTimestamp++; | 595 return fTimestamp++; |
| 664 } | 596 } |
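
The timestamp compaction described in the comments above is a two-way merge: both arrays are ordered by old timestamp, the element with the smaller old timestamp is taken next, and new timestamps are handed out sequentially from 0. Since the middle of the Skia loop is elided from this diff, here is a standalone sketch of the same idea (the Res type and vectors are illustrative, not the Skia containers):

    #include <cstdint>
    #include <vector>

    struct Res { uint32_t timestamp; };

    // Both inputs are assumed already sorted by their old timestamps.
    void compactTimestamps(std::vector<Res*>& purgeable, std::vector<Res*>& nonpurgeable) {
        uint32_t next = 0;
        size_t p = 0, np = 0;
        while (p < purgeable.size() && np < nonpurgeable.size()) {
            Res* pick = (purgeable[p]->timestamp < nonpurgeable[np]->timestamp)
                            ? purgeable[p++]
                            : nonpurgeable[np++];
            pick->timestamp = next++;   // reassign sequential timestamps starting at 0
        }
        while (p < purgeable.size())     { purgeable[p++]->timestamp = next++; }
        while (np < nonpurgeable.size()) { nonpurgeable[np++]->timestamp = next++; }
    }
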
| 665 | 597 |
| 666 void GrResourceCache::notifyFlushOccurred(FlushType type) { | 598 void GrResourceCache::notifyFlushOccurred(FlushType type) { |
| 667 switch (type) { | 599 switch (type) { |
| 668 case FlushType::kImmediateMode: | 600 case FlushType::kImmediateMode: |
| 669 break; | 601 break; |
| 670 case FlushType::kCacheRequested: | 602 case FlushType::kCacheRequested: |
| 603 SkASSERT(fRequestFlush); |
| 604 fRequestFlush = false; |
| 671 break; | 605 break; |
| 672 case FlushType::kExternal: { | 606 case FlushType::kExternal: |
| 673 int scoreDelta = 1; | |
| 674 if (fMissesThisFlushPurgedRecently) { | |
| 675 // If > 60% of our cache misses were things we purged in the last two flushes | |
| 676 // then we move closer towards selecting random replacement. | |
| 677 if ((float)fMissesThisFlushPurgedRecently / fTotalMissesThisFlush > 0.6f) { | |
| 678 scoreDelta = -1; | |
| 679 } | |
| 680 } | |
| 681 fStrategyScore = SkTPin(fStrategyScore + scoreDelta, kStrategyScoreMin, | |
| 682 kStrategyScoreMax); | |
| 683 fStrategy = fStrategyScore < 0 ? ReplacementStrategy::kRandom | |
| 684 : ReplacementStrategy::kLRU; | |
| 685 fMissesThisFlushPurgedRecently = 0; | |
| 686 fTotalMissesThisFlush = 0; | |
| 687 fFlushParity = -(fFlushParity - 1); | |
| 688 fUniqueKeysPurgedThisFlush[fFlushParity].reset(); | |
| 689 fUniqueKeysPurgedThisFlushStorage[fFlushParity]->rewind(); | |
| 690 ++fExternalFlushCnt; | 607 ++fExternalFlushCnt; |
| 691 if (0 == fExternalFlushCnt) { | 608 if (0 == fExternalFlushCnt) { |
| 692 // When this wraps just reset all the purgeable resources' last used flush state. | 609 // When this wraps just reset all the purgeable resources' last used flush state. |
| 693 for (int i = 0; i < fPurgeableQueue.count(); ++i) { | 610 for (int i = 0; i < fPurgeableQueue.count(); ++i) { |
| 694 fPurgeableQueue.at(i)->cacheAccess().setFlushCntWhenResourceBecamePurgeable(0); | 611 fPurgeableQueue.at(i)->cacheAccess().setFlushCntWhenResourceBecamePurgeable(0); |
| 695 } | 612 } |
| 696 } | 613 } |
| 697 break; | 614 break; |
| 698 } | |
| 699 } | 615 } |
| 700 this->internalPurgeAsNeeded(true); | 616 this->purgeAsNeeded(); |
| 701 } | 617 } |
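
For reference, the removed kExternal branch implemented a small adaptive heuristic: if more than 60% of this flush's unique-key misses were for keys purged within the last two flushes, the score drifts toward random replacement, otherwise back toward LRU, and the sign of the pinned score selects the strategy. A standalone sketch of that update (the enum and bounds below are illustrative stand-ins; the real kStrategyScoreMin/Max and kInitialStrategyScore values are not shown in this diff):

    enum class Strategy { kLRU, kRandom };

    struct StrategyScore {
        static constexpr int kMin = -5;   // stand-in for kStrategyScoreMin
        static constexpr int kMax =  5;   // stand-in for kStrategyScoreMax
        int score = 0;                    // stand-in for kInitialStrategyScore

        // Called once per external flush with this flush's miss counters.
        Strategy onExternalFlush(int missesPurgedRecently, int totalMisses) {
            int delta = +1;               // default: drift back toward LRU
            if (totalMisses > 0 &&
                (float)missesPurgedRecently / (float)totalMisses > 0.6f) {
                delta = -1;               // most misses were recently purged: drift toward random
            }
            score += delta;
            if (score < kMin) { score = kMin; }
            if (score > kMax) { score = kMax; }
            return (score < 0) ? Strategy::kRandom : Strategy::kLRU;
        }
    };
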
| 702 | 618 |
| 703 void GrResourceCache::dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const { | 619 void GrResourceCache::dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const { |
| 704 for (int i = 0; i < fNonpurgeableResources.count(); ++i) { | 620 for (int i = 0; i < fNonpurgeableResources.count(); ++i) { |
| 705 fNonpurgeableResources[i]->dumpMemoryStatistics(traceMemoryDump); | 621 fNonpurgeableResources[i]->dumpMemoryStatistics(traceMemoryDump); |
| 706 } | 622 } |
| 707 for (int i = 0; i < fPurgeableQueue.count(); ++i) { | 623 for (int i = 0; i < fPurgeableQueue.count(); ++i) { |
| 708 fPurgeableQueue.at(i)->dumpMemoryStatistics(traceMemoryDump); | 624 fPurgeableQueue.at(i)->dumpMemoryStatistics(traceMemoryDump); |
| 709 } | 625 } |
| 710 } | 626 } |
| (...skipping 127 matching lines...) | |
| 838 return true; | 754 return true; |
| 839 } | 755 } |
| 840 if (index < fNonpurgeableResources.count() && fNonpurgeableResources[index] == resource) { | 756 if (index < fNonpurgeableResources.count() && fNonpurgeableResources[index] == resource) { |
| 841 return true; | 757 return true; |
| 842 } | 758 } |
| 843 SkDEBUGFAIL("Resource index should be -1 or the resource should be in the cache."); | 759 SkDEBUGFAIL("Resource index should be -1 or the resource should be in the cache."); |
| 844 return false; | 760 return false; |
| 845 } | 761 } |
| 846 | 762 |
| 847 #endif | 763 #endif |