Index: src/effects/gradients/SkGradientShader.cpp
diff --git a/src/effects/gradients/SkGradientShader.cpp b/src/effects/gradients/SkGradientShader.cpp
index 2e92076b3add195c08158967c451a6e8b324dc40..46e0c9502dd089fda6cb27d77df7da64d9118497 100644
--- a/src/effects/gradients/SkGradientShader.cpp
+++ b/src/effects/gradients/SkGradientShader.cpp
@@ -15,8 +15,6 @@
SkGradientShaderBase::SkGradientShaderBase(const Descriptor& desc) {
SkASSERT(desc.fCount > 1);
- fCacheAlpha = 256; // init to a value that paint.getAlpha() can't return
-
fMapper = desc.fMapper;
SkSafeRef(fMapper);
fGradFlags = SkToU8(desc.fGradFlags);
@@ -26,10 +24,6 @@ SkGradientShaderBase::SkGradientShaderBase(const Descriptor& desc) {
fTileMode = desc.fTileMode;
fTileProc = gTileProcs[desc.fTileMode];
- fCache16 = fCache16Storage = NULL;
- fCache32 = NULL;
- fCache32PixelRef = NULL;
-
/* Note: we let the caller skip the first and/or last position.
i.e. pos[0] = 0.3, pos[1] = 0.7
In these cases, we insert dummy entries to ensure that the final data
@@ -146,14 +140,8 @@ static uint32_t unpack_flags(uint32_t packed) {
}
SkGradientShaderBase::SkGradientShaderBase(SkReadBuffer& buffer) : INHERITED(buffer) {
- fCacheAlpha = 256;
-
fMapper = buffer.readUnitMapper();
- fCache16 = fCache16Storage = NULL;
- fCache32 = NULL;
- fCache32PixelRef = NULL;
-
int colorCount = fColorCount = buffer.getArrayCount();
if (colorCount > kColorStorageCount) {
size_t allocSize = (sizeof(SkColor) + sizeof(SkPMColor) + sizeof(Rec)) * colorCount;
@@ -188,10 +176,6 @@ SkGradientShaderBase::SkGradientShaderBase(SkReadBuffer& buffer) : INHERITED(buf
}
SkGradientShaderBase::~SkGradientShaderBase() {
- if (fCache16Storage) {
- sk_free(fCache16Storage);
- }
- SkSafeUnref(fCache32PixelRef);
if (fOrigColors != fStorage) {
sk_free(fOrigColors);
}
@@ -199,7 +183,6 @@ SkGradientShaderBase::~SkGradientShaderBase() {
}
void SkGradientShaderBase::initCommon() {
- fFlags = 0;
unsigned colorAlpha = 0xFF;
for (int i = 0; i < fColorCount; i++) {
colorAlpha &= SkColorGetA(fOrigColors[i]);
@@ -267,49 +250,50 @@ bool SkGradientShaderBase::isOpaque() const {
return fColorsAreOpaque;
}
-bool SkGradientShaderBase::setContext(const SkBitmap& device,
- const SkPaint& paint,
- const SkMatrix& matrix) {
- if (!this->INHERITED::setContext(device, paint, matrix)) {
- return false;
- }
-
+SkGradientShaderBase::GradientShaderBaseContext::GradientShaderBaseContext(
+ const SkGradientShaderBase& shader, const SkBitmap& device,
+ const SkPaint& paint, const SkMatrix& matrix)
+ : INHERITED(shader, device, paint, matrix)
+ , fCache(shader.refCache(getPaintAlpha()))
+{
const SkMatrix& inverse = this->getTotalInverse();
- fDstToIndex.setConcat(fPtsToUnit, inverse);
+ fDstToIndex.setConcat(shader.fPtsToUnit, inverse);
+
fDstToIndexProc = fDstToIndex.getMapXYProc();
- fDstToIndexClass = (uint8_t)SkShader::ComputeMatrixClass(fDstToIndex);
+ fDstToIndexClass = (uint8_t)SkShader::Context::ComputeMatrixClass(fDstToIndex);
// now convert our colors in to PMColors
unsigned paintAlpha = this->getPaintAlpha();
fFlags = this->INHERITED::getFlags();
- if (fColorsAreOpaque && paintAlpha == 0xFF) {
+ if (shader.fColorsAreOpaque && paintAlpha == 0xFF) {
fFlags |= kOpaqueAlpha_Flag;
}
// we can do span16 as long as our individual colors are opaque,
// regardless of the paint's alpha
- if (fColorsAreOpaque) {
+ if (shader.fColorsAreOpaque) {
fFlags |= kHasSpan16_Flag;
}
+}
- this->setCacheAlpha(paintAlpha);
- return true;
+SkGradientShaderBase::GradientShaderCache::GradientShaderCache(
+ U8CPU alpha, const SkGradientShaderBase& shader)
+ : fCacheAlpha(alpha)
+ , fShader(shader)
+ , fCache16Inited(false)
+ , fCache32Inited(false)
+{
+ // Only initialize the cache in getCache16/32.
+ fCache16 = NULL;
+ fCache32 = NULL;
+ fCache16Storage = NULL;
+ fCache32PixelRef = NULL;
}
-void SkGradientShaderBase::setCacheAlpha(U8CPU alpha) const {
- // if the new alpha differs from the previous time we were called, inval our cache
- // this will trigger the cache to be rebuilt.
- // we don't care about the first time, since the cache ptrs will already be NULL
- if (fCacheAlpha != alpha) {
- fCache16 = NULL; // inval the cache
- fCache32 = NULL; // inval the cache
- fCacheAlpha = alpha; // record the new alpha
- // inform our subclasses
- if (fCache32PixelRef) {
- fCache32PixelRef->notifyPixelsChanged();
- }
- }
+SkGradientShaderBase::GradientShaderCache::~GradientShaderCache() {
+ sk_free(fCache16Storage);
+ SkSafeUnref(fCache32PixelRef);
}
#define Fixed_To_Dot8(x) (((x) + 0x80) >> 8)
@@ -318,8 +302,8 @@ void SkGradientShaderBase::setCacheAlpha(U8CPU alpha) const {
build a 16bit table as long as the original colors are opaque, even if the
paint specifies a non-opaque alpha.
*/
-void SkGradientShaderBase::Build16bitCache(uint16_t cache[], SkColor c0, SkColor c1,
- int count) {
+void SkGradientShaderBase::GradientShaderCache::Build16bitCache(
+ uint16_t cache[], SkColor c0, SkColor c1, int count) {
SkASSERT(count > 1);
SkASSERT(SkColorGetA(c0) == 0xFF);
SkASSERT(SkColorGetA(c1) == 0xFF);
@@ -367,8 +351,9 @@ void SkGradientShaderBase::Build16bitCache(uint16_t cache[], SkColor c0, SkColor
*/
typedef uint32_t SkUFixed;
-void SkGradientShaderBase::Build32bitCache(SkPMColor cache[], SkColor c0, SkColor c1,
- int count, U8CPU paintAlpha, uint32_t gradFlags) {
+void SkGradientShaderBase::GradientShaderCache::Build32bitCache(
+ SkPMColor cache[], SkColor c0, SkColor c1,
+ int count, U8CPU paintAlpha, uint32_t gradFlags) {
SkASSERT(count > 1);
// need to apply paintAlpha to our two endpoints
@@ -511,97 +496,121 @@ static inline U16CPU bitsTo16(unsigned x, const unsigned bits) {
return 0;
}
-const uint16_t* SkGradientShaderBase::getCache16() const {
- if (fCache16 == NULL) {
- // double the count for dither entries
- const int entryCount = kCache16Count * 2;
- const size_t allocSize = sizeof(uint16_t) * entryCount;
+const uint16_t* SkGradientShaderBase::GradientShaderCache::getCache16() {
+ SkOnce(&fCache16Inited, &fCache16Mutex, SkGradientShaderBase::GradientShaderCache::initCache16,
+ this);
+ SkASSERT(fCache16);
+ return fCache16;
+}
- if (fCache16Storage == NULL) { // set the storage and our working ptr
- fCache16Storage = (uint16_t*)sk_malloc_throw(allocSize);
- }
- fCache16 = fCache16Storage;
- if (fColorCount == 2) {
- Build16bitCache(fCache16, fOrigColors[0], fOrigColors[1],
- kCache16Count);
- } else {
- Rec* rec = fRecs;
- int prevIndex = 0;
- for (int i = 1; i < fColorCount; i++) {
- int nextIndex = SkFixedToFFFF(rec[i].fPos) >> kCache16Shift;
- SkASSERT(nextIndex < kCache16Count);
-
- if (nextIndex > prevIndex)
- Build16bitCache(fCache16 + prevIndex, fOrigColors[i-1], fOrigColors[i], nextIndex - prevIndex + 1);
- prevIndex = nextIndex;
- }
+void SkGradientShaderBase::GradientShaderCache::initCache16(GradientShaderCache* cache) {
+ // double the count for dither entries
+ const int entryCount = kCache16Count * 2;
+ const size_t allocSize = sizeof(uint16_t) * entryCount;
+
+ SkASSERT(NULL == cache->fCache16Storage);
+ cache->fCache16Storage = (uint16_t*)sk_malloc_throw(allocSize);
+ cache->fCache16 = cache->fCache16Storage;
+ if (cache->fShader.fColorCount == 2) {
+ Build16bitCache(cache->fCache16, cache->fShader.fOrigColors[0],
+ cache->fShader.fOrigColors[1], kCache16Count);
+ } else {
+ Rec* rec = cache->fShader.fRecs;
+ int prevIndex = 0;
+ for (int i = 1; i < cache->fShader.fColorCount; i++) {
+ int nextIndex = SkFixedToFFFF(rec[i].fPos) >> kCache16Shift;
+ SkASSERT(nextIndex < kCache16Count);
+
+ if (nextIndex > prevIndex)
+ Build16bitCache(cache->fCache16 + prevIndex, cache->fShader.fOrigColors[i-1],
+ cache->fShader.fOrigColors[i], nextIndex - prevIndex + 1);
+ prevIndex = nextIndex;
}
+ }
- if (fMapper) {
- fCache16Storage = (uint16_t*)sk_malloc_throw(allocSize);
- uint16_t* linear = fCache16; // just computed linear data
- uint16_t* mapped = fCache16Storage; // storage for mapped data
- SkUnitMapper* map = fMapper;
- for (int i = 0; i < kCache16Count; i++) {
- int index = map->mapUnit16(bitsTo16(i, kCache16Bits)) >> kCache16Shift;
- mapped[i] = linear[index];
- mapped[i + kCache16Count] = linear[index + kCache16Count];
- }
- sk_free(fCache16);
- fCache16 = fCache16Storage;
+ if (cache->fShader.fMapper) {
+ cache->fCache16Storage = (uint16_t*)sk_malloc_throw(allocSize);
+ uint16_t* linear = cache->fCache16; // just computed linear data
+ uint16_t* mapped = cache->fCache16Storage; // storage for mapped data
+ SkUnitMapper* map = cache->fShader.fMapper;
+ for (int i = 0; i < kCache16Count; i++) {
+ int index = map->mapUnit16(bitsTo16(i, kCache16Bits)) >> kCache16Shift;
+ mapped[i] = linear[index];
+ mapped[i + kCache16Count] = linear[index + kCache16Count];
}
+ sk_free(cache->fCache16);
+ cache->fCache16 = cache->fCache16Storage;
}
- return fCache16;
}
-const SkPMColor* SkGradientShaderBase::getCache32() const {
- if (fCache32 == NULL) {
- SkImageInfo info;
- info.fWidth = kCache32Count;
- info.fHeight = 4; // for our 4 dither rows
- info.fAlphaType = kPremul_SkAlphaType;
- info.fColorType = kN32_SkColorType;
+const SkPMColor* SkGradientShaderBase::GradientShaderCache::getCache32() {
+ SkOnce(&fCache32Inited, &fCache32Mutex, SkGradientShaderBase::GradientShaderCache::initCache32,
+ this);
+ SkASSERT(fCache32);
+ return fCache32;
+}
- if (NULL == fCache32PixelRef) {
- fCache32PixelRef = SkMallocPixelRef::NewAllocate(info, 0, NULL);
- }
- fCache32 = (SkPMColor*)fCache32PixelRef->getAddr();
- if (fColorCount == 2) {
- Build32bitCache(fCache32, fOrigColors[0], fOrigColors[1],
- kCache32Count, fCacheAlpha, fGradFlags);
- } else {
- Rec* rec = fRecs;
- int prevIndex = 0;
- for (int i = 1; i < fColorCount; i++) {
- int nextIndex = SkFixedToFFFF(rec[i].fPos) >> kCache32Shift;
- SkASSERT(nextIndex < kCache32Count);
-
- if (nextIndex > prevIndex)
- Build32bitCache(fCache32 + prevIndex, fOrigColors[i-1],
- fOrigColors[i], nextIndex - prevIndex + 1,
- fCacheAlpha, fGradFlags);
- prevIndex = nextIndex;
- }
+void SkGradientShaderBase::GradientShaderCache::initCache32(GradientShaderCache* cache) {
+ SkImageInfo info;
+ info.fWidth = kCache32Count;
+ info.fHeight = 4; // for our 4 dither rows
+ info.fAlphaType = kPremul_SkAlphaType;
+ info.fColorType = kN32_SkColorType;
+
+ SkASSERT(NULL == cache->fCache32PixelRef);
+ cache->fCache32PixelRef = SkMallocPixelRef::NewAllocate(info, 0, NULL);
+ cache->fCache32 = (SkPMColor*)cache->fCache32PixelRef->getAddr();
+ if (cache->fShader.fColorCount == 2) {
+ Build32bitCache(cache->fCache32, cache->fShader.fOrigColors[0],
+ cache->fShader.fOrigColors[1], kCache32Count, cache->fCacheAlpha,
+ cache->fShader.fGradFlags);
+ } else {
+ Rec* rec = cache->fShader.fRecs;
+ int prevIndex = 0;
+ for (int i = 1; i < cache->fShader.fColorCount; i++) {
+ int nextIndex = SkFixedToFFFF(rec[i].fPos) >> kCache32Shift;
+ SkASSERT(nextIndex < kCache32Count);
+
+ if (nextIndex > prevIndex)
+ Build32bitCache(cache->fCache32 + prevIndex, cache->fShader.fOrigColors[i-1],
+ cache->fShader.fOrigColors[i], nextIndex - prevIndex + 1,
+ cache->fCacheAlpha, cache->fShader.fGradFlags);
+ prevIndex = nextIndex;
}
+ }
- if (fMapper) {
- SkMallocPixelRef* newPR = SkMallocPixelRef::NewAllocate(info, 0, NULL);
- SkPMColor* linear = fCache32; // just computed linear data
- SkPMColor* mapped = (SkPMColor*)newPR->getAddr(); // storage for mapped data
- SkUnitMapper* map = fMapper;
- for (int i = 0; i < kCache32Count; i++) {
- int index = map->mapUnit16((i << 8) | i) >> 8;
- mapped[i + kCache32Count*0] = linear[index + kCache32Count*0];
- mapped[i + kCache32Count*1] = linear[index + kCache32Count*1];
- mapped[i + kCache32Count*2] = linear[index + kCache32Count*2];
- mapped[i + kCache32Count*3] = linear[index + kCache32Count*3];
- }
- fCache32PixelRef->unref();
- fCache32PixelRef = newPR;
- fCache32 = (SkPMColor*)newPR->getAddr();
+ if (cache->fShader.fMapper) {
+ SkMallocPixelRef* newPR = SkMallocPixelRef::NewAllocate(info, 0, NULL);
+ SkPMColor* linear = cache->fCache32; // just computed linear data
+ SkPMColor* mapped = (SkPMColor*)newPR->getAddr(); // storage for mapped data
+ SkUnitMapper* map = cache->fShader.fMapper;
+ for (int i = 0; i < kCache32Count; i++) {
+ int index = map->mapUnit16((i << 8) | i) >> 8;
+ mapped[i + kCache32Count*0] = linear[index + kCache32Count*0];
+ mapped[i + kCache32Count*1] = linear[index + kCache32Count*1];
+ mapped[i + kCache32Count*2] = linear[index + kCache32Count*2];
+ mapped[i + kCache32Count*3] = linear[index + kCache32Count*3];
}
+ cache->fCache32PixelRef->unref();
+ cache->fCache32PixelRef = newPR;
+ cache->fCache32 = (SkPMColor*)newPR->getAddr();
}
- return fCache32;
+}
+
+/*
+ * The gradient holds a cache for the most recent value of alpha. Successive
+ * callers with the same alpha value will share the same cache.
+ */
+SkGradientShaderBase::GradientShaderCache* SkGradientShaderBase::refCache(U8CPU alpha) const {
+ SkAutoMutexAcquire ama(fCacheMutex);
+ if (!fCache || fCache->getAlpha() != alpha) {
+ fCache.reset(SkNEW_ARGS(GradientShaderCache, (alpha, *this)));
+ }
+ // Increment the ref counter inside the mutex to ensure the returned pointer is still valid.
+ // Otherwise, the pointer may have been overwritten on a different thread before the object's
+ // ref count was incremented.
+ fCache.get()->ref();
+ return fCache;
}
/*
@@ -615,14 +624,14 @@ const SkPMColor* SkGradientShaderBase::getCache32() const {
void SkGradientShaderBase::getGradientTableBitmap(SkBitmap* bitmap) const {
// our caller assumes no external alpha, so we ensure that our cache is
// built with 0xFF
- this->setCacheAlpha(0xFF);
+ SkAutoTUnref<GradientShaderCache> cache(this->refCache(0xFF));
// don't have a way to put the mapper into our cache-key yet
if (fMapper) {
- // force our cahce32pixelref to be built
- (void)this->getCache32();
+ // force our cache32pixelref to be built
+ (void)cache->getCache32();
bitmap->setConfig(SkImageInfo::MakeN32Premul(kCache32Count, 1));
- bitmap->setPixelRef(fCache32PixelRef);
+ bitmap->setPixelRef(cache->getCache32PixelRef());
return;
}
@@ -661,9 +670,9 @@ void SkGradientShaderBase::getGradientTableBitmap(SkBitmap* bitmap) const {
if (!gCache->find(storage.get(), size, bitmap)) {
// force our cahce32pixelref to be built
- (void)this->getCache32();
+ (void)cache->getCache32();
bitmap->setConfig(SkImageInfo::MakeN32Premul(kCache32Count, 1));
- bitmap->setPixelRef(fCache32PixelRef);
+ bitmap->setPixelRef(cache->getCache32PixelRef());
gCache->add(storage.get(), size, *bitmap);
}