Chromium Code Reviews
| Index: src/effects/SkBlurMask.cpp |
| diff --git a/src/effects/SkBlurMask.cpp b/src/effects/SkBlurMask.cpp |
| index bf50845ab6cb1d308562ea44fa279133bcafd46f..1c29830f49303700a34c3f78e13bd9b259f5a1b4 100644 |
| --- a/src/effects/SkBlurMask.cpp |
| +++ b/src/effects/SkBlurMask.cpp |
| @@ -9,6 +9,7 @@ |
| #include "SkBlurMask.h" |
| #include "SkMath.h" |
| +#include "SkScaledImageCache.h" |
| #include "SkTemplates.h" |
| #include "SkEndian.h" |
| @@ -21,6 +22,33 @@ |
| // point we should probably get rid of these scaling constants and rebaseline |
| // all the blur tests. |
| static const SkScalar kBLUR_SIGMA_SCALE = 0.57735f; |
| +std::map<SkMask*, SkDiscardableMemoryMask*> SkBlurMask::fDiscardableMemoryMaskMap; |
| + |
| +SK_DECLARE_STATIC_MUTEX(gMapMutex); |
| + |
| +bool SkBlurMask::addDiscardableMemoryMaskToMap(SkMask* mask, SkDiscardableMemoryMask* dmMask) { |
| + SkAutoMutexAcquire am(gMapMutex); |
| + fDiscardableMemoryMaskMap.insert(std::pair<SkMask*, SkDiscardableMemoryMask*>(mask, dmMask)); |
| + return true; |
| +} |
| + |
| +bool SkBlurMask::removeDiscardableMemoryMaskFromMap(SkMask* mask) { |
| + SkAutoMutexAcquire am(gMapMutex); |
| + std::map<SkMask*, SkDiscardableMemoryMask*>::iterator it; |
| + it = fDiscardableMemoryMaskMap.find(mask); |
| + if (it != fDiscardableMemoryMaskMap.end()) |
| + fDiscardableMemoryMaskMap.erase(it); |
| + return true; |
| +} |
| + |
| +SkDiscardableMemoryMask* SkBlurMask::getDiscardableMemoryMaskFromMap(SkMask* mask) { |
| + SkAutoMutexAcquire am(gMapMutex); |
| + std::map<SkMask*, SkDiscardableMemoryMask*>::iterator it; |
| + it = fDiscardableMemoryMaskMap.find(mask); |
| + if (it != fDiscardableMemoryMaskMap.end()) |
| + return it->second; |
| + return NULL; |
| +} |
| SkScalar SkBlurMask::ConvertRadiusToSigma(SkScalar radius) { |
| return radius > 0 ? kBLUR_SIGMA_SCALE * radius + 0.5f : 0.0f; |
| @@ -541,8 +569,26 @@ bool SkBlurMask::BoxBlur(SkMask* dst, const SkMask& src, |
| int sw = src.fBounds.width(); |
| int sh = src.fBounds.height(); |
| const uint8_t* sp = src.fImage; |
| - uint8_t* dp = SkMask::AllocImage(dstSize); |
| - SkAutoTCallVProc<uint8_t, SkMask_FreeImage> autoCall(dp); |
| + uint8_t* dp = NULL; |
| + SkDiscardableMemoryMask* dm = SkBlurMask::getDiscardableMemoryMaskFromMap(dst); |
| + if (dm) { |
| + SkBitmap bitmap; |
| + SkBitmap::Allocator* allocator = SkScaledImageCache::GetAllocator(); |
| + bitmap.setConfig(SkBitmap::kA8_Config, dst->fBounds.width(), |
| + dst->fBounds.height(), dst->fRowBytes); |
| + bitmap.allocPixels(allocator, NULL); |
| + if (!bitmap.readyToDraw()) { |
| + return false; |
| + } |
| + dm->fPixelGenerationID = bitmap.getGenerationID(); |
| + dm->fCacheId = SkScaledImageCache::AddAndLock(dm->fPixelGenerationID, |
| + bitmap.width(), |
| + bitmap.height(), |
| + bitmap); |
| + SkASSERT(dm->fCacheId != NULL); |
| + dp = static_cast<uint8_t*>(bitmap.getPixels()); |
| + } else |
| + dp = SkMask::AllocImage(dstSize); |
| // build the blurry destination |
| SkAutoTMalloc<uint8_t> tmpBuffer(dstSize); |
| @@ -588,6 +634,11 @@ bool SkBlurMask::BoxBlur(SkMask* dst, const SkMask& src, |
| // now we allocate the "real" dst, mirror the size of src |
| size_t srcSize = src.computeImageSize(); |
| if (0 == srcSize) { |
| + if (dm) { |
| + SkScaledImageCache::Unlock(static_cast<SkScaledImageCache::ID*>(dm->fCacheId)); |
|
Stephen White
2014/06/09 18:03:57
This 5-line stanza is repeated 3 times. Could we refactor it into a shared helper function?
|
| + } else if (dp) { |
| + SkMask::FreeImage(dp); |
| + } |
| return false; // too big to allocate, abort |
| } |
| dst->fImage = SkMask::AllocImage(srcSize); |
| @@ -595,12 +646,15 @@ bool SkBlurMask::BoxBlur(SkMask* dst, const SkMask& src, |
| sp, src.fRowBytes, |
| dp + passCount * (rx + ry * dst->fRowBytes), |
| dst->fRowBytes, sw, sh); |
| - SkMask::FreeImage(dp); |
| + if (dm) { |
| + SkScaledImageCache::Unlock(static_cast<SkScaledImageCache::ID*>(dm->fCacheId)); |
| + } else if (dp) { |
| + SkMask::FreeImage(dp); |
| + } |
| } else if (style != kNormal_SkBlurStyle) { |
| clamp_with_orig(dp + passCount * (rx + ry * dst->fRowBytes), |
| dst->fRowBytes, sp, src.fRowBytes, sw, sh, style); |
| } |
| - (void)autoCall.detach(); |
| } |
| if (style == kInner_SkBlurStyle) { |
| @@ -779,7 +833,26 @@ bool SkBlurMask::BlurRect(SkScalar sigma, SkMask *dst, |
| return false; // too big to allocate, abort |
| } |
| - uint8_t* dp = SkMask::AllocImage(dstSize); |
| + uint8_t* dp = NULL; |
| + SkDiscardableMemoryMask* dm = SkBlurMask::getDiscardableMemoryMaskFromMap(dst); |
| + if (dm) { |
| + SkBitmap bitmap; |
| + SkBitmap::Allocator* allocator = SkScaledImageCache::GetAllocator(); |
| + bitmap.setConfig(SkBitmap::kA8_Config, dst->fBounds.width(), |
| + dst->fBounds.height(), dst->fRowBytes); |
| + bitmap.allocPixels(allocator, NULL); |
| + if (!bitmap.readyToDraw()) { |
| + return false; |
| + } |
| + dm->fPixelGenerationID = bitmap.getGenerationID(); |
| + dm->fCacheId = SkScaledImageCache::AddAndLock(dm->fPixelGenerationID, |
| + bitmap.width(), |
| + bitmap.height(), |
| + bitmap); |
| + SkASSERT(dm->fCacheId != NULL); |
| + dp = static_cast<uint8_t*>(bitmap.getPixels()); |
| + } else |
| + dp = SkMask::AllocImage(dstSize); |
| dst->fImage = dp; |
| @@ -813,7 +886,11 @@ bool SkBlurMask::BlurRect(SkScalar sigma, SkMask *dst, |
| uint8_t *inner_scanline = dst->fImage + y*sw; |
| memcpy(inner_scanline, blur_scanline, sw); |
| } |
| - SkMask::FreeImage(dp); |
| + if (dm) { |
| + SkScaledImageCache::Unlock(static_cast<SkScaledImageCache::ID*>(dm->fCacheId)); |
| + } else if (dp) { |
| + SkMask::FreeImage(dp); |
| + } |
| dst->fBounds.set(SkScalarRoundToInt(src.fLeft), |
| SkScalarRoundToInt(src.fTop), |