| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright 2006 The Android Open Source Project | 2 * Copyright 2006 The Android Open Source Project |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
| 5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
| 6 */ | 6 */ |
| 7 | 7 |
| 8 #include "SkGradientShaderPriv.h" | 8 #include "SkGradientShaderPriv.h" |
| 9 #include "SkLinearGradient.h" | 9 #include "SkLinearGradient.h" |
| 10 #include "SkRadialGradient.h" | 10 #include "SkRadialGradient.h" |
| 11 #include "SkTwoPointRadialGradient.h" | 11 #include "SkTwoPointRadialGradient.h" |
| 12 #include "SkTwoPointConicalGradient.h" | 12 #include "SkTwoPointConicalGradient.h" |
| 13 #include "SkSweepGradient.h" | 13 #include "SkSweepGradient.h" |
| 14 | 14 |
| 15 SkGradientShaderBase::SkGradientShaderBase(const Descriptor& desc) { | 15 SkGradientShaderBase::SkGradientShaderBase(const Descriptor& desc) { |
| 16 SkASSERT(desc.fCount > 1); | 16 SkASSERT(desc.fCount > 1); |
| 17 | 17 |
| 18 fCacheAlpha = 256; // init to a value that paint.getAlpha() can't return | |
| 19 | |
| 20 fMapper = desc.fMapper; | 18 fMapper = desc.fMapper; |
| 21 SkSafeRef(fMapper); | 19 SkSafeRef(fMapper); |
| 22 fGradFlags = SkToU8(desc.fGradFlags); | 20 fGradFlags = SkToU8(desc.fGradFlags); |
| 23 | 21 |
| 24 SkASSERT((unsigned)desc.fTileMode < SkShader::kTileModeCount); | 22 SkASSERT((unsigned)desc.fTileMode < SkShader::kTileModeCount); |
| 25 SkASSERT(SkShader::kTileModeCount == SK_ARRAY_COUNT(gTileProcs)); | 23 SkASSERT(SkShader::kTileModeCount == SK_ARRAY_COUNT(gTileProcs)); |
| 26 fTileMode = desc.fTileMode; | 24 fTileMode = desc.fTileMode; |
| 27 fTileProc = gTileProcs[desc.fTileMode]; | 25 fTileProc = gTileProcs[desc.fTileMode]; |
| 28 | 26 |
| 29 fCache16 = fCache16Storage = NULL; | |
| 30 fCache32 = NULL; | |
| 31 fCache32PixelRef = NULL; | |
| 32 | |
| 33 /* Note: we let the caller skip the first and/or last position. | 27 /* Note: we let the caller skip the first and/or last position. |
| 34 i.e. pos[0] = 0.3, pos[1] = 0.7 | 28 i.e. pos[0] = 0.3, pos[1] = 0.7 |
| 35 In these cases, we insert dummy entries to ensure that the final data | 29 In these cases, we insert dummy entries to ensure that the final data |
| 36 will be bracketed by [0, 1]. | 30 will be bracketed by [0, 1]. |
| 37 i.e. our_pos[0] = 0, our_pos[1] = 0.3, our_pos[2] = 0.7, our_pos[3] = 1 | 31 i.e. our_pos[0] = 0, our_pos[1] = 0.3, our_pos[2] = 0.7, our_pos[3] = 1 |
| 38 | 32 |
| 39 Thus colorCount (the caller's value, and fColorCount (our value) may | 33 Thus colorCount (the caller's value, and fColorCount (our value) may |
| 40 differ by up to 2. In the above example: | 34 differ by up to 2. In the above example: |
| 41 colorCount = 2 | 35 colorCount = 2 |
| 42 fColorCount = 4 | 36 fColorCount = 4 |
| (...skipping 96 matching lines...) |
| 139 | 133 |
| 140 static SkShader::TileMode unpack_mode(uint32_t packed) { | 134 static SkShader::TileMode unpack_mode(uint32_t packed) { |
| 141 return (SkShader::TileMode)(packed & 0xF); | 135 return (SkShader::TileMode)(packed & 0xF); |
| 142 } | 136 } |
| 143 | 137 |
| 144 static uint32_t unpack_flags(uint32_t packed) { | 138 static uint32_t unpack_flags(uint32_t packed) { |
| 145 return packed >> 4; | 139 return packed >> 4; |
| 146 } | 140 } |
| 147 | 141 |
| 148 SkGradientShaderBase::SkGradientShaderBase(SkReadBuffer& buffer) : INHERITED(buffer) { | 142 SkGradientShaderBase::SkGradientShaderBase(SkReadBuffer& buffer) : INHERITED(buffer) { |
| 149 fCacheAlpha = 256; | |
| 150 | |
| 151 fMapper = buffer.readUnitMapper(); | 143 fMapper = buffer.readUnitMapper(); |
| 152 | 144 |
| 153 fCache16 = fCache16Storage = NULL; | |
| 154 fCache32 = NULL; | |
| 155 fCache32PixelRef = NULL; | |
| 156 | |
| 157 int colorCount = fColorCount = buffer.getArrayCount(); | 145 int colorCount = fColorCount = buffer.getArrayCount(); |
| 158 if (colorCount > kColorStorageCount) { | 146 if (colorCount > kColorStorageCount) { |
| 159 size_t allocSize = (sizeof(SkColor) + sizeof(SkPMColor) + sizeof(Rec)) * colorCount; | 147 size_t allocSize = (sizeof(SkColor) + sizeof(SkPMColor) + sizeof(Rec)) * colorCount; |
| 160 if (buffer.validateAvailable(allocSize)) { | 148 if (buffer.validateAvailable(allocSize)) { |
| 161 fOrigColors = reinterpret_cast<SkColor*>(sk_malloc_throw(allocSize)); | 149 fOrigColors = reinterpret_cast<SkColor*>(sk_malloc_throw(allocSize)); |
| 162 } else { | 150 } else { |
| 163 fOrigColors = NULL; | 151 fOrigColors = NULL; |
| 164 colorCount = fColorCount = 0; | 152 colorCount = fColorCount = 0; |
| 165 } | 153 } |
| 166 } else { | 154 } else { |
| (...skipping 14 matching lines...) |
| 181 for (int i = 1; i < colorCount; i++) { | 169 for (int i = 1; i < colorCount; i++) { |
| 182 recs[i].fPos = buffer.readInt(); | 170 recs[i].fPos = buffer.readInt(); |
| 183 recs[i].fScale = buffer.readUInt(); | 171 recs[i].fScale = buffer.readUInt(); |
| 184 } | 172 } |
| 185 } | 173 } |
| 186 buffer.readMatrix(&fPtsToUnit); | 174 buffer.readMatrix(&fPtsToUnit); |
| 187 this->initCommon(); | 175 this->initCommon(); |
| 188 } | 176 } |
| 189 | 177 |
| 190 SkGradientShaderBase::~SkGradientShaderBase() { | 178 SkGradientShaderBase::~SkGradientShaderBase() { |
| 191 if (fCache16Storage) { | |
| 192 sk_free(fCache16Storage); | |
| 193 } | |
| 194 SkSafeUnref(fCache32PixelRef); | |
| 195 if (fOrigColors != fStorage) { | 179 if (fOrigColors != fStorage) { |
| 196 sk_free(fOrigColors); | 180 sk_free(fOrigColors); |
| 197 } | 181 } |
| 198 SkSafeUnref(fMapper); | 182 SkSafeUnref(fMapper); |
| 199 } | 183 } |
| 200 | 184 |
| 201 void SkGradientShaderBase::initCommon() { | 185 void SkGradientShaderBase::initCommon() { |
| 202 fFlags = 0; | |
| 203 unsigned colorAlpha = 0xFF; | 186 unsigned colorAlpha = 0xFF; |
| 204 for (int i = 0; i < fColorCount; i++) { | 187 for (int i = 0; i < fColorCount; i++) { |
| 205 colorAlpha &= SkColorGetA(fOrigColors[i]); | 188 colorAlpha &= SkColorGetA(fOrigColors[i]); |
| 206 } | 189 } |
| 207 fColorsAreOpaque = colorAlpha == 0xFF; | 190 fColorsAreOpaque = colorAlpha == 0xFF; |
| 208 } | 191 } |
| 209 | 192 |
| 210 void SkGradientShaderBase::flatten(SkWriteBuffer& buffer) const { | 193 void SkGradientShaderBase::flatten(SkWriteBuffer& buffer) const { |
| 211 this->INHERITED::flatten(buffer); | 194 this->INHERITED::flatten(buffer); |
| 212 buffer.writeFlattenable(fMapper); | 195 buffer.writeFlattenable(fMapper); |
| (...skipping 47 matching lines...) |
| 260 } | 243 } |
| 261 | 244 |
| 262 void SkGradientShaderBase::flipGradientColors() { | 245 void SkGradientShaderBase::flipGradientColors() { |
| 263 FlipGradientColors(fOrigColors, fRecs, fOrigColors, fRecs, fColorCount); | 246 FlipGradientColors(fOrigColors, fRecs, fOrigColors, fRecs, fColorCount); |
| 264 } | 247 } |
| 265 | 248 |
| 266 bool SkGradientShaderBase::isOpaque() const { | 249 bool SkGradientShaderBase::isOpaque() const { |
| 267 return fColorsAreOpaque; | 250 return fColorsAreOpaque; |
| 268 } | 251 } |
| 269 | 252 |
| 270 bool SkGradientShaderBase::setContext(const SkBitmap& device, | 253 SkGradientShaderBase::GradientShaderBaseContext::GradientShaderBaseContext( |
| 271 const SkPaint& paint, | 254 const SkGradientShaderBase& shader, const SkBitmap& device, |
| 272 const SkMatrix& matrix) { | 255 const SkPaint& paint, const SkMatrix& matrix) |
| 273 if (!this->INHERITED::setContext(device, paint, matrix)) { | 256 : INHERITED(shader, device, paint, matrix) |
| 274 return false; | 257 , fCache(shader.refCache(getPaintAlpha())) |
| 275 } | 258 { |
| 276 | |
| 277 const SkMatrix& inverse = this->getTotalInverse(); | 259 const SkMatrix& inverse = this->getTotalInverse(); |
| 278 | 260 |
| 279 fDstToIndex.setConcat(fPtsToUnit, inverse); | 261 fDstToIndex.setConcat(shader.fPtsToUnit, inverse); |
| 262 |
| 280 fDstToIndexProc = fDstToIndex.getMapXYProc(); | 263 fDstToIndexProc = fDstToIndex.getMapXYProc(); |
| 281 fDstToIndexClass = (uint8_t)SkShader::ComputeMatrixClass(fDstToIndex); | 264 fDstToIndexClass = (uint8_t)SkShader::Context::ComputeMatrixClass(fDstToIndex); |
| 282 | 265 |
| 283 // now convert our colors in to PMColors | 266 // now convert our colors in to PMColors |
| 284 unsigned paintAlpha = this->getPaintAlpha(); | 267 unsigned paintAlpha = this->getPaintAlpha(); |
| 285 | 268 |
| 286 fFlags = this->INHERITED::getFlags(); | 269 fFlags = this->INHERITED::getFlags(); |
| 287 if (fColorsAreOpaque && paintAlpha == 0xFF) { | 270 if (shader.fColorsAreOpaque && paintAlpha == 0xFF) { |
| 288 fFlags |= kOpaqueAlpha_Flag; | 271 fFlags |= kOpaqueAlpha_Flag; |
| 289 } | 272 } |
| 290 // we can do span16 as long as our individual colors are opaque, | 273 // we can do span16 as long as our individual colors are opaque, |
| 291 // regardless of the paint's alpha | 274 // regardless of the paint's alpha |
| 292 if (fColorsAreOpaque) { | 275 if (shader.fColorsAreOpaque) { |
| 293 fFlags |= kHasSpan16_Flag; | 276 fFlags |= kHasSpan16_Flag; |
| 294 } | 277 } |
| 295 | |
| 296 this->setCacheAlpha(paintAlpha); | |
| 297 return true; | |
| 298 } | |
| 299 | |
| 300 void SkGradientShaderBase::setCacheAlpha(U8CPU alpha) const { | |
| 301 // if the new alpha differs from the previous time we were called, inval our cache | |
| 302 // this will trigger the cache to be rebuilt. | |
| 303 // we don't care about the first time, since the cache ptrs will already be NULL | |
| 304 if (fCacheAlpha != alpha) { | |
| 305 fCache16 = NULL; // inval the cache | |
| 306 fCache32 = NULL; // inval the cache | |
| 307 fCacheAlpha = alpha; // record the new alpha | |
| 308 // inform our subclasses | |
| 309 if (fCache32PixelRef) { | |
| 310 fCache32PixelRef->notifyPixelsChanged(); | |
| 311 } | |
| 312 } | |
| 313 } | 278 } |
| 314 | 279 |
| 280 SkGradientShaderBase::GradientShaderCache::GradientShaderCache( |
| 281 U8CPU alpha, const SkGradientShaderBase& shader) |
| 282 : fCacheAlpha(alpha) |
| 283 , fShader(shader) |
| 284 , fCache16Inited(false) |
| 285 , fCache32Inited(false) |
| 286 { |
| 287 // Only initialize the cache in getCache16/32. |
| 288 fCache16 = NULL; |
| 289 fCache32 = NULL; |
| 290 fCache16Storage = NULL; |
| 291 fCache32PixelRef = NULL; |
| 292 } |
| 293 |
| 294 SkGradientShaderBase::GradientShaderCache::~GradientShaderCache() { |
| 295 sk_free(fCache16Storage); |
| 296 SkSafeUnref(fCache32PixelRef); |
| 297 } |
| 298 |
| 315 #define Fixed_To_Dot8(x) (((x) + 0x80) >> 8) | 299 #define Fixed_To_Dot8(x) (((x) + 0x80) >> 8) |
| 316 | 300 |
| 317 /** We take the original colors, not our premultiplied PMColors, since we can | 301 /** We take the original colors, not our premultiplied PMColors, since we can |
| 318 build a 16bit table as long as the original colors are opaque, even if the | 302 build a 16bit table as long as the original colors are opaque, even if the |
| 319 paint specifies a non-opaque alpha. | 303 paint specifies a non-opaque alpha. |
| 320 */ | 304 */ |
| 321 void SkGradientShaderBase::Build16bitCache(uint16_t cache[], SkColor c0, SkColor c1, | 305 void SkGradientShaderBase::GradientShaderCache::Build16bitCache( |
| 322 int count) { | 306 uint16_t cache[], SkColor c0, SkColor c1, int count) { |
| 323 SkASSERT(count > 1); | 307 SkASSERT(count > 1); |
| 324 SkASSERT(SkColorGetA(c0) == 0xFF); | 308 SkASSERT(SkColorGetA(c0) == 0xFF); |
| 325 SkASSERT(SkColorGetA(c1) == 0xFF); | 309 SkASSERT(SkColorGetA(c1) == 0xFF); |
| 326 | 310 |
| 327 SkFixed r = SkColorGetR(c0); | 311 SkFixed r = SkColorGetR(c0); |
| 328 SkFixed g = SkColorGetG(c0); | 312 SkFixed g = SkColorGetG(c0); |
| 329 SkFixed b = SkColorGetB(c0); | 313 SkFixed b = SkColorGetB(c0); |
| 330 | 314 |
| 331 SkFixed dr = SkIntToFixed(SkColorGetR(c1) - r) / (count - 1); | 315 SkFixed dr = SkIntToFixed(SkColorGetR(c1) - r) / (count - 1); |
| 332 SkFixed dg = SkIntToFixed(SkColorGetG(c1) - g) / (count - 1); | 316 SkFixed dg = SkIntToFixed(SkColorGetG(c1) - g) / (count - 1); |
| (...skipping 27 matching lines...) |
| 360 * 2. change SkPackARGB32 to + its (a << SK_A32_SHIFT) value instead | 344 * 2. change SkPackARGB32 to + its (a << SK_A32_SHIFT) value instead |
| 361 * of using | | 345 * of using | |
| 362 * | 346 * |
| 363 * We chose #1 just because it was more localized. | 347 * We chose #1 just because it was more localized. |
| 364 * See http://code.google.com/p/skia/issues/detail?id=1113 | 348 * See http://code.google.com/p/skia/issues/detail?id=1113 |
| 365 * | 349 * |
| 366 * The type SkUFixed encapsulate this need for unsigned, but logically Fixed. | 350 * The type SkUFixed encapsulate this need for unsigned, but logically Fixed. |
| 367 */ | 351 */ |
| 368 typedef uint32_t SkUFixed; | 352 typedef uint32_t SkUFixed; |
| 369 | 353 |
| 370 void SkGradientShaderBase::Build32bitCache(SkPMColor cache[], SkColor c0, SkColor c1, | 354 void SkGradientShaderBase::GradientShaderCache::Build32bitCache( |
| 371 int count, U8CPU paintAlpha, uint32_t gradFlags) { | 355 SkPMColor cache[], SkColor c0, SkColor c1, |
| 356 int count, U8CPU paintAlpha, uint32_t gradFlags) { |
| 372 SkASSERT(count > 1); | 357 SkASSERT(count > 1); |
| 373 | 358 |
| 374 // need to apply paintAlpha to our two endpoints | 359 // need to apply paintAlpha to our two endpoints |
| 375 uint32_t a0 = SkMulDiv255Round(SkColorGetA(c0), paintAlpha); | 360 uint32_t a0 = SkMulDiv255Round(SkColorGetA(c0), paintAlpha); |
| 376 uint32_t a1 = SkMulDiv255Round(SkColorGetA(c1), paintAlpha); | 361 uint32_t a1 = SkMulDiv255Round(SkColorGetA(c1), paintAlpha); |
| 377 | 362 |
| 378 | 363 |
| 379 const bool interpInPremul = SkToBool(gradFlags & | 364 const bool interpInPremul = SkToBool(gradFlags & |
| 380 SkGradientShader::kInterpolateColorsInPremul_Flag); | 365 SkGradientShader::kInterpolateColorsInPremul_Flag); |
| 381 | 366 |
| (...skipping 122 matching lines...) |
| 504 if (6 == bits) { | 489 if (6 == bits) { |
| 505 return (x << 10) | (x << 4) | (x >> 2); | 490 return (x << 10) | (x << 4) | (x >> 2); |
| 506 } | 491 } |
| 507 if (8 == bits) { | 492 if (8 == bits) { |
| 508 return (x << 8) | x; | 493 return (x << 8) | x; |
| 509 } | 494 } |
| 510 sk_throw(); | 495 sk_throw(); |
| 511 return 0; | 496 return 0; |
| 512 } | 497 } |
| 513 | 498 |
| 514 const uint16_t* SkGradientShaderBase::getCache16() const { | 499 const uint16_t* SkGradientShaderBase::GradientShaderCache::getCache16() { |
| 515 if (fCache16 == NULL) { | 500 SkOnce(&fCache16Inited, &fCache16Mutex, SkGradientShaderBase::GradientShaderCache::initCache16, |
| 516 // double the count for dither entries | 501 this); |
| 517 const int entryCount = kCache16Count * 2; | 502 SkASSERT(fCache16); |
| 518 const size_t allocSize = sizeof(uint16_t) * entryCount; | |
| 519 | |
| 520 if (fCache16Storage == NULL) { // set the storage and our working ptr | |
| 521 fCache16Storage = (uint16_t*)sk_malloc_throw(allocSize); | |
| 522 } | |
| 523 fCache16 = fCache16Storage; | |
| 524 if (fColorCount == 2) { | |
| 525 Build16bitCache(fCache16, fOrigColors[0], fOrigColors[1], | |
| 526 kCache16Count); | |
| 527 } else { | |
| 528 Rec* rec = fRecs; | |
| 529 int prevIndex = 0; | |
| 530 for (int i = 1; i < fColorCount; i++) { | |
| 531 int nextIndex = SkFixedToFFFF(rec[i].fPos) >> kCache16Shift; | |
| 532 SkASSERT(nextIndex < kCache16Count); | |
| 533 | |
| 534 if (nextIndex > prevIndex) | |
| 535 Build16bitCache(fCache16 + prevIndex, fOrigColors[i-1], fOrigColors[i], nextIndex - prevIndex + 1); | |
| 536 prevIndex = nextIndex; | |
| 537 } | |
| 538 } | |
| 539 | |
| 540 if (fMapper) { | |
| 541 fCache16Storage = (uint16_t*)sk_malloc_throw(allocSize); | |
| 542 uint16_t* linear = fCache16; // just computed linear data | |
| 543 uint16_t* mapped = fCache16Storage; // storage for mapped data | |
| 544 SkUnitMapper* map = fMapper; | |
| 545 for (int i = 0; i < kCache16Count; i++) { | |
| 546 int index = map->mapUnit16(bitsTo16(i, kCache16Bits)) >> kCache16Shift; | |
| 547 mapped[i] = linear[index]; | |
| 548 mapped[i + kCache16Count] = linear[index + kCache16Count]; | |
| 549 } | |
| 550 sk_free(fCache16); | |
| 551 fCache16 = fCache16Storage; | |
| 552 } | |
| 553 } | |
| 554 return fCache16; | 503 return fCache16; |
| 555 } | 504 } |
| 556 | 505 |
| 557 const SkPMColor* SkGradientShaderBase::getCache32() const { | 506 void SkGradientShaderBase::GradientShaderCache::initCache32(GradientShaderCache* cache) { |
| 558 if (fCache32 == NULL) { | 507 // double the count for dither entries |
| 559 SkImageInfo info; | 508 const int entryCount = kCache16Count * 2; |
| 560 info.fWidth = kCache32Count; | 509 const size_t allocSize = sizeof(uint16_t) * entryCount; |
| 561 info.fHeight = 4; // for our 4 dither rows | |
| 562 info.fAlphaType = kPremul_SkAlphaType; | |
| 563 info.fColorType = kN32_SkColorType; | |
| 564 | 510 |
| 565 if (NULL == fCache32PixelRef) { | 511 SkASSERT(NULL == cache->fCache16Storage); |
| 566 fCache32PixelRef = SkMallocPixelRef::NewAllocate(info, 0, NULL); | 512 cache->fCache16Storage = (uint16_t*)sk_malloc_throw(allocSize); |
| 567 } | 513 cache->fCache16 = cache->fCache16Storage; |
| 568 fCache32 = (SkPMColor*)fCache32PixelRef->getAddr(); | 514 if (cache->fShader.fColorCount == 2) { |
| 569 if (fColorCount == 2) { | 515 Build16bitCache(cache->fCache16, cache->fShader.fOrigColors[0], |
| 570 Build32bitCache(fCache32, fOrigColors[0], fOrigColors[1], | 516 cache->fShader.fOrigColors[1], kCache16Count); |
| 571 kCache32Count, fCacheAlpha, fGradFlags); | 517 } else { |
| 572 } else { | 518 Rec* rec = cache->fShader.fRecs; |
| 573 Rec* rec = fRecs; | 519 int prevIndex = 0; |
| 574 int prevIndex = 0; | 520 for (int i = 1; i < cache->fShader.fColorCount; i++) { |
| 575 for (int i = 1; i < fColorCount; i++) { | 521 int nextIndex = SkFixedToFFFF(rec[i].fPos) >> kCache16Shift; |
| 576 int nextIndex = SkFixedToFFFF(rec[i].fPos) >> kCache32Shift; | 522 SkASSERT(nextIndex < kCache16Count); |
| 577 SkASSERT(nextIndex < kCache32Count); | |
| 578 | 523 |
| 579 if (nextIndex > prevIndex) | 524 if (nextIndex > prevIndex) |
| 580 Build32bitCache(fCache32 + prevIndex, fOrigColors[i-1], | 525 Build16bitCache(cache->fCache16 + prevIndex, cache->fShader.fOrigColors[i-1], |
| 581 fOrigColors[i], nextIndex - prevIndex + 1, | 526 cache->fShader.fOrigColors[i], nextIndex - prevIndex + 1); |
| 582 fCacheAlpha, fGradFlags); | 527 prevIndex = nextIndex; |
| 583 prevIndex = nextIndex; | |
| 584 } | |
| 585 } | |
| 586 | |
| 587 if (fMapper) { | |
| 588 SkMallocPixelRef* newPR = SkMallocPixelRef::NewAllocate(info, 0, NULL); | |
| 589 SkPMColor* linear = fCache32; // just computed linear data | |
| 590 SkPMColor* mapped = (SkPMColor*)newPR->getAddr(); // storage for mapped data | |
| 591 SkUnitMapper* map = fMapper; | |
| 592 for (int i = 0; i < kCache32Count; i++) { | |
| 593 int index = map->mapUnit16((i << 8) | i) >> 8; | |
| 594 mapped[i + kCache32Count*0] = linear[index + kCache32Count*0]; | |
| 595 mapped[i + kCache32Count*1] = linear[index + kCache32Count*1]; | |
| 596 mapped[i + kCache32Count*2] = linear[index + kCache32Count*2]; | |
| 597 mapped[i + kCache32Count*3] = linear[index + kCache32Count*3]; | |
| 598 } | |
| 599 fCache32PixelRef->unref(); | |
| 600 fCache32PixelRef = newPR; | |
| 601 fCache32 = (SkPMColor*)newPR->getAddr(); | |
| 602 } | 528 } |
| 603 } | 529 } |
| 530 |
| 531 if (cache->fShader.fMapper) { |
| 532 cache->fCache16Storage = (uint16_t*)sk_malloc_throw(allocSize); |
| 533 uint16_t* linear = cache->fCache16; // just computed linear data |
| 534 uint16_t* mapped = cache->fCache16Storage; // storage for mapped data |
| 535 SkUnitMapper* map = cache->fShader.fMapper; |
| 536 for (int i = 0; i < kCache16Count; i++) { |
| 537 int index = map->mapUnit16(bitsTo16(i, kCache16Bits)) >> kCache16Shift; |
| 538 mapped[i] = linear[index]; |
| 539 mapped[i + kCache16Count] = linear[index + kCache16Count]; |
| 540 } |
| 541 sk_free(cache->fCache16); |
| 542 cache->fCache16 = cache->fCache16Storage; |
| 543 } |
| 544 } |
| 545 |
| 546 const SkPMColor* SkGradientShaderBase::GradientShaderCache::getCache32() { |
| 547 SkOnce(&fCache32Inited, &fCache32Mutex, SkGradientShaderBase::GradientShaderCache::initCache32, |
| 548 this); |
| 549 SkASSERT(fCache32); |
| 604 return fCache32; | 550 return fCache32; |
| 605 } | 551 } |
| 606 | 552 |
| 553 void SkGradientShaderBase::GradientShaderCache::initCache32(GradientShaderCache* cache) { |
| 554 SkImageInfo info; |
| 555 info.fWidth = kCache32Count; |
| 556 info.fHeight = 4; // for our 4 dither rows |
| 557 info.fAlphaType = kPremul_SkAlphaType; |
| 558 info.fColorType = kN32_SkColorType; |
| 559 |
| 560 SkASSERT(NULL == cache->fCache32PixelRef); |
| 561 cache->fCache32PixelRef = SkMallocPixelRef::NewAllocate(info, 0, NULL); |
| 562 cache->fCache32 = (SkPMColor*)cache->fCache32PixelRef->getAddr(); |
| 563 if (cache->fShader.fColorCount == 2) { |
| 564 Build32bitCache(cache->fCache32, cache->fShader.fOrigColors[0], |
| 565 cache->fShader.fOrigColors[1], kCache32Count, cache->fCacheAlpha, |
| 566 cache->fShader.fGradFlags); |
| 567 } else { |
| 568 Rec* rec = cache->fShader.fRecs; |
| 569 int prevIndex = 0; |
| 570 for (int i = 1; i < cache->fShader.fColorCount; i++) { |
| 571 int nextIndex = SkFixedToFFFF(rec[i].fPos) >> kCache32Shift; |
| 572 SkASSERT(nextIndex < kCache32Count); |
| 573 |
| 574 if (nextIndex > prevIndex) |
| 575 Build32bitCache(cache->fCache32 + prevIndex, cache->fShader.fOrigColors[i-1], |
| 576 cache->fShader.fOrigColors[i], nextIndex - prevIndex + 1, |
| 577 cache->fCacheAlpha, cache->fShader.fGradFlags); |
| 578 prevIndex = nextIndex; |
| 579 } |
| 580 } |
| 581 |
| 582 if (cache->fShader.fMapper) { |
| 583 SkMallocPixelRef* newPR = SkMallocPixelRef::NewAllocate(info, 0, NULL); |
| 584 SkPMColor* linear = cache->fCache32; // just computed linear d
ata |
| 585 SkPMColor* mapped = (SkPMColor*)newPR->getAddr(); // storage for mapp
ed data |
| 586 SkUnitMapper* map = cache->fShader.fMapper; |
| 587 for (int i = 0; i < kCache32Count; i++) { |
| 588 int index = map->mapUnit16((i << 8) | i) >> 8; |
| 589 mapped[i + kCache32Count*0] = linear[index + kCache32Count*0]; |
| 590 mapped[i + kCache32Count*1] = linear[index + kCache32Count*1]; |
| 591 mapped[i + kCache32Count*2] = linear[index + kCache32Count*2]; |
| 592 mapped[i + kCache32Count*3] = linear[index + kCache32Count*3]; |
| 593 } |
| 594 cache->fCache32PixelRef->unref(); |
| 595 cache->fCache32PixelRef = newPR; |
| 596 cache->fCache32 = (SkPMColor*)newPR->getAddr(); |
| 597 } |
| 598 } |
| 599 |
| 607 /* | 600 /* |
| 601 * The gradient holds a cache for the most recent value of alpha. Successive |
| 602 * callers with the same alpha value will share the same cache. |
| 603 */ |
| 604 SkGradientShaderBase::GradientShaderCache* SkGradientShaderBase::refCache(U8CPU alpha) const { |
| 605 SkAutoMutexAcquire ama(fCacheMutex); |
| 606 if (!fCache || fCache->getAlpha() != alpha) { |
| 607 fCache.reset(SkNEW_ARGS(GradientShaderCache, (alpha, *this))); |
| 608 } |
| 609 // Increment the ref counter inside the mutex to ensure the returned pointer is still valid. |
| 610 // Otherwise, the pointer may have been overwritten on a different thread before the object's |
| 611 // ref count was incremented. |
| 612 fCache.get()->ref(); |
| 613 return fCache; |
| 614 } |
| 615 |
| 616 /* |
| 608 * Because our caller might rebuild the same (logically the same) gradient | 617 * Because our caller might rebuild the same (logically the same) gradient |
| 609 * over and over, we'd like to return exactly the same "bitmap" if possible, | 618 * over and over, we'd like to return exactly the same "bitmap" if possible, |
| 610 * allowing the client to utilize a cache of our bitmap (e.g. with a GPU). | 619 * allowing the client to utilize a cache of our bitmap (e.g. with a GPU). |
| 611 * To do that, we maintain a private cache of built-bitmaps, based on our | 620 * To do that, we maintain a private cache of built-bitmaps, based on our |
| 612 * colors and positions. Note: we don't try to flatten the fMapper, so if one | 621 * colors and positions. Note: we don't try to flatten the fMapper, so if one |
| 613 * is present, we skip the cache for now. | 622 * is present, we skip the cache for now. |
| 614 */ | 623 */ |
| 615 void SkGradientShaderBase::getGradientTableBitmap(SkBitmap* bitmap) const { | 624 void SkGradientShaderBase::getGradientTableBitmap(SkBitmap* bitmap) const { |
| 616 // our caller assumes no external alpha, so we ensure that our cache is | 625 // our caller assumes no external alpha, so we ensure that our cache is |
| 617 // built with 0xFF | 626 // built with 0xFF |
| 618 this->setCacheAlpha(0xFF); | 627 SkAutoTUnref<GradientShaderCache> cache(this->refCache(0xFF)); |
| 619 | 628 |
| 620 // don't have a way to put the mapper into our cache-key yet | 629 // don't have a way to put the mapper into our cache-key yet |
| 621 if (fMapper) { | 630 if (fMapper) { |
| 622 // force our cahce32pixelref to be built | 631 // force our cache32pixelref to be built |
| 623 (void)this->getCache32(); | 632 (void)cache->getCache32(); |
| 624 bitmap->setConfig(SkImageInfo::MakeN32Premul(kCache32Count, 1)); | 633 bitmap->setConfig(SkImageInfo::MakeN32Premul(kCache32Count, 1)); |
| 625 bitmap->setPixelRef(fCache32PixelRef); | 634 bitmap->setPixelRef(cache->getCache32PixelRef()); |
| 626 return; | 635 return; |
| 627 } | 636 } |
| 628 | 637 |
| 629 // build our key: [numColors + colors[] + {positions[]} + flags ] | 638 // build our key: [numColors + colors[] + {positions[]} + flags ] |
| 630 int count = 1 + fColorCount + 1; | 639 int count = 1 + fColorCount + 1; |
| 631 if (fColorCount > 2) { | 640 if (fColorCount > 2) { |
| 632 count += fColorCount - 1; // fRecs[].fPos | 641 count += fColorCount - 1; // fRecs[].fPos |
| 633 } | 642 } |
| 634 | 643 |
| 635 SkAutoSTMalloc<16, int32_t> storage(count); | 644 SkAutoSTMalloc<16, int32_t> storage(count); |
| (...skipping 18 matching lines...) |
| 654 static const int MAX_NUM_CACHED_GRADIENT_BITMAPS = 32; | 663 static const int MAX_NUM_CACHED_GRADIENT_BITMAPS = 32; |
| 655 SkAutoMutexAcquire ama(gMutex); | 664 SkAutoMutexAcquire ama(gMutex); |
| 656 | 665 |
| 657 if (NULL == gCache) { | 666 if (NULL == gCache) { |
| 658 gCache = SkNEW_ARGS(SkBitmapCache, (MAX_NUM_CACHED_GRADIENT_BITMAPS)); | 667 gCache = SkNEW_ARGS(SkBitmapCache, (MAX_NUM_CACHED_GRADIENT_BITMAPS)); |
| 659 } | 668 } |
| 660 size_t size = count * sizeof(int32_t); | 669 size_t size = count * sizeof(int32_t); |
| 661 | 670 |
| 662 if (!gCache->find(storage.get(), size, bitmap)) { | 671 if (!gCache->find(storage.get(), size, bitmap)) { |
| 663 // force our cahce32pixelref to be built | 672 // force our cahce32pixelref to be built |
| 664 (void)this->getCache32(); | 673 (void)cache->getCache32(); |
| 665 bitmap->setConfig(SkImageInfo::MakeN32Premul(kCache32Count, 1)); | 674 bitmap->setConfig(SkImageInfo::MakeN32Premul(kCache32Count, 1)); |
| 666 bitmap->setPixelRef(fCache32PixelRef); | 675 bitmap->setPixelRef(cache->getCache32PixelRef()); |
| 667 | 676 |
| 668 gCache->add(storage.get(), size, *bitmap); | 677 gCache->add(storage.get(), size, *bitmap); |
| 669 } | 678 } |
| 670 } | 679 } |
| 671 | 680 |
| 672 void SkGradientShaderBase::commonAsAGradient(GradientInfo* info, bool flipGrad) const { | 681 void SkGradientShaderBase::commonAsAGradient(GradientInfo* info, bool flipGrad) const { |
| 673 if (info) { | 682 if (info) { |
| 674 if (info->fColorCount >= fColorCount) { | 683 if (info->fColorCount >= fColorCount) { |
| 675 SkColor* colorLoc; | 684 SkColor* colorLoc; |
| 676 Rec* recLoc; | 685 Rec* recLoc; |
| (...skipping 518 matching lines...) |
| 1195 (*stops)[i] = stop; | 1204 (*stops)[i] = stop; |
| 1196 stop = i < outColors - 1 ? stop + random->nextUScalar1() * (1.f - stop) : 1.f; | 1205 stop = i < outColors - 1 ? stop + random->nextUScalar1() * (1.f - stop) : 1.f; |
| 1197 } | 1206 } |
| 1198 } | 1207 } |
| 1199 *tm = static_cast<SkShader::TileMode>(random->nextULessThan(SkShader::kTileModeCount)); | 1208 *tm = static_cast<SkShader::TileMode>(random->nextULessThan(SkShader::kTileModeCount)); |
| 1200 | 1209 |
| 1201 return outColors; | 1210 return outColors; |
| 1202 } | 1211 } |
| 1203 | 1212 |
| 1204 #endif | 1213 #endif |
| OLD | NEW |
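
The change above replaces the shader's mutable cache fields with a ref-counted, per-alpha GradientShaderCache: refCache() hands out the cache under a mutex, and the cache fills its lookup tables lazily and exactly once via SkOnce. The following is a minimal, standalone sketch of that pattern, not Skia's actual classes: GradientCacheSketch, GradientSketch, and the placeholder table fill are hypothetical names, and std::once_flag, std::mutex, and std::shared_ptr stand in for SkOnce, SkMutex/SkAutoMutexAcquire, and SkRefCnt.

#include <cstdint>
#include <memory>
#include <mutex>
#include <vector>

class GradientCacheSketch {
public:
    explicit GradientCacheSketch(uint8_t alpha) : fAlpha(alpha) {}

    uint8_t alpha() const { return fAlpha; }

    // Build the table on first use, exactly once per cache object, the way
    // getCache32()/initCache32() are guarded by SkOnce in the patch.
    const uint32_t* cache32() {
        std::call_once(fOnce32, [this] {
            fCache32.assign(256, 0u);   // placeholder for the real table build
        });
        return fCache32.data();
    }

private:
    const uint8_t         fAlpha;
    std::once_flag        fOnce32;
    std::vector<uint32_t> fCache32;
};

class GradientSketch {
public:
    // Mirrors refCache(alpha): reuse the cache while the requested alpha
    // matches, otherwise replace it. The owning reference is taken while the
    // mutex is still held, so another thread cannot swap the cache out and
    // destroy it between the lookup and the ref.
    std::shared_ptr<GradientCacheSketch> refCache(uint8_t alpha) const {
        std::lock_guard<std::mutex> lock(fCacheMutex);
        if (!fCache || fCache->alpha() != alpha) {
            fCache = std::make_shared<GradientCacheSketch>(alpha);
        }
        return fCache;  // copying the shared_ptr bumps the count under the lock
    }

private:
    mutable std::mutex                           fCacheMutex;
    mutable std::shared_ptr<GradientCacheSketch> fCache;
};

A context object would then hold the returned reference for its own lifetime, much as GradientShaderBaseContext stores the result of shader.refCache(getPaintAlpha()) in the diff.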