OLD | NEW |
1 /* | 1 /* |
2 * Copyright 2006 The Android Open Source Project | 2 * Copyright 2006 The Android Open Source Project |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
6 */ | 6 */ |
7 | 7 |
8 #include "SkGradientShaderPriv.h" | 8 #include "SkGradientShaderPriv.h" |
9 #include "SkLinearGradient.h" | 9 #include "SkLinearGradient.h" |
10 #include "SkRadialGradient.h" | 10 #include "SkRadialGradient.h" |
(...skipping 272 matching lines...) |
283 for (int i = 0; i < n; ++i) { | 283 for (int i = 0; i < n; ++i) { |
284 SkColor c = fOrigColors[i]; | 284 SkColor c = fOrigColors[i]; |
285 r += SkColorGetR(c); | 285 r += SkColorGetR(c); |
286 g += SkColorGetG(c); | 286 g += SkColorGetG(c); |
287 b += SkColorGetB(c); | 287 b += SkColorGetB(c); |
288 } | 288 } |
289 *lum = SkColorSetRGB(rounded_divide(r, n), rounded_divide(g, n), rounded_divide(b, n)); | 289 *lum = SkColorSetRGB(rounded_divide(r, n), rounded_divide(g, n), rounded_divide(b, n)); |
290 return true; | 290 return true; |
291 } | 291 } |
292 | 292 |
| 293 #define SK_SUPPORT_LEGACY_GRADIENT_DITHERING |
| 294 |
293 SkGradientShaderBase::GradientShaderBaseContext::GradientShaderBaseContext( | 295 SkGradientShaderBase::GradientShaderBaseContext::GradientShaderBaseContext( |
294 const SkGradientShaderBase& shader, const ContextRec& rec) | 296 const SkGradientShaderBase& shader, const ContextRec& rec) |
295 : INHERITED(shader, rec) | 297 : INHERITED(shader, rec) |
296 , fCache(shader.refCache(getPaintAlpha())) | 298 #ifdef SK_SUPPORT_LEGACY_GRADIENT_DITHERING |
| 299 , fDither(true) |
| 300 #else |
| 301 , fDither(rec.fPaint->isDither()) |
| 302 #endif |
| 303 , fCache(shader.refCache(getPaintAlpha(), fDither)) |
297 { | 304 { |
298 const SkMatrix& inverse = this->getTotalInverse(); | 305 const SkMatrix& inverse = this->getTotalInverse(); |
299 | 306 |
300 fDstToIndex.setConcat(shader.fPtsToUnit, inverse); | 307 fDstToIndex.setConcat(shader.fPtsToUnit, inverse); |
301 | 308 |
302 fDstToIndexProc = fDstToIndex.getMapXYProc(); | 309 fDstToIndexProc = fDstToIndex.getMapXYProc(); |
303 fDstToIndexClass = (uint8_t)SkShader::Context::ComputeMatrixClass(fDstToIndex); | 310 fDstToIndexClass = (uint8_t)SkShader::Context::ComputeMatrixClass(fDstToIndex); |
304 | 311 |
305 // now convert our colors into PMColors | 312 // now convert our colors into PMColors |
306 unsigned paintAlpha = this->getPaintAlpha(); | 313 unsigned paintAlpha = this->getPaintAlpha(); |
307 | 314 |
308 fFlags = this->INHERITED::getFlags(); | 315 fFlags = this->INHERITED::getFlags(); |
309 if (shader.fColorsAreOpaque && paintAlpha == 0xFF) { | 316 if (shader.fColorsAreOpaque && paintAlpha == 0xFF) { |
310 fFlags |= kOpaqueAlpha_Flag; | 317 fFlags |= kOpaqueAlpha_Flag; |
311 } | 318 } |
312 // we can do span16 as long as our individual colors are opaque, | 319 // we can do span16 as long as our individual colors are opaque, |
313 // regardless of the paint's alpha | 320 // regardless of the paint's alpha |
314 if (shader.fColorsAreOpaque) { | 321 if (shader.fColorsAreOpaque) { |
315 fFlags |= kHasSpan16_Flag; | 322 fFlags |= kHasSpan16_Flag; |
316 } | 323 } |
317 } | 324 } |
318 | 325 |
319 SkGradientShaderBase::GradientShaderCache::GradientShaderCache( | 326 SkGradientShaderBase::GradientShaderCache::GradientShaderCache( |
320 U8CPU alpha, const SkGradientShaderBase& shader) | 327 U8CPU alpha, bool dither, const SkGradientShaderBase& shader) |
321 : fCacheAlpha(alpha) | 328 : fCacheAlpha(alpha) |
| 329 , fCacheDither(dither) |
322 , fShader(shader) | 330 , fShader(shader) |
323 , fCache16Inited(false) | 331 , fCache16Inited(false) |
324 , fCache32Inited(false) | 332 , fCache32Inited(false) |
325 { | 333 { |
326 // Only initialize the cache in getCache16/32. | 334 // Only initialize the cache in getCache16/32. |
327 fCache16 = nullptr; | 335 fCache16 = nullptr; |
328 fCache32 = nullptr; | 336 fCache32 = nullptr; |
329 fCache16Storage = nullptr; | 337 fCache16Storage = nullptr; |
330 fCache32PixelRef = nullptr; | 338 fCache32PixelRef = nullptr; |
331 } | 339 } |
332 | 340 |
333 SkGradientShaderBase::GradientShaderCache::~GradientShaderCache() { | 341 SkGradientShaderBase::GradientShaderCache::~GradientShaderCache() { |
334 sk_free(fCache16Storage); | 342 sk_free(fCache16Storage); |
335 SkSafeUnref(fCache32PixelRef); | 343 SkSafeUnref(fCache32PixelRef); |
336 } | 344 } |
337 | 345 |
338 #define Fixed_To_Dot8(x) (((x) + 0x80) >> 8) | 346 #define Fixed_To_Dot8(x) (((x) + 0x80) >> 8) |
339 | 347 |
340 /** We take the original colors, not our premultiplied PMColors, since we can | 348 /** We take the original colors, not our premultiplied PMColors, since we can |
341 build a 16bit table as long as the original colors are opaque, even if the | 349 build a 16bit table as long as the original colors are opaque, even if the |
342 paint specifies a non-opaque alpha. | 350 paint specifies a non-opaque alpha. |
343 */ | 351 */ |
344 void SkGradientShaderBase::GradientShaderCache::Build16bitCache( | 352 void SkGradientShaderBase::GradientShaderCache::Build16bitCache( |
345 uint16_t cache[], SkColor c0, SkColor c1, int count) { | 353 uint16_t cache[], SkColor c0, SkColor c1, int count, bool dither) { |
346 SkASSERT(count > 1); | 354 SkASSERT(count > 1); |
347 SkASSERT(SkColorGetA(c0) == 0xFF); | 355 SkASSERT(SkColorGetA(c0) == 0xFF); |
348 SkASSERT(SkColorGetA(c1) == 0xFF); | 356 SkASSERT(SkColorGetA(c1) == 0xFF); |
349 | 357 |
350 SkFixed r = SkColorGetR(c0); | 358 SkFixed r = SkColorGetR(c0); |
351 SkFixed g = SkColorGetG(c0); | 359 SkFixed g = SkColorGetG(c0); |
352 SkFixed b = SkColorGetB(c0); | 360 SkFixed b = SkColorGetB(c0); |
353 | 361 |
354 SkFixed dr = SkIntToFixed(SkColorGetR(c1) - r) / (count - 1); | 362 SkFixed dr = SkIntToFixed(SkColorGetR(c1) - r) / (count - 1); |
355 SkFixed dg = SkIntToFixed(SkColorGetG(c1) - g) / (count - 1); | 363 SkFixed dg = SkIntToFixed(SkColorGetG(c1) - g) / (count - 1); |
356 SkFixed db = SkIntToFixed(SkColorGetB(c1) - b) / (count - 1); | 364 SkFixed db = SkIntToFixed(SkColorGetB(c1) - b) / (count - 1); |
357 | 365 |
358 r = SkIntToFixed(r) + 0x8000; | 366 r = SkIntToFixed(r) + 0x8000; |
359 g = SkIntToFixed(g) + 0x8000; | 367 g = SkIntToFixed(g) + 0x8000; |
360 b = SkIntToFixed(b) + 0x8000; | 368 b = SkIntToFixed(b) + 0x8000; |
361 | 369 |
362 do { | 370 if (dither) { |
363 unsigned rr = r >> 16; | 371 do { |
364 unsigned gg = g >> 16; | 372 unsigned rr = r >> 16; |
365 unsigned bb = b >> 16; | 373 unsigned gg = g >> 16; |
366 cache[0] = SkPackRGB16(SkR32ToR16(rr), SkG32ToG16(gg), SkB32ToB16(bb)); | 374 unsigned bb = b >> 16; |
367 cache[kCache16Count] = SkDitherPack888ToRGB16(rr, gg, bb); | 375 cache[0] = SkPackRGB16(SkR32ToR16(rr), SkG32ToG16(gg), SkB32ToB16(bb)); |
368 cache += 1; | 376 cache[kCache16Count] = SkDitherPack888ToRGB16(rr, gg, bb); |
369 r += dr; | 377 cache += 1; |
370 g += dg; | 378 r += dr; |
371 b += db; | 379 g += dg; |
372 } while (--count != 0); | 380 b += db; |
| 381 } while (--count != 0); |
| 382 } else { |
| 383 do { |
| 384 unsigned rr = r >> 16; |
| 385 unsigned gg = g >> 16; |
| 386 unsigned bb = b >> 16; |
| 387 cache[0] = SkPackRGB16(SkR32ToR16(rr), SkG32ToG16(gg), SkB32ToB16(bb)); |
| 388 cache[kCache16Count] = cache[0]; |
| 389 cache += 1; |
| 390 r += dr; |
| 391 g += dg; |
| 392 b += db; |
| 393 } while (--count != 0); |
| 394 } |
373 } | 395 } |
374 | 396 |
375 /* | 397 /* |
376 * r,g,b used to be SkFixed, but on gcc (4.2.1 mac and 4.6.3 goobuntu) in | 398 * r,g,b used to be SkFixed, but on gcc (4.2.1 mac and 4.6.3 goobuntu) in |
377 * release builds, we saw a compiler error where the 0xFF parameter in | 399 * release builds, we saw a compiler error where the 0xFF parameter in |
378 * SkPackARGB32() was being totally ignored whenever it was called with | 400 * SkPackARGB32() was being totally ignored whenever it was called with |
379 * a non-zero add (e.g. 0x8000). | 401 * a non-zero add (e.g. 0x8000). |
380 * | 402 * |
381 * We found two work-arounds: | 403 * We found two work-arounds: |
382 * 1. change r,g,b to unsigned (or just one of them) | 404 * 1. change r,g,b to unsigned (or just one of them) |
383 * 2. change SkPackARGB32 to + its (a << SK_A32_SHIFT) value instead | 405 * 2. change SkPackARGB32 to + its (a << SK_A32_SHIFT) value instead |
384 * of using | | 406 * of using | |
385 * | 407 * |
386 * We chose #1 just because it was more localized. | 408 * We chose #1 just because it was more localized. |
387 * See http://code.google.com/p/skia/issues/detail?id=1113 | 409 * See http://code.google.com/p/skia/issues/detail?id=1113 |
388 * | 410 * |
390 * The type SkUFixed encapsulates this need for unsigned, but logically Fixed. | 411 * The type SkUFixed encapsulates this need for unsigned, but logically Fixed. |
390 */ | 412 */ |
391 typedef uint32_t SkUFixed; | 413 typedef uint32_t SkUFixed; |
392 | 414 |
393 void SkGradientShaderBase::GradientShaderCache::Build32bitCache( | 415 void SkGradientShaderBase::GradientShaderCache::Build32bitCache( |
394 SkPMColor cache[], SkColor c0, SkColor c1, | 416 SkPMColor cache[], SkColor c0, SkColor c1, |
395 int count, U8CPU paintAlpha, uint32_t gradFlags) { | 417 int count, U8CPU paintAlpha, uint32_t gradFlags, bool dither) { |
396 SkASSERT(count > 1); | 418 SkASSERT(count > 1); |
397 | 419 |
398 // need to apply paintAlpha to our two endpoints | 420 // need to apply paintAlpha to our two endpoints |
399 uint32_t a0 = SkMulDiv255Round(SkColorGetA(c0), paintAlpha); | 421 uint32_t a0 = SkMulDiv255Round(SkColorGetA(c0), paintAlpha); |
400 uint32_t a1 = SkMulDiv255Round(SkColorGetA(c1), paintAlpha); | 422 uint32_t a1 = SkMulDiv255Round(SkColorGetA(c1), paintAlpha); |
401 | 423 |
402 | 424 |
403 const bool interpInPremul = SkToBool(gradFlags & | 425 const bool interpInPremul = SkToBool(gradFlags & |
404 SkGradientShader::kInterpolateColorsInPremul_Flag); | 426 SkGradientShader::kInterpolateColorsInPremul_Flag); |
405 | 427 |
(...skipping 19 matching lines...) |
425 SkFixed dr = SkIntToFixed(r1 - r0) / (count - 1); | 447 SkFixed dr = SkIntToFixed(r1 - r0) / (count - 1); |
426 SkFixed dg = SkIntToFixed(g1 - g0) / (count - 1); | 448 SkFixed dg = SkIntToFixed(g1 - g0) / (count - 1); |
427 SkFixed db = SkIntToFixed(b1 - b0) / (count - 1); | 449 SkFixed db = SkIntToFixed(b1 - b0) / (count - 1); |
428 | 450 |
429 /* We pre-add 1/8 to avoid having to add this to our [0] value each time | 451 /* We pre-add 1/8 to avoid having to add this to our [0] value each time |
430 in the loop. Without this, the bias for each would be | 452 in the loop. Without this, the bias for each would be |
431 0x2000 0xA000 0xE000 0x6000 | 453 0x2000 0xA000 0xE000 0x6000 |
432 With this trick, we can add 0 for the first (no-op) and just adjust the | 454 With this trick, we can add 0 for the first (no-op) and just adjust the |
433 others. | 455 others. |
434 */ | 456 */ |
435 SkUFixed a = SkIntToFixed(a0) + 0x2000; | 457 const SkUFixed bias0 = dither ? 0x2000 : 0x8000; |
436 SkUFixed r = SkIntToFixed(r0) + 0x2000; | 458 const SkUFixed bias1 = dither ? 0x8000 : 0; |
437 SkUFixed g = SkIntToFixed(g0) + 0x2000; | 459 const SkUFixed bias2 = dither ? 0xC000 : 0; |
438 SkUFixed b = SkIntToFixed(b0) + 0x2000; | 460 const SkUFixed bias3 = dither ? 0x4000 : 0; |
| 461 |
| 462 SkUFixed a = SkIntToFixed(a0) + bias0; |
| 463 SkUFixed r = SkIntToFixed(r0) + bias0; |
| 464 SkUFixed g = SkIntToFixed(g0) + bias0; |
| 465 SkUFixed b = SkIntToFixed(b0) + bias0; |
439 | 466 |
440 /* | 467 /* |
441 * Our dither-cell (spatially) is | 468 * Our dither-cell (spatially) is |
442 * 0 2 | 469 * 0 2 |
443 * 3 1 | 470 * 3 1 |
444 * Where | 471 * Where |
445 * [0] -> [-1/8 ... 1/8 ) values near 0 | 472 * [0] -> [-1/8 ... 1/8 ) values near 0 |
446 * [1] -> [ 1/8 ... 3/8 ) values near 1/4 | 473 * [1] -> [ 1/8 ... 3/8 ) values near 1/4 |
447 * [2] -> [ 3/8 ... 5/8 ) values near 1/2 | 474 * [2] -> [ 3/8 ... 5/8 ) values near 1/2 |
448 * [3] -> [ 5/8 ... 7/8 ) values near 3/4 | 475 * [3] -> [ 5/8 ... 7/8 ) values near 3/4 |
449 */ | 476 */ |
450 | 477 |
451 if (0xFF == a0 && 0 == da) { | 478 if (0xFF == a0 && 0 == da) { |
452 do { | 479 do { |
453 cache[kCache32Count*0] = SkPackARGB32(0xFF, (r + 0 ) >> 16, | 480 cache[kCache32Count*0] = SkPackARGB32(0xFF, (r + 0 ) >> 16, |
454 (g + 0 ) >> 16, | 481 (g + 0 ) >> 16, |
455 (b + 0 ) >> 16); | 482 (b + 0 ) >> 16); |
456 cache[kCache32Count*1] = SkPackARGB32(0xFF, (r + 0x8000) >> 16, | 483 cache[kCache32Count*1] = SkPackARGB32(0xFF, (r + bias1) >> 16, |
457 (g + 0x8000) >> 16, | 484 (g + bias1) >> 16, |
458 (b + 0x8000) >> 16); | 485 (b + bias1) >> 16); |
459 cache[kCache32Count*2] = SkPackARGB32(0xFF, (r + 0xC000) >> 16, | 486 cache[kCache32Count*2] = SkPackARGB32(0xFF, (r + bias2) >> 16, |
460 (g + 0xC000) >> 16, | 487 (g + bias2) >> 16, |
461 (b + 0xC000) >> 16); | 488 (b + bias2) >> 16); |
462 cache[kCache32Count*3] = SkPackARGB32(0xFF, (r + 0x4000) >> 16, | 489 cache[kCache32Count*3] = SkPackARGB32(0xFF, (r + bias3) >> 16, |
463 (g + 0x4000) >> 16, | 490 (g + bias3) >> 16, |
464 (b + 0x4000) >> 16); | 491 (b + bias3) >> 16); |
465 cache += 1; | 492 cache += 1; |
466 r += dr; | 493 r += dr; |
467 g += dg; | 494 g += dg; |
468 b += db; | 495 b += db; |
469 } while (--count != 0); | 496 } while (--count != 0); |
470 } else if (interpInPremul) { | 497 } else if (interpInPremul) { |
471 do { | 498 do { |
472 cache[kCache32Count*0] = SkPackARGB32((a + 0 ) >> 16, | 499 cache[kCache32Count*0] = SkPackARGB32((a + 0 ) >> 16, |
473 (r + 0 ) >> 16, | 500 (r + 0 ) >> 16, |
474 (g + 0 ) >> 16, | 501 (g + 0 ) >> 16, |
475 (b + 0 ) >> 16); | 502 (b + 0 ) >> 16); |
476 cache[kCache32Count*1] = SkPackARGB32((a + 0x8000) >> 16, | 503 cache[kCache32Count*1] = SkPackARGB32((a + bias1) >> 16, |
477 (r + 0x8000) >> 16, | 504 (r + bias1) >> 16, |
478 (g + 0x8000) >> 16, | 505 (g + bias1) >> 16, |
479 (b + 0x8000) >> 16); | 506 (b + bias1) >> 16); |
480 cache[kCache32Count*2] = SkPackARGB32((a + 0xC000) >> 16, | 507 cache[kCache32Count*2] = SkPackARGB32((a + bias2) >> 16, |
481 (r + 0xC000) >> 16, | 508 (r + bias2) >> 16, |
482 (g + 0xC000) >> 16, | 509 (g + bias2) >> 16, |
483 (b + 0xC000) >> 16); | 510 (b + bias2) >> 16); |
484 cache[kCache32Count*3] = SkPackARGB32((a + 0x4000) >> 16, | 511 cache[kCache32Count*3] = SkPackARGB32((a + bias3) >> 16, |
485 (r + 0x4000) >> 16, | 512 (r + bias3) >> 16, |
486 (g + 0x4000) >> 16, | 513 (g + bias3) >> 16, |
487 (b + 0x4000) >> 16); | 514 (b + bias3) >> 16); |
488 cache += 1; | 515 cache += 1; |
489 a += da; | 516 a += da; |
490 r += dr; | 517 r += dr; |
491 g += dg; | 518 g += dg; |
492 b += db; | 519 b += db; |
493 } while (--count != 0); | 520 } while (--count != 0); |
494 } else { // interpolate in unpremul space | 521 } else { // interpolate in unpremul space |
495 do { | 522 do { |
496 cache[kCache32Count*0] = SkPremultiplyARGBInline((a + 0 ) >> 16, | 523 cache[kCache32Count*0] = SkPremultiplyARGBInline((a + 0 ) >> 16, |
497 (r + 0 ) >> 16, | 524 (r + 0 ) >> 16, |
498 (g + 0 ) >> 16, | 525 (g + 0 ) >> 16, |
499 (b + 0 ) >> 16); | 526 (b + 0 ) >> 16); |
500 cache[kCache32Count*1] = SkPremultiplyARGBInline((a + 0x8000) >> 16, | 527 cache[kCache32Count*1] = SkPremultiplyARGBInline((a + bias1) >> 16, |
501 (r + 0x8000) >> 16, | 528 (r + bias1) >> 16, |
502 (g + 0x8000) >> 16, | 529 (g + bias1) >> 16, |
503 (b + 0x8000) >> 16); | 530 (b + bias1) >> 16); |
504 cache[kCache32Count*2] = SkPremultiplyARGBInline((a + 0xC000) >> 16, | 531 cache[kCache32Count*2] = SkPremultiplyARGBInline((a + bias2) >> 16, |
505 (r + 0xC000) >> 16, | 532 (r + bias2) >> 16, |
506 (g + 0xC000) >> 16, | 533 (g + bias2) >> 16, |
507 (b + 0xC000) >> 16); | 534 (b + bias2) >> 16); |
508 cache[kCache32Count*3] = SkPremultiplyARGBInline((a + 0x4000) >> 16, | 535 cache[kCache32Count*3] = SkPremultiplyARGBInline((a + bias3) >> 16, |
509 (r + 0x4000) >> 16, | 536 (r + bias3) >> 16, |
510 (g + 0x4000) >> 16, | 537 (g + bias3) >> 16, |
511 (b + 0x4000) >> 16); | 538 (b + bias3) >> 16); |
512 cache += 1; | 539 cache += 1; |
513 a += da; | 540 a += da; |
514 r += dr; | 541 r += dr; |
515 g += dg; | 542 g += dg; |
516 b += db; | 543 b += db; |
517 } while (--count != 0); | 544 } while (--count != 0); |
518 } | 545 } |
519 } | 546 } |
520 | 547 |
521 static inline int SkFixedToFFFF(SkFixed x) { | 548 static inline int SkFixedToFFFF(SkFixed x) { |
(...skipping 11 matching lines...) |
533 void SkGradientShaderBase::GradientShaderCache::initCache16(GradientShaderCache* cache) { | 560 void SkGradientShaderBase::GradientShaderCache::initCache16(GradientShaderCache* cache) { |
534 // double the count for dither entries | 561 // double the count for dither entries |
535 const int entryCount = kCache16Count * 2; | 562 const int entryCount = kCache16Count * 2; |
536 const size_t allocSize = sizeof(uint16_t) * entryCount; | 563 const size_t allocSize = sizeof(uint16_t) * entryCount; |
537 | 564 |
538 SkASSERT(nullptr == cache->fCache16Storage); | 565 SkASSERT(nullptr == cache->fCache16Storage); |
539 cache->fCache16Storage = (uint16_t*)sk_malloc_throw(allocSize); | 566 cache->fCache16Storage = (uint16_t*)sk_malloc_throw(allocSize); |
540 cache->fCache16 = cache->fCache16Storage; | 567 cache->fCache16 = cache->fCache16Storage; |
541 if (cache->fShader.fColorCount == 2) { | 568 if (cache->fShader.fColorCount == 2) { |
542 Build16bitCache(cache->fCache16, cache->fShader.fOrigColors[0], | 569 Build16bitCache(cache->fCache16, cache->fShader.fOrigColors[0], |
543 cache->fShader.fOrigColors[1], kCache16Count); | 570 cache->fShader.fOrigColors[1], kCache16Count, cache->fCacheDither); |
544 } else { | 571 } else { |
545 Rec* rec = cache->fShader.fRecs; | 572 Rec* rec = cache->fShader.fRecs; |
546 int prevIndex = 0; | 573 int prevIndex = 0; |
547 for (int i = 1; i < cache->fShader.fColorCount; i++) { | 574 for (int i = 1; i < cache->fShader.fColorCount; i++) { |
548 int nextIndex = SkFixedToFFFF(rec[i].fPos) >> kCache16Shift; | 575 int nextIndex = SkFixedToFFFF(rec[i].fPos) >> kCache16Shift; |
549 SkASSERT(nextIndex < kCache16Count); | 576 SkASSERT(nextIndex < kCache16Count); |
550 | 577 |
551 if (nextIndex > prevIndex) | 578 if (nextIndex > prevIndex) |
552 Build16bitCache(cache->fCache16 + prevIndex, cache->fShader.fOrigColors[i-1], | 579 Build16bitCache(cache->fCache16 + prevIndex, cache->fShader.fOrigColors[i-1], |
553 cache->fShader.fOrigColors[i], nextIndex - prevIndex + 1); | 580 cache->fShader.fOrigColors[i], nextIndex - prevIndex + 1, |
| 581 cache->fCacheDither); |
554 prevIndex = nextIndex; | 582 prevIndex = nextIndex; |
555 } | 583 } |
556 } | 584 } |
557 } | 585 } |
558 | 586 |
559 const SkPMColor* SkGradientShaderBase::GradientShaderCache::getCache32() { | 587 const SkPMColor* SkGradientShaderBase::GradientShaderCache::getCache32() { |
560 SkOnce(&fCache32Inited, &fCache32Mutex, SkGradientShaderBase::GradientShaderCache::initCache32, | 588 SkOnce(&fCache32Inited, &fCache32Mutex, SkGradientShaderBase::GradientShaderCache::initCache32, |
561 this); | 589 this); |
562 SkASSERT(fCache32); | 590 SkASSERT(fCache32); |
563 return fCache32; | 591 return fCache32; |
564 } | 592 } |
565 | 593 |
566 void SkGradientShaderBase::GradientShaderCache::initCache32(GradientShaderCache* cache) { | 594 void SkGradientShaderBase::GradientShaderCache::initCache32(GradientShaderCache* cache) { |
567 const int kNumberOfDitherRows = 4; | 595 const int kNumberOfDitherRows = 4; |
568 const SkImageInfo info = SkImageInfo::MakeN32Premul(kCache32Count, kNumberOfDitherRows); | 596 const SkImageInfo info = SkImageInfo::MakeN32Premul(kCache32Count, kNumberOfDitherRows); |
569 | 597 |
570 SkASSERT(nullptr == cache->fCache32PixelRef); | 598 SkASSERT(nullptr == cache->fCache32PixelRef); |
571 cache->fCache32PixelRef = SkMallocPixelRef::NewAllocate(info, 0, nullptr); | 599 cache->fCache32PixelRef = SkMallocPixelRef::NewAllocate(info, 0, nullptr); |
572 cache->fCache32 = (SkPMColor*)cache->fCache32PixelRef->getAddr(); | 600 cache->fCache32 = (SkPMColor*)cache->fCache32PixelRef->getAddr(); |
573 if (cache->fShader.fColorCount == 2) { | 601 if (cache->fShader.fColorCount == 2) { |
574 Build32bitCache(cache->fCache32, cache->fShader.fOrigColors[0], | 602 Build32bitCache(cache->fCache32, cache->fShader.fOrigColors[0], |
575 cache->fShader.fOrigColors[1], kCache32Count, cache->fCacheAlpha, | 603 cache->fShader.fOrigColors[1], kCache32Count, cache->fCacheAlpha, |
576 cache->fShader.fGradFlags); | 604 cache->fShader.fGradFlags, cache->fCacheDither); |
577 } else { | 605 } else { |
578 Rec* rec = cache->fShader.fRecs; | 606 Rec* rec = cache->fShader.fRecs; |
579 int prevIndex = 0; | 607 int prevIndex = 0; |
580 for (int i = 1; i < cache->fShader.fColorCount; i++) { | 608 for (int i = 1; i < cache->fShader.fColorCount; i++) { |
581 int nextIndex = SkFixedToFFFF(rec[i].fPos) >> kCache32Shift; | 609 int nextIndex = SkFixedToFFFF(rec[i].fPos) >> kCache32Shift; |
582 SkASSERT(nextIndex < kCache32Count); | 610 SkASSERT(nextIndex < kCache32Count); |
583 | 611 |
584 if (nextIndex > prevIndex) | 612 if (nextIndex > prevIndex) |
585 Build32bitCache(cache->fCache32 + prevIndex, cache->fShader.fOrigColors[i-1], | 613 Build32bitCache(cache->fCache32 + prevIndex, cache->fShader.fOrigColors[i-1], |
586 cache->fShader.fOrigColors[i], nextIndex - prevIndex + 1, | 614 cache->fShader.fOrigColors[i], nextIndex - prevIndex + 1, |
587 cache->fCacheAlpha, cache->fShader.fGradFlags); | 615 cache->fCacheAlpha, cache->fShader.fGradFlags, cache->fCacheDither); |
588 prevIndex = nextIndex; | 616 prevIndex = nextIndex; |
589 } | 617 } |
590 } | 618 } |
591 } | 619 } |
592 | 620 |
593 /* | 621 /* |
594 * The gradient holds a cache for the most recent value of alpha. Successive | 622 * The gradient holds a cache for the most recent value of alpha. Successive |
595 * callers with the same alpha value will share the same cache. | 623 * callers with the same alpha value will share the same cache. |
596 */ | 624 */ |
597 SkGradientShaderBase::GradientShaderCache* SkGradientShaderBase::refCache(U8CPU alpha) const { | 625 SkGradientShaderBase::GradientShaderCache* SkGradientShaderBase::refCache(U8CPU alpha, |
| 626 bool dither) const { |
598 SkAutoMutexAcquire ama(fCacheMutex); | 627 SkAutoMutexAcquire ama(fCacheMutex); |
599 if (!fCache || fCache->getAlpha() != alpha) { | 628 if (!fCache || fCache->getAlpha() != alpha || fCache->getDither() != dither) { |
600 fCache.reset(new GradientShaderCache(alpha, *this)); | 629 fCache.reset(new GradientShaderCache(alpha, dither, *this)); |
601 } | 630 } |
602 // Increment the ref counter inside the mutex to ensure the returned pointer is still valid. | 631 // Increment the ref counter inside the mutex to ensure the returned pointer is still valid. |
603 // Otherwise, the pointer may have been overwritten on a different thread before the object's | 632 // Otherwise, the pointer may have been overwritten on a different thread before the object's |
604 // ref count was incremented. | 633 // ref count was incremented. |
605 fCache.get()->ref(); | 634 fCache.get()->ref(); |
606 return fCache; | 635 return fCache; |
607 } | 636 } |
608 | 637 |
609 SK_DECLARE_STATIC_MUTEX(gGradientCacheMutex); | 638 SK_DECLARE_STATIC_MUTEX(gGradientCacheMutex); |
610 /* | 639 /* |
611 * Because our caller might rebuild the same (logically the same) gradient | 640 * Because our caller might rebuild the same (logically the same) gradient |
612 * over and over, we'd like to return exactly the same "bitmap" if possible, | 641 * over and over, we'd like to return exactly the same "bitmap" if possible, |
613 * allowing the client to utilize a cache of our bitmap (e.g. with a GPU). | 642 * allowing the client to utilize a cache of our bitmap (e.g. with a GPU). |
614 * To do that, we maintain a private cache of built-bitmaps, based on our | 643 * To do that, we maintain a private cache of built-bitmaps, based on our |
615 * colors and positions. Note: we don't try to flatten the fMapper, so if one | 644 * colors and positions. Note: we don't try to flatten the fMapper, so if one |
616 * is present, we skip the cache for now. | 645 * is present, we skip the cache for now. |
617 */ | 646 */ |
618 void SkGradientShaderBase::getGradientTableBitmap(SkBitmap* bitmap) const { | 647 void SkGradientShaderBase::getGradientTableBitmap(SkBitmap* bitmap) const { |
619 // our caller assumes no external alpha, so we ensure that our cache is | 648 // our caller assumes no external alpha, so we ensure that our cache is |
620 // built with 0xFF | 649 // built with 0xFF |
621 SkAutoTUnref<GradientShaderCache> cache(this->refCache(0xFF)); | 650 SkAutoTUnref<GradientShaderCache> cache(this->refCache(0xFF, true)); |
622 | 651 |
623 // build our key: [numColors + colors[] + {positions[]} + flags ] | 652 // build our key: [numColors + colors[] + {positions[]} + flags ] |
624 int count = 1 + fColorCount + 1; | 653 int count = 1 + fColorCount + 1; |
625 if (fColorCount > 2) { | 654 if (fColorCount > 2) { |
626 count += fColorCount - 1; // fRecs[].fPos | 655 count += fColorCount - 1; // fRecs[].fPos |
627 } | 656 } |
628 | 657 |
629 SkAutoSTMalloc<16, int32_t> storage(count); | 658 SkAutoSTMalloc<16, int32_t> storage(count); |
630 int32_t* buffer = storage.get(); | 659 int32_t* buffer = storage.get(); |
631 | 660 |
(...skipping 546 matching lines...) |
1178 (*stops)[i] = stop; | 1207 (*stops)[i] = stop; |
1179 stop = i < outColors - 1 ? stop + random->nextUScalar1() * (1.f - stop) : 1.f; | 1208 stop = i < outColors - 1 ? stop + random->nextUScalar1() * (1.f - stop) : 1.f; |
1180 } | 1209 } |
1181 } | 1210 } |
1182 *tm = static_cast<SkShader::TileMode>(random->nextULessThan(SkShader::kTileModeCount)); | 1211 *tm = static_cast<SkShader::TileMode>(random->nextULessThan(SkShader::kTileModeCount)); |
1183 | 1212 |
1184 return outColors; | 1213 return outColors; |
1185 } | 1214 } |
1186 | 1215 |
1187 #endif | 1216 #endif |
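The bias constants used in Build32bitCache above implement a 2x2 ordered dither in 16.16 fixed point: with dithering on, the four cache rows see total biases of 1/8, 5/8, 7/8 and 3/8 before truncation; with dithering off, every row gets the same round-to-nearest value. A minimal standalone sketch of that arithmetic (illustrative only, not part of the patch; the sample value and the kBias name are made up):

    // Sketch only: how one 16.16 fixed-point channel value maps to the four
    // dither rows versus a single round-to-nearest value.
    #include <cstdint>
    #include <cstdio>

    int main() {
        uint32_t v = (100u << 16) + 0x4CCC;          // roughly 100.3 in 16.16

        // Dither path: pre-add 1/8 (0x2000), then add 0, 1/2, 3/4, 1/4 per
        // row, so the total biases are 1/8, 5/8, 7/8, 3/8 before truncation.
        const uint32_t kBias[4] = { 0, 0x8000, 0xC000, 0x4000 };
        uint32_t pre = v + 0x2000;
        for (int row = 0; row < 4; ++row) {
            printf("dither row %d -> %u\n", row, (unsigned)((pre + kBias[row]) >> 16));
        }

        // Non-dither path: one round-to-nearest value reused for every row.
        printf("no dither   -> %u\n", (unsigned)((v + 0x8000) >> 16));
        return 0;
    }

Averaged over the four rows, the dithered outputs recover most of the fractional part that plain rounding would discard.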
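The refCache() change is the other half of the patch: the color tables are built either dithered or not, so the single-entry cache must be keyed on (alpha, dither) or a table built for one setting could be handed out for the other. A rough sketch of that keying pattern with stand-in types (not the actual Skia classes; the mutex held by the real code is omitted):

    #include <memory>

    struct TableCache {                 // stand-in for GradientShaderCache
        unsigned alpha;
        bool     dither;
        TableCache(unsigned a, bool d) : alpha(a), dither(d) {}
    };

    struct Shader {                     // stand-in for SkGradientShaderBase
        mutable std::shared_ptr<TableCache> fCache;

        std::shared_ptr<TableCache> refCache(unsigned alpha, bool dither) const {
            // Rebuild only when some part of the key changes; otherwise the
            // previously built table is shared with the caller.
            if (!fCache || fCache->alpha != alpha || fCache->dither != dither) {
                fCache = std::make_shared<TableCache>(alpha, dither);
            }
            return fCache;
        }
    };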