OLD | NEW |
1 /* | 1 /* |
2 * Copyright 2011 Google Inc. | 2 * Copyright 2011 Google Inc. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
6 */ | 6 */ |
7 | 7 |
8 #include "SkBitmapCache.h" | 8 #include "SkBitmapCache.h" |
9 #include "SkBitmapController.h" | 9 #include "SkBitmapController.h" |
10 #include "SkBitmapProcState.h" | 10 #include "SkBitmapProcState.h" |
(...skipping 482 matching lines...)
493 | 493 |
494 if (s.fInvType > SkMatrix::kTranslate_Mask) { | 494 if (s.fInvType > SkMatrix::kTranslate_Mask) { |
495 const SkBitmapProcStateAutoMapper mapper(s, x, y); | 495 const SkBitmapProcStateAutoMapper mapper(s, x, y); |
496 | 496 |
497 // When the matrix has a scale component the setup code in | 497 // When the matrix has a scale component the setup code in |
498 // chooseProcs multiplies the inverse matrix by the inverse of the | 498 // chooseProcs multiplies the inverse matrix by the inverse of the |
499 // bitmap's width and height. Since this method is going to do | 499 // bitmap's width and height. Since this method is going to do |
500 // its own tiling and sampling we need to undo that here. | 500 // its own tiling and sampling we need to undo that here. |
501 if (SkShader::kClamp_TileMode != s.fTileModeX || | 501 if (SkShader::kClamp_TileMode != s.fTileModeX || |
502 SkShader::kClamp_TileMode != s.fTileModeY) { | 502 SkShader::kClamp_TileMode != s.fTileModeY) { |
503 yTemp = SkFractionalIntToInt(mapper.y() * s.fPixmap.height()); | 503 yTemp = SkFractionalIntToInt(mapper.fractionalIntY() * s.fPixmap.height()); |
504 } else { | 504 } else { |
505 yTemp = SkFractionalIntToInt(mapper.y()); | 505 yTemp = mapper.intY(); |
506 } | 506 } |
507 } else { | 507 } else { |
508 yTemp = s.fFilterOneY + y; | 508 yTemp = s.fFilterOneY + y; |
509 } | 509 } |
510 | 510 |
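A note on the hunk above: the new mapper.fractionalIntY() call returns the mapped y in fractional form, and the repeat/mirror branch multiplies it back by the pixmap height because, per the comment, chooseProcs pre-scales the inverse matrix by the inverse of the bitmap's dimensions for those tile modes. A minimal standalone sketch of that un-normalization, using illustrative stand-in names rather than the real Skia types:

    #include <cstdint>

    // Illustrative stand-ins; SkFractionalInt is a comparable fixed-point type.
    using FractionalInt = int64_t;                       // 32.32 fixed point
    static int FractionalToInt(FractionalInt v) { return (int)(v >> 32); }

    // For repeat/mirror tiling the mapped value was pre-divided by the bitmap
    // height, so scale it back to pixel units before truncating; the
    // clamp-only path is already in pixel units.
    static int sourceRow(FractionalInt mappedY, int height, bool clampOnly) {
        return clampOnly ? FractionalToInt(mappedY)
                         : FractionalToInt(mappedY * height);
    }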
511 const int stopY = s.fPixmap.height(); | 511 const int stopY = s.fPixmap.height(); |
512 switch (s.fTileModeY) { | 512 switch (s.fTileModeY) { |
513 case SkShader::kClamp_TileMode: | 513 case SkShader::kClamp_TileMode: |
514 iY0 = SkClampMax(yTemp, stopY-1); | 514 iY0 = SkClampMax(yTemp, stopY-1); |
515 break; | 515 break; |
516 case SkShader::kRepeat_TileMode: | 516 case SkShader::kRepeat_TileMode: |
517 iY0 = sk_int_mod(yTemp, stopY); | 517 iY0 = sk_int_mod(yTemp, stopY); |
518 break; | 518 break; |
519 case SkShader::kMirror_TileMode: | 519 case SkShader::kMirror_TileMode: |
520 default: | 520 default: |
521 iY0 = sk_int_mirror(yTemp, stopY); | 521 iY0 = sk_int_mirror(yTemp, stopY); |
522 break; | 522 break; |
523 } | 523 } |
524 | 524 |
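The switch above picks the starting row iY0 according to the tile mode. For reference, a standalone sketch of the three mappings (re-implementations for illustration only, not the SkClampMax / sk_int_mod / sk_int_mirror helpers themselves):

    // v is an integer coordinate, limit is the pixmap extent (> 0).
    static int tileClamp(int v, int limit) {             // kClamp_TileMode
        return v < 0 ? 0 : (v >= limit ? limit - 1 : v);
    }
    static int tileRepeat(int v, int limit) {            // kRepeat_TileMode
        int m = v % limit;
        return m < 0 ? m + limit : m;                    // keep result in [0, limit)
    }
    static int tileMirror(int v, int limit) {            // kMirror_TileMode
        int period = 2 * limit;
        int m = tileRepeat(v, period);                   // fold into [0, 2*limit)
        return m < limit ? m : period - 1 - m;           // reflect the upper half
    }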
525 #ifdef SK_DEBUG | 525 #ifdef SK_DEBUG |
526 { | 526 { |
527 const SkBitmapProcStateAutoMapper mapper(s, x, y); | 527 const SkBitmapProcStateAutoMapper mapper(s, x, y); |
528 int iY2; | 528 int iY2; |
529 | 529 |
530 if (s.fInvType > SkMatrix::kTranslate_Mask && | 530 if (s.fInvType > SkMatrix::kTranslate_Mask && |
531 (SkShader::kClamp_TileMode != s.fTileModeX || | 531 (SkShader::kClamp_TileMode != s.fTileModeX || |
532 SkShader::kClamp_TileMode != s.fTileModeY)) { | 532 SkShader::kClamp_TileMode != s.fTileModeY)) { |
533 iY2 = SkFractionalIntToInt(mapper.y() * s.fPixmap.height()); | 533 iY2 = SkFractionalIntToInt(mapper.fractionalIntY() * s.fPixmap.height()); |
534 } else { | 534 } else { |
535 iY2 = SkFractionalIntToInt(mapper.y()); | 535 iY2 = mapper.intY(); |
536 } | 536 } |
537 | 537 |
538 switch (s.fTileModeY) { | 538 switch (s.fTileModeY) { |
539 case SkShader::kClamp_TileMode: | 539 case SkShader::kClamp_TileMode: |
540 iY2 = SkClampMax(iY2, stopY-1); | 540 iY2 = SkClampMax(iY2, stopY-1); |
541 break; | 541 break; |
542 case SkShader::kRepeat_TileMode: | 542 case SkShader::kRepeat_TileMode: |
543 iY2 = sk_int_mod(iY2, stopY); | 543 iY2 = sk_int_mod(iY2, stopY); |
544 break; | 544 break; |
545 case SkShader::kMirror_TileMode: | 545 case SkShader::kMirror_TileMode: |
(...skipping 45 matching lines...)
591 * negate it. | 591 * negate it. |
592 */ | 592 */ |
593 const SkScalar too_big = SkIntToScalar(1 << 30); | 593 const SkScalar too_big = SkIntToScalar(1 << 30); |
594 if (SkScalarAbs(pt.fX) > too_big || SkScalarAbs(pt.fY) > too_big) { | 594 if (SkScalarAbs(pt.fX) > too_big || SkScalarAbs(pt.fY) > too_big) { |
595 return false; | 595 return false; |
596 } | 596 } |
597 | 597 |
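The too_big guard above bails out before any scalar-to-int conversion can overflow; as the (partly elided) comment notes, such an out-of-range result cannot even be safely negated. A sketch of the same guard pattern, assuming SkScalar is a float and using plain C++ types rather than the Skia ones:

    #include <cmath>

    // Reject coordinates whose magnitude cannot survive a float -> int
    // conversion; past this threshold the conversion can yield INT_MIN
    // (0x80000000), which has no valid negation in 32-bit arithmetic.
    static bool safeForIntConversion(float x, float y) {
        const float tooBig = (float)(1 << 30);           // same threshold as above
        return std::fabs(x) <= tooBig && std::fabs(y) <= tooBig;
    }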
598 // Since we know we're not filtered, we re-purpose these fields to allow | 598 // Since we know we're not filtered, we re-purpose these fields to allow |
599 // us to go from device -> src coordinates w/ just an integer add, | 599 // us to go from device -> src coordinates w/ just an integer add, |
600 // rather than running through the inverse-matrix | 600 // rather than running through the inverse-matrix |
601 fFilterOneX = SkFractionalIntToInt(mapper.x()); | 601 fFilterOneX = mapper.intX(); |
602 fFilterOneY = SkFractionalIntToInt(mapper.y()); | 602 fFilterOneY = mapper.intY(); |
603 | 603 |
604 return true; | 604 return true; |
605 } | 605 } |
606 | 606 |
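For context on fFilterOneX/fFilterOneY being set to mapper.intX()/intY() above: in the unfiltered, translate-only case the device-to-source mapping degenerates into an integer add, which is exactly how fFilterOneY is consumed at line 508 earlier in this file. A small sketch with illustrative names, not the Skia fields themselves:

    // Cache the integer translation once, then map per span with an add
    // instead of running the full inverse matrix.
    struct TranslateOnlyMapping {
        int offsetX, offsetY;                            // what fFilterOneX/Y cache

        void deviceToSource(int devX, int devY, int* srcX, int* srcY) const {
            *srcX = devX + offsetX;                      // no matrix multiply needed
            *srcY = devY + offsetY;
        }
    };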
607 SkBitmapProcState::ShaderProc32 SkBitmapProcState::chooseShaderProc32() { | 607 SkBitmapProcState::ShaderProc32 SkBitmapProcState::chooseShaderProc32() { |
608 | 608 |
609 if (kN32_SkColorType != fPixmap.colorType()) { | 609 if (kN32_SkColorType != fPixmap.colorType()) { |
610 return nullptr; | 610 return nullptr; |
611 } | 611 } |
612 | 612 |
(...skipping 161 matching lines...)
774 const SkBitmapProcState& s = *static_cast<const SkBitmapProcState*>(sIn); | 774 const SkBitmapProcState& s = *static_cast<const SkBitmapProcState*>(sIn); |
775 SkASSERT((s.fInvType & ~(SkMatrix::kTranslate_Mask | | 775 SkASSERT((s.fInvType & ~(SkMatrix::kTranslate_Mask | |
776 SkMatrix::kScale_Mask)) == 0); | 776 SkMatrix::kScale_Mask)) == 0); |
777 | 777 |
778 const unsigned maxX = s.fPixmap.width() - 1; | 778 const unsigned maxX = s.fPixmap.width() - 1; |
779 SkFractionalInt fx; | 779 SkFractionalInt fx; |
780 int dstY; | 780 int dstY; |
781 { | 781 { |
782 const SkBitmapProcStateAutoMapper mapper(s, x, y); | 782 const SkBitmapProcStateAutoMapper mapper(s, x, y); |
783 const unsigned maxY = s.fPixmap.height() - 1; | 783 const unsigned maxY = s.fPixmap.height() - 1; |
784 dstY = SkClampMax(SkFractionalIntToInt(mapper.y()), maxY); | 784 dstY = SkClampMax(mapper.intY(), maxY); |
785 fx = mapper.x(); | 785 fx = mapper.fractionalIntX(); |
786 } | 786 } |
787 | 787 |
788 const SkPMColor* SK_RESTRICT src = s.fPixmap.addr32(0, dstY); | 788 const SkPMColor* SK_RESTRICT src = s.fPixmap.addr32(0, dstY); |
789 const SkFractionalInt dx = s.fInvSxFractionalInt; | 789 const SkFractionalInt dx = s.fInvSxFractionalInt; |
790 | 790 |
791 // Check if we're safely inside [0...maxX] so no need to clamp each computed index. | 791 // Check if we're safely inside [0...maxX] so no need to clamp each computed index. |
792 // | 792 // |
793 if ((uint64_t)SkFractionalIntToInt(fx) <= maxX && | 793 if ((uint64_t)SkFractionalIntToInt(fx) <= maxX && |
794 (uint64_t)SkFractionalIntToInt(fx + dx * (count - 1)) <= maxX) | 794 (uint64_t)SkFractionalIntToInt(fx + dx * (count - 1)) <= maxX) |
795 { | 795 { |
(...skipping 16 matching lines...)
812 fx += dx; | 812 fx += dx; |
813 } | 813 } |
814 } else { | 814 } else { |
815 for (int i = 0; i < count; ++i) { | 815 for (int i = 0; i < count; ++i) { |
816 dst[i] = src[SkClampMax(SkFractionalIntToInt(fx), maxX)]; | 816 dst[i] = src[SkClampMax(SkFractionalIntToInt(fx), maxX)]; |
817 fx += dx; | 817 fx += dx; |
818 } | 818 } |
819 } | 819 } |
820 } | 820 } |
821 | 821 |
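The sampler above illustrates the fractional-int stepping pattern used throughout this file: cast the truncated index to an unsigned type so a single compare covers both the negative and the too-large case, and only fall back to per-pixel clamping when an endpoint is out of bounds. A standalone sketch of the same structure (FractionalInt stands in for SkFractionalInt; this is not the Skia routine itself):

    #include <algorithm>
    #include <cstdint>

    using FractionalInt = int64_t;                       // 32.32 fixed point
    static int toInt(FractionalInt v) { return (int)(v >> 32); }

    static void sampleRow(const uint32_t* src, int maxX,
                          FractionalInt fx, FractionalInt dx,
                          uint32_t* dst, int count) {
        // If both endpoints land inside [0, maxX], every index in between
        // does too (the step is monotonic), so the per-pixel clamp is skipped.
        const bool inBounds =
            (uint64_t)toInt(fx) <= (uint64_t)maxX &&
            (uint64_t)toInt(fx + dx * (count - 1)) <= (uint64_t)maxX;

        if (inBounds) {
            for (int i = 0; i < count; ++i) {            // fast path, no clamp
                dst[i] = src[toInt(fx)];
                fx += dx;
            }
        } else {
            for (int i = 0; i < count; ++i) {            // clamp each index
                dst[i] = src[std::clamp(toInt(fx), 0, maxX)];
                fx += dx;
            }
        }
    }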