/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef SkPx_none_DEFINED
#define SkPx_none_DEFINED

// Nothing fancy here. We're the backup _none case after all.
// Our declared sweet spot is simply a single pixel at a time.

namespace none {

| 16 struct SkPx { | |
| 17 static const int N = 1; | |
| 18 uint8_t f8[4]; | |
| 19 | |
| 20 SkPx(uint32_t px) { memcpy(f8, &px, 4); } | |
| 21 SkPx(uint8_t x, uint8_t y, uint8_t z, uint8_t a) { | |
| 22 f8[0] = x; f8[1] = y; f8[2] = z; f8[3] = a; | |
| 23 } | |
| 24 | |
| 25 static SkPx Dup(uint32_t px) { return px; } | |
| 26 static SkPx Load(const uint32_t* px) { return *px; } | |
| 27 static SkPx Load(const uint32_t* px, int n) { | |
| 28 SkASSERT(false); // There are no 0<n<1. | |
| 29 return 0; | |
| 30 } | |
| 31 | |
| 32 void store(uint32_t* px) const { memcpy(px, f8, 4); } | |
| 33 void store(uint32_t* px, int n) const { | |
| 34 SkASSERT(false); // There are no 0<n<1. | |
| 35 } | |
| 36 | |
| 37 struct Alpha { | |
| 38 uint8_t fA; | |
| 39 Alpha(uint8_t a) : fA(a) {} | |
| 40 | |
| 41 static Alpha Dup(uint8_t a) { return a; } | |
| 42 static Alpha Load(const uint8_t* a) { return *a; } | |
| 43 static Alpha Load(const uint8_t* a, int n) { | |
| 44 SkASSERT(false); // There are no 0<n<1. | |
| 45 return 0; | |
| 46 } | |
| 47 Alpha inv() const { return 255 - fA; } | |
| 48 }; | |
| 49 | |
| 50 struct Wide { | |
| 51 uint16_t f16[4]; | |
| 52 | |
| 53 Wide(uint16_t x, uint16_t y, uint16_t z, uint16_t a) { | |
| 54 f16[0] = x; f16[1] = y; f16[2] = z; f16[3] = a; | |
| 55 } | |
| 56 | |
| 57 Wide operator+(const Wide& o) const { | |
| 58 return Wide(f16[0]+o.f16[0], f16[1]+o.f16[1], f16[2]+o.f16[2], f16[3
]+o.f16[3]); | |
| 59 } | |
| 60 Wide operator-(const Wide& o) const { | |
| 61 return Wide(f16[0]-o.f16[0], f16[1]-o.f16[1], f16[2]-o.f16[2], f16[3
]-o.f16[3]); | |
| 62 } | |
| 63 template <int bits> Wide shl() const { | |
| 64 return Wide(f16[0]<<bits, f16[1]<<bits, f16[2]<<bits, f16[3]<<bits); | |
| 65 } | |
| 66 template <int bits> Wide shr() const { | |
| 67 return Wide(f16[0]>>bits, f16[1]>>bits, f16[2]>>bits, f16[3]>>bits); | |
| 68 } | |
| 69 | |
| 70 SkPx addNarrowHi(const SkPx& o) const { | |
| 71 Wide sum = (*this + o.widenLo()).shr<8>(); | |
| 72 return SkPx(sum.f16[0], sum.f16[1], sum.f16[2], sum.f16[3]); | |
| 73 } | |
| 74 }; | |
| 75 | |
| 76 Alpha alpha() const { return f8[3]; } | |
| 77 | |
| 78 Wide widenLo() const { return Wide(f8[0], f8[1], f8[2], f8[3]); } | |
| 79 Wide widenHi() const { return this->widenLo().shl<8>(); } | |
| 80 Wide widenLoHi() const { return this->widenLo() + this->widenHi(); } | |
| 81 | |
| 82 SkPx operator+(const SkPx& o) const { | |
| 83 return SkPx(f8[0]+o.f8[0], f8[1]+o.f8[1], f8[2]+o.f8[2], f8[3]+o.f8[3]); | |
| 84 } | |
| 85 SkPx operator-(const SkPx& o) const { | |
| 86 return SkPx(f8[0]-o.f8[0], f8[1]-o.f8[1], f8[2]-o.f8[2], f8[3]-o.f8[3]); | |
| 87 } | |
| 88 SkPx saturatedAdd(const SkPx& o) const { | |
| 89 return SkPx(SkTMax(0, SkTMin(255, f8[0]+o.f8[0])), | |
| 90 SkTMax(0, SkTMin(255, f8[1]+o.f8[1])), | |
| 91 SkTMax(0, SkTMin(255, f8[2]+o.f8[2])), | |
| 92 SkTMax(0, SkTMin(255, f8[3]+o.f8[3]))); | |
| 93 } | |
| 94 | |
| 95 Wide operator*(const Alpha& a) const { | |
| 96 return Wide(f8[0]*a.fA, f8[1]*a.fA, f8[2]*a.fA, f8[3]*a.fA); | |
| 97 } | |
| 98 SkPx approxMulDiv255(const Alpha& a) const { | |
| 99 return (*this * a).addNarrowHi(*this); | |
| 100 } | |
| 101 | |
| 102 SkPx addAlpha(const Alpha& a) const { | |
| 103 return SkPx(f8[0], f8[1], f8[2], f8[3]+a.fA); | |
| 104 } | |
| 105 }; | |

} // namespace none

typedef none::SkPx SkPx;

#endif//SkPx_none_DEFINED