OLD | NEW |
1 /* | 1 /* |
2 * Copyright 2016 Google Inc. | 2 * Copyright 2016 Google Inc. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
6 */ | 6 */ |
7 | 7 |
8 #ifndef Sk4fGradientPriv_DEFINED | 8 #ifndef Sk4fGradientPriv_DEFINED |
9 #define Sk4fGradientPriv_DEFINED | 9 #define Sk4fGradientPriv_DEFINED |
10 | 10 |
(...skipping 11 matching lines...) Expand all Loading... |
22 | 22 |
// Whether store paths should premultiply the color before writing to dest.
enum class ApplyPremul {
    True,
    False,
};
24 | 24 |
// Destination pixel formats handled by the gradient shader/blitter paths.
enum class DstType {
    L32,  // Linear 32bit.  Used for both shader/blitter paths.
    S32,  // SRGB 32bit.  Used for the blitter path only.
    F16,  // Linear half-float.  Used for blitters only.
    F32,  // Linear float.  Used for shaders only.
};
31 | 31 |
32 template <ApplyPremul premul> | |
33 inline SkPMColor trunc_from_4f_255(const Sk4f& c) { | |
34 SkPMColor pmc; | |
35 SkNx_cast<uint8_t>(c).store(&pmc); | |
36 if (premul == ApplyPremul::True) { | |
37 pmc = SkPreMultiplyARGB(SkGetPackedA32(pmc), SkGetPackedR32(pmc), | |
38 SkGetPackedG32(pmc), SkGetPackedB32(pmc)); | |
39 } | |
40 return pmc; | |
41 } | |
42 | |
43 template <ApplyPremul> | 32 template <ApplyPremul> |
44 struct PremulTraits; | 33 struct PremulTraits; |
45 | 34 |
46 template <> | 35 template <> |
47 struct PremulTraits<ApplyPremul::False> { | 36 struct PremulTraits<ApplyPremul::False> { |
48 static Sk4f apply(const Sk4f& c) { return c; } | 37 static Sk4f apply(const Sk4f& c) { return c; } |
49 }; | 38 }; |
50 | 39 |
51 template <> | 40 template <> |
52 struct PremulTraits<ApplyPremul::True> { | 41 struct PremulTraits<ApplyPremul::True> { |
53 static Sk4f apply(const Sk4f& c) { | 42 static Sk4f apply(const Sk4f& c) { |
54 const float alpha = c[SkPM4f::A]; | 43 const float alpha = c[SkPM4f::A]; |
55 // FIXME: portable swizzle? | 44 // FIXME: portable swizzle? |
56 return c * Sk4f(alpha, alpha, alpha, 1); | 45 return c * Sk4f(alpha, alpha, alpha, 1); |
57 } | 46 } |
58 }; | 47 }; |
59 | 48 |
// Struct encapsulating various dest-dependent ops:
//
//   - load()          Load a SkPM4f value into Sk4f.  Normally called once per
//                     interval advance.  Also applies a scale and swizzle
//                     suitable for DstType.
//
//   - store()         Store one Sk4f to dest.  Optionally handles premul,
//                     color space conversion, etc.
//
//   - store(count)    Store the Sk4f value repeatedly to dest, count times.
//
//   - store4x()       Store 4 Sk4f values to dest (opportunistic optimization).
//
72 template <DstType, ApplyPremul premul = ApplyPremul::False> | 61 template <DstType, ApplyPremul premul> |
73 struct DstTraits; | 62 struct DstTraits; |
74 | 63 |
75 template <ApplyPremul premul> | 64 template <ApplyPremul premul> |
76 struct DstTraits<DstType::L32, premul> { | 65 struct DstTraits<DstType::L32, premul> { |
| 66 using PM = PremulTraits<premul>; |
77 using Type = SkPMColor; | 67 using Type = SkPMColor; |
78 | 68 |
79 // For L32, we prescale the values by 255 to save a per-pixel multiplication
. | 69 // For L32, prescaling by 255 saves a per-pixel multiplication when premul i
s not needed. |
80 static Sk4f load(const SkPM4f& c) { | 70 static Sk4f load(const SkPM4f& c) { |
81 return c.to4f_pmorder() * Sk4f(255); | 71 return premul == ApplyPremul::False |
| 72 ? c.to4f_pmorder() * Sk4f(255) |
| 73 : c.to4f_pmorder(); |
82 } | 74 } |
83 | 75 |
84 static void store(const Sk4f& c, Type* dst) { | 76 static void store(const Sk4f& c, Type* dst) { |
85 *dst = trunc_from_4f_255<premul>(c); | 77 if (premul == ApplyPremul::False) { |
| 78 // c is prescaled by 255, just store. |
| 79 SkNx_cast<uint8_t>(c).store(dst); |
| 80 } else { |
| 81 *dst = Sk4f_toL32(PM::apply(c)); |
| 82 } |
86 } | 83 } |
87 | 84 |
88 static void store(const Sk4f& c, Type* dst, int n) { | 85 static void store(const Sk4f& c, Type* dst, int n) { |
89 sk_memset32(dst, trunc_from_4f_255<premul>(c), n); | 86 Type pmc; |
| 87 store(c, &pmc); |
| 88 sk_memset32(dst, pmc, n); |
90 } | 89 } |
91 | 90 |
92 static void store4x(const Sk4f& c0, const Sk4f& c1, | 91 static void store4x(const Sk4f& c0, const Sk4f& c1, |
93 const Sk4f& c2, const Sk4f& c3, | 92 const Sk4f& c2, const Sk4f& c3, |
94 Type* dst) { | 93 Type* dst) { |
95 if (premul == ApplyPremul::False) { | 94 if (premul == ApplyPremul::False) { |
96 Sk4f_ToBytes((uint8_t*)dst, c0, c1, c2, c3); | 95 Sk4f_ToBytes((uint8_t*)dst, c0, c1, c2, c3); |
97 } else { | 96 } else { |
98 store(c0, dst + 0); | 97 store(c0, dst + 0); |
99 store(c1, dst + 1); | 98 store(c1, dst + 1); |
(...skipping 86 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
186 store(c0, dst + 0); | 185 store(c0, dst + 0); |
187 store(c1, dst + 1); | 186 store(c1, dst + 1); |
188 store(c2, dst + 2); | 187 store(c2, dst + 2); |
189 store(c3, dst + 3); | 188 store(c3, dst + 3); |
190 } | 189 } |
191 }; | 190 }; |
192 | 191 |
193 } // anonymous namespace | 192 } // anonymous namespace |
194 | 193 |
195 #endif // Sk4fGradientPriv_DEFINED | 194 #endif // Sk4fGradientPriv_DEFINED |
OLD | NEW |