OLD | NEW |
1 /* | 1 /* |
2 * Copyright 2016 Google Inc. | 2 * Copyright 2016 Google Inc. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
6 */ | 6 */ |
7 | 7 |
8 #include "SkHalf.h" | 8 #include "SkHalf.h" |
9 #include "SkPM4fPriv.h" | 9 #include "SkPM4fPriv.h" |
10 #include "SkUtils.h" | 10 #include "SkUtils.h" |
11 #include "SkXfermode.h" | 11 #include "SkXfermode.h" |
12 | 12 |
13 static void sk_memset64(uint64_t dst[], uint64_t value, int count) { | 13 static void sk_memset64(uint64_t dst[], uint64_t value, int count) { |
14 for (int i = 0; i < count; ++i) { | 14 for (int i = 0; i < count; ++i) { |
15 dst[i] = value; | 15 dst[i] = value; |
16 } | 16 } |
17 } | 17 } |
18 | 18 |
19 struct U64ProcPair { | |
20 SkXfermode::U64Proc1 fP1; | |
21 SkXfermode::U64ProcN fPN; | |
22 }; | |
23 | |
24 enum DstType { | 19 enum DstType { |
25 kU16_Dst, | 20 kU16_Dst, |
26 kF16_Dst, | 21 kF16_Dst, |
27 }; | 22 }; |
28 | 23 |
29 static Sk4f lerp_by_coverage(const Sk4f& src, const Sk4f& dst, uint8_t srcCoverage) { | 24 static Sk4f lerp_by_coverage(const Sk4f& src, const Sk4f& dst, uint8_t srcCoverage) { |
30 return dst + (src - dst) * Sk4f(srcCoverage * (1/255.0f)); | 25 return dst + (src - dst) * Sk4f(srcCoverage * (1/255.0f)); |
31 } | 26 } |
32 | 27 |
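Note: lerp_by_coverage above treats the 8-bit AA coverage as a 0..1 weight and blends the result back toward the existing destination. A scalar illustration of the same per-lane math (the Sk4f version simply applies it to all four channels at once):

    // Coverage 0 keeps dst unchanged, coverage 255 keeps src.
    static float lerp_by_coverage_scalar(float src, float dst, uint8_t srcCoverage) {
        return dst + (src - dst) * (srcCoverage * (1 / 255.0f));
    }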
33 template <DstType D> Sk4f unit_to_bias(const Sk4f& x4) { | 28 template <DstType D> Sk4f unit_to_bias(const Sk4f& x4) { |
(...skipping 31 matching lines...)
65 if (SkPM4f::R == 0) { | 60 if (SkPM4f::R == 0) { |
66 return x; // we're already RGBA | 61 return x; // we're already RGBA |
67 } else { | 62 } else { |
68 // we're BGRA, so swap R and B | 63 // we're BGRA, so swap R and B |
69 return SkNx_shuffle<2, 1, 0, 3>(x); | 64 return SkNx_shuffle<2, 1, 0, 3>(x); |
70 } | 65 } |
71 } | 66 } |
72 | 67 |
73 /////////////////////////////////////////////////////////////////////////////////////////////////// | 68 /////////////////////////////////////////////////////////////////////////////////////////////////// |
74 | 69 |
75 template <DstType D> void xfer_u64_1(const SkXfermode::U64State& state, uint64_t dst[], | 70 template <DstType D> void xfer_u64_1(const SkXfermode* xfer, uint64_t dst[], |
76 const SkPM4f& src, int count, const SkAlpha aa[]) { | 71 const SkPM4f* src, int count, const SkAlpha aa[]) { |
77 SkXfermodeProc4f proc = state.fXfer->getProc4f(); | 72 SkXfermodeProc4f proc = xfer->getProc4f(); |
78 SkPM4f d; | 73 SkPM4f d; |
79 if (aa) { | 74 if (aa) { |
80 for (int i = 0; i < count; ++i) { | 75 for (int i = 0; i < count; ++i) { |
81 Sk4f d4 = bias_to_unit<D>(load_from_dst<D>(dst[i])); | 76 Sk4f d4 = bias_to_unit<D>(load_from_dst<D>(dst[i])); |
82 d4.store(d.fVec); | 77 d4.store(d.fVec); |
83 Sk4f r4 = unit_to_bias<D>(Sk4f::Load(proc(src, d).fVec)); | 78 Sk4f r4 = unit_to_bias<D>(Sk4f::Load(proc(*src, d).fVec)); |
84 dst[i] = store_to_dst<D>(lerp_by_coverage(r4, d4, aa[i])); | 79 dst[i] = store_to_dst<D>(lerp_by_coverage(r4, d4, aa[i])); |
85 } | 80 } |
86 } else { | 81 } else { |
87 for (int i = 0; i < count; ++i) { | 82 for (int i = 0; i < count; ++i) { |
88 bias_to_unit<D>(load_from_dst<D>(dst[i])).store(d.fVec); | 83 bias_to_unit<D>(load_from_dst<D>(dst[i])).store(d.fVec); |
89 Sk4f r4 = unit_to_bias<D>(Sk4f::Load(proc(src, d).fVec)); | 84 Sk4f r4 = unit_to_bias<D>(Sk4f::Load(proc(*src, d).fVec)); |
90 dst[i] = store_to_dst<D>(r4); | 85 dst[i] = store_to_dst<D>(r4); |
91 } | 86 } |
92 } | 87 } |
93 } | 88 } |
94 | 89 |
95 template <DstType D> void xfer_u64_n(const SkXfermode::U64State& state, uint64_t dst[], | 90 template <DstType D> void xfer_u64_n(const SkXfermode* xfer, uint64_t dst[], |
96 const SkPM4f src[], int count, const SkAlpha aa[]) { | 91 const SkPM4f src[], int count, const SkAlpha aa[]) { |
97 SkXfermodeProc4f proc = state.fXfer->getProc4f(); | 92 SkXfermodeProc4f proc = xfer->getProc4f(); |
98 SkPM4f d; | 93 SkPM4f d; |
99 if (aa) { | 94 if (aa) { |
100 for (int i = 0; i < count; ++i) { | 95 for (int i = 0; i < count; ++i) { |
101 Sk4f d4 = bias_to_unit<D>(load_from_dst<D>(dst[i])); | 96 Sk4f d4 = bias_to_unit<D>(load_from_dst<D>(dst[i])); |
102 d4.store(d.fVec); | 97 d4.store(d.fVec); |
103 Sk4f r4 = unit_to_bias<D>(Sk4f::Load(proc(src[i], d).fVec)); | 98 Sk4f r4 = unit_to_bias<D>(Sk4f::Load(proc(src[i], d).fVec)); |
104 dst[i] = store_to_dst<D>(lerp_by_coverage(r4, d4, aa[i])); | 99 dst[i] = store_to_dst<D>(lerp_by_coverage(r4, d4, aa[i])); |
105 } | 100 } |
106 } else { | 101 } else { |
107 for (int i = 0; i < count; ++i) { | 102 for (int i = 0; i < count; ++i) { |
108 bias_to_unit<D>(load_from_dst<D>(dst[i])).store(d.fVec); | 103 bias_to_unit<D>(load_from_dst<D>(dst[i])).store(d.fVec); |
109 Sk4f r4 = unit_to_bias<D>(Sk4f::Load(proc(src[i], d).fVec)); | 104 Sk4f r4 = unit_to_bias<D>(Sk4f::Load(proc(src[i], d).fVec)); |
110 dst[i] = store_to_dst<D>(r4); | 105 dst[i] = store_to_dst<D>(r4); |
111 } | 106 } |
112 } | 107 } |
113 } | 108 } |
114 | 109 |
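Note: the general procs above handle any xfermode by round-tripping each pixel through unit-range SkPM4f: load and un-bias the destination, run the mode's SkXfermodeProc4f, re-bias and store, and lerp by coverage when an AA run is supplied. A simplified, self-contained sketch of that per-pixel flow using plain SkPM4f in place of the bias/load/store helpers (the Proc4f typedef below is hypothetical and only mirrors how proc is called above):

    typedef SkPM4f (*Proc4f)(const SkPM4f& src, const SkPM4f& dst);

    // One pixel of the general path, ignoring the dst-format bias conversions.
    static void xfer_one_pixel(Proc4f proc, SkPM4f* dst, const SkPM4f& src, uint8_t coverage) {
        SkPM4f r = proc(src, *dst);                 // run the mode in unit space
        for (int c = 0; c < 4; ++c) {               // lerp toward dst by coverage (no-AA code just stores r)
            dst->fVec[c] += (r.fVec[c] - dst->fVec[c]) * (coverage * (1 / 255.0f));
        }
    }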
115 const U64ProcPair gU64Procs_General[] = { | 110 const SkXfermode::D64Proc gProcs_General[] = { |
116 { xfer_u64_1<kU16_Dst>, xfer_u64_n<kU16_Dst> }, // U16 alpha | 111 xfer_u64_n<kU16_Dst>, xfer_u64_n<kU16_Dst>, |
117 { xfer_u64_1<kU16_Dst>, xfer_u64_n<kU16_Dst> }, // U16 opaque | 112 xfer_u64_1<kU16_Dst>, xfer_u64_1<kU16_Dst>, |
118 { xfer_u64_1<kF16_Dst>, xfer_u64_n<kF16_Dst> }, // F16 alpha | 113 xfer_u64_n<kF16_Dst>, xfer_u64_n<kF16_Dst>, |
119 { xfer_u64_1<kF16_Dst>, xfer_u64_n<kF16_Dst> }, // F16 opaque | 114 xfer_u64_1<kF16_Dst>, xfer_u64_1<kF16_Dst>, |
120 }; | 115 }; |
121 | 116 |
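Note: the old {fP1, fPN} pairs are replaced by flat tables of eight D64Proc entries indexed directly by the masked flags value. The ordering of gProcs_General (and of the tables below) suggests bit 0 selects a src-is-opaque variant, bit 1 a single-src-color variant, and bit 2 an F16 destination; the real flag names are not visible in this diff, so the constants below are hypothetical and shown only to illustrate the indexing:

    enum {
        kSrcIsOpaque_Hypothetical = 1 << 0,   // odd entries (1, 3, 5, 7)
        kSrcIsSingle_Hypothetical = 1 << 1,   // entries 2, 3, 6, 7 use the *_1 procs
        kDstIsF16_Hypothetical    = 1 << 2,   // entries 4..7 use the F16 destination
    };
    // SkXfermode::D64Proc proc = gProcs_General[flags & 7];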
122 /////////////////////////////////////////////////////////////////////////////////////////////////// | 117 /////////////////////////////////////////////////////////////////////////////////////////////////// |
123 | 118 |
124 template <DstType D> void src_1(const SkXfermode::U64State& state, uint64_t dst[], | 119 template <DstType D> void clear(const SkXfermode*, uint64_t dst[], |
125 const SkPM4f& src, int count, const SkAlpha aa[]) { | 120 const SkPM4f*, int count, const SkAlpha aa[]) { |
126 const Sk4f s4 = pm_to_rgba_order(unit_to_bias<D>(Sk4f::Load(src.fVec))); | 121 if (aa) { |
| 122 for (int i = 0; i < count; ++i) { |
| 123 if (aa[i]) { |
| 124 const Sk4f d4 = load_from_dst<D>(dst[i]); |
| 125 dst[i] = store_to_dst<D>(d4 * Sk4f((255 - aa[i]) * 1.0f/255)); |
| 126 } |
| 127 } |
| 128 } else { |
| 129 sk_memset64(dst, 0, count); |
| 130 } |
| 131 } |
| 132 |
| 133 const SkXfermode::D64Proc gProcs_Clear[] = { |
| 134 clear<kU16_Dst>, clear<kU16_Dst>, |
| 135 clear<kU16_Dst>, clear<kU16_Dst>, |
| 136 clear<kF16_Dst>, clear<kF16_Dst>, |
| 137 clear<kF16_Dst>, clear<kF16_Dst>, |
| 138 }; |
| 139 |
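Note: the new clear procs implement kClear_Mode directly. With no AA the run is simply zeroed via sk_memset64; with AA each covered pixel is scaled toward zero by the uncovered fraction. Per channel, the AA branch amounts to (illustrative scalar form only):

    // dst keeps (255 - coverage)/255 of its value; full coverage clears it to 0.
    static float clear_channel(float dstChannel, uint8_t coverage) {
        return dstChannel * ((255 - coverage) * (1.0f / 255));
    }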
| 140 /////////////////////////////////////////////////////////////////////////////////////////////////// |
| 141 |
| 142 template <DstType D> void src_1(const SkXfermode*, uint64_t dst[], |
| 143 const SkPM4f* src, int count, const SkAlpha aa[]) { |
| 144 const Sk4f s4 = pm_to_rgba_order(unit_to_bias<D>(Sk4f::Load(src->fVec))); |
127 if (aa) { | 145 if (aa) { |
128 for (int i = 0; i < count; ++i) { | 146 for (int i = 0; i < count; ++i) { |
129 const Sk4f d4 = load_from_dst<D>(dst[i]); | 147 const Sk4f d4 = load_from_dst<D>(dst[i]); |
130 dst[i] = store_to_dst<D>(lerp_by_coverage(s4, d4, aa[i])); | 148 dst[i] = store_to_dst<D>(lerp_by_coverage(s4, d4, aa[i])); |
131 } | 149 } |
132 } else { | 150 } else { |
133 sk_memset64(dst, store_to_dst<D>(s4), count); | 151 sk_memset64(dst, store_to_dst<D>(s4), count); |
134 } | 152 } |
135 } | 153 } |
136 | 154 |
137 template <DstType D> void src_n(const SkXfermode::U64State& state, uint64_t dst[], | 155 template <DstType D> void src_n(const SkXfermode*, uint64_t dst[], |
138 const SkPM4f src[], int count, const SkAlpha aa[]) { | 156 const SkPM4f src[], int count, const SkAlpha aa[]) { |
139 if (aa) { | 157 if (aa) { |
140 for (int i = 0; i < count; ++i) { | 158 for (int i = 0; i < count; ++i) { |
141 const Sk4f s4 = pm_to_rgba_order(unit_to_bias<D>(Sk4f::Load(src[i].fVec))); | 159 const Sk4f s4 = pm_to_rgba_order(unit_to_bias<D>(Sk4f::Load(src[i].fVec))); |
142 const Sk4f d4 = load_from_dst<D>(dst[i]); | 160 const Sk4f d4 = load_from_dst<D>(dst[i]); |
143 dst[i] = store_to_dst<D>(lerp_by_coverage(s4, d4, aa[i])); | 161 dst[i] = store_to_dst<D>(lerp_by_coverage(s4, d4, aa[i])); |
144 } | 162 } |
145 } else { | 163 } else { |
146 for (int i = 0; i < count; ++i) { | 164 for (int i = 0; i < count; ++i) { |
147 const Sk4f s4 = pm_to_rgba_order(unit_to_bias<D>(Sk4f::Load(src[i].fVec))); | 165 const Sk4f s4 = pm_to_rgba_order(unit_to_bias<D>(Sk4f::Load(src[i].fVec))); |
148 dst[i] = store_to_dst<D>(s4); | 166 dst[i] = store_to_dst<D>(s4); |
149 } | 167 } |
150 } | 168 } |
151 } | 169 } |
152 | 170 |
153 const U64ProcPair gU64Procs_Src[] = { | 171 const SkXfermode::D64Proc gProcs_Src[] = { |
154 { src_1<kU16_Dst>, src_n<kU16_Dst> }, // U16 alpha | 172 src_n<kU16_Dst>, src_n<kU16_Dst>, |
155 { src_1<kU16_Dst>, src_n<kU16_Dst> }, // U16 opaque | 173 src_1<kU16_Dst>, src_1<kU16_Dst>, |
156 { src_1<kF16_Dst>, src_n<kF16_Dst> }, // F16 alpha | 174 src_n<kF16_Dst>, src_n<kF16_Dst>, |
157 { src_1<kF16_Dst>, src_n<kF16_Dst> }, // F16 opaque | 175 src_1<kF16_Dst>, src_1<kF16_Dst>, |
158 }; | 176 }; |
159 | 177 |
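Note: for kSrc_Mode the source simply replaces the destination, so the single-color variant precomputes the biased, RGBA-ordered source once, then either memsets the run (no AA) or lerps per pixel by coverage. A hypothetical caller of the single-color proc, using only the D64Proc signature visible in this diff (the actual blitter call sites are not part of this change):

    // Fill a span with one color; 'proc' would come from the dispatch at the bottom of this file.
    static void fill_span(SkXfermode::D64Proc proc, uint64_t dst[], const SkPM4f& color,
                          int count, const SkAlpha aa[] /* may be null */) {
        proc(nullptr, dst, &color, count, aa);   // the kSrc procs ignore the xfermode argument
    }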
160 /////////////////////////////////////////////////////////////////////////////////////////////////// | 178 /////////////////////////////////////////////////////////////////////////////////////////////////// |
161 | 179 |
162 template <DstType D> void srcover_1(const SkXfermode::U64State& state, uint64_t dst[], | 180 static void dst(const SkXfermode*, uint64_t*, const SkPM4f*, int count, const SkAlpha[]) {} |
163 const SkPM4f& src, int count, const SkAlpha aa[]) { | 181 |
164 const Sk4f s4 = pm_to_rgba_order(Sk4f::Load(src.fVec)); | 182 const SkXfermode::D64Proc gProcs_Dst[] = { |
| 183 dst, dst, dst, dst, dst, dst, dst, dst, |
| 184 }; |
| 185 |
| 186 /////////////////////////////////////////////////////////////////////////////////////////////////// |
| 187 |
| 188 template <DstType D> void srcover_1(const SkXfermode*, uint64_t dst[], |
| 189 const SkPM4f* src, int count, const SkAlpha aa[]) { |
| 190 const Sk4f s4 = pm_to_rgba_order(Sk4f::Load(src->fVec)); |
165 const Sk4f dst_scale = Sk4f(1 - get_alpha(s4)); | 191 const Sk4f dst_scale = Sk4f(1 - get_alpha(s4)); |
166 const Sk4f s4bias = unit_to_bias<D>(s4); | 192 const Sk4f s4bias = unit_to_bias<D>(s4); |
167 for (int i = 0; i < count; ++i) { | 193 for (int i = 0; i < count; ++i) { |
168 const Sk4f d4bias = load_from_dst<D>(dst[i]); | 194 const Sk4f d4bias = load_from_dst<D>(dst[i]); |
169 const Sk4f r4bias = s4bias + d4bias * dst_scale; | 195 const Sk4f r4bias = s4bias + d4bias * dst_scale; |
170 if (aa) { | 196 if (aa) { |
171 dst[i] = store_to_dst<D>(lerp_by_coverage(r4bias, d4bias, aa[i])); | 197 dst[i] = store_to_dst<D>(lerp_by_coverage(r4bias, d4bias, aa[i])); |
172 } else { | 198 } else { |
173 dst[i] = store_to_dst<D>(r4bias); | 199 dst[i] = store_to_dst<D>(r4bias); |
174 } | 200 } |
175 } | 201 } |
176 } | 202 } |
177 | 203 |
178 template <DstType D> void srcover_n(const SkXfermode::U64State& state, uint64_t dst[], | 204 template <DstType D> void srcover_n(const SkXfermode*, uint64_t dst[], |
179 const SkPM4f src[], int count, const SkAlpha aa[]) { | 205 const SkPM4f src[], int count, const SkAlpha aa[]) { |
180 for (int i = 0; i < count; ++i) { | 206 for (int i = 0; i < count; ++i) { |
181 const Sk4f s4 = pm_to_rgba_order(Sk4f::Load(src[i].fVec)); | 207 const Sk4f s4 = pm_to_rgba_order(Sk4f::Load(src[i].fVec)); |
182 const Sk4f dst_scale = Sk4f(1 - get_alpha(s4)); | 208 const Sk4f dst_scale = Sk4f(1 - get_alpha(s4)); |
183 const Sk4f s4bias = unit_to_bias<D>(s4); | 209 const Sk4f s4bias = unit_to_bias<D>(s4); |
184 const Sk4f d4bias = load_from_dst<D>(dst[i]); | 210 const Sk4f d4bias = load_from_dst<D>(dst[i]); |
185 const Sk4f r4bias = s4bias + d4bias * dst_scale; | 211 const Sk4f r4bias = s4bias + d4bias * dst_scale; |
186 if (aa) { | 212 if (aa) { |
187 dst[i] = store_to_dst<D>(lerp_by_coverage(r4bias, d4bias, aa[i])); | 213 dst[i] = store_to_dst<D>(lerp_by_coverage(r4bias, d4bias, aa[i])); |
188 } else { | 214 } else { |
189 dst[i] = store_to_dst<D>(r4bias); | 215 dst[i] = store_to_dst<D>(r4bias); |
190 } | 216 } |
191 } | 217 } |
192 } | 218 } |
193 | 219 |
194 const U64ProcPair gU64Procs_SrcOver[] = { | 220 const SkXfermode::D64Proc gProcs_SrcOver[] = { |
195 { srcover_1<kU16_Dst>, srcover_n<kU16_Dst> }, // U16 alpha | 221 srcover_n<kU16_Dst>, src_n<kU16_Dst>, |
196 { src_1<kU16_Dst>, src_n<kU16_Dst> }, // U16 opaque | 222 srcover_1<kU16_Dst>, src_1<kU16_Dst>, |
197 { srcover_1<kF16_Dst>, srcover_n<kF16_Dst> }, // F16 alpha | 223 srcover_n<kF16_Dst>, src_n<kF16_Dst>, |
198 { src_1<kF16_Dst>, src_n<kF16_Dst> }, // F16 opaque | 224 srcover_1<kF16_Dst>, src_1<kF16_Dst>, |
199 }; | 225 }; |
200 | 226 |
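Note: gProcs_SrcOver falls back to the plain src procs in the opaque slots (the odd indices): when source alpha is 1 the dst_scale factor above is 0 and srcover degenerates to src, so the cheaper proc can be used. In scalar form, per channel in the biased space:

    // r = s + d * (1 - srcAlpha); with srcAlpha == 1 this is just r = s.
    static float srcover_channel(float s, float d, float srcAlpha) {
        return s + d * (1 - srcAlpha);
    }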
201 /////////////////////////////////////////////////////////////////////////////////////////////////// | 227 /////////////////////////////////////////////////////////////////////////////////////////////////// |
202 | 228 |
203 static U64ProcPair find_procs(SkXfermode::Mode mode, uint32_t flags) { | 229 static SkXfermode::D64Proc find_proc(SkXfermode::Mode mode, uint32_t flags) { |
204 SkASSERT(0 == (flags & ~3)); | 230 SkASSERT(0 == (flags & ~7)); |
205 flags &= 3; | 231 flags &= 7; |
206 | 232 |
207 switch (mode) { | 233 switch (mode) { |
208 case SkXfermode::kSrc_Mode: return gU64Procs_Src[flags]; | 234 case SkXfermode::kClear_Mode: return gProcs_Clear[flags]; |
209 case SkXfermode::kSrcOver_Mode: return gU64Procs_SrcOver[flags]; | 235 case SkXfermode::kSrc_Mode: return gProcs_Src[flags]; |
| 236 case SkXfermode::kDst_Mode: return gProcs_Dst[flags]; |
| 237 case SkXfermode::kSrcOver_Mode: return gProcs_SrcOver[flags]; |
210 default: | 238 default: |
211 break; | 239 break; |
212 } | 240 } |
213 return gU64Procs_General[flags]; | 241 return gProcs_General[flags]; |
214 } | 242 } |
215 | 243 |
216 SkXfermode::U64Proc1 SkXfermode::GetU64Proc1(Mode mode, uint32_t flags) { | 244 SkXfermode::D64Proc SkXfermode::onGetD64Proc(uint32_t flags) const { |
217 return find_procs(mode, flags).fP1; | 245 SkASSERT(0 == (flags & ~7)); |
| 246 flags &= 7; |
| 247 |
| 248 Mode mode; |
| 249 return this->asMode(&mode) ? find_proc(mode, flags) : gProcs_General[flags]; |
218 } | 250 } |
219 | 251 |
220 SkXfermode::U64ProcN SkXfermode::GetU64ProcN(Mode mode, uint32_t flags) { | 252 SkXfermode::D64Proc SkXfermode::GetD64Proc(SkXfermode* xfer, uint32_t flags) { |
221 return find_procs(mode, flags).fPN; | 253 return xfer ? xfer->onGetD64Proc(flags) : find_proc(SkXfermode::kSrcOver_Mode, flags); |
222 } | 254 } |
| 255 |
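Note: the dispatch now has a single public entry point. GetD64Proc tolerates a null xfermode (treated as srcover), and onGetD64Proc lets a mode-reducible xfermode pick one of the specialized tables, falling back to the general proc4f path otherwise. A hypothetical usage sketch (the row loop and flag value are illustrative only, not callers shown in this change):

    // Fetch the proc once for a draw, then run it per row of 64-bit (U16/F16) pixels.
    static void blit_rows(SkXfermode* xfer, uint32_t d64Flags, uint64_t* dst, size_t dstRowBytes,
                          const SkPM4f src[], int width, int height, const SkAlpha aa[]) {
        SkXfermode::D64Proc proc = SkXfermode::GetD64Proc(xfer, d64Flags);
        for (int y = 0; y < height; ++y) {
            proc(xfer, dst, src, width, aa);
            dst = reinterpret_cast<uint64_t*>(reinterpret_cast<char*>(dst) + dstRowBytes);
        }
    }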