OLD | NEW |
1 /* | 1 /* |
2 * Copyright 2016 Google Inc. | 2 * Copyright 2016 Google Inc. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
6 */ | 6 */ |
7 | 7 |
8 #include "SkHalf.h" | 8 #include "SkHalf.h" |
9 #include "SkPM4fPriv.h" | 9 #include "SkPM4fPriv.h" |
10 #include "SkUtils.h" | 10 #include "SkUtils.h" |
(...skipping 32 matching lines...) |
43 // | 43 // |
44 template <DstType D> Sk4f load_from_dst(uint64_t dst) { | 44 template <DstType D> Sk4f load_from_dst(uint64_t dst) { |
45 return (D == kU16_Dst) ? load_from_u16(dst) : SkHalfToFloat_01(dst); | 45 return (D == kU16_Dst) ? load_from_u16(dst) : SkHalfToFloat_01(dst); |
46 } | 46 } |
47 | 47 |
48 // Assumes x4 is already in the "natural" bias (either unit-float or 16bit int) | 48 // Assumes x4 is already in the "natural" bias (either unit-float or 16bit int) |
49 template <DstType D> uint64_t store_to_dst(const Sk4f& x4) { | 49 template <DstType D> uint64_t store_to_dst(const Sk4f& x4) { |
50 return (D == kU16_Dst) ? store_to_u16(x4) : SkFloatToHalf_01(x4); | 50 return (D == kU16_Dst) ? store_to_u16(x4) : SkFloatToHalf_01(x4); |
51 } | 51 } |
52 | 52 |
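Note: load_from_dst/store_to_dst keep each pixel in its destination's "natural" bias, i.e. 16-bit integer channel values for kU16_Dst and unit floats (stored as halfs via SkHalfToFloat_01/SkFloatToHalf_01) for kF16_Dst. The unit_to_bias/bias_to_unit helpers live in the elided lines above; the following is only a minimal scalar sketch of the scale they presumably apply for the U16 case, not the actual Skia helpers.

    // Hedged sketch: assuming the kU16_Dst bias stores each channel as
    // value * 65535, the unit <-> bias conversion is a plain scale.
    // These names are illustrative only.
    static inline float unit_to_u16_bias_scalar(float x) { return x * 65535.0f; }
    static inline float u16_bias_to_unit_scalar(float x) { return x * (1.0f / 65535.0f); }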
53 static inline Sk4f pm_to_rgba_order(const Sk4f& x) { | |
54 if (SkPM4f::R == 0) { | |
55 return x; // we're already RGBA | |
56 } else { | |
57 // we're BGRA, so swap R and B | |
58 return SkNx_shuffle<2, 1, 0, 3>(x); | |
59 } | |
60 } | |
61 | |
62 /////////////////////////////////////////////////////////////////////////////////////////////////// | 53 /////////////////////////////////////////////////////////////////////////////////////////////////// |
63 | 54 |
64 template <DstType D> void xfer_u64_1(const SkXfermode* xfer, uint64_t dst[], | 55 template <DstType D> void xfer_u64_1(const SkXfermode* xfer, uint64_t dst[], |
65 const SkPM4f* src, int count, const SkAlpha aa[]) { | 56 const SkPM4f* src, int count, const SkAlpha aa[]) { |
66 SkXfermodeProc4f proc = xfer->getProc4f(); | 57 SkXfermodeProc4f proc = xfer->getProc4f(); |
67 SkPM4f d; | 58 SkPM4f d; |
68 if (aa) { | 59 if (aa) { |
69 for (int i = 0; i < count; ++i) { | 60 for (int i = 0; i < count; ++i) { |
70 Sk4f d4 = bias_to_unit<D>(load_from_dst<D>(dst[i])); | 61 Sk4f d4 = bias_to_unit<D>(load_from_dst<D>(dst[i])); |
71 d4.store(d.fVec); | 62 d4.store(d.fVec); |
(...skipping 56 matching lines...) |
128 clear<kU16_Dst>, clear<kU16_Dst>, | 119 clear<kU16_Dst>, clear<kU16_Dst>, |
129 clear<kU16_Dst>, clear<kU16_Dst>, | 120 clear<kU16_Dst>, clear<kU16_Dst>, |
130 clear<kF16_Dst>, clear<kF16_Dst>, | 121 clear<kF16_Dst>, clear<kF16_Dst>, |
131 clear<kF16_Dst>, clear<kF16_Dst>, | 122 clear<kF16_Dst>, clear<kF16_Dst>, |
132 }; | 123 }; |
133 | 124 |
134 /////////////////////////////////////////////////////////////////////////////////////////////////// | 125 /////////////////////////////////////////////////////////////////////////////////////////////////// |
135 | 126 |
136 template <DstType D> void src_1(const SkXfermode*, uint64_t dst[], | 127 template <DstType D> void src_1(const SkXfermode*, uint64_t dst[], |
137 const SkPM4f* src, int count, const SkAlpha aa[]) { | 128 const SkPM4f* src, int count, const SkAlpha aa[]) { |
138 const Sk4f s4 = pm_to_rgba_order(unit_to_bias<D>(Sk4f::Load(src->fVec))); | 129 const Sk4f s4 = unit_to_bias<D>(Sk4f::Load(src->fVec)); |
139 if (aa) { | 130 if (aa) { |
140 for (int i = 0; i < count; ++i) { | 131 for (int i = 0; i < count; ++i) { |
141 const Sk4f d4 = load_from_dst<D>(dst[i]); | 132 const Sk4f d4 = load_from_dst<D>(dst[i]); |
142 dst[i] = store_to_dst<D>(lerp_by_coverage(s4, d4, aa[i])); | 133 dst[i] = store_to_dst<D>(lerp_by_coverage(s4, d4, aa[i])); |
143 } | 134 } |
144 } else { | 135 } else { |
145 sk_memset64(dst, store_to_dst<D>(s4), count); | 136 sk_memset64(dst, store_to_dst<D>(s4), count); |
146 } | 137 } |
147 } | 138 } |
148 | 139 |
149 template <DstType D> void src_n(const SkXfermode*, uint64_t dst[], | 140 template <DstType D> void src_n(const SkXfermode*, uint64_t dst[], |
150 const SkPM4f src[], int count, const SkAlpha aa[]) { | 141 const SkPM4f src[], int count, const SkAlpha aa[]) { |
151 if (aa) { | 142 if (aa) { |
152 for (int i = 0; i < count; ++i) { | 143 for (int i = 0; i < count; ++i) { |
153 const Sk4f s4 = pm_to_rgba_order(unit_to_bias<D>(Sk4f::Load(src[i].fVec))); | 144 const Sk4f s4 = unit_to_bias<D>(Sk4f::Load(src[i].fVec)); |
154 const Sk4f d4 = load_from_dst<D>(dst[i]); | 145 const Sk4f d4 = load_from_dst<D>(dst[i]); |
155 dst[i] = store_to_dst<D>(lerp_by_coverage(s4, d4, aa[i])); | 146 dst[i] = store_to_dst<D>(lerp_by_coverage(s4, d4, aa[i])); |
156 } | 147 } |
157 } else { | 148 } else { |
158 for (int i = 0; i < count; ++i) { | 149 for (int i = 0; i < count; ++i) { |
159 const Sk4f s4 = pm_to_rgba_order(unit_to_bias<D>(Sk4f::Load(src[i].fVec))); | 150 const Sk4f s4 = unit_to_bias<D>(Sk4f::Load(src[i].fVec)); |
160 dst[i] = store_to_dst<D>(s4); | 151 dst[i] = store_to_dst<D>(s4); |
161 } | 152 } |
162 } | 153 } |
163 } | 154 } |
164 | 155 |
165 const SkXfermode::D64Proc gProcs_Src[] = { | 156 const SkXfermode::D64Proc gProcs_Src[] = { |
166 src_n<kU16_Dst>, src_n<kU16_Dst>, | 157 src_n<kU16_Dst>, src_n<kU16_Dst>, |
167 src_1<kU16_Dst>, src_1<kU16_Dst>, | 158 src_1<kU16_Dst>, src_1<kU16_Dst>, |
168 src_n<kF16_Dst>, src_n<kF16_Dst>, | 159 src_n<kF16_Dst>, src_n<kF16_Dst>, |
169 src_1<kF16_Dst>, src_1<kF16_Dst>, | 160 src_1<kF16_Dst>, src_1<kF16_Dst>, |
170 }; | 161 }; |
171 | 162 |
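In the Src procs above, the aa[] coverage values blend the incoming source toward the existing destination; with no coverage array, src_1 memsets the whole span to one converted value. lerp_by_coverage itself is defined in the elided lines; the snippet below is only a scalar sketch of the blend it is expected to perform, an assumption about its exact form rather than Skia's implementation.

    #include <cstdint>

    // Hedged per-channel sketch: r = d + (s - d) * (aa / 255).
    static inline float lerp_by_coverage_scalar(float s, float d, uint8_t aa) {
        const float t = aa * (1.0f / 255.0f);
        return d + (s - d) * t;
    }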
172 /////////////////////////////////////////////////////////////////////////////////////////////////// | 163 /////////////////////////////////////////////////////////////////////////////////////////////////// |
173 | 164 |
174 static void dst(const SkXfermode*, uint64_t*, const SkPM4f*, int count, const SkAlpha[]) {} | 165 static void dst(const SkXfermode*, uint64_t*, const SkPM4f*, int count, const SkAlpha[]) {} |
175 | 166 |
176 const SkXfermode::D64Proc gProcs_Dst[] = { | 167 const SkXfermode::D64Proc gProcs_Dst[] = { |
177 dst, dst, dst, dst, dst, dst, dst, dst, | 168 dst, dst, dst, dst, dst, dst, dst, dst, |
178 }; | 169 }; |
179 | 170 |
180 /////////////////////////////////////////////////////////////////////////////////////////////////// | 171 /////////////////////////////////////////////////////////////////////////////////////////////////// |
181 | 172 |
182 template <DstType D> void srcover_1(const SkXfermode*, uint64_t dst[], | 173 template <DstType D> void srcover_1(const SkXfermode*, uint64_t dst[], |
183 const SkPM4f* src, int count, const SkAlpha aa[]) { | 174 const SkPM4f* src, int count, const SkAlpha aa[]) { |
184 const Sk4f s4 = pm_to_rgba_order(Sk4f::Load(src->fVec)); | 175 const Sk4f s4 = Sk4f::Load(src->fVec); |
185 const Sk4f dst_scale = Sk4f(1 - get_alpha(s4)); | 176 const Sk4f dst_scale = Sk4f(1 - get_alpha(s4)); |
186 const Sk4f s4bias = unit_to_bias<D>(s4); | 177 const Sk4f s4bias = unit_to_bias<D>(s4); |
187 for (int i = 0; i < count; ++i) { | 178 for (int i = 0; i < count; ++i) { |
188 const Sk4f d4bias = load_from_dst<D>(dst[i]); | 179 const Sk4f d4bias = load_from_dst<D>(dst[i]); |
189 const Sk4f r4bias = s4bias + d4bias * dst_scale; | 180 const Sk4f r4bias = s4bias + d4bias * dst_scale; |
190 if (aa) { | 181 if (aa) { |
191 dst[i] = store_to_dst<D>(lerp_by_coverage(r4bias, d4bias, aa[i])); | 182 dst[i] = store_to_dst<D>(lerp_by_coverage(r4bias, d4bias, aa[i])); |
192 } else { | 183 } else { |
193 dst[i] = store_to_dst<D>(r4bias); | 184 dst[i] = store_to_dst<D>(r4bias); |
194 } | 185 } |
195 } | 186 } |
196 } | 187 } |
197 | 188 |
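srcover_1 above and srcover_n below both evaluate the Porter-Duff src-over equation on biased values, result = src + dst * (1 - src_alpha), with dst_scale holding the (1 - alpha) factor. The following is a scalar restatement of that blend for one premultiplied pixel, for reference only; it mirrors the Sk4f math in the diff and is not a Skia API.

    // Scalar sketch of src-over on premultiplied channels in [0,1].
    struct PM4 { float r, g, b, a; };
    static inline PM4 srcover_scalar(const PM4& s, const PM4& d) {
        const float dst_scale = 1.0f - s.a;   // same factor as dst_scale above
        return { s.r + d.r * dst_scale,
                 s.g + d.g * dst_scale,
                 s.b + d.b * dst_scale,
                 s.a + d.a * dst_scale };
    }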
198 template <DstType D> void srcover_n(const SkXfermode*, uint64_t dst[], | 189 template <DstType D> void srcover_n(const SkXfermode*, uint64_t dst[], |
199 const SkPM4f src[], int count, const SkAlpha aa[]) { | 190 const SkPM4f src[], int count, const SkAlpha aa[]) { |
200 for (int i = 0; i < count; ++i) { | 191 for (int i = 0; i < count; ++i) { |
201 const Sk4f s4 = pm_to_rgba_order(Sk4f::Load(src[i].fVec)); | 192 const Sk4f s4 = Sk4f::Load(src[i].fVec); |
202 const Sk4f dst_scale = Sk4f(1 - get_alpha(s4)); | 193 const Sk4f dst_scale = Sk4f(1 - get_alpha(s4)); |
203 const Sk4f s4bias = unit_to_bias<D>(s4); | 194 const Sk4f s4bias = unit_to_bias<D>(s4); |
204 const Sk4f d4bias = load_from_dst<D>(dst[i]); | 195 const Sk4f d4bias = load_from_dst<D>(dst[i]); |
205 const Sk4f r4bias = s4bias + d4bias * dst_scale; | 196 const Sk4f r4bias = s4bias + d4bias * dst_scale; |
206 if (aa) { | 197 if (aa) { |
207 dst[i] = store_to_dst<D>(lerp_by_coverage(r4bias, d4bias, aa[i])); | 198 dst[i] = store_to_dst<D>(lerp_by_coverage(r4bias, d4bias, aa[i])); |
208 } else { | 199 } else { |
209 dst[i] = store_to_dst<D>(r4bias); | 200 dst[i] = store_to_dst<D>(r4bias); |
210 } | 201 } |
211 } | 202 } |
(...skipping 27 matching lines...) |
239 SkASSERT(0 == (flags & ~7)); | 230 SkASSERT(0 == (flags & ~7)); |
240 flags &= 7; | 231 flags &= 7; |
241 | 232 |
242 Mode mode; | 233 Mode mode; |
243 return this->asMode(&mode) ? find_proc(mode, flags) : gProcs_General[flags]; | 234 return this->asMode(&mode) ? find_proc(mode, flags) : gProcs_General[flags]; |
244 } | 235 } |
245 | 236 |
246 SkXfermode::D64Proc SkXfermode::GetD64Proc(SkXfermode* xfer, uint32_t flags) { | 237 SkXfermode::D64Proc SkXfermode::GetD64Proc(SkXfermode* xfer, uint32_t flags) { |
247 return xfer ? xfer->onGetD64Proc(flags) : find_proc(SkXfermode::kSrcOver_Mode, flags); | 238 return xfer ? xfer->onGetD64Proc(flags) : find_proc(SkXfermode::kSrcOver_Mode, flags); |
248 } | 239 } |
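For callers, GetD64Proc returns one of the table entries above (or a mode-specific proc via find_proc); the low three flag bits index the 8-entry tables, and judging from the layouts shown, bit 2 selects the F16 destination variants while bit 1 selects the single-source "_1" variants. That bit reading is an inference from the tables, not from the flag definitions, which are elided. A hedged usage sketch, assuming an existing xferOrNull, dstRow, srcColors, count, and flags:

    // Fetch a proc and run it over a row of 64-bit pixels.
    // A null xfer falls back to src-over; a null aa pointer means full coverage.
    SkXfermode::D64Proc proc = SkXfermode::GetD64Proc(xferOrNull, flags);
    proc(xferOrNull, dstRow, srcColors, count, nullptr);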