Chromium Code Reviews

Side by Side Diff: src/opts/SkXfermode_opts_SSE2.cpp

Issue 1230023011: Clean up dead xfermode opts code. (Closed) Base URL: https://skia.googlesource.com/skia.git@master
Patch Set: remove guard (created 5 years, 5 months ago)
(The new side of this diff is empty: the patch deletes this file. The old contents follow.)
1 /*
2 * Copyright 2014 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8 #include "SkColorPriv.h"
9 #include "SkColor_opts_SSE2.h"
10 #include "SkMathPriv.h"
11 #include "SkMath_opts_SSE2.h"
12 #include "SkXfermode.h"
13 #include "SkXfermode_opts_SSE2.h"
14 #include "SkXfermode_proccoeff.h"
15
16 ////////////////////////////////////////////////////////////////////////////////
17 // SSE2 versions of the xfermode procs, operating on 4 pixels at a time
18 ////////////////////////////////////////////////////////////////////////////////
19
20 static inline __m128i SkDiv255Round_SSE2(const __m128i& a) {
21 __m128i prod = _mm_add_epi32(a, _mm_set1_epi32(128)); // prod += 128;
22 prod = _mm_add_epi32(prod, _mm_srli_epi32(prod, 8)); // prod + (prod >> 8)
23 prod = _mm_srli_epi32(prod, 8); // >> 8
24
25 return prod;
26 }
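// Reviewer sketch, not part of this file: a scalar restatement of the rounding
// divide-by-255 the vector code above applies to each 32-bit lane. It mirrors
// SkDiv255Round() from SkColorPriv.h: for x in [0, 255*255] the shift trick
// equals x / 255 rounded to nearest, with no integer division.
static inline unsigned div255_round(unsigned x) {
    unsigned prod = x + 128;            // rounding bias
    return (prod + (prod >> 8)) >> 8;   // == x / 255, rounded to nearest
}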
27
28 static inline __m128i clamp_div255round_SSE2(const __m128i& prod) {
29 // test if > 0
30 __m128i cmp1 = _mm_cmpgt_epi32(prod, _mm_setzero_si128());
31 // test if < 255*255
32 __m128i cmp2 = _mm_cmplt_epi32(prod, _mm_set1_epi32(255*255));
33
34 __m128i ret = _mm_setzero_si128();
35
36 // if value >= 255*255, value = 255
37 ret = _mm_andnot_si128(cmp2, _mm_set1_epi32(255));
38
39 __m128i div = SkDiv255Round_SSE2(prod);
40
41 // test if > 0 && < 255*255
42 __m128i cmp = _mm_and_si128(cmp1, cmp2);
43
44 ret = _mm_or_si128(_mm_and_si128(cmp, div), _mm_andnot_si128(cmp, ret));
45
46 return ret;
47 }
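// Reviewer sketch, not part of this file: what clamp_div255round_SSE2() computes
// per lane, written with branches. SSE2 has no blend instruction, so the code
// above builds the select out of cmp/and/andnot/or masks instead. SkDiv255Round()
// is the SkColorPriv.h scalar that SkDiv255Round_SSE2() mirrors.
static inline int clamp_div255round(int prod) {
    if (prod <= 0)         return 0;
    if (prod >= 255 * 255) return 255;
    return SkDiv255Round(prod);
}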
48 static inline __m128i SkMin32_SSE2(const __m128i& a, const __m128i& b) {
49 __m128i cmp = _mm_cmplt_epi32(a, b);
50 return _mm_or_si128(_mm_and_si128(cmp, a), _mm_andnot_si128(cmp, b));
51 }
52
53 static inline __m128i srcover_byte_SSE2(const __m128i& a, const __m128i& b) {
54 // a + b - SkAlphaMulAlpha(a, b);
55 return _mm_sub_epi32(_mm_add_epi32(a, b), SkAlphaMulAlpha_SSE2(a, b));
56
57 }
58
59 // Portable version overlay_byte() is in SkXfermode.cpp.
60 static inline __m128i overlay_byte_SSE2(const __m128i& sc, const __m128i& dc,
61 const __m128i& sa, const __m128i& da) {
62 __m128i ida = _mm_sub_epi32(_mm_set1_epi32(255), da);
63 __m128i tmp1 = _mm_mullo_epi16(sc, ida);
64 __m128i isa = _mm_sub_epi32(_mm_set1_epi32(255), sa);
65 __m128i tmp2 = _mm_mullo_epi16(dc, isa);
66 __m128i tmp = _mm_add_epi32(tmp1, tmp2);
67
68 __m128i cmp = _mm_cmpgt_epi32(_mm_slli_epi32(dc, 1), da);
69 __m128i rc1 = _mm_slli_epi32(sc, 1); // 2 * sc
70 rc1 = Multiply32_SSE2(rc1, dc); // *dc
71
72 __m128i rc2 = _mm_mullo_epi16(sa, da); // sa * da
73 __m128i tmp3 = _mm_slli_epi32(_mm_sub_epi32(da, dc), 1); // 2 * (da - dc)
74 tmp3 = Multiply32_SSE2(tmp3, _mm_sub_epi32(sa, sc)); // * (sa - sc)
75 rc2 = _mm_sub_epi32(rc2, tmp3);
76
77 __m128i rc = _mm_or_si128(_mm_andnot_si128(cmp, rc1),
78 _mm_and_si128(cmp, rc2));
79 return clamp_div255round_SSE2(_mm_add_epi32(rc, tmp));
80 }
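// Reviewer sketch, not part of this file: the per-channel formula the vector
// code above evaluates with masks, mirroring the portable overlay_byte()
// referenced in the comment above. clamp_div255round() is sketched earlier.
static inline int overlay_byte_scalar(int sc, int dc, int sa, int da) {
    int tmp = sc * (255 - da) + dc * (255 - sa);
    int rc  = (2 * dc <= da) ? 2 * sc * dc
                             : sa * da - 2 * (da - dc) * (sa - sc);
    return clamp_div255round(rc + tmp);
}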
81
82 static __m128i overlay_modeproc_SSE2(const __m128i& src, const __m128i& dst) {
83 __m128i sa = SkGetPackedA32_SSE2(src);
84 __m128i da = SkGetPackedA32_SSE2(dst);
85
86 __m128i a = srcover_byte_SSE2(sa, da);
87 __m128i r = overlay_byte_SSE2(SkGetPackedR32_SSE2(src),
88 SkGetPackedR32_SSE2(dst), sa, da);
89 __m128i g = overlay_byte_SSE2(SkGetPackedG32_SSE2(src),
90 SkGetPackedG32_SSE2(dst), sa, da);
91 __m128i b = overlay_byte_SSE2(SkGetPackedB32_SSE2(src),
92 SkGetPackedB32_SSE2(dst), sa, da);
93 return SkPackARGB32_SSE2(a, r, g, b);
94 }
95
96 static inline __m128i darken_byte_SSE2(const __m128i& sc, const __m128i& dc,
97 const __m128i& sa, const __m128i& da) {
98 __m128i sd = _mm_mullo_epi16(sc, da);
99 __m128i ds = _mm_mullo_epi16(dc, sa);
100
101 __m128i cmp = _mm_cmplt_epi32(sd, ds);
102
103 __m128i tmp = _mm_add_epi32(sc, dc);
104 __m128i ret1 = _mm_sub_epi32(tmp, SkDiv255Round_SSE2(ds));
105 __m128i ret2 = _mm_sub_epi32(tmp, SkDiv255Round_SSE2(sd));
106 __m128i ret = _mm_or_si128(_mm_and_si128(cmp, ret1),
107 _mm_andnot_si128(cmp, ret2));
108 return ret;
109 }
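// Reviewer sketch, not part of this file: darken subtracts the larger of the two
// cross products; lighten_byte_SSE2() below is identical except the comparison
// flips, subtracting the smaller one instead.
static inline int darken_byte_scalar(int sc, int dc, int sa, int da) {
    int sd = sc * da;
    int ds = dc * sa;
    return sc + dc - SkDiv255Round(sd > ds ? sd : ds);
}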
110
111 static __m128i darken_modeproc_SSE2(const __m128i& src, const __m128i& dst) {
112 __m128i sa = SkGetPackedA32_SSE2(src);
113 __m128i da = SkGetPackedA32_SSE2(dst);
114
115 __m128i a = srcover_byte_SSE2(sa, da);
116 __m128i r = darken_byte_SSE2(SkGetPackedR32_SSE2(src),
117 SkGetPackedR32_SSE2(dst), sa, da);
118 __m128i g = darken_byte_SSE2(SkGetPackedG32_SSE2(src),
119 SkGetPackedG32_SSE2(dst), sa, da);
120 __m128i b = darken_byte_SSE2(SkGetPackedB32_SSE2(src),
121 SkGetPackedB32_SSE2(dst), sa, da);
122 return SkPackARGB32_SSE2(a, r, g, b);
123 }
124
125 static inline __m128i lighten_byte_SSE2(const __m128i& sc, const __m128i& dc,
126 const __m128i& sa, const __m128i& da) {
127 __m128i sd = _mm_mullo_epi16(sc, da);
128 __m128i ds = _mm_mullo_epi16(dc, sa);
129
130 __m128i cmp = _mm_cmpgt_epi32(sd, ds);
131
132 __m128i tmp = _mm_add_epi32(sc, dc);
133 __m128i ret1 = _mm_sub_epi32(tmp, SkDiv255Round_SSE2(ds));
134 __m128i ret2 = _mm_sub_epi32(tmp, SkDiv255Round_SSE2(sd));
135 __m128i ret = _mm_or_si128(_mm_and_si128(cmp, ret1),
136 _mm_andnot_si128(cmp, ret2));
137 return ret;
138 }
139
140 static __m128i lighten_modeproc_SSE2(const __m128i& src, const __m128i& dst) {
141 __m128i sa = SkGetPackedA32_SSE2(src);
142 __m128i da = SkGetPackedA32_SSE2(dst);
143
144 __m128i a = srcover_byte_SSE2(sa, da);
145 __m128i r = lighten_byte_SSE2(SkGetPackedR32_SSE2(src),
146 SkGetPackedR32_SSE2(dst), sa, da);
147 __m128i g = lighten_byte_SSE2(SkGetPackedG32_SSE2(src),
148 SkGetPackedG32_SSE2(dst), sa, da);
149 __m128i b = lighten_byte_SSE2(SkGetPackedB32_SSE2(src),
150 SkGetPackedB32_SSE2(dst), sa, da);
151 return SkPackARGB32_SSE2(a, r, g, b);
152 }
153
154 static inline __m128i colordodge_byte_SSE2(const __m128i& sc, const __m128i& dc,
155 const __m128i& sa, const __m128i& da) {
156 __m128i diff = _mm_sub_epi32(sa, sc);
157 __m128i ida = _mm_sub_epi32(_mm_set1_epi32(255), da);
158 __m128i isa = _mm_sub_epi32(_mm_set1_epi32(255), sa);
159
160 // if (0 == dc)
161 __m128i cmp1 = _mm_cmpeq_epi32(dc, _mm_setzero_si128());
162 __m128i rc1 = _mm_and_si128(cmp1, SkAlphaMulAlpha_SSE2(sc, ida));
163
164 // else if (0 == diff)
165 __m128i cmp2 = _mm_cmpeq_epi32(diff, _mm_setzero_si128());
166 __m128i cmp = _mm_andnot_si128(cmp1, cmp2);
167 __m128i tmp1 = _mm_mullo_epi16(sa, da);
168 __m128i tmp2 = _mm_mullo_epi16(sc, ida);
169 __m128i tmp3 = _mm_mullo_epi16(dc, isa);
170 __m128i rc2 = _mm_add_epi32(tmp1, tmp2);
171 rc2 = _mm_add_epi32(rc2, tmp3);
172 rc2 = clamp_div255round_SSE2(rc2);
173 rc2 = _mm_and_si128(cmp, rc2);
174
175 // else
176 __m128i cmp3 = _mm_or_si128(cmp1, cmp2);
177 __m128i value = _mm_mullo_epi16(dc, sa);
178 diff = shim_mm_div_epi32(value, diff);
179
180 __m128i tmp4 = SkMin32_SSE2(da, diff);
181 tmp4 = Multiply32_SSE2(sa, tmp4);
182 __m128i rc3 = _mm_add_epi32(tmp4, tmp2);
183 rc3 = _mm_add_epi32(rc3, tmp3);
184 rc3 = clamp_div255round_SSE2(rc3);
185 rc3 = _mm_andnot_si128(cmp3, rc3);
186
187 __m128i rc = _mm_or_si128(rc1, rc2);
188 rc = _mm_or_si128(rc, rc3);
189
190 return rc;
191 }
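// Reviewer sketch, not part of this file: the three colordodge cases that the
// masks above (cmp1, cmp, cmp3) fold together, written with branches.
// SkMulDiv255Round and SkMin32 come from the Skia headers included above;
// clamp_div255round() is sketched earlier.
static inline int colordodge_byte_scalar(int sc, int dc, int sa, int da) {
    int diff = sa - sc;
    if (0 == dc) {
        return SkMulDiv255Round(sc, 255 - da);   // == SkAlphaMulAlpha(sc, ida)
    }
    int rc;
    if (0 == diff) {
        rc = sa * da + sc * (255 - da) + dc * (255 - sa);
    } else {
        diff = dc * sa / diff;
        rc = sa * SkMin32(da, diff) + sc * (255 - da) + dc * (255 - sa);
    }
    return clamp_div255round(rc);
}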
192
193 static __m128i colordodge_modeproc_SSE2(const __m128i& src,
194 const __m128i& dst) {
195 __m128i sa = SkGetPackedA32_SSE2(src);
196 __m128i da = SkGetPackedA32_SSE2(dst);
197
198 __m128i a = srcover_byte_SSE2(sa, da);
199 __m128i r = colordodge_byte_SSE2(SkGetPackedR32_SSE2(src),
200 SkGetPackedR32_SSE2(dst), sa, da);
201 __m128i g = colordodge_byte_SSE2(SkGetPackedG32_SSE2(src),
202 SkGetPackedG32_SSE2(dst), sa, da);
203 __m128i b = colordodge_byte_SSE2(SkGetPackedB32_SSE2(src),
204 SkGetPackedB32_SSE2(dst), sa, da);
205 return SkPackARGB32_SSE2(a, r, g, b);
206 }
207
208 static inline __m128i colorburn_byte_SSE2(const __m128i& sc, const __m128i& dc,
209 const __m128i& sa, const __m128i& da) {
210 __m128i ida = _mm_sub_epi32(_mm_set1_epi32(255), da);
211 __m128i isa = _mm_sub_epi32(_mm_set1_epi32(255), sa);
212
213 // if (dc == da)
214 __m128i cmp1 = _mm_cmpeq_epi32(dc, da);
215 __m128i tmp1 = _mm_mullo_epi16(sa, da);
216 __m128i tmp2 = _mm_mullo_epi16(sc, ida);
217 __m128i tmp3 = _mm_mullo_epi16(dc, isa);
218 __m128i rc1 = _mm_add_epi32(tmp1, tmp2);
219 rc1 = _mm_add_epi32(rc1, tmp3);
220 rc1 = clamp_div255round_SSE2(rc1);
221 rc1 = _mm_and_si128(cmp1, rc1);
222
223 // else if (0 == sc)
224 __m128i cmp2 = _mm_cmpeq_epi32(sc, _mm_setzero_si128());
225 __m128i rc2 = SkAlphaMulAlpha_SSE2(dc, isa);
226 __m128i cmp = _mm_andnot_si128(cmp1, cmp2);
227 rc2 = _mm_and_si128(cmp, rc2);
228
229 // else
230 __m128i cmp3 = _mm_or_si128(cmp1, cmp2);
231 __m128i tmp4 = _mm_sub_epi32(da, dc);
232 tmp4 = Multiply32_SSE2(tmp4, sa);
233 tmp4 = shim_mm_div_epi32(tmp4, sc);
234
235 __m128i tmp5 = _mm_sub_epi32(da, SkMin32_SSE2(da, tmp4));
236 tmp5 = Multiply32_SSE2(sa, tmp5);
237 __m128i rc3 = _mm_add_epi32(tmp5, tmp2);
238 rc3 = _mm_add_epi32(rc3, tmp3);
239 rc3 = clamp_div255round_SSE2(rc3);
240 rc3 = _mm_andnot_si128(cmp3, rc3);
241
242 __m128i rc = _mm_or_si128(rc1, rc2);
243 rc = _mm_or_si128(rc, rc3);
244
245 return rc;
246 }
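// Reviewer sketch, not part of this file: colorburn, the mirror of colordodge
// above, again with the mask selects written out as branches.
static inline int colorburn_byte_scalar(int sc, int dc, int sa, int da) {
    int rc;
    if (dc == da) {
        rc = sa * da + sc * (255 - da) + dc * (255 - sa);
    } else if (0 == sc) {
        return SkMulDiv255Round(dc, 255 - sa);   // == SkAlphaMulAlpha(dc, isa)
    } else {
        int tmp = (da - dc) * sa / sc;
        rc = sa * (da - SkMin32(da, tmp)) + sc * (255 - da) + dc * (255 - sa);
    }
    return clamp_div255round(rc);
}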
247
248 static __m128i colorburn_modeproc_SSE2(const __m128i& src, const __m128i& dst) {
249 __m128i sa = SkGetPackedA32_SSE2(src);
250 __m128i da = SkGetPackedA32_SSE2(dst);
251
252 __m128i a = srcover_byte_SSE2(sa, da);
253 __m128i r = colorburn_byte_SSE2(SkGetPackedR32_SSE2(src),
254 SkGetPackedR32_SSE2(dst), sa, da);
255 __m128i g = colorburn_byte_SSE2(SkGetPackedG32_SSE2(src),
256 SkGetPackedG32_SSE2(dst), sa, da);
257 __m128i b = colorburn_byte_SSE2(SkGetPackedB32_SSE2(src),
258 SkGetPackedB32_SSE2(dst), sa, da);
259 return SkPackARGB32_SSE2(a, r, g, b);
260 }
261
262 static inline __m128i hardlight_byte_SSE2(const __m128i& sc, const __m128i& dc,
263 const __m128i& sa, const __m128i& da) {
264 // if (2 * sc <= sa)
265 __m128i tmp1 = _mm_slli_epi32(sc, 1);
266 __m128i cmp1 = _mm_cmpgt_epi32(tmp1, sa);
267 __m128i rc1 = _mm_mullo_epi16(sc, dc); // sc * dc;
268 rc1 = _mm_slli_epi32(rc1, 1); // 2 * sc * dc
269 rc1 = _mm_andnot_si128(cmp1, rc1);
270
271 // else
272 tmp1 = _mm_mullo_epi16(sa, da);
273 __m128i tmp2 = Multiply32_SSE2(_mm_sub_epi32(da, dc),
274 _mm_sub_epi32(sa, sc));
275 tmp2 = _mm_slli_epi32(tmp2, 1);
276 __m128i rc2 = _mm_sub_epi32(tmp1, tmp2);
277 rc2 = _mm_and_si128(cmp1, rc2);
278
279 __m128i rc = _mm_or_si128(rc1, rc2);
280
281 __m128i ida = _mm_sub_epi32(_mm_set1_epi32(255), da);
282 tmp1 = _mm_mullo_epi16(sc, ida);
283 __m128i isa = _mm_sub_epi32(_mm_set1_epi32(255), sa);
284 tmp2 = _mm_mullo_epi16(dc, isa);
285 rc = _mm_add_epi32(rc, tmp1);
286 rc = _mm_add_epi32(rc, tmp2);
287 return clamp_div255round_SSE2(rc);
288 }
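// Reviewer note, not part of this file: hard-light is overlay with source and
// destination swapped, so the only difference from overlay_byte_SSE2() above is
// that the select tests 2*sc <= sa instead of 2*dc <= da:
//     rc = (2 * sc <= sa) ? 2 * sc * dc
//                         : sa * da - 2 * (da - dc) * (sa - sc);
//     result = clamp_div255round(rc + sc * (255 - da) + dc * (255 - sa));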
289
290 static __m128i hardlight_modeproc_SSE2(const __m128i& src, const __m128i& dst) {
291 __m128i sa = SkGetPackedA32_SSE2(src);
292 __m128i da = SkGetPackedA32_SSE2(dst);
293
294 __m128i a = srcover_byte_SSE2(sa, da);
295 __m128i r = hardlight_byte_SSE2(SkGetPackedR32_SSE2(src),
296 SkGetPackedR32_SSE2(dst), sa, da);
297 __m128i g = hardlight_byte_SSE2(SkGetPackedG32_SSE2(src),
298 SkGetPackedG32_SSE2(dst), sa, da);
299 __m128i b = hardlight_byte_SSE2(SkGetPackedB32_SSE2(src),
300 SkGetPackedB32_SSE2(dst), sa, da);
301 return SkPackARGB32_SSE2(a, r, g, b);
302 }
303
304 static __m128i sqrt_unit_byte_SSE2(const __m128i& n) {
305 return SkSqrtBits_SSE2(n, 15+4);
306 }
307
308 static inline __m128i softlight_byte_SSE2(const __m128i& sc, const __m128i& dc,
309 const __m128i& sa, const __m128i& da) {
310 __m128i tmp1, tmp2, tmp3;
311
312 // int m = da ? dc * 256 / da : 0;
313 __m128i cmp = _mm_cmpeq_epi32(da, _mm_setzero_si128());
314 __m128i m = _mm_slli_epi32(dc, 8);
315 __m128 x = _mm_cvtepi32_ps(m);
316 __m128 y = _mm_cvtepi32_ps(da);
317 m = _mm_cvttps_epi32(_mm_div_ps(x, y));
318 m = _mm_andnot_si128(cmp, m);
319
320 // if (2 * sc <= sa)
321 tmp1 = _mm_slli_epi32(sc, 1); // 2 * sc
322 __m128i cmp1 = _mm_cmpgt_epi32(tmp1, sa);
323 tmp1 = _mm_sub_epi32(tmp1, sa); // 2 * sc - sa
324 tmp2 = _mm_sub_epi32(_mm_set1_epi32(256), m); // 256 - m
325 tmp1 = Multiply32_SSE2(tmp1, tmp2);
326 tmp1 = _mm_srai_epi32(tmp1, 8);
327 tmp1 = _mm_add_epi32(sa, tmp1);
328 tmp1 = Multiply32_SSE2(dc, tmp1);
329 __m128i rc1 = _mm_andnot_si128(cmp1, tmp1);
330
331 // else if (4 * dc <= da)
332 tmp2 = _mm_slli_epi32(dc, 2); // dc * 4
333 __m128i cmp2 = _mm_cmpgt_epi32(tmp2, da);
334 __m128i i = _mm_slli_epi32(m, 2); // 4 * m
335 __m128i j = _mm_add_epi32(i, _mm_set1_epi32(256)); // 4 * m + 256
336 __m128i k = Multiply32_SSE2(i, j); // 4 * m * (4 * m + 256)
337 __m128i t = _mm_sub_epi32(m, _mm_set1_epi32(256)); // m - 256
338 i = Multiply32_SSE2(k, t); // 4 * m * (4 * m + 256) * (m - 256)
339 i = _mm_srai_epi32(i, 16); // >> 16
340 j = Multiply32_SSE2(_mm_set1_epi32(7), m); // 7 * m
341 tmp2 = _mm_add_epi32(i, j);
342 i = Multiply32_SSE2(dc, sa); // dc * sa
343 j = _mm_slli_epi32(sc, 1); // 2 * sc
344 j = _mm_sub_epi32(j, sa); // 2 * sc - sa
345 j = Multiply32_SSE2(da, j); // da * (2 * sc - sa)
346 tmp2 = Multiply32_SSE2(j, tmp2); // * tmp
347 tmp2 = _mm_srai_epi32(tmp2, 8); // >> 8
348 tmp2 = _mm_add_epi32(i, tmp2);
349 cmp = _mm_andnot_si128(cmp2, cmp1);
350 __m128i rc2 = _mm_and_si128(cmp, tmp2);
351 __m128i rc = _mm_or_si128(rc1, rc2);
352
353 // else
354 tmp3 = sqrt_unit_byte_SSE2(m);
355 tmp3 = _mm_sub_epi32(tmp3, m);
356 tmp3 = Multiply32_SSE2(j, tmp3); // j = da * (2 * sc - sa)
357 tmp3 = _mm_srai_epi32(tmp3, 8);
358 tmp3 = _mm_add_epi32(i, tmp3); // i = dc * sa
359 cmp = _mm_and_si128(cmp1, cmp2);
360 __m128i rc3 = _mm_and_si128(cmp, tmp3);
361 rc = _mm_or_si128(rc, rc3);
362
363 tmp1 = _mm_sub_epi32(_mm_set1_epi32(255), da); // 255 - da
364 tmp1 = _mm_mullo_epi16(sc, tmp1);
365 tmp2 = _mm_sub_epi32(_mm_set1_epi32(255), sa); // 255 - sa
366 tmp2 = _mm_mullo_epi16(dc, tmp2);
367 rc = _mm_add_epi32(rc, tmp1);
368 rc = _mm_add_epi32(rc, tmp2);
369 return clamp_div255round_SSE2(rc);
370 }
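// Reviewer note, not part of this file: the branchy shape of the soft-light code
// above, with m = dc * 256 / da (0 when da == 0) as an 8.8 fixed-point dc/da
// ratio and sqrt_unit_byte(m) the fixed-point square root that
// sqrt_unit_byte_SSE2() wraps:
//     if (2 * sc <= sa)
//         rc = dc * (sa + ((2 * sc - sa) * (256 - m) >> 8));
//     else if (4 * dc <= da)
//         rc = dc * sa + (da * (2 * sc - sa) *
//                         ((4 * m * (4 * m + 256) * (m - 256) >> 16) + 7 * m) >> 8);
//     else
//         rc = dc * sa + (da * (2 * sc - sa) * (sqrt_unit_byte(m) - m) >> 8);
//     result = clamp_div255round(rc + sc * (255 - da) + dc * (255 - sa));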
371
372 static __m128i softlight_modeproc_SSE2(const __m128i& src, const __m128i& dst) {
373 __m128i sa = SkGetPackedA32_SSE2(src);
374 __m128i da = SkGetPackedA32_SSE2(dst);
375
376 __m128i a = srcover_byte_SSE2(sa, da);
377 __m128i r = softlight_byte_SSE2(SkGetPackedR32_SSE2(src),
378 SkGetPackedR32_SSE2(dst), sa, da);
379 __m128i g = softlight_byte_SSE2(SkGetPackedG32_SSE2(src),
380 SkGetPackedG32_SSE2(dst), sa, da);
381 __m128i b = softlight_byte_SSE2(SkGetPackedB32_SSE2(src),
382 SkGetPackedB32_SSE2(dst), sa, da);
383 return SkPackARGB32_SSE2(a, r, g, b);
384 }
385
386
387 ////////////////////////////////////////////////////////////////////////////////
388
389 typedef __m128i (*SkXfermodeProcSIMD)(const __m128i& src, const __m128i& dst);
390
391 void SkSSE2ProcCoeffXfermode::xfer32(SkPMColor dst[], const SkPMColor src[],
392 int count, const SkAlpha aa[]) const {
393 SkASSERT(dst && src && count >= 0);
394
395 SkXfermodeProc proc = this->getProc();
396     SkXfermodeProcSIMD procSIMD = reinterpret_cast<SkXfermodeProcSIMD>(fProcSIMD);
397 SkASSERT(procSIMD != NULL);
398
399 if (NULL == aa) {
400 if (count >= 4) {
401 while (((size_t)dst & 0x0F) != 0) {
402 *dst = proc(*src, *dst);
403 dst++;
404 src++;
405 count--;
406 }
407
408 const __m128i* s = reinterpret_cast<const __m128i*>(src);
409 __m128i* d = reinterpret_cast<__m128i*>(dst);
410
411 while (count >= 4) {
412 __m128i src_pixel = _mm_loadu_si128(s++);
413 __m128i dst_pixel = _mm_load_si128(d);
414
415 dst_pixel = procSIMD(src_pixel, dst_pixel);
416 _mm_store_si128(d++, dst_pixel);
417 count -= 4;
418 }
419
420 src = reinterpret_cast<const SkPMColor*>(s);
421 dst = reinterpret_cast<SkPMColor*>(d);
422 }
423
424 for (int i = count - 1; i >= 0; --i) {
425 *dst = proc(*src, *dst);
426 dst++;
427 src++;
428 }
429 } else {
430 for (int i = count - 1; i >= 0; --i) {
431 unsigned a = aa[i];
432 if (0 != a) {
433 SkPMColor dstC = dst[i];
434 SkPMColor C = proc(src[i], dstC);
435 if (a != 0xFF) {
436 C = SkFourByteInterp(C, dstC, a);
437 }
438 dst[i] = C;
439 }
440 }
441 }
442 }
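// Reviewer note, not part of this file: xfer32() above advances dst with the
// scalar proc until it reaches a 16-byte boundary, then blends 4 SkPMColors per
// iteration (unaligned loads from src, aligned load/store on dst), and falls
// back to the scalar proc for the remaining 0-3 pixels and for the per-pixel
// coverage (aa != NULL) path.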
443
444 void SkSSE2ProcCoeffXfermode::xfer16(uint16_t dst[], const SkPMColor src[],
445 int count, const SkAlpha aa[]) const {
446 SkASSERT(dst && src && count >= 0);
447
448 SkXfermodeProc proc = this->getProc();
449     SkXfermodeProcSIMD procSIMD = reinterpret_cast<SkXfermodeProcSIMD>(fProcSIMD);
450 SkASSERT(procSIMD != NULL);
451
452 if (NULL == aa) {
453 if (count >= 8) {
454 while (((size_t)dst & 0x0F) != 0) {
455 SkPMColor dstC = SkPixel16ToPixel32(*dst);
456 *dst = SkPixel32ToPixel16_ToU16(proc(*src, dstC));
457 dst++;
458 src++;
459 count--;
460 }
461
462 const __m128i* s = reinterpret_cast<const __m128i*>(src);
463 __m128i* d = reinterpret_cast<__m128i*>(dst);
464
465 while (count >= 8) {
466 __m128i src_pixel1 = _mm_loadu_si128(s++);
467 __m128i src_pixel2 = _mm_loadu_si128(s++);
468 __m128i dst_pixel = _mm_load_si128(d);
469
470                 __m128i dst_pixel1 = _mm_unpacklo_epi16(dst_pixel, _mm_setzero_si128());
471                 __m128i dst_pixel2 = _mm_unpackhi_epi16(dst_pixel, _mm_setzero_si128());
472
473 __m128i dstC1 = SkPixel16ToPixel32_SSE2(dst_pixel1);
474 __m128i dstC2 = SkPixel16ToPixel32_SSE2(dst_pixel2);
475
476 dst_pixel1 = procSIMD(src_pixel1, dstC1);
477 dst_pixel2 = procSIMD(src_pixel2, dstC2);
478                 dst_pixel = SkPixel32ToPixel16_ToU16_SSE2(dst_pixel1, dst_pixel2);
479
480 _mm_store_si128(d++, dst_pixel);
481 count -= 8;
482 }
483
484 src = reinterpret_cast<const SkPMColor*>(s);
485 dst = reinterpret_cast<uint16_t*>(d);
486 }
487
488 for (int i = count - 1; i >= 0; --i) {
489 SkPMColor dstC = SkPixel16ToPixel32(*dst);
490 *dst = SkPixel32ToPixel16_ToU16(proc(*src, dstC));
491 dst++;
492 src++;
493 }
494 } else {
495 for (int i = count - 1; i >= 0; --i) {
496 unsigned a = aa[i];
497 if (0 != a) {
498 SkPMColor dstC = SkPixel16ToPixel32(dst[i]);
499 SkPMColor C = proc(src[i], dstC);
500 if (0xFF != a) {
501 C = SkFourByteInterp(C, dstC, a);
502 }
503 dst[i] = SkPixel32ToPixel16_ToU16(C);
504 }
505 }
506 }
507 }
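// Reviewer note, not part of this file: the 565 path above works 8 pixels per
// iteration since one 16-byte register holds 8 RGB565 values; each half is
// widened to 8888 with SkPixel16ToPixel32_SSE2(), blended by the SIMD proc, and
// repacked with SkPixel32ToPixel16_ToU16_SSE2(). Alignment, tail, and coverage
// handling mirror xfer32() above.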
508
509 #ifndef SK_IGNORE_TO_STRING
510 void SkSSE2ProcCoeffXfermode::toString(SkString* str) const {
511 this->INHERITED::toString(str);
512 }
513 #endif
514
515 SkProcCoeffXfermode* SkPlatformXfermodeFactory_impl_SSE2(const ProcCoeff& rec,
516 SkXfermode::Mode mode) {
517 SkXfermodeProcSIMD proc = nullptr;
518 switch (mode) {
519 // TODO(mtklein): Sk4pxXfermode has these now. Clean up the whole file!
520         case SkProcCoeffXfermode::kOverlay_Mode:    proc = overlay_modeproc_SSE2;    break;
521         case SkProcCoeffXfermode::kDarken_Mode:     proc = darken_modeproc_SSE2;     break;
522         case SkProcCoeffXfermode::kLighten_Mode:    proc = lighten_modeproc_SSE2;    break;
523         case SkProcCoeffXfermode::kHardLight_Mode:  proc = hardlight_modeproc_SSE2;  break;
524         case SkProcCoeffXfermode::kColorDodge_Mode: proc = colordodge_modeproc_SSE2; break;
525         case SkProcCoeffXfermode::kColorBurn_Mode:  proc = colorburn_modeproc_SSE2;  break;
526         case SkProcCoeffXfermode::kSoftLight_Mode:  proc = softlight_modeproc_SSE2;  break;
527 default: break;
528 }
529 return proc ? SkNEW_ARGS(SkSSE2ProcCoeffXfermode, (rec, mode, (void*)proc)) : nullptr;
530 }
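// Reviewer note, not part of this file: modes without a case above leave proc as
// nullptr, so the factory returns nullptr and the caller is expected to fall
// back to the ordinary portable SkProcCoeffXfermode for that mode.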
