/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef SkNx_sse_DEFINED
#define SkNx_sse_DEFINED

#include "SkCpu.h"
#include <immintrin.h>

// This file may assume <= SSE2, but must check SK_CPU_SSE_LEVEL for anything more recent.
// If you do, make sure this is in a static inline function... anywhere else risks violating ODR.
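//
// As an illustration of that rule (a sketch added for clarity, not part of the original
// header; the name floor_ps is hypothetical), a newer instruction is either guarded by a
// compile-time SK_CPU_SSE_LEVEL check or kept inside a static inline function so each
// translation unit sees one consistent definition:
//
//     static inline __m128 floor_ps(__m128 v) {
//     #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
//         return _mm_floor_ps(v);                // SSE4.1 guaranteed at compile time.
//     #else
//         __m128 roundtrip = _mm_cvtepi32_ps(_mm_cvttps_epi32(v));
//         return _mm_sub_ps(roundtrip,
//                           _mm_and_ps(_mm_cmpgt_ps(roundtrip, v), _mm_set1_ps(1.0f)));
//     #endif
//     }
//
// SkNx<4, float>::floor() below uses the same idea, but with a runtime SkCpu::Supports() check.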

#define SKNX_IS_FAST

template <>
class SkNx<2, float> {
public:
    SkNx(const __m128& vec) : fVec(vec) {}

    SkNx() {}
    SkNx(float val) : fVec(_mm_set1_ps(val)) {}
    static SkNx Load(const void* ptr) {
        return _mm_castsi128_ps(_mm_loadl_epi64((const __m128i*)ptr));
    }
    SkNx(float a, float b) : fVec(_mm_setr_ps(a,b,0,0)) {}

    void store(void* ptr) const { _mm_storel_pi((__m64*)ptr, fVec); }

    SkNx operator + (const SkNx& o) const { return _mm_add_ps(fVec, o.fVec); }
    SkNx operator - (const SkNx& o) const { return _mm_sub_ps(fVec, o.fVec); }
    SkNx operator * (const SkNx& o) const { return _mm_mul_ps(fVec, o.fVec); }
    SkNx operator / (const SkNx& o) const { return _mm_div_ps(fVec, o.fVec); }

    SkNx operator == (const SkNx& o) const { return _mm_cmpeq_ps (fVec, o.fVec); }
    SkNx operator != (const SkNx& o) const { return _mm_cmpneq_ps(fVec, o.fVec); }
    SkNx operator  < (const SkNx& o) const { return _mm_cmplt_ps (fVec, o.fVec); }
    SkNx operator  > (const SkNx& o) const { return _mm_cmpgt_ps (fVec, o.fVec); }
    SkNx operator <= (const SkNx& o) const { return _mm_cmple_ps (fVec, o.fVec); }
    SkNx operator >= (const SkNx& o) const { return _mm_cmpge_ps (fVec, o.fVec); }

    static SkNx Min(const SkNx& l, const SkNx& r) { return _mm_min_ps(l.fVec, r.fVec); }
    static SkNx Max(const SkNx& l, const SkNx& r) { return _mm_max_ps(l.fVec, r.fVec); }

    SkNx  sqrt() const { return _mm_sqrt_ps (fVec); }
    SkNx rsqrt() const { return _mm_rsqrt_ps(fVec); }
    SkNx invert() const { return _mm_rcp_ps(fVec); }

    float operator[](int k) const {
        SkASSERT(0 <= k && k < 2);
        union { __m128 v; float fs[4]; } pun = {fVec};
        return pun.fs[k&1];
    }

    // Only the low two lanes (8 bytes) hold real data, so mask the byte movemask down to them.
    bool allTrue() const { return 0xff == (_mm_movemask_epi8(_mm_castps_si128(fVec)) & 0xff); }
    bool anyTrue() const { return 0x00 != (_mm_movemask_epi8(_mm_castps_si128(fVec)) & 0xff); }

    __m128 fVec;
};

template <>
class SkNx<4, float> {
public:
    SkNx(const __m128& vec) : fVec(vec) {}

    SkNx() {}
    SkNx(float val) : fVec( _mm_set1_ps(val) ) {}
    static SkNx Load(const void* ptr) { return _mm_loadu_ps((const float*)ptr); }

    SkNx(float a, float b, float c, float d) : fVec(_mm_setr_ps(a,b,c,d)) {}

    void store(void* ptr) const { _mm_storeu_ps((float*)ptr, fVec); }

    SkNx operator + (const SkNx& o) const { return _mm_add_ps(fVec, o.fVec); }
    SkNx operator - (const SkNx& o) const { return _mm_sub_ps(fVec, o.fVec); }
    SkNx operator * (const SkNx& o) const { return _mm_mul_ps(fVec, o.fVec); }
    SkNx operator / (const SkNx& o) const { return _mm_div_ps(fVec, o.fVec); }

    SkNx operator == (const SkNx& o) const { return _mm_cmpeq_ps (fVec, o.fVec); }
    SkNx operator != (const SkNx& o) const { return _mm_cmpneq_ps(fVec, o.fVec); }
    SkNx operator  < (const SkNx& o) const { return _mm_cmplt_ps (fVec, o.fVec); }
    SkNx operator  > (const SkNx& o) const { return _mm_cmpgt_ps (fVec, o.fVec); }
    SkNx operator <= (const SkNx& o) const { return _mm_cmple_ps (fVec, o.fVec); }
    SkNx operator >= (const SkNx& o) const { return _mm_cmpge_ps (fVec, o.fVec); }

    static SkNx Min(const SkNx& l, const SkNx& r) { return _mm_min_ps(l.fVec, r.fVec); }
    static SkNx Max(const SkNx& l, const SkNx& r) { return _mm_max_ps(l.fVec, r.fVec); }

    SkNx abs() const { return _mm_andnot_ps(_mm_set1_ps(-0.0f), fVec); }
    SkNx floor() const {
        if (SkCpu::Supports(SkCpu::SSE41)) {
            __m128 r;
        #if defined(__GNUC__) || defined(__clang__)
            // GCC and Clang refuse the _mm_floor_ps() intrinsic unless the whole file is
            // compiled with SSE4.1 enabled, so issue roundps directly via inline asm.
            asm("roundps $0x1, %[fVec], %[r]" : [r]"=x"(r) : [fVec]"x"(fVec));
        #else
            r = _mm_floor_ps(fVec);
        #endif
            return r;
        }
        // Emulate _mm_floor_ps() with SSE2:
        //   - roundtrip through integers via truncation
        //   - subtract 1 if that's too big (possible for negative values).
        // This restricts the domain of our inputs to a maximum somewhere around 2^31.
        // Seems plenty big.
        __m128 roundtrip = _mm_cvtepi32_ps(_mm_cvttps_epi32(fVec));
        __m128 too_big = _mm_cmpgt_ps(roundtrip, fVec);
        return _mm_sub_ps(roundtrip, _mm_and_ps(too_big, _mm_set1_ps(1.0f)));
    }

    SkNx  sqrt() const { return _mm_sqrt_ps (fVec); }
    SkNx rsqrt() const { return _mm_rsqrt_ps(fVec); }
    SkNx invert() const { return _mm_rcp_ps(fVec); }

    float operator[](int k) const {
        SkASSERT(0 <= k && k < 4);
        union { __m128 v; float fs[4]; } pun = {fVec};
        return pun.fs[k&3];
    }

    bool allTrue() const { return 0xffff == _mm_movemask_epi8(_mm_castps_si128(fVec)); }
    bool anyTrue() const { return 0x0000 != _mm_movemask_epi8(_mm_castps_si128(fVec)); }

    SkNx thenElse(const SkNx& t, const SkNx& e) const {
    #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
        return _mm_blendv_ps(e.fVec, t.fVec, fVec);
    #else
        return _mm_or_ps(_mm_and_ps   (fVec, t.fVec),
                         _mm_andnot_ps(fVec, e.fVec));
    #endif
    }

    __m128 fVec;
};
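
// A minimal usage sketch (illustrative only; Sk4f is the alias for SkNx<4, float> that
// the SkNx_cast specializations below also use, and the scalar operand relies on the
// implicit SkNx(float) constructor):
//
//     float in[4] = {1, 2, 3, 4}, out[4];
//     Sk4f v = Sk4f::Load(in);
//     (v * Sk4f(2.0f) + Sk4f(0.5f)).floor().store(out);   // out == {2, 4, 6, 8}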

template <>
class SkNx<4, int> {
public:
    SkNx(const __m128i& vec) : fVec(vec) {}

    SkNx() {}
    SkNx(int val) : fVec(_mm_set1_epi32(val)) {}
    static SkNx Load(const void* ptr) { return _mm_loadu_si128((const __m128i*)ptr); }
    SkNx(int a, int b, int c, int d) : fVec(_mm_setr_epi32(a,b,c,d)) {}

    void store(void* ptr) const { _mm_storeu_si128((__m128i*)ptr, fVec); }

    SkNx operator + (const SkNx& o) const { return _mm_add_epi32(fVec, o.fVec); }
    SkNx operator - (const SkNx& o) const { return _mm_sub_epi32(fVec, o.fVec); }
    SkNx operator * (const SkNx& o) const {
        // SSE2 has no 32x32 -> 32-bit multiply (_mm_mullo_epi32 is SSE4.1), so multiply
        // the even and odd lanes separately with _mm_mul_epu32, keep the low 32 bits of
        // each 64-bit product, and interleave them back into order.
        __m128i mul20 = _mm_mul_epu32(fVec, o.fVec),
                mul31 = _mm_mul_epu32(_mm_srli_si128(fVec, 4), _mm_srli_si128(o.fVec, 4));
        return _mm_unpacklo_epi32(_mm_shuffle_epi32(mul20, _MM_SHUFFLE(0,0,2,0)),
                                  _mm_shuffle_epi32(mul31, _MM_SHUFFLE(0,0,2,0)));
    }

    SkNx operator | (const SkNx& o) const { return _mm_or_si128(fVec, o.fVec); }

    SkNx operator << (int bits) const { return _mm_slli_epi32(fVec, bits); }
    SkNx operator >> (int bits) const { return _mm_srai_epi32(fVec, bits); }

    int operator[](int k) const {
        SkASSERT(0 <= k && k < 4);
        union { __m128i v; int is[4]; } pun = {fVec};
        return pun.is[k&3];
    }

    __m128i fVec;
};

template <>
class SkNx<4, uint16_t> {
public:
    SkNx(const __m128i& vec) : fVec(vec) {}

    SkNx() {}
    SkNx(uint16_t val) : fVec(_mm_set1_epi16(val)) {}
    static SkNx Load(const void* ptr) { return _mm_loadl_epi64((const __m128i*)ptr); }
    SkNx(uint16_t a, uint16_t b, uint16_t c, uint16_t d) : fVec(_mm_setr_epi16(a,b,c,d,0,0,0,0)) {}

    void store(void* ptr) const { _mm_storel_epi64((__m128i*)ptr, fVec); }

    SkNx operator + (const SkNx& o) const { return _mm_add_epi16(fVec, o.fVec); }
    SkNx operator - (const SkNx& o) const { return _mm_sub_epi16(fVec, o.fVec); }
    SkNx operator * (const SkNx& o) const { return _mm_mullo_epi16(fVec, o.fVec); }

    SkNx operator << (int bits) const { return _mm_slli_epi16(fVec, bits); }
    SkNx operator >> (int bits) const { return _mm_srli_epi16(fVec, bits); }

    uint16_t operator[](int k) const {
        SkASSERT(0 <= k && k < 4);
        union { __m128i v; uint16_t us[8]; } pun = {fVec};
        return pun.us[k&3];
    }

    __m128i fVec;
};

template <>
class SkNx<8, uint16_t> {
public:
    SkNx(const __m128i& vec) : fVec(vec) {}

    SkNx() {}
    SkNx(uint16_t val) : fVec(_mm_set1_epi16(val)) {}
    static SkNx Load(const void* ptr) { return _mm_loadu_si128((const __m128i*)ptr); }
    SkNx(uint16_t a, uint16_t b, uint16_t c, uint16_t d,
         uint16_t e, uint16_t f, uint16_t g, uint16_t h) : fVec(_mm_setr_epi16(a,b,c,d,e,f,g,h)) {}

    void store(void* ptr) const { _mm_storeu_si128((__m128i*)ptr, fVec); }

    SkNx operator + (const SkNx& o) const { return _mm_add_epi16(fVec, o.fVec); }
    SkNx operator - (const SkNx& o) const { return _mm_sub_epi16(fVec, o.fVec); }
    SkNx operator * (const SkNx& o) const { return _mm_mullo_epi16(fVec, o.fVec); }

    SkNx operator << (int bits) const { return _mm_slli_epi16(fVec, bits); }
    SkNx operator >> (int bits) const { return _mm_srli_epi16(fVec, bits); }

    static SkNx Min(const SkNx& a, const SkNx& b) {
        // No unsigned _mm_min_epu16, so we'll shift into a space where we can use the
        // signed version, _mm_min_epi16, then shift back.
        // The byte-wise add/sub is fine: 0x8000 has a zero low byte and its high byte
        // just flips the sign bit, so the epi8 ops give the same result as epi16 would.
        const uint16_t top = 0x8000; // Keep this separate from _mm_set1_epi16 or MSVC will whine.
        const __m128i top_8x = _mm_set1_epi16(top);
        return _mm_add_epi8(top_8x, _mm_min_epi16(_mm_sub_epi8(a.fVec, top_8x),
                                                  _mm_sub_epi8(b.fVec, top_8x)));
    }

    SkNx thenElse(const SkNx& t, const SkNx& e) const {
        return _mm_or_si128(_mm_and_si128   (fVec, t.fVec),
                            _mm_andnot_si128(fVec, e.fVec));
    }

    uint16_t operator[](int k) const {
        SkASSERT(0 <= k && k < 8);
        union { __m128i v; uint16_t us[8]; } pun = {fVec};
        return pun.us[k&7];
    }

    __m128i fVec;
};

template <>
class SkNx<4, uint8_t> {
public:
    SkNx() {}
    SkNx(const __m128i& vec) : fVec(vec) {}
    SkNx(uint8_t a, uint8_t b, uint8_t c, uint8_t d)
        : fVec(_mm_setr_epi8(a,b,c,d, 0,0,0,0, 0,0,0,0, 0,0,0,0)) {}

    static SkNx Load(const void* ptr) { return _mm_cvtsi32_si128(*(const int*)ptr); }
    void store(void* ptr) const { *(int*)ptr = _mm_cvtsi128_si32(fVec); }

    uint8_t operator[](int k) const {
        SkASSERT(0 <= k && k < 4);
        union { __m128i v; uint8_t us[16]; } pun = {fVec};
        return pun.us[k&3];
    }

    // TODO as needed

    __m128i fVec;
};

template <>
class SkNx<16, uint8_t> {
public:
    SkNx(const __m128i& vec) : fVec(vec) {}

    SkNx() {}
    SkNx(uint8_t val) : fVec(_mm_set1_epi8(val)) {}
    static SkNx Load(const void* ptr) { return _mm_loadu_si128((const __m128i*)ptr); }
    SkNx(uint8_t a, uint8_t b, uint8_t c, uint8_t d,
         uint8_t e, uint8_t f, uint8_t g, uint8_t h,
         uint8_t i, uint8_t j, uint8_t k, uint8_t l,
         uint8_t m, uint8_t n, uint8_t o, uint8_t p)
        : fVec(_mm_setr_epi8(a,b,c,d, e,f,g,h, i,j,k,l, m,n,o,p)) {}

    void store(void* ptr) const { _mm_storeu_si128((__m128i*)ptr, fVec); }

    SkNx saturatedAdd(const SkNx& o) const { return _mm_adds_epu8(fVec, o.fVec); }

    SkNx operator + (const SkNx& o) const { return _mm_add_epi8(fVec, o.fVec); }
    SkNx operator - (const SkNx& o) const { return _mm_sub_epi8(fVec, o.fVec); }

    static SkNx Min(const SkNx& a, const SkNx& b) { return _mm_min_epu8(a.fVec, b.fVec); }
    SkNx operator < (const SkNx& o) const {
        // There's no unsigned _mm_cmplt_epu8, so we flip the sign bits then use a signed compare.
        auto flip = _mm_set1_epi8(char(0x80));
        return _mm_cmplt_epi8(_mm_xor_si128(flip, fVec), _mm_xor_si128(flip, o.fVec));
    }

    uint8_t operator[](int k) const {
        SkASSERT(0 <= k && k < 16);
        union { __m128i v; uint8_t us[16]; } pun = {fVec};
        return pun.us[k&15];
    }

    SkNx thenElse(const SkNx& t, const SkNx& e) const {
        return _mm_or_si128(_mm_and_si128   (fVec, t.fVec),
                            _mm_andnot_si128(fVec, e.fVec));
    }

    __m128i fVec;
};

template<> /*static*/ inline Sk4f SkNx_cast<float, int>(const Sk4i& src) {
    return _mm_cvtepi32_ps(src.fVec);
}

template <> /*static*/ inline Sk4i SkNx_cast<int, float>(const Sk4f& src) {
    return _mm_cvttps_epi32(src.fVec);
}

template<> /*static*/ inline Sk4h SkNx_cast<uint16_t, float>(const Sk4f& src) {
    auto _32 = _mm_cvttps_epi32(src.fVec);
    // Ideally we'd use _mm_packus_epi32 here.  But that's SSE4.1+.
#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
    // With SSSE3, we can just shuffle the low 2 bytes from each lane right into place.
    const int _ = ~0;
    return _mm_shuffle_epi8(_32, _mm_setr_epi8(0,1, 4,5, 8,9, 12,13, _,_,_,_,_,_,_,_));
#else
    // With SSE2, we have to emulate _mm_packus_epi32 with _mm_packs_epi32:
    _32 = _mm_sub_epi32(_32, _mm_set1_epi32((int)0x00008000));
    return _mm_add_epi16(_mm_packs_epi32(_32, _32), _mm_set1_epi16((short)0x8000));
#endif
}
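// Why that SSE2 bias trick works (reasoning added here for clarity; it assumes the
// truncated inputs land in [0, 65535]): subtracting 0x8000 maps [0, 65535] onto the
// signed range [-32768, 32767], so _mm_packs_epi32's signed saturation never clips an
// in-range value, and adding 0x8000 back modulo 2^16 restores the original unsigned
// 16-bit result.  E.g. 40000 -> 40000 - 32768 = 7232, packs keeps 7232, then
// 7232 + 32768 = 40000.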

template<> /*static*/ inline Sk4b SkNx_cast<uint8_t, float>(const Sk4f& src) {
    auto _32 = _mm_cvttps_epi32(src.fVec);
#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
    const int _ = ~0;
    return _mm_shuffle_epi8(_32, _mm_setr_epi8(0,4,8,12, _,_,_,_, _,_,_,_, _,_,_,_));
#else
    auto _16 = _mm_packus_epi16(_32, _32);
    return _mm_packus_epi16(_16, _16);
#endif
}

template<> /*static*/ inline Sk4f SkNx_cast<float, uint8_t>(const Sk4b& src) {
#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
    const int _ = ~0;
    auto _32 = _mm_shuffle_epi8(src.fVec, _mm_setr_epi8(0,_,_,_, 1,_,_,_, 2,_,_,_, 3,_,_,_));
#else
    auto _16 = _mm_unpacklo_epi8(src.fVec, _mm_setzero_si128()),
         _32 = _mm_unpacklo_epi16(_16, _mm_setzero_si128());
#endif
    return _mm_cvtepi32_ps(_32);
}

template<> /*static*/ inline Sk4f SkNx_cast<float, uint16_t>(const Sk4h& src) {
    auto _32 = _mm_unpacklo_epi16(src.fVec, _mm_setzero_si128());
    return _mm_cvtepi32_ps(_32);
}

template<> /*static*/ inline Sk16b SkNx_cast<uint8_t, float>(const Sk16f& src) {
    Sk8f ab, cd;
    SkNx_split(src, &ab, &cd);

    Sk4f a,b,c,d;
    SkNx_split(ab, &a, &b);
    SkNx_split(cd, &c, &d);

    return _mm_packus_epi16(_mm_packus_epi16(_mm_cvttps_epi32(a.fVec),
                                             _mm_cvttps_epi32(b.fVec)),
                            _mm_packus_epi16(_mm_cvttps_epi32(c.fVec),
                                             _mm_cvttps_epi32(d.fVec)));
}

template<> /*static*/ inline Sk4h SkNx_cast<uint16_t, uint8_t>(const Sk4b& src) {
    return _mm_unpacklo_epi8(src.fVec, _mm_setzero_si128());
}

template<> /*static*/ inline Sk4b SkNx_cast<uint8_t, uint16_t>(const Sk4h& src) {
    return _mm_packus_epi16(src.fVec, src.fVec);
}

#endif//SkNx_sse_DEFINED