/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef SkNx_sse_DEFINED
#define SkNx_sse_DEFINED

#include <immintrin.h>

// This file may assume <= SSE2, but must check SK_CPU_SSE_LEVEL for anything more recent.
// If you do, make sure this is in a static inline function... anywhere else risks violating ODR.

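// For example (illustrative only -- sk_floor_ps is not a real function here),
// the guarded pattern looks like:
//
//     static inline __m128 sk_floor_ps(__m128 v) {
//     #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
//         return _mm_floor_ps(v);   // newer instruction, guarded
//     #else
//         ...                       // SSE2-only fallback
//     #endif
//     }
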
#define SKNX_IS_FAST

template <>
class SkNx<2, float> {
public:
    SkNx(const __m128& vec) : fVec(vec) {}

    SkNx() {}
    SkNx(float val) : fVec(_mm_set1_ps(val)) {}
    static SkNx Load(const void* ptr) {
        return _mm_castsi128_ps(_mm_loadl_epi64((const __m128i*)ptr));
    }
    SkNx(float a, float b) : fVec(_mm_setr_ps(a,b,0,0)) {}

    void store(void* ptr) const { _mm_storel_pi((__m64*)ptr, fVec); }

    SkNx operator + (const SkNx& o) const { return _mm_add_ps(fVec, o.fVec); }
    SkNx operator - (const SkNx& o) const { return _mm_sub_ps(fVec, o.fVec); }
    SkNx operator * (const SkNx& o) const { return _mm_mul_ps(fVec, o.fVec); }
    SkNx operator / (const SkNx& o) const { return _mm_div_ps(fVec, o.fVec); }

    SkNx operator == (const SkNx& o) const { return _mm_cmpeq_ps (fVec, o.fVec); }
    SkNx operator != (const SkNx& o) const { return _mm_cmpneq_ps(fVec, o.fVec); }
    SkNx operator  < (const SkNx& o) const { return _mm_cmplt_ps (fVec, o.fVec); }
    SkNx operator  > (const SkNx& o) const { return _mm_cmpgt_ps (fVec, o.fVec); }
    SkNx operator <= (const SkNx& o) const { return _mm_cmple_ps (fVec, o.fVec); }
    SkNx operator >= (const SkNx& o) const { return _mm_cmpge_ps (fVec, o.fVec); }

    static SkNx Min(const SkNx& l, const SkNx& r) { return _mm_min_ps(l.fVec, r.fVec); }
    static SkNx Max(const SkNx& l, const SkNx& r) { return _mm_max_ps(l.fVec, r.fVec); }

    SkNx  sqrt() const { return _mm_sqrt_ps (fVec); }
    SkNx rsqrt() const { return _mm_rsqrt_ps(fVec); }
    SkNx invert() const { return _mm_rcp_ps(fVec); }

    float operator[](int k) const {
        SkASSERT(0 <= k && k < 2);
        union { __m128 v; float fs[4]; } pun = {fVec};
        return pun.fs[k&1];
    }

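    // _mm_movemask_epi8() gathers the top bit of all 16 bytes; our two floats
    // occupy the low 8 bytes, so we mask to 0xff and compare against all-set.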
    bool allTrue() const { return 0xff == (_mm_movemask_epi8(_mm_castps_si128(fVec)) & 0xff); }
    bool anyTrue() const { return 0x00 != (_mm_movemask_epi8(_mm_castps_si128(fVec)) & 0xff); }

    __m128 fVec;
};

template <>
class SkNx<4, float> {
public:
    SkNx(const __m128& vec) : fVec(vec) {}

    SkNx() {}
    SkNx(float val) : fVec( _mm_set1_ps(val) ) {}
    static SkNx Load(const void* ptr) { return _mm_loadu_ps((const float*)ptr); }

    SkNx(float a, float b, float c, float d) : fVec(_mm_setr_ps(a,b,c,d)) {}

    void store(void* ptr) const { _mm_storeu_ps((float*)ptr, fVec); }

    SkNx operator + (const SkNx& o) const { return _mm_add_ps(fVec, o.fVec); }
    SkNx operator - (const SkNx& o) const { return _mm_sub_ps(fVec, o.fVec); }
    SkNx operator * (const SkNx& o) const { return _mm_mul_ps(fVec, o.fVec); }
    SkNx operator / (const SkNx& o) const { return _mm_div_ps(fVec, o.fVec); }

    SkNx operator == (const SkNx& o) const { return _mm_cmpeq_ps (fVec, o.fVec); }
    SkNx operator != (const SkNx& o) const { return _mm_cmpneq_ps(fVec, o.fVec); }
    SkNx operator  < (const SkNx& o) const { return _mm_cmplt_ps (fVec, o.fVec); }
    SkNx operator  > (const SkNx& o) const { return _mm_cmpgt_ps (fVec, o.fVec); }
    SkNx operator <= (const SkNx& o) const { return _mm_cmple_ps (fVec, o.fVec); }
    SkNx operator >= (const SkNx& o) const { return _mm_cmpge_ps (fVec, o.fVec); }

    static SkNx Min(const SkNx& l, const SkNx& r) { return _mm_min_ps(l.fVec, r.fVec); }
    static SkNx Max(const SkNx& l, const SkNx& r) { return _mm_max_ps(l.fVec, r.fVec); }

    SkNx abs() const { return _mm_andnot_ps(_mm_set1_ps(-0.0f), fVec); }
    SkNx floor() const {
#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
        return _mm_floor_ps(fVec);
#else
        // Emulate _mm_floor_ps() with SSE2:
        //   - roundtrip through integers via truncation
        //   - subtract 1 if that's too big (possible for negative values).
        // This restricts the domain of our inputs to a maximum somewhere around 2^31.
        // Seems plenty big.
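        // e.g. fVec = -1.5f: truncation rounds toward zero to -1.0f, which is
        // greater than -1.5f, so we subtract 1.0f and get the true floor, -2.0f.
        // (+1.5f truncates to 1.0f, which isn't too big, so it's left alone.)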
        __m128 roundtrip = _mm_cvtepi32_ps(_mm_cvttps_epi32(fVec));
        __m128 too_big = _mm_cmpgt_ps(roundtrip, fVec);
        return _mm_sub_ps(roundtrip, _mm_and_ps(too_big, _mm_set1_ps(1.0f)));
#endif
    }

    SkNx  sqrt() const { return _mm_sqrt_ps (fVec); }
    SkNx rsqrt() const { return _mm_rsqrt_ps(fVec); }
    SkNx invert() const { return _mm_rcp_ps(fVec); }

    float operator[](int k) const {
        SkASSERT(0 <= k && k < 4);
        union { __m128 v; float fs[4]; } pun = {fVec};
        return pun.fs[k&3];
    }

    bool allTrue() const { return 0xffff == _mm_movemask_epi8(_mm_castps_si128(fVec)); }
    bool anyTrue() const { return 0x0000 != _mm_movemask_epi8(_mm_castps_si128(fVec)); }

    SkNx thenElse(const SkNx& t, const SkNx& e) const {
#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
        return _mm_blendv_ps(e.fVec, t.fVec, fVec);
#else
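        // Bitwise select: assumes fVec came from a comparison, so each lane is
        // either all 1s (pick t) or all 0s (pick e).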
        return _mm_or_ps(_mm_and_ps   (fVec, t.fVec),
                         _mm_andnot_ps(fVec, e.fVec));
#endif
    }

    __m128 fVec;
};

template <>
class SkNx<4, int> {
public:
    SkNx(const __m128i& vec) : fVec(vec) {}

    SkNx() {}
    SkNx(int val) : fVec(_mm_set1_epi32(val)) {}
    static SkNx Load(const void* ptr) { return _mm_loadu_si128((const __m128i*)ptr); }
    SkNx(int a, int b, int c, int d) : fVec(_mm_setr_epi32(a,b,c,d)) {}

    void store(void* ptr) const { _mm_storeu_si128((__m128i*)ptr, fVec); }

    SkNx operator + (const SkNx& o) const { return _mm_add_epi32(fVec, o.fVec); }
    SkNx operator - (const SkNx& o) const { return _mm_sub_epi32(fVec, o.fVec); }
    SkNx operator * (const SkNx& o) const {
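        // _mm_mullo_epi32 is SSE4.1+, so do it with _mm_mul_epu32, which widens
        // lanes 0 and 2 to 64-bit products: multiply once as-is for lanes 0 and
        // 2, once shifted down 4 bytes for lanes 1 and 3, then shuffle the low
        // 32 bits of each product back together.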
        __m128i mul20 = _mm_mul_epu32(fVec, o.fVec),
                mul31 = _mm_mul_epu32(_mm_srli_si128(fVec, 4), _mm_srli_si128(o.fVec, 4));
        return _mm_unpacklo_epi32(_mm_shuffle_epi32(mul20, _MM_SHUFFLE(0,0,2,0)),
                                  _mm_shuffle_epi32(mul31, _MM_SHUFFLE(0,0,2,0)));
    }

    SkNx operator | (const SkNx& o) const { return _mm_or_si128(fVec, o.fVec); }

    SkNx operator << (int bits) const { return _mm_slli_epi32(fVec, bits); }
    SkNx operator >> (int bits) const { return _mm_srai_epi32(fVec, bits); }

    int operator[](int k) const {
        SkASSERT(0 <= k && k < 4);
        union { __m128i v; int is[4]; } pun = {fVec};
        return pun.is[k&3];
    }

    __m128i fVec;
};

template <>
class SkNx<4, uint16_t> {
public:
    SkNx(const __m128i& vec) : fVec(vec) {}

    SkNx() {}
    SkNx(uint16_t val) : fVec(_mm_set1_epi16(val)) {}
    static SkNx Load(const void* ptr) { return _mm_loadl_epi64((const __m128i*)ptr); }
    SkNx(uint16_t a, uint16_t b, uint16_t c, uint16_t d) : fVec(_mm_setr_epi16(a,b,c,d,0,0,0,0)) {}

    void store(void* ptr) const { _mm_storel_epi64((__m128i*)ptr, fVec); }

    SkNx operator + (const SkNx& o) const { return _mm_add_epi16(fVec, o.fVec); }
    SkNx operator - (const SkNx& o) const { return _mm_sub_epi16(fVec, o.fVec); }
    SkNx operator * (const SkNx& o) const { return _mm_mullo_epi16(fVec, o.fVec); }

    SkNx operator << (int bits) const { return _mm_slli_epi16(fVec, bits); }
    SkNx operator >> (int bits) const { return _mm_srli_epi16(fVec, bits); }

    uint16_t operator[](int k) const {
        SkASSERT(0 <= k && k < 4);
        union { __m128i v; uint16_t us[8]; } pun = {fVec};
        return pun.us[k&3];
    }

    __m128i fVec;
};

template <>
class SkNx<8, uint16_t> {
public:
    SkNx(const __m128i& vec) : fVec(vec) {}

    SkNx() {}
    SkNx(uint16_t val) : fVec(_mm_set1_epi16(val)) {}
    static SkNx Load(const void* ptr) { return _mm_loadu_si128((const __m128i*)ptr); }
    SkNx(uint16_t a, uint16_t b, uint16_t c, uint16_t d,
         uint16_t e, uint16_t f, uint16_t g, uint16_t h) : fVec(_mm_setr_epi16(a,b,c,d,e,f,g,h)) {}

    void store(void* ptr) const { _mm_storeu_si128((__m128i*)ptr, fVec); }

    SkNx operator + (const SkNx& o) const { return _mm_add_epi16(fVec, o.fVec); }
    SkNx operator - (const SkNx& o) const { return _mm_sub_epi16(fVec, o.fVec); }
    SkNx operator * (const SkNx& o) const { return _mm_mullo_epi16(fVec, o.fVec); }

    SkNx operator << (int bits) const { return _mm_slli_epi16(fVec, bits); }
    SkNx operator >> (int bits) const { return _mm_srli_epi16(fVec, bits); }

    static SkNx Min(const SkNx& a, const SkNx& b) {
        // No unsigned _mm_min_epu16, so we'll shift into a space where we can use the
        // signed version, _mm_min_epi16, then shift back.
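        // e.g. a = 0x0001, b = 0xFFFF: biased by 0x8000 they become 0x8001
        // (-32767) and 0x7FFF (+32767); the signed min picks 0x8001, and the
        // un-bias returns 0x0001, the correct unsigned min.
        // (The byte-wise add/sub below are fine: 0x8000 is 0x00 in the low byte
        // and 0x80 in the high byte, so they just flip each lane's top bit, and
        // epi8 ops can't carry across lanes anyway.)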
        const uint16_t top = 0x8000; // Keep this separate from _mm_set1_epi16 or MSVC will whine.
        const __m128i top_8x = _mm_set1_epi16(top);
        return _mm_add_epi8(top_8x, _mm_min_epi16(_mm_sub_epi8(a.fVec, top_8x),
                                                  _mm_sub_epi8(b.fVec, top_8x)));
    }

    SkNx thenElse(const SkNx& t, const SkNx& e) const {
        return _mm_or_si128(_mm_and_si128   (fVec, t.fVec),
                            _mm_andnot_si128(fVec, e.fVec));
    }

    uint16_t operator[](int k) const {
        SkASSERT(0 <= k && k < 8);
        union { __m128i v; uint16_t us[8]; } pun = {fVec};
        return pun.us[k&7];
    }

    __m128i fVec;
};

template <>
class SkNx<4, uint8_t> {
public:
    SkNx() {}
    SkNx(const __m128i& vec) : fVec(vec) {}
    SkNx(uint8_t a, uint8_t b, uint8_t c, uint8_t d)
        : fVec(_mm_setr_epi8(a,b,c,d, 0,0,0,0, 0,0,0,0, 0,0,0,0)) {}

    static SkNx Load(const void* ptr) { return _mm_cvtsi32_si128(*(const int*)ptr); }
    void store(void* ptr) const { *(int*)ptr = _mm_cvtsi128_si32(fVec); }

    uint8_t operator[](int k) const {
        SkASSERT(0 <= k && k < 4);
        union { __m128i v; uint8_t us[16]; } pun = {fVec};
        return pun.us[k&3];
    }

    // TODO as needed

    __m128i fVec;
};

template <>
class SkNx<16, uint8_t> {
public:
    SkNx(const __m128i& vec) : fVec(vec) {}

    SkNx() {}
    SkNx(uint8_t val) : fVec(_mm_set1_epi8(val)) {}
    static SkNx Load(const void* ptr) { return _mm_loadu_si128((const __m128i*)ptr); }
    SkNx(uint8_t a, uint8_t b, uint8_t c, uint8_t d,
         uint8_t e, uint8_t f, uint8_t g, uint8_t h,
         uint8_t i, uint8_t j, uint8_t k, uint8_t l,
         uint8_t m, uint8_t n, uint8_t o, uint8_t p)
        : fVec(_mm_setr_epi8(a,b,c,d, e,f,g,h, i,j,k,l, m,n,o,p)) {}

    void store(void* ptr) const { _mm_storeu_si128((__m128i*)ptr, fVec); }

    SkNx saturatedAdd(const SkNx& o) const { return _mm_adds_epu8(fVec, o.fVec); }

    SkNx operator + (const SkNx& o) const { return _mm_add_epi8(fVec, o.fVec); }
    SkNx operator - (const SkNx& o) const { return _mm_sub_epi8(fVec, o.fVec); }

    static SkNx Min(const SkNx& a, const SkNx& b) { return _mm_min_epu8(a.fVec, b.fVec); }
    SkNx operator < (const SkNx& o) const {
        // There's no unsigned _mm_cmplt_epu8, so we flip the sign bits then use a signed compare.
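        // e.g. 0x01 < 0xFF: flipped they become 0x81 (-127) and 0x7F (+127),
        // and the signed compare -127 < 127 gets it right.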
        auto flip = _mm_set1_epi8(char(0x80));
        return _mm_cmplt_epi8(_mm_xor_si128(flip, fVec), _mm_xor_si128(flip, o.fVec));
    }

    uint8_t operator[](int k) const {
        SkASSERT(0 <= k && k < 16);
        union { __m128i v; uint8_t us[16]; } pun = {fVec};
        return pun.us[k&15];
    }

    SkNx thenElse(const SkNx& t, const SkNx& e) const {
        return _mm_or_si128(_mm_and_si128   (fVec, t.fVec),
                            _mm_andnot_si128(fVec, e.fVec));
    }

    __m128i fVec;
};

template<> /*static*/ inline Sk4f SkNx_cast<float, int>(const Sk4i& src) {
    return _mm_cvtepi32_ps(src.fVec);
}

template <> /*static*/ inline Sk4i SkNx_cast<int, float>(const Sk4f& src) {
    return _mm_cvttps_epi32(src.fVec);
}

template<> /*static*/ inline Sk4h SkNx_cast<uint16_t, float>(const Sk4f& src) {
    auto _32 = _mm_cvttps_epi32(src.fVec);
    // Ideally we'd use _mm_packus_epi32 here. But that's SSE4.1+.
#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
    // With SSSE3, we can just shuffle the low 2 bytes from each lane right into place.
    const int _ = ~0;
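    // (_mm_shuffle_epi8 writes a zero wherever the index byte has its high bit
    // set, so ~0 zeroes the upper half of the result.)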
    return _mm_shuffle_epi8(_32, _mm_setr_epi8(0,1, 4,5, 8,9, 12,13, _,_,_,_,_,_,_,_));
#else
    // With SSE2, we have to emulate _mm_packus_epi32 with _mm_packs_epi32:
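    // bias down into signed 16-bit range, pack with signed saturation, then
    // add the bias back in 16-bit space.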
    _32 = _mm_sub_epi32(_32, _mm_set1_epi32((int)0x00008000));
    return _mm_add_epi16(_mm_packs_epi32(_32, _32), _mm_set1_epi16((short)0x8000));
#endif
}

template<> /*static*/ inline Sk4b SkNx_cast<uint8_t, float>(const Sk4f& src) {
    auto _32 = _mm_cvttps_epi32(src.fVec);
#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
    const int _ = ~0;
    return _mm_shuffle_epi8(_32, _mm_setr_epi8(0,4,8,12, _,_,_,_, _,_,_,_, _,_,_,_));
#else
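    // Two rounds of the 16-bit pack look odd on 32-bit lanes, but for values in
    // [0,255] (the expected range here) the high half of each lane is zero, so
    // the low bytes just shuffle into place.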
    auto _16 = _mm_packus_epi16(_32, _32);
    return _mm_packus_epi16(_16, _16);
#endif
}

template<> /*static*/ inline Sk4f SkNx_cast<float, uint8_t>(const Sk4b& src) {
#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
    const int _ = ~0;
    auto _32 = _mm_shuffle_epi8(src.fVec, _mm_setr_epi8(0,_,_,_, 1,_,_,_, 2,_,_,_, 3,_,_,_));
#else
    auto _16 = _mm_unpacklo_epi8(src.fVec, _mm_setzero_si128()),
         _32 = _mm_unpacklo_epi16(_16, _mm_setzero_si128());
#endif
    return _mm_cvtepi32_ps(_32);
}

template<> /*static*/ inline Sk4f SkNx_cast<float, uint16_t>(const Sk4h& src) {
    auto _32 = _mm_unpacklo_epi16(src.fVec, _mm_setzero_si128());
    return _mm_cvtepi32_ps(_32);
}

template<> /*static*/ inline Sk16b SkNx_cast<uint8_t, float>(const Sk16f& src) {
    Sk8f ab, cd;
    SkNx_split(src, &ab, &cd);

    Sk4f a,b,c,d;
    SkNx_split(ab, &a, &b);
    SkNx_split(cd, &c, &d);

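    // Same nested _mm_packus_epi16 trick as the Sk4b cast above; it relies on
    // the floats already being in [0,255].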
    return _mm_packus_epi16(_mm_packus_epi16(_mm_cvttps_epi32(a.fVec),
                                             _mm_cvttps_epi32(b.fVec)),
                            _mm_packus_epi16(_mm_cvttps_epi32(c.fVec),
                                             _mm_cvttps_epi32(d.fVec)));
}

template<> /*static*/ inline Sk4h SkNx_cast<uint16_t, uint8_t>(const Sk4b& src) {
    return _mm_unpacklo_epi8(src.fVec, _mm_setzero_si128());
}

template<> /*static*/ inline Sk4b SkNx_cast<uint8_t, uint16_t>(const Sk4h& src) {
    return _mm_packus_epi16(src.fVec, src.fVec);
}

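// Usage sketch (assumes the generic SkNx front-end that typedefs Sk4f, Sk4b,
// etc.; none of that is defined in this file):
//
//     Sk4f v = Sk4f::Load(src);                  // four unaligned floats
//     Sk4b b = SkNx_cast<uint8_t>(v * 255.0f);   // scale, then pack to bytes
//     b.store(dst);                              // writes 4 bytes
//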
#endif//SkNx_sse_DEFINED