OLD | NEW |
1 // It is important _not_ to put header guards here. | 1 // It is important _not_ to put header guards here. |
2 // This file will be intentionally included three times. | 2 // This file will be intentionally included three times. |
3 | 3 |
4 // Useful reading: | 4 // Useful reading: |
5 // https://software.intel.com/sites/landingpage/IntrinsicsGuide/ | 5 // https://software.intel.com/sites/landingpage/IntrinsicsGuide/ |
6 | 6 |
7 #if defined(SK4X_PREAMBLE) | 7 #if defined(SK4X_PREAMBLE) |
8 // Code in this file may assume SSE and SSE2. | 8 // Code in this file may assume SSE and SSE2. |
9 #include <emmintrin.h> | 9 #include <emmintrin.h> |
10 | 10 |
11 // It must check for later instruction sets. | 11 // It must check for later instruction sets. |
12 #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41 | 12 #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41 |
13 #include <immintrin.h> | 13 #include <immintrin.h> |
14 #endif | 14 #endif |
15 | 15 |
16 // A little bit of template metaprogramming to map | 16 // A little bit of template metaprogramming to map |
17 // float to __m128 and int32_t to __m128i. | 17 // float to __m128 and int32_t to __m128i. |
18 template <typename T> struct SkScalarToSIMD; | 18 template <typename T> struct SkScalarToSIMD; |
19 template <> struct SkScalarToSIMD<float> { typedef __m128 Type; }; | 19 template <> struct SkScalarToSIMD<float> { typedef __m128 Type; }; |
20 template <> struct SkScalarToSIMD<int32_t> { typedef __m128i Type; }; | 20 template <> struct SkScalarToSIMD<int32_t> { typedef __m128i Type; }; |
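A minimal sketch of how this mapping is consumed, assuming the usual shape of the Sk4x template (its real declaration lives in the shared header that includes this file, so the details here are illustrative only):

    template <typename T> class Sk4x {
        // ...
        typename SkScalarToSIMD<T>::Type fVec;  // __m128 for Sk4f, __m128i for Sk4i
    };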
21 | 21 |
22 // These are all free: they compile to zero instructions. | 22 // These are all free: they compile to zero instructions. |
23 // MSVC insists we use _mm_castA_B(a) instead of (B)a. | 23 // MSVC insists we use _mm_castA_B(a) instead of (B)a. |
24 static __m128 as_4f(__m128i v) { return _mm_castsi128_ps(v); } | 24 static inline __m128 as_4f(__m128i v) { return _mm_castsi128_ps(v); } |
25 static __m128 as_4f(__m128 v) { return v ; } | 25 static inline __m128 as_4f(__m128 v) { return v ; } |
26 static __m128i as_4i(__m128i v) { return v ; } | 26 static inline __m128i as_4i(__m128i v) { return v ; } |
27 static __m128i as_4i(__m128 v) { return _mm_castps_si128(v); } | 27 static inline __m128i as_4i(__m128 v) { return _mm_castps_si128(v); } |
28 | 28 |
29 #elif defined(SK4X_PRIVATE) | 29 #elif defined(SK4X_PRIVATE) |
30 // It'd be slightly faster to call _mm_cmpeq_epi32() on an uninitialized register and itself, | 30 // It'd be slightly faster to call _mm_cmpeq_epi32() on an uninitialized register and itself, |
31 // but that has caused hard-to-debug issues when compilers treat the use of uninitialized | 31 // but that has caused hard-to-debug issues when compilers treat the use of uninitialized |
32 // memory as undefined behavior that can be optimized away. | 32 // memory as undefined behavior that can be optimized away. |
33 static __m128i True() { return _mm_set1_epi8(~0); } | 33 static __m128i True() { return _mm_set1_epi8(~0); } |
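For illustration only, the faster-but-rejected variant would have looked roughly like this; it is avoided because reading an uninitialized register is undefined behavior:

    static __m128i True() {
        __m128i junk;                        // uninitialized: UB to read
        return _mm_cmpeq_epi32(junk, junk);  // "x == x gives all 1s"... unless
    }                                        // the optimizer deletes the UB entirely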
34 | 34 |
35 // Leaving these implicit makes the rest of the code below a bit less noisy to read. | 35 // Leaving these implicit makes the rest of the code below a bit less noisy to read. |
36 Sk4x(__m128i); | 36 Sk4x(__m128i); |
37 Sk4x(__m128); | 37 Sk4x(__m128); |
(...skipping 38 matching lines...)
76 #define M(...) template <> inline __VA_ARGS__ Sk4f:: | 76 #define M(...) template <> inline __VA_ARGS__ Sk4f:: |
77 | 77 |
78 M() Sk4x(float a, float b, float c, float d) : fVec(_mm_set_ps(d,c,b,a)) {} | 78 M() Sk4x(float a, float b, float c, float d) : fVec(_mm_set_ps(d,c,b,a)) {} |
79 | 79 |
80 M(Sk4f) Load (const float fs[4]) { return _mm_loadu_ps(fs); } | 80 M(Sk4f) Load (const float fs[4]) { return _mm_loadu_ps(fs); } |
81 M(Sk4f) LoadAligned(const float fs[4]) { return _mm_load_ps (fs); } | 81 M(Sk4f) LoadAligned(const float fs[4]) { return _mm_load_ps (fs); } |
82 | 82 |
83 M(void) store (float fs[4]) const { _mm_storeu_ps(fs, fVec); } | 83 M(void) store (float fs[4]) const { _mm_storeu_ps(fs, fVec); } |
84 M(void) storeAligned(float fs[4]) const { _mm_store_ps (fs, fVec); } | 84 M(void) storeAligned(float fs[4]) const { _mm_store_ps (fs, fVec); } |
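A usage note on the aligned variants (general SSE behavior, not specific to this change): _mm_load_ps/_mm_store_ps require a 16-byte-aligned pointer and fault otherwise, while the unaligned forms accept any address. Assuming a C++11 toolchain:

    alignas(16) float fs[4] = { 1, 2, 3, 4 };
    Sk4f v = Sk4f::LoadAligned(fs);  // OK: fs is 16-byte aligned
    v.storeAligned(fs);              // same alignment requirement
    // For arbitrarily-aligned pointers, use Load()/store() instead.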
85 | 85 |
86 template <> template <> | 86 template <> |
87 Sk4i Sk4f::reinterpret<Sk4i>() const { return as_4i(fVec); } | 87 M(Sk4i) reinterpret<Sk4i>() const { return as_4i(fVec); } |
88 | 88 |
89 template <> template <> | 89 template <> |
90 Sk4i Sk4f::cast<Sk4i>() const { return _mm_cvtps_epi32(fVec); } | 90 M(Sk4i) cast<Sk4i>() const { return _mm_cvtps_epi32(fVec); } |
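The distinction between these two is easy to mix up: cast() converts values, while reinterpret() merely relabels the bits (and costs zero instructions). A small example with assumed values:

    Sk4f f(1.5f, 1.5f, 1.5f, 1.5f);
    f.cast<Sk4i>();         // each lane becomes 2: _mm_cvtps_epi32 rounds to
                            // nearest (even) under the default rounding mode
    f.reinterpret<Sk4i>();  // each lane becomes 0x3FC00000, the raw
                            // IEEE-754 bit pattern of 1.5f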
91 | 91 |
92 // We're going to try a little experiment here and skip allTrue(), anyTrue(), and bit-manipulators | 92 // We're going to try a little experiment here and skip allTrue(), anyTrue(), and bit-manipulators |
93 // for Sk4f. Code that calls them probably does so accidentally. | 93 // for Sk4f. Code that calls them probably does so accidentally. |
94 // Ask mtklein to fill these in if you really need them. | 94 // Ask mtklein to fill these in if you really need them. |
95 | 95 |
96 M(Sk4f) add (const Sk4f& o) const { return _mm_add_ps(fVec, o.fVec); } | 96 M(Sk4f) add (const Sk4f& o) const { return _mm_add_ps(fVec, o.fVec); } |
97 M(Sk4f) subtract(const Sk4f& o) const { return _mm_sub_ps(fVec, o.fVec); } | 97 M(Sk4f) subtract(const Sk4f& o) const { return _mm_sub_ps(fVec, o.fVec); } |
98 M(Sk4f) multiply(const Sk4f& o) const { return _mm_mul_ps(fVec, o.fVec); } | 98 M(Sk4f) multiply(const Sk4f& o) const { return _mm_mul_ps(fVec, o.fVec); } |
99 M(Sk4f) divide (const Sk4f& o) const { return _mm_div_ps(fVec, o.fVec); } | 99 M(Sk4f) divide (const Sk4f& o) const { return _mm_div_ps(fVec, o.fVec); } |
100 | 100 |
(...skipping 12 matching lines...)
113 #define M(...) template <> inline __VA_ARGS__ Sk4i:: | 113 #define M(...) template <> inline __VA_ARGS__ Sk4i:: |
114 | 114 |
115 M() Sk4x(int32_t a, int32_t b, int32_t c, int32_t d) : fVec(_mm_set_epi32(d,c,b,a)) {} | 115 M() Sk4x(int32_t a, int32_t b, int32_t c, int32_t d) : fVec(_mm_set_epi32(d,c,b,a)) {} |
116 | 116 |
117 M(Sk4i) Load (const int32_t is[4]) { return _mm_loadu_si128((const __m128i*)is); } | 117 M(Sk4i) Load (const int32_t is[4]) { return _mm_loadu_si128((const __m128i*)is); } |
118 M(Sk4i) LoadAligned(const int32_t is[4]) { return _mm_load_si128 ((const __m128i*)is); } | 118 M(Sk4i) LoadAligned(const int32_t is[4]) { return _mm_load_si128 ((const __m128i*)is); } |
119 | 119 |
120 M(void) store (int32_t is[4]) const { _mm_storeu_si128((__m128i*)is, fVec); } | 120 M(void) store (int32_t is[4]) const { _mm_storeu_si128((__m128i*)is, fVec); } |
121 M(void) storeAligned(int32_t is[4]) const { _mm_store_si128 ((__m128i*)is, fVec); } | 121 M(void) storeAligned(int32_t is[4]) const { _mm_store_si128 ((__m128i*)is, fVec); } |
122 | 122 |
123 template <> template <> | 123 template <> |
124 Sk4f Sk4i::reinterpret<Sk4f>() const { return as_4f(fVec); } | 124 M(Sk4f) reinterpret<Sk4f>() const { return as_4f(fVec); } |
125 | 125 |
126 template <> template <> | 126 template <> |
127 Sk4f Sk4i::cast<Sk4f>() const { return _mm_cvtepi32_ps(fVec); } | 127 M(Sk4f) cast<Sk4f>() const { return _mm_cvtepi32_ps(fVec); } |
128 | 128 |
129 M(bool) allTrue() const { return 0xf == _mm_movemask_ps(as_4f(fVec)); } | 129 M(bool) allTrue() const { return 0xf == _mm_movemask_ps(as_4f(fVec)); } |
130 M(bool) anyTrue() const { return 0x0 != _mm_movemask_ps(as_4f(fVec)); } | 130 M(bool) anyTrue() const { return 0x0 != _mm_movemask_ps(as_4f(fVec)); } |
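_mm_movemask_ps gathers the sign (top) bit of each lane into the low four bits of an int, and the compares above produce all-1s/all-0s lanes, so the lane mask reads back directly. For example:

    // lanes (T, F, T, T) -> sign bits 1,0,1,1 -> movemask = 0b1101 = 0xd
    //   allTrue(): 0xf == 0xd -> false
    //   anyTrue(): 0x0 != 0xd -> true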
131 | 131 |
132 M(Sk4i) bitNot() const { return _mm_xor_si128(fVec, True()); } | 132 M(Sk4i) bitNot() const { return _mm_xor_si128(fVec, True()); } |
133 M(Sk4i) bitAnd(const Sk4i& o) const { return _mm_and_si128(fVec, o.fVec); } | 133 M(Sk4i) bitAnd(const Sk4i& o) const { return _mm_and_si128(fVec, o.fVec); } |
134 M(Sk4i) bitOr (const Sk4i& o) const { return _mm_or_si128 (fVec, o.fVec); } | 134 M(Sk4i) bitOr (const Sk4i& o) const { return _mm_or_si128 (fVec, o.fVec); } |
135 | 135 |
136 M(Sk4i) equal (const Sk4i& o) const { return _mm_cmpeq_epi32 (fVec, o.fVec); } | 136 M(Sk4i) equal (const Sk4i& o) const { return _mm_cmpeq_epi32 (fVec, o.fVec); } |
137 M(Sk4i) lessThan (const Sk4i& o) const { return _mm_cmplt_epi32 (fVec, o.fVec); } | 137 M(Sk4i) lessThan (const Sk4i& o) const { return _mm_cmplt_epi32 (fVec, o.fVec); } |
(...skipping 30 matching lines...)
168 } | 168 } |
169 M(Sk4i) Max(const Sk4i& a, const Sk4i& b) { | 169 M(Sk4i) Max(const Sk4i& a, const Sk4i& b) { |
170 Sk4i less = a.lessThan(b); | 170 Sk4i less = a.lessThan(b); |
171 return b.bitAnd(less).bitOr(a.andNot(less)); | 171 return b.bitAnd(less).bitOr(a.andNot(less)); |
172 } | 172 } |
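This is the classic SSE2 branchless select: lessThan() yields all-1s or all-0s per lane, so (assuming andNot(x) computes this & ~x, as _mm_andnot_si128 does) each lane of Max reduces to the scalar identity below:

    // max = less ? b : a  ==  (b & less) | (a & ~less)
    // On SSE4.1 this could collapse to a single _mm_max_epi32(a, b), but this
    // file may only assume SSE2, so the blend is spelled out by hand.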
173 #endif | 173 #endif |
174 | 174 |
175 #undef M | 175 #undef M |
176 | 176 |
177 #endif  // Method definitions. | 177 #endif  // Method definitions. |