// It is important _not_ to put header guards here.
// This file will be intentionally included three times.
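//
// (As implied by the #if/#elif/#else structure below: the first pass, with SK4X_PREAMBLE defined,
// emits shared types and helpers; the second, with SK4X_PRIVATE defined, emits class members; the
// third emits the out-of-class method definitions.)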

// Useful reading:
//   https://software.intel.com/sites/landingpage/IntrinsicsGuide/

#include "SkTypes.h"  // Keep this before any #ifdef for skbug.com/3362

#if defined(SK4X_PREAMBLE)
    // Code in this file may assume SSE and SSE2.
    #include <emmintrin.h>

    // It must check for later instruction sets.
    #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
        #include <immintrin.h>
    #endif

    // A little bit of template metaprogramming to map
    // float to __m128 and int32_t to __m128i.
    template <typename T> struct SkScalarToSIMD;
    template <> struct SkScalarToSIMD<float>   { typedef __m128  Type; };
    template <> struct SkScalarToSIMD<int32_t> { typedef __m128i Type; };

    // These are all free, zero instructions.
    // MSVC insists we use _mm_castA_B(a) instead of (B)a.
    static inline __m128  as_4f(__m128i v) { return _mm_castsi128_ps(v); }
    static inline __m128  as_4f(__m128  v) { return v;                   }
    static inline __m128i as_4i(__m128i v) { return v;                   }
    static inline __m128i as_4i(__m128  v) { return _mm_castps_si128(v); }

#elif defined(SK4X_PRIVATE)
    // It'd be slightly faster to call _mm_cmpeq_epi32() on an uninitialized register and itself,
    // but that has caused hard-to-debug issues when compilers recognize dealing with uninitialized
    // memory as undefined behavior that can be optimized away.
    static __m128i True() { return _mm_set1_epi8(~0); }
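    // (_mm_set1_epi8(~0) broadcasts 0xFF to every byte, i.e. all 128 bits set: "true" in every lane.)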

    // Leaving these implicit makes the rest of the code below a bit less noisy to read.
    Sk4x(__m128i);
    Sk4x(__m128);

    Sk4x andNot(const Sk4x&) const;

    typename SkScalarToSIMD<T>::Type fVec;

#else  // Method definitions.

// Helps to get these in before anything else.
template <> inline Sk4f::Sk4x(__m128i v) : fVec(as_4f(v)) {}
template <> inline Sk4f::Sk4x(__m128  v) : fVec(      v ) {}
template <> inline Sk4i::Sk4x(__m128i v) : fVec(      v ) {}
template <> inline Sk4i::Sk4x(__m128  v) : fVec(as_4i(v)) {}

// Next, methods whose implementation is the same for Sk4f and Sk4i.
template <typename T> Sk4x<T>::Sk4x() {}
template <typename T> Sk4x<T>::Sk4x(const Sk4x& other) { *this = other; }
template <typename T> Sk4x<T>& Sk4x<T>::operator=(const Sk4x<T>& other) {
    fVec = other.fVec;
    return *this;
}

// We pun in these _mm_shuffle_* methods a little to use the fastest / most available methods.
// They're all bit-preserving operations so it shouldn't matter.

template <typename T>
Sk4x<T> Sk4x<T>::aacc() const { return _mm_shuffle_epi32(as_4i(fVec), _MM_SHUFFLE(2,2,0,0)); }
template <typename T>
Sk4x<T> Sk4x<T>::bbdd() const { return _mm_shuffle_epi32(as_4i(fVec), _MM_SHUFFLE(3,3,1,1)); }
template <typename T>
Sk4x<T> Sk4x<T>::badc() const { return _mm_shuffle_epi32(as_4i(fVec), _MM_SHUFFLE(2,3,0,1)); }
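// (a,b,c,d name lanes 0..3, so e.g. aacc() returns {a,a,c,c} and badc() returns {b,a,d,c}.)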

// Now we'll write all Sk4f specific methods.  This M() macro will remove some noise.
#define M(...) template <> inline __VA_ARGS__ Sk4f::

M() Sk4x(float v) : fVec(_mm_set1_ps(v)) {}
M() Sk4x(float a, float b, float c, float d) : fVec(_mm_set_ps(d,c,b,a)) {}
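// (_mm_set_ps takes its arguments from the highest lane down, hence the reversed (d,c,b,a) order.)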

M(Sk4f) Load       (const float fs[4]) { return _mm_loadu_ps(fs); }
M(Sk4f) LoadAligned(const float fs[4]) { return _mm_load_ps (fs); }

M(void) store       (float fs[4]) const { _mm_storeu_ps(fs, fVec); }
M(void) storeAligned(float fs[4]) const { _mm_store_ps (fs, fVec); }

template <> M(Sk4i) reinterpret<Sk4i>() const { return as_4i(fVec); }

// cvttps truncates, same as (int) when positive.
template <> M(Sk4i) cast<Sk4i>() const { return _mm_cvttps_epi32(fVec); }
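// (Truncation is toward zero, e.g. 3.7f -> 3 and -3.7f -> -3.)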

// We're going to try a little experiment here and skip allTrue(), anyTrue(), and bit-manipulators
// for Sk4f.  Code that calls them probably does so accidentally.
// Ask mtklein to fill these in if you really need them.

M(Sk4f) add     (const Sk4f& o) const { return _mm_add_ps(fVec, o.fVec); }
M(Sk4f) subtract(const Sk4f& o) const { return _mm_sub_ps(fVec, o.fVec); }
M(Sk4f) multiply(const Sk4f& o) const { return _mm_mul_ps(fVec, o.fVec); }
M(Sk4f) divide  (const Sk4f& o) const { return _mm_div_ps(fVec, o.fVec); }

M(Sk4f) rsqrt() const { return _mm_rsqrt_ps(fVec); }
M(Sk4f)  sqrt() const { return  _mm_sqrt_ps(fVec); }

M(Sk4i) equal           (const Sk4f& o) const { return _mm_cmpeq_ps (fVec, o.fVec); }
M(Sk4i) notEqual        (const Sk4f& o) const { return _mm_cmpneq_ps(fVec, o.fVec); }
M(Sk4i) lessThan        (const Sk4f& o) const { return _mm_cmplt_ps (fVec, o.fVec); }
M(Sk4i) greaterThan     (const Sk4f& o) const { return _mm_cmpgt_ps (fVec, o.fVec); }
M(Sk4i) lessThanEqual   (const Sk4f& o) const { return _mm_cmple_ps (fVec, o.fVec); }
M(Sk4i) greaterThanEqual(const Sk4f& o) const { return _mm_cmpge_ps (fVec, o.fVec); }

M(Sk4f) Min(const Sk4f& a, const Sk4f& b) { return _mm_min_ps(a.fVec, b.fVec); }
M(Sk4f) Max(const Sk4f& a, const Sk4f& b) { return _mm_max_ps(a.fVec, b.fVec); }

// Now we'll write all the Sk4i specific methods.  Same deal for M().
#undef M
#define M(...) template <> inline __VA_ARGS__ Sk4i::

M() Sk4x(int32_t v) : fVec(_mm_set1_epi32(v)) {}
M() Sk4x(int32_t a, int32_t b, int32_t c, int32_t d) : fVec(_mm_set_epi32(d,c,b,a)) {}

M(Sk4i) Load       (const int32_t is[4]) { return _mm_loadu_si128((const __m128i*)is); }
M(Sk4i) LoadAligned(const int32_t is[4]) { return _mm_load_si128 ((const __m128i*)is); }

M(void) store       (int32_t is[4]) const { _mm_storeu_si128((__m128i*)is, fVec); }
M(void) storeAligned(int32_t is[4]) const { _mm_store_si128 ((__m128i*)is, fVec); }

template <>
M(Sk4f) reinterpret<Sk4f>() const { return as_4f(fVec); }

template <>
M(Sk4f) cast<Sk4f>() const { return _mm_cvtepi32_ps(fVec); }
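// (reinterpret<>() above keeps the bit pattern; cast<>() converts the integer values to float.)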

M(bool) allTrue() const { return 0xf == _mm_movemask_ps(as_4f(fVec)); }
M(bool) anyTrue() const { return 0x0 != _mm_movemask_ps(as_4f(fVec)); }
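// (_mm_movemask_ps gathers the top bit of each lane into the low 4 bits, so 0xf means all four lanes are true.)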

M(Sk4i) bitNot() const { return _mm_xor_si128(fVec, True()); }
M(Sk4i) bitAnd(const Sk4i& o) const { return _mm_and_si128(fVec, o.fVec); }
M(Sk4i) bitOr (const Sk4i& o) const { return _mm_or_si128 (fVec, o.fVec); }

M(Sk4i) equal           (const Sk4i& o) const { return _mm_cmpeq_epi32(fVec, o.fVec); }
M(Sk4i) lessThan        (const Sk4i& o) const { return _mm_cmplt_epi32(fVec, o.fVec); }
M(Sk4i) greaterThan     (const Sk4i& o) const { return _mm_cmpgt_epi32(fVec, o.fVec); }
M(Sk4i) notEqual        (const Sk4i& o) const { return this->       equal(o).bitNot(); }
M(Sk4i) lessThanEqual   (const Sk4i& o) const { return this->greaterThan(o).bitNot(); }
M(Sk4i) greaterThanEqual(const Sk4i& o) const { return this->   lessThan(o).bitNot(); }
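// (SSE2 only provides ==, <, and > for 32-bit ints; the last three are synthesized by inverting the complementary compare.)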

M(Sk4i) add     (const Sk4i& o) const { return _mm_add_epi32(fVec, o.fVec); }
M(Sk4i) subtract(const Sk4i& o) const { return _mm_sub_epi32(fVec, o.fVec); }

// SSE doesn't have integer division.  Let's see how far we can get without Sk4i::divide().

// Sk4i's multiply(), Min(), and Max() all improve significantly with SSE4.1.
#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
    M(Sk4i) multiply(const Sk4i& o) const { return _mm_mullo_epi32(fVec, o.fVec); }
    M(Sk4i) Min(const Sk4i& a, const Sk4i& b) { return _mm_min_epi32(a.fVec, b.fVec); }
    M(Sk4i) Max(const Sk4i& a, const Sk4i& b) { return _mm_max_epi32(a.fVec, b.fVec); }
#else
    M(Sk4i) multiply(const Sk4i& o) const {
        // First 2 32->64 bit multiplies: _mm_mul_epu32 only works on lanes 0 and 2,
        // so shift right by 4 bytes to reach lanes 1 and 3.
        __m128i mul02 = _mm_mul_epu32(fVec, o.fVec),
                mul13 = _mm_mul_epu32(_mm_srli_si128(fVec, 4), _mm_srli_si128(o.fVec, 4));
        // Now recombine the low 32 bits of the four products.
        return _mm_unpacklo_epi32(_mm_shuffle_epi32(mul02, _MM_SHUFFLE(0,0,2,0)),
                                  _mm_shuffle_epi32(mul13, _MM_SHUFFLE(0,0,2,0)));
    }

    // Note the argument order: _mm_andnot_si128(a, b) computes (~a) & b, so this is fVec & ~o.fVec.
    M(Sk4i) andNot(const Sk4i& o) const { return _mm_andnot_si128(o.fVec, fVec); }

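    // Branch-free select: (x & mask) | (y & ~mask) keeps x in lanes where mask is true, y where it's false.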
    M(Sk4i) Min(const Sk4i& a, const Sk4i& b) {
        Sk4i less = a.lessThan(b);
        return a.bitAnd(less).bitOr(b.andNot(less));
    }
    M(Sk4i) Max(const Sk4i& a, const Sk4i& b) {
        Sk4i less = a.lessThan(b);
        return b.bitAnd(less).bitOr(a.andNot(less));
    }
#endif

#undef M

#endif  // Method definitions.