Chromium Code Reviews

Side by Side Diff: src/core/Sk4x_sse.h

Issue 1021713004: Reorg Sk4x to match the pattern of SkPMFloat. (Closed)
Base URL: https://skia.googlesource.com/skia@master
Patch Set: Created 5 years, 9 months ago
// It is important _not_ to put header guards here.
// This file will be intentionally included three times.

// Useful reading:
// https://software.intel.com/sites/landingpage/IntrinsicsGuide/

#include "SkTypes.h"  // Keep this before any #ifdef for skbug.com/3362

#if defined(SK4X_PREAMBLE)
    // Code in this file may assume SSE and SSE2.
    #include <emmintrin.h>

    // It must check for later instruction sets.
    #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
        #include <immintrin.h>
    #endif

    // A little bit of template metaprogramming to map
    // float to __m128 and int32_t to __m128i.
    template <typename T> struct SkScalarToSIMD;
    template <> struct SkScalarToSIMD<float>   { typedef __m128  Type; };
    template <> struct SkScalarToSIMD<int32_t> { typedef __m128i Type; };
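    // For example, SkScalarToSIMD<float>::Type names __m128, so code below can
    // declare its storage generically as typename SkScalarToSIMD<T>::Type.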

    // These are all free, zero instructions.
    // MSVC insists we use _mm_castA_B(a) instead of (B)a.
    static inline __m128  as_4f(__m128i v) { return _mm_castsi128_ps(v); }
    static inline __m128  as_4f(__m128  v) { return v;                   }
    static inline __m128i as_4i(__m128i v) { return v;                   }
    static inline __m128i as_4i(__m128  v) { return _mm_castps_si128(v); }
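    // E.g. as_4i(as_4f(v)) round-trips v without emitting any instructions;
    // the casts only change how the compiler types the same 128-bit register.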

#elif defined(SK4X_PRIVATE)
    // It'd be slightly faster to call _mm_cmpeq_epi32() on an uninitialized register and itself,
    // but that has caused hard-to-debug issues when compilers recognize dealing with uninitialized
    // memory as undefined behavior that can be optimized away.
    static __m128i True() { return _mm_set1_epi8(~0); }
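    // For reference, the rejected "faster" version sketched above:
    //   __m128i u;                     // deliberately uninitialized
    //   return _mm_cmpeq_epi32(u, u);  // UB: the compiler may optimize this away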

    // Leaving these implicit makes the rest of the code below a bit less noisy to read.
    Sk4x(__m128i);
    Sk4x(__m128);

    Sk4x andNot(const Sk4x&) const;

    typename SkScalarToSIMD<T>::Type fVec;

#else  // Method definitions.

// Helps to get these in before anything else.
template <> inline Sk4f::Sk4x(__m128i v) : fVec(as_4f(v)) {}
template <> inline Sk4f::Sk4x(__m128  v) : fVec(      v ) {}
template <> inline Sk4i::Sk4x(__m128i v) : fVec(      v ) {}
template <> inline Sk4i::Sk4x(__m128  v) : fVec(as_4i(v)) {}

// Next, methods whose implementation is the same for Sk4f and Sk4i.
template <typename T> Sk4x<T>::Sk4x() {}
template <typename T> Sk4x<T>::Sk4x(const Sk4x& other) { *this = other; }
template <typename T> Sk4x<T>& Sk4x<T>::operator=(const Sk4x<T>& other) {
    fVec = other.fVec;
    return *this;
}

// We pun in these _mm_shuffle_* methods a little to use the fastest / most available methods.
// They're all bit-preserving operations so it shouldn't matter.

template <typename T>
Sk4x<T> Sk4x<T>::zwxy() const { return _mm_shuffle_epi32(as_4i(fVec), _MM_SHUFFLE(1,0,3,2)); }

template <typename T>
Sk4x<T> Sk4x<T>::XYAB(const Sk4x<T>& a, const Sk4x<T>& b) {
    return _mm_movelh_ps(as_4f(a.fVec), as_4f(b.fVec));
}

template <typename T>
Sk4x<T> Sk4x<T>::ZWCD(const Sk4x<T>& a, const Sk4x<T>& b) {
    return _mm_movehl_ps(as_4f(b.fVec), as_4f(a.fVec));
}
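
// Concretely, if a = {X,Y,Z,W} and b = {A,B,C,D}:
//   a.zwxy()  == {Z,W,X,Y}
//   XYAB(a,b) == {X,Y,A,B}   (movelh: the low halves of a and b)
//   ZWCD(a,b) == {Z,W,C,D}   (movehl: the high halves of a and b)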

// Now we'll write all the Sk4f-specific methods. This M() macro will remove some noise.
#define M(...) template <> inline __VA_ARGS__ Sk4f::
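// So, for instance, "M(Sk4f) sqrt() const {...}" expands to
// "template <> inline Sk4f Sk4f::sqrt() const {...}".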

M() Sk4x(float v) : fVec(_mm_set1_ps(v)) {}
M() Sk4x(float a, float b, float c, float d) : fVec(_mm_set_ps(d,c,b,a)) {}

M(Sk4f) Load       (const float fs[4]) { return _mm_loadu_ps(fs); }
M(Sk4f) LoadAligned(const float fs[4]) { return _mm_load_ps (fs); }

M(void) store       (float fs[4]) const { _mm_storeu_ps(fs, fVec); }
M(void) storeAligned(float fs[4]) const { _mm_store_ps (fs, fVec); }

template <>
M(Sk4i) reinterpret<Sk4i>() const { return as_4i(fVec); }

template <>
M(Sk4i) cast<Sk4i>() const { return _mm_cvtps_epi32(fVec); }
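// Note: _mm_cvtps_epi32 rounds using the current MXCSR rounding mode,
// round-to-nearest-even by default, unlike a C cast, which truncates.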

// We're going to try a little experiment here and skip allTrue(), anyTrue(), and bit-manipulators
// for Sk4f. Code that calls them probably does so accidentally.
// Ask mtklein to fill these in if you really need them.

M(Sk4f) add     (const Sk4f& o) const { return _mm_add_ps(fVec, o.fVec); }
M(Sk4f) subtract(const Sk4f& o) const { return _mm_sub_ps(fVec, o.fVec); }
M(Sk4f) multiply(const Sk4f& o) const { return _mm_mul_ps(fVec, o.fVec); }
M(Sk4f) divide  (const Sk4f& o) const { return _mm_div_ps(fVec, o.fVec); }

M(Sk4f) rsqrt() const { return _mm_rsqrt_ps(fVec); }
M(Sk4f)  sqrt() const { return  _mm_sqrt_ps(fVec); }

M(Sk4i) equal           (const Sk4f& o) const { return _mm_cmpeq_ps (fVec, o.fVec); }
M(Sk4i) notEqual        (const Sk4f& o) const { return _mm_cmpneq_ps(fVec, o.fVec); }
M(Sk4i) lessThan        (const Sk4f& o) const { return _mm_cmplt_ps (fVec, o.fVec); }
M(Sk4i) greaterThan     (const Sk4f& o) const { return _mm_cmpgt_ps (fVec, o.fVec); }
M(Sk4i) lessThanEqual   (const Sk4f& o) const { return _mm_cmple_ps (fVec, o.fVec); }
M(Sk4i) greaterThanEqual(const Sk4f& o) const { return _mm_cmpge_ps (fVec, o.fVec); }
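// The _mm_cmp*_ps intrinsics return all-1s or all-0s in each lane, which is
// why these float comparisons can hand their results straight to Sk4i.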

M(Sk4f) Min(const Sk4f& a, const Sk4f& b) { return _mm_min_ps(a.fVec, b.fVec); }
M(Sk4f) Max(const Sk4f& a, const Sk4f& b) { return _mm_max_ps(a.fVec, b.fVec); }

// Now we'll write all the Sk4i-specific methods. Same deal for M().
#undef M
#define M(...) template <> inline __VA_ARGS__ Sk4i::

M() Sk4x(int32_t v) : fVec(_mm_set1_epi32(v)) {}
M() Sk4x(int32_t a, int32_t b, int32_t c, int32_t d) : fVec(_mm_set_epi32(d,c,b,a)) {}

M(Sk4i) Load       (const int32_t is[4]) { return _mm_loadu_si128((const __m128i*)is); }
M(Sk4i) LoadAligned(const int32_t is[4]) { return _mm_load_si128 ((const __m128i*)is); }

M(void) store       (int32_t is[4]) const { _mm_storeu_si128((__m128i*)is, fVec); }
M(void) storeAligned(int32_t is[4]) const { _mm_store_si128 ((__m128i*)is, fVec); }

template <>
M(Sk4f) reinterpret<Sk4f>() const { return as_4f(fVec); }

template <>
M(Sk4f) cast<Sk4f>() const { return _mm_cvtepi32_ps(fVec); }

M(bool) allTrue() const { return 0xf == _mm_movemask_ps(as_4f(fVec)); }
M(bool) anyTrue() const { return 0x0 != _mm_movemask_ps(as_4f(fVec)); }
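// movemask packs the top (sign) bit of each lane into the low 4 bits of an int.
// Since comparison lanes are all-1s or all-0s, 0xf means every lane was true and
// a nonzero mask means at least one was.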

M(Sk4i) bitNot() const { return _mm_xor_si128(fVec, True()); }
M(Sk4i) bitAnd(const Sk4i& o) const { return _mm_and_si128(fVec, o.fVec); }
M(Sk4i) bitOr (const Sk4i& o) const { return _mm_or_si128 (fVec, o.fVec); }

M(Sk4i) equal           (const Sk4i& o) const { return _mm_cmpeq_epi32 (fVec, o.fVec); }
M(Sk4i) lessThan        (const Sk4i& o) const { return _mm_cmplt_epi32 (fVec, o.fVec); }
M(Sk4i) greaterThan     (const Sk4i& o) const { return _mm_cmpgt_epi32 (fVec, o.fVec); }
M(Sk4i) notEqual        (const Sk4i& o) const { return this->       equal(o).bitNot(); }
M(Sk4i) lessThanEqual   (const Sk4i& o) const { return this->greaterThan(o).bitNot(); }
M(Sk4i) greaterThanEqual(const Sk4i& o) const { return this->   lessThan(o).bitNot(); }
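// SSE2 only provides eq/lt/gt for 32-bit ints, so the other three comparisons
// are derived by negating their complements, e.g. (a <= b) == !(a > b).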

M(Sk4i) add     (const Sk4i& o) const { return _mm_add_epi32(fVec, o.fVec); }
M(Sk4i) subtract(const Sk4i& o) const { return _mm_sub_epi32(fVec, o.fVec); }

// SSE doesn't have integer division. Let's see how far we can get without Sk4i::divide().

// Sk4i's multiply(), Min(), and Max() all improve significantly with SSE4.1.
#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
    M(Sk4i) multiply(const Sk4i& o) const { return _mm_mullo_epi32(fVec, o.fVec); }
    M(Sk4i) Min(const Sk4i& a, const Sk4i& b) { return _mm_min_epi32(a.fVec, b.fVec); }
    M(Sk4i) Max(const Sk4i& a, const Sk4i& b) { return _mm_max_epi32(a.fVec, b.fVec); }
#else
    M(Sk4i) multiply(const Sk4i& o) const {
        // First do 2 32->64 bit multiplies: lanes 0 and 2, then lanes 1 and 3.
        __m128i mul02 = _mm_mul_epu32(fVec, o.fVec),
                mul13 = _mm_mul_epu32(_mm_srli_si128(fVec, 4), _mm_srli_si128(o.fVec, 4));
        // Now recombine the low 32 bits of the four products.
        return _mm_unpacklo_epi32(_mm_shuffle_epi32(mul02, _MM_SHUFFLE(0,0,2,0)),
                                  _mm_shuffle_epi32(mul13, _MM_SHUFFLE(0,0,2,0)));
    }
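    // Keeping only the low 32 bits of each 64-bit product is exactly what
    // _mm_mullo_epi32 does on SSE4.1, so both paths agree (modulo overflow wrap).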

    M(Sk4i) andNot(const Sk4i& o) const { return _mm_andnot_si128(o.fVec, fVec); }

    M(Sk4i) Min(const Sk4i& a, const Sk4i& b) {
        Sk4i less = a.lessThan(b);
        return a.bitAnd(less).bitOr(b.andNot(less));
    }
    M(Sk4i) Max(const Sk4i& a, const Sk4i& b) {
        Sk4i less = a.lessThan(b);
        return b.bitAnd(less).bitOr(a.andNot(less));
    }
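    // Both use the standard SSE2 select idiom: (mask & x) | (~mask & y) picks x
    // where the comparison was true and y where it was false.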
#endif

#undef M

#endif  // Method definitions.