// It is important _not_ to put header guards here.
// This file will be intentionally included three times.
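//
// The including header is expected to make three passes over this file: a
// first pass with SK4X_PREAMBLE defined for file-scope declarations, a second
// with SK4X_PRIVATE defined inside the Sk4x class body for private members,
// and a final pass with neither defined for the method definitions below.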

#include "SkTypes.h"  // Keep this before any #ifdef for skbug.com/3362

#if defined(SK4X_PREAMBLE)
    #include <arm_neon.h>

    // Template metaprogramming to map scalar types to vector types.
    template <typename T> struct SkScalarToSIMD;
    template <> struct SkScalarToSIMD<float>   { typedef float32x4_t Type; };
    template <> struct SkScalarToSIMD<int32_t> { typedef int32x4_t   Type; };

#elif defined(SK4X_PRIVATE)
    Sk4x(float32x4_t);
    Sk4x(int32x4_t);

    typename SkScalarToSIMD<T>::Type fVec;

#else

// Vector Constructors
//template <> inline Sk4f::Sk4x(int32x4_t v) : fVec(vcvtq_f32_s32(v)) {}
  template <> inline Sk4f::Sk4x(float32x4_t v) : fVec(v) {}
  template <> inline Sk4i::Sk4x(int32x4_t v) : fVec(v) {}
//template <> inline Sk4i::Sk4x(float32x4_t v) : fVec(vcvtq_s32_f32(v)) {}

// Generic Methods
template <typename T> Sk4x<T>::Sk4x() {}
template <typename T> Sk4x<T>::Sk4x(const Sk4x& other) { *this = other; }
template <typename T> Sk4x<T>& Sk4x<T>::operator=(const Sk4x<T>& other) {
    fVec = other.fVec;
    return *this;
}

// Sk4f Methods
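// M(ret) stamps out "template <> inline ret Sk4f::" for each specialized
// method below. The macro is variadic so that M() can be used for
// constructors, which have no return type.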
#define M(...) template <> inline __VA_ARGS__ Sk4f::

M() Sk4x(float v) : fVec(vdupq_n_f32(v)) {}
M() Sk4x(float a, float b, float c, float d) {
    // NEON lacks an intrinsic to make this easy. Avoid this constructor
    // unless it is absolutely necessary.

    // We use the set-lane intrinsics here. For floating point in particular,
    // the values are likely already in the right register file, so this may
    // be the best approach. It is not certain to be the fastest, though;
    // see the alternative sketched after this constructor.
    fVec = vsetq_lane_f32(a, fVec, 0);
    fVec = vsetq_lane_f32(b, fVec, 1);
    fVec = vsetq_lane_f32(c, fVec, 2);
    fVec = vsetq_lane_f32(d, fVec, 3);
}
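// An alternative worth benchmarking (a sketch, not measured): spill the four
// scalars to the stack and issue a single load.
//     const float v[4] = { a, b, c, d };
//     fVec = vld1q_f32(v);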

// As far as I can tell, it's not possible to provide an alignment hint to
// NEON using intrinsics. However, I think it is possible at the assembly
// level if we want to get into that.
// TODO: Write our own aligned load and store.
M(Sk4f) Load       (const float fs[4]) { return vld1q_f32(fs); }
M(Sk4f) LoadAligned(const float fs[4]) { return vld1q_f32(fs); }
M(void) store       (float fs[4]) const { vst1q_f32(fs, fVec); }
M(void) storeAligned(float fs[4]) const { vst1q_f32(fs, fVec); }

template <>
M(Sk4i) reinterpret<Sk4i>() const { return vreinterpretq_s32_f32(fVec); }

template <>
M(Sk4i) cast<Sk4i>() const { return vcvtq_s32_f32(fVec); }
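
// reinterpret<>() preserves the bit pattern while cast<>() converts the
// value: e.g. Sk4f(1.0f).reinterpret<Sk4i>() holds 0x3F800000 in each lane,
// but Sk4f(1.0f).cast<Sk4i>() holds 1.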

// We're going to skip allTrue(), anyTrue(), and bit-manipulators
// for Sk4f. Code that calls them probably does so accidentally.
// Ask msarett or mtklein to fill these in if you really need them.
M(Sk4f) add     (const Sk4f& o) const { return vaddq_f32(fVec, o.fVec); }
M(Sk4f) subtract(const Sk4f& o) const { return vsubq_f32(fVec, o.fVec); }
M(Sk4f) multiply(const Sk4f& o) const { return vmulq_f32(fVec, o.fVec); }

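// NEON has no float division intrinsic, so divide() refines a reciprocal
// estimate instead: vrecpeq_f32 produces a low-precision estimate of 1/x,
// and each vrecpsq_f32(est, x) step computes 2 - est*x, the Newton-Raphson
// correction factor that roughly doubles the estimate's precision when
// multiplied back in.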
M(Sk4f) divide (const Sk4f& o) const {
    float32x4_t est0 = vrecpeq_f32(o.fVec);
    float32x4_t est1 = vmulq_f32(vrecpsq_f32(est0, o.fVec), est0);
    float32x4_t est2 = vmulq_f32(vrecpsq_f32(est1, o.fVec), est1);
    return vmulq_f32(est2, fVec);
}

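// Reciprocal square root uses the same trick: vrsqrtsq_f32(x, est*est)
// computes (3 - x*est*est) / 2, the Newton-Raphson correction factor for
// 1/sqrt(x). ARMv7 NEON has no vector sqrt intrinsic, so sqrt() below is
// computed as x * rsqrt(x), trading a little accuracy for speed.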
M(Sk4f) rsqrt() const {
    float32x4_t est0 = vrsqrteq_f32(fVec);
    float32x4_t est1 = vmulq_f32(vrsqrtsq_f32(fVec, vmulq_f32(est0, est0)), est0);
    float32x4_t est2 = vmulq_f32(vrsqrtsq_f32(fVec, vmulq_f32(est1, est1)), est1);
    return est2;
}

M(Sk4f) sqrt() const { return this->multiply(this->rsqrt()); }

M(Sk4i) equal           (const Sk4f& o) const { return vreinterpretq_s32_u32(vceqq_f32(fVec, o.fVec)); }
M(Sk4i) notEqual        (const Sk4f& o) const { return vreinterpretq_s32_u32(vmvnq_u32(vceqq_f32(fVec, o.fVec))); }
M(Sk4i) lessThan        (const Sk4f& o) const { return vreinterpretq_s32_u32(vcltq_f32(fVec, o.fVec)); }
M(Sk4i) greaterThan     (const Sk4f& o) const { return vreinterpretq_s32_u32(vcgtq_f32(fVec, o.fVec)); }
M(Sk4i) lessThanEqual   (const Sk4f& o) const { return vreinterpretq_s32_u32(vcleq_f32(fVec, o.fVec)); }
M(Sk4i) greaterThanEqual(const Sk4f& o) const { return vreinterpretq_s32_u32(vcgeq_f32(fVec, o.fVec)); }

M(Sk4f) Min(const Sk4f& a, const Sk4f& b) { return vminq_f32(a.fVec, b.fVec); }
M(Sk4f) Max(const Sk4f& a, const Sk4f& b) { return vmaxq_f32(a.fVec, b.fVec); }

// These shuffle operations are implemented more efficiently with SSE.
// NEON has efficient zip, unzip, and transpose, but it is more costly to
// exploit zip and unzip in order to shuffle.
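// Lane trace for zwxy(): vzipq_f32({x,y,z,w}, {0,0,0,0}) yields
// val[0] = {x,0,y,0} and val[1] = {z,0,w,0}; vuzpq_f32(val[1], val[0]).val[0]
// then gathers the even lanes of each argument: {z,w,x,y}.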
M(Sk4f) zwxy() const {
    float32x4x2_t zip = vzipq_f32(fVec, vdupq_n_f32(0.0f));
    return vuzpq_f32(zip.val[1], zip.val[0]).val[0];
}
// Note that XYAB and ZWCD share code. If both are needed, they could be
// implemented more efficiently together. Also, ABXY and CDZW are available
// as well.
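// Lane trace: vzipq_f32(xyzw, abcd) yields val[0] = {x,a,y,b} and
// val[1] = {z,c,w,d}; unzipping the val[0] halves of both zips gathers the
// even lanes, {x,y,a,b}, and likewise the val[1] halves give {z,w,c,d}.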
M(Sk4f) XYAB(const Sk4f& xyzw, const Sk4f& abcd) {
    float32x4x2_t xayb_zcwd = vzipq_f32(xyzw.fVec, abcd.fVec);
    float32x4x2_t axby_czdw = vzipq_f32(abcd.fVec, xyzw.fVec);
    return vuzpq_f32(xayb_zcwd.val[0], axby_czdw.val[0]).val[0];
}
M(Sk4f) ZWCD(const Sk4f& xyzw, const Sk4f& abcd) {
    float32x4x2_t xayb_zcwd = vzipq_f32(xyzw.fVec, abcd.fVec);
    float32x4x2_t axby_czdw = vzipq_f32(abcd.fVec, xyzw.fVec);
    return vuzpq_f32(xayb_zcwd.val[1], axby_czdw.val[1]).val[0];
}

// Sk4i Methods
#undef M
#define M(...) template <> inline __VA_ARGS__ Sk4i::

M() Sk4x(int32_t v) : fVec(vdupq_n_s32(v)) {}
M() Sk4x(int32_t a, int32_t b, int32_t c, int32_t d) {
    // NEON lacks an intrinsic to make this easy. Avoid this constructor
    // unless it is absolutely necessary.

    // There are a few different implementation strategies.

    // uint64_t ab_i = ((uint32_t) a) | (((uint64_t) b) << 32);
    // uint64_t cd_i = ((uint32_t) c) | (((uint64_t) d) << 32);
    // int32x2_t ab = vcreate_s32(ab_i);
    // int32x2_t cd = vcreate_s32(cd_i);
    // fVec = vcombine_s32(ab, cd);
    // This might not be a bad idea for the integer case. Either way, I think
    // we will need to move values from general registers to NEON registers.

    // We use the set-lane intrinsics here, though they are not certain to be
    // the fastest approach; the vcreate/vcombine code above may be worth
    // trying for integers.
    fVec = vsetq_lane_s32(a, fVec, 0);
    fVec = vsetq_lane_s32(b, fVec, 1);
    fVec = vsetq_lane_s32(c, fVec, 2);
    fVec = vsetq_lane_s32(d, fVec, 3);
}

// As far as I can tell, it's not possible to provide an alignment hint to
// NEON using intrinsics. However, I think it is possible at the assembly
// level if we want to get into that.
M(Sk4i) Load       (const int32_t is[4]) { return vld1q_s32(is); }
M(Sk4i) LoadAligned(const int32_t is[4]) { return vld1q_s32(is); }
M(void) store       (int32_t is[4]) const { vst1q_s32(is, fVec); }
M(void) storeAligned(int32_t is[4]) const { vst1q_s32(is, fVec); }

template <>
M(Sk4f) reinterpret<Sk4f>() const { return vreinterpretq_f32_s32(fVec); }

template <>
M(Sk4f) cast<Sk4f>() const { return vcvtq_f32_s32(fVec); }

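// allTrue() and anyTrue() assume each lane holds a comparison result, i.e.
// all zero bits or all one bits. They are not meaningful for arbitrary
// integer lanes (e.g. lanes holding 1 and 2 would AND to 0).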
M(bool) allTrue() const {
    int32_t a = vgetq_lane_s32(fVec, 0);
    int32_t b = vgetq_lane_s32(fVec, 1);
    int32_t c = vgetq_lane_s32(fVec, 2);
    int32_t d = vgetq_lane_s32(fVec, 3);
    return a & b & c & d;
}
M(bool) anyTrue() const {
    int32_t a = vgetq_lane_s32(fVec, 0);
    int32_t b = vgetq_lane_s32(fVec, 1);
    int32_t c = vgetq_lane_s32(fVec, 2);
    int32_t d = vgetq_lane_s32(fVec, 3);
    return a | b | c | d;
}

M(Sk4i) bitNot() const { return vmvnq_s32(fVec); }
M(Sk4i) bitAnd(const Sk4i& o) const { return vandq_s32(fVec, o.fVec); }
M(Sk4i) bitOr (const Sk4i& o) const { return vorrq_s32(fVec, o.fVec); }

M(Sk4i) equal           (const Sk4i& o) const { return vreinterpretq_s32_u32(vceqq_s32(fVec, o.fVec)); }
M(Sk4i) notEqual        (const Sk4i& o) const { return vreinterpretq_s32_u32(vmvnq_u32(vceqq_s32(fVec, o.fVec))); }
M(Sk4i) lessThan        (const Sk4i& o) const { return vreinterpretq_s32_u32(vcltq_s32(fVec, o.fVec)); }
M(Sk4i) greaterThan     (const Sk4i& o) const { return vreinterpretq_s32_u32(vcgtq_s32(fVec, o.fVec)); }
M(Sk4i) lessThanEqual   (const Sk4i& o) const { return vreinterpretq_s32_u32(vcleq_s32(fVec, o.fVec)); }
M(Sk4i) greaterThanEqual(const Sk4i& o) const { return vreinterpretq_s32_u32(vcgeq_s32(fVec, o.fVec)); }

M(Sk4i) add     (const Sk4i& o) const { return vaddq_s32(fVec, o.fVec); }
M(Sk4i) subtract(const Sk4i& o) const { return vsubq_s32(fVec, o.fVec); }
M(Sk4i) multiply(const Sk4i& o) const { return vmulq_s32(fVec, o.fVec); }
// NEON does not have integer reciprocal, sqrt, or division.
M(Sk4i) Min(const Sk4i& a, const Sk4i& b) { return vminq_s32(a.fVec, b.fVec); }
M(Sk4i) Max(const Sk4i& a, const Sk4i& b) { return vmaxq_s32(a.fVec, b.fVec); }

// These shuffle operations are implemented more efficiently with SSE.
// NEON has efficient zip, unzip, and transpose, but it is more costly to
// exploit zip and unzip in order to shuffle. The lane behavior matches the
// Sk4f versions above.
M(Sk4i) zwxy() const {
    int32x4x2_t zip = vzipq_s32(fVec, vdupq_n_s32(0));
    return vuzpq_s32(zip.val[1], zip.val[0]).val[0];
}
// Note that XYAB and ZWCD share code. If both are needed, they could be
// implemented more efficiently together. Also, ABXY and CDZW are available
// as well.
M(Sk4i) XYAB(const Sk4i& xyzw, const Sk4i& abcd) {
    int32x4x2_t xayb_zcwd = vzipq_s32(xyzw.fVec, abcd.fVec);
    int32x4x2_t axby_czdw = vzipq_s32(abcd.fVec, xyzw.fVec);
    return vuzpq_s32(xayb_zcwd.val[0], axby_czdw.val[0]).val[0];
}
M(Sk4i) ZWCD(const Sk4i& xyzw, const Sk4i& abcd) {
    int32x4x2_t xayb_zcwd = vzipq_s32(xyzw.fVec, abcd.fVec);
    int32x4x2_t axby_czdw = vzipq_s32(abcd.fVec, xyzw.fVec);
    return vuzpq_s32(xayb_zcwd.val[1], axby_czdw.val[1]).val[0];
}
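
// Example usage (a sketch, assuming a float rgba[4] buffer):
//     Sk4f c = Sk4f::Load(rgba);
//     c.multiply(Sk4f(0.5f)).store(rgba);  // halve each channel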

#undef M

#endif