OLD | NEW |
1 /* | 1 /* |
2 * Copyright 2015 Google Inc. | 2 * Copyright 2015 Google Inc. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
6 */ | 6 */ |
7 | 7 |
8 #ifndef SkNx_neon_DEFINED | 8 #ifndef SkNx_neon_DEFINED |
9 #define SkNx_neon_DEFINED | 9 #define SkNx_neon_DEFINED |
10 | 10 |
11 #include <arm_neon.h> | 11 #include <arm_neon.h> |
12 | 12 |
13 #define SKNX_IS_FAST | 13 #define SKNX_IS_FAST |
14 | 14 |
15 // ARMv8 has vrndmq_f32 to floor 4 floats. Here we emulate it: | 15 // ARMv8 has vrndmq_f32 to floor 4 floats. Here we emulate it: |
16 // - roundtrip through integers via truncation | 16 // - roundtrip through integers via truncation |
17 // - subtract 1 if that's too big (possible for negative values). | 17 // - subtract 1 if that's too big (possible for negative values). |
18 // This restricts the domain of our inputs to a maximum somewhere around 2^31. Seems plenty big. | 18 // This restricts the domain of our inputs to a maximum somewhere around 2^31. Seems plenty big. |
19 static inline float32x4_t armv7_vrndmq_f32(float32x4_t v) { | 19 static inline float32x4_t armv7_vrndmq_f32(float32x4_t v) { |
20 auto roundtrip = vcvtq_f32_s32(vcvtq_s32_f32(v)); | 20 float32x4_t roundtrip = vcvtq_f32_s32(vcvtq_s32_f32(v)); |
21 auto too_big = vcgtq_f32(roundtrip, v); | 21 uint32x4_t too_big = roundtrip > v; |
22     return vsubq_f32(roundtrip, (float32x4_t)vandq_u32(too_big, (uint32x4_t)vdupq_n_f32(1))); | 22     return roundtrip - (float32x4_t)vandq_u32(too_big, (uint32x4_t)vdupq_n_f32(1)); |
23 } | 23 } |
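
Reviewer-added illustration, not part of the change: the same floor-by-truncation adjustment described in the comment above, written as a scalar sketch (hypothetical helper name).

// Truncate toward zero, then step down by one when the roundtrip overshot;
// that can only happen for negative, non-integral inputs within the ~2^31
// domain noted above.
static inline float scalar_floor_sketch(float v) {
    float roundtrip = (float)(int)v;          // e.g. -3.7f truncates to -3.0f
    return roundtrip > v ? roundtrip - 1.0f   // -3.0f > -3.7f, so the floor is -4.0f
                         : roundtrip;         // 3.7f -> 3.0f is already the floor
}
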
24 | 24 |
25 // Well, this is absurd. The shifts require compile-time constant arguments. | 25 // Well, this is absurd. The shifts require compile-time constant arguments. |
26 | 26 |
27 #define SHIFT8(op, v, bits) switch(bits) { \ | 27 #define SHIFT8(op, v, bits) switch(bits) { \ |
28     case 1: return op(v, 1); case 2: return op(v, 2); case 3: return op(v, 3); \ | 28     case 1: return op(v, 1); case 2: return op(v, 2); case 3: return op(v, 3); \ |
29     case 4: return op(v, 4); case 5: return op(v, 5); case 6: return op(v, 6); \ | 29     case 4: return op(v, 4); case 5: return op(v, 5); case 6: return op(v, 6); \ |
30 case 7: return op(v, 7); \ | 30 case 7: return op(v, 7); \ |
31 } return fVec | 31 } return fVec |
32 | 32 |
(...skipping 10 matching lines...)
43   case 25: return op(v, 25); case 26: return op(v, 26); case 27: return op(v, 27); \ | 43   case 25: return op(v, 25); case 26: return op(v, 26); case 27: return op(v, 27); \ |
44   case 28: return op(v, 28); case 29: return op(v, 29); case 30: return op(v, 30); \ | 44   case 28: return op(v, 28); case 29: return op(v, 29); case 30: return op(v, 30); \ |
45 case 31: return op(v, 31); } return fVec | 45 case 31: return op(v, 31); } return fVec |
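
For context on the "compile-time constant" comment above (reviewer-added sketch, not part of the change): the vshl_n_*/vshr_n_* intrinsics encode the shift count as an instruction immediate, so a runtime count has to be dispatched through a switch, which is what SHIFT8/SHIFT16/SHIFT32 generate. Written out by hand for the 16-bit left shift used below, the dispatch would look roughly like this (hypothetical helper; assumes <arm_neon.h> and mirrors the SHIFT8 pattern above):

static inline uint16x4_t shl_u16_sketch(uint16x4_t v, int bits) {
    switch (bits) {
        case  1: return vshl_n_u16(v,  1); case  2: return vshl_n_u16(v,  2);
        case  3: return vshl_n_u16(v,  3); case  4: return vshl_n_u16(v,  4);
        case  5: return vshl_n_u16(v,  5); case  6: return vshl_n_u16(v,  6);
        case  7: return vshl_n_u16(v,  7); case  8: return vshl_n_u16(v,  8);
        case  9: return vshl_n_u16(v,  9); case 10: return vshl_n_u16(v, 10);
        case 11: return vshl_n_u16(v, 11); case 12: return vshl_n_u16(v, 12);
        case 13: return vshl_n_u16(v, 13); case 14: return vshl_n_u16(v, 14);
        case 15: return vshl_n_u16(v, 15);
    }
    return v;  // bits == 0: nothing to shift
}
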
46 | 46 |
47 template <> | 47 template <> |
48 class SkNx<2, float> { | 48 class SkNx<2, float> { |
49 public: | 49 public: |
50 SkNx(float32x2_t vec) : fVec(vec) {} | 50 SkNx(float32x2_t vec) : fVec(vec) {} |
51 | 51 |
52 SkNx() {} | 52 SkNx() {} |
53 SkNx(float val) : fVec(vdup_n_f32(val)) {} | 53 SkNx(float a, float b) : fVec{a,b} {} |
| 54 SkNx(float v) : fVec{v,v} {} |
| 55 |
54 static SkNx Load(const void* ptr) { return vld1_f32((const float*)ptr); } | 56 static SkNx Load(const void* ptr) { return vld1_f32((const float*)ptr); } |
55 SkNx(float a, float b) { fVec = (float32x2_t) { a, b }; } | |
56 | |
57 void store(void* ptr) const { vst1_f32((float*)ptr, fVec); } | 57 void store(void* ptr) const { vst1_f32((float*)ptr, fVec); } |
58 | 58 |
59 SkNx invert() const { | 59 SkNx operator + (const SkNx& o) const { return fVec + o.fVec; } |
60 float32x2_t est0 = vrecpe_f32(fVec), | 60 SkNx operator - (const SkNx& o) const { return fVec - o.fVec; } |
61 est1 = vmul_f32(vrecps_f32(est0, fVec), est0); | 61 SkNx operator * (const SkNx& o) const { return fVec * o.fVec; } |
62 return est1; | 62 SkNx operator / (const SkNx& o) const { return fVec / o.fVec; } |
63 } | |
64 | 63 |
65 SkNx operator + (const SkNx& o) const { return vadd_f32(fVec, o.fVec); } | 64 SkNx operator == (const SkNx& o) const { return fVec == o.fVec; } |
66 SkNx operator - (const SkNx& o) const { return vsub_f32(fVec, o.fVec); } | 65 SkNx operator < (const SkNx& o) const { return fVec < o.fVec; } |
67 SkNx operator * (const SkNx& o) const { return vmul_f32(fVec, o.fVec); } | 66 SkNx operator > (const SkNx& o) const { return fVec > o.fVec; } |
68 SkNx operator / (const SkNx& o) const { | 67 SkNx operator <= (const SkNx& o) const { return fVec <= o.fVec; } |
69 #if defined(SK_CPU_ARM64) | 68 SkNx operator >= (const SkNx& o) const { return fVec >= o.fVec; } |
70 return vdiv_f32(fVec, o.fVec); | 69 SkNx operator != (const SkNx& o) const { return fVec != o.fVec; } |
71 #else | |
72 float32x2_t est0 = vrecpe_f32(o.fVec), | |
73 est1 = vmul_f32(vrecps_f32(est0, o.fVec), est0), | |
74 est2 = vmul_f32(vrecps_f32(est1, o.fVec), est1); | |
75 return vmul_f32(fVec, est2); | |
76 #endif | |
77 } | |
78 | |
79   SkNx operator == (const SkNx& o) const { return vreinterpret_f32_u32(vceq_f32(fVec, o.fVec)); } | 
80   SkNx operator < (const SkNx& o) const { return vreinterpret_f32_u32(vclt_f32(fVec, o.fVec)); } | 
81   SkNx operator > (const SkNx& o) const { return vreinterpret_f32_u32(vcgt_f32(fVec, o.fVec)); } | 
82   SkNx operator <= (const SkNx& o) const { return vreinterpret_f32_u32(vcle_f32(fVec, o.fVec)); } | 
83   SkNx operator >= (const SkNx& o) const { return vreinterpret_f32_u32(vcge_f32(fVec, o.fVec)); } | 
84 SkNx operator != (const SkNx& o) const { | |
85 return vreinterpret_f32_u32(vmvn_u32(vceq_f32(fVec, o.fVec))); | |
86 } | |
87 | 70 |
88   static SkNx Min(const SkNx& l, const SkNx& r) { return vmin_f32(l.fVec, r.fVec); } | 71   static SkNx Min(const SkNx& l, const SkNx& r) { return vmin_f32(l.fVec, r.fVec); } |
89   static SkNx Max(const SkNx& l, const SkNx& r) { return vmax_f32(l.fVec, r.fVec); } | 72   static SkNx Max(const SkNx& l, const SkNx& r) { return vmax_f32(l.fVec, r.fVec); } |
90 | 73 |
91 SkNx rsqrt() const { | 74 SkNx rsqrt() const { |
92 float32x2_t est0 = vrsqrte_f32(fVec); | 75 float32x2_t est0 = vrsqrte_f32(fVec); |
93 return vmul_f32(vrsqrts_f32(fVec, vmul_f32(est0, est0)), est0); | 76 return vmul_f32(vrsqrts_f32(fVec, vmul_f32(est0, est0)), est0); |
94 } | 77 } |
95 | 78 |
96 SkNx sqrt() const { | 79 SkNx sqrt() const { |
97 #if defined(SK_CPU_ARM64) | 80 #if defined(SK_CPU_ARM64) |
98 return vsqrt_f32(fVec); | 81 return vsqrt_f32(fVec); |
99 #else | 82 #else |
100 float32x2_t est0 = vrsqrte_f32(fVec), | 83 float32x2_t est0 = vrsqrte_f32(fVec), |
101                 est1 = vmul_f32(vrsqrts_f32(fVec, vmul_f32(est0, est0)), est0), | 84                 est1 = vmul_f32(vrsqrts_f32(fVec, vmul_f32(est0, est0)), est0), |
102                 est2 = vmul_f32(vrsqrts_f32(fVec, vmul_f32(est1, est1)), est1); | 85                 est2 = vmul_f32(vrsqrts_f32(fVec, vmul_f32(est1, est1)), est1); |
103 return vmul_f32(fVec, est2); | 86 return vmul_f32(fVec, est2); |
104 #endif | 87 #endif |
105 } | 88 } |
106 | 89 |
107 float operator[](int k) const { | 90 SkNx invert() const { |
108 SkASSERT(0 <= k && k < 2); | 91 float32x2_t est0 = vrecpe_f32(fVec), |
109 union { float32x2_t v; float fs[2]; } pun = {fVec}; | 92 est1 = vmul_f32(vrecps_f32(est0, fVec), est0); |
110 return pun.fs[k&1]; | 93 return est1; |
111 } | 94 } |
112 | 95 |
| 96 float operator[](int k) const { return fVec[k&1]; } |
| 97 |
113 bool allTrue() const { | 98 bool allTrue() const { |
114 auto v = vreinterpret_u32_f32(fVec); | 99 auto v = vreinterpret_u32_f32(fVec); |
115 return vget_lane_u32(v,0) && vget_lane_u32(v,1); | 100 return vget_lane_u32(v,0) && vget_lane_u32(v,1); |
116 } | 101 } |
117 bool anyTrue() const { | 102 bool anyTrue() const { |
118 auto v = vreinterpret_u32_f32(fVec); | 103 auto v = vreinterpret_u32_f32(fVec); |
119 return vget_lane_u32(v,0) || vget_lane_u32(v,1); | 104 return vget_lane_u32(v,0) || vget_lane_u32(v,1); |
120 } | 105 } |
121 | 106 |
122 float32x2_t fVec; | 107 float32x2_t fVec; |
123 }; | 108 }; |
124 | 109 |
125 template <> | 110 template <> |
126 class SkNx<4, float> { | 111 class SkNx<4, float> { |
127 public: | 112 public: |
128 SkNx(float32x4_t vec) : fVec(vec) {} | 113 SkNx(float32x4_t vec) : fVec(vec) {} |
129 | 114 |
130 SkNx() {} | 115 SkNx() {} |
131 SkNx(float val) : fVec(vdupq_n_f32(val)) {} | 116 SkNx(float a, float b, float c, float d) : fVec{a,b,c,d} {} |
| 117 SkNx(float v) : fVec{v,v,v,v} {} |
| 118 |
132 static SkNx Load(const void* ptr) { return vld1q_f32((const float*)ptr); } | 119 static SkNx Load(const void* ptr) { return vld1q_f32((const float*)ptr); } |
133   SkNx(float a, float b, float c, float d) { fVec = (float32x4_t) { a, b, c, d }; } | 120   void store(void* ptr) const { vst1q_f32((float*)ptr, fVec); } |
134 | 121 |
135 void store(void* ptr) const { vst1q_f32((float*)ptr, fVec); } | 122 SkNx operator + (const SkNx& o) const { return fVec + o.fVec; } |
136 SkNx invert() const { | 123 SkNx operator - (const SkNx& o) const { return fVec - o.fVec; } |
137 float32x4_t est0 = vrecpeq_f32(fVec), | 124 SkNx operator * (const SkNx& o) const { return fVec * o.fVec; } |
138 est1 = vmulq_f32(vrecpsq_f32(est0, fVec), est0); | 125 SkNx operator / (const SkNx& o) const { return fVec / o.fVec; } |
139 return est1; | |
140 } | |
141 | 126 |
142 SkNx operator + (const SkNx& o) const { return vaddq_f32(fVec, o.fVec); } | 127 SkNx operator==(const SkNx& o) const { return fVec == o.fVec; } |
143 SkNx operator - (const SkNx& o) const { return vsubq_f32(fVec, o.fVec); } | 128 SkNx operator <(const SkNx& o) const { return fVec < o.fVec; } |
144 SkNx operator * (const SkNx& o) const { return vmulq_f32(fVec, o.fVec); } | 129 SkNx operator >(const SkNx& o) const { return fVec > o.fVec; } |
145 SkNx operator / (const SkNx& o) const { | 130 SkNx operator<=(const SkNx& o) const { return fVec <= o.fVec; } |
146 #if defined(SK_CPU_ARM64) | 131 SkNx operator>=(const SkNx& o) const { return fVec >= o.fVec; } |
147 return vdivq_f32(fVec, o.fVec); | 132 SkNx operator!=(const SkNx& o) const { return fVec != o.fVec; } |
148 #else | |
149 float32x4_t est0 = vrecpeq_f32(o.fVec), | |
150 est1 = vmulq_f32(vrecpsq_f32(est0, o.fVec), est0), | |
151 est2 = vmulq_f32(vrecpsq_f32(est1, o.fVec), est1); | |
152 return vmulq_f32(fVec, est2); | |
153 #endif | |
154 } | |
155 | |
156   SkNx operator==(const SkNx& o) const { return vreinterpretq_f32_u32(vceqq_f32(fVec, o.fVec)); } | 
157   SkNx operator <(const SkNx& o) const { return vreinterpretq_f32_u32(vcltq_f32(fVec, o.fVec)); } | 
158   SkNx operator >(const SkNx& o) const { return vreinterpretq_f32_u32(vcgtq_f32(fVec, o.fVec)); } | 
159   SkNx operator<=(const SkNx& o) const { return vreinterpretq_f32_u32(vcleq_f32(fVec, o.fVec)); } | 
160   SkNx operator>=(const SkNx& o) const { return vreinterpretq_f32_u32(vcgeq_f32(fVec, o.fVec)); } | 
161 SkNx operator!=(const SkNx& o) const { | |
162 return vreinterpretq_f32_u32(vmvnq_u32(vceqq_f32(fVec, o.fVec))); | |
163 } | |
164 | 133 |
165   static SkNx Min(const SkNx& l, const SkNx& r) { return vminq_f32(l.fVec, r.fVec); } | 134   static SkNx Min(const SkNx& l, const SkNx& r) { return vminq_f32(l.fVec, r.fVec); } |
166   static SkNx Max(const SkNx& l, const SkNx& r) { return vmaxq_f32(l.fVec, r.fVec); } | 135   static SkNx Max(const SkNx& l, const SkNx& r) { return vmaxq_f32(l.fVec, r.fVec); } |
167 | 136 |
168 SkNx abs() const { return vabsq_f32(fVec); } | 137 SkNx abs() const { return vabsq_f32(fVec); } |
169 SkNx floor() const { | 138 SkNx floor() const { |
170 #if defined(SK_CPU_ARM64) | 139 #if defined(SK_CPU_ARM64) |
171 return vrndmq_f32(fVec); | 140 return vrndmq_f32(fVec); |
172 #else | 141 #else |
173 return armv7_vrndmq_f32(fVec); | 142 return armv7_vrndmq_f32(fVec); |
174 #endif | 143 #endif |
175 } | 144 } |
176 | 145 |
177 | |
178 SkNx rsqrt() const { | 146 SkNx rsqrt() const { |
179 float32x4_t est0 = vrsqrteq_f32(fVec); | 147 float32x4_t est0 = vrsqrteq_f32(fVec); |
180 return vmulq_f32(vrsqrtsq_f32(fVec, vmulq_f32(est0, est0)), est0); | 148 return vmulq_f32(vrsqrtsq_f32(fVec, vmulq_f32(est0, est0)), est0); |
181 } | 149 } |
182 | 150 |
183 SkNx sqrt() const { | 151 SkNx sqrt() const { |
184 #if defined(SK_CPU_ARM64) | 152 #if defined(SK_CPU_ARM64) |
185 return vsqrtq_f32(fVec); | 153 return vsqrtq_f32(fVec); |
186 #else | 154 #else |
187 float32x4_t est0 = vrsqrteq_f32(fVec), | 155 float32x4_t est0 = vrsqrteq_f32(fVec), |
188                 est1 = vmulq_f32(vrsqrtsq_f32(fVec, vmulq_f32(est0, est0)), est0), | 156                 est1 = vmulq_f32(vrsqrtsq_f32(fVec, vmulq_f32(est0, est0)), est0), |
189                 est2 = vmulq_f32(vrsqrtsq_f32(fVec, vmulq_f32(est1, est1)), est1); | 157                 est2 = vmulq_f32(vrsqrtsq_f32(fVec, vmulq_f32(est1, est1)), est1); |
190 return vmulq_f32(fVec, est2); | 158 return vmulq_f32(fVec, est2); |
191 #endif | 159 #endif |
192 } | 160 } |
193 | 161 |
194 float operator[](int k) const { | 162 SkNx invert() const { |
195 SkASSERT(0 <= k && k < 4); | 163 float32x4_t est0 = vrecpeq_f32(fVec), |
196 union { float32x4_t v; float fs[4]; } pun = {fVec}; | 164 est1 = vmulq_f32(vrecpsq_f32(est0, fVec), est0); |
197 return pun.fs[k&3]; | 165 return est1; |
198 } | 166 } |
199 | 167 |
| 168 float operator[](int k) const { return fVec[k&3]; } |
| 169 |
200 bool allTrue() const { | 170 bool allTrue() const { |
201 auto v = vreinterpretq_u32_f32(fVec); | 171 auto v = vreinterpretq_u32_f32(fVec); |
202 return vgetq_lane_u32(v,0) && vgetq_lane_u32(v,1) | 172 return vgetq_lane_u32(v,0) && vgetq_lane_u32(v,1) |
203 && vgetq_lane_u32(v,2) && vgetq_lane_u32(v,3); | 173 && vgetq_lane_u32(v,2) && vgetq_lane_u32(v,3); |
204 } | 174 } |
205 bool anyTrue() const { | 175 bool anyTrue() const { |
206 auto v = vreinterpretq_u32_f32(fVec); | 176 auto v = vreinterpretq_u32_f32(fVec); |
207 return vgetq_lane_u32(v,0) || vgetq_lane_u32(v,1) | 177 return vgetq_lane_u32(v,0) || vgetq_lane_u32(v,1) |
208 || vgetq_lane_u32(v,2) || vgetq_lane_u32(v,3); | 178 || vgetq_lane_u32(v,2) || vgetq_lane_u32(v,3); |
209 } | 179 } |
210 | 180 |
211 SkNx thenElse(const SkNx& t, const SkNx& e) const { | 181 SkNx thenElse(const SkNx& t, const SkNx& e) const { |
212 return vbslq_f32(vreinterpretq_u32_f32(fVec), t.fVec, e.fVec); | 182 return vbslq_f32(vreinterpretq_u32_f32(fVec), t.fVec, e.fVec); |
213 } | 183 } |
214 | 184 |
215 float32x4_t fVec; | 185 float32x4_t fVec; |
216 }; | 186 }; |
217 | 187 |
218 // It's possible that for our current use cases, representing this as | 188 // It's possible that for our current use cases, representing this as |
219 // half a uint16x8_t might be better than representing it as a uint16x4_t. | 189 // half a uint16x8_t might be better than representing it as a uint16x4_t. |
220 // It'd make conversion to Sk4b one step simpler. | 190 // It'd make conversion to Sk4b one step simpler. |
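
A reviewer-added aside on the comment above, not part of the change: with the current uint16x4_t representation, getting from Sk4h to Sk4b takes a combine-then-narrow pair, whereas half of a uint16x8_t could be narrowed directly. A hedged sketch (hypothetical helper; assumes <arm_neon.h>):

// Narrow four 16-bit lanes down to bytes, starting from the current uint16x4_t form.
static inline uint8x8_t sk4h_to_bytes_sketch(uint16x4_t v) {
    return vmovn_u16(vcombine_u16(v, v));  // build a full 8-lane vector, then narrow to u8
}
// If the value already lived in (half of) a uint16x8_t, the vcombine_u16 step
// would disappear and the conversion would be the single vmovn_u16.
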
221 template <> | 191 template <> |
222 class SkNx<4, uint16_t> { | 192 class SkNx<4, uint16_t> { |
223 public: | 193 public: |
224 SkNx(const uint16x4_t& vec) : fVec(vec) {} | 194 SkNx(const uint16x4_t& vec) : fVec(vec) {} |
225 | 195 |
226 SkNx() {} | 196 SkNx() {} |
227 SkNx(uint16_t val) : fVec(vdup_n_u16(val)) {} | 197 SkNx(uint16_t a, uint16_t b, uint16_t c, uint16_t d) : fVec{a,b,c,d} {} |
| 198 SkNx(uint16_t v) : fVec{v,v,v,v} {} |
| 199 |
228 static SkNx Load(const void* ptr) { return vld1_u16((const uint16_t*)ptr); } | 200 static SkNx Load(const void* ptr) { return vld1_u16((const uint16_t*)ptr); } |
229 | |
230 SkNx(uint16_t a, uint16_t b, uint16_t c, uint16_t d) { | |
231 fVec = (uint16x4_t) { a,b,c,d }; | |
232 } | |
233 | |
234 void store(void* ptr) const { vst1_u16((uint16_t*)ptr, fVec); } | 201 void store(void* ptr) const { vst1_u16((uint16_t*)ptr, fVec); } |
235 | 202 |
236 SkNx operator + (const SkNx& o) const { return vadd_u16(fVec, o.fVec); } | 203 SkNx operator + (const SkNx& o) const { return fVec + o.fVec; } |
237 SkNx operator - (const SkNx& o) const { return vsub_u16(fVec, o.fVec); } | 204 SkNx operator - (const SkNx& o) const { return fVec - o.fVec; } |
238 SkNx operator * (const SkNx& o) const { return vmul_u16(fVec, o.fVec); } | 205 SkNx operator * (const SkNx& o) const { return fVec * o.fVec; } |
239 | 206 |
240 SkNx operator << (int bits) const { SHIFT16(vshl_n_u16, fVec, bits); } | 207 SkNx operator << (int bits) const { SHIFT16(vshl_n_u16, fVec, bits); } |
241 SkNx operator >> (int bits) const { SHIFT16(vshr_n_u16, fVec, bits); } | 208 SkNx operator >> (int bits) const { SHIFT16(vshr_n_u16, fVec, bits); } |
242 | 209 |
243   static SkNx Min(const SkNx& a, const SkNx& b) { return vmin_u16(a.fVec, b.fVec); } | 210   static SkNx Min(const SkNx& a, const SkNx& b) { return vmin_u16(a.fVec, b.fVec); } |
244 | 211 |
245 uint16_t operator[](int k) const { | 212 uint16_t operator[](int k) const { return fVec[k&3]; } |
246 SkASSERT(0 <= k && k < 4); | |
247 union { uint16x4_t v; uint16_t us[4]; } pun = {fVec}; | |
248 return pun.us[k&3]; | |
249 } | |
250 | 213 |
251 SkNx thenElse(const SkNx& t, const SkNx& e) const { | 214 SkNx thenElse(const SkNx& t, const SkNx& e) const { |
252 return vbsl_u16(fVec, t.fVec, e.fVec); | 215 return vbsl_u16(fVec, t.fVec, e.fVec); |
253 } | 216 } |
254 | 217 |
255 uint16x4_t fVec; | 218 uint16x4_t fVec; |
256 }; | 219 }; |
257 | 220 |
258 template <> | 221 template <> |
259 class SkNx<8, uint16_t> { | 222 class SkNx<8, uint16_t> { |
260 public: | 223 public: |
261 SkNx(const uint16x8_t& vec) : fVec(vec) {} | 224 SkNx(const uint16x8_t& vec) : fVec(vec) {} |
262 | 225 |
263 SkNx() {} | 226 SkNx() {} |
264 SkNx(uint16_t val) : fVec(vdupq_n_u16(val)) {} | 227 SkNx(uint16_t a, uint16_t b, uint16_t c, uint16_t d, |
| 228        uint16_t e, uint16_t f, uint16_t g, uint16_t h) : fVec{a,b,c,d,e,f,g,h} {}
| 229   SkNx(uint16_t v)                                 : fVec{v,v,v,v,v,v,v,v} {}
| 230 |
265   static SkNx Load(const void* ptr) { return vld1q_u16((const uint16_t*)ptr); } | 231   static SkNx Load(const void* ptr) { return vld1q_u16((const uint16_t*)ptr); } |
266 | |
267 SkNx(uint16_t a, uint16_t b, uint16_t c, uint16_t d, | |
268 uint16_t e, uint16_t f, uint16_t g, uint16_t h) { | |
269 fVec = (uint16x8_t) { a,b,c,d, e,f,g,h }; | |
270 } | |
271 | |
272 void store(void* ptr) const { vst1q_u16((uint16_t*)ptr, fVec); } | 232 void store(void* ptr) const { vst1q_u16((uint16_t*)ptr, fVec); } |
273 | 233 |
274 SkNx operator + (const SkNx& o) const { return vaddq_u16(fVec, o.fVec); } | 234 SkNx operator + (const SkNx& o) const { return fVec + o.fVec; } |
275 SkNx operator - (const SkNx& o) const { return vsubq_u16(fVec, o.fVec); } | 235 SkNx operator - (const SkNx& o) const { return fVec - o.fVec; } |
276 SkNx operator * (const SkNx& o) const { return vmulq_u16(fVec, o.fVec); } | 236 SkNx operator * (const SkNx& o) const { return fVec * o.fVec; } |
277 | 237 |
278 SkNx operator << (int bits) const { SHIFT16(vshlq_n_u16, fVec, bits); } | 238 SkNx operator << (int bits) const { SHIFT16(vshlq_n_u16, fVec, bits); } |
279 SkNx operator >> (int bits) const { SHIFT16(vshrq_n_u16, fVec, bits); } | 239 SkNx operator >> (int bits) const { SHIFT16(vshrq_n_u16, fVec, bits); } |
280 | 240 |
281   static SkNx Min(const SkNx& a, const SkNx& b) { return vminq_u16(a.fVec, b.fVec); } | 241   static SkNx Min(const SkNx& a, const SkNx& b) { return vminq_u16(a.fVec, b.fVec); } |
282 | 242 |
283 uint16_t operator[](int k) const { | 243 uint16_t operator[](int k) const { return fVec[k&7]; } |
284 SkASSERT(0 <= k && k < 8); | |
285 union { uint16x8_t v; uint16_t us[8]; } pun = {fVec}; | |
286 return pun.us[k&7]; | |
287 } | |
288 | 244 |
289 SkNx thenElse(const SkNx& t, const SkNx& e) const { | 245 SkNx thenElse(const SkNx& t, const SkNx& e) const { |
290 return vbslq_u16(fVec, t.fVec, e.fVec); | 246 return vbslq_u16(fVec, t.fVec, e.fVec); |
291 } | 247 } |
292 | 248 |
293 uint16x8_t fVec; | 249 uint16x8_t fVec; |
294 }; | 250 }; |
295 | 251 |
296 template <> | 252 template <> |
297 class SkNx<4, uint8_t> { | 253 class SkNx<4, uint8_t> { |
298 public: | 254 public: |
299 typedef uint32_t __attribute__((aligned(1))) unaligned_uint32_t; | 255 typedef uint32_t __attribute__((aligned(1))) unaligned_uint32_t; |
300 | 256 |
301 SkNx(const uint8x8_t& vec) : fVec(vec) {} | 257 SkNx(const uint8x8_t& vec) : fVec(vec) {} |
302 | 258 |
303 SkNx() {} | 259 SkNx() {} |
304 SkNx(uint8_t a, uint8_t b, uint8_t c, uint8_t d) { | 260 SkNx(uint8_t a, uint8_t b, uint8_t c, uint8_t d) : fVec{a,b,c,d,0,0,0,0} {} |
305 fVec = (uint8x8_t){a,b,c,d, 0,0,0,0}; | 261 SkNx(uint8_t v) : fVec{v,v,v,v,0,0,0,0} {} |
306 } | 262 |
307 static SkNx Load(const void* ptr) { | 263 static SkNx Load(const void* ptr) { |
308 return (uint8x8_t)vld1_dup_u32((const unaligned_uint32_t*)ptr); | 264 return (uint8x8_t)vld1_dup_u32((const unaligned_uint32_t*)ptr); |
309 } | 265 } |
310 void store(void* ptr) const { | 266 void store(void* ptr) const { |
311 return vst1_lane_u32((unaligned_uint32_t*)ptr, (uint32x2_t)fVec, 0); | 267 return vst1_lane_u32((unaligned_uint32_t*)ptr, (uint32x2_t)fVec, 0); |
312 } | 268 } |
313 uint8_t operator[](int k) const { | |
314 SkASSERT(0 <= k && k < 4); | |
315 union { uint8x8_t v; uint8_t us[8]; } pun = {fVec}; | |
316 return pun.us[k&3]; | |
317 } | |
318 | 269 |
319 // TODO as needed | 270 uint8_t operator[](int k) const { return fVec[k&3]; } |
320 | 271 |
321 uint8x8_t fVec; | 272 uint8x8_t fVec; |
322 }; | 273 }; |
323 | 274 |
324 template <> | 275 template <> |
325 class SkNx<16, uint8_t> { | 276 class SkNx<16, uint8_t> { |
326 public: | 277 public: |
327 SkNx(const uint8x16_t& vec) : fVec(vec) {} | 278 SkNx(const uint8x16_t& vec) : fVec(vec) {} |
328 | 279 |
329 SkNx() {} | 280 SkNx() {} |
330 SkNx(uint8_t val) : fVec(vdupq_n_u8(val)) {} | |
331 static SkNx Load(const void* ptr) { return vld1q_u8((const uint8_t*)ptr); } | |
332 | |
333 SkNx(uint8_t a, uint8_t b, uint8_t c, uint8_t d, | 281 SkNx(uint8_t a, uint8_t b, uint8_t c, uint8_t d, |
334 uint8_t e, uint8_t f, uint8_t g, uint8_t h, | 282 uint8_t e, uint8_t f, uint8_t g, uint8_t h, |
335 uint8_t i, uint8_t j, uint8_t k, uint8_t l, | 283 uint8_t i, uint8_t j, uint8_t k, uint8_t l, |
336        uint8_t m, uint8_t n, uint8_t o, uint8_t p) { | 284        uint8_t m, uint8_t n, uint8_t o, uint8_t p) : fVec{a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p} {}
337     fVec = (uint8x16_t) { a,b,c,d, e,f,g,h, i,j,k,l, m,n,o,p }; | 285   SkNx(uint8_t v)                   : fVec{v,v,v,v,v,v,v,v,v,v,v,v,v,v,v,v} {}
338 } | |
339 | 286 |
| 287 static SkNx Load(const void* ptr) { return vld1q_u8((const uint8_t*)ptr); } |
340 void store(void* ptr) const { vst1q_u8((uint8_t*)ptr, fVec); } | 288 void store(void* ptr) const { vst1q_u8((uint8_t*)ptr, fVec); } |
341 | 289 |
342 SkNx saturatedAdd(const SkNx& o) const { return vqaddq_u8(fVec, o.fVec); } | 290 SkNx saturatedAdd(const SkNx& o) const { return vqaddq_u8(fVec, o.fVec); } |
| 291 SkNx operator + (const SkNx& o) const { return fVec + o.fVec; } |
| 292 SkNx operator - (const SkNx& o) const { return fVec - o.fVec; } |
343 | 293 |
344 SkNx operator + (const SkNx& o) const { return vaddq_u8(fVec, o.fVec); } | 294 SkNx operator < (const SkNx& o) const { return fVec < o.fVec; } |
345 SkNx operator - (const SkNx& o) const { return vsubq_u8(fVec, o.fVec); } | |
346 | 295 |
347   static SkNx Min(const SkNx& a, const SkNx& b) { return vminq_u8(a.fVec, b.fVec); } | 296   static SkNx Min(const SkNx& a, const SkNx& b) { return vminq_u8(a.fVec, b.fVec); } |
348 SkNx operator < (const SkNx& o) const { return vcltq_u8(fVec, o.fVec); } | |
349 | 297 |
350 uint8_t operator[](int k) const { | 298 uint8_t operator[](int k) const { return fVec[k&15]; } |
351 SkASSERT(0 <= k && k < 16); | |
352 union { uint8x16_t v; uint8_t us[16]; } pun = {fVec}; | |
353 return pun.us[k&15]; | |
354 } | |
355 | 299 |
356 SkNx thenElse(const SkNx& t, const SkNx& e) const { | 300 SkNx thenElse(const SkNx& t, const SkNx& e) const { |
357 return vbslq_u8(fVec, t.fVec, e.fVec); | 301 return vbslq_u8(fVec, t.fVec, e.fVec); |
358 } | 302 } |
359 | 303 |
360 uint8x16_t fVec; | 304 uint8x16_t fVec; |
361 }; | 305 }; |
362 | 306 |
363 template <> | 307 template <> |
364 class SkNx<4, int32_t> { | 308 class SkNx<4, int32_t> { |
365 public: | 309 public: |
366 SkNx(const int32x4_t& vec) : fVec(vec) {} | 310 SkNx(const int32x4_t& vec) : fVec(vec) {} |
367 | 311 |
368 SkNx() {} | 312 SkNx() {} |
369 SkNx(int32_t v) { | 313 SkNx(int32_t a, int32_t b, int32_t c, int32_t d) : fVec{a,b,c,d} {} |
370 fVec = vdupq_n_s32(v); | 314 SkNx(int32_t v) : fVec{v,v,v,v} {} |
371 } | |
372 SkNx(int32_t a, int32_t b, int32_t c, int32_t d) { | |
373 fVec = (int32x4_t){a,b,c,d}; | |
374 } | |
375 static SkNx Load(const void* ptr) { | |
376 return vld1q_s32((const int32_t*)ptr); | |
377 } | |
378 void store(void* ptr) const { | |
379 return vst1q_s32((int32_t*)ptr, fVec); | |
380 } | |
381 int32_t operator[](int k) const { | |
382 SkASSERT(0 <= k && k < 4); | |
383 union { int32x4_t v; int32_t is[4]; } pun = {fVec}; | |
384 return pun.is[k&3]; | |
385 } | |
386 | 315 |
387 SkNx operator + (const SkNx& o) const { return vaddq_s32(fVec, o.fVec); } | 316 static SkNx Load(const void* ptr) { return vld1q_s32((const int32_t*)ptr); } |
388 SkNx operator - (const SkNx& o) const { return vsubq_s32(fVec, o.fVec); } | 317 void store(void* ptr) const { return vst1q_s32((int32_t*)ptr, fVec); } |
389 SkNx operator * (const SkNx& o) const { return vmulq_s32(fVec, o.fVec); } | |
390 | 318 |
391 SkNx operator & (const SkNx& o) const { return vandq_s32(fVec, o.fVec); } | 319 SkNx operator + (const SkNx& o) const { return fVec + o.fVec; } |
392 SkNx operator | (const SkNx& o) const { return vorrq_s32(fVec, o.fVec); } | 320 SkNx operator - (const SkNx& o) const { return fVec - o.fVec; } |
393 SkNx operator ^ (const SkNx& o) const { return veorq_s32(fVec, o.fVec); } | 321 SkNx operator * (const SkNx& o) const { return fVec * o.fVec; } |
| 322 |
| 323 SkNx operator & (const SkNx& o) const { return fVec & o.fVec; } |
| 324 SkNx operator | (const SkNx& o) const { return fVec | o.fVec; } |
| 325 SkNx operator ^ (const SkNx& o) const { return fVec ^ o.fVec; } |
394 | 326 |
395 SkNx operator << (int bits) const { SHIFT32(vshlq_n_s32, fVec, bits); } | 327 SkNx operator << (int bits) const { SHIFT32(vshlq_n_s32, fVec, bits); } |
396 SkNx operator >> (int bits) const { SHIFT32(vshrq_n_s32, fVec, bits); } | 328 SkNx operator >> (int bits) const { SHIFT32(vshrq_n_s32, fVec, bits); } |
397 | 329 |
398 SkNx operator == (const SkNx& o) const { | 330 SkNx operator == (const SkNx& o) const { return fVec == o.fVec; } |
399 return vreinterpretq_s32_u32(vceqq_s32(fVec, o.fVec)); | 331 SkNx operator < (const SkNx& o) const { return fVec < o.fVec; } |
400 } | 332 SkNx operator > (const SkNx& o) const { return fVec > o.fVec; } |
401 SkNx operator < (const SkNx& o) const { | |
402 return vreinterpretq_s32_u32(vcltq_s32(fVec, o.fVec)); | |
403 } | |
404 SkNx operator > (const SkNx& o) const { | |
405 return vreinterpretq_s32_u32(vcgtq_s32(fVec, o.fVec)); | |
406 } | |
407 | 333 |
408   static SkNx Min(const SkNx& a, const SkNx& b) { return vminq_s32(a.fVec, b.fVec); } | 334   static SkNx Min(const SkNx& a, const SkNx& b) { return vminq_s32(a.fVec, b.fVec); } |
409 // TODO as needed | 335 |
| 336 int32_t operator[](int k) const { return fVec[k&3]; } |
410 | 337 |
411 SkNx thenElse(const SkNx& t, const SkNx& e) const { | 338 SkNx thenElse(const SkNx& t, const SkNx& e) const { |
412 return vbslq_s32(vreinterpretq_u32_s32(fVec), t.fVec, e.fVec); | 339 return vbslq_s32(vreinterpretq_u32_s32(fVec), t.fVec, e.fVec); |
413 } | 340 } |
414 | 341 |
415 int32x4_t fVec; | 342 int32x4_t fVec; |
416 }; | 343 }; |
417 | 344 |
418 template <> | 345 template <> |
419 class SkNx<4, uint32_t> { | 346 class SkNx<4, uint32_t> { |
420 public: | 347 public: |
421 SkNx(const uint32x4_t& vec) : fVec(vec) {} | 348 SkNx(const uint32x4_t& vec) : fVec(vec) {} |
422 | 349 |
423 SkNx() {} | 350 SkNx() {} |
424 SkNx(uint32_t v) { | 351 SkNx(uint32_t a, uint32_t b, uint32_t c, uint32_t d) : fVec{a,b,c,d} {} |
425 fVec = vdupq_n_u32(v); | 352 SkNx(uint32_t v) : fVec{v,v,v,v} {} |
426 } | |
427 SkNx(uint32_t a, uint32_t b, uint32_t c, uint32_t d) { | |
428 fVec = (uint32x4_t){a,b,c,d}; | |
429 } | |
430 static SkNx Load(const void* ptr) { | |
431 return vld1q_u32((const uint32_t*)ptr); | |
432 } | |
433 void store(void* ptr) const { | |
434 return vst1q_u32((uint32_t*)ptr, fVec); | |
435 } | |
436 uint32_t operator[](int k) const { | |
437 SkASSERT(0 <= k && k < 4); | |
438 union { uint32x4_t v; uint32_t us[4]; } pun = {fVec}; | |
439 return pun.us[k&3]; | |
440 } | |
441 | 353 |
442   SkNx operator + (const SkNx& o) const { return vaddq_u32(fVec, o.fVec); } | 354   static SkNx Load(const void* ptr) { return vld1q_u32((const uint32_t*)ptr); } |
443 SkNx operator - (const SkNx& o) const { return vsubq_u32(fVec, o.fVec); } | 355 void store(void* ptr) const { return vst1q_u32((uint32_t*)ptr, fVec); } |
444 SkNx operator * (const SkNx& o) const { return vmulq_u32(fVec, o.fVec); } | |
445 | 356 |
446 SkNx operator & (const SkNx& o) const { return vandq_u32(fVec, o.fVec); } | 357 SkNx operator + (const SkNx& o) const { return fVec + o.fVec; } |
447 SkNx operator | (const SkNx& o) const { return vorrq_u32(fVec, o.fVec); } | 358 SkNx operator - (const SkNx& o) const { return fVec - o.fVec; } |
448 SkNx operator ^ (const SkNx& o) const { return veorq_u32(fVec, o.fVec); } | 359 SkNx operator * (const SkNx& o) const { return fVec * o.fVec; } |
| 360 |
| 361 SkNx operator & (const SkNx& o) const { return fVec & o.fVec; } |
| 362 SkNx operator | (const SkNx& o) const { return fVec | o.fVec; } |
| 363 SkNx operator ^ (const SkNx& o) const { return fVec ^ o.fVec; } |
449 | 364 |
450 SkNx operator << (int bits) const { SHIFT32(vshlq_n_u32, fVec, bits); } | 365 SkNx operator << (int bits) const { SHIFT32(vshlq_n_u32, fVec, bits); } |
451 SkNx operator >> (int bits) const { SHIFT32(vshrq_n_u32, fVec, bits); } | 366 SkNx operator >> (int bits) const { SHIFT32(vshrq_n_u32, fVec, bits); } |
452 | 367 |
453 SkNx operator == (const SkNx& o) const { return vceqq_u32(fVec, o.fVec); } | 368 SkNx operator == (const SkNx& o) const { return fVec == o.fVec; } |
454 SkNx operator < (const SkNx& o) const { return vcltq_u32(fVec, o.fVec); } | 369 SkNx operator < (const SkNx& o) const { return fVec < o.fVec; } |
455 SkNx operator > (const SkNx& o) const { return vcgtq_u32(fVec, o.fVec); } | 370 SkNx operator > (const SkNx& o) const { return fVec > o.fVec; } |
456 | 371 |
457   static SkNx Min(const SkNx& a, const SkNx& b) { return vminq_u32(a.fVec, b.fVec); } | 372   static SkNx Min(const SkNx& a, const SkNx& b) { return vminq_u32(a.fVec, b.fVec); } |
458 // TODO as needed | 373 |
| 374 uint32_t operator[](int k) const { return fVec[k&3]; } |
459 | 375 |
460 SkNx thenElse(const SkNx& t, const SkNx& e) const { | 376 SkNx thenElse(const SkNx& t, const SkNx& e) const { |
461 return vbslq_u32(fVec, t.fVec, e.fVec); | 377 return vbslq_u32(fVec, t.fVec, e.fVec); |
462 } | 378 } |
463 | 379 |
464 uint32x4_t fVec; | 380 uint32x4_t fVec; |
465 }; | 381 }; |
466 | 382 |
467 #undef SHIFT32 | 383 #undef SHIFT32 |
468 #undef SHIFT16 | 384 #undef SHIFT16 |
(...skipping 81 matching lines...)
550 uint16x4x4_t rgba = {{ | 466 uint16x4x4_t rgba = {{ |
551 r.fVec, | 467 r.fVec, |
552 g.fVec, | 468 g.fVec, |
553 b.fVec, | 469 b.fVec, |
554 a.fVec, | 470 a.fVec, |
555 }}; | 471 }}; |
556 vst4_u16((uint16_t*) dst, rgba); | 472 vst4_u16((uint16_t*) dst, rgba); |
557 } | 473 } |
558 | 474 |
559 #endif//SkNx_neon_DEFINED | 475 #endif//SkNx_neon_DEFINED |