Chromium Code Reviews

Unified Diff: src/opts/SkNx_neon.h

Issue 2196953002: Revert of Tidy up SkNx_neon. (Closed) Base URL: https://skia.googlesource.com/skia.git@master
Patch Set: Created 4 years, 4 months ago
 /*
  * Copyright 2015 Google Inc.
  *
  * Use of this source code is governed by a BSD-style license that can be
  * found in the LICENSE file.
  */

 #ifndef SkNx_neon_DEFINED
 #define SkNx_neon_DEFINED

 #include <arm_neon.h>

 #define SKNX_IS_FAST

 // ARMv8 has vrndmq_f32 to floor 4 floats.  Here we emulate it:
 //   - roundtrip through integers via truncation
 //   - subtract 1 if that's too big (possible for negative values).
 // This restricts the domain of our inputs to a maximum somewhere around 2^31.  Seems plenty big.
 static inline float32x4_t armv7_vrndmq_f32(float32x4_t v) {
-    float32x4_t roundtrip = vcvtq_f32_s32(vcvtq_s32_f32(v));
-    uint32x4_t too_big = roundtrip > v;
-    return roundtrip - (float32x4_t)vandq_u32(too_big, (uint32x4_t)vdupq_n_f32(1));
+    auto roundtrip = vcvtq_f32_s32(vcvtq_s32_f32(v));
+    auto too_big = vcgtq_f32(roundtrip, v);
+    return vsubq_f32(roundtrip, (float32x4_t)vandq_u32(too_big, (uint32x4_t)vdupq_n_f32(1)));
 }

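The comment above is easy to sanity-check in scalar code: truncation rounds toward zero, so for negative non-integers it lands one above the floor. A minimal scalar model (illustrative only, not part of the CL; the real code does this per lane):

    #include <cstdint>
    #include <cassert>

    // Scalar model of armv7_vrndmq_f32: truncate toward zero, then
    // subtract 1 whenever truncation rounded up (negative non-integers).
    static float floor_via_truncation(float v) {
        float roundtrip = (float)(int32_t)v;  // like vcvtq_s32_f32 + vcvtq_f32_s32
        return roundtrip > v ? roundtrip - 1  // "too_big": v was negative and fractional
                             : roundtrip;
    }

    int main() {
        assert(floor_via_truncation( 1.5f) ==  1.0f);
        assert(floor_via_truncation(-1.5f) == -2.0f);
        assert(floor_via_truncation(-2.0f) == -2.0f);  // exact integers are unchanged
    }
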
 // Well, this is absurd.  The shifts require compile-time constant arguments.

 #define SHIFT8(op, v, bits) switch(bits) { \
     case 1: return op(v, 1);  case 2: return op(v, 2);  case 3: return op(v, 3); \
     case 4: return op(v, 4);  case 5: return op(v, 5);  case 6: return op(v, 6); \
     case 7: return op(v, 7); \
     } return fVec

 (...skipping 10 matching lines...)

     case 25: return op(v, 25);  case 26: return op(v, 26);  case 27: return op(v, 27); \
     case 28: return op(v, 28);  case 29: return op(v, 29);  case 30: return op(v, 30); \
     case 31: return op(v, 31); } return fVec

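The immediate-shift intrinsics (vshl_n_u16, vshrq_n_u32, and friends) only accept a compile-time-constant count, so a runtime bits value has to be dispatched through a switch of literal cases. The SHIFT16 and SHIFT32 definitions sit in the elided lines, but assuming they follow the SHIFT8 pattern shown above, SkNx<4, uint16_t>::operator<< boils down to roughly this sketch:

    #include <arm_neon.h>

    // Hypothetical expansion of SHIFT16(vshl_n_u16, v, bits); each case
    // hands the intrinsic the literal constant it requires.
    static uint16x4_t shl(uint16x4_t v, int bits) {
        switch (bits) {
            case  1: return vshl_n_u16(v,  1);  case  2: return vshl_n_u16(v,  2);
            case  3: return vshl_n_u16(v,  3);  case  4: return vshl_n_u16(v,  4);
            case  5: return vshl_n_u16(v,  5);  case  6: return vshl_n_u16(v,  6);
            case  7: return vshl_n_u16(v,  7);  case  8: return vshl_n_u16(v,  8);
            case  9: return vshl_n_u16(v,  9);  case 10: return vshl_n_u16(v, 10);
            case 11: return vshl_n_u16(v, 11);  case 12: return vshl_n_u16(v, 12);
            case 13: return vshl_n_u16(v, 13);  case 14: return vshl_n_u16(v, 14);
            case 15: return vshl_n_u16(v, 15);
        }
        return v;  // bits == 0: shifting by zero is a no-op
    }
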
 template <>
 class SkNx<2, float> {
 public:
     SkNx(float32x2_t vec) : fVec(vec) {}

     SkNx() {}
-    SkNx(float a, float b) : fVec{a,b} {}
-    SkNx(float v) : fVec{v,v} {}
+    SkNx(float val) : fVec(vdup_n_f32(val)) {}
+    static SkNx Load(const void* ptr) { return vld1_f32((const float*)ptr); }
+    SkNx(float a, float b) { fVec = (float32x2_t) { a, b }; }

-    static SkNx Load(const void* ptr) { return vld1_f32((const float*)ptr); }
     void store(void* ptr) const { vst1_f32((float*)ptr, fVec); }

-    SkNx operator + (const SkNx& o) const { return fVec + o.fVec; }
-    SkNx operator - (const SkNx& o) const { return fVec - o.fVec; }
-    SkNx operator * (const SkNx& o) const { return fVec * o.fVec; }
-    SkNx operator / (const SkNx& o) const { return fVec / o.fVec; }
-
-    SkNx operator == (const SkNx& o) const { return fVec == o.fVec; }
-    SkNx operator  < (const SkNx& o) const { return fVec  < o.fVec; }
-    SkNx operator  > (const SkNx& o) const { return fVec  > o.fVec; }
-    SkNx operator <= (const SkNx& o) const { return fVec <= o.fVec; }
-    SkNx operator >= (const SkNx& o) const { return fVec >= o.fVec; }
-    SkNx operator != (const SkNx& o) const { return fVec != o.fVec; }
+    SkNx invert() const {
+        float32x2_t est0 = vrecpe_f32(fVec),
+                    est1 = vmul_f32(vrecps_f32(est0, fVec), est0);
+        return est1;
+    }
+
+    SkNx operator + (const SkNx& o) const { return vadd_f32(fVec, o.fVec); }
+    SkNx operator - (const SkNx& o) const { return vsub_f32(fVec, o.fVec); }
+    SkNx operator * (const SkNx& o) const { return vmul_f32(fVec, o.fVec); }
+    SkNx operator / (const SkNx& o) const {
+    #if defined(SK_CPU_ARM64)
+        return vdiv_f32(fVec, o.fVec);
+    #else
+        float32x2_t est0 = vrecpe_f32(o.fVec),
+                    est1 = vmul_f32(vrecps_f32(est0, o.fVec), est0),
+                    est2 = vmul_f32(vrecps_f32(est1, o.fVec), est1);
+        return vmul_f32(fVec, est2);
+    #endif
+    }
+
+    SkNx operator == (const SkNx& o) const { return vreinterpret_f32_u32(vceq_f32(fVec, o.fVec)); }
+    SkNx operator  < (const SkNx& o) const { return vreinterpret_f32_u32(vclt_f32(fVec, o.fVec)); }
+    SkNx operator  > (const SkNx& o) const { return vreinterpret_f32_u32(vcgt_f32(fVec, o.fVec)); }
+    SkNx operator <= (const SkNx& o) const { return vreinterpret_f32_u32(vcle_f32(fVec, o.fVec)); }
+    SkNx operator >= (const SkNx& o) const { return vreinterpret_f32_u32(vcge_f32(fVec, o.fVec)); }
+    SkNx operator != (const SkNx& o) const {
+        return vreinterpret_f32_u32(vmvn_u32(vceq_f32(fVec, o.fVec)));
+    }

     static SkNx Min(const SkNx& l, const SkNx& r) { return vmin_f32(l.fVec, r.fVec); }
     static SkNx Max(const SkNx& l, const SkNx& r) { return vmax_f32(l.fVec, r.fVec); }

     SkNx rsqrt() const {
         float32x2_t est0 = vrsqrte_f32(fVec);
         return vmul_f32(vrsqrts_f32(fVec, vmul_f32(est0, est0)), est0);
     }

     SkNx sqrt() const {
     #if defined(SK_CPU_ARM64)
         return vsqrt_f32(fVec);
     #else
         float32x2_t est0 = vrsqrte_f32(fVec),
                     est1 = vmul_f32(vrsqrts_f32(fVec, vmul_f32(est0, est0)), est0),
                     est2 = vmul_f32(vrsqrts_f32(fVec, vmul_f32(est1, est1)), est1);
         return vmul_f32(fVec, est2);
     #endif
     }

-    SkNx invert() const {
-        float32x2_t est0 = vrecpe_f32(fVec),
-                    est1 = vmul_f32(vrecps_f32(est0, fVec), est0);
-        return est1;
-    }
-
-    float operator[](int k) const { return fVec[k&1]; }
+    float operator[](int k) const {
+        SkASSERT(0 <= k && k < 2);
+        union { float32x2_t v; float fs[2]; } pun = {fVec};
+        return pun.fs[k&1];
+    }

     bool allTrue() const {
         auto v = vreinterpret_u32_f32(fVec);
         return vget_lane_u32(v,0) && vget_lane_u32(v,1);
     }
     bool anyTrue() const {
         auto v = vreinterpret_u32_f32(fVec);
         return vget_lane_u32(v,0) || vget_lane_u32(v,1);
     }

     float32x2_t fVec;
 };

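The invert() and ARMv7 operator/ paths in the '+' lines are the standard Newton-Raphson reciprocal refinement: vrecpe_f32 produces a low-precision estimate x of 1/d, and vrecps_f32(x, d) computes 2 - d*x, so multiplying gives the next iterate x' = x*(2 - d*x), roughly doubling the correct bits per step. A scalar sketch (the seed value is made up; in hardware it comes from vrecpe_f32):

    #include <cstdio>

    // One Newton-Raphson step for 1/d: x' = x * (2 - d*x).
    // This is what vmul_f32(vrecps_f32(x, d), x) computes per lane.
    static float recip_step(float x, float d) { return x * (2.0f - d*x); }

    int main() {
        float d  = 3.0f;
        float x0 = 0.3f;               // crude seed, standing in for vrecpe_f32
        float x1 = recip_step(x0, d);  // invert() stops here (one step)
        float x2 = recip_step(x1, d);  // operator/ takes a second step
        printf("%.7f %.7f %.7f\n", x0, x1, x2);  // converges toward 0.3333333
    }

rsqrt() and sqrt() follow the same scheme with vrsqrte/vrsqrts, the analogous iteration for 1/sqrt(d).
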
 template <>
 class SkNx<4, float> {
 public:
     SkNx(float32x4_t vec) : fVec(vec) {}

     SkNx() {}
-    SkNx(float a, float b, float c, float d) : fVec{a,b,c,d} {}
-    SkNx(float v) : fVec{v,v,v,v} {}
+    SkNx(float val) : fVec(vdupq_n_f32(val)) {}
+    static SkNx Load(const void* ptr) { return vld1q_f32((const float*)ptr); }
+    SkNx(float a, float b, float c, float d) { fVec = (float32x4_t) { a, b, c, d }; }

-    static SkNx Load(const void* ptr) { return vld1q_f32((const float*)ptr); }
     void store(void* ptr) const { vst1q_f32((float*)ptr, fVec); }
+    SkNx invert() const {
+        float32x4_t est0 = vrecpeq_f32(fVec),
+                    est1 = vmulq_f32(vrecpsq_f32(est0, fVec), est0);
+        return est1;
+    }

-    SkNx operator + (const SkNx& o) const { return fVec + o.fVec; }
-    SkNx operator - (const SkNx& o) const { return fVec - o.fVec; }
-    SkNx operator * (const SkNx& o) const { return fVec * o.fVec; }
-    SkNx operator / (const SkNx& o) const { return fVec / o.fVec; }
+    SkNx operator + (const SkNx& o) const { return vaddq_f32(fVec, o.fVec); }
+    SkNx operator - (const SkNx& o) const { return vsubq_f32(fVec, o.fVec); }
+    SkNx operator * (const SkNx& o) const { return vmulq_f32(fVec, o.fVec); }
+    SkNx operator / (const SkNx& o) const {
+    #if defined(SK_CPU_ARM64)
+        return vdivq_f32(fVec, o.fVec);
+    #else
+        float32x4_t est0 = vrecpeq_f32(o.fVec),
+                    est1 = vmulq_f32(vrecpsq_f32(est0, o.fVec), est0),
+                    est2 = vmulq_f32(vrecpsq_f32(est1, o.fVec), est1);
+        return vmulq_f32(fVec, est2);
+    #endif
+    }

-    SkNx operator==(const SkNx& o) const { return fVec == o.fVec; }
-    SkNx operator <(const SkNx& o) const { return fVec < o.fVec; }
-    SkNx operator >(const SkNx& o) const { return fVec > o.fVec; }
-    SkNx operator<=(const SkNx& o) const { return fVec <= o.fVec; }
-    SkNx operator>=(const SkNx& o) const { return fVec >= o.fVec; }
-    SkNx operator!=(const SkNx& o) const { return fVec != o.fVec; }
+    SkNx operator==(const SkNx& o) const { return vreinterpretq_f32_u32(vceqq_f32(fVec, o.fVec)); }
+    SkNx operator <(const SkNx& o) const { return vreinterpretq_f32_u32(vcltq_f32(fVec, o.fVec)); }
+    SkNx operator >(const SkNx& o) const { return vreinterpretq_f32_u32(vcgtq_f32(fVec, o.fVec)); }
+    SkNx operator<=(const SkNx& o) const { return vreinterpretq_f32_u32(vcleq_f32(fVec, o.fVec)); }
+    SkNx operator>=(const SkNx& o) const { return vreinterpretq_f32_u32(vcgeq_f32(fVec, o.fVec)); }
+    SkNx operator!=(const SkNx& o) const {
+        return vreinterpretq_f32_u32(vmvnq_u32(vceqq_f32(fVec, o.fVec)));
+    }

     static SkNx Min(const SkNx& l, const SkNx& r) { return vminq_f32(l.fVec, r.fVec); }
     static SkNx Max(const SkNx& l, const SkNx& r) { return vmaxq_f32(l.fVec, r.fVec); }

     SkNx abs() const { return vabsq_f32(fVec); }
     SkNx floor() const {
     #if defined(SK_CPU_ARM64)
         return vrndmq_f32(fVec);
     #else
         return armv7_vrndmq_f32(fVec);
     #endif
     }

+
     SkNx rsqrt() const {
         float32x4_t est0 = vrsqrteq_f32(fVec);
         return vmulq_f32(vrsqrtsq_f32(fVec, vmulq_f32(est0, est0)), est0);
     }

     SkNx sqrt() const {
     #if defined(SK_CPU_ARM64)
         return vsqrtq_f32(fVec);
     #else
         float32x4_t est0 = vrsqrteq_f32(fVec),
                     est1 = vmulq_f32(vrsqrtsq_f32(fVec, vmulq_f32(est0, est0)), est0),
                     est2 = vmulq_f32(vrsqrtsq_f32(fVec, vmulq_f32(est1, est1)), est1);
         return vmulq_f32(fVec, est2);
     #endif
     }

-    SkNx invert() const {
-        float32x4_t est0 = vrecpeq_f32(fVec),
-                    est1 = vmulq_f32(vrecpsq_f32(est0, fVec), est0);
-        return est1;
-    }
-
-    float operator[](int k) const { return fVec[k&3]; }
+    float operator[](int k) const {
+        SkASSERT(0 <= k && k < 4);
+        union { float32x4_t v; float fs[4]; } pun = {fVec};
+        return pun.fs[k&3];
+    }

     bool allTrue() const {
         auto v = vreinterpretq_u32_f32(fVec);
         return vgetq_lane_u32(v,0) && vgetq_lane_u32(v,1)
             && vgetq_lane_u32(v,2) && vgetq_lane_u32(v,3);
     }
     bool anyTrue() const {
         auto v = vreinterpretq_u32_f32(fVec);
         return vgetq_lane_u32(v,0) || vgetq_lane_u32(v,1)
             || vgetq_lane_u32(v,2) || vgetq_lane_u32(v,3);
     }

     SkNx thenElse(const SkNx& t, const SkNx& e) const {
         return vbslq_f32(vreinterpretq_u32_f32(fVec), t.fVec, e.fVec);
     }

     float32x4_t fVec;
 };

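thenElse() is a branch-free select: a comparison yields an all-ones or all-zeros mask per lane, and vbslq picks each bit from t where the mask is set, from e where it is clear. A small usage sketch (not from the CL), clamping negative lanes to zero:

    #include <arm_neon.h>
    #include <cstdio>

    int main() {
        float32x4_t v    = (float32x4_t){ -1.0f, 2.0f, -3.0f, 4.0f };
        uint32x4_t  mask = vcltq_f32(v, vdupq_n_f32(0));        // lanewise v < 0
        float32x4_t r    = vbslq_f32(mask, vdupq_n_f32(0), v);  // mask ? 0 : v
        printf("%g %g %g %g\n", vgetq_lane_f32(r,0), vgetq_lane_f32(r,1),
                                vgetq_lane_f32(r,2), vgetq_lane_f32(r,3));
        // prints: 0 2 0 4
    }
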
 // It's possible that for our current use cases, representing this as
 // half a uint16x8_t might be better than representing it as a uint16x4_t.
 // It'd make conversion to Sk4b one step simpler.
 template <>
 class SkNx<4, uint16_t> {
 public:
     SkNx(const uint16x4_t& vec) : fVec(vec) {}

     SkNx() {}
-    SkNx(uint16_t a, uint16_t b, uint16_t c, uint16_t d) : fVec{a,b,c,d} {}
-    SkNx(uint16_t v) : fVec{v,v,v,v} {}
+    SkNx(uint16_t val) : fVec(vdup_n_u16(val)) {}
+    static SkNx Load(const void* ptr) { return vld1_u16((const uint16_t*)ptr); }

-    static SkNx Load(const void* ptr) { return vld1_u16((const uint16_t*)ptr); }
+    SkNx(uint16_t a, uint16_t b, uint16_t c, uint16_t d) {
+        fVec = (uint16x4_t) { a,b,c,d };
+    }
+
     void store(void* ptr) const { vst1_u16((uint16_t*)ptr, fVec); }

-    SkNx operator + (const SkNx& o) const { return fVec + o.fVec; }
-    SkNx operator - (const SkNx& o) const { return fVec - o.fVec; }
-    SkNx operator * (const SkNx& o) const { return fVec * o.fVec; }
+    SkNx operator + (const SkNx& o) const { return vadd_u16(fVec, o.fVec); }
+    SkNx operator - (const SkNx& o) const { return vsub_u16(fVec, o.fVec); }
+    SkNx operator * (const SkNx& o) const { return vmul_u16(fVec, o.fVec); }

     SkNx operator << (int bits) const { SHIFT16(vshl_n_u16, fVec, bits); }
     SkNx operator >> (int bits) const { SHIFT16(vshr_n_u16, fVec, bits); }

     static SkNx Min(const SkNx& a, const SkNx& b) { return vmin_u16(a.fVec, b.fVec); }

-    uint16_t operator[](int k) const { return fVec[k&3]; }
+    uint16_t operator[](int k) const {
+        SkASSERT(0 <= k && k < 4);
+        union { uint16x4_t v; uint16_t us[4]; } pun = {fVec};
+        return pun.us[k&3];
+    }

     SkNx thenElse(const SkNx& t, const SkNx& e) const {
         return vbsl_u16(fVec, t.fVec, e.fVec);
     }

     uint16x4_t fVec;
 };

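A pattern recurring in every class's '+' lines: operator[] reads a lane by punning the vector through a union instead of subscripting fVec directly. The '-' lines' fVec[k] relies on the GCC/Clang vector-extension subscript; the union read expresses the same thing without it, and leaves room for the SkASSERT bounds check. A standalone sketch (ARM target assumed):

    #include <arm_neon.h>
    #include <cstdio>

    int main() {
        uint16x4_t v = (uint16x4_t){ 10, 20, 30, 40 };
        union { uint16x4_t v; uint16_t us[4]; } pun = {v};  // same bits, array view
        printf("%u\n", pun.us[2]);  // prints 30
    }
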
 template <>
 class SkNx<8, uint16_t> {
 public:
     SkNx(const uint16x8_t& vec) : fVec(vec) {}

     SkNx() {}
+    SkNx(uint16_t val) : fVec(vdupq_n_u16(val)) {}
+    static SkNx Load(const void* ptr) { return vld1q_u16((const uint16_t*)ptr); }
+
     SkNx(uint16_t a, uint16_t b, uint16_t c, uint16_t d,
-         uint16_t e, uint16_t f, uint16_t g, uint16_t h) : fVec{a,b,c,d,e,f,g,h} {}
-    SkNx(uint16_t v) : fVec{v,v,v,v,v,v,v,v} {}
+         uint16_t e, uint16_t f, uint16_t g, uint16_t h) {
+        fVec = (uint16x8_t) { a,b,c,d, e,f,g,h };
+    }

-    static SkNx Load(const void* ptr) { return vld1q_u16((const uint16_t*)ptr); }
     void store(void* ptr) const { vst1q_u16((uint16_t*)ptr, fVec); }

-    SkNx operator + (const SkNx& o) const { return fVec + o.fVec; }
-    SkNx operator - (const SkNx& o) const { return fVec - o.fVec; }
-    SkNx operator * (const SkNx& o) const { return fVec * o.fVec; }
+    SkNx operator + (const SkNx& o) const { return vaddq_u16(fVec, o.fVec); }
+    SkNx operator - (const SkNx& o) const { return vsubq_u16(fVec, o.fVec); }
+    SkNx operator * (const SkNx& o) const { return vmulq_u16(fVec, o.fVec); }

     SkNx operator << (int bits) const { SHIFT16(vshlq_n_u16, fVec, bits); }
     SkNx operator >> (int bits) const { SHIFT16(vshrq_n_u16, fVec, bits); }

     static SkNx Min(const SkNx& a, const SkNx& b) { return vminq_u16(a.fVec, b.fVec); }

-    uint16_t operator[](int k) const { return fVec[k&7]; }
+    uint16_t operator[](int k) const {
+        SkASSERT(0 <= k && k < 8);
+        union { uint16x8_t v; uint16_t us[8]; } pun = {fVec};
+        return pun.us[k&7];
+    }

     SkNx thenElse(const SkNx& t, const SkNx& e) const {
         return vbslq_u16(fVec, t.fVec, e.fVec);
     }

     uint16x8_t fVec;
 };

 template <>
 class SkNx<4, uint8_t> {
 public:
     typedef uint32_t __attribute__((aligned(1))) unaligned_uint32_t;

     SkNx(const uint8x8_t& vec) : fVec(vec) {}

     SkNx() {}
-    SkNx(uint8_t a, uint8_t b, uint8_t c, uint8_t d) : fVec{a,b,c,d,0,0,0,0} {}
-    SkNx(uint8_t v) : fVec{v,v,v,v,0,0,0,0} {}
-
+    SkNx(uint8_t a, uint8_t b, uint8_t c, uint8_t d) {
+        fVec = (uint8x8_t){a,b,c,d, 0,0,0,0};
+    }
     static SkNx Load(const void* ptr) {
         return (uint8x8_t)vld1_dup_u32((const unaligned_uint32_t*)ptr);
     }
     void store(void* ptr) const {
         return vst1_lane_u32((unaligned_uint32_t*)ptr, (uint32x2_t)fVec, 0);
     }
+    uint8_t operator[](int k) const {
+        SkASSERT(0 <= k && k < 4);
+        union { uint8x8_t v; uint8_t us[8]; } pun = {fVec};
+        return pun.us[k&3];
+    }

-    uint8_t operator[](int k) const { return fVec[k&3]; }
+    // TODO as needed

     uint8x8_t fVec;
 };

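The Sk4b load/store trick above treats four bytes (one RGBA pixel, say) as a single 32-bit lane: vld1_dup_u32 fetches all four in one load, and the aligned(1) typedef tells the compiler the pointer may not be 4-byte aligned. A standalone sketch of the same round trip:

    #include <arm_neon.h>
    #include <cstdint>
    #include <cstdio>

    typedef uint32_t __attribute__((aligned(1))) unaligned_uint32_t;

    int main() {
        uint8_t src[4] = {1, 2, 3, 4};
        // Load 4 bytes as one u32 lane, broadcast into both lanes.
        uint8x8_t v = (uint8x8_t)vld1_dup_u32((const unaligned_uint32_t*)src);
        uint8_t dst[4];
        // Store lane 0 back out as one u32.
        vst1_lane_u32((unaligned_uint32_t*)dst, (uint32x2_t)v, 0);
        printf("%u %u %u %u\n", dst[0], dst[1], dst[2], dst[3]);  // 1 2 3 4
    }
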
 template <>
 class SkNx<16, uint8_t> {
 public:
     SkNx(const uint8x16_t& vec) : fVec(vec) {}

     SkNx() {}
+    SkNx(uint8_t val) : fVec(vdupq_n_u8(val)) {}
+    static SkNx Load(const void* ptr) { return vld1q_u8((const uint8_t*)ptr); }
+
     SkNx(uint8_t a, uint8_t b, uint8_t c, uint8_t d,
          uint8_t e, uint8_t f, uint8_t g, uint8_t h,
          uint8_t i, uint8_t j, uint8_t k, uint8_t l,
-         uint8_t m, uint8_t n, uint8_t o, uint8_t p) : fVec{a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p} {}
-    SkNx(uint8_t v) : fVec{v,v,v,v,v,v,v,v,v,v,v,v,v,v,v,v} {}
+         uint8_t m, uint8_t n, uint8_t o, uint8_t p) {
+        fVec = (uint8x16_t) { a,b,c,d, e,f,g,h, i,j,k,l, m,n,o,p };
+    }

-    static SkNx Load(const void* ptr) { return vld1q_u8((const uint8_t*)ptr); }
     void store(void* ptr) const { vst1q_u8((uint8_t*)ptr, fVec); }

     SkNx saturatedAdd(const SkNx& o) const { return vqaddq_u8(fVec, o.fVec); }
-    SkNx operator + (const SkNx& o) const { return fVec + o.fVec; }
-    SkNx operator - (const SkNx& o) const { return fVec - o.fVec; }

-    SkNx operator < (const SkNx& o) const { return fVec < o.fVec; }
+    SkNx operator + (const SkNx& o) const { return vaddq_u8(fVec, o.fVec); }
+    SkNx operator - (const SkNx& o) const { return vsubq_u8(fVec, o.fVec); }

     static SkNx Min(const SkNx& a, const SkNx& b) { return vminq_u8(a.fVec, b.fVec); }
+    SkNx operator < (const SkNx& o) const { return vcltq_u8(fVec, o.fVec); }

-    uint8_t operator[](int k) const { return fVec[k&15]; }
+    uint8_t operator[](int k) const {
+        SkASSERT(0 <= k && k < 16);
+        union { uint8x16_t v; uint8_t us[16]; } pun = {fVec};
+        return pun.us[k&15];
+    }

     SkNx thenElse(const SkNx& t, const SkNx& e) const {
         return vbslq_u8(fVec, t.fVec, e.fVec);
     }

     uint8x16_t fVec;
 };

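saturatedAdd() maps to vqaddq_u8, which clamps at 255 instead of wrapping, exactly what 8-bit color math wants. A usage sketch (not from the CL):

    #include <arm_neon.h>
    #include <cstdio>

    int main() {
        uint8x16_t a   = vdupq_n_u8(200),
                   b   = vdupq_n_u8(100);
        uint8x16_t sum = vqaddq_u8(a, b);       // saturates at 255
        printf("%u\n", vgetq_lane_u8(sum, 0));  // prints 255, not (200+100)%256 == 44
    }
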
 template <>
 class SkNx<4, int32_t> {
 public:
     SkNx(const int32x4_t& vec) : fVec(vec) {}

     SkNx() {}
-    SkNx(int32_t a, int32_t b, int32_t c, int32_t d) : fVec{a,b,c,d} {}
-    SkNx(int32_t v) : fVec{v,v,v,v} {}
-
-    static SkNx Load(const void* ptr) { return vld1q_s32((const int32_t*)ptr); }
-    void store(void* ptr) const { return vst1q_s32((int32_t*)ptr, fVec); }
+    SkNx(int32_t v) {
+        fVec = vdupq_n_s32(v);
+    }
+    SkNx(int32_t a, int32_t b, int32_t c, int32_t d) {
+        fVec = (int32x4_t){a,b,c,d};
+    }
+    static SkNx Load(const void* ptr) {
+        return vld1q_s32((const int32_t*)ptr);
+    }
+    void store(void* ptr) const {
+        return vst1q_s32((int32_t*)ptr, fVec);
+    }
+    int32_t operator[](int k) const {
+        SkASSERT(0 <= k && k < 4);
+        union { int32x4_t v; int32_t is[4]; } pun = {fVec};
+        return pun.is[k&3];
+    }

-    SkNx operator + (const SkNx& o) const { return fVec + o.fVec; }
-    SkNx operator - (const SkNx& o) const { return fVec - o.fVec; }
-    SkNx operator * (const SkNx& o) const { return fVec * o.fVec; }
+    SkNx operator + (const SkNx& o) const { return vaddq_s32(fVec, o.fVec); }
+    SkNx operator - (const SkNx& o) const { return vsubq_s32(fVec, o.fVec); }
+    SkNx operator * (const SkNx& o) const { return vmulq_s32(fVec, o.fVec); }

-    SkNx operator & (const SkNx& o) const { return fVec & o.fVec; }
-    SkNx operator | (const SkNx& o) const { return fVec | o.fVec; }
-    SkNx operator ^ (const SkNx& o) const { return fVec ^ o.fVec; }
+    SkNx operator & (const SkNx& o) const { return vandq_s32(fVec, o.fVec); }
+    SkNx operator | (const SkNx& o) const { return vorrq_s32(fVec, o.fVec); }
+    SkNx operator ^ (const SkNx& o) const { return veorq_s32(fVec, o.fVec); }

     SkNx operator << (int bits) const { SHIFT32(vshlq_n_s32, fVec, bits); }
     SkNx operator >> (int bits) const { SHIFT32(vshrq_n_s32, fVec, bits); }

-    SkNx operator == (const SkNx& o) const { return fVec == o.fVec; }
-    SkNx operator < (const SkNx& o) const { return fVec < o.fVec; }
-    SkNx operator > (const SkNx& o) const { return fVec > o.fVec; }
+    SkNx operator == (const SkNx& o) const {
+        return vreinterpretq_s32_u32(vceqq_s32(fVec, o.fVec));
+    }
+    SkNx operator < (const SkNx& o) const {
+        return vreinterpretq_s32_u32(vcltq_s32(fVec, o.fVec));
+    }
+    SkNx operator > (const SkNx& o) const {
+        return vreinterpretq_s32_u32(vcgtq_s32(fVec, o.fVec));
+    }

     static SkNx Min(const SkNx& a, const SkNx& b) { return vminq_s32(a.fVec, b.fVec); }
-
-    int32_t operator[](int k) const { return fVec[k&3]; }
+    // TODO as needed

     SkNx thenElse(const SkNx& t, const SkNx& e) const {
         return vbslq_s32(vreinterpretq_u32_s32(fVec), t.fVec, e.fVec);
     }

     int32x4_t fVec;
 };

 template <>
 class SkNx<4, uint32_t> {
 public:
     SkNx(const uint32x4_t& vec) : fVec(vec) {}

     SkNx() {}
-    SkNx(uint32_t a, uint32_t b, uint32_t c, uint32_t d) : fVec{a,b,c,d} {}
-    SkNx(uint32_t v) : fVec{v,v,v,v} {}
-
-    static SkNx Load(const void* ptr) { return vld1q_u32((const uint32_t*)ptr); }
-    void store(void* ptr) const { return vst1q_u32((uint32_t*)ptr, fVec); }
+    SkNx(uint32_t v) {
+        fVec = vdupq_n_u32(v);
+    }
+    SkNx(uint32_t a, uint32_t b, uint32_t c, uint32_t d) {
+        fVec = (uint32x4_t){a,b,c,d};
+    }
+    static SkNx Load(const void* ptr) {
+        return vld1q_u32((const uint32_t*)ptr);
+    }
+    void store(void* ptr) const {
+        return vst1q_u32((uint32_t*)ptr, fVec);
+    }
+    uint32_t operator[](int k) const {
+        SkASSERT(0 <= k && k < 4);
+        union { uint32x4_t v; uint32_t us[4]; } pun = {fVec};
+        return pun.us[k&3];
+    }

-    SkNx operator + (const SkNx& o) const { return fVec + o.fVec; }
-    SkNx operator - (const SkNx& o) const { return fVec - o.fVec; }
-    SkNx operator * (const SkNx& o) const { return fVec * o.fVec; }
+    SkNx operator + (const SkNx& o) const { return vaddq_u32(fVec, o.fVec); }
+    SkNx operator - (const SkNx& o) const { return vsubq_u32(fVec, o.fVec); }
+    SkNx operator * (const SkNx& o) const { return vmulq_u32(fVec, o.fVec); }

-    SkNx operator & (const SkNx& o) const { return fVec & o.fVec; }
-    SkNx operator | (const SkNx& o) const { return fVec | o.fVec; }
-    SkNx operator ^ (const SkNx& o) const { return fVec ^ o.fVec; }
+    SkNx operator & (const SkNx& o) const { return vandq_u32(fVec, o.fVec); }
+    SkNx operator | (const SkNx& o) const { return vorrq_u32(fVec, o.fVec); }
+    SkNx operator ^ (const SkNx& o) const { return veorq_u32(fVec, o.fVec); }

     SkNx operator << (int bits) const { SHIFT32(vshlq_n_u32, fVec, bits); }
     SkNx operator >> (int bits) const { SHIFT32(vshrq_n_u32, fVec, bits); }

-    SkNx operator == (const SkNx& o) const { return fVec == o.fVec; }
-    SkNx operator < (const SkNx& o) const { return fVec < o.fVec; }
-    SkNx operator > (const SkNx& o) const { return fVec > o.fVec; }
+    SkNx operator == (const SkNx& o) const { return vceqq_u32(fVec, o.fVec); }
+    SkNx operator < (const SkNx& o) const { return vcltq_u32(fVec, o.fVec); }
+    SkNx operator > (const SkNx& o) const { return vcgtq_u32(fVec, o.fVec); }

     static SkNx Min(const SkNx& a, const SkNx& b) { return vminq_u32(a.fVec, b.fVec); }
-
-    uint32_t operator[](int k) const { return fVec[k&3]; }
+    // TODO as needed

     SkNx thenElse(const SkNx& t, const SkNx& e) const {
         return vbslq_u32(fVec, t.fVec, e.fVec);
     }

     uint32x4_t fVec;
 };

 #undef SHIFT32
 #undef SHIFT16

 (...skipping 81 matching lines...)

     uint16x4x4_t rgba = {{
         r.fVec,
         g.fVec,
         b.fVec,
         a.fVec,
     }};
     vst4_u16((uint16_t*) dst, rgba);
 }

 #endif//SkNx_neon_DEFINED
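
The store at the end of the visible diff uses vst4_u16, which interleaves four 4-lane registers on the way to memory: per-channel planes r,g,b,a land as r0 g0 b0 a0 r1 g1 b1 a1 and so on. A standalone sketch:

    #include <arm_neon.h>
    #include <cstdio>

    int main() {
        uint16x4x4_t rgba = {{
            vdup_n_u16(1),  // r plane
            vdup_n_u16(2),  // g plane
            vdup_n_u16(3),  // b plane
            vdup_n_u16(4),  // a plane
        }};
        uint16_t dst[16];
        vst4_u16(dst, rgba);  // writes 1 2 3 4 1 2 3 4 ...
        printf("%u %u %u %u %u\n", dst[0], dst[1], dst[2], dst[3], dst[4]);  // 1 2 3 4 1
    }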