OLD | NEW |
1 /* | 1 /* |
2 * Copyright 2015 Google Inc. | 2 * Copyright 2015 Google Inc. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
6 */ | 6 */ |
7 | 7 |
8 #ifndef SkNx_neon_DEFINED | 8 #ifndef SkNx_neon_DEFINED |
9 #define SkNx_neon_DEFINED | 9 #define SkNx_neon_DEFINED |
10 | 10 |
11 #define SKNX_IS_FAST | 11 #define SKNX_IS_FAST |
12 | 12 |
13 // ARMv8 has vrndmq_f32 to floor 4 floats. Here we emulate it: | |
14 // - round by adding (1<<23) with our sign, then subtracting it; | |
15 // - if that rounded value is bigger than our input, subtract 1. | |
16 static inline float32x4_t armv7_vrndmq_f32(float32x4_t v) { | |
17 auto sign = vandq_u32((uint32x4_t)v, vdupq_n_u32(1<<31)); | |
18 auto bias = (float32x4_t)(vorrq_u32((uint32x4_t)vdupq_n_f32(1<<23), sign)); | |
19 auto rounded = vsubq_f32(vaddq_f32(v, bias), bias); | |
20 auto too_big = vcgtq_f32(rounded, v); | |
21 return vsubq_f32(rounded, (float32x4_t)vandq_u32(too_big, (uint32x4_t)vdupq_n_f32(1))); | |
22 } | |
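For context: the same round-then-correct trick works one lane at a time. A minimal scalar sketch, illustration only and not part of this file; it assumes IEEE-754 binary32, no -ffast-math, and inputs well inside +-2^23:

    static inline float scalar_floor_sketch(float v) {
        // Adding a 2^23 bias with v's sign pushes all fraction bits out of the
        // mantissa, so add-then-subtract rounds v to the nearest integer.
        const float bias = (v < 0 ? -1.0f : 1.0f) * (float)(1 << 23);
        float rounded = (v + bias) - bias;
        // Round-to-nearest may land above v; subtracting 1 turns it into floor.
        return rounded > v ? rounded - 1.0f : rounded;
    }

The vector code above performs the same three steps branch-free: vcgtq_f32 builds an all-ones mask where rounding overshot, and ANDing that mask with the bit pattern of 1.0f yields the per-lane correction to subtract.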
23 | |
24 // Well, this is absurd. The shifts require compile-time constant arguments. | 13 // Well, this is absurd. The shifts require compile-time constant arguments. |
25 | 14 |
26 #define SHIFT8(op, v, bits) switch(bits) { \ | 15 #define SHIFT8(op, v, bits) switch(bits) { \ |
27 case 1: return op(v, 1); case 2: return op(v, 2); case 3: return op(v, 3); \ | 16 case 1: return op(v, 1); case 2: return op(v, 2); case 3: return op(v, 3); \ |
28 case 4: return op(v, 4); case 5: return op(v, 5); case 6: return op(v, 6); \ | 17 case 4: return op(v, 4); case 5: return op(v, 5); case 6: return op(v, 6); \ |
29 case 7: return op(v, 7); \ | 18 case 7: return op(v, 7); \ |
30 } return fVec | 19 } return fVec |
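For context: NEON's _n_ shift intrinsics encode the count as an instruction immediate, so the argument must be a compile-time constant. A minimal illustration, assuming <arm_neon.h>:

    uint16x8_t halve(uint16x8_t v) { return vshrq_n_u16(v, 1); }  // OK: literal count
    // uint16x8_t shr(uint16x8_t v, int bits) {
    //     return vshrq_n_u16(v, bits);  // will not compile: 'bits' is not a constant
    // }

Hence SHIFT8 above and SHIFT16 below spell out every legal count by hand.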
31 | 20 |
32 #define SHIFT16(op, v, bits) if (bits < 8) { SHIFT8(op, v, bits); } switch(bits) { \ | 21 #define SHIFT16(op, v, bits) if (bits < 8) { SHIFT8(op, v, bits); } switch(bits) { \ |
33 case 8: return op(v, 8); case 9: return op(v, 9); \ | 22 case 8: return op(v, 8); case 9: return op(v, 9); \ |
(...skipping 131 matching lines...)
165 SkNx operator<=(const SkNx& o) const { return vreinterpretq_f32_u32(vcleq_f32(fVec, o.fVec)); } | 154 SkNx operator<=(const SkNx& o) const { return vreinterpretq_f32_u32(vcleq_f32(fVec, o.fVec)); } |
166 SkNx operator>=(const SkNx& o) const { return vreinterpretq_f32_u32(vcgeq_f32(fVec, o.fVec)); } | 155 SkNx operator>=(const SkNx& o) const { return vreinterpretq_f32_u32(vcgeq_f32(fVec, o.fVec)); } |
167 SkNx operator!=(const SkNx& o) const { | 156 SkNx operator!=(const SkNx& o) const { |
168 return vreinterpretq_f32_u32(vmvnq_u32(vceqq_f32(fVec, o.fVec))); | 157 return vreinterpretq_f32_u32(vmvnq_u32(vceqq_f32(fVec, o.fVec))); |
169 } | 158 } |
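For context: NEON float compares return uint32x4_t lane masks of all ones or all zeros, and there is no single not-equal compare, so operator!= is vceqq_f32 followed by a bitwise NOT. A worked example with illustrative values:

    // {1,2,3,4} != {1,0,3,0}:
    //   vceqq_f32 -> {0xffffffff, 0x00000000, 0xffffffff, 0x00000000}
    //   vmvnq_u32 -> {0x00000000, 0xffffffff, 0x00000000, 0xffffffff}
    // vreinterpretq_f32_u32 then relabels those bits as floats so the mask
    // still fits SkNx's float-typed interface, e.g. for a later select.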
170 | 159 |
171 static SkNx Min(const SkNx& l, const SkNx& r) { return vminq_f32(l.fVec, r.fVec); } | 160 static SkNx Min(const SkNx& l, const SkNx& r) { return vminq_f32(l.fVec, r.fVec); } |
172 static SkNx Max(const SkNx& l, const SkNx& r) { return vmaxq_f32(l.fVec, r.fVec); } | 161 static SkNx Max(const SkNx& l, const SkNx& r) { return vmaxq_f32(l.fVec, r.fVec); } |
173 | 162 |
174 SkNx abs() const { return vabsq_f32(fVec); } | 163 SkNx abs() const { return vabsq_f32(fVec); } |
175 SkNx floor() const { | |
176 #if defined(SK_CPU_ARM64) | |
177 return vrndmq_f32(fVec); | |
178 #else | |
179 return armv7_vrndmq_f32(fVec); | |
180 #endif | |
181 } | |
182 | |
183 | 164 |
184 SkNx rsqrt0() const { return vrsqrteq_f32(fVec); } | 165 SkNx rsqrt0() const { return vrsqrteq_f32(fVec); } |
185 SkNx rsqrt1() const { | 166 SkNx rsqrt1() const { |
186 float32x4_t est0 = this->rsqrt0().fVec; | 167 float32x4_t est0 = this->rsqrt0().fVec; |
187 return vmulq_f32(vrsqrtsq_f32(fVec, vmulq_f32(est0, est0)), est0); | 168 return vmulq_f32(vrsqrtsq_f32(fVec, vmulq_f32(est0, est0)), est0); |
188 } | 169 } |
189 SkNx rsqrt2() const { | 170 SkNx rsqrt2() const { |
190 float32x4_t est1 = this->rsqrt1().fVec; | 171 float32x4_t est1 = this->rsqrt1().fVec; |
191 return vmulq_f32(vrsqrtsq_f32(fVec, vmulq_f32(est1, est1)), est1); | 172 return vmulq_f32(vrsqrtsq_f32(fVec, vmulq_f32(est1, est1)), est1); |
192 } | 173 } |
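For context: vrsqrteq_f32 yields only a coarse, table-based estimate of 1/sqrt(v) (roughly 8 good bits), and vrsqrtsq_f32(a, b) computes (3 - a*b)/2, so each multiply above is one Newton-Raphson iteration for f(x) = 1/x^2 - v:

    // x' = x * (3 - v*x*x) / 2, roughly doubling the correct bits per step.
    // Worked check with v = 4: x = 0.5 is a fixed point, since
    //   0.5 * (3 - 4*0.25) / 2 = 0.5 = 1/sqrt(4) exactly.

rsqrt1() applies one such step to the raw estimate; rsqrt2() applies a second.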
(...skipping 195 matching lines...)
388 | 369 |
389 template<> inline Sk4h SkNx_cast<uint16_t, uint8_t>(const Sk4b& src) { | 370 template<> inline Sk4h SkNx_cast<uint16_t, uint8_t>(const Sk4b& src) { |
390 return vget_low_u16(vmovl_u8(src.fVec)); | 371 return vget_low_u16(vmovl_u8(src.fVec)); |
391 } | 372 } |
392 | 373 |
393 template<> inline Sk4b SkNx_cast<uint8_t, uint16_t>(const Sk4h& src) { | 374 template<> inline Sk4b SkNx_cast<uint8_t, uint16_t>(const Sk4h& src) { |
394 return vmovn_u16(vcombine_u16(src.fVec, src.fVec)); | 375 return vmovn_u16(vcombine_u16(src.fVec, src.fVec)); |
395 } | 376 } |
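A hedged usage sketch for the two casts above; the SkNx_cast<Dst>(src) call form and the variable names are assumptions for illustration:

    Sk4b b  = ...;                       // four uint8_t lanes
    Sk4h h  = SkNx_cast<uint16_t>(b);    // vmovl_u8: zero-extend each lane to 16 bits
    Sk4b b2 = SkNx_cast<uint8_t>(h);     // vmovn_u16: keep the low 8 bits of each lane
    // vmovn_u16 truncates, so 16-bit values above 255 wrap; clamp first if needed.

The vcombine_u16(src.fVec, src.fVec) in the narrowing cast only widens the half-register to the 128 bits vmovn_u16 expects; the duplicated upper half is discarded.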
396 | 377 |
397 #endif//SkNx_neon_DEFINED | 378 #endif//SkNx_neon_DEFINED |