| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright 2015 Google Inc. | 2 * Copyright 2015 Google Inc. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
| 5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
| 6 */ | 6 */ |
| 7 | 7 |
| 8 #ifndef SkNx_neon_DEFINED | 8 #ifndef SkNx_neon_DEFINED |
| 9 #define SkNx_neon_DEFINED | 9 #define SkNx_neon_DEFINED |
| 10 | 10 |
| (...skipping 15 matching lines...) |
| 26 | 26 |
| 27 #define SHIFT32(op, v, bits) if (bits < 16) { SHIFT16(op, v, bits); } switch(bits) { \ | 27 #define SHIFT32(op, v, bits) if (bits < 16) { SHIFT16(op, v, bits); } switch(bits) { \ |
| 28 case 16: return op(v, 16); case 17: return op(v, 17); case 18: return op(v, 18); \ | 28 case 16: return op(v, 16); case 17: return op(v, 17); case 18: return op(v, 18); \ |
| 29 case 19: return op(v, 19); case 20: return op(v, 20); case 21: return op(v, 21); \ | 29 case 19: return op(v, 19); case 20: return op(v, 20); case 21: return op(v, 21); \ |
| 30 case 22: return op(v, 22); case 23: return op(v, 23); case 24: return op(v, 24); \ | 30 case 22: return op(v, 22); case 23: return op(v, 23); case 24: return op(v, 24); \ |
| 31 case 25: return op(v, 25); case 26: return op(v, 26); case 27: return op(v, 27); \ | 31 case 25: return op(v, 25); case 26: return op(v, 26); case 27: return op(v, 27); \ |
| 32 case 28: return op(v, 28); case 29: return op(v, 29); case 30: return op(v, 30); \ | 32 case 28: return op(v, 28); case 29: return op(v, 29); case 30: return op(v, 30); \ |
| 33 case 31: return op(v, 31); } return fVec | 33 case 31: return op(v, 31); } return fVec |
| 34 | 34 |
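Aside: the SHIFT8/SHIFT16/SHIFT32 macros exist because NEON's immediate-shift intrinsics (vshlq_n_s32 and friends) only accept a compile-time-constant shift count, so a runtime `bits` has to be funneled through a switch whose cases all pass literals. A minimal usage sketch (hypothetical caller; the real shift operators live in a skipped hunk):

    // Dispatches a runtime shift amount to compile-time-immediate cases.
    SkNi operator << (int bits) const { SHIFT32(vshlq_n_s32, fVec, bits); }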
| 35 template <> | 35 template <> |
| 36 class SkNb<2, 4> { | |
| 37 public: | |
| 38 SkNb(uint32x2_t vec) : fVec(vec) {} | |
| 39 | |
| 40 SkNb() {} | |
| 41 bool allTrue() const { return vget_lane_u32(fVec, 0) && vget_lane_u32(fVec, 1); } | |
| 42 bool anyTrue() const { return vget_lane_u32(fVec, 0) || vget_lane_u32(fVec, 1); } | |
| 43 | |
| 44 uint32x2_t fVec; | |
| 45 }; | |
| 46 | |
| 47 template <> | |
| 48 class SkNb<4, 4> { | |
| 49 public: | |
| 50 SkNb(uint32x4_t vec) : fVec(vec) {} | |
| 51 | |
| 52 SkNb() {} | |
| 53 bool allTrue() const { return vgetq_lane_u32(fVec, 0) && vgetq_lane_u32(fVec, 1) | |
| 54 && vgetq_lane_u32(fVec, 2) && vgetq_lane_u32(fVec, 3); } | |
| 55 bool anyTrue() const { return vgetq_lane_u32(fVec, 0) || vgetq_lane_u32(fVec, 1) | |
| 56 || vgetq_lane_u32(fVec, 2) || vgetq_lane_u32(fVec, 3); } | |
| 57 | |
| 58 uint32x4_t fVec; | |
| 59 }; | |
| 60 | |
| 61 template <> | |
| 62 class SkNf<2, float> { | 36 class SkNf<2, float> { |
| 63 typedef SkNb<2, 4> Nb; | |
| 64 public: | 37 public: |
| 65 SkNf(float32x2_t vec) : fVec(vec) {} | 38 SkNf(float32x2_t vec) : fVec(vec) {} |
| 66 | 39 |
| 67 SkNf() {} | 40 SkNf() {} |
| 68 explicit SkNf(float val) : fVec(vdup_n_f32(val)) {} | 41 explicit SkNf(float val) : fVec(vdup_n_f32(val)) {} |
| 69 static SkNf Load(const float vals[2]) { return vld1_f32(vals); } | 42 static SkNf Load(const float vals[2]) { return vld1_f32(vals); } |
| 70 SkNf(float a, float b) { fVec = (float32x2_t) { a, b }; } | 43 SkNf(float a, float b) { fVec = (float32x2_t) { a, b }; } |
| 71 | 44 |
| 72 void store(float vals[2]) const { vst1_f32(vals, fVec); } | 45 void store(float vals[2]) const { vst1_f32(vals, fVec); } |
| 73 | 46 |
| (...skipping 12 matching lines...) |
| 86 SkNf operator - (const SkNf& o) const { return vsub_f32(fVec, o.fVec); } | 59 SkNf operator - (const SkNf& o) const { return vsub_f32(fVec, o.fVec); } |
| 87 SkNf operator * (const SkNf& o) const { return vmul_f32(fVec, o.fVec); } | 60 SkNf operator * (const SkNf& o) const { return vmul_f32(fVec, o.fVec); } |
| 88 SkNf operator / (const SkNf& o) const { | 61 SkNf operator / (const SkNf& o) const { |
| 89 #if defined(SK_CPU_ARM64) | 62 #if defined(SK_CPU_ARM64) |
| 90 return vdiv_f32(fVec, o.fVec); | 63 return vdiv_f32(fVec, o.fVec); |
| 91 #else | 64 #else |
| 92 return vmul_f32(fVec, o.invert().fVec); | 65 return vmul_f32(fVec, o.invert().fVec); |
| 93 #endif | 66 #endif |
| 94 } | 67 } |
| 95 | 68 |
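Aside: 32-bit ARM NEON has no lane-wise float divide, so the non-ARM64 branch of operator/ multiplies by o.invert(). invert() is defined in a skipped hunk; a sketch of the standard recipe it presumably follows, refining vrecpe_f32's rough estimate with vrecps_f32 (which evaluates 2 - v*e) Newton-Raphson steps:

    // Assumed shape of the reciprocal helper, not shown in this hunk.
    static float32x2_t approx_invert(float32x2_t v) {
        float32x2_t e = vrecpe_f32(v);      // coarse 1/v estimate
        e = vmul_f32(vrecps_f32(v, e), e);  // e *= (2 - v*e)
        e = vmul_f32(vrecps_f32(v, e), e);  // second refinement
        return e;
    }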
| 96 Nb operator == (const SkNf& o) const { return vceq_f32(fVec, o.fVec); } | 69 SkNf operator == (const SkNf& o) const { return vreinterpret_f32_u32(vceq_f32(fVec, o.fVec)); } |
| 97 Nb operator < (const SkNf& o) const { return vclt_f32(fVec, o.fVec); } | 70 SkNf operator < (const SkNf& o) const { return vreinterpret_f32_u32(vclt_f32(fVec, o.fVec)); } |
| 98 Nb operator > (const SkNf& o) const { return vcgt_f32(fVec, o.fVec); } | 71 SkNf operator > (const SkNf& o) const { return vreinterpret_f32_u32(vcgt_f32(fVec, o.fVec)); } |
| 99 Nb operator <= (const SkNf& o) const { return vcle_f32(fVec, o.fVec); } | 72 SkNf operator <= (const SkNf& o) const { return vreinterpret_f32_u32(vcle_f32(fVec, o.fVec)); } |
| 100 Nb operator >= (const SkNf& o) const { return vcge_f32(fVec, o.fVec); } | 73 SkNf operator >= (const SkNf& o) const { return vreinterpret_f32_u32(vcge_f32(fVec, o.fVec)); } |
| 101 Nb operator != (const SkNf& o) const { return vmvn_u32(vceq_f32(fVec, o.fVec)); } | 74 SkNf operator != (const SkNf& o) const { |
| 75 return vreinterpret_f32_u32(vmvn_u32(vceq_f32(fVec, o.fVec))); |
| 76 } |
| 102 | 77 |
| 103 static SkNf Min(const SkNf& l, const SkNf& r) { return vmin_f32(l.fVec, r.fVec); } | 78 static SkNf Min(const SkNf& l, const SkNf& r) { return vmin_f32(l.fVec, r.fVec); } |
| 104 static SkNf Max(const SkNf& l, const SkNf& r) { return vmax_f32(l.fVec, r.fVec); } | 79 static SkNf Max(const SkNf& l, const SkNf& r) { return vmax_f32(l.fVec, r.fVec); } |
| 105 | 80 |
| 106 SkNf rsqrt0() const { return vrsqrte_f32(fVec); } | 81 SkNf rsqrt0() const { return vrsqrte_f32(fVec); } |
| 107 SkNf rsqrt1() const { | 82 SkNf rsqrt1() const { |
| 108 float32x2_t est0 = this->rsqrt0().fVec; | 83 float32x2_t est0 = this->rsqrt0().fVec; |
| 109 return vmul_f32(vrsqrts_f32(fVec, vmul_f32(est0, est0)), est0); | 84 return vmul_f32(vrsqrts_f32(fVec, vmul_f32(est0, est0)), est0); |
| 110 } | 85 } |
| 111 SkNf rsqrt2() const { | 86 SkNf rsqrt2() const { |
| 112 float32x2_t est1 = this->rsqrt1().fVec; | 87 float32x2_t est1 = this->rsqrt1().fVec; |
| 113 return vmul_f32(vrsqrts_f32(fVec, vmul_f32(est1, est1)), est1); | 88 return vmul_f32(vrsqrts_f32(fVec, vmul_f32(est1, est1)), est1); |
| 114 } | 89 } |
| 115 | 90 |
| 116 SkNf sqrt() const { | 91 SkNf sqrt() const { |
| 117 #if defined(SK_CPU_ARM64) | 92 #if defined(SK_CPU_ARM64) |
| 118 return vsqrt_f32(fVec); | 93 return vsqrt_f32(fVec); |
| 119 #else | 94 #else |
| 120 return *this * this->rsqrt2(); | 95 return *this * this->rsqrt2(); |
| 121 #endif | 96 #endif |
| 122 } | 97 } |
| 123 | 98 |
| 124 template <int k> float kth() const { | 99 template <int k> float kth() const { |
| 125 SkASSERT(0 <= k && k < 2); | 100 SkASSERT(0 <= k && k < 2); |
| 126 return vget_lane_f32(fVec, k&1); | 101 return vget_lane_f32(fVec, k&1); |
| 127 } | 102 } |
| 128 | 103 |
| 104 bool allTrue() const { |
| 105 auto v = vreinterpret_u32_f32(fVec); |
| 106 return vget_lane_u32(v,0) && vget_lane_u32(v,1); |
| 107 } |
| 108 bool anyTrue() const { |
| 109 auto v = vreinterpret_u32_f32(fVec); |
| 110 return vget_lane_u32(v,0) || vget_lane_u32(v,1); |
| 111 } |
| 112 |
| 129 float32x2_t fVec; | 113 float32x2_t fVec; |
| 130 }; | 114 }; |
| 131 | 115 |
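Aside: the NEW column folds the old SkNb mask classes into SkNf itself, so a comparison now returns an SkNf whose lanes hold all-ones or all-zero bit patterns reinterpreted as floats. Such masks compose naturally with NEON's bit-select; a sketch of a hypothetical caller (select() is illustrative, not part of this patch):

    // vbsl_f32 takes bits from t where the mask bit is 1, else from e.
    static float32x2_t select(float32x2_t mask, float32x2_t t, float32x2_t e) {
        return vbsl_f32(vreinterpret_u32_f32(mask), t, e);
    }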
| 132 #if defined(SK_CPU_ARM64) | 116 #if defined(SK_CPU_ARM64) |
| 133 template <> | 117 template <> |
| 134 class SkNb<2, 8> { | |
| 135 public: | |
| 136 SkNb(uint64x2_t vec) : fVec(vec) {} | |
| 137 | |
| 138 SkNb() {} | |
| 139 bool allTrue() const { return vgetq_lane_u64(fVec, 0) && vgetq_lane_u64(fVec, 1); } | |
| 140 bool anyTrue() const { return vgetq_lane_u64(fVec, 0) || vgetq_lane_u64(fVec, 1); } | |
| 141 | |
| 142 uint64x2_t fVec; | |
| 143 }; | |
| 144 | |
| 145 template <> | |
| 146 class SkNf<2, double> { | 118 class SkNf<2, double> { |
| 147 typedef SkNb<2, 8> Nb; | |
| 148 public: | 119 public: |
| 149 SkNf(float64x2_t vec) : fVec(vec) {} | 120 SkNf(float64x2_t vec) : fVec(vec) {} |
| 150 | 121 |
| 151 SkNf() {} | 122 SkNf() {} |
| 152 explicit SkNf(double val) : fVec(vdupq_n_f64(val)) {} | 123 explicit SkNf(double val) : fVec(vdupq_n_f64(val)) {} |
| 153 static SkNf Load(const double vals[2]) { return vld1q_f64(vals); } | 124 static SkNf Load(const double vals[2]) { return vld1q_f64(vals); } |
| 154 SkNf(double a, double b) { fVec = (float64x2_t) { a, b }; } | 125 SkNf(double a, double b) { fVec = (float64x2_t) { a, b }; } |
| 155 | 126 |
| 156 void store(double vals[2]) const { vst1q_f64(vals, fVec); } | 127 void store(double vals[2]) const { vst1q_f64(vals, fVec); } |
| 157 | 128 |
| 158 SkNf operator + (const SkNf& o) const { return vaddq_f64(fVec, o.fVec); } | 129 SkNf operator + (const SkNf& o) const { return vaddq_f64(fVec, o.fVec); } |
| 159 SkNf operator - (const SkNf& o) const { return vsubq_f64(fVec, o.fVec); } | 130 SkNf operator - (const SkNf& o) const { return vsubq_f64(fVec, o.fVec); } |
| 160 SkNf operator * (const SkNf& o) const { return vmulq_f64(fVec, o.fVec); } | 131 SkNf operator * (const SkNf& o) const { return vmulq_f64(fVec, o.fVec); } |
| 161 SkNf operator / (const SkNf& o) const { return vdivq_f64(fVec, o.fVec); } | 132 SkNf operator / (const SkNf& o) const { return vdivq_f64(fVec, o.fVec); } |
| 162 | 133 |
| 163 Nb operator == (const SkNf& o) const { return vceqq_f64(fVec, o.fVec); } | 134 // vreinterpretq_f64_u64 and vreinterpretq_f64_u32 don't seem to exist.... weird. |
| 164 Nb operator < (const SkNf& o) const { return vcltq_f64(fVec, o.fVec); } | 135 SkNf operator==(const SkNf& o) const { return (float64x2_t)(vceqq_f64(fVec, o.fVec)); } |
| 165 Nb operator > (const SkNf& o) const { return vcgtq_f64(fVec, o.fVec); } | 136 SkNf operator <(const SkNf& o) const { return (float64x2_t)(vcltq_f64(fVec, o.fVec)); } |
| 166 Nb operator <= (const SkNf& o) const { return vcleq_f64(fVec, o.fVec); } | 137 SkNf operator >(const SkNf& o) const { return (float64x2_t)(vcgtq_f64(fVec, o.fVec)); } |
| 167 Nb operator >= (const SkNf& o) const { return vcgeq_f64(fVec, o.fVec); } | 138 SkNf operator<=(const SkNf& o) const { return (float64x2_t)(vcleq_f64(fVec, o.fVec)); } |
| 168 Nb operator != (const SkNf& o) const { | 139 SkNf operator>=(const SkNf& o) const { return (float64x2_t)(vcgeq_f64(fVec, o.fVec)); } |
| 169 return vreinterpretq_u64_u32(vmvnq_u32(vreinterpretq_u32_u64(vceqq_f64(fVec, o.fVec)))); | 140 SkNf operator != (const SkNf& o) const { |
| 141 return (float64x2_t)(vmvnq_u32(vreinterpretq_u32_u64(vceqq_f64(fVec, o.fVec)))); |
| 170 } | 142 } |
| 171 | 143 |
| 172 static SkNf Min(const SkNf& l, const SkNf& r) { return vminq_f64(l.fVec, r.fVec); } | 144 static SkNf Min(const SkNf& l, const SkNf& r) { return vminq_f64(l.fVec, r.fVec); } |
| 173 static SkNf Max(const SkNf& l, const SkNf& r) { return vmaxq_f64(l.fVec, r.fVec); } | 145 static SkNf Max(const SkNf& l, const SkNf& r) { return vmaxq_f64(l.fVec, r.fVec); } |
| 174 | 146 |
| 175 SkNf sqrt() const { return vsqrtq_f64(fVec); } | 147 SkNf sqrt() const { return vsqrtq_f64(fVec); } |
| 176 | 148 |
| 177 SkNf rsqrt0() const { return vrsqrteq_f64(fVec); } | 149 SkNf rsqrt0() const { return vrsqrteq_f64(fVec); } |
| 178 SkNf rsqrt1() const { | 150 SkNf rsqrt1() const { |
| 179 float64x2_t est0 = this->rsqrt0().fVec; | 151 float64x2_t est0 = this->rsqrt0().fVec; |
| (...skipping 15 matching lines...) |
| 195 est2 = vmulq_f64(vrecpsq_f64(est1, fVec), est1), | 167 est2 = vmulq_f64(vrecpsq_f64(est1, fVec), est1), |
| 196 est3 = vmulq_f64(vrecpsq_f64(est2, fVec), est2); | 168 est3 = vmulq_f64(vrecpsq_f64(est2, fVec), est2); |
| 197 return est3; | 169 return est3; |
| 198 } | 170 } |
| 199 | 171 |
| 200 template <int k> double kth() const { | 172 template <int k> double kth() const { |
| 201 SkASSERT(0 <= k && k < 2); | 173 SkASSERT(0 <= k && k < 2); |
| 202 return vgetq_lane_f64(fVec, k&1); | 174 return vgetq_lane_f64(fVec, k&1); |
| 203 } | 175 } |
| 204 | 176 |
| 177 // vreinterpretq_u64_f64 doesn't seem to exist.... weird. |
| 178 bool allTrue() const { |
| 179 auto v = (uint64x2_t)(fVec); |
| 180 return vgetq_lane_u64(v,0) && vgetq_lane_u64(v,1); |
| 181 } |
| 182 bool anyTrue() const { |
| 183 auto v = (uint64x2_t)(fVec); |
| 184 return vgetq_lane_u64(v,0) || vgetq_lane_u64(v,1); |
| 185 } |
| 186 |
| 205 float64x2_t fVec; | 187 float64x2_t fVec; |
| 206 }; | 188 }; |
| 207 #endif//defined(SK_CPU_ARM64) | 189 #endif//defined(SK_CPU_ARM64) |
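Aside: the C-style casts in the double specialization work around arm_neon.h headers that, as the NEW comment notes, lack the f64 reinterprets. On toolchains that do provide them (assumption: newer AArch64 headers), the same conversion could be spelled explicitly:

    // Sketch of the cast in operator==, with an explicit reinterpret.
    static float64x2_t eq_mask(float64x2_t a, float64x2_t b) {
        return vreinterpretq_f64_u64(vceqq_f64(a, b)); // u64 lanes -> f64 lanes
    }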
| 208 | 190 |
| 209 template <> | 191 template <> |
| 210 class SkNi<4, int> { | 192 class SkNi<4, int> { |
| 211 public: | 193 public: |
| 212 SkNi(const int32x4_t& vec) : fVec(vec) {} | 194 SkNi(const int32x4_t& vec) : fVec(vec) {} |
| 213 | 195 |
| 214 SkNi() {} | 196 SkNi() {} |
| (...skipping 13 matching lines...) |
| 228 template <int k> int kth() const { | 210 template <int k> int kth() const { |
| 229 SkASSERT(0 <= k && k < 4); | 211 SkASSERT(0 <= k && k < 4); |
| 230 return vgetq_lane_s32(fVec, k&3); | 212 return vgetq_lane_s32(fVec, k&3); |
| 231 } | 213 } |
| 232 | 214 |
| 233 int32x4_t fVec; | 215 int32x4_t fVec; |
| 234 }; | 216 }; |
| 235 | 217 |
| 236 template <> | 218 template <> |
| 237 class SkNf<4, float> { | 219 class SkNf<4, float> { |
| 238 typedef SkNb<4, 4> Nb; | |
| 239 public: | 220 public: |
| 240 SkNf(float32x4_t vec) : fVec(vec) {} | 221 SkNf(float32x4_t vec) : fVec(vec) {} |
| 241 | 222 |
| 242 SkNf() {} | 223 SkNf() {} |
| 243 explicit SkNf(float val) : fVec(vdupq_n_f32(val)) {} | 224 explicit SkNf(float val) : fVec(vdupq_n_f32(val)) {} |
| 244 static SkNf Load(const float vals[4]) { return vld1q_f32(vals); } | 225 static SkNf Load(const float vals[4]) { return vld1q_f32(vals); } |
| 245 SkNf(float a, float b, float c, float d) { fVec = (float32x4_t) { a, b, c, d }; } | 226 SkNf(float a, float b, float c, float d) { fVec = (float32x4_t) { a, b, c, d }; } |
| 246 | 227 |
| 247 void store(float vals[4]) const { vst1q_f32(vals, fVec); } | 228 void store(float vals[4]) const { vst1q_f32(vals, fVec); } |
| 248 | 229 |
| (...skipping 14 matching lines...) |
| 263 SkNf operator - (const SkNf& o) const { return vsubq_f32(fVec, o.fVec); } | 244 SkNf operator - (const SkNf& o) const { return vsubq_f32(fVec, o.fVec); } |
| 264 SkNf operator * (const SkNf& o) const { return vmulq_f32(fVec, o.fVec); } | 245 SkNf operator * (const SkNf& o) const { return vmulq_f32(fVec, o.fVec); } |
| 265 SkNf operator / (const SkNf& o) const { | 246 SkNf operator / (const SkNf& o) const { |
| 266 #if defined(SK_CPU_ARM64) | 247 #if defined(SK_CPU_ARM64) |
| 267 return vdivq_f32(fVec, o.fVec); | 248 return vdivq_f32(fVec, o.fVec); |
| 268 #else | 249 #else |
| 269 return vmulq_f32(fVec, o.invert().fVec); | 250 return vmulq_f32(fVec, o.invert().fVec); |
| 270 #endif | 251 #endif |
| 271 } | 252 } |
| 272 | 253 |
| 273 Nb operator == (const SkNf& o) const { return vceqq_f32(fVec, o.fVec); } | 254 SkNf operator==(const SkNf& o) const { return vreinterpretq_f32_u32(vceqq_f32(fVec, o.fVec)); } |
| 274 Nb operator < (const SkNf& o) const { return vcltq_f32(fVec, o.fVec); } | 255 SkNf operator <(const SkNf& o) const { return vreinterpretq_f32_u32(vcltq_f32(fVec, o.fVec)); } |
| 275 Nb operator > (const SkNf& o) const { return vcgtq_f32(fVec, o.fVec); } | 256 SkNf operator >(const SkNf& o) const { return vreinterpretq_f32_u32(vcgtq_f32(fVec, o.fVec)); } |
| 276 Nb operator <= (const SkNf& o) const { return vcleq_f32(fVec, o.fVec); } | 257 SkNf operator<=(const SkNf& o) const { return vreinterpretq_f32_u32(vcleq_f32(fVec, o.fVec)); } |
| 277 Nb operator >= (const SkNf& o) const { return vcgeq_f32(fVec, o.fVec); } | 258 SkNf operator>=(const SkNf& o) const { return vreinterpretq_f32_u32(vcgeq_f32(fVec, o.fVec)); } |
| 278 Nb operator != (const SkNf& o) const { return vmvnq_u32(vceqq_f32(fVec, o.fVec)); } | 259 SkNf operator!=(const SkNf& o) const { |
| 260 return vreinterpretq_f32_u32(vmvnq_u32(vceqq_f32(fVec, o.fVec))); |
| 261 } |
| 279 | 262 |
| 280 static SkNf Min(const SkNf& l, const SkNf& r) { return vminq_f32(l.fVec, r.fVec); } | 263 static SkNf Min(const SkNf& l, const SkNf& r) { return vminq_f32(l.fVec, r.fVec); } |
| 281 static SkNf Max(const SkNf& l, const SkNf& r) { return vmaxq_f32(l.fVec, r.fVec); } | 264 static SkNf Max(const SkNf& l, const SkNf& r) { return vmaxq_f32(l.fVec, r.fVec); } |
| 282 | 265 |
| 283 SkNf rsqrt0() const { return vrsqrteq_f32(fVec); } | 266 SkNf rsqrt0() const { return vrsqrteq_f32(fVec); } |
| 284 SkNf rsqrt1() const { | 267 SkNf rsqrt1() const { |
| 285 float32x4_t est0 = this->rsqrt0().fVec; | 268 float32x4_t est0 = this->rsqrt0().fVec; |
| 286 return vmulq_f32(vrsqrtsq_f32(fVec, vmulq_f32(est0, est0)), est0); | 269 return vmulq_f32(vrsqrtsq_f32(fVec, vmulq_f32(est0, est0)), est0); |
| 287 } | 270 } |
| 288 SkNf rsqrt2() const { | 271 SkNf rsqrt2() const { |
| 289 float32x4_t est1 = this->rsqrt1().fVec; | 272 float32x4_t est1 = this->rsqrt1().fVec; |
| 290 return vmulq_f32(vrsqrtsq_f32(fVec, vmulq_f32(est1, est1)), est1); | 273 return vmulq_f32(vrsqrtsq_f32(fVec, vmulq_f32(est1, est1)), est1); |
| 291 } | 274 } |
| 292 | 275 |
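Aside: each rsqrtN() level applies one Newton-Raphson step: vrsqrtsq_f32(x, e*e) evaluates (3 - x*e*e)/2, so est1 = est0 * (3 - x*est0^2)/2, roughly doubling the bits of precision per step. A scalar model of one step, for illustration only:

    // One Newton-Raphson iteration for 1/sqrt(x), mirroring rsqrt1().
    static float rsqrt_step(float x, float e) {
        return e * (3.0f - x * e * e) * 0.5f;
    }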
| 293 SkNf sqrt() const { | 276 SkNf sqrt() const { |
| 294 #if defined(SK_CPU_ARM64) | 277 #if defined(SK_CPU_ARM64) |
| 295 return vsqrtq_f32(fVec); | 278 return vsqrtq_f32(fVec); |
| 296 #else | 279 #else |
| 297 return *this * this->rsqrt2(); | 280 return *this * this->rsqrt2(); |
| 298 #endif | 281 #endif |
| 299 } | 282 } |
| 300 | 283 |
| 301 template <int k> float kth() const { | 284 template <int k> float kth() const { |
| 302 SkASSERT(0 <= k && k < 4); | 285 SkASSERT(0 <= k && k < 4); |
| 303 return vgetq_lane_f32(fVec, k&3); | 286 return vgetq_lane_f32(fVec, k&3); |
| 304 } | 287 } |
| 305 | 288 |
| 289 bool allTrue() const { |
| 290 auto v = vreinterpretq_u32_f32(fVec); |
| 291 return vgetq_lane_u32(v,0) && vgetq_lane_u32(v,1) |
| 292 && vgetq_lane_u32(v,2) && vgetq_lane_u32(v,3); |
| 293 } |
| 294 bool anyTrue() const { |
| 295 auto v = vreinterpretq_u32_f32(fVec); |
| 296 return vgetq_lane_u32(v,0) || vgetq_lane_u32(v,1) |
| 297 || vgetq_lane_u32(v,2) || vgetq_lane_u32(v,3); |
| 298 } |
| 299 |
| 306 float32x4_t fVec; | 300 float32x4_t fVec; |
| 307 }; | 301 }; |
| 308 | 302 |
| 309 template <> | 303 template <> |
| 310 class SkNi<8, uint16_t> { | 304 class SkNi<8, uint16_t> { |
| 311 public: | 305 public: |
| 312 SkNi(const uint16x8_t& vec) : fVec(vec) {} | 306 SkNi(const uint16x8_t& vec) : fVec(vec) {} |
| 313 | 307 |
| 314 SkNi() {} | 308 SkNi() {} |
| 315 explicit SkNi(uint16_t val) : fVec(vdupq_n_u16(val)) {} | 309 explicit SkNi(uint16_t val) : fVec(vdupq_n_u16(val)) {} |
| (...skipping 40 matching lines...) |
| 356 } | 350 } |
| 357 | 351 |
| 358 void store(uint8_t vals[16]) const { vst1q_u8(vals, fVec); } | 352 void store(uint8_t vals[16]) const { vst1q_u8(vals, fVec); } |
| 359 | 353 |
| 360 SkNi saturatedAdd(const SkNi& o) const { return vqaddq_u8(fVec, o.fVec); } | 354 SkNi saturatedAdd(const SkNi& o) const { return vqaddq_u8(fVec, o.fVec); } |
| 361 | 355 |
| 362 SkNi operator + (const SkNi& o) const { return vaddq_u8(fVec, o.fVec); } | 356 SkNi operator + (const SkNi& o) const { return vaddq_u8(fVec, o.fVec); } |
| 363 SkNi operator - (const SkNi& o) const { return vsubq_u8(fVec, o.fVec); } | 357 SkNi operator - (const SkNi& o) const { return vsubq_u8(fVec, o.fVec); } |
| 364 | 358 |
| 365 static SkNi Min(const SkNi& a, const SkNi& b) { return vminq_u8(a.fVec, b.fVec); } | 359 static SkNi Min(const SkNi& a, const SkNi& b) { return vminq_u8(a.fVec, b.fVec); } |
| 360 SkNi operator < (const SkNi& o) const { return vcltq_u8(fVec, o.fVec); } |
| 366 | 361 |
| 367 template <int k> uint8_t kth() const { | 362 template <int k> uint8_t kth() const { |
| 368 SkASSERT(0 <= k && k < 16); | 363 SkASSERT(0 <= k && k < 16); |
| 369 return vgetq_lane_u8(fVec, k&15); | 364 return vgetq_lane_u8(fVec, k&15); |
| 370 } | 365 } |
| 371 | 366 |
| 367 SkNi thenElse(const SkNi& t, const SkNi& e) const { |
| 368 return vorrq_u8(vandq_u8(t.fVec, fVec), |
| 369 vbicq_u8(e.fVec, fVec)); |
| 370 } |
| 371 |
| 372 uint8x16_t fVec; | 372 uint8x16_t fVec; |
| 373 }; | 373 }; |
| 374 | 374 |
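Aside: thenElse() assembles the select by hand: vandq_u8(t, mask) keeps t's bytes where the mask is all-ones, vbicq_u8(e, mask) computes e & ~mask, and vorrq_u8 merges the two. NEON's bit-select instruction expresses the same thing in one call; an equivalent sketch (select() is illustrative, not part of this patch):

    // vbslq_u8(mask, t, e) == (mask & t) | (~mask & e), bit-wise.
    static uint8x16_t select(uint8x16_t mask, uint8x16_t t, uint8x16_t e) {
        return vbslq_u8(mask, t, e);
    }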
| 375 #undef SHIFT32 | 375 #undef SHIFT32 |
| 376 #undef SHIFT16 | 376 #undef SHIFT16 |
| 377 #undef SHIFT8 | 377 #undef SHIFT8 |
| 378 | 378 |
| 379 } // namespace | 379 } // namespace |
| 380 | 380 |
| 381 #endif//SkNx_neon_DEFINED | 381 #endif//SkNx_neon_DEFINED |