Index: src/opts/SkNx_neon.h
diff --git a/src/opts/SkNx_neon.h b/src/opts/SkNx_neon.h
index d1760b3d945a9a59c45c50534b01358ef56a641d..da926e0b4c2fafe57bba7b919e5d668e21ea6171 100644
--- a/src/opts/SkNx_neon.h
+++ b/src/opts/SkNx_neon.h
@@ -33,7 +33,34 @@
     case 31: return op(v, 31); } return fVec
 
 template <>
+class SkNb<2, 4> {
+public:
+    SkNb(uint32x2_t vec) : fVec(vec) {}
+
+    SkNb() {}
+    bool allTrue() const { return vget_lane_u32(fVec, 0) && vget_lane_u32(fVec, 1); }
+    bool anyTrue() const { return vget_lane_u32(fVec, 0) || vget_lane_u32(fVec, 1); }
+
+    uint32x2_t fVec;
+};
+
+template <>
+class SkNb<4, 4> {
+public:
+    SkNb(uint32x4_t vec) : fVec(vec) {}
+
+    SkNb() {}
+    bool allTrue() const { return vgetq_lane_u32(fVec, 0) && vgetq_lane_u32(fVec, 1)
+                               && vgetq_lane_u32(fVec, 2) && vgetq_lane_u32(fVec, 3); }
+    bool anyTrue() const { return vgetq_lane_u32(fVec, 0) || vgetq_lane_u32(fVec, 1)
+                               || vgetq_lane_u32(fVec, 2) || vgetq_lane_u32(fVec, 3); }
+
+    uint32x4_t fVec;
+};
+
+template <>
 class SkNf<2, float> {
+    typedef SkNb<2, 4> Nb;
 public:
     SkNf(float32x2_t vec) : fVec(vec) {}
 
@@ -66,14 +93,12 @@
 #endif
     }
 
-    SkNf operator == (const SkNf& o) const { return vreinterpret_f32_u32(vceq_f32(fVec, o.fVec)); }
-    SkNf operator < (const SkNf& o) const { return vreinterpret_f32_u32(vclt_f32(fVec, o.fVec)); }
-    SkNf operator > (const SkNf& o) const { return vreinterpret_f32_u32(vcgt_f32(fVec, o.fVec)); }
-    SkNf operator <= (const SkNf& o) const { return vreinterpret_f32_u32(vcle_f32(fVec, o.fVec)); }
-    SkNf operator >= (const SkNf& o) const { return vreinterpret_f32_u32(vcge_f32(fVec, o.fVec)); }
-    SkNf operator != (const SkNf& o) const {
-        return vreinterpret_f32_u32(vmvn_u32(vceq_f32(fVec, o.fVec)));
-    }
+    Nb operator == (const SkNf& o) const { return vceq_f32(fVec, o.fVec); }
+    Nb operator < (const SkNf& o) const { return vclt_f32(fVec, o.fVec); }
+    Nb operator > (const SkNf& o) const { return vcgt_f32(fVec, o.fVec); }
+    Nb operator <= (const SkNf& o) const { return vcle_f32(fVec, o.fVec); }
+    Nb operator >= (const SkNf& o) const { return vcge_f32(fVec, o.fVec); }
+    Nb operator != (const SkNf& o) const { return vmvn_u32(vceq_f32(fVec, o.fVec)); }
 
     static SkNf Min(const SkNf& l, const SkNf& r) { return vmin_f32(l.fVec, r.fVec); }
     static SkNf Max(const SkNf& l, const SkNf& r) { return vmax_f32(l.fVec, r.fVec); }
@@ -101,21 +126,25 @@
         return vget_lane_f32(fVec, k&1);
     }
 
-    bool allTrue() const {
-        auto v = vreinterpret_u32_f32(fVec);
-        return vget_lane_u32(v,0) && vget_lane_u32(v,1);
-    }
-    bool anyTrue() const {
-        auto v = vreinterpret_u32_f32(fVec);
-        return vget_lane_u32(v,0) || vget_lane_u32(v,1);
-    }
-
     float32x2_t fVec;
 };
 
 #if defined(SK_CPU_ARM64)
 template <>
+class SkNb<2, 8> {
+public:
+    SkNb(uint64x2_t vec) : fVec(vec) {}
+
+    SkNb() {}
+    bool allTrue() const { return vgetq_lane_u64(fVec, 0) && vgetq_lane_u64(fVec, 1); }
+    bool anyTrue() const { return vgetq_lane_u64(fVec, 0) || vgetq_lane_u64(fVec, 1); }
+
+    uint64x2_t fVec;
+};
+
+template <>
 class SkNf<2, double> {
+    typedef SkNb<2, 8> Nb;
 public:
     SkNf(float64x2_t vec) : fVec(vec) {}
 
@@ -131,13 +160,13 @@
     SkNf operator * (const SkNf& o) const { return vmulq_f64(fVec, o.fVec); }
     SkNf operator / (const SkNf& o) const { return vdivq_f64(fVec, o.fVec); }
 
-    SkNf operator==(const SkNf& o) const { return vreinterpretq_f64_u64(vceqq_f64(fVec, o.fVec)); }
-    SkNf operator <(const SkNf& o) const { return vreinterpretq_f64_u64(vcltq_f64(fVec, o.fVec)); }
-    SkNf operator >(const SkNf& o) const { return vreinterpretq_f64_u64(vcgtq_f64(fVec, o.fVec)); }
-    SkNf operator<=(const SkNf& o) const { return vreinterpretq_f64_u64(vcleq_f64(fVec, o.fVec)); }
-    SkNf operator>=(const SkNf& o) const { return vreinterpretq_f64_u64(vcgeq_f64(fVec, o.fVec)); }
-    SkNf operator != (const SkNf& o) const {
-        return vreinterpretq_f64_u32(vmvnq_u32(vreinterpretq_u32_u64(vceqq_f64(fVec, o.fVec))));
+    Nb operator == (const SkNf& o) const { return vceqq_f64(fVec, o.fVec); }
+    Nb operator < (const SkNf& o) const { return vcltq_f64(fVec, o.fVec); }
+    Nb operator > (const SkNf& o) const { return vcgtq_f64(fVec, o.fVec); }
+    Nb operator <= (const SkNf& o) const { return vcleq_f64(fVec, o.fVec); }
+    Nb operator >= (const SkNf& o) const { return vcgeq_f64(fVec, o.fVec); }
+    Nb operator != (const SkNf& o) const {
+        return vreinterpretq_u64_u32(vmvnq_u32(vreinterpretq_u32_u64(vceqq_f64(fVec, o.fVec))));
     }
 
     static SkNf Min(const SkNf& l, const SkNf& r) { return vminq_f64(l.fVec, r.fVec); }
@@ -173,15 +202,6 @@
         return vgetq_lane_f64(fVec, k&1);
     }
 
-    bool allTrue() const {
-        auto v = vreinterpretq_u64_f64(fVec);
-        return vgetq_lane_u64(v,0) && vgetq_lane_u64(v,1);
-    }
-    bool anyTrue() const {
-        auto v = vreinterpretq_u64_f64(fVec);
-        return vgetq_lane_u64(v,0) || vgetq_lane_u64(v,1);
-    }
-
     float64x2_t fVec;
 };
 #endif//defined(SK_CPU_ARM64)
@@ -215,6 +235,7 @@
 
 template <>
 class SkNf<4, float> {
+    typedef SkNb<4, 4> Nb;
 public:
     SkNf(float32x4_t vec) : fVec(vec) {}
 
@@ -249,14 +270,12 @@
 #endif
     }
 
-    SkNf operator==(const SkNf& o) const { return vreinterpretq_f32_u32(vceqq_f32(fVec, o.fVec)); }
-    SkNf operator <(const SkNf& o) const { return vreinterpretq_f32_u32(vcltq_f32(fVec, o.fVec)); }
-    SkNf operator >(const SkNf& o) const { return vreinterpretq_f32_u32(vcgtq_f32(fVec, o.fVec)); }
-    SkNf operator<=(const SkNf& o) const { return vreinterpretq_f32_u32(vcleq_f32(fVec, o.fVec)); }
-    SkNf operator>=(const SkNf& o) const { return vreinterpretq_f32_u32(vcgeq_f32(fVec, o.fVec)); }
-    SkNf operator!=(const SkNf& o) const {
-        return vreinterpretq_f32_u32(vmvnq_u32(vceqq_f32(fVec, o.fVec)));
-    }
+    Nb operator == (const SkNf& o) const { return vceqq_f32(fVec, o.fVec); }
+    Nb operator < (const SkNf& o) const { return vcltq_f32(fVec, o.fVec); }
+    Nb operator > (const SkNf& o) const { return vcgtq_f32(fVec, o.fVec); }
+    Nb operator <= (const SkNf& o) const { return vcleq_f32(fVec, o.fVec); }
+    Nb operator >= (const SkNf& o) const { return vcgeq_f32(fVec, o.fVec); }
+    Nb operator != (const SkNf& o) const { return vmvnq_u32(vceqq_f32(fVec, o.fVec)); }
 
     static SkNf Min(const SkNf& l, const SkNf& r) { return vminq_f32(l.fVec, r.fVec); }
     static SkNf Max(const SkNf& l, const SkNf& r) { return vmaxq_f32(l.fVec, r.fVec); }
@@ -284,17 +303,6 @@
         return vgetq_lane_f32(fVec, k&3);
     }
 
-    bool allTrue() const {
-        auto v = vreinterpretq_u32_f32(fVec);
-        return vgetq_lane_u32(v,0) && vgetq_lane_u32(v,1)
-            && vgetq_lane_u32(v,2) && vgetq_lane_u32(v,3);
-    }
-    bool anyTrue() const {
-        auto v = vreinterpretq_u32_f32(fVec);
-        return vgetq_lane_u32(v,0) || vgetq_lane_u32(v,1)
-            || vgetq_lane_u32(v,2) || vgetq_lane_u32(v,3);
-    }
-
     float32x4_t fVec;
 };
 
@@ -355,18 +363,12 @@
     SkNi operator - (const SkNi& o) const { return vsubq_u8(fVec, o.fVec); }
 
     static SkNi Min(const SkNi& a, const SkNi& b) { return vminq_u8(a.fVec, b.fVec); }
-    SkNi operator < (const SkNi& o) const { return vcltq_u8(fVec, o.fVec); }
 
     template <int k> uint8_t kth() const {
        SkASSERT(0 <= k && k < 15);
        return vgetq_lane_u8(fVec, k&16);
     }
 
-    SkNi thenElse(const SkNi& t, const SkNi& e) const {
-        return vorrq_u8(vandq_u8(t.fVec, fVec),
-                        vbicq_u8(e.fVec, fVec));
-    }
-
     uint8x16_t fVec;
 };
 
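Below, as an appendix to the patch rather than part of it, is a minimal usage sketch of the SkNb-returning comparisons this diff introduces. It assumes the usual aliases from the surrounding Skia tree (an SkNx.h header declaring Sk4f as SkNf<4, float>); those names come from the tree, not from this diff.

// Illustrative sketch only (not part of the patch). Assumes SkNx.h declares
// Sk4f as SkNf<4, float>, as elsewhere in the Skia tree.
#include "SkNx.h"

static bool all_positive(const Sk4f& v) {
    // operator> now evaluates to an SkNb<4, 4> (on NEON, a uint32x4_t of
    // per-lane masks), so the allTrue()/anyTrue() reductions live on the
    // mask type instead of on SkNf itself.
    return (v > Sk4f(0.0f)).allTrue();
}

static bool contains_nan(const Sk4f& v) {
    // A NaN lane compares unequal to itself, so anyTrue() over operator!= flags it.
    return (v != v).anyTrue();
}

The same pattern applies to the two-lane types: Sk2f comparisons yield SkNb<2, 4>, and on ARM64 the SkNf<2, double> comparisons yield SkNb<2, 8>.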