// It is important _not_ to put header guards here.
// This file will be intentionally included three times.
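//
// A rough sketch of how the umbrella header (Sk4x.h) drives the three passes
// over this file; the macro names are the real ones tested below, but the
// exact sequencing here is illustrative:
//
//   #define SK4X_PREAMBLE
//   #include "Sk4x_neon.h"      // Pass 1: platform includes and helper types.
//   #undef SK4X_PREAMBLE
//
//   template <typename T> class Sk4x {
//       ...
//       #define SK4X_PRIVATE
//       #include "Sk4x_neon.h"  // Pass 2: extra private members.
//       #undef SK4X_PRIVATE
//   };
//
//   #include "Sk4x_neon.h"      // Pass 3: out-of-line method definitions.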

#include "SkTypes.h" // Keep this before any #ifdef for skbug.com/3362

#if defined(SK4X_PREAMBLE)
    #include <arm_neon.h>

    // Template metaprogramming to map scalar types to vector types.
    template <typename T> struct SkScalarToSIMD;
    template <> struct SkScalarToSIMD<float>   { typedef float32x4_t Type; };
    template <> struct SkScalarToSIMD<int32_t> { typedef int32x4_t   Type; };
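
    // So, for example, Sk4x<float> (Sk4f) carries a float32x4_t in fVec and
    // Sk4x<int32_t> (Sk4i) carries an int32x4_t; everything below is written
    // against those two concrete vector types.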

#elif defined(SK4X_PRIVATE)
    Sk4x(float32x4_t);
    Sk4x(int32x4_t);

    typename SkScalarToSIMD<T>::Type fVec;

#else

// Vector Constructors
//template <> inline Sk4f::Sk4x(int32x4_t v) : fVec(vcvtq_f32_s32(v)) {}
template <> inline Sk4f::Sk4x(float32x4_t v) : fVec(v) {}
template <> inline Sk4i::Sk4x(int32x4_t v) : fVec(v) {}
//template <> inline Sk4i::Sk4x(float32x4_t v) : fVec(vcvtq_s32_f32(v)) {}
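// The converting constructors are left commented out, presumably so that
// float<->int conversions stay explicit through cast<>() and reinterpret<>()
// below rather than happening silently at construction.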

// Generic Methods
template <typename T> Sk4x<T>::Sk4x() {}
template <typename T> Sk4x<T>::Sk4x(const Sk4x& other) { *this = other; }
template <typename T> Sk4x<T>& Sk4x<T>::operator=(const Sk4x<T>& other) {
    fVec = other.fVec;
    return *this;
}

// Sk4f Methods
#define M(...) template <> inline __VA_ARGS__ Sk4f::

M() Sk4x(float v) : fVec(vdupq_n_f32(v)) {}
M() Sk4x(float a, float b, float c, float d) { fVec = (float32x4_t) { a, b, c, d }; }

// As far as I can tell, it's not possible to provide an alignment hint to
// NEON using intrinsics. However, I think it is possible at the assembly
// level if we want to get into that.
// TODO: Write our own aligned load and store.
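// (For reference, the AArch32 assembly form would look something like
//  "vld1.32 {d0,d1}, [r0:128]", where ":128" is the alignment hint; there is
//  no intrinsic equivalent, which is why Load() and LoadAligned() are
//  identical here.)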
M(Sk4f) Load       (const float fs[4]) { return vld1q_f32(fs); }
M(Sk4f) LoadAligned(const float fs[4]) { return vld1q_f32(fs); }
M(void) store       (float fs[4]) const { vst1q_f32(fs, fVec); }
M(void) storeAligned(float fs[4]) const { vst1q_f32(fs, fVec); }

template <>
M(Sk4i) reinterpret<Sk4i>() const { return vreinterpretq_s32_f32(fVec); }

template <>
M(Sk4i) cast<Sk4i>() const { return vcvtq_s32_f32(fVec); }
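
// Note the distinction: reinterpret<Sk4i>() keeps the raw bit pattern, while
// cast<Sk4i>() converts values (vcvtq_s32_f32 rounds toward zero), so e.g.
// Sk4f(1.9f).cast<Sk4i>() holds 1 in every lane.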

// We're going to skip allTrue(), anyTrue(), and bit-manipulators
// for Sk4f. Code that calls them probably does so accidentally.
// Ask msarett or mtklein to fill these in if you really need them.
M(Sk4f) add     (const Sk4f& o) const { return vaddq_f32(fVec, o.fVec); }
M(Sk4f) subtract(const Sk4f& o) const { return vsubq_f32(fVec, o.fVec); }
M(Sk4f) multiply(const Sk4f& o) const { return vmulq_f32(fVec, o.fVec); }

M(Sk4f) divide  (const Sk4f& o) const {
#if defined(SK_CPU_ARM64)
    return vdivq_f32(fVec, o.fVec);
#else
    float32x4_t est0 = vrecpeq_f32(o.fVec),
                est1 = vmulq_f32(vrecpsq_f32(est0, o.fVec), est0),
                est2 = vmulq_f32(vrecpsq_f32(est1, o.fVec), est1);
    return vmulq_f32(est2, fVec);
#endif
}
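
// In the 32-bit path above, vrecpeq_f32 yields roughly 8 bits of 1/x, and each
// vrecpsq_f32(est, x) computes (2 - est*x), so est' = est * (2 - est*x) is one
// Newton-Raphson step, roughly doubling the bits of precision; two steps get
// close to full float precision. rsqrt() below plays the same game with
// vrsqrteq_f32/vrsqrtsq_f32 to approximate 1/sqrt(x).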

M(Sk4f) rsqrt() const {
    float32x4_t est0 = vrsqrteq_f32(fVec),
                est1 = vmulq_f32(vrsqrtsq_f32(fVec, vmulq_f32(est0, est0)), est0);
    return est1;
}

M(Sk4f) sqrt() const {
#if defined(SK_CPU_ARM64)
    return vsqrtq_f32(fVec);
#else
    float32x4_t est1 = this->rsqrt().fVec,
    // An extra step of Newton's method to refine the estimate of 1/sqrt(this).
                est2 = vmulq_f32(vrsqrtsq_f32(fVec, vmulq_f32(est1, est1)), est1);
    return vmulq_f32(fVec, est2);
#endif
}
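
// That is, on 32-bit ARM sqrt(x) is computed as x * (1/sqrt(x)), with one more
// refinement step than rsqrt() itself uses.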

M(Sk4i) equal           (const Sk4f& o) const { return vreinterpretq_s32_u32(vceqq_f32(fVec, o.fVec)); }
M(Sk4i) notEqual        (const Sk4f& o) const { return vreinterpretq_s32_u32(vmvnq_u32(vceqq_f32(fVec, o.fVec))); }
M(Sk4i) lessThan        (const Sk4f& o) const { return vreinterpretq_s32_u32(vcltq_f32(fVec, o.fVec)); }
M(Sk4i) greaterThan     (const Sk4f& o) const { return vreinterpretq_s32_u32(vcgtq_f32(fVec, o.fVec)); }
M(Sk4i) lessThanEqual   (const Sk4f& o) const { return vreinterpretq_s32_u32(vcleq_f32(fVec, o.fVec)); }
M(Sk4i) greaterThanEqual(const Sk4f& o) const { return vreinterpretq_s32_u32(vcgeq_f32(fVec, o.fVec)); }
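
// Each comparison returns a lane mask as an Sk4i: all bits of a lane are set
// where the predicate holds and clear where it doesn't. For example,
// Sk4f(1,2,3,4).equal(Sk4f(1,0,3,0)) produces lanes {-1, 0, -1, 0}, which is
// exactly what Sk4i's allTrue()/anyTrue() below expect.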

M(Sk4f) Min(const Sk4f& a, const Sk4f& b) { return vminq_f32(a.fVec, b.fVec); }
M(Sk4f) Max(const Sk4f& a, const Sk4f& b) { return vmaxq_f32(a.fVec, b.fVec); }

M(Sk4f) aacc() const { return vtrnq_f32(fVec, fVec).val[0]; }
M(Sk4f) bbdd() const { return vtrnq_f32(fVec, fVec).val[1]; }
M(Sk4f) badc() const { return vrev64q_f32(fVec); }
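
// These swizzles are named for the lanes they produce from {a,b,c,d}:
//   aacc() -> {a,a,c,c}, bbdd() -> {b,b,d,d}, badc() -> {b,a,d,c}.
// (The Sk4i versions at the bottom of the file behave the same way.)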

// Sk4i Methods
#undef M
#define M(...) template <> inline __VA_ARGS__ Sk4i::

M() Sk4x(int32_t v) : fVec(vdupq_n_s32(v)) {}
M() Sk4x(int32_t a, int32_t b, int32_t c, int32_t d) { fVec = (int32x4_t) { a, b, c, d }; }

// As far as I can tell, it's not possible to provide an alignment hint to
// NEON using intrinsics. However, I think it is possible at the assembly
// level if we want to get into that.
M(Sk4i) Load       (const int32_t is[4]) { return vld1q_s32(is); }
M(Sk4i) LoadAligned(const int32_t is[4]) { return vld1q_s32(is); }
M(void) store       (int32_t is[4]) const { vst1q_s32(is, fVec); }
M(void) storeAligned(int32_t is[4]) const { vst1q_s32(is, fVec); }

template <>
M(Sk4f) reinterpret<Sk4f>() const { return vreinterpretq_f32_s32(fVec); }

template <>
M(Sk4f) cast<Sk4f>() const { return vcvtq_f32_s32(fVec); }
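
// allTrue() and anyTrue() assume each lane is a comparison mask (all bits set
// or all bits clear). AND-ing or OR-ing arbitrary integers lane-by-lane would
// not give a meaningful truth value, e.g. 1 & 2 == 0 even though both lanes
// are nonzero.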
M(bool) allTrue() const {
    int32_t a = vgetq_lane_s32(fVec, 0);
    int32_t b = vgetq_lane_s32(fVec, 1);
    int32_t c = vgetq_lane_s32(fVec, 2);
    int32_t d = vgetq_lane_s32(fVec, 3);
    return a & b & c & d;
}
M(bool) anyTrue() const {
    int32_t a = vgetq_lane_s32(fVec, 0);
    int32_t b = vgetq_lane_s32(fVec, 1);
    int32_t c = vgetq_lane_s32(fVec, 2);
    int32_t d = vgetq_lane_s32(fVec, 3);
    return a | b | c | d;
}

M(Sk4i) bitNot() const { return vmvnq_s32(fVec); }
M(Sk4i) bitAnd(const Sk4i& o) const { return vandq_s32(fVec, o.fVec); }
M(Sk4i) bitOr (const Sk4i& o) const { return vorrq_s32(fVec, o.fVec); }

M(Sk4i) equal           (const Sk4i& o) const { return vreinterpretq_s32_u32(vceqq_s32(fVec, o.fVec)); }
M(Sk4i) notEqual        (const Sk4i& o) const { return vreinterpretq_s32_u32(vmvnq_u32(vceqq_s32(fVec, o.fVec))); }
M(Sk4i) lessThan        (const Sk4i& o) const { return vreinterpretq_s32_u32(vcltq_s32(fVec, o.fVec)); }
M(Sk4i) greaterThan     (const Sk4i& o) const { return vreinterpretq_s32_u32(vcgtq_s32(fVec, o.fVec)); }
M(Sk4i) lessThanEqual   (const Sk4i& o) const { return vreinterpretq_s32_u32(vcleq_s32(fVec, o.fVec)); }
M(Sk4i) greaterThanEqual(const Sk4i& o) const { return vreinterpretq_s32_u32(vcgeq_s32(fVec, o.fVec)); }

M(Sk4i) add     (const Sk4i& o) const { return vaddq_s32(fVec, o.fVec); }
M(Sk4i) subtract(const Sk4i& o) const { return vsubq_s32(fVec, o.fVec); }
M(Sk4i) multiply(const Sk4i& o) const { return vmulq_s32(fVec, o.fVec); }
// NEON does not have integer reciprocal, sqrt, or division.
M(Sk4i) Min(const Sk4i& a, const Sk4i& b) { return vminq_s32(a.fVec, b.fVec); }
M(Sk4i) Max(const Sk4i& a, const Sk4i& b) { return vmaxq_s32(a.fVec, b.fVec); }

M(Sk4i) aacc() const { return vtrnq_s32(fVec, fVec).val[0]; }
M(Sk4i) bbdd() const { return vtrnq_s32(fVec, fVec).val[1]; }
M(Sk4i) badc() const { return vrev64q_s32(fVec); }

#undef M

#endif