/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef SkPx_neon_DEFINED
#define SkPx_neon_DEFINED

#include "SkTypes.h"    // for SkASSERT
#include <arm_neon.h>   // NEON types and intrinsics (uint8x8x4_t, vld4_u8, ...)

// When we have NEON, we like to work 8 pixels at a time.
// This lets us exploit vld4/vst4 and represent SkPx as planar uint8x8x4_t,
// Wide as planar uint16x8x4_t, and Alpha as a single uint8x8_t plane.

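// (vld4_u8 deinterleaves as it loads: fVec.val[i] holds byte i of each of the
// 8 pixels, so each val[] is one color plane and val[3] is the alpha plane.)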
struct SkPx_neon {
    static const int N = 8;

    uint8x8x4_t fVec;
    SkPx_neon(uint8x8x4_t vec) : fVec(vec) {}

    static SkPx_neon Dup(uint32_t px) { return vld4_dup_u8((const uint8_t*)&px); }
    static SkPx_neon Load(const uint32_t* px) { return vld4_u8((const uint8_t*)px); }
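    // Load only n (1 <= n < 8) pixels; the remaining lanes hold copies of pixel 0.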
    static SkPx_neon Load(const uint32_t* px, int n) {
        SkASSERT(0 < n && n < 8);
        uint8x8x4_t v = vld4_dup_u8((const uint8_t*)px);  // n>=1, so start all lanes with pixel 0.
        switch (n) {
            case 7: v = vld4_lane_u8((const uint8_t*)(px+6), v, 6);  // fall through
            case 6: v = vld4_lane_u8((const uint8_t*)(px+5), v, 5);  // fall through
            case 5: v = vld4_lane_u8((const uint8_t*)(px+4), v, 4);  // fall through
            case 4: v = vld4_lane_u8((const uint8_t*)(px+3), v, 3);  // fall through
            case 3: v = vld4_lane_u8((const uint8_t*)(px+2), v, 2);  // fall through
            case 2: v = vld4_lane_u8((const uint8_t*)(px+1), v, 1);
        }
        return v;
    }

    void store(uint32_t* px) const { vst4_u8((uint8_t*)px, fVec); }
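    // Store only the first n (1 <= n < 8) pixels; each case intentionally falls
    // through so that lanes n-1 down to 0 are all written.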
    void store(uint32_t* px, int n) const {
        SkASSERT(0 < n && n < 8);
        switch (n) {
            case 7: vst4_lane_u8((uint8_t*)(px+6), fVec, 6);
            case 6: vst4_lane_u8((uint8_t*)(px+5), fVec, 5);
            case 5: vst4_lane_u8((uint8_t*)(px+4), fVec, 4);
            case 4: vst4_lane_u8((uint8_t*)(px+3), fVec, 3);
            case 3: vst4_lane_u8((uint8_t*)(px+2), fVec, 2);
            case 2: vst4_lane_u8((uint8_t*)(px+1), fVec, 1);
            case 1: vst4_lane_u8((uint8_t*)(px+0), fVec, 0);
        }
    }

    struct Alpha {
        uint8x8_t fA;
        Alpha(uint8x8_t a) : fA(a) {}

        static Alpha Dup(uint8_t a) { return vdup_n_u8(a); }
        static Alpha Load(const uint8_t* a) { return vld1_u8(a); }
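        // Load only n (1 <= n < 8) alphas; the remaining lanes hold copies of alpha 0.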
        static Alpha Load(const uint8_t* a, int n) {
            SkASSERT(0 < n && n < 8);
            uint8x8_t v = vld1_dup_u8(a);  // n>=1, so start all lanes with alpha 0.
            switch (n) {
                case 7: v = vld1_lane_u8(a+6, v, 6);  // fall through
                case 6: v = vld1_lane_u8(a+5, v, 5);  // fall through
                case 5: v = vld1_lane_u8(a+4, v, 4);  // fall through
                case 4: v = vld1_lane_u8(a+3, v, 3);  // fall through
                case 3: v = vld1_lane_u8(a+2, v, 2);  // fall through
                case 2: v = vld1_lane_u8(a+1, v, 1);
            }
            return v;
        }
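        // 255 - a, per lane.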
        Alpha inv() const { return vsub_u8(vdup_n_u8(255), fA); }
    };

    struct Wide {
        uint16x8x4_t fVec;
        Wide(uint16x8x4_t vec) : fVec(vec) {}

        Wide operator+(const Wide& o) const {
            return (uint16x8x4_t) {{
                vaddq_u16(fVec.val[0], o.fVec.val[0]),
                vaddq_u16(fVec.val[1], o.fVec.val[1]),
                vaddq_u16(fVec.val[2], o.fVec.val[2]),
                vaddq_u16(fVec.val[3], o.fVec.val[3]),
            }};
        }
        Wide operator-(const Wide& o) const {
            return (uint16x8x4_t) {{
                vsubq_u16(fVec.val[0], o.fVec.val[0]),
                vsubq_u16(fVec.val[1], o.fVec.val[1]),
                vsubq_u16(fVec.val[2], o.fVec.val[2]),
                vsubq_u16(fVec.val[3], o.fVec.val[3]),
            }};
        }
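        // vshlq_n_u16/vshrq_n_u16 take the shift count as a compile-time immediate.
        // In debug builds these operators may not be inlined down to a constant count,
        // so they dispatch through shift_slow(), which switches on the runtime value.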
        Wide operator<<(int bits) const {
        #if defined(SK_DEBUG)
            return (uint16x8x4_t) {{
                shift_slow(fVec.val[0], -bits),
                shift_slow(fVec.val[1], -bits),
                shift_slow(fVec.val[2], -bits),
                shift_slow(fVec.val[3], -bits),
            }};
        #else
            return (uint16x8x4_t) {{
                vshlq_n_u16(fVec.val[0], bits),
                vshlq_n_u16(fVec.val[1], bits),
                vshlq_n_u16(fVec.val[2], bits),
                vshlq_n_u16(fVec.val[3], bits),
            }};
        #endif
        }
        Wide operator>>(int bits) const {
        #if defined(SK_DEBUG)
            return (uint16x8x4_t) {{
                shift_slow(fVec.val[0], bits),
                shift_slow(fVec.val[1], bits),
                shift_slow(fVec.val[2], bits),
                shift_slow(fVec.val[3], bits),
            }};
        #else
            return (uint16x8x4_t) {{
                vshrq_n_u16(fVec.val[0], bits),
                vshrq_n_u16(fVec.val[1], bits),
                vshrq_n_u16(fVec.val[2], bits),
                vshrq_n_u16(fVec.val[3], bits),
            }};
        #endif
        }

        // v >> bits, for bits in [-15, 16].
        static uint16x8_t shift_slow(uint16x8_t v, int bits) {
            SkASSERT(bits >= -16 && bits <= 16);
            switch (bits) {
            #define L(n) case -n: return vshlq_n_u16(v, n);
            #define R(n) case  n: return vshrq_n_u16(v, n);
                L(15) L(14) L(13) L(12) L(11) L(10) L(9) L(8) L(7) L(6) L(5) L(4) L(3) L(2) L(1)
                R(16) R(15) R(14) R(13) R(12) R(11) R(10) R(9) R(8) R(7) R(6) R(5) R(4) R(3) R(2) R(1)
            #undef L
            #undef R
            }
            return v;
        }

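        // (*this + o) >> 8: widen o to 16 bits, add, then shift right by 8 and
        // narrow back down to 8 bits per channel.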
        SkPx_neon addNarrowHi(const SkPx_neon& o) const {
            return (uint8x8x4_t) {{
                vshrn_n_u16(vaddw_u8(fVec.val[0], o.fVec.val[0]), 8),
                vshrn_n_u16(vaddw_u8(fVec.val[1], o.fVec.val[1]), 8),
                vshrn_n_u16(vaddw_u8(fVec.val[2], o.fVec.val[2]), 8),
                vshrn_n_u16(vaddw_u8(fVec.val[3], o.fVec.val[3]), 8),
            }};
        }
    };

    Alpha alpha() const { return fVec.val[3]; }

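    // widenLo() zero-extends each channel x to 16 bits; widenHi() is x << 8;
    // widenLoHi() is x*257, i.e. x replicated into both the low and high byte.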
    Wide widenLo() const {
        return (uint16x8x4_t) {{
            vmovl_u8(fVec.val[0]),
            vmovl_u8(fVec.val[1]),
            vmovl_u8(fVec.val[2]),
            vmovl_u8(fVec.val[3]),
        }};
    }
    // TODO: these two can probably be done faster.
    Wide widenHi() const { return this->widenLo() << 8; }
    Wide widenLoHi() const { return this->widenLo() + this->widenHi(); }

    SkPx_neon operator+(const SkPx_neon& o) const {
        return (uint8x8x4_t) {{
            vadd_u8(fVec.val[0], o.fVec.val[0]),
            vadd_u8(fVec.val[1], o.fVec.val[1]),
            vadd_u8(fVec.val[2], o.fVec.val[2]),
            vadd_u8(fVec.val[3], o.fVec.val[3]),
        }};
    }
    SkPx_neon operator-(const SkPx_neon& o) const {
        return (uint8x8x4_t) {{
            vsub_u8(fVec.val[0], o.fVec.val[0]),
            vsub_u8(fVec.val[1], o.fVec.val[1]),
            vsub_u8(fVec.val[2], o.fVec.val[2]),
            vsub_u8(fVec.val[3], o.fVec.val[3]),
        }};
    }
    SkPx_neon saturatedAdd(const SkPx_neon& o) const {
        return (uint8x8x4_t) {{
            vqadd_u8(fVec.val[0], o.fVec.val[0]),
            vqadd_u8(fVec.val[1], o.fVec.val[1]),
            vqadd_u8(fVec.val[2], o.fVec.val[2]),
            vqadd_u8(fVec.val[3], o.fVec.val[3]),
        }};
    }

    Wide operator*(const Alpha& a) const {
        return (uint16x8x4_t) {{
            vmull_u8(fVec.val[0], a.fA),
            vmull_u8(fVec.val[1], a.fA),
            vmull_u8(fVec.val[2], a.fA),
            vmull_u8(fVec.val[3], a.fA),
        }};
    }
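    // Approximates (x*a)/255 as (x*a + x) >> 8.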
    SkPx_neon approxMulDiv255(const Alpha& a) const {
        return (*this * a).addNarrowHi(*this);
    }

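    // Add a into the alpha channel only; the color channels pass through unchanged.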
    SkPx_neon addAlpha(const Alpha& a) const {
        return (uint8x8x4_t) {{
            fVec.val[0],
            fVec.val[1],
            fVec.val[2],
            vadd_u8(fVec.val[3], a.fA),
        }};
    }
};
typedef SkPx_neon SkPx;
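
// A minimal usage sketch (illustration only, not part of this header or Skia's API):
// blending one row of premultiplied src pixels over dst with a constant 8-bit
// coverage, using only the operations above.  The function name blend_row and its
// signature are hypothetical, and the tail loop mirrors the main loop with the
// partial Load/store overloads.
//
//     static void blend_row(uint32_t* dst, const uint32_t* src, int n, uint8_t coverage) {
//         SkPx::Alpha cov = SkPx::Alpha::Dup(coverage);
//         while (n >= SkPx::N) {
//             SkPx s = SkPx::Load(src),
//                  d = SkPx::Load(dst);
//             s = s.approxMulDiv255(cov);                   // scale src by coverage
//             d = s + d.approxMulDiv255(s.alpha().inv());   // src-over: s + d*(1 - sa)
//             d.store(dst);
//             src += SkPx::N; dst += SkPx::N; n -= SkPx::N;
//         }
//         if (n > 0) {
//             SkPx s = SkPx::Load(src, n),
//                  d = SkPx::Load(dst, n);
//             s = s.approxMulDiv255(cov);
//             d = s + d.approxMulDiv255(s.alpha().inv());
//             d.store(dst, n);
//         }
//     }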

#endif//SkPx_neon_DEFINED