/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef SkPx_neon_DEFINED
#define SkPx_neon_DEFINED

// When we have NEON, we like to work 8 pixels at a time.
// This lets us exploit vld4/vst4 and represent SkPx as planar uint8x8x4_t,
// Wide as planar uint16x8x4_t, and Alpha as a single uint8x8_t plane.
// 8 pixels of 32-bit color, held planar: one 8-lane vector per channel.
// Planar layout lets the per-channel NEON math below run without shuffles.
struct SkPx_neon {
    static const int N = 8;  // We always work 8 pixels at a time.

    // fVec.val[i] holds channel i of all 8 pixels.  val[3] is the alpha
    // plane (see alpha() below); the order of the other three planes is
    // whatever vld4 deinterleaved from the caller's 32-bit pixels.
    uint8x8x4_t fVec;
    SkPx_neon(uint8x8x4_t vec) : fVec(vec) {}

    // Broadcast one 32-bit pixel into all 8 lanes of every plane.
    static SkPx_neon Dup(uint32_t px) { return vld4_dup_u8((const uint8_t*)&px); }
    // Load 8 interleaved pixels, deinterleaving them into the 4 planes.
    static SkPx_neon LoadN(const uint32_t* px) { return vld4_u8((const uint8_t*)px); }
    // Load n (1..7) pixels; lanes >= n end up holding copies of pixel 0.
    static SkPx_neon Load(int n, const uint32_t* px) {
        SkASSERT(0 < n && n < 8);
        uint8x8x4_t v = vld4_dup_u8((const uint8_t*)px);  // n>=1, so start all lanes with pixel 0.
        // Each case overwrites one lane, then falls through to fill the lanes below it.
        switch (n) {
            case 7: v = vld4_lane_u8((const uint8_t*)(px+6), v, 6);  // fall through
            case 6: v = vld4_lane_u8((const uint8_t*)(px+5), v, 5);  // fall through
            case 5: v = vld4_lane_u8((const uint8_t*)(px+4), v, 4);  // fall through
            case 4: v = vld4_lane_u8((const uint8_t*)(px+3), v, 3);  // fall through
            case 3: v = vld4_lane_u8((const uint8_t*)(px+2), v, 2);  // fall through
            case 2: v = vld4_lane_u8((const uint8_t*)(px+1), v, 1);
            // case 1: lane 0 already holds pixel 0 from the vld4_dup above.
        }
        return v;
    }

    // Store all 8 pixels, reinterleaving the planes back into 32-bit pixels.
    void storeN(uint32_t* px) const { vst4_u8((uint8_t*)px, fVec); }
    // Store only the first n (1..7) pixels.
    void store(int n, uint32_t* px) const {
        SkASSERT(0 < n && n < 8);
        // Intentional fall-through: storing n pixels writes lanes n-1 down to 0.
        switch (n) {
            case 7: vst4_lane_u8((uint8_t*)(px+6), fVec, 6);  // fall through
            case 6: vst4_lane_u8((uint8_t*)(px+5), fVec, 5);  // fall through
            case 5: vst4_lane_u8((uint8_t*)(px+4), fVec, 4);  // fall through
            case 4: vst4_lane_u8((uint8_t*)(px+3), fVec, 3);  // fall through
            case 3: vst4_lane_u8((uint8_t*)(px+2), fVec, 2);  // fall through
            case 2: vst4_lane_u8((uint8_t*)(px+1), fVec, 1);  // fall through
            case 1: vst4_lane_u8((uint8_t*)(px+0), fVec, 0);
        }
    }

    // 8 alpha values, one 8-bit lane per pixel.
    struct Alpha {
        uint8x8_t fA;
        Alpha(uint8x8_t a) : fA(a) {}

        // Broadcast one alpha into all 8 lanes.
        static Alpha Dup(uint8_t a) { return vdup_n_u8(a); }
        // Load 8 alphas.
        static Alpha LoadN(const uint8_t* a) { return vld1_u8(a); }
        // Load n (1..7) alphas; lanes >= n end up holding copies of alpha 0.
        static Alpha Load(int n, const uint8_t* a) {
            SkASSERT(0 < n && n < 8);
            uint8x8_t v = vld1_dup_u8(a);  // n>=1, so start all lanes with alpha 0.
            // Same fill-down fall-through pattern as SkPx_neon::Load above.
            switch (n) {
                case 7: v = vld1_lane_u8(a+6, v, 6);  // fall through
                case 6: v = vld1_lane_u8(a+5, v, 5);  // fall through
                case 5: v = vld1_lane_u8(a+4, v, 4);  // fall through
                case 4: v = vld1_lane_u8(a+3, v, 3);  // fall through
                case 3: v = vld1_lane_u8(a+2, v, 2);  // fall through
                case 2: v = vld1_lane_u8(a+1, v, 1);
            }
            return v;
        }
        // 255 - alpha, per lane.
        Alpha inv() const { return vsub_u8(vdup_n_u8(255), fA); }
    };

    // 8 pixels widened to 16 bits per channel, for intermediate math that
    // would overflow 8 bits (e.g. the multiply in operator* below).
    struct Wide {
        uint16x8x4_t fVec;
        Wide(uint16x8x4_t vec) : fVec(vec) {}

        // Per-channel, per-lane 16-bit add (wraps on overflow).
        Wide operator+(const Wide& o) const {
            return (uint16x8x4_t) {{
                vaddq_u16(fVec.val[0], o.fVec.val[0]),
                vaddq_u16(fVec.val[1], o.fVec.val[1]),
                vaddq_u16(fVec.val[2], o.fVec.val[2]),
                vaddq_u16(fVec.val[3], o.fVec.val[3]),
            }};
        }
        // Per-channel, per-lane 16-bit subtract (wraps on underflow).
        Wide operator-(const Wide& o) const {
            return (uint16x8x4_t) {{
                vsubq_u16(fVec.val[0], o.fVec.val[0]),
                vsubq_u16(fVec.val[1], o.fVec.val[1]),
                vsubq_u16(fVec.val[2], o.fVec.val[2]),
                vsubq_u16(fVec.val[3], o.fVec.val[3]),
            }};
        }
        // The vshlq_n/vshrq_n intrinsics need a compile-time-constant shift,
        // so these must always inline for 'bits' to fold to an immediate.
        SK_ALWAYS_INLINE Wide operator<<(int bits) const {
            return (uint16x8x4_t) {{
                vshlq_n_u16(fVec.val[0], bits),
                vshlq_n_u16(fVec.val[1], bits),
                vshlq_n_u16(fVec.val[2], bits),
                vshlq_n_u16(fVec.val[3], bits),
            }};
        }
        SK_ALWAYS_INLINE Wide operator>>(int bits) const {
            return (uint16x8x4_t) {{
                vshrq_n_u16(fVec.val[0], bits),
                vshrq_n_u16(fVec.val[1], bits),
                vshrq_n_u16(fVec.val[2], bits),
                vshrq_n_u16(fVec.val[3], bits),
            }};
        }

        // Returns (this + o) >> 8, narrowed back to 8 bits per channel.
        // vaddw_u8 widens o's 8-bit planes as it adds, and vshrn_n_u16
        // shifts and narrows in a single instruction.
        SkPx_neon addNarrowHi(const SkPx_neon& o) const {
            return (uint8x8x4_t) {{
                vshrn_n_u16(vaddw_u8(fVec.val[0], o.fVec.val[0]), 8),
                vshrn_n_u16(vaddw_u8(fVec.val[1], o.fVec.val[1]), 8),
                vshrn_n_u16(vaddw_u8(fVec.val[2], o.fVec.val[2]), 8),
                vshrn_n_u16(vaddw_u8(fVec.val[3], o.fVec.val[3]), 8),
            }};
        }
    };

    // The alpha plane is val[3].
    Alpha alpha() const { return fVec.val[3]; }

    // Zero-extend each 8-bit channel into the low byte of a 16-bit lane.
    Wide widenLo() const {
        return (uint16x8x4_t) {{
            vmovl_u8(fVec.val[0]),
            vmovl_u8(fVec.val[1]),
            vmovl_u8(fVec.val[2]),
            vmovl_u8(fVec.val[3]),
        }};
    }
    // TODO: these two can probably be done faster.
    Wide widenHi() const { return this->widenLo() << 8; }                 // channel in the high byte: x*256
    Wide widenLoHi() const { return this->widenLo() + this->widenHi(); }  // x*257, i.e. both bytes

    // Per-channel 8-bit add (wraps on overflow; see saturatedAdd for clamping).
    SkPx_neon operator+(const SkPx_neon& o) const {
        return (uint8x8x4_t) {{
            vadd_u8(fVec.val[0], o.fVec.val[0]),
            vadd_u8(fVec.val[1], o.fVec.val[1]),
            vadd_u8(fVec.val[2], o.fVec.val[2]),
            vadd_u8(fVec.val[3], o.fVec.val[3]),
        }};
    }
    // Per-channel 8-bit subtract (wraps on underflow).
    SkPx_neon operator-(const SkPx_neon& o) const {
        return (uint8x8x4_t) {{
            vsub_u8(fVec.val[0], o.fVec.val[0]),
            vsub_u8(fVec.val[1], o.fVec.val[1]),
            vsub_u8(fVec.val[2], o.fVec.val[2]),
            vsub_u8(fVec.val[3], o.fVec.val[3]),
        }};
    }
    // Per-channel add, clamped to 255 instead of wrapping.
    SkPx_neon saturatedAdd(const SkPx_neon& o) const {
        return (uint8x8x4_t) {{
            vqadd_u8(fVec.val[0], o.fVec.val[0]),
            vqadd_u8(fVec.val[1], o.fVec.val[1]),
            vqadd_u8(fVec.val[2], o.fVec.val[2]),
            vqadd_u8(fVec.val[3], o.fVec.val[3]),
        }};
    }

    // Widening multiply: each 8-bit channel times its pixel's 8-bit alpha,
    // producing 16-bit results (so the product can't overflow).
    Wide operator*(const Alpha& a) const {
        return (uint16x8x4_t) {{
            vmull_u8(fVec.val[0], a.fA),
            vmull_u8(fVec.val[1], a.fA),
            vmull_u8(fVec.val[2], a.fA),
            vmull_u8(fVec.val[3], a.fA),
        }};
    }
    // Approximates (this * a) / 255 as (this*a + this) >> 8.
    SkPx_neon approxMulDiv255(const Alpha& a) const {
        return (*this * a).addNarrowHi(*this);
    }

    // Add a to the alpha plane only; color planes pass through unchanged.
    SkPx_neon addAlpha(const Alpha& a) const {
        return (uint8x8x4_t) {{
            fVec.val[0],
            fVec.val[1],
            fVec.val[2],
            vadd_u8(fVec.val[3], a.fA),
        }};
    }
};
typedef SkPx_neon SkPx;

#endif//SkPx_neon_DEFINED