Chromium Code Reviews

Unified Diff: src/opts/SkNx_neon.h

Issue 1685773002: Sk4f: add floor() (Closed) Base URL: https://skia.googlesource.com/skia.git@master
Patch Set: add a bench (created 4 years, 10 months ago)
 /*
  * Copyright 2015 Google Inc.
  *
  * Use of this source code is governed by a BSD-style license that can be
  * found in the LICENSE file.
  */

 #ifndef SkNx_neon_DEFINED
 #define SkNx_neon_DEFINED

 #define SKNX_IS_FAST

+// ARMv8 has vrndmq_f32 to floor 4 floats.  Here we emulate it:
+//   - round by adding (1<<23) with our sign, then subtracting it;
+//   - if that rounded value is bigger than our input, subtract 1.
+static inline float32x4_t armv7_vrndmq_f32(float32x4_t v) {
+    auto sign = vandq_u32((uint32x4_t)v, vdupq_n_u32(1<<31));
+    auto bias = (float32x4_t)(vorrq_u32((uint32x4_t)vdupq_n_f32(1<<23), sign));
+    auto rounded = vsubq_f32(vaddq_f32(v, bias), bias);
+    auto too_big = vcgtq_f32(rounded, v);
+    return vsubq_f32(rounded, (float32x4_t)vandq_u32(too_big, (uint32x4_t)vdupq_n_f32(1)));
+}
+
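Why the bias trick works: for any float whose magnitude is below 2^23, adding a same-signed 2^23 leaves no mantissa bits for the fraction, so the addition itself rounds the value to an integer under the default round-to-nearest mode, and subtracting the bias back recovers that rounded integer. Round-to-nearest can land above the input, hence the final subtract-1 to turn "nearest" into "floor". A scalar model of one lane, as a sketch (a hypothetical helper for illustration, not part of this CL; it assumes |v| < 2^23 and default rounding, and fast-math reassociation of (v + bias) - bias would break it):

    #include <cstdint>
    #include <cstring>

    // Scalar sketch of armv7_vrndmq_f32, one lane at a time.
    static float scalar_floor(float v) {
        uint32_t bits;
        std::memcpy(&bits, &v, sizeof(bits));
        uint32_t sign = bits & 0x80000000u;             // sign bit of the input
        float bias = 8388608.0f;                        // 2^23: ulp here is exactly 1.0
        uint32_t biasBits;
        std::memcpy(&biasBits, &bias, sizeof(biasBits));
        biasBits |= sign;                               // give the bias our sign
        std::memcpy(&bias, &biasBits, sizeof(bias));
        float rounded = (v + bias) - bias;              // add/sub rounds to nearest integer
        return rounded > v ? rounded - 1.0f : rounded;  // nearest -> floor
    }

For example, scalar_floor(1.7f) rounds to 2 and then subtracts 1 to give 1; scalar_floor(-1.2f) rounds to -1 and gives -2.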
 // Well, this is absurd.  The shifts require compile-time constant arguments.

 #define SHIFT8(op, v, bits) switch(bits) { \
     case 1: return op(v, 1);  case 2: return op(v, 2);  case 3: return op(v, 3); \
     case 4: return op(v, 4);  case 5: return op(v, 5);  case 6: return op(v, 6); \
     case 7: return op(v, 7); \
     } return fVec

 #define SHIFT16(op, v, bits) if (bits < 8) { SHIFT8(op, v, bits); } switch(bits) { \
     case 8: return op(v, 8);  case 9: return op(v, 9); \
(...skipping 131 matching lines...)
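Context for the SHIFT macros above: NEON's shift-by-immediate intrinsics (vshlq_n_u16 and friends) only accept a compile-time-constant count, so a runtime count has to be dispatched through a switch in which every case passes a literal. A trimmed standalone sketch of the pattern (a hypothetical helper, not this CL's macro, which enumerates every legal count and falls back to returning fVec):

    #include <arm_neon.h>

    // Route a runtime shift count into the immediate-only intrinsic.
    // Trimmed to counts 0-3 for brevity.
    static inline uint16x8_t shl_u16_small(uint16x8_t v, int bits) {
        switch (bits) {
            case 1: return vshlq_n_u16(v, 1);
            case 2: return vshlq_n_u16(v, 2);
            case 3: return vshlq_n_u16(v, 3);
        }
        return v;  // count 0: vector unchanged
    }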
     SkNx operator<=(const SkNx& o) const { return vreinterpretq_f32_u32(vcleq_f32(fVec, o.fVec)); }
     SkNx operator>=(const SkNx& o) const { return vreinterpretq_f32_u32(vcgeq_f32(fVec, o.fVec)); }
     SkNx operator!=(const SkNx& o) const {
         return vreinterpretq_f32_u32(vmvnq_u32(vceqq_f32(fVec, o.fVec)));
     }

     static SkNx Min(const SkNx& l, const SkNx& r) { return vminq_f32(l.fVec, r.fVec); }
     static SkNx Max(const SkNx& l, const SkNx& r) { return vmaxq_f32(l.fVec, r.fVec); }

     SkNx abs() const { return vabsq_f32(fVec); }
+    SkNx floor() const {
+#if defined(SK_CPU_ARM64)
+        return vrndmq_f32(fVec);
+#else
+        return armv7_vrndmq_f32(fVec);
+#endif
+    }
+

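Usage of the new method, as a sketch (the four-lane constructor is per src/core/SkNx.h; lane values here are illustrative):

    Sk4f v(0.5f, 1.9f, -0.5f, -1.9f);
    Sk4f f = v.floor();   // lanes become { 0, 1, -1, -2 }

On ARM64 this compiles down to a single vrndmq_f32 (FRINTM); on ARMv7 it runs the short emulation defined at the top of this file.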
     SkNx rsqrt0() const { return vrsqrteq_f32(fVec); }
     SkNx rsqrt1() const {
         float32x4_t est0 = this->rsqrt0().fVec;
         return vmulq_f32(vrsqrtsq_f32(fVec, vmulq_f32(est0, est0)), est0);
     }
     SkNx rsqrt2() const {
         float32x4_t est1 = this->rsqrt1().fVec;
         return vmulq_f32(vrsqrtsq_f32(fVec, vmulq_f32(est1, est1)), est1);
     }
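Background on the rsqrt ladder: vrsqrteq_f32 returns a rough hardware estimate of 1/sqrt(a), and vrsqrtsq_f32(a, x*x) computes (3 - a*x*x)/2, which is exactly the Newton-Raphson correction factor for f(x) = 1/x^2 - a. Multiplying the previous estimate by that factor roughly doubles the number of correct bits, so rsqrt1() is one refinement step applied to the hardware estimate and rsqrt2() is two. A scalar model of one step (a sketch, not Skia code):

    // One Newton-Raphson step for 1/sqrt(a): x' = x * (3 - a*x*x) / 2.
    static float rsqrt_step(float a, float x) {
        return x * (3.0f - a * x * x) * 0.5f;
    }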
(...skipping 195 matching lines...)

 template<> inline Sk4h SkNx_cast<uint16_t, uint8_t>(const Sk4b& src) {
     return vget_low_u16(vmovl_u8(src.fVec));
 }

 template<> inline Sk4b SkNx_cast<uint8_t, uint16_t>(const Sk4h& src) {
     return vmovn_u16(vcombine_u16(src.fVec, src.fVec));
 }

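The two specializations above widen four uint8 lanes to uint16 (vmovl_u8, keeping the low half of the widened vector) and narrow them back (vmovn_u16, which keeps the low byte of each lane). A hypothetical round-trip sketch, assuming the Load API from src/core/SkNx.h:

    uint8_t px[4] = {1, 2, 200, 255};
    Sk4b b = Sk4b::Load(px);
    Sk4h h = SkNx_cast<uint16_t>(b);    // widen: {1, 2, 200, 255}
    Sk4b back = SkNx_cast<uint8_t>(h);  // narrow: values <= 255 round-trip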
 #endif//SkNx_neon_DEFINED