/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

// It is important _not_ to put header guards here.
// This file will be intentionally included three times.
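//
// A hedged sketch of how a wrapper header might drive the three passes (the wrapper
// and this file's name, written here as Sk2x_neon.h, are assumptions for illustration):
//
//     #define SK2X_PREAMBLE 1
//     #include "Sk2x_neon.h"     // pass 1: platform includes and SkScalarToSIMD
//     #undef SK2X_PREAMBLE
//
//     template <typename T> class Sk2x {
//     public:
//         ...
//     private:
//     #define SK2X_PRIVATE 1
//     #include "Sk2x_neon.h"     // pass 2: the fVec member and SIMD constructor
//     #undef SK2X_PRIVATE
//     };
//
//     #include "Sk2x_neon.h"     // pass 3: the specialized member definitions below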

#include "SkTypes.h"  // Keep this before any #ifdef for skbug.com/3362

#if defined(SK2X_PREAMBLE)
    #include <arm_neon.h>
    #include <math.h>
    // SkScalarToSIMD maps a scalar type to the two-lane storage Sk2x<T> uses:
    // a real float32x2_t register for float, and a plain double[2] for double
    // until Sk2d gets a NEON implementation (see the TODO below).
    template <typename T> struct SkScalarToSIMD;
    template <> struct SkScalarToSIMD< float> { typedef float32x2_t Type; };
    template <> struct SkScalarToSIMD<double> { typedef double Type[2]; };

#elif defined(SK2X_PRIVATE)
    typename SkScalarToSIMD<T>::Type fVec;
    /*implicit*/ Sk2x(const typename SkScalarToSIMD<T>::Type vec) { fVec = vec; }

#else

#define M(...) template <> inline __VA_ARGS__ Sk2x<float>::
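// M(R) expands to the boilerplate prefix of an explicit Sk2x<float> member
// specialization; for example
//     M(Sk2f) add(const Sk2f& o) const { ... }
// becomes
//     template <> inline Sk2f Sk2x<float>::add(const Sk2f& o) const { ... }
// and M() with no return type is used for the constructors. It is variadic so a
// return type containing a comma still pastes back together correctly.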

M() Sk2x() {}
M() Sk2x(float val) { fVec = vdup_n_f32(val); }
M() Sk2x(float a, float b) {
    // Splat a into both lanes first so vset_lane_f32 never reads an uninitialized
    // vector, then overwrite lane 1 with b.
    fVec = vdup_n_f32(a);
    fVec = vset_lane_f32(b, fVec, 1);
}
M(Sk2f&) operator=(const Sk2f& o) { fVec = o.fVec; return *this; }

M(Sk2f) Load(const float vals[2]) { return vld1_f32(vals); }
M(void) store(float vals[2]) const { vst1_f32(vals, fVec); }

M(Sk2f) add(const Sk2f& o) const { return vadd_f32(fVec, o.fVec); }
M(Sk2f) subtract(const Sk2f& o) const { return vsub_f32(fVec, o.fVec); }
M(Sk2f) multiply(const Sk2f& o) const { return vmul_f32(fVec, o.fVec); }

M(Sk2f) Min(const Sk2f& a, const Sk2f& b) { return vmin_f32(a.fVec, b.fVec); }
M(Sk2f) Max(const Sk2f& a, const Sk2f& b) { return vmax_f32(a.fVec, b.fVec); }

M(Sk2f) rsqrt() const {
    // vrsqrte_f32 gives a rough estimate of 1/sqrt(x); one vrsqrts_f32 step of
    // Newton's method refines it.
    float32x2_t est0 = vrsqrte_f32(fVec),
                est1 = vmul_f32(vrsqrts_f32(fVec, vmul_f32(est0, est0)), est0);
    return est1;
}
M(Sk2f) sqrt() const {
    // Take rsqrt()'s estimate, apply an extra step of Newton's method to refine the
    // estimate of 1/sqrt(this), then multiply by this to get sqrt(this).
    float32x2_t est1 = this->rsqrt().fVec,
                est2 = vmul_f32(vrsqrts_f32(fVec, vmul_f32(est1, est1)), est1);
    return vmul_f32(fVec, est2);
}

#undef M
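
// A minimal caller-side sketch of the Sk2f API defined above (assuming the wrapper
// header names Sk2x<float> as Sk2f); illustrative only:
//     float in[2] = {1.0f, 4.0f}, out[2];
//     Sk2f::Load(in).multiply(Sk2f(0.5f)).store(out);   // out[] becomes {0.5f, 2.0f}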

#define M(...) template <> inline __VA_ARGS__ Sk2x<double>::

// TODO: #ifdef SK_CPU_ARM64 use float64x2_t for Sk2d.
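// A hedged sketch of what that could look like on AArch64, where one float64x2_t
// register holds both doubles (hypothetical, not part of this file):
//     template <> struct SkScalarToSIMD<double> { typedef float64x2_t Type; };
//     M(Sk2d) add(const Sk2d& o) const { return vaddq_f64(fVec, o.fVec); }
//     M(Sk2d) sqrt() const            { return vsqrtq_f64(fVec); }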

M() Sk2x() {}
M() Sk2x(double val) { fVec[0] = fVec[1] = val; }
M() Sk2x(double a, double b) { fVec[0] = a; fVec[1] = b; }
M(Sk2d&) operator=(const Sk2d& o) {
    fVec[0] = o.fVec[0];
    fVec[1] = o.fVec[1];
    return *this;
}

M(Sk2d) Load(const double vals[2]) { return Sk2d(vals[0], vals[1]); }
M(void) store(double vals[2]) const { vals[0] = fVec[0]; vals[1] = fVec[1]; }

M(Sk2d) add(const Sk2d& o) const { return Sk2d(fVec[0] + o.fVec[0], fVec[1] + o.fVec[1]); }
M(Sk2d) subtract(const Sk2d& o) const { return Sk2d(fVec[0] - o.fVec[0], fVec[1] - o.fVec[1]); }
M(Sk2d) multiply(const Sk2d& o) const { return Sk2d(fVec[0] * o.fVec[0], fVec[1] * o.fVec[1]); }

M(Sk2d) Min(const Sk2d& a, const Sk2d& b) {
    return Sk2d(SkTMin(a.fVec[0], b.fVec[0]), SkTMin(a.fVec[1], b.fVec[1]));
}
M(Sk2d) Max(const Sk2d& a, const Sk2d& b) {
    return Sk2d(SkTMax(a.fVec[0], b.fVec[0]), SkTMax(a.fVec[1], b.fVec[1]));
}

M(Sk2d) rsqrt() const { return Sk2d(1.0/::sqrt(fVec[0]), 1.0/::sqrt(fVec[1])); }
M(Sk2d) sqrt()  const { return Sk2d(    ::sqrt(fVec[0]),     ::sqrt(fVec[1])); }

#undef M

#endif