Chromium Code Reviews

Unified Diff: src/opts/Sk2x_neon.h

Issue 1020963002: Specialize Sk2d for ARM64 (Closed) Base URL: https://skia.googlesource.com/skia@master
Patch Set: Avoid use of vset[q]_lane, initializing vectors directly instead. Created 5 years, 9 months ago
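
For reference, a minimal standalone sketch (not part of the patch) contrasting the two construction styles; it assumes a toolchain with arm_neon.h and GCC/Clang vector-literal support:

#include <arm_neon.h>

// Old style: build the vector one lane at a time. As in the original
// constructor, each vset_lane_f32 call reads the previous vector value,
// and the very first call read fVec before it had been initialized.
static float32x2_t make_with_set_lane(float a, float b) {
    float32x2_t v = vdup_n_f32(0.0f);   // explicit init added here for the sketch
    v = vset_lane_f32(a, v, 0);
    v = vset_lane_f32(b, v, 1);
    return v;
}

// New style: initialize the whole vector directly, as the patch does.
static float32x2_t make_direct(float a, float b) {
    return (float32x2_t) { a, b };
}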
Index: src/opts/Sk2x_neon.h
diff --git a/src/opts/Sk2x_neon.h b/src/opts/Sk2x_neon.h
index cc4e799490927c2fd4562df7bdd3336d26cfcf48..00ab00aeaa6ddd5a3db3eb58f161e6ef78eddef9 100644
--- a/src/opts/Sk2x_neon.h
+++ b/src/opts/Sk2x_neon.h
@@ -15,7 +15,11 @@
#include <math.h>
template <typename T> struct SkScalarToSIMD;
template <> struct SkScalarToSIMD< float> { typedef float32x2_t Type; };
- template <> struct SkScalarToSIMD<double> { typedef double Type[2]; };
+ #if defined(SK_CPU_ARM64)
+ template <> struct SkScalarToSIMD<double> { typedef float64x2_t Type; };
+ #else
+ template <> struct SkScalarToSIMD<double> { typedef double Type[2]; };
+ #endif
#elif defined(SK2X_PRIVATE)
@@ -28,10 +32,7 @@
M() Sk2x() {}
M() Sk2x(float val) { fVec = vdup_n_f32(val); }
-M() Sk2x(float a, float b) {
- fVec = vset_lane_f32(a, fVec, 0);
- fVec = vset_lane_f32(b, fVec, 1);
-}
+M() Sk2x(float a, float b) { fVec = (float32x2_t) { a, b }; }
M(Sk2f&) operator=(const Sk2f& o) { fVec = o.fVec; return *this; }
M(Sk2f) Load(const float vals[2]) { return vld1_f32(vals); }
@@ -60,33 +61,62 @@ M(Sk2f) sqrt() const {
#define M(...) template <> inline __VA_ARGS__ Sk2x<double>::
-// TODO: #ifdef SK_CPU_ARM64 use float64x2_t for Sk2d.
-
-M() Sk2x() {}
-M() Sk2x(double val) { fVec[0] = fVec[1] = val; }
-M() Sk2x(double a, double b) { fVec[0] = a; fVec[1] = b; }
-M(Sk2d&) operator=(const Sk2d& o) {
- fVec[0] = o.fVec[0];
- fVec[1] = o.fVec[1];
- return *this;
-}
-
-M(Sk2d) Load(const double vals[2]) { return Sk2d(vals[0], vals[1]); }
-M(void) store(double vals[2]) const { vals[0] = fVec[0]; vals[1] = fVec[1]; }
-
-M(Sk2d) add(const Sk2d& o) const { return Sk2d(fVec[0] + o.fVec[0], fVec[1] + o.fVec[1]); }
-M(Sk2d) subtract(const Sk2d& o) const { return Sk2d(fVec[0] - o.fVec[0], fVec[1] - o.fVec[1]); }
-M(Sk2d) multiply(const Sk2d& o) const { return Sk2d(fVec[0] * o.fVec[0], fVec[1] * o.fVec[1]); }
-
-M(Sk2d) Min(const Sk2d& a, const Sk2d& b) {
- return Sk2d(SkTMin(a.fVec[0], b.fVec[0]), SkTMin(a.fVec[1], b.fVec[1]));
-}
-M(Sk2d) Max(const Sk2d& a, const Sk2d& b) {
- return Sk2d(SkTMax(a.fVec[0], b.fVec[0]), SkTMax(a.fVec[1], b.fVec[1]));
-}
-
-M(Sk2d) rsqrt() const { return Sk2d(1.0/::sqrt(fVec[0]), 1.0/::sqrt(fVec[1])); }
-M(Sk2d) sqrt() const { return Sk2d( ::sqrt(fVec[0]), ::sqrt(fVec[1])); }
+#if defined(SK_CPU_ARM64)
+ M() Sk2x() {}
+ M() Sk2x(double val) { fVec = vdupq_n_f64(val); }
+ M() Sk2x(double a, double b) { fVec = (float64x2_t) { a, b }; }
+ M(Sk2d&) operator=(const Sk2d& o) { fVec = o.fVec; return *this; }
+
+ M(Sk2d) Load(const double vals[2]) { return vld1q_f64(vals); }
+ M(void) store(double vals[2]) const { vst1q_f64(vals, fVec); }
+
+ M(Sk2d) add(const Sk2d& o) const { return vaddq_f64(fVec, o.fVec); }
+ M(Sk2d) subtract(const Sk2d& o) const { return vsubq_f64(fVec, o.fVec); }
+ M(Sk2d) multiply(const Sk2d& o) const { return vmulq_f64(fVec, o.fVec); }
+
+ M(Sk2d) Min(const Sk2d& a, const Sk2d& b) { return vminq_f64(a.fVec, b.fVec); }
+ M(Sk2d) Max(const Sk2d& a, const Sk2d& b) { return vmaxq_f64(a.fVec, b.fVec); }
+
+ M(Sk2d) rsqrt() const {
+ float64x2_t est0 = vrsqrteq_f64(fVec),
+ est1 = vmulq_f64(vrsqrtsq_f64(fVec, vmulq_f64(est0, est0)), est0);
+ return est1;
+ }
+ M(Sk2d) sqrt() const {
+ float64x2_t est1 = this->rsqrt().fVec,
+ // Two extra steps of Newton's method to refine the estimate of 1/sqrt(this).
+ est2 = vmulq_f64(vrsqrtsq_f64(fVec, vmulq_f64(est1, est1)), est1),
+ est3 = vmulq_f64(vrsqrtsq_f64(fVec, vmulq_f64(est2, est2)), est2);
+ return vmulq_f64(fVec, est3);
+ }
+
+#else // Scalar implementation for 32-bit chips, which don't have float64x2_t.
+ M() Sk2x() {}
+ M() Sk2x(double val) { fVec[0] = fVec[1] = val; }
+ M() Sk2x(double a, double b) { fVec[0] = a; fVec[1] = b; }
+ M(Sk2d&) operator=(const Sk2d& o) {
+ fVec[0] = o.fVec[0];
+ fVec[1] = o.fVec[1];
+ return *this;
+ }
+
+ M(Sk2d) Load(const double vals[2]) { return Sk2d(vals[0], vals[1]); }
+ M(void) store(double vals[2]) const { vals[0] = fVec[0]; vals[1] = fVec[1]; }
+
+ M(Sk2d) add(const Sk2d& o) const { return Sk2d(fVec[0] + o.fVec[0], fVec[1] + o.fVec[1]); }
+ M(Sk2d) subtract(const Sk2d& o) const { return Sk2d(fVec[0] - o.fVec[0], fVec[1] - o.fVec[1]); }
+ M(Sk2d) multiply(const Sk2d& o) const { return Sk2d(fVec[0] * o.fVec[0], fVec[1] * o.fVec[1]); }
+
+ M(Sk2d) Min(const Sk2d& a, const Sk2d& b) {
+ return Sk2d(SkTMin(a.fVec[0], b.fVec[0]), SkTMin(a.fVec[1], b.fVec[1]));
+ }
+ M(Sk2d) Max(const Sk2d& a, const Sk2d& b) {
+ return Sk2d(SkTMax(a.fVec[0], b.fVec[0]), SkTMax(a.fVec[1], b.fVec[1]));
+ }
+
+ M(Sk2d) rsqrt() const { return Sk2d(1.0/::sqrt(fVec[0]), 1.0/::sqrt(fVec[1])); }
+ M(Sk2d) sqrt() const { return Sk2d( ::sqrt(fVec[0]), ::sqrt(fVec[1])); }
+#endif
#undef M
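
For reference, a scalar sketch (not part of the patch) of the refinement step that rsqrt()/sqrt() perform above, assuming vrsqrtsq_f64(a, b) computes (3 - a*b)/2 per lane, so each step is est' = est * (3 - x*est*est)/2:

#include <cmath>
#include <cstdio>

// One Newton-Raphson step for 1/sqrt(x); it roughly doubles the number of correct bits.
static double rsqrt_step(double x, double est) {
    return est * (3.0 - x * est * est) * 0.5;
}

int main() {
    double x   = 2.0;
    double est = 0.7;                   // stand-in for the rough vrsqrteq_f64 estimate
    for (int i = 1; i <= 3; i++) {      // rsqrt() does one step; sqrt() does two more
        est = rsqrt_step(x, est);
        std::printf("step %d: est = %.17g\n", i, est);
    }
    std::printf("1/sqrt(2)       = %.17g\n", 1.0 / std::sqrt(x));
    // sqrt(x) is then recovered as x * (1/sqrt(x)), matching the final vmulq_f64(fVec, est3).
    return 0;
}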