| OLD | NEW |
| 1 // It is important _not_ to put header guards here. | 1 // It is important _not_ to put header guards here. |
| 2 // This file will be intentionally included three times. | 2 // This file will be intentionally included three times. |
| 3 | 3 |
| 4 // Useful reading: | 4 // Useful reading: |
| 5 // https://software.intel.com/sites/landingpage/IntrinsicsGuide/ | 5 // https://software.intel.com/sites/landingpage/IntrinsicsGuide/ |
| 6 | 6 |
| 7 #if defined(SK4X_PREAMBLE) | 7 #if defined(SK4X_PREAMBLE) |
| 8 // Code in this file may assume SSE and SSE2. | 8 // Code in this file may assume SSE and SSE2. |
| 9 #include <emmintrin.h> | 9 #include <emmintrin.h> |
| 10 | 10 |
| (...skipping 57 matching lines...) |
| 68 } | 68 } |
| 69 | 69 |
| 70 template <typename T> | 70 template <typename T> |
| 71 Sk4x<T> Sk4x<T>::ZWCD(const Sk4x<T>& a, const Sk4x<T>& b) { | 71 Sk4x<T> Sk4x<T>::ZWCD(const Sk4x<T>& a, const Sk4x<T>& b) { |
| 72 return _mm_movehl_ps(as_4f(b.fVec), as_4f(a.fVec)); | 72 return _mm_movehl_ps(as_4f(b.fVec), as_4f(a.fVec)); |
| 73 } | 73 } |
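A quick sketch of what ZWCD produces, not part of the CL: _mm_movehl_ps places the high half of its second argument in the low lanes and the high half of its first argument in the high lanes; the arguments here are (b, a), so with a = {x,y,z,w} and b = {A,B,C,D} the result is {z, w, C, D}.

    // Illustrative only, using the Sk4f constructor and store() from this CL.
    float out[4];
    Sk4f a(1, 2, 3, 4), b(5, 6, 7, 8);
    Sk4f::ZWCD(a, b).store(out);   // out == {3, 4, 7, 8}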
| 74 | 74 |
| 75 // Now we'll write all Sk4f specific methods. This M() macro will remove some noise. | 75 // Now we'll write all Sk4f specific methods. This M() macro will remove some noise. |
| 76 #define M(...) template <> inline __VA_ARGS__ Sk4f:: | 76 #define M(...) template <> inline __VA_ARGS__ Sk4f:: |
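For reference, M() just prefixes each definition with an explicit specialization header, so a line like M(Sk4f) Load(...) expands to roughly:

    // Illustrative expansion only.
    template <> inline Sk4f Sk4f::Load(const float fs[4]) { return _mm_loadu_ps(fs); }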
| 77 | 77 |
| | 78 M() Sk4x(float v) : fVec(_mm_set1_ps(v)) {} |
| 78 M() Sk4x(float a, float b, float c, float d) : fVec(_mm_set_ps(d,c,b,a)) {} | 79 M() Sk4x(float a, float b, float c, float d) : fVec(_mm_set_ps(d,c,b,a)) {} |
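Note on argument order, not a change in the CL: _mm_set_ps takes lanes from highest to lowest, so passing (d,c,b,a) keeps Sk4f(a,b,c,d) in memory order a, b, c, d.

    // Illustrative only, assuming the store() defined just below.
    float out[4];
    Sk4f(1, 2, 3, 4).store(out);   // out == {1, 2, 3, 4}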
| 79 | 80 |
| 80 M(Sk4f) Load (const float fs[4]) { return _mm_loadu_ps(fs); } | 81 M(Sk4f) Load (const float fs[4]) { return _mm_loadu_ps(fs); } |
| 81 M(Sk4f) LoadAligned(const float fs[4]) { return _mm_load_ps (fs); } | 82 M(Sk4f) LoadAligned(const float fs[4]) { return _mm_load_ps (fs); } |
| 82 | 83 |
| 83 M(void) store (float fs[4]) const { _mm_storeu_ps(fs, fVec); } | 84 M(void) store (float fs[4]) const { _mm_storeu_ps(fs, fVec); } |
| 84 M(void) storeAligned(float fs[4]) const { _mm_store_ps (fs, fVec); } | 85 M(void) storeAligned(float fs[4]) const { _mm_store_ps (fs, fVec); } |
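A small usage note, not part of the CL: the Aligned variants assume a 16-byte-aligned pointer, while Load/store tolerate any alignment.

    // Illustrative only.
    alignas(16) float vals[4] = {1, 2, 3, 4};
    Sk4f v = Sk4f::LoadAligned(vals);   // fine: vals is 16-byte aligned
    v.storeAligned(vals);               // also fine for the same reason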
| 85 | 86 |
| 86 template <> | 87 template <> |
| 87 M(Sk4i) reinterpret<Sk4i>() const { return as_4i(fVec); } | 88 M(Sk4i) reinterpret<Sk4i>() const { return as_4i(fVec); } |
| (...skipping 17 matching lines...) |
| 105 M(Sk4i) lessThanEqual (const Sk4f& o) const { return _mm_cmple_ps (fVec, o.fVec); } | 106 M(Sk4i) lessThanEqual (const Sk4f& o) const { return _mm_cmple_ps (fVec, o.fVec); } |
| 106 M(Sk4i) greaterThanEqual(const Sk4f& o) const { return _mm_cmpge_ps (fVec, o.fVec); } | 107 M(Sk4i) greaterThanEqual(const Sk4f& o) const { return _mm_cmpge_ps (fVec, o.fVec); } |
| 107 | 108 |
| 108 M(Sk4f) Min(const Sk4f& a, const Sk4f& b) { return _mm_min_ps(a.fVec, b.fVec); } | 109 M(Sk4f) Min(const Sk4f& a, const Sk4f& b) { return _mm_min_ps(a.fVec, b.fVec); } |
| 109 M(Sk4f) Max(const Sk4f& a, const Sk4f& b) { return _mm_max_ps(a.fVec, b.fVec); } | 110 M(Sk4f) Max(const Sk4f& a, const Sk4f& b) { return _mm_max_ps(a.fVec, b.fVec); } |
| 110 | 111 |
| 111 // Now we'll write all the Sk4i specific methods. Same deal for M(). | 112 // Now we'll write all the Sk4i specific methods. Same deal for M(). |
| 112 #undef M | 113 #undef M |
| 113 #define M(...) template <> inline __VA_ARGS__ Sk4i:: | 114 #define M(...) template <> inline __VA_ARGS__ Sk4i:: |
| 114 | 115 |
| | 116 M() Sk4x(int32_t v) : fVec(_mm_set1_epi32(v)) {} |
| 115 M() Sk4x(int32_t a, int32_t b, int32_t c, int32_t d) : fVec(_mm_set_epi32(d,c,b,a)) {} | 117 M() Sk4x(int32_t a, int32_t b, int32_t c, int32_t d) : fVec(_mm_set_epi32(d,c,b,a)) {} |
| 116 | 118 |
| 117 M(Sk4i) Load (const int32_t is[4]) { return _mm_loadu_si128((const __m128i*)is); } | 119 M(Sk4i) Load (const int32_t is[4]) { return _mm_loadu_si128((const __m128i*)is); } |
| 118 M(Sk4i) LoadAligned(const int32_t is[4]) { return _mm_load_si128 ((const __m128i*)is); } | 120 M(Sk4i) LoadAligned(const int32_t is[4]) { return _mm_load_si128 ((const __m128i*)is); } |
| 119 | 121 |
| 120 M(void) store (int32_t is[4]) const { _mm_storeu_si128((__m128i*)is, fVec); } | 122 M(void) store (int32_t is[4]) const { _mm_storeu_si128((__m128i*)is, fVec); } |
| 121 M(void) storeAligned(int32_t is[4]) const { _mm_store_si128 ((__m128i*)is, fVec); } | 123 M(void) storeAligned(int32_t is[4]) const { _mm_store_si128 ((__m128i*)is, fVec); } |
| 122 | 124 |
| 123 template <> | 125 template <> |
| 124 M(Sk4f) reinterpret<Sk4f>() const { return as_4f(fVec); } | 126 M(Sk4f) reinterpret<Sk4f>() const { return as_4f(fVec); } |
| (...skipping 43 matching lines...) |
| 168 } | 170 } |
| 169 M(Sk4i) Max(const Sk4i& a, const Sk4i& b) { | 171 M(Sk4i) Max(const Sk4i& a, const Sk4i& b) { |
| 170 Sk4i less = a.lessThan(b); | 172 Sk4i less = a.lessThan(b); |
| 171 return b.bitAnd(less).bitOr(a.andNot(less)); | 173 return b.bitAnd(less).bitOr(a.andNot(less)); |
| 172 } | 174 } |
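A brief gloss on Min/Max for Sk4i, not part of the CL: SSE2 has no 32-bit integer min/max (those arrive with SSE4.1's _mm_min_epi32/_mm_max_epi32), so the CL builds them from a comparison mask plus a bitwise blend. Reading andNot(x) as this & ~x, Max is the scalar equivalent of:

    // Illustrative scalar sketch of the blend above.
    int32_t max(int32_t a, int32_t b) {
        int32_t less = (a < b) ? ~0 : 0;    // a.lessThan(b): all-ones mask when true
        return (b & less) | (a & ~less);    // b.bitAnd(less).bitOr(a.andNot(less))
    }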
| 173 #endif | 175 #endif |
| 174 | 176 |
| 175 #undef M | 177 #undef M |
| 176 | 178 |
| 177 #endif//Method definitions. | 179 #endif//Method definitions. |