OLD | NEW |
1 /* | 1 /* |
2 * Copyright 2015 Google Inc. | 2 * Copyright 2015 Google Inc. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
6 */ | 6 */ |
7 | 7 |
8 #ifndef SkNx_sse_DEFINED | 8 #ifndef SkNx_sse_DEFINED |
9 #define SkNx_sse_DEFINED | 9 #define SkNx_sse_DEFINED |
10 | 10 |
11 // This file may assume <= SSE2, but must check SK_CPU_SSE_LEVEL for anything more recent. | 11 // This file may assume <= SSE2, but must check SK_CPU_SSE_LEVEL for anything more recent. |
12 // If you do, make sure this is in a static inline function... anywhere else risks violating ODR. | 12 // If you do, make sure this is in a static inline function... anywhere else risks violating ODR. |
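Note on the pattern those two lines describe: anything newer than SSE2 must sit behind a SK_CPU_SSE_LEVEL check inside a static inline function, so every translation unit compiles a body consistent with its own flags rather than two TUs linking different definitions of one external function. A minimal sketch, assuming Skia's SK_CPU_SSE_LEVEL_SSE41 constant (the function name here is illustrative, not part of this patch):

    static inline __m128 guarded_floor(__m128 v) {
    #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
        return _mm_floor_ps(v);      // hardware floor, SSE4.1 and up
    #else
        return sse2_mm_floor_ps(v);  // SSE2 fallback (added below in this patch)
    #endif
    }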
13 | 13 |
14 #define SKNX_IS_FAST | 14 #define SKNX_IS_FAST |
15 | 15 |
| 16 // SSE 4.1 has _mm_floor_ps to floor 4 floats. We emulate it: |
| 17 // - round by adding (1<<23) with our sign, then subtracting it; |
| 18 // - if that rounded value is bigger than our input, subtract 1. |
| 19 static inline __m128 sse2_mm_floor_ps(__m128 v) { |
| 20 __m128 sign = _mm_and_ps(v, _mm_set1_ps(-0.0f)); |
| 21 __m128 bias = _mm_or_ps(sign, _mm_set1_ps(1<<23)); |
| 22 __m128 rounded = _mm_sub_ps(_mm_add_ps(v, bias), bias); |
| 23 __m128 too_big = _mm_cmpgt_ps(rounded, v); |
| 24 return _mm_sub_ps(rounded, _mm_and_ps(too_big, _mm_set1_ps(1.0f))); |
| 25 } |
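A scalar walk-through of the trick the new sse2_mm_floor_ps uses: for |v| < 2^23, floats at magnitude 2^23 are spaced exactly 1.0 apart, so adding a sign-matched 2^23 makes the FPU's round-to-nearest discard the fraction bits, and subtracting the bias back leaves v rounded to the nearest integer; the final compare-and-subtract turns round-to-nearest into round-down. For example, 2.7 + 2^23 rounds to 2^23 + 3, the subtraction yields 3.0, and since 3.0 > 2.7 one is subtracted, giving floor(2.7) = 2.0. A minimal scalar sketch of the same steps, assuming IEEE-754 round-to-nearest (helper name illustrative; a compiler under -ffast-math could fold the add/subtract away, which the intrinsic form above avoids):

    static inline float scalar_floor(float v) {
        float bias    = v < 0 ? -8388608.0f : 8388608.0f;  // copysign(2^23, v)
        float rounded = (v + bias) - bias;                 // round-to-nearest via mantissa overflow
        return rounded > v ? rounded - 1.0f : rounded;     // nearest -> floor
    }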
| 26 |
16 template <> | 27 template <> |
17 class SkNx<2, float> { | 28 class SkNx<2, float> { |
18 public: | 29 public: |
19 SkNx(const __m128& vec) : fVec(vec) {} | 30 SkNx(const __m128& vec) : fVec(vec) {} |
20 | 31 |
21 SkNx() {} | 32 SkNx() {} |
22 SkNx(float val) : fVec(_mm_set1_ps(val)) {} | 33 SkNx(float val) : fVec(_mm_set1_ps(val)) {} |
23 static SkNx Load(const void* ptr) { | 34 static SkNx Load(const void* ptr) { |
24 return _mm_castsi128_ps(_mm_loadl_epi64((const __m128i*)ptr)); | 35 return _mm_castsi128_ps(_mm_loadl_epi64((const __m128i*)ptr)); |
25 } | 36 } |
(...skipping 59 matching lines...)
85 SkNx operator != (const SkNx& o) const { return _mm_cmpneq_ps(fVec, o.fVec); } | 96 SkNx operator != (const SkNx& o) const { return _mm_cmpneq_ps(fVec, o.fVec); } |
86 SkNx operator < (const SkNx& o) const { return _mm_cmplt_ps (fVec, o.fVec); } | 97 SkNx operator < (const SkNx& o) const { return _mm_cmplt_ps (fVec, o.fVec); } |
87 SkNx operator > (const SkNx& o) const { return _mm_cmpgt_ps (fVec, o.fVec); } | 98 SkNx operator > (const SkNx& o) const { return _mm_cmpgt_ps (fVec, o.fVec); } |
88 SkNx operator <= (const SkNx& o) const { return _mm_cmple_ps (fVec, o.fVec); } | 99 SkNx operator <= (const SkNx& o) const { return _mm_cmple_ps (fVec, o.fVec); } |
89 SkNx operator >= (const SkNx& o) const { return _mm_cmpge_ps (fVec, o.fVec); } | 100 SkNx operator >= (const SkNx& o) const { return _mm_cmpge_ps (fVec, o.fVec); } |
90 | 101 |
91 static SkNx Min(const SkNx& l, const SkNx& r) { return _mm_min_ps(l.fVec, r.fVec); } | 102 static SkNx Min(const SkNx& l, const SkNx& r) { return _mm_min_ps(l.fVec, r.fVec); } |
92 static SkNx Max(const SkNx& l, const SkNx& r) { return _mm_max_ps(l.fVec, r.fVec); } | 103 static SkNx Max(const SkNx& l, const SkNx& r) { return _mm_max_ps(l.fVec, r.fVec); } |
93 | 104 |
94 SkNx abs() const { return _mm_andnot_ps(_mm_set1_ps(-0.0f), fVec); } | 105 SkNx abs() const { return _mm_andnot_ps(_mm_set1_ps(-0.0f), fVec); } |
| 106 SkNx floor() const { return sse2_mm_floor_ps(fVec); } |
95 | 107 |
96 SkNx sqrt () const { return _mm_sqrt_ps (fVec); } | 108 SkNx sqrt () const { return _mm_sqrt_ps (fVec); } |
97 SkNx rsqrt0() const { return _mm_rsqrt_ps(fVec); } | 109 SkNx rsqrt0() const { return _mm_rsqrt_ps(fVec); } |
98 SkNx rsqrt1() const { return this->rsqrt0(); } | 110 SkNx rsqrt1() const { return this->rsqrt0(); } |
99 SkNx rsqrt2() const { return this->rsqrt1(); } | 111 SkNx rsqrt2() const { return this->rsqrt1(); } |
100 | 112 |
101 SkNx invert() const { return SkNx(1) / *this; } | 113 SkNx invert() const { return SkNx(1) / *this; } |
102 SkNx approxInvert() const { return _mm_rcp_ps(fVec); } | 114 SkNx approxInvert() const { return _mm_rcp_ps(fVec); } |
103 | 115 |
104 float operator[](int k) const { | 116 float operator[](int k) const { |
(...skipping 231 matching lines...)
336 | 348 |
337 template<> /*static*/ inline Sk4h SkNx_cast<uint16_t, uint8_t>(const Sk4b& src) { | 349 template<> /*static*/ inline Sk4h SkNx_cast<uint16_t, uint8_t>(const Sk4b& src) { |
338 return _mm_unpacklo_epi8(src.fVec, _mm_setzero_si128()); | 350 return _mm_unpacklo_epi8(src.fVec, _mm_setzero_si128()); |
339 } | 351 } |
340 | 352 |
341 template<> /*static*/ inline Sk4b SkNx_cast<uint8_t, uint16_t>(const Sk4h& src) { | 353 template<> /*static*/ inline Sk4b SkNx_cast<uint8_t, uint16_t>(const Sk4h& src) { |
342 return _mm_packus_epi16(src.fVec, src.fVec); | 354 return _mm_packus_epi16(src.fVec, src.fVec); |
343 } | 355 } |
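Taken together these two specializations give byte data a safe round trip: the widening cast zero-extends with _mm_unpacklo_epi8 against a zero register, and the narrowing cast uses the saturating _mm_packus_epi16, so 16-bit intermediates above 255 clamp to 255 instead of wrapping. A usage sketch, assuming the usual SkNx_cast<D>(src) call pattern from SkNx.h (variable names are illustrative):

    Sk4b bytes   = Sk4b::Load(ptr);             // four 8-bit channels
    Sk4h wide    = SkNx_cast<uint16_t>(bytes);  // zero-extend to 16-bit lanes
    wide         = wide + wide;                 // 16-bit math with headroom
    Sk4b clamped = SkNx_cast<uint8_t>(wide);    // saturating pack back to bytes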
344 | 356 |
345 #endif//SkNx_sse_DEFINED | 357 #endif//SkNx_sse_DEFINED |