OLD | NEW |
1 /* | 1 /* |
2 * Copyright 2015 Google Inc. | 2 * Copyright 2015 Google Inc. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
6 */ | 6 */ |
7 | 7 |
8 #ifndef SkNx_avx_DEFINED | 8 #ifndef SkNx_avx_DEFINED |
9 #define SkNx_avx_DEFINED | 9 #define SkNx_avx_DEFINED |
10 | 10 |
11 // This file may assume <= AVX, but must check SK_CPU_SSE_LEVEL for anything more recent. | 11 // This file may assume <= AVX, but must check SK_CPU_SSE_LEVEL for anything more recent. |
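
For example, anything newer than AVX in this file would need a level guard along these lines (a minimal sketch; SK_CPU_SSE_LEVEL_AVX2 is assumed here, named in the style of the existing SK_CPU_SSE_LEVEL_* constants in SkPreConfig.h):

    #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_AVX2
        // AVX2-only intrinsics (e.g. 256-bit integer math) would go here.
    #endif
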
12 | 12 |
13 // All the SSE specializations are still good ideas. We'll just add Sk8f. | 13 // All the SSE specializations are still good ideas. We'll just add Sk8f. |
14 #include "SkNx_sse.h" | 14 #include "SkNx_sse.h" |
15 | 15 |
16 // SkNx_sse.h defines SKNX_IS_FAST. | 16 // SkNx_sse.h defines SKNX_IS_FAST. |
17 | 17 |
18 namespace { // See SkNx.h | 18 namespace { // See SkNx.h |
19 | 19 |
20 template <> | 20 template <> |
21 class SkNx<8, float> { | 21 class SkNx<8, float> { |
22 public: | 22 public: |
23 SkNx(const __m256& vec) : fVec(vec) {} | 23 SkNx(const __m256& vec) : fVec(vec) {} |
24 | 24 |
25 SkNx() {} | 25 SkNx() {} |
26 SkNx(float val) : fVec(_mm256_set1_ps(val)) {} | 26 SkNx(float val) : fVec(_mm256_set1_ps(val)) {} |
27 static SkNx Load(const float vals[8]) { return _mm256_loadu_ps(vals); } | 27 static SkNx Load(const float vals[8]) { return _mm256_loadu_ps(vals); } |
28 | 28 |
29 static SkNx FromBytes(const uint8_t bytes[8]) { | |
30 __m128i fix8 = _mm_loadl_epi64((const __m128i*)bytes), | |
31 fix16 = _mm_unpacklo_epi8 (fix8 , _mm_setzero_si128()), | |
32 lo32 = _mm_unpacklo_epi16(fix16, _mm_setzero_si128()), | |
33 hi32 = _mm_unpackhi_epi16(fix16, _mm_setzero_si128()); | |
34 __m256i fix32 = _mm256_insertf128_si256(_mm256_castsi128_si256(lo32), hi32, 1); | |
35 return _mm256_cvtepi32_ps(fix32); | |
36 } | |
37 | |
38 SkNx(float a, float b, float c, float d, | 29 SkNx(float a, float b, float c, float d, |
39 float e, float f, float g, float h) : fVec(_mm256_setr_ps(a,b,c,d,e,f,g,h)) {} | 30 float e, float f, float g, float h) : fVec(_mm256_setr_ps(a,b,c,d,e,f,g,h)) {} |
40 | 31 |
41 void store(float vals[8]) const { _mm256_storeu_ps(vals, fVec); } | 32 void store(float vals[8]) const { _mm256_storeu_ps(vals, fVec); } |
42 void toBytes(uint8_t bytes[8]) const { | |
43 __m256i fix32 = _mm256_cvttps_epi32(fVec); | |
44 __m128i lo32 = _mm256_extractf128_si256(fix32, 0), | |
45 hi32 = _mm256_extractf128_si256(fix32, 1), | |
46 fix16 = _mm_packus_epi32(lo32, hi32), | |
47 fix8 = _mm_packus_epi16(fix16, fix16); | |
48 _mm_storel_epi64((__m128i*)bytes, fix8); | |
49 } | |
50 | 33 |
51 SkNx operator + (const SkNx& o) const { return _mm256_add_ps(fVec, o.fVec); } | 34 SkNx operator + (const SkNx& o) const { return _mm256_add_ps(fVec, o.fVec); } |
52 SkNx operator - (const SkNx& o) const { return _mm256_sub_ps(fVec, o.fVec); } | 35 SkNx operator - (const SkNx& o) const { return _mm256_sub_ps(fVec, o.fVec); } |
53 SkNx operator * (const SkNx& o) const { return _mm256_mul_ps(fVec, o.fVec); } | 36 SkNx operator * (const SkNx& o) const { return _mm256_mul_ps(fVec, o.fVec); } |
54 SkNx operator / (const SkNx& o) const { return _mm256_div_ps(fVec, o.fVec); } | 37 SkNx operator / (const SkNx& o) const { return _mm256_div_ps(fVec, o.fVec); } |
55 | 38 |
56 SkNx operator == (const SkNx& o) const { return _mm256_cmp_ps(fVec, o.fVec, _CMP_EQ_OQ); } | 39 SkNx operator == (const SkNx& o) const { return _mm256_cmp_ps(fVec, o.fVec, _CMP_EQ_OQ); } |
57 SkNx operator != (const SkNx& o) const { return _mm256_cmp_ps(fVec, o.fVec, _CMP_NEQ_OQ); } | 40 SkNx operator != (const SkNx& o) const { return _mm256_cmp_ps(fVec, o.fVec, _CMP_NEQ_OQ); } |
58 SkNx operator < (const SkNx& o) const { return _mm256_cmp_ps(fVec, o.fVec, _CMP_LT_OQ); } | 41 SkNx operator < (const SkNx& o) const { return _mm256_cmp_ps(fVec, o.fVec, _CMP_LT_OQ); } |
59 SkNx operator > (const SkNx& o) const { return _mm256_cmp_ps(fVec, o.fVec, _CMP_GT_OQ); } | 42 SkNx operator > (const SkNx& o) const { return _mm256_cmp_ps(fVec, o.fVec, _CMP_GT_OQ); } |
(...skipping 20 matching lines...)
80 bool allTrue() const { return 0xff == _mm256_movemask_ps(fVec); } | 63 bool allTrue() const { return 0xff == _mm256_movemask_ps(fVec); } |
81 bool anyTrue() const { return 0x00 != _mm256_movemask_ps(fVec); } | 64 bool anyTrue() const { return 0x00 != _mm256_movemask_ps(fVec); } |
82 | 65 |
83 SkNx thenElse(const SkNx& t, const SkNx& e) const { | 66 SkNx thenElse(const SkNx& t, const SkNx& e) const { |
84 return _mm256_blendv_ps(e.fVec, t.fVec, fVec); | 67 return _mm256_blendv_ps(e.fVec, t.fVec, fVec); |
85 } | 68 } |
86 | 69 |
87 __m256 fVec; | 70 __m256 fVec; |
88 }; | 71 }; |
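
A minimal usage sketch of the new 8-wide specialization (Sk8f is the SkNx<8, float> alias from SkNx.h; the buffers and the clamp value below are illustrative):

    float in[8] = {1, 2, 3, 4, 5, 6, 7, 8}, out[8];
    Sk8f v = Sk8f::Load(in);              // one unaligned 256-bit load
    Sk8f m = v > Sk8f(4.0f);              // per-lane mask via _mm256_cmp_ps
    m.thenElse(Sk8f(4.0f), v).store(out); // blendv select: out = {1,2,3,4,4,4,4,4}
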
89 | 72 |
| 73 template<> inline Sk8b SkNx_cast<uint8_t, float, 8>(const Sk8f& src) { |
| 74 __m256i _32 = _mm256_cvttps_epi32(src.fVec); |
| 75 __m128i lo = _mm256_extractf128_si256(_32, 0), |
| 76 hi = _mm256_extractf128_si256(_32, 1), |
| 77 _16 = _mm_packus_epi32(lo, hi); |
| 78 return _mm_packus_epi16(_16, _16); |
| 79 } |
| 80 |
| 81 template<> inline Sk8f SkNx_cast<float, uint8_t, 8>(const Sk8b& src) { |
| 82 /* TODO lo = _mm_cvtepu8_epi32(src.fVec), |
| 83 * hi = _mm_cvtepu8_epi32(_mm_srli_si128(src.fVec, 4)) |
| 84 */ |
| 85 __m128i _16 = _mm_unpacklo_epi8(src.fVec, _mm_setzero_si128()), |
| 86 lo = _mm_unpacklo_epi16(_16, _mm_setzero_si128()), |
| 87 hi = _mm_unpackhi_epi16(_16, _mm_setzero_si128()); |
| 88 __m256i _32 = _mm256_insertf128_si256(_mm256_castsi128_si256(lo), hi, 1); |
| 89 return _mm256_cvtepi32_ps(_32); |
| 90 } |
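
These two casts give the same round trip FromBytes/toBytes used to, now as SkNx_cast specializations. A sketch of that round trip, assuming the Sk8b (SkNx<8, uint8_t>) specialization from SkNx_sse.h with its 8-byte Load/store; note the packus steps saturate, so out-of-range floats clamp to [0, 255] rather than wrapping:

    uint8_t px[8] = {0, 1, 127, 128, 200, 254, 255, 255};
    Sk8f f = SkNx_cast<float, uint8_t, 8>(Sk8b::Load(px));  // widen 8 bytes -> 8 floats
    f = f * Sk8f(0.5f);                                     // any float math, e.g. halving
    SkNx_cast<uint8_t, float, 8>(f).store(px);              // narrow back with saturation
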
| 91 |
90 } // namespace | 92 } // namespace |
91 | 93 |
92 #endif//SkNx_avx_DEFINED | 94 #endif//SkNx_avx_DEFINED |