OLD | NEW |
| (Empty) |
1 /* | |
2 * Copyright 2015 Google Inc. | |
3 * | |
4 * Use of this source code is governed by a BSD-style license that can be | |
5 * found in the LICENSE file. | |
6 */ | |
7 | |
8 #ifndef SkNx_avx_DEFINED | |
9 #define SkNx_avx_DEFINED | |
10 | |
11 // This file may assume <= AVX, but must check SK_CPU_SSE_LEVEL for anything mor
e recent. | |
12 | |
13 // All the SSE specializations are still good ideas. We'll just add Sk8f. | |
14 #include "SkNx_sse.h" | |
15 | |
16 // SkNx_sse.h defines SKNX_IS_FAST. | |
17 | |
18 namespace { // See SkNx.h | |
19 | |
// 8-wide float vector, one value per lane of a single AVX ymm register.
// Specializes the generic SkNx<N,T> declared in SkNx.h.
template <>
class SkNx<8, float> {
public:
    // Wrap a raw AVX register; also lets intrinsic results convert implicitly to SkNx.
    SkNx(const __m256& vec) : fVec(vec) {}

    SkNx() {}  // fVec deliberately left uninitialized.
    SkNx(float val) : fVec(_mm256_set1_ps(val)) {}  // Broadcast val into all 8 lanes.
    // Unaligned load of 8 consecutive floats from ptr.
    static SkNx Load(const void* ptr) { return _mm256_loadu_ps((const float*)ptr); }

    // Lane-by-lane construction: a lands in lane 0, h in lane 7 (setr = "reversed" order).
    SkNx(float a, float b, float c, float d,
         float e, float f, float g, float h) : fVec(_mm256_setr_ps(a,b,c,d,e,f,g,h)) {}

    // Unaligned store of all 8 lanes to ptr.
    void store(void* ptr) const { _mm256_storeu_ps((float*)ptr, fVec); }

    // Lane-wise arithmetic.
    SkNx operator + (const SkNx& o) const { return _mm256_add_ps(fVec, o.fVec); }
    SkNx operator - (const SkNx& o) const { return _mm256_sub_ps(fVec, o.fVec); }
    SkNx operator * (const SkNx& o) const { return _mm256_mul_ps(fVec, o.fVec); }
    SkNx operator / (const SkNx& o) const { return _mm256_div_ps(fVec, o.fVec); }

    // Lane-wise comparisons, producing all-1s (true) or all-0s (false) per lane.
    // All predicates are ordered+quiet (_OQ): any comparison involving NaN yields false.
    // NOTE(review): the SSE backend's operator!= (_mm_cmpneq_ps) is NEQ_UQ, i.e. true
    // when unordered; _CMP_NEQ_OQ here makes NaN != NaN false instead — confirm the
    // difference from SkNx_sse.h is intentional.
    SkNx operator == (const SkNx& o) const { return _mm256_cmp_ps(fVec, o.fVec, _CMP_EQ_OQ); }
    SkNx operator != (const SkNx& o) const { return _mm256_cmp_ps(fVec, o.fVec, _CMP_NEQ_OQ); }
    SkNx operator  < (const SkNx& o) const { return _mm256_cmp_ps(fVec, o.fVec, _CMP_LT_OQ); }
    SkNx operator  > (const SkNx& o) const { return _mm256_cmp_ps(fVec, o.fVec, _CMP_GT_OQ); }
    SkNx operator <= (const SkNx& o) const { return _mm256_cmp_ps(fVec, o.fVec, _CMP_LE_OQ); }
    SkNx operator >= (const SkNx& o) const { return _mm256_cmp_ps(fVec, o.fVec, _CMP_GE_OQ); }

    // Lane-wise minimum / maximum.
    static SkNx Min(const SkNx& l, const SkNx& r) { return _mm256_min_ps(l.fVec, r.fVec); }
    static SkNx Max(const SkNx& l, const SkNx& r) { return _mm256_max_ps(l.fVec, r.fVec); }

    // Full-precision square root, and hardware reciprocal-sqrt approximation.
    // rsqrt1()/rsqrt2() perform no extra Newton-Raphson refinement here — they just
    // repeat the rsqrt0() approximation; presumably acceptable for callers — TODO confirm
    // against the precision the other backends provide.
    SkNx  sqrt() const { return _mm256_sqrt_ps (fVec);  }
    SkNx rsqrt0() const { return _mm256_rsqrt_ps(fVec); }
    SkNx rsqrt1() const { return this->rsqrt0(); }
    SkNx rsqrt2() const { return this->rsqrt1(); }

    SkNx       invert() const { return SkNx(1) / *this; }        // Exact 1/x via divide.
    SkNx approxInvert() const { return _mm256_rcp_ps(fVec); }    // Fast hardware 1/x approximation.

    // Extract lane k. Reads the register through a union so the compiler handles the
    // register-to-memory move; k&7 keeps the index in bounds even if SkASSERT is compiled out.
    template <int k> float kth() const {
        SkASSERT(0 <= k && k < 8);
        union { __m256 v; float fs[8]; } pun = {fVec};
        return pun.fs[k&7];
    }

    // movemask gathers the sign bit of each lane; comparison results set all bits,
    // so sign bits indicate per-lane truth.
    bool allTrue() const { return 0xff == _mm256_movemask_ps(fVec); }
    bool anyTrue() const { return 0x00 != _mm256_movemask_ps(fVec); }

    // Treat *this as a lane mask: pick t's lane where the sign bit is set, else e's.
    SkNx thenElse(const SkNx& t, const SkNx& e) const {
        return _mm256_blendv_ps(e.fVec, t.fVec, fVec);
    }

    __m256 fVec;
};
72 | |
73 template<> inline Sk8b SkNx_cast<uint8_t, float, 8>(const Sk8f& src) { | |
74 __m256i _32 = _mm256_cvttps_epi32(src.fVec); | |
75 __m128i lo = _mm256_extractf128_si256(_32, 0), | |
76 hi = _mm256_extractf128_si256(_32, 1), | |
77 _16 = _mm_packus_epi32(lo, hi); | |
78 return _mm_packus_epi16(_16, _16); | |
79 } | |
80 | |
81 template<> inline Sk8f SkNx_cast<float, uint8_t, 8>(const Sk8b& src) { | |
82 /* TODO lo = _mm_cvtepu8_epi32(src.fVec), | |
83 * hi = _mm_cvtepu8_epi32(_mm_srli_si128(src.fVec, 4)) | |
84 */ | |
85 __m128i _16 = _mm_unpacklo_epi8(src.fVec, _mm_setzero_si128()), | |
86 lo = _mm_unpacklo_epi16(_16, _mm_setzero_si128()), | |
87 hi = _mm_unpackhi_epi16(_16, _mm_setzero_si128()); | |
88 __m256i _32 = _mm256_insertf128_si256(_mm256_castsi128_si256(lo), hi, 1); | |
89 return _mm256_cvtepi32_ps(_32); | |
90 } | |
91 | |
92 } // namespace | |
93 | |
94 #endif//SkNx_avx_DEFINED | |
OLD | NEW |