OLD | NEW |
1 /* | 1 /* |
2 * Copyright 2015 Google Inc. | 2 * Copyright 2015 Google Inc. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
6 */ | 6 */ |
7 | 7 |
8 namespace { // See Sk4px.h | 8 SK_ALWAYS_INLINE Sk4px Sk4px::DupPMColor(SkPMColor px) { return Sk16b(_mm_set1_epi32(px)); } |
9 | 9 |
10 inline Sk4px Sk4px::DupPMColor(SkPMColor px) { return Sk16b(_mm_set1_epi32(px)); } | 10 SK_ALWAYS_INLINE Sk4px Sk4px::Load4(const SkPMColor px[4]) { |
11 | |
12 inline Sk4px Sk4px::Load4(const SkPMColor px[4]) { | |
13 return Sk16b(_mm_loadu_si128((const __m128i*)px)); | 11 return Sk16b(_mm_loadu_si128((const __m128i*)px)); |
14 } | 12 } |
15 inline Sk4px Sk4px::Load2(const SkPMColor px[2]) { | 13 SK_ALWAYS_INLINE Sk4px Sk4px::Load2(const SkPMColor px[2]) { |
16 return Sk16b(_mm_loadl_epi64((const __m128i*)px)); | 14 return Sk16b(_mm_loadl_epi64((const __m128i*)px)); |
17 } | 15 } |
18 inline Sk4px Sk4px::Load1(const SkPMColor px[1]) { return Sk16b(_mm_cvtsi32_si128(*px)); } | 16 SK_ALWAYS_INLINE Sk4px Sk4px::Load1(const SkPMColor px[1]) { return Sk16b(_mm_cvtsi32_si128(*px)); } |
19 | 17 |
20 inline void Sk4px::store4(SkPMColor px[4]) const { _mm_storeu_si128((__m128i*)px, this->fVec); } | 18 SK_ALWAYS_INLINE void Sk4px::store4(SkPMColor px[4]) const { |
21 inline void Sk4px::store2(SkPMColor px[2]) const { _mm_storel_epi64((__m128i*)px, this->fVec); } | 19     _mm_storeu_si128((__m128i*)px, this->fVec); |
22 inline void Sk4px::store1(SkPMColor px[1]) const { *px = _mm_cvtsi128_si32(this->fVec); } | 20 } |
| 21 SK_ALWAYS_INLINE void Sk4px::store2(SkPMColor px[2]) const { |
| 22 _mm_storel_epi64((__m128i*)px, this->fVec); |
| 23 } |
| 24 SK_ALWAYS_INLINE void Sk4px::store1(SkPMColor px[1]) const { |
| 25 *px = _mm_cvtsi128_si32(this->fVec); |
| 26 } |
23 | 27 |
24 inline Sk4px::Wide Sk4px::widenLo() const { | 28 SK_ALWAYS_INLINE Sk4px::Wide Sk4px::widenLo() const { |
25 return Sk16h(_mm_unpacklo_epi8(this->fVec, _mm_setzero_si128()), | 29 return Sk16h(_mm_unpacklo_epi8(this->fVec, _mm_setzero_si128()), |
26 _mm_unpackhi_epi8(this->fVec, _mm_setzero_si128())); | 30 _mm_unpackhi_epi8(this->fVec, _mm_setzero_si128())); |
27 } | 31 } |
28 | 32 |
29 inline Sk4px::Wide Sk4px::widenHi() const { | 33 SK_ALWAYS_INLINE Sk4px::Wide Sk4px::widenHi() const { |
30 return Sk16h(_mm_unpacklo_epi8(_mm_setzero_si128(), this->fVec), | 34 return Sk16h(_mm_unpacklo_epi8(_mm_setzero_si128(), this->fVec), |
31 _mm_unpackhi_epi8(_mm_setzero_si128(), this->fVec)); | 35 _mm_unpackhi_epi8(_mm_setzero_si128(), this->fVec)); |
32 } | 36 } |
33 | 37 |
34 inline Sk4px::Wide Sk4px::widenLoHi() const { | 38 SK_ALWAYS_INLINE Sk4px::Wide Sk4px::widenLoHi() const { |
35 return Sk16h(_mm_unpacklo_epi8(this->fVec, this->fVec), | 39 return Sk16h(_mm_unpacklo_epi8(this->fVec, this->fVec), |
36 _mm_unpackhi_epi8(this->fVec, this->fVec)); | 40 _mm_unpackhi_epi8(this->fVec, this->fVec)); |
37 } | 41 } |
38 | 42 |
39 inline Sk4px::Wide Sk4px::mulWiden(const Sk16b& other) const { | 43 SK_ALWAYS_INLINE Sk4px::Wide Sk4px::mulWiden(const Sk16b& other) const { |
40 return this->widenLo() * Sk4px(other).widenLo(); | 44 return this->widenLo() * Sk4px(other).widenLo(); |
41 } | 45 } |
42 | 46 |
43 inline Sk4px Sk4px::Wide::addNarrowHi(const Sk16h& other) const { | 47 SK_ALWAYS_INLINE Sk4px Sk4px::Wide::addNarrowHi(const Sk16h& other) const { |
44 Sk4px::Wide r = (*this + other) >> 8; | 48 Sk4px::Wide r = (*this + other) >> 8; |
45 return Sk4px(_mm_packus_epi16(r.fLo.fVec, r.fHi.fVec)); | 49 return Sk4px(_mm_packus_epi16(r.fLo.fVec, r.fHi.fVec)); |
46 } | 50 } |
47 | 51 |
48 // Load4Alphas and Load2Alphas use possibly-unaligned loads (SkAlpha[] -> uint16_t or uint32_t). | 52 // Load4Alphas and Load2Alphas use possibly-unaligned loads (SkAlpha[] -> uint16_t or uint32_t). |
49 // These are safe on x86, often with no speed penalty. | 53 // These are safe on x86, often with no speed penalty. |
50 | 54 |
51 #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3 | 55 #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3 |
52 inline Sk4px Sk4px::alphas() const { | 56 SK_ALWAYS_INLINE Sk4px Sk4px::alphas() const { |
53 static_assert(SK_A32_SHIFT == 24, "Intel's always little-endian."); | 57 static_assert(SK_A32_SHIFT == 24, "Intel's always little-endian."); |
54     __m128i splat = _mm_set_epi8(15,15,15,15, 11,11,11,11, 7,7,7,7, 3,3,3,3); | 58     __m128i splat = _mm_set_epi8(15,15,15,15, 11,11,11,11, 7,7,7,7, 3,3,3,3); |
55 return Sk16b(_mm_shuffle_epi8(this->fVec, splat)); | 59 return Sk16b(_mm_shuffle_epi8(this->fVec, splat)); |
56 } | 60 } |
57 | 61 |
58 inline Sk4px Sk4px::Load4Alphas(const SkAlpha a[4]) { | 62 SK_ALWAYS_INLINE Sk4px Sk4px::Load4Alphas(const SkAlpha a[4]) { |
59 uint32_t as = *(const uint32_t*)a; | 63 uint32_t as = *(const uint32_t*)a; |
60 __m128i splat = _mm_set_epi8(3,3,3,3, 2,2,2,2, 1,1,1,1, 0,0,0,0); | 64 __m128i splat = _mm_set_epi8(3,3,3,3, 2,2,2,2, 1,1,1,1, 0,0,0,0); |
61 return Sk16b(_mm_shuffle_epi8(_mm_cvtsi32_si128(as), splat)); | 65 return Sk16b(_mm_shuffle_epi8(_mm_cvtsi32_si128(as), splat)); |
62 } | 66 } |
63 #else | 67 #else |
64 inline Sk4px Sk4px::alphas() const { | 68 SK_ALWAYS_INLINE Sk4px Sk4px::alphas() const { |
65 static_assert(SK_A32_SHIFT == 24, "Intel's always little-endian."); | 69 static_assert(SK_A32_SHIFT == 24, "Intel's always little-endian."); |
66 __m128i as = _mm_srli_epi32(this->fVec, 24); // ___3 ___2 ___1 ___0 | 70 __m128i as = _mm_srli_epi32(this->fVec, 24); // ___3 ___2 ___1 ___0 |
67 as = _mm_or_si128(as, _mm_slli_si128(as, 1)); // __33 __22 __11 __00 | 71 as = _mm_or_si128(as, _mm_slli_si128(as, 1)); // __33 __22 __11 __00 |
68 as = _mm_or_si128(as, _mm_slli_si128(as, 2)); // 3333 2222 1111 0000 | 72 as = _mm_or_si128(as, _mm_slli_si128(as, 2)); // 3333 2222 1111 0000 |
69 return Sk16b(as); | 73 return Sk16b(as); |
70 } | 74 } |
71 | 75 |
72 inline Sk4px Sk4px::Load4Alphas(const SkAlpha a[4]) { | 76 SK_ALWAYS_INLINE Sk4px Sk4px::Load4Alphas(const SkAlpha a[4]) { |
73     __m128i as = _mm_cvtsi32_si128(*(const uint32_t*)a);  // ____ ____ ____ 3210 | 77     __m128i as = _mm_cvtsi32_si128(*(const uint32_t*)a);  // ____ ____ ____ 3210 |
74     as = _mm_unpacklo_epi8 (as, _mm_setzero_si128());     // ____ ____ _3_2 _1_0 | 78     as = _mm_unpacklo_epi8 (as, _mm_setzero_si128());     // ____ ____ _3_2 _1_0 |
75     as = _mm_unpacklo_epi16(as, _mm_setzero_si128());     // ___3 ___2 ___1 ___0 | 79     as = _mm_unpacklo_epi16(as, _mm_setzero_si128());     // ___3 ___2 ___1 ___0 |
76     as = _mm_or_si128(as, _mm_slli_si128(as, 1));         // __33 __22 __11 __00 | 80     as = _mm_or_si128(as, _mm_slli_si128(as, 1));         // __33 __22 __11 __00 |
77     as = _mm_or_si128(as, _mm_slli_si128(as, 2));         // 3333 2222 1111 0000 | 81     as = _mm_or_si128(as, _mm_slli_si128(as, 2));         // 3333 2222 1111 0000 |
78 return Sk16b(as); | 82 return Sk16b(as); |
79 } | 83 } |
80 #endif | 84 #endif |
81 | 85 |
82 inline Sk4px Sk4px::Load2Alphas(const SkAlpha a[2]) { | 86 SK_ALWAYS_INLINE Sk4px Sk4px::Load2Alphas(const SkAlpha a[2]) { |
83 uint32_t as = *(const uint16_t*)a; // Aa -> Aa00 | 87 uint32_t as = *(const uint16_t*)a; // Aa -> Aa00 |
84 return Load4Alphas((const SkAlpha*)&as); | 88 return Load4Alphas((const SkAlpha*)&as); |
85 } | 89 } |
86 | 90 |
87 inline Sk4px Sk4px::zeroColors() const { | 91 SK_ALWAYS_INLINE Sk4px Sk4px::zeroColors() const { |
88     return Sk16b(_mm_and_si128(_mm_set1_epi32(0xFF << SK_A32_SHIFT), this->fVec)); | 92     return Sk16b(_mm_and_si128(_mm_set1_epi32(0xFF << SK_A32_SHIFT), this->fVec)); |
89 } | 93 } |
90 | 94 |
91 inline Sk4px Sk4px::zeroAlphas() const { | 95 SK_ALWAYS_INLINE Sk4px Sk4px::zeroAlphas() const { |
92 // andnot(a,b) == ~a & b | 96 // andnot(a,b) == ~a & b |
93     return Sk16b(_mm_andnot_si128(_mm_set1_epi32(0xFF << SK_A32_SHIFT), this->fVec)); | 97     return Sk16b(_mm_andnot_si128(_mm_set1_epi32(0xFF << SK_A32_SHIFT), this->fVec)); |
94 } | 98 } |
95 | 99 |
96 static inline __m128i widen_low_half_to_8888(__m128i v) { | 100 static SK_ALWAYS_INLINE __m128i widen_low_half_to_8888(__m128i v) { |
97 // RGB565 format: |R....|G.....|B....| | 101 // RGB565 format: |R....|G.....|B....| |
98 // Bit: 16 11 5 0 | 102 // Bit: 16 11 5 0 |
99 | 103 |
100 // First get each pixel into its own 32-bit lane. | 104 // First get each pixel into its own 32-bit lane. |
101 // v == ____ ____ ____ ____ rgb3 rgb2 rgb1 rgb0 | 105 // v == ____ ____ ____ ____ rgb3 rgb2 rgb1 rgb0 |
102 // spread == 0000 rgb3 0000 rgb2 0000 rgb1 0000 rgb0 | 106 // spread == 0000 rgb3 0000 rgb2 0000 rgb1 0000 rgb0 |
103 auto spread = _mm_unpacklo_epi16(v, _mm_setzero_si128()); | 107 auto spread = _mm_unpacklo_epi16(v, _mm_setzero_si128()); |
104 | 108 |
105     // Get each color independently, still in 565 precision but down at bit 0. | 109     // Get each color independently, still in 565 precision but down at bit 0. |
106 auto r5 = _mm_srli_epi32(spread, 11), | 110 auto r5 = _mm_srli_epi32(spread, 11), |
107 g6 = _mm_and_si128(_mm_set1_epi32(63), _mm_srli_epi32(spread, 5)), | 111 g6 = _mm_and_si128(_mm_set1_epi32(63), _mm_srli_epi32(spread, 5)), |
108 b5 = _mm_and_si128(_mm_set1_epi32(31), spread); | 112 b5 = _mm_and_si128(_mm_set1_epi32(31), spread); |
109 | 113 |
110     // Scale 565 precision up to 8-bit each, filling low 323 bits with high bits of each component. | 114     // Scale 565 precision up to 8-bit each, filling low 323 bits with high bits of each component. |
111 auto r8 = _mm_or_si128(_mm_slli_epi32(r5, 3), _mm_srli_epi32(r5, 2)), | 115 auto r8 = _mm_or_si128(_mm_slli_epi32(r5, 3), _mm_srli_epi32(r5, 2)), |
112 g8 = _mm_or_si128(_mm_slli_epi32(g6, 2), _mm_srli_epi32(g6, 4)), | 116 g8 = _mm_or_si128(_mm_slli_epi32(g6, 2), _mm_srli_epi32(g6, 4)), |
113 b8 = _mm_or_si128(_mm_slli_epi32(b5, 3), _mm_srli_epi32(b5, 2)); | 117 b8 = _mm_or_si128(_mm_slli_epi32(b5, 3), _mm_srli_epi32(b5, 2)); |
114 | 118 |
115 // Now put all the 8-bit components into SkPMColor order. | 119 // Now put all the 8-bit components into SkPMColor order. |
116     return _mm_or_si128(_mm_slli_epi32(r8, SK_R32_SHIFT),  // TODO: one of these shifts is zero... | 120     return _mm_or_si128(_mm_slli_epi32(r8, SK_R32_SHIFT),  // TODO: one of these shifts is zero... |
117 _mm_or_si128(_mm_slli_epi32(g8, SK_G32_SHIFT), | 121 _mm_or_si128(_mm_slli_epi32(g8, SK_G32_SHIFT), |
118 _mm_or_si128(_mm_slli_epi32(b8, SK_B32_SHIFT), | 122 _mm_or_si128(_mm_slli_epi32(b8, SK_B32_SHIFT), |
119 _mm_set1_epi32(0xFF << SK_A32_SHIFT)))); | 123 _mm_set1_epi32(0xFF << SK_A32_SHIFT)))); |
120 } | 124 } |
121 | 125 |
122 static inline __m128i narrow_to_565(__m128i w) { | 126 static SK_ALWAYS_INLINE __m128i narrow_to_565(__m128i w) { |
123 // Extract out top RGB 565 bits of each pixel, with no rounding. | 127 // Extract out top RGB 565 bits of each pixel, with no rounding. |
124     auto r5 = _mm_and_si128(_mm_set1_epi32(31), _mm_srli_epi32(w, SK_R32_SHIFT + 3)), | 128     auto r5 = _mm_and_si128(_mm_set1_epi32(31), _mm_srli_epi32(w, SK_R32_SHIFT + 3)), |
125          g6 = _mm_and_si128(_mm_set1_epi32(63), _mm_srli_epi32(w, SK_G32_SHIFT + 2)), | 129          g6 = _mm_and_si128(_mm_set1_epi32(63), _mm_srli_epi32(w, SK_G32_SHIFT + 2)), |
126          b5 = _mm_and_si128(_mm_set1_epi32(31), _mm_srli_epi32(w, SK_B32_SHIFT + 3)); | 130          b5 = _mm_and_si128(_mm_set1_epi32(31), _mm_srli_epi32(w, SK_B32_SHIFT + 3)); |
127 | 131 |
128 // Now put the bits in place in the low 16-bits of each 32-bit lane. | 132 // Now put the bits in place in the low 16-bits of each 32-bit lane. |
129 auto spread = _mm_or_si128(_mm_slli_epi32(r5, 11), | 133 auto spread = _mm_or_si128(_mm_slli_epi32(r5, 11), |
130 _mm_or_si128(_mm_slli_epi32(g6, 5), | 134 _mm_or_si128(_mm_slli_epi32(g6, 5), |
131 b5)); | 135 b5)); |
132 | 136 |
133     // We want to pack the bottom 16 bits of spread down into the low half of the register, v. | 137     // We want to pack the bottom 16 bits of spread down into the low half of the register, v. |
134 // spread == 0000 rgb3 0000 rgb2 0000 rgb1 0000 rgb0 | 138 // spread == 0000 rgb3 0000 rgb2 0000 rgb1 0000 rgb0 |
135 // v == ____ ____ ____ ____ rgb3 rgb2 rgb1 rgb0 | 139 // v == ____ ____ ____ ____ rgb3 rgb2 rgb1 rgb0 |
136 | 140 |
137     // Ideally now we'd use _mm_packus_epi32(spread, <anything>) to pack v.  But that's from SSE4. | 141     // Ideally now we'd use _mm_packus_epi32(spread, <anything>) to pack v.  But that's from SSE4. |
138     // With only SSE2, we need to use _mm_packs_epi32.  That does signed saturation, and | 142     // With only SSE2, we need to use _mm_packs_epi32.  That does signed saturation, and |
139     // we need to preserve all 16 bits.  So we pretend our data is signed by sign-extending first. | 143     // we need to preserve all 16 bits.  So we pretend our data is signed by sign-extending first. |
140 // TODO: is it faster to just _mm_shuffle_epi8 this when we have SSSE3? | 144 // TODO: is it faster to just _mm_shuffle_epi8 this when we have SSSE3? |
141 auto signExtended = _mm_srai_epi32(_mm_slli_epi32(spread, 16), 16); | 145 auto signExtended = _mm_srai_epi32(_mm_slli_epi32(spread, 16), 16); |
142 auto v = _mm_packs_epi32(signExtended, signExtended); | 146 auto v = _mm_packs_epi32(signExtended, signExtended); |
143 return v; | 147 return v; |
144 } | 148 } |
145 | 149 |
146 inline Sk4px Sk4px::Load4(const SkPMColor16 src[4]) { | 150 SK_ALWAYS_INLINE Sk4px Sk4px::Load4(const SkPMColor16 src[4]) { |
147 return Sk16b(widen_low_half_to_8888(_mm_loadl_epi64((const __m128i*)src))); | 151 return Sk16b(widen_low_half_to_8888(_mm_loadl_epi64((const __m128i*)src))); |
148 } | 152 } |
149 inline Sk4px Sk4px::Load2(const SkPMColor16 src[2]) { | 153 SK_ALWAYS_INLINE Sk4px Sk4px::Load2(const SkPMColor16 src[2]) { |
150 auto src2 = ((uint32_t)src[0] ) | 154 auto src2 = ((uint32_t)src[0] ) |
151 | ((uint32_t)src[1] << 16); | 155 | ((uint32_t)src[1] << 16); |
152 return Sk16b(widen_low_half_to_8888(_mm_cvtsi32_si128(src2))); | 156 return Sk16b(widen_low_half_to_8888(_mm_cvtsi32_si128(src2))); |
153 } | 157 } |
154 inline Sk4px Sk4px::Load1(const SkPMColor16 src[1]) { | 158 SK_ALWAYS_INLINE Sk4px Sk4px::Load1(const SkPMColor16 src[1]) { |
155     return Sk16b(widen_low_half_to_8888(_mm_insert_epi16(_mm_setzero_si128(), src[0], 0))); | 159     return Sk16b(widen_low_half_to_8888(_mm_insert_epi16(_mm_setzero_si128(), src[0], 0))); |
156 } | 160 } |
157 | 161 |
158 inline void Sk4px::store4(SkPMColor16 dst[4]) const { | 162 SK_ALWAYS_INLINE void Sk4px::store4(SkPMColor16 dst[4]) const { |
159 _mm_storel_epi64((__m128i*)dst, narrow_to_565(this->fVec)); | 163 _mm_storel_epi64((__m128i*)dst, narrow_to_565(this->fVec)); |
160 } | 164 } |
161 inline void Sk4px::store2(SkPMColor16 dst[2]) const { | 165 SK_ALWAYS_INLINE void Sk4px::store2(SkPMColor16 dst[2]) const { |
162 uint32_t dst2 = _mm_cvtsi128_si32(narrow_to_565(this->fVec)); | 166 uint32_t dst2 = _mm_cvtsi128_si32(narrow_to_565(this->fVec)); |
163 dst[0] = dst2; | 167 dst[0] = dst2; |
164 dst[1] = dst2 >> 16; | 168 dst[1] = dst2 >> 16; |
165 } | 169 } |
166 inline void Sk4px::store1(SkPMColor16 dst[1]) const { | 170 SK_ALWAYS_INLINE void Sk4px::store1(SkPMColor16 dst[1]) const { |
167 uint32_t dst2 = _mm_cvtsi128_si32(narrow_to_565(this->fVec)); | 171 uint32_t dst2 = _mm_cvtsi128_si32(narrow_to_565(this->fVec)); |
168 dst[0] = dst2; | 172 dst[0] = dst2; |
169 } | 173 } |
170 | |
171 } // namespace | |
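Note on the change above: the diff replaces "inline" with SK_ALWAYS_INLINE on every Sk4px method here, so the compiler is forced (not merely invited) to inline these SSE2 helpers, and it drops the anonymous-namespace wrapper that previously gave them internal linkage. The sketch below is illustrative only: the ALWAYS_INLINE macro body is an assumption about how force-inline macros are typically spelled (Skia defines its own SK_ALWAYS_INLINE in its config headers), and expand5/expand6 are hypothetical helpers that model, in scalar code, the 565 -> 8888 bit-replication math that widen_low_half_to_8888 and narrow_to_565 vectorize.

    #include <cassert>
    #include <cstdint>

    // Assumed spelling of a force-inline macro; not copied from Skia's headers.
    #if defined(_MSC_VER)
        #define ALWAYS_INLINE __forceinline
    #else
        #define ALWAYS_INLINE inline __attribute__((always_inline))
    #endif

    // Widen a 5- or 6-bit channel to 8 bits by replicating its high bits into
    // the freed-up low bits, so 0 maps to 0 and the channel max maps to 255.
    ALWAYS_INLINE uint8_t expand5(uint8_t v5) { return (uint8_t)((v5 << 3) | (v5 >> 2)); }
    ALWAYS_INLINE uint8_t expand6(uint8_t v6) { return (uint8_t)((v6 << 2) | (v6 >> 4)); }

    int main() {
        // Narrowing back to 565 just truncates to the top 5 (or 6) bits, so a
        // widen-then-narrow round trip recovers every channel value exactly.
        for (int c = 0; c < 32; c++) { assert(expand5((uint8_t)c) >> 3 == c); }
        for (int c = 0; c < 64; c++) { assert(expand6((uint8_t)c) >> 2 == c); }
        assert(expand5(31) == 255 && expand6(63) == 255);  // full white stays full white
        return 0;
    }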