Chromium Code Reviews

Unified Diff: src/opts/Sk4px_SSE2.h

Issue 1284333002: Revert of Refactor to put SkXfermode_opts inside SK_OPTS_NS. (Closed) Base URL: https://skia.googlesource.com/skia.git@master
Patch Set: Created 5 years, 4 months ago
 /*
  * Copyright 2015 Google Inc.
  *
  * Use of this source code is governed by a BSD-style license that can be
  * found in the LICENSE file.
  */

-SK_ALWAYS_INLINE Sk4px Sk4px::DupPMColor(SkPMColor px) { return Sk16b(_mm_set1_epi32(px)); }
+namespace { // See Sk4px.h

-SK_ALWAYS_INLINE Sk4px Sk4px::Load4(const SkPMColor px[4]) {
+inline Sk4px Sk4px::DupPMColor(SkPMColor px) { return Sk16b(_mm_set1_epi32(px)); }
+
+inline Sk4px Sk4px::Load4(const SkPMColor px[4]) {
     return Sk16b(_mm_loadu_si128((const __m128i*)px));
 }
-SK_ALWAYS_INLINE Sk4px Sk4px::Load2(const SkPMColor px[2]) {
+inline Sk4px Sk4px::Load2(const SkPMColor px[2]) {
     return Sk16b(_mm_loadl_epi64((const __m128i*)px));
 }
-SK_ALWAYS_INLINE Sk4px Sk4px::Load1(const SkPMColor px[1]) { return Sk16b(_mm_cvtsi32_si128(*px)); }
+inline Sk4px Sk4px::Load1(const SkPMColor px[1]) { return Sk16b(_mm_cvtsi32_si128(*px)); }

-SK_ALWAYS_INLINE void Sk4px::store4(SkPMColor px[4]) const {
-    _mm_storeu_si128((__m128i*)px, this->fVec);
-}
-SK_ALWAYS_INLINE void Sk4px::store2(SkPMColor px[2]) const {
-    _mm_storel_epi64((__m128i*)px, this->fVec);
-}
-SK_ALWAYS_INLINE void Sk4px::store1(SkPMColor px[1]) const {
-    *px = _mm_cvtsi128_si32(this->fVec);
-}
+inline void Sk4px::store4(SkPMColor px[4]) const { _mm_storeu_si128((__m128i*)px, this->fVec); }
+inline void Sk4px::store2(SkPMColor px[2]) const { _mm_storel_epi64((__m128i*)px, this->fVec); }
+inline void Sk4px::store1(SkPMColor px[1]) const { *px = _mm_cvtsi128_si32(this->fVec); }

-SK_ALWAYS_INLINE Sk4px::Wide Sk4px::widenLo() const {
+inline Sk4px::Wide Sk4px::widenLo() const {
     return Sk16h(_mm_unpacklo_epi8(this->fVec, _mm_setzero_si128()),
                  _mm_unpackhi_epi8(this->fVec, _mm_setzero_si128()));
 }

-SK_ALWAYS_INLINE Sk4px::Wide Sk4px::widenHi() const {
+inline Sk4px::Wide Sk4px::widenHi() const {
     return Sk16h(_mm_unpacklo_epi8(_mm_setzero_si128(), this->fVec),
                  _mm_unpackhi_epi8(_mm_setzero_si128(), this->fVec));
 }

-SK_ALWAYS_INLINE Sk4px::Wide Sk4px::widenLoHi() const {
+inline Sk4px::Wide Sk4px::widenLoHi() const {
     return Sk16h(_mm_unpacklo_epi8(this->fVec, this->fVec),
                  _mm_unpackhi_epi8(this->fVec, this->fVec));
 }
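Per byte lane, the three widens above are just zero-extension into different halves of the 16-bit result. A minimal scalar model of one lane (hypothetical helper names, not part of this CL):

    #include <cstdint>

    // Hypothetical scalar models, not Skia code: one 8-bit lane widening to 16 bits.
    uint16_t widenLo_model  (uint8_t x) { return (uint16_t)(x);       }  // 0x00XX == x * 1
    uint16_t widenHi_model  (uint8_t x) { return (uint16_t)(x << 8);  }  // 0xXX00 == x * 256
    uint16_t widenLoHi_model(uint8_t x) { return (uint16_t)(x * 257); }  // 0xXXXX == x * 257

Note 255 * 257 == 65535, so widenLoHi() maps a byte onto the full 16-bit range.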

-SK_ALWAYS_INLINE Sk4px::Wide Sk4px::mulWiden(const Sk16b& other) const {
+inline Sk4px::Wide Sk4px::mulWiden(const Sk16b& other) const {
     return this->widenLo() * Sk4px(other).widenLo();
 }

-SK_ALWAYS_INLINE Sk4px Sk4px::Wide::addNarrowHi(const Sk16h& other) const {
+inline Sk4px Sk4px::Wide::addNarrowHi(const Sk16h& other) const {
     Sk4px::Wide r = (*this + other) >> 8;
     return Sk4px(_mm_packus_epi16(r.fLo.fVec, r.fHi.fVec));
 }

 // Load4Alphas and Load2Alphas use possibly-unaligned loads (SkAlpha[] -> uint16_t or uint32_t).
 // These are safe on x86, often with no speed penalty.

 #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
-SK_ALWAYS_INLINE Sk4px Sk4px::alphas() const {
+inline Sk4px Sk4px::alphas() const {
     static_assert(SK_A32_SHIFT == 24, "Intel's always little-endian.");
     __m128i splat = _mm_set_epi8(15,15,15,15, 11,11,11,11, 7,7,7,7, 3,3,3,3);
     return Sk16b(_mm_shuffle_epi8(this->fVec, splat));
 }

-SK_ALWAYS_INLINE Sk4px Sk4px::Load4Alphas(const SkAlpha a[4]) {
+inline Sk4px Sk4px::Load4Alphas(const SkAlpha a[4]) {
     uint32_t as = *(const uint32_t*)a;
     __m128i splat = _mm_set_epi8(3,3,3,3, 2,2,2,2, 1,1,1,1, 0,0,0,0);
     return Sk16b(_mm_shuffle_epi8(_mm_cvtsi32_si128(as), splat));
 }
 #else
-SK_ALWAYS_INLINE Sk4px Sk4px::alphas() const {
+inline Sk4px Sk4px::alphas() const {
     static_assert(SK_A32_SHIFT == 24, "Intel's always little-endian.");
     __m128i as = _mm_srli_epi32(this->fVec, 24);   // ___3 ___2 ___1 ___0
     as = _mm_or_si128(as, _mm_slli_si128(as, 1));  // __33 __22 __11 __00
     as = _mm_or_si128(as, _mm_slli_si128(as, 2));  // 3333 2222 1111 0000
     return Sk16b(as);
 }

-SK_ALWAYS_INLINE Sk4px Sk4px::Load4Alphas(const SkAlpha a[4]) {
+inline Sk4px Sk4px::Load4Alphas(const SkAlpha a[4]) {
     __m128i as = _mm_cvtsi32_si128(*(const uint32_t*)a);  // ____ ____ ____ 3210
     as = _mm_unpacklo_epi8 (as, _mm_setzero_si128());     // ____ ____ _3_2 _1_0
     as = _mm_unpacklo_epi16(as, _mm_setzero_si128());     // ___3 ___2 ___1 ___0
     as = _mm_or_si128(as, _mm_slli_si128(as, 1));         // __33 __22 __11 __00
     as = _mm_or_si128(as, _mm_slli_si128(as, 2));         // 3333 2222 1111 0000
     return Sk16b(as);
 }
 #endif

-SK_ALWAYS_INLINE Sk4px Sk4px::Load2Alphas(const SkAlpha a[2]) {
+inline Sk4px Sk4px::Load2Alphas(const SkAlpha a[2]) {
     uint32_t as = *(const uint16_t*)a;   // Aa -> Aa00
     return Load4Alphas((const SkAlpha*)&as);
 }

-SK_ALWAYS_INLINE Sk4px Sk4px::zeroColors() const {
+inline Sk4px Sk4px::zeroColors() const {
     return Sk16b(_mm_and_si128(_mm_set1_epi32(0xFF << SK_A32_SHIFT), this->fVec));
 }

-SK_ALWAYS_INLINE Sk4px Sk4px::zeroAlphas() const {
+inline Sk4px Sk4px::zeroAlphas() const {
     // andnot(a,b) == ~a & b
     return Sk16b(_mm_andnot_si128(_mm_set1_epi32(0xFF << SK_A32_SHIFT), this->fVec));
 }
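Both the SSSE3 shuffle and the SSE2 shift/or fallback compute the same result: each pixel's alpha byte splatted across all four of that pixel's byte lanes. A scalar model (hypothetical name, not part of this CL):

    #include <cstdint>

    // Hypothetical scalar model, not Skia code: out == a0 a0 a0 a0 a1 a1 a1 a1 ... a3 a3 a3 a3.
    void load4Alphas_model(const uint8_t a[4], uint8_t out[16]) {
        for (int i = 0; i < 4; i++) {
            for (int j = 0; j < 4; j++) {
                out[4*i + j] = a[i];  // splat alpha i across its pixel's four bytes
            }
        }
    }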

-static SK_ALWAYS_INLINE __m128i widen_low_half_to_8888(__m128i v) {
+static inline __m128i widen_low_half_to_8888(__m128i v) {
     // RGB565 format: |R....|G.....|B....|
     //           Bit: 16    11     5     0

     // First get each pixel into its own 32-bit lane.
     //      v == ____ ____ ____ ____ rgb3 rgb2 rgb1 rgb0
     // spread == 0000 rgb3 0000 rgb2 0000 rgb1 0000 rgb0
     auto spread = _mm_unpacklo_epi16(v, _mm_setzero_si128());

     // Get each color independently, still in 565 precision but down at bit 0.
     auto r5 = _mm_srli_epi32(spread, 11),
          g6 = _mm_and_si128(_mm_set1_epi32(63), _mm_srli_epi32(spread, 5)),
          b5 = _mm_and_si128(_mm_set1_epi32(31), spread);

     // Scale 565 precision up to 8 bits each, filling the low 3/2/3 bits with the high bits of each component.
     auto r8 = _mm_or_si128(_mm_slli_epi32(r5, 3), _mm_srli_epi32(r5, 2)),
          g8 = _mm_or_si128(_mm_slli_epi32(g6, 2), _mm_srli_epi32(g6, 4)),
          b8 = _mm_or_si128(_mm_slli_epi32(b5, 3), _mm_srli_epi32(b5, 2));

     // Now put all the 8-bit components into SkPMColor order.
     return _mm_or_si128(_mm_slli_epi32(r8, SK_R32_SHIFT),  // TODO: one of these shifts is zero...
            _mm_or_si128(_mm_slli_epi32(g8, SK_G32_SHIFT),
            _mm_or_si128(_mm_slli_epi32(b8, SK_B32_SHIFT),
                         _mm_set1_epi32(0xFF << SK_A32_SHIFT))));
 }
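The shift-and-OR scaling in widen_low_half_to_8888() is the usual bit-replication trick: a bare left shift would map the maximum 5-bit value 31 to 248 rather than 255, so each component's high bits are OR'd back into the newly opened low bits, hitting both endpoints exactly. A scalar sketch (hypothetical names, not part of this CL):

    #include <cstdint>

    // Hypothetical scalar models, not Skia code: replicate high bits into the low bits.
    uint8_t scale5to8(uint8_t x5) { return (uint8_t)((x5 << 3) | (x5 >> 2)); }  // 0 -> 0, 31 -> 255
    uint8_t scale6to8(uint8_t x6) { return (uint8_t)((x6 << 2) | (x6 >> 4)); }  // 0 -> 0, 63 -> 255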

-static SK_ALWAYS_INLINE __m128i narrow_to_565(__m128i w) {
+static inline __m128i narrow_to_565(__m128i w) {
     // Extract the top RGB 565 bits of each pixel, with no rounding.
     auto r5 = _mm_and_si128(_mm_set1_epi32(31), _mm_srli_epi32(w, SK_R32_SHIFT + 3)),
          g6 = _mm_and_si128(_mm_set1_epi32(63), _mm_srli_epi32(w, SK_G32_SHIFT + 2)),
          b5 = _mm_and_si128(_mm_set1_epi32(31), _mm_srli_epi32(w, SK_B32_SHIFT + 3));

     // Now put the bits in place in the low 16 bits of each 32-bit lane.
     auto spread = _mm_or_si128(_mm_slli_epi32(r5, 11),
                   _mm_or_si128(_mm_slli_epi32(g6, 5),
                                b5));

     // We want to pack the bottom 16 bits of spread down into the low half of the register, v.
     // spread == 0000 rgb3 0000 rgb2 0000 rgb1 0000 rgb0
     //      v == ____ ____ ____ ____ rgb3 rgb2 rgb1 rgb0

     // Ideally now we'd use _mm_packus_epi32(spread, <anything>) to pack v.  But that's from SSE4.
     // With only SSE2, we need to use _mm_packs_epi32.  That does signed saturation, and
     // we need to preserve all 16 bits.  So we pretend our data is signed by sign-extending first.
     // TODO: is it faster to just _mm_shuffle_epi8 this when we have SSSE3?
     auto signExtended = _mm_srai_epi32(_mm_slli_epi32(spread, 16), 16);
     auto v = _mm_packs_epi32(signExtended, signExtended);
     return v;
 }
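The sign extension is what keeps _mm_packs_epi32 honest: it clamps each 32-bit lane to [-32768, 32767], so a 565 pixel with the red MSB set (value >= 0x8000) would otherwise saturate to 0x7FFF and corrupt the channel bits. Sign-extending first turns such values into in-range negative numbers whose low 16 bits are untouched. A scalar model of one lane (hypothetical name, not part of this CL):

    #include <cstdint>

    // Hypothetical scalar model, not Skia code: sign-extend, then the packs_epi32 clamp.
    uint16_t pack565_lane_model(uint32_t spread) {
        int32_t s = (int16_t)(spread & 0xFFFF);  // sign-extend the low 16 bits
        int32_t clamped = s < -32768 ? -32768 : s > 32767 ? 32767 : s;  // always a no-op here
        return (uint16_t)clamped;                // all 16 bits of the 565 value survive
    }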

-SK_ALWAYS_INLINE Sk4px Sk4px::Load4(const SkPMColor16 src[4]) {
+inline Sk4px Sk4px::Load4(const SkPMColor16 src[4]) {
     return Sk16b(widen_low_half_to_8888(_mm_loadl_epi64((const __m128i*)src)));
 }
-SK_ALWAYS_INLINE Sk4px Sk4px::Load2(const SkPMColor16 src[2]) {
+inline Sk4px Sk4px::Load2(const SkPMColor16 src[2]) {
     auto src2 = ((uint32_t)src[0]      )
               | ((uint32_t)src[1] << 16);
     return Sk16b(widen_low_half_to_8888(_mm_cvtsi32_si128(src2)));
 }
-SK_ALWAYS_INLINE Sk4px Sk4px::Load1(const SkPMColor16 src[1]) {
+inline Sk4px Sk4px::Load1(const SkPMColor16 src[1]) {
     return Sk16b(widen_low_half_to_8888(_mm_insert_epi16(_mm_setzero_si128(), src[0], 0)));
 }

-SK_ALWAYS_INLINE void Sk4px::store4(SkPMColor16 dst[4]) const {
+inline void Sk4px::store4(SkPMColor16 dst[4]) const {
     _mm_storel_epi64((__m128i*)dst, narrow_to_565(this->fVec));
 }
-SK_ALWAYS_INLINE void Sk4px::store2(SkPMColor16 dst[2]) const {
+inline void Sk4px::store2(SkPMColor16 dst[2]) const {
     uint32_t dst2 = _mm_cvtsi128_si32(narrow_to_565(this->fVec));
     dst[0] = dst2;
     dst[1] = dst2 >> 16;
 }
-SK_ALWAYS_INLINE void Sk4px::store1(SkPMColor16 dst[1]) const {
+inline void Sk4px::store1(SkPMColor16 dst[1]) const {
     uint32_t dst2 = _mm_cvtsi128_si32(narrow_to_565(this->fVec));
     dst[0] = dst2;
 }
+
+} // namespace