OLD | NEW |
1 /* | 1 /* |
2 * Copyright 2015 Google Inc. | 2 * Copyright 2015 Google Inc. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
6 */ | 6 */ |
7 | 7 |
8 #include "SkOpts.h" | 8 #include "SkOpts.h" |
9 | 9 |
10 #define SK_OPTS_NS sk_sse41 | 10 #define SK_OPTS_NS sk_sse41 |
11 #include "SkBlurImageFilter_opts.h" | 11 #include "SkBlurImageFilter_opts.h" |
12 #include "SkBlitRow_opts.h" | 12 #include "SkBlitRow_opts.h" |
13 #include "SkBlend_opts.h" | 13 #include "SkBlend_opts.h" |
14 | 14 |
| 15 #ifndef SK_SUPPORT_LEGACY_X86_BLITS |
| 16 |
| 17 namespace sk_sse41_new { |
| 18 |
| 19 // An SSE register holding at most 64 bits of useful data in the low lanes. |
| 20 struct m64i { |
| 21 __m128i v; |
| 22 /*implicit*/ m64i(__m128i v) : v(v) {} |
| 23 operator __m128i() const { return v; } |
| 24 }; |
| 25 |
| 26 // Load 4, 2, or 1 constant pixels or coverages (4x replicated). |
| 27 static __m128i next4(uint32_t val) { return _mm_set1_epi32(val); } |
| 28 static m64i next2(uint32_t val) { return _mm_set1_epi32(val); } |
| 29 static m64i next1(uint32_t val) { return _mm_set1_epi32(val); } |
| 30 |
| 31 static __m128i next4(uint8_t val) { return _mm_set1_epi8(val); } |
| 32 static m64i next2(uint8_t val) { return _mm_set1_epi8(val); } |
| 33 static m64i next1(uint8_t val) { return _mm_set1_epi8(val); } |
| 34 |
| 35 // Load 4, 2, or 1 variable pixels or coverages (4x replicated), |
| 36 // incrementing the pointer past what we read. |
| 37 static __m128i next4(const uint32_t*& ptr) { |
| 38 auto r = _mm_loadu_si128((const __m128i*)ptr); |
| 39 ptr += 4; |
| 40 return r; |
| 41 } |
| 42 static m64i next2(const uint32_t*& ptr) { |
| 43 auto r = _mm_loadl_epi64((const __m128i*)ptr); |
| 44 ptr += 2; |
| 45 return r; |
| 46 } |
| 47 static m64i next1(const uint32_t*& ptr) { |
| 48 auto r = _mm_cvtsi32_si128(*ptr); |
| 49 ptr += 1; |
| 50 return r; |
| 51 } |
| 52 |
| 53 // xyzw -> xxxx yyyy zzzz wwww |
| 54 static __m128i replicate_coverage(__m128i xyzw) { |
| 55 return _mm_shuffle_epi8(xyzw, _mm_setr_epi8(0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3)); |
| 56 } |
| 57 |
| 58 static __m128i next4(const uint8_t*& ptr) { |
| 59 auto r = replicate_coverage(_mm_cvtsi32_si128(*(const uint32_t*)ptr)); |
| 60 ptr += 4; |
| 61 return r; |
| 62 } |
| 63 static m64i next2(const uint8_t*& ptr) { |
| 64 auto r = replicate_coverage(_mm_cvtsi32_si128(*(const uint16_t*)ptr)); |
| 65 ptr += 2; |
| 66 return r; |
| 67 } |
| 68 static m64i next1(const uint8_t*& ptr) { |
| 69 auto r = replicate_coverage(_mm_cvtsi32_si128(*ptr)); |
| 70 ptr += 1; |
| 71 return r; |
| 72 } |
| 73 |
| 74 // For i = 0...n-1, tgt[i] = fn(dst, src, cov), where Dst, Src, and Cov may each be a constant or an array. |
| 75 template <typename Dst, typename Src, typename Cov, typename Fn> |
| 76 static void loop(int n, uint32_t* t, const Dst dst, const Src src, const Cov cov, Fn&& fn) { |
| 77 // We don't want to muck with the callers' pointers, so we make them const and copy here. |
| 78 Dst d = dst; |
| 79 Src s = src; |
| 80 Cov c = cov; |
| 81 |
| 82 // Writing this as a single while-loop helps hoist loop invariants from fn. |
| 83 while (n) { |
| 84 if (n >= 4) { |
| 85 _mm_storeu_si128((__m128i*)t, fn(next4(d), next4(s), next4(c))); |
| 86 t += 4; |
| 87 n -= 4; |
| 88 continue; |
| 89 } |
| 90 if (n & 2) { |
| 91 _mm_storel_epi64((__m128i*)t, fn(next2(d), next2(s), next2(c))); |
| 92 t += 2; |
| 93 } |
| 94 if (n & 1) { |
| 95 *t = _mm_cvtsi128_si32(fn(next1(d), next1(s), next1(c))); |
| 96 } |
| 97 return; |
| 98 } |
| 99 } |
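
Because next4/next2/next1 are overloaded on constants and pointers alike, any mix of them can drive the same loop. A minimal usage sketch (fill_n is a hypothetical name, not part of this CL; the lambda here operates on the packed data directly):

    static void fill_n(uint32_t* tgt, int n, uint32_t color) {
        // dst and cov are unused constants here; only src (also a constant) matters.
        loop(n, tgt, (uint32_t)0, color, (uint8_t)0xff,
             [](__m128i, __m128i s, __m128i) { return s; });
    }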
| 100 |
| 101 // packed |
| 102 // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // |
| 103 // unpacked |
| 104 |
| 105 // Everything on the packed side of the squiggly line deals with densely packed 8-bit data, |
| 106 // e.g. [BGRA bgra ... ] for pixels or [ CCCC cccc ... ] for coverage. |
| 107 // |
| 108 // Everything on the unpacked side of the squiggly line deals with unpacked 8-bit data, |
| 109 // e.g. [B_G_ R_A_ b_g_ r_a_ ] for pixels or [ C_C_ C_C_ c_c_ c_c_ ] for coverage, |
| 110 // where _ is a zero byte. |
| 111 // |
| 112 // Adapt<Fn> / adapt(fn) allow the two sides to interoperate, |
| 113 // by unpacking arguments, calling fn, then packing the results. |
| 114 // |
| 115 // This lets us write most of our code in terms of unpacked inputs (considerably simpler) |
| 116 // and all the packing and unpacking is handled automatically. |
| 117 |
| 118 template <typename Fn> |
| 119 struct Adapt { |
| 120 Fn fn; |
| 121 |
| 122 __m128i operator()(__m128i d, __m128i s, __m128i c) { |
| 123 auto lo = [](__m128i x) { return _mm_unpacklo_epi8(x, _mm_setzero_si128()); }; |
| 124 auto hi = [](__m128i x) { return _mm_unpackhi_epi8(x, _mm_setzero_si128()); }; |
| 125 return _mm_packus_epi16(fn(lo(d), lo(s), lo(c)), |
| 126 fn(hi(d), hi(s), hi(c))); |
| 127 } |
| 128 |
| 129 m64i operator()(const m64i& d, const m64i& s, const m64i& c) { |
| 130 auto lo = [](__m128i x) { return _mm_unpacklo_epi8(x, _mm_setzero_si128()); }; |
| 131 auto r = fn(lo(d), lo(s), lo(c)); |
| 132 return _mm_packus_epi16(r, r); |
| 133 } |
| 134 }; |
| 135 |
| 136 template <typename Fn> |
| 137 static Adapt<Fn> adapt(Fn&& fn) { return { fn }; } |
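
As a sketch of what adapt() buys us (avg_pixels is a hypothetical helper, not part of this CL): a lambda written against unpacked 16-bit lanes can be applied to packed pixels directly, with no overflow worries in the arithmetic:

    static __m128i avg_pixels(__m128i d, __m128i s) {
        // The lambda sees unpacked 16-bit lanes, so d+s cannot overflow;
        // Adapt repacks the result to 8 bits with _mm_packus_epi16.
        return adapt([](__m128i d, __m128i s, __m128i) {
            return _mm_srli_epi16(_mm_add_epi16(d, s), 1);
        })(d, s, _mm_setzero_si128());
    }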
| 138 |
| 139 // These helpers all work exclusively with unpacked 8-bit values, |
| 140 // except div255(), which is 16-bit -> unpacked 8-bit, and mul255(), which is the reverse. |
| 141 |
| 142 // Divide by 255 with rounding. |
| 143 // (x+127)/255 == ((x+128)*257)>>16. |
| 144 // Sometimes we can be more efficient by breaking this into two parts. |
| 145 static __m128i div255_part1(__m128i x) { return _mm_add_epi16(x, _mm_set1_epi16(128)); } |
| 146 static __m128i div255_part2(__m128i x) { return _mm_mulhi_epu16(x, _mm_set1_epi16(257)); } |
| 147 static __m128i div255(__m128i x) { return div255_part2(div255_part1(x)); } |
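
The identity is exact over the whole range a byte multiply can produce. A scalar check, purely illustrative (check_div255_identity is not part of this CL):

    static void check_div255_identity() {
        for (int x = 0; x <= 255*255; x++) {
            SkASSERT((x + 127)/255 == ((x + 128)*257) >> 16);
        }
    }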
| 148 |
| 149 // (x*y+127)/255, a byte multiply. |
| 150 static __m128i scale(__m128i x, __m128i y) { return div255(_mm_mullo_epi16(x, y)); } |
| 151 |
| 152 // (255 * x). |
| 153 static __m128i mul255(__m128i x) { return _mm_sub_epi16(_mm_slli_epi16(x, 8), x); } |
| 154 |
| 155 // (255 - x). |
| 156 static __m128i inv(__m128i x) { return _mm_xor_si128(_mm_set1_epi16(0x00ff), x); } |
| 157 |
| 158 // ARGB argb -> AAAA aaaa |
| 159 static __m128i alphas(__m128i px) { |
| 160 const int a = 2 * (SK_A32_SHIFT/8); // SK_A32_SHIFT is typically 24, so this is typically 6. |
| 161 const int _ = ~0; |
| 162 return _mm_shuffle_epi8(px, _mm_setr_epi8(a+0,_,a+0,_,a+0,_,a+0,_, a+8,_,a+8,_,a+8,_,a+8,_)); |
| 163 } |
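
Concretely, with the usual SK_A32_SHIFT == 24 (so a == 6), the shuffle reads as follows (illustrative byte layout, assuming the [B_G_ R_A_] channel order shown earlier):

    // unpacked input bytes:  B _ G _ R _ A _  b _ g _ r _ a _
    // byte indices:          0 1 2 3 4 5 6 7  8 9 ...    14 15
    // shuffle output:        A _ A _ A _ A _  a _ a _ a _ a _
    // i.e. each pixel's alpha is broadcast across its four 16-bit lanes.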
| 164 |
| 165 // SrcOver, with a constant source and full coverage. |
| 166 static void blit_row_color32(SkPMColor* tgt, const SkPMColor* dst, int n, SkPMColor src) { |
| 167 // We want to calculate s + (d * inv(alphas(s)) + 127)/255. |
| 168 // We'd generally do that div255 as s + ((d * inv(alphas(s)) + 128)*257)>>16. |
| 169 |
| 170 // But we can go one step further to ((s*255 + 128 + d*inv(alphas(s)))*257)>>16. |
| 171 // This lets us hoist (s*255+128) and inv(alphas(s)) out of the loop. |
| 172 __m128i s = _mm_unpacklo_epi8(_mm_set1_epi32(src), _mm_setzero_si128()), |
| 173 s_255_128 = div255_part1(mul255(s)), |
| 174 A = inv(alphas(s)); |
| 175 |
| 176 const uint8_t cov = 0xff; |
| 177 loop(n, tgt, dst, src, cov, adapt([=](__m128i d, __m128i, __m128i) { |
| 178 return div255_part2(_mm_add_epi16(s_255_128, _mm_mullo_epi16(d, A))); |
| 179 })); |
| 180 } |
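
In scalar terms, per channel (srcover_channel is a hypothetical model, not part of this CL). Note the 16-bit lanes cannot overflow: premultiplication gives s <= sa, so s*255 + 128 + d*(255-sa) <= 255*255 + 128 = 65153:

    static uint8_t srcover_channel(uint8_t d, uint8_t s, uint8_t sa) {
        int s_255_128 = s*255 + 128;    // hoisted: div255_part1(mul255(s))
        int inv_sa    = 255 - sa;       // hoisted: inv(alphas(s))
        return (uint8_t)(((s_255_128 + d*inv_sa) * 257) >> 16);  // div255_part2
    }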
| 181 |
| 182 // SrcOver, with a constant source and variable coverage. |
| 183 // If the source is opaque, SrcOver becomes Src. |
| 184 static void blit_mask_d32_a8(SkPMColor* dst, size_t dstRB, |
| 185 const SkAlpha* cov, size_t covRB, |
| 186 SkColor color, int w, int h) { |
| 187 if (SkColorGetA(color) == 0xFF) { |
| 188 const SkPMColor src = SkSwizzle_BGRA_to_PMColor(color); |
| 189 while (h --> 0) { |
| 190 loop(w, dst, (const SkPMColor*)dst, src, cov, |
| 191 adapt([](__m128i d, __m128i s, __m128i c) { |
| 192 // Src blend mode: a simple lerp from d to s by c. |
| 193 // TODO: try a pmaddubsw version? |
| 194 return div255(_mm_add_epi16(_mm_mullo_epi16(inv(c),d), |
| 195 _mm_mullo_epi16( c ,s))); |
| 196 })); |
| 197 dst += dstRB / sizeof(*dst); |
| 198 cov += covRB / sizeof(*cov); |
| 199 } |
| 200 } else { |
| 201 const SkPMColor src = SkPreMultiplyColor(color); |
| 202 while (h --> 0) { |
| 203 loop(w, dst, (const SkPMColor*)dst, src, cov, |
| 204 adapt([](__m128i d, __m128i s, __m128i c) { |
| 205 // SrcOver blend mode, with coverage folded into source alpha. |
| 206 __m128i sc = scale(s,c), |
| 207 AC = inv(alphas(sc)); |
| 208 return _mm_add_epi16(sc, scale(d,AC)); |
| 209 })); |
| 210 dst += dstRB / sizeof(*dst); |
| 211 cov += covRB / sizeof(*cov); |
| 212 } |
| 213 } |
| 214 } |
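
A scalar model of the two paths (mask_channel is hypothetical, not part of this CL; sa is the source alpha, c the coverage):

    static uint8_t mask_channel(uint8_t d, uint8_t s, uint8_t sa, uint8_t c) {
        if (sa == 0xFF) {
            // Src: a simple lerp from d to s by c.
            return (uint8_t)((c*s + (255 - c)*d + 127) / 255);
        }
        // SrcOver with coverage folded into the source.
        int sc  = (s*c  + 127) / 255;   // scale(s,c), applied to every channel...
        int sac = (sa*c + 127) / 255;   // ...including alpha.
        return (uint8_t)(sc + (d*(255 - sac) + 127) / 255);
    }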
| 215 } // namespace sk_sse41_new |
| 216 |
| 217 #endif |
| 218 |
15 namespace SkOpts { | 219 namespace SkOpts { |
16 void Init_sse41() { | 220 void Init_sse41() { |
17 box_blur_xx = sk_sse41::box_blur_xx; | 221 box_blur_xx = sk_sse41::box_blur_xx; |
18 box_blur_xy = sk_sse41::box_blur_xy; | 222 box_blur_xy = sk_sse41::box_blur_xy; |
19 box_blur_yx = sk_sse41::box_blur_yx; | 223 box_blur_yx = sk_sse41::box_blur_yx; |
20 srcover_srgb_srgb = sk_sse41::srcover_srgb_srgb; | 224 srcover_srgb_srgb = sk_sse41::srcover_srgb_srgb; |
| 225 |
| 226 #ifndef SK_SUPPORT_LEGACY_X86_BLITS |
| 227 blit_row_color32 = sk_sse41_new::blit_row_color32; |
| 228 blit_mask_d32_a8 = sk_sse41_new::blit_mask_d32_a8; |
| 229 #endif |
21 blit_row_s32a_opaque = sk_sse41::blit_row_s32a_opaque; | 230 blit_row_s32a_opaque = sk_sse41::blit_row_s32a_opaque; |
22 } | 231 } |
23 } | 232 } |