Chromium Code Reviews
/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "SkOpts.h"
#define SK_OPTS_NS sk_avx2

#ifndef SK_SUPPORT_LEGACY_X86_BLITS

namespace sk_avx2 {

// AVX2 has masked loads and stores.  We'll use them for N<4 pixels.
static __m128i mask(int n) {
    static const int masks[][4] = {
        { 0, 0, 0, 0},
        {~0, 0, 0, 0},
        {~0,~0, 0, 0},
        {~0,~0,~0, 0},
    };
    return _mm_load_si128((const __m128i*)masks + n);
}
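
// (mask(n) puts ~0 in the first n 32-bit lanes and 0 in the rest, which is the
// form _mm_maskload_epi32/_mm_maskstore_epi32 expect: lanes whose top mask bit
// is set are loaded or stored, the others read back as zero or leave memory alone.)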

// Load 8, 4, or 1-3 constant pixels or coverages (4x replicated).
static __m256i next8(uint32_t val) { return _mm256_set1_epi32(val); }
static __m128i next4(uint32_t val) { return _mm_set1_epi32(val); }
static __m128i tail(int, uint32_t val) { return next4(val); }

static __m256i next8(uint8_t val) { return _mm256_set1_epi8 (val); }
static __m128i next4(uint8_t val) { return _mm_set1_epi8 (val); }
static __m128i tail(int, uint8_t val) { return next4(val); }

// Load 8, 4, or 1-3 variable pixels or coverages (4x replicated).
// next8() and next4() increment their pointer past what they just read.  tail() doesn't bother.
static __m256i next8(const uint32_t*& ptr) {
    auto r = _mm256_loadu_si256((const __m256i*)ptr);
    ptr += 8;
    return r;
}
static __m128i next4(const uint32_t*& ptr) {
    auto r = _mm_loadu_si128((const __m128i*)ptr);
    ptr += 4;
    return r;
}
static __m128i tail(int n, const uint32_t* ptr) {
    return _mm_maskload_epi32((const int*)ptr, mask(n));
}

static __m256i next8(const uint8_t*& ptr) {
    auto r = _mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*)ptr));
    r = _mm256_shuffle_epi8(r, _mm256_setr_epi8(0,0,0,0, 4,4,4,4, 8,8,8,8, 12,12,12,12,
                                                0,0,0,0, 4,4,4,4, 8,8,8,8, 12,12,12,12));
    ptr += 8;
    return r;
}
static __m128i next4(const uint8_t*& ptr) {
    auto r = _mm_shuffle_epi8(_mm_cvtsi32_si128(*(const uint32_t*)ptr),
                              _mm_setr_epi8(0,0,0,0, 1,1,1,1, 2,2,2,2, 3,3,3,3));
    ptr += 4;
    return r;
}
static __m128i tail(int n, const uint8_t* ptr) {
    // TODO: we should be able to use _mm_insert_epi8 here, but the codegen looks terrible.
    uint32_t x = 0;
    switch (n) {
        case 3: x |= (uint32_t)ptr[2] << 16;  // fall through
        case 2: x |= (uint32_t)ptr[1] <<  8;  // fall through
        case 1: x |= (uint32_t)ptr[0] <<  0;
    }
    auto p = (const uint8_t*)&x;
    return next4(p);
}
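
// Note the 4x replication above: four coverage bytes [c0 c1 c2 c3] fan out to
// [c0 c0 c0 c0  c1 c1 c1 c1 ...], one copy per color channel, so that once
// adapt() unpacks them, a single 16-bit multiply scales all four channels of a pixel.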

// For i = 0...n-1, t[i] = fn(dst, src, cov), where Dst, Src, and Cov can be constants or arrays.
template <typename Dst, typename Src, typename Cov, typename Fn>
static void loop(int n, uint32_t* t, const Dst dst, const Src src, const Cov cov, Fn&& fn) {
    // We don't want to muck with the callers' pointers, so we make them const and copy here.
    Dst d = dst;
    Src s = src;
    Cov c = cov;

    // Writing this as a single while-loop helps hoist loop invariants from fn.
    while (n) {
        if (n >= 8) {
            _mm256_storeu_si256((__m256i*)t, fn(next8(d), next8(s), next8(c)));
            t += 8;
            n -= 8;
            continue;
        }
        if (n >= 4) {
            _mm_storeu_si128((__m128i*)t, fn(next4(d), next4(s), next4(c)));
            t += 4;
            n -= 4;
        }
        if (n) {
            _mm_maskstore_epi32((int*)t, mask(n), fn(tail(n,d), tail(n,s), tail(n,c)));
        }
        return;
    }
}
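
// A usage sketch (illustration only, not part of this CL): Src mode with full
// coverage ignores d and c entirely, so a constant fill of n pixels could read
//     loop(n, dst, src, src, (uint8_t)0xff,
//          adapt([](__m256i, __m256i s, __m256i) { return s; }));
// (src doubles as the unused dst argument just to satisfy loop()'s signature.)
// adapt(), defined below, unpacks each argument before calling the lambda and
// repacks the result before the store.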

//                                      packed                                       //
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ //
//                                     unpacked                                      //

// Everything on the packed side of the squiggly line deals with densely packed 8-bit data,
// e.g. [ BGRA bgra ... ] for pixels or [ CCCC cccc ... ] for coverage.
//
// Everything on the unpacked side of the squiggly line deals with unpacked 8-bit data,
// e.g. [ B_G_ R_A_ b_g_ r_a_ ... ] for pixels or [ C_C_ C_C_ c_c_ c_c_ ... ] for coverage,
// where _ is a zero byte.
//
// Adapt<Fn> / adapt(fn) allow the two sides to interoperate,
// by unpacking arguments, calling fn, then packing the results.
//
// This lets us write most of our code in terms of unpacked inputs (considerably simpler),
// and all the packing and unpacking is handled automatically.

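// Concretely, a packed pixel [ B G R A ] unpacks to 16-bit lanes [ B_ G_ R_ A_ ].
// The zero high bytes give byte products room to grow (x*y is at most 255*255 =
// 65025, which still fits in 16 bits) before div255() brings them back to bytes.
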
template <typename Fn>
struct Adapt {
    Fn fn;

    __m256i operator()(__m256i d, __m256i s, __m256i c) {
        auto lo = [](__m256i x) { return _mm256_unpacklo_epi8(x, _mm256_setzero_si256()); };
        auto hi = [](__m256i x) { return _mm256_unpackhi_epi8(x, _mm256_setzero_si256()); };
        return _mm256_packus_epi16(fn(lo(d), lo(s), lo(c)),
                                   fn(hi(d), hi(s), hi(c)));
    }

    __m128i operator()(__m128i d, __m128i s, __m128i c) {
        auto unpack = [](__m128i x) { return _mm256_cvtepu8_epi16(x); };
        auto pack   = [](__m256i x) {
            auto x01 = x,
                 x23 = _mm256_permute4x64_epi64(x, 0xe);  // 0b1110
            return _mm256_castsi256_si128(_mm256_packus_epi16(x01, x23));
        };
        return pack(fn(unpack(d), unpack(s), unpack(c)));
    }
};

template <typename Fn>
Adapt<Fn> adapt(Fn&& fn) { return { fn }; }

// These helpers all work exclusively with unpacked 8-bit values,
// except div255() which is 16-bit -> unpacked 8-bit.

// Divide by 255 with rounding: (x+127)/255 == ((x+128)*257)>>16.
// Sometimes we can be more efficient by breaking this into two parts.
static __m256i div255_part1(__m256i x) { return _mm256_add_epi16  (x, _mm256_set1_epi16(128)); }
static __m256i div255_part2(__m256i x) { return _mm256_mulhi_epu16(x, _mm256_set1_epi16(257)); }
static __m256i div255(__m256i x) { return div255_part2(div255_part1(x)); }
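
// A quick scalar check of that identity (a sketch for illustration only;
// check_div255 is a hypothetical helper, not part of this CL):
static inline bool check_div255() {
    for (int x = 0; x <= 255*255; x++) {            // every value a byte product can take
        if ((x+127)/255 != (((x+128)*257) >> 16)) {
            return false;
        }
    }
    return true;
}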

// (x*y+127)/255, a byte multiply.
static __m256i scale(__m256i x, __m256i y) { return div255(_mm256_mullo_epi16(x, y)); }

// (255 - x).
static __m256i inv(__m256i x) { return _mm256_xor_si256(_mm256_set1_epi16(0x00ff), x); }

// ARGB argb ... -> AAAA aaaa ...
static __m256i alphas(__m256i px) {
    const int a = 2 * (SK_A32_SHIFT/8);  // SK_A32_SHIFT is typically 24; doubled because
                                         // unpacked lanes are 2 bytes wide, so typically 6.
    const int _ = ~0;
    return _mm256_shuffle_epi8(px, _mm256_setr_epi8(a+0,_,a+0,_,a+0,_,a+0,_,
                                                    a+8,_,a+8,_,a+8,_,a+8,_,
                                                    a+0,_,a+0,_,a+0,_,a+0,_,
                                                    a+8,_,a+8,_,a+8,_,a+8,_));
}


// SrcOver, with a constant source and full coverage.
static void blit_row_color32(SkPMColor* tgt, const SkPMColor* dst, int n, SkPMColor src) {
    // We want to calculate s + (d * inv(alphas(s)) + 127)/255.
    // We'd generally do that div255 as s + ((d * inv(alphas(s)) + 128)*257)>>16.

    // But we can go one step further to ((s*255 + 128 + d*inv(alphas(s)))*257)>>16.
    // This lets us hoist (s*255+128) and inv(alphas(s)) out of the loop.
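    // (Why s survives the merge: s*255*257 == (s<<16) - s, so the final >>16
    // hands back exactly s; the CL relies on this being exact for the byte
    // products d*inv(alphas(s)) that occur here.)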
    auto s         = _mm256_cvtepu8_epi16(_mm_set1_epi32(src)),
         s_255_128 = div255_part1(_mm256_mullo_epi16(s, _mm256_set1_epi16(255))),

herb_g, 2015/12/17 21:37:26:
    Instead of _mm256_mullo_epi16, could we use _mm256
mtklein, 2015/12/18 14:36:06:
    Done. I admit I didn't pay much attention to thin

         A         = inv(alphas(s));

    const uint8_t cov = 0xff;
    loop(n, tgt, dst, src, cov, adapt([=](__m256i d, __m256i, __m256i) {
        return div255_part2(_mm256_add_epi16(s_255_128, _mm256_mullo_epi16(d, A)));
    }));
}

// SrcOver, with a constant source and variable coverage.
// If the source is opaque, SrcOver becomes Src.
static void blit_mask_d32_a8(SkPMColor* dst, size_t dstRB,
                             const SkAlpha* cov, size_t covRB,
                             SkColor color, int w, int h) {
    if (SkColorGetA(color) == 0xFF) {
        const SkPMColor src = SkSwizzle_BGRA_to_PMColor(color);
        while (h --> 0) {
            loop(w, dst, (const SkPMColor*)dst, src, cov,
                 adapt([](__m256i d, __m256i s, __m256i c) {
                     // Src blend mode: a simple lerp from d to s by c.
                     // TODO: try a pmaddubsw version?
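                     // In scalar terms, per channel: t = (c*s + (255-c)*d + 127) / 255.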
                     return div255(_mm256_add_epi16(_mm256_mullo_epi16(inv(c),d),
                                                    _mm256_mullo_epi16(    c ,s)));
                 }));
            dst += dstRB / sizeof(*dst);

herb_g, 2015/12/17 21:37:26:
    My paranoid lizard brain says that we should hoist
mtklein, 2015/12/18 14:36:06:
    Nah, `ptr += k / sizeof(*ptr)` is as cheap as you

            cov += covRB / sizeof(*cov);
        }
    } else {
        const SkPMColor src = SkPreMultiplyColor(color);
        while (h --> 0) {
            loop(w, dst, (const SkPMColor*)dst, src, cov,
                 adapt([](__m256i d, __m256i s, __m256i c) {
                     // SrcOver blend mode, with coverage folded into source alpha.
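                     // Per channel: sc = s*c/255 (coverage-scaled source), then
                     // t = sc + d*(255 - alpha(sc))/255, each divide rounding via div255().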
                     auto sc = scale(s,c),
                          AC = inv(alphas(sc));
                     return _mm256_add_epi16(sc, scale(d,AC));
                 }));
            dst += dstRB / sizeof(*dst);
            cov += covRB / sizeof(*cov);
        }
    }
}

}  // namespace sk_avx2

#endif

namespace SkOpts {
    void Init_avx2() {
    #ifndef SK_SUPPORT_LEGACY_X86_BLITS
        blit_row_color32 = sk_avx2::blit_row_color32;
        blit_mask_d32_a8 = sk_avx2::blit_mask_d32_a8;
    #endif
    }
}