#include "SkBlitRow_opts_SSE4.h"

// Some compilers can't compile SSSE3 or SSE4 intrinsics.  We give them stub methods.
// The stubs should never be called, so we make them crash just to confirm that.
#if SK_CPU_SSE_LEVEL < SK_CPU_SSE_LEVEL_SSE41
void S32A_Opaque_BlitRow32_SSE4(SkPMColor* SK_RESTRICT, const SkPMColor* SK_RESTRICT, int, U8CPU) {
    sk_throw();
}

#else

#include <emmintrin.h>  // SSE2: Most _mm_foo() in this file.
#include <smmintrin.h>  // SSE4.1: _mm_testz_si128 and _mm_testc_si128.

#include "SkColorPriv.h"
#include "SkColor_opts_SSE2.h"

void S32A_Opaque_BlitRow32_SSE4(SkPMColor* SK_RESTRICT dst,
                                const SkPMColor* SK_RESTRICT src,
                                int count,
                                U8CPU alpha) {
    SkASSERT(alpha == 255);
    // As long as we can, we'll work on 16 pixels at once.
    int count16 = count / 16;
    __m128i* dst4 = (__m128i*)dst;
    const __m128i* src4 = (const __m128i*)src;

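    // Each __m128i holds four 32-bit pixels, so 16 pixels span four registers;
    // hence the loop strides by 4 over the __m128i pointers, count16 * 4 in all.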
    for (int i = 0; i < count16 * 4; i += 4) {
        // Load 16 source pixels.
        __m128i s0 = _mm_loadu_si128(src4+i+0),
                s1 = _mm_loadu_si128(src4+i+1),
                s2 = _mm_loadu_si128(src4+i+2),
                s3 = _mm_loadu_si128(src4+i+3);

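        // SSE4.1 PTEST classifies the whole group with one branch each:
        // _mm_testz_si128(v, mask) is 1 iff (v & mask) == 0 (every alpha byte 0x00);
        // _mm_testc_si128(v, mask) is 1 iff (~v & mask) == 0 (every alpha byte 0xFF).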
        const __m128i alphaMask = _mm_set1_epi32(0xFF << SK_A32_SHIFT);
        const __m128i ORed = _mm_or_si128(s3, _mm_or_si128(s2, _mm_or_si128(s1, s0)));
        if (_mm_testz_si128(ORed, alphaMask)) {
            // All 16 source pixels are fully transparent.  There's nothing to do!
            continue;
        }
        const __m128i ANDed = _mm_and_si128(s3, _mm_and_si128(s2, _mm_and_si128(s1, s0)));
        if (_mm_testc_si128(ANDed, alphaMask)) {
            // All 16 source pixels are fully opaque.  There's no need to read dst or blend it.
            _mm_storeu_si128(dst4+i+0, s0);
            _mm_storeu_si128(dst4+i+1, s1);
            _mm_storeu_si128(dst4+i+2, s2);
            _mm_storeu_si128(dst4+i+3, s3);
            continue;
        }
        // The general slow case: do the blend for all 16 pixels.
        _mm_storeu_si128(dst4+i+0, SkPMSrcOver_SSE2(s0, _mm_loadu_si128(dst4+i+0)));
        _mm_storeu_si128(dst4+i+1, SkPMSrcOver_SSE2(s1, _mm_loadu_si128(dst4+i+1)));
        _mm_storeu_si128(dst4+i+2, SkPMSrcOver_SSE2(s2, _mm_loadu_si128(dst4+i+2)));
        _mm_storeu_si128(dst4+i+3, SkPMSrcOver_SSE2(s3, _mm_loadu_si128(dst4+i+3)));
    }

    // Wrap up the last <= 15 pixels.
    for (int i = count16*16; i < count; i++) {
        // This check is not really necessary, but it prevents pointless autovectorization.
        if (src[i] & 0xFF000000) {
            dst[i] = SkPMSrcOver(src[i], dst[i]);
        }
    }
}

#endif
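For readers unfamiliar with it, SkPMSrcOver computes the premultiplied src-over blend: dst' = src + dst * (255 - srcAlpha) / 255, per channel. Below is a minimal standalone scalar sketch of that blend, using a 256-based scale so the divide becomes a shift and two channels are handled per 32-bit multiply (the same trick SkAlphaMulQ uses); the function name pm_src_over is hypothetical, not Skia's actual code.

#include <cstdint>
#include <cstdio>

// Hypothetical scalar sketch of premultiplied src-over on 8888 pixels with
// alpha in the top byte.  scale is 256-based so ">> 8" replaces "/ 255",
// and each 32-bit multiply scales two channels at once.
static uint32_t pm_src_over(uint32_t src, uint32_t dst) {
    uint32_t scale = 256 - (src >> 24);                // 255 - alpha, plus 1
    uint32_t rb = ((dst & 0x00FF00FFu) * scale) >> 8;  // red and blue
    uint32_t ag = ((dst >> 8) & 0x00FF00FFu) * scale;  // alpha and green
    return src + ((rb & 0x00FF00FFu) | (ag & 0xFF00FF00u));
}

int main() {
    // Half-alpha gray over opaque white: 0x80404040 over 0xFFFFFFFF -> 0xffbfbfbf.
    printf("%08x\n", pm_src_over(0x80404040u, 0xFFFFFFFFu));
    return 0;
}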