| Index: source/row_gcc.cc
|
| diff --git a/source/row_gcc.cc b/source/row_gcc.cc
|
| index 12c7dd884de344f8cbe533b5bb430e353223fcaf..d406c7c77f2e03f16db6544281b70a110b4914c1 100644
|
| --- a/source/row_gcc.cc
|
| +++ b/source/row_gcc.cc
|
| @@ -3469,7 +3469,10 @@ void ARGBBlendRow_SSSE3(const uint8* src_argb0, const uint8* src_argb1,
|
|
|
| #ifdef HAS_BLENDPLANEROW_SSSE3
|
| // Blend 8 pixels at a time.
|
| -// =((G2*C2)+(H2*(D2))+32768+127)/256
|
| +// unsigned version of math
|
| +// =((A2*C2)+(B2*(255-C2))+255)/256
|
| +// signed version of math
|
| +// =(((A2-128)*C2)+((B2-128)*(255-C2))+32768+127)/256
|
| void BlendPlaneRow_SSSE3(const uint8* src0, const uint8* src1,
|
| const uint8* alpha, uint8* dst, int width) {
|
| asm volatile (
|
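For reference, the two formulas in the new comments are the same blend written two ways: the unsigned form is the intent, and the signed form is what vpmaddubsw actually evaluates, since that instruction multiplies unsigned bytes (the interleaved alpha / 255-alpha pair) by signed bytes (the sources after the -128 bias from vpsubb). A minimal scalar sketch in plain C of both forms (helper names are illustrative, not part of the patch):

#include <stdint.h>

// Blend a (src0) and b (src1) by alpha c, approximating /255 with /256 plus rounding.
static uint8_t BlendPixelUnsigned(uint8_t a, uint8_t b, uint8_t c) {
  // =((A2*C2)+(B2*(255-C2))+255)/256
  return (uint8_t)((a * c + b * (255 - c) + 255) >> 8);
}

static uint8_t BlendPixelSigned(uint8_t a, uint8_t b, uint8_t c) {
  // =(((A2-128)*C2)+((B2-128)*(255-C2))+32768+127)/256
  // The -128 bias removes 128*(c + (255-c)) = 32640 from the sum; adding 32768+127
  // puts back 32640 + 255, so this is bit-identical to the unsigned form.
  int sum = (a - 128) * c + (b - 128) * (255 - c) + 32768 + 127;
  return (uint8_t)(sum >> 8);
}

The vpmaddubsw products stay within -32640..32385, inside int16 range, so its saturation never triggers; after the +0x807f the word, read as unsigned, lands in 255..65280, and vpsrlw/packuswb recover the final byte.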
| @@ -3514,8 +3517,11 @@ void BlendPlaneRow_SSSE3(const uint8* src0, const uint8* src1,
|
| #endif // HAS_BLENDPLANEROW_SSSE3
|
|
|
| #ifdef HAS_BLENDPLANEROW_AVX2
|
| -// Blend 16 pixels at a time.
|
| -// =((G2*C2)+(H2*(D2))+32768+127)/256
|
| +// Blend 32 pixels at a time.
|
| +// unsigned version of math
|
| +// =((A2*C2)+(B2*(255-C2))+255)/256
|
| +// signed version of math
|
| +// =(((A2-128)*C2)+((B2-128)*(255-C2))+32768+127)/256
|
| void BlendPlaneRow_AVX2(const uint8* src0, const uint8* src1,
|
| const uint8* alpha, uint8* dst, int width) {
|
| asm volatile (
|
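A quick numeric check that the two comment forms agree, e.g. A2=200, B2=100, C2=128: the unsigned form gives (200*128 + 100*127 + 255)/256 = 38555/256 = 150, and the signed form gives ((200-128)*128 + (100-128)*127 + 32768 + 127)/256 = (9216 - 3556 + 32895)/256 = 38555/256 = 150, matching the exact blend (200*128 + 100*127)/255 = 150 after truncation.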
| @@ -3531,27 +3537,30 @@ void BlendPlaneRow_AVX2(const uint8* src0, const uint8* src1,
|
| "sub %2,%1 \n"
|
| "sub %2,%3 \n"
|
|
|
| - // 16 pixel loop.
|
| + // 32 pixel loop.
|
| LABELALIGN
|
| "1: \n"
|
| - "vmovdqu (%2),%%xmm0 \n"
|
| - "vpermq $0xd8,%%ymm0,%%ymm0 \n"
|
| + "vmovdqu (%2),%%ymm0 \n"
|
| + "vpunpckhbw %%ymm0,%%ymm0,%%ymm3 \n"
|
| "vpunpcklbw %%ymm0,%%ymm0,%%ymm0 \n"
|
| + "vpxor %%ymm5,%%ymm3,%%ymm3 \n"
|
| "vpxor %%ymm5,%%ymm0,%%ymm0 \n"
|
| - "vmovdqu (%0,%2,1),%%xmm1 \n"
|
| - "vmovdqu (%1,%2,1),%%xmm2 \n"
|
| - "vpermq $0xd8,%%ymm1,%%ymm1 \n"
|
| - "vpermq $0xd8,%%ymm2,%%ymm2 \n"
|
| + "vmovdqu (%0,%2,1),%%ymm1 \n"
|
| + "vmovdqu (%1,%2,1),%%ymm2 \n"
|
| + "vpunpckhbw %%ymm2,%%ymm1,%%ymm4 \n"
|
| "vpunpcklbw %%ymm2,%%ymm1,%%ymm1 \n"
|
| + "vpsubb %%ymm6,%%ymm4,%%ymm4 \n"
|
| "vpsubb %%ymm6,%%ymm1,%%ymm1 \n"
|
| + "vpmaddubsw %%ymm4,%%ymm3,%%ymm3 \n"
|
| "vpmaddubsw %%ymm1,%%ymm0,%%ymm0 \n"
|
| + "vpaddw %%ymm7,%%ymm3,%%ymm3 \n"
|
| "vpaddw %%ymm7,%%ymm0,%%ymm0 \n"
|
| + "vpsrlw $0x8,%%ymm3,%%ymm3 \n"
|
| "vpsrlw $0x8,%%ymm0,%%ymm0 \n"
|
| - "vpackuswb %%ymm0,%%ymm0,%%ymm0 \n"
|
| - "vpermq $0xd8,%%ymm0,%%ymm0 \n"
|
| - "vmovdqu %%xmm0,(%3,%2,1) \n"
|
| - "lea 0x10(%2),%2 \n"
|
| - "sub $0x10,%4 \n"
|
| + "vpackuswb %%ymm3,%%ymm0,%%ymm0 \n"
|
| + "vmovdqu %%ymm0,(%3,%2,1) \n"
|
| + "lea 0x20(%2),%2 \n"
|
| + "sub $0x20,%4 \n"
|
| "jg 1b \n"
|
| "vzeroupper \n"
|
| : "+r"(src0), // %0
|
| @@ -3559,7 +3568,8 @@ void BlendPlaneRow_AVX2(const uint8* src0, const uint8* src1,
|
| "+r"(alpha), // %2
|
| "+r"(dst), // %3
|
| "+r"(width) // %4
|
| - :: "memory", "cc", "eax", "xmm0", "xmm1", "xmm2", "xmm5", "xmm6", "xmm7"
|
| + :: "memory", "cc", "eax",
|
| + "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7"
|
| );
|
| }
|
| #endif // HAS_BLENDPLANEROW_AVX2
|
|
|
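For readers who find the AT&T inline asm hard to follow, here is an intrinsics rendering of the new 32-pixel loop, meant only as an illustration of the technique: the patch itself stays in inline asm, the function and constant names below are mine, and it assumes width is a positive multiple of 32 with any remainder handled elsewhere. The key point of the rewrite is that splitting each 32-byte load into low/high halves with vpunpcklbw/vpunpckhbw and rejoining them with vpackuswb keeps every byte in its original 128-bit lane, so the vpermq lane fix-ups of the 16-pixel version disappear while twice as many pixels are produced per iteration.

#include <immintrin.h>
#include <stdint.h>

// Sketch of the 32-pixel blend loop; constants mirror the asm register roles:
// kFF00 ~ ymm5, kBias ~ ymm6, kRound ~ ymm7.
static void BlendPlaneRow_AVX2_Sketch(const uint8_t* src0, const uint8_t* src1,
                                      const uint8_t* alpha, uint8_t* dst, int width) {
  const __m256i kFF00 = _mm256_set1_epi16((int16_t)0xff00);   // flips alpha to 255-alpha
  const __m256i kBias = _mm256_set1_epi8((int8_t)0x80);       // -128 source bias
  const __m256i kRound = _mm256_set1_epi16((int16_t)0x807f);  // +32768+127
  for (int x = 0; x < width; x += 32) {
    __m256i a = _mm256_loadu_si256((const __m256i*)(alpha + x));
    // (a, a) byte pairs, then XOR the high byte of each word: (a, 255 - a).
    __m256i a_lo = _mm256_xor_si256(_mm256_unpacklo_epi8(a, a), kFF00);
    __m256i a_hi = _mm256_xor_si256(_mm256_unpackhi_epi8(a, a), kFF00);
    __m256i s0 = _mm256_loadu_si256((const __m256i*)(src0 + x));
    __m256i s1 = _mm256_loadu_si256((const __m256i*)(src1 + x));
    // (src0, src1) byte pairs, biased into signed range for vpmaddubsw.
    __m256i s_lo = _mm256_sub_epi8(_mm256_unpacklo_epi8(s0, s1), kBias);
    __m256i s_hi = _mm256_sub_epi8(_mm256_unpackhi_epi8(s0, s1), kBias);
    // word = a*(src0-128) + (255-a)*(src1-128); unsigned operand is alpha, signed is source.
    __m256i lo = _mm256_maddubs_epi16(a_lo, s_lo);
    __m256i hi = _mm256_maddubs_epi16(a_hi, s_hi);
    // Undo the bias, round, then divide by 256.
    lo = _mm256_srli_epi16(_mm256_add_epi16(lo, kRound), 8);
    hi = _mm256_srli_epi16(_mm256_add_epi16(hi, kRound), 8);
    // packus re-interleaves within each lane in the same order unpck split them,
    // so no cross-lane permute is required.
    _mm256_storeu_si256((__m256i*)(dst + x), _mm256_packus_epi16(lo, hi));
  }
}

Whether a compiler schedules this as well as the hand-written asm is a separate question; the sketch is only meant to make the data flow of the patch explicit.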