Index: source/row_gcc.cc |
diff --git a/source/row_gcc.cc b/source/row_gcc.cc |
index 12c7dd884de344f8cbe533b5bb430e353223fcaf..112a7208532281a5ebee5d5439ed5a719397fe92 100644 |
--- a/source/row_gcc.cc |
+++ b/source/row_gcc.cc |
@@ -3531,27 +3531,30 @@ void BlendPlaneRow_AVX2(const uint8* src0, const uint8* src1, |
"sub %2,%1 \n" |
"sub %2,%3 \n" |
- // 16 pixel loop. |
+ // 32 pixel loop. |
LABELALIGN |
"1: \n" |
- "vmovdqu (%2),%%xmm0 \n" |
- "vpermq $0xd8,%%ymm0,%%ymm0 \n" |
+ "vmovdqu (%2),%%ymm0 \n" |
+ "vpunpckhbw %%ymm0,%%ymm0,%%ymm3 \n" |
"vpunpcklbw %%ymm0,%%ymm0,%%ymm0 \n" |
+ "vpxor %%ymm5,%%ymm3,%%ymm3 \n" |
"vpxor %%ymm5,%%ymm0,%%ymm0 \n" |
- "vmovdqu (%0,%2,1),%%xmm1 \n" |
- "vmovdqu (%1,%2,1),%%xmm2 \n" |
- "vpermq $0xd8,%%ymm1,%%ymm1 \n" |
- "vpermq $0xd8,%%ymm2,%%ymm2 \n" |
+ "vmovdqu (%0,%2,1),%%ymm1 \n" |
+ "vmovdqu (%1,%2,1),%%ymm2 \n" |
+ "vpunpckhbw %%ymm2,%%ymm1,%%ymm4 \n" |
"vpunpcklbw %%ymm2,%%ymm1,%%ymm1 \n" |
+      "vpsubb     %%ymm6,%%ymm4,%%ymm4           \n" |
"vpsubb %%ymm6,%%ymm1,%%ymm1 \n" |
+ "vpmaddubsw %%ymm4,%%ymm3,%%ymm3 \n" |
"vpmaddubsw %%ymm1,%%ymm0,%%ymm0 \n" |
+ "vpaddw %%ymm7,%%ymm3,%%ymm3 \n" |
"vpaddw %%ymm7,%%ymm0,%%ymm0 \n" |
+ "vpsrlw $0x8,%%ymm3,%%ymm3 \n" |
"vpsrlw $0x8,%%ymm0,%%ymm0 \n" |
- "vpackuswb %%ymm0,%%ymm0,%%ymm0 \n" |
- "vpermq $0xd8,%%ymm0,%%ymm0 \n" |
- "vmovdqu %%xmm0,(%3,%2,1) \n" |
- "lea 0x10(%2),%2 \n" |
- "sub $0x10,%4 \n" |
+ "vpackuswb %%ymm3,%%ymm0,%%ymm0 \n" |
+ "vmovdqu %%ymm0,(%3,%2,1) \n" |
+ "lea 0x20(%2),%2 \n" |
+ "sub $0x20,%4 \n" |
"jg 1b \n" |
"vzeroupper \n" |
: "+r"(src0), // %0 |
@@ -3559,7 +3562,8 @@ void BlendPlaneRow_AVX2(const uint8* src0, const uint8* src1, |
"+r"(alpha), // %2 |
"+r"(dst), // %3 |
"+r"(width) // %4 |
- :: "memory", "cc", "eax", "xmm0", "xmm1", "xmm2", "xmm5", "xmm6", "xmm7" |
+ :: "memory", "cc", "eax", |
+ "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7" |
); |
} |
#endif // HAS_BLENDPLANEROW_AVX2 |