Index: source/scale_gcc.cc
diff --git a/source/scale_gcc.cc b/source/scale_gcc.cc
index 9424eceddb4339eaa651bcd1695993f1d53f3c5d..5d10a01fa3831867f45d961f9fb203695a335d4c 100644
--- a/source/scale_gcc.cc
+++ b/source/scale_gcc.cc
@@ -286,7 +286,7 @@ void ScaleRowDown2Box_AVX2(const uint8* src_ptr, ptrdiff_t src_stride,
 }
 #endif  // HAS_SCALEROWDOWN2_AVX2
 
-void ScaleRowDown4_SSE2(const uint8* src_ptr, ptrdiff_t src_stride,
+void ScaleRowDown4_SSSE3(const uint8* src_ptr, ptrdiff_t src_stride,
                         uint8* dst_ptr, int dst_width) {
   asm volatile (
     "pcmpeqb   %%xmm5,%%xmm5                   \n"
@@ -314,12 +314,15 @@ void ScaleRowDown4_SSE2(const uint8* src_ptr, ptrdiff_t src_stride,
   );
 }
 
-void ScaleRowDown4Box_SSE2(const uint8* src_ptr, ptrdiff_t src_stride,
+void ScaleRowDown4Box_SSSE3(const uint8* src_ptr, ptrdiff_t src_stride,
                            uint8* dst_ptr, int dst_width) {
   intptr_t stridex3 = 0;
   asm volatile (
-    "pcmpeqb   %%xmm7,%%xmm7                   \n"
-    "psrlw     $0x8,%%xmm7                     \n"
+    "pcmpeqb   %%xmm4,%%xmm4                   \n"
+    "psrlw     $0xf,%%xmm4                     \n"
+    "movdqa    %%xmm4,%%xmm5                   \n"
+    "packuswb  %%xmm4,%%xmm4                   \n"
+    "psllw     $0x3,%%xmm5                     \n"
     "lea       " MEMLEA4(0x00,4,4,2) ",%3     \n"
 
     LABELALIGN
@@ -328,31 +331,29 @@ void ScaleRowDown4Box_SSE2(const uint8* src_ptr, ptrdiff_t src_stride,
     "movdqu    " MEMACCESS2(0x10,0) ",%%xmm1  \n"
     MEMOPREG(movdqu,0x00,0,4,1,xmm2)           //  movdqu  (%0,%4,1),%%xmm2
     MEMOPREG(movdqu,0x10,0,4,1,xmm3)           //  movdqu  0x10(%0,%4,1),%%xmm3
-    "pavgb     %%xmm2,%%xmm0                   \n"
-    "pavgb     %%xmm3,%%xmm1                   \n"
+    "pmaddubsw %%xmm4,%%xmm0                   \n"
+    "pmaddubsw %%xmm4,%%xmm1                   \n"
+    "pmaddubsw %%xmm4,%%xmm2                   \n"
+    "pmaddubsw %%xmm4,%%xmm3                   \n"
+    "paddw     %%xmm2,%%xmm0                   \n"
+    "paddw     %%xmm3,%%xmm1                   \n"
     MEMOPREG(movdqu,0x00,0,4,2,xmm2)           //  movdqu  (%0,%4,2),%%xmm2
     MEMOPREG(movdqu,0x10,0,4,2,xmm3)           //  movdqu  0x10(%0,%4,2),%%xmm3
-    MEMOPREG(movdqu,0x00,0,3,1,xmm4)           //  movdqu  (%0,%3,1),%%xmm4
-    MEMOPREG(movdqu,0x10,0,3,1,xmm5)           //  movdqu  0x10(%0,%3,1),%%xmm5
+    "pmaddubsw %%xmm4,%%xmm2                   \n"
+    "pmaddubsw %%xmm4,%%xmm3                   \n"
+    "paddw     %%xmm2,%%xmm0                   \n"
+    "paddw     %%xmm3,%%xmm1                   \n"
+    MEMOPREG(movdqu,0x00,0,3,1,xmm2)           //  movdqu  (%0,%3,1),%%xmm2
+    MEMOPREG(movdqu,0x10,0,3,1,xmm3)           //  movdqu  0x10(%0,%3,1),%%xmm3
     "lea       " MEMLEA(0x20,0) ",%0           \n"
-    "pavgb     %%xmm4,%%xmm2                   \n"
-    "pavgb     %%xmm2,%%xmm0                   \n"
-    "pavgb     %%xmm5,%%xmm3                   \n"
-    "pavgb     %%xmm3,%%xmm1                   \n"
-    "movdqa    %%xmm0,%%xmm2                   \n"
-    "psrlw     $0x8,%%xmm0                     \n"
-    "movdqa    %%xmm1,%%xmm3                   \n"
-    "psrlw     $0x8,%%xmm1                     \n"
-    "pand      %%xmm7,%%xmm2                   \n"
-    "pand      %%xmm7,%%xmm3                   \n"
-    "pavgw     %%xmm2,%%xmm0                   \n"
-    "pavgw     %%xmm3,%%xmm1                   \n"
-    "packuswb  %%xmm1,%%xmm0                   \n"
-    "movdqa    %%xmm0,%%xmm2                   \n"
-    "psrlw     $0x8,%%xmm0                     \n"
-    "pand      %%xmm7,%%xmm2                   \n"
-    "pavgw     %%xmm2,%%xmm0                   \n"
-    "packuswb  %%xmm0,%%xmm0                   \n"
+    "pmaddubsw %%xmm4,%%xmm2                   \n"
+    "pmaddubsw %%xmm4,%%xmm3                   \n"
+    "paddw     %%xmm2,%%xmm0                   \n"
+    "paddw     %%xmm3,%%xmm1                   \n"
+    "phaddw    %%xmm1,%%xmm0                   \n"
+    "paddw     %%xmm5,%%xmm0                   \n"
+    "psrlw     $0x4,%%xmm0                     \n"
+    "packuswb  %%xmm0,%%xmm0                   \n"
     "movq      %%xmm0," MEMACCESS(1) "         \n"
     "lea       " MEMLEA(0x8,1) ",%1            \n"
     "sub       $0x8,%2                         \n"
@@ -363,7 +364,7 @@ void ScaleRowDown4Box_SSE2(const uint8* src_ptr, ptrdiff_t src_stride,
     "+r"(stridex3)                // %3
   : "r"((intptr_t)(src_stride))   // %4
   : "memory", "cc", NACL_R14
-    "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm7"
+    "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5"
   );
 }
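Reviewer note, not part of the patch: pmaddubsw and phaddw are SSSE3 instructions, which is why both functions are renamed from _SSE2 to _SSSE3. The old loop built the 4x4 box average from a cascade of pavgb/pavgw averages, each of which rounds; the new loop uses pmaddubsw against a vector of all-ones bytes (xmm4) to sum adjacent pixel pairs into words, folds the four rows together with paddw, completes the horizontal sum with phaddw, then adds the +8 rounding bias held in xmm5 and shifts right by 4. A minimal scalar sketch of the value each destination byte now receives follows; the function name is illustrative, not from the library:

/* Scalar model of the SSSE3 path: each output pixel is the
 * round-to-nearest average of a 4x4 block of source pixels.
 * The 16-bit sums cannot overflow (16 * 255 = 4080 < 32767),
 * which is why the asm can stay in packed words throughout. */
#include <stddef.h>
#include <stdint.h>

static void ScaleRowDown4Box_Sketch(const uint8_t* src_ptr,
                                    ptrdiff_t src_stride,
                                    uint8_t* dst_ptr, int dst_width) {
  for (int x = 0; x < dst_width; ++x) {
    uint16_t sum = 0;
    for (int row = 0; row < 4; ++row) {    // rows 0..3; row 3 is stridex3
      for (int col = 0; col < 4; ++col) {  // four adjacent source columns
        sum += src_ptr[row * src_stride + 4 * x + col];
      }
    }
    dst_ptr[x] = (uint8_t)((sum + 8) >> 4);  // +8 bias, then divide by 16
  }
}

Summing exactly and rounding once also removes the small upward bias the old code accumulated, since pavg rounds up at every stage of the cascade.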