Index: libvpx/source/libvpx/vp8/encoder/x86/sad_sse2.asm
diff --git a/libvpx/source/libvpx/vp8/encoder/x86/sad_sse2.asm b/libvpx/source/libvpx/vp8/encoder/x86/sad_sse2.asm
index 1011c95538d6d64e0a22d7ff246208e19cdd4a36..39ed796049bc0b5c01dbdd0b99e72106f20081c3 100644
--- a/libvpx/source/libvpx/vp8/encoder/x86/sad_sse2.asm
+++ b/libvpx/source/libvpx/vp8/encoder/x86/sad_sse2.asm
@@ -21,7 +21,6 @@ sym(vp8_sad16x16_wmt):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 4
- SAVE_XMM 6
push rsi
push rdi
; end prolog
@@ -35,7 +34,7 @@ sym(vp8_sad16x16_wmt):
lea rcx, [rsi+rax*8]
lea rcx, [rcx+rax*8]
- pxor xmm6, xmm6
+ pxor xmm7, xmm7
x16x16sad_wmt_loop:
@@ -53,33 +52,32 @@ x16x16sad_wmt_loop:
punpcklbw xmm1, xmm3
psadbw xmm0, xmm1
- movq xmm2, QWORD PTR [rsi+rax+8]
+ movq xmm6, QWORD PTR [rsi+rax+8]
movq xmm3, QWORD PTR [rdi+rdx+8]
lea rsi, [rsi+rax*2]
lea rdi, [rdi+rdx*2]
- punpcklbw xmm4, xmm2
+ punpcklbw xmm4, xmm6
punpcklbw xmm5, xmm3
psadbw xmm4, xmm5
- paddw xmm6, xmm0
- paddw xmm6, xmm4
+ paddw xmm7, xmm0
+ paddw xmm7, xmm4
cmp rsi, rcx
jne x16x16sad_wmt_loop
- movq xmm0, xmm6
- psrldq xmm6, 8
+ movq xmm0, xmm7
+ psrldq xmm7, 8
- paddw xmm0, xmm6
+ paddw xmm0, xmm7
movq rax, xmm0
; begin epilog
pop rdi
pop rsi
- RESTORE_XMM
UNSHADOW_ARGS
pop rbp
ret
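For context on the hunks above: the 16x16 SAD loop accumulates per-row psadbw results into a single accumulator register (xmm6 before this change, xmm7 after it), and the movq/psrldq/paddw sequence at the end folds the accumulator's upper 64-bit lane into the lower one before the total is moved to rax. The scalar C sketch below shows the value being computed; the signature (src_ptr, src_stride, ref_ptr, ref_stride) is assumed from the usual libvpx SAD convention, and the function name is illustrative, not the project's C reference implementation.

/* Illustrative only: scalar equivalent of the 16x16 SAD accumulated above.
 * The signature is assumed from the libvpx convention, not taken from this diff. */
static unsigned int sad16x16_scalar_sketch(const unsigned char *src_ptr, int src_stride,
                                           const unsigned char *ref_ptr, int ref_stride)
{
    unsigned int sad = 0;
    int r, c;
    for (r = 0; r < 16; ++r) {
        for (c = 0; c < 16; ++c) {
            int d = src_ptr[c] - ref_ptr[c];
            sad += (d < 0) ? -d : d;    /* absolute difference per pixel */
        }
        src_ptr += src_stride;          /* advance one row in each block */
        ref_ptr += ref_stride;
    }
    return sad;
}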
@@ -114,7 +112,7 @@ sym(vp8_sad8x16_wmt):
x8x16sad_wmt_loop:
movq rax, mm7
- cmp eax, arg(4)
+ cmp rax, arg(4)
jg x8x16sad_wmt_early_exit
movq mm0, QWORD PTR [rsi]
@@ -175,7 +173,7 @@ sym(vp8_sad8x8_wmt):
x8x8sad_wmt_loop:
movq rax, mm7
- cmp eax, arg(4)
+ cmp rax, arg(4)
jg x8x8sad_wmt_early_exit
movq mm0, QWORD PTR [rsi]
@@ -284,7 +282,7 @@ sym(vp8_sad16x8_wmt):
x16x8sad_wmt_loop:
movq rax, mm7
- cmp eax, arg(4)
+ cmp rax, arg(4)
jg x16x8sad_wmt_early_exit
movq mm0, QWORD PTR [rsi]
@@ -328,83 +326,3 @@ x16x8sad_wmt_early_exit:
UNSHADOW_ARGS
pop rbp
ret
-
-;void vp8_copy32xn_sse2(
-; unsigned char *src_ptr,
-; int src_stride,
-; unsigned char *dst_ptr,
-; int dst_stride,
-; int height);
-global sym(vp8_copy32xn_sse2)
-sym(vp8_copy32xn_sse2):
- push rbp
- mov rbp, rsp
- SHADOW_ARGS_TO_STACK 5
- SAVE_XMM 7
- push rsi
- push rdi
- ; end prolog
-
- mov rsi, arg(0) ;src_ptr
- mov rdi, arg(2) ;dst_ptr
-
- movsxd rax, dword ptr arg(1) ;src_stride
- movsxd rdx, dword ptr arg(3) ;dst_stride
- movsxd rcx, dword ptr arg(4) ;height
-
-block_copy_sse2_loopx4:
- movdqu xmm0, XMMWORD PTR [rsi]
- movdqu xmm1, XMMWORD PTR [rsi + 16]
- movdqu xmm2, XMMWORD PTR [rsi + rax]
- movdqu xmm3, XMMWORD PTR [rsi + rax + 16]
-
- lea rsi, [rsi+rax*2]
-
- movdqu xmm4, XMMWORD PTR [rsi]
- movdqu xmm5, XMMWORD PTR [rsi + 16]
- movdqu xmm6, XMMWORD PTR [rsi + rax]
- movdqu xmm7, XMMWORD PTR [rsi + rax + 16]
-
- lea rsi, [rsi+rax*2]
-
- movdqa XMMWORD PTR [rdi], xmm0
- movdqa XMMWORD PTR [rdi + 16], xmm1
- movdqa XMMWORD PTR [rdi + rdx], xmm2
- movdqa XMMWORD PTR [rdi + rdx + 16], xmm3
-
- lea rdi, [rdi+rdx*2]
-
- movdqa XMMWORD PTR [rdi], xmm4
- movdqa XMMWORD PTR [rdi + 16], xmm5
- movdqa XMMWORD PTR [rdi + rdx], xmm6
- movdqa XMMWORD PTR [rdi + rdx + 16], xmm7
-
- lea rdi, [rdi+rdx*2]
-
- sub rcx, 4
- cmp rcx, 4
- jge block_copy_sse2_loopx4
-
- cmp rcx, 0
- je copy_is_done
-
-block_copy_sse2_loop:
- movdqu xmm0, XMMWORD PTR [rsi]
- movdqu xmm1, XMMWORD PTR [rsi + 16]
- lea rsi, [rsi+rax]
-
- movdqa XMMWORD PTR [rdi], xmm0
- movdqa XMMWORD PTR [rdi + 16], xmm1
- lea rdi, [rdi+rdx]
-
- sub rcx, 1
- jne block_copy_sse2_loop
-
-copy_is_done:
- ; begin epilog
- pop rdi
- pop rsi
- RESTORE_XMM
- UNSHADOW_ARGS
- pop rbp
- ret
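For reference, the routine removed by the final hunk, vp8_copy32xn_sse2, copies a 32-byte-wide block for height rows: four rows per iteration with unaligned loads (movdqu) and aligned stores (movdqa), then any remaining rows one at a time. A minimal scalar C sketch of the equivalent behavior follows; the function name is illustrative, and it uses plain byte copies in place of the aligned 16-byte stores used by the deleted assembly.

/* Illustrative only: scalar equivalent of the removed vp8_copy32xn_sse2.
 * Parameter names follow the comment block in the deleted code; the
 * function name is made up for this sketch. */
#include <string.h>

static void copy32xn_scalar_sketch(const unsigned char *src_ptr, int src_stride,
                                   unsigned char *dst_ptr, int dst_stride, int height)
{
    int r;
    for (r = 0; r < height; ++r) {
        memcpy(dst_ptr, src_ptr, 32);   /* one 32-byte row per iteration */
        src_ptr += src_stride;
        dst_ptr += dst_stride;
    }
}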