Index: libvpx/source/libvpx/vp8/encoder/arm/armv6/vp8_variance_halfpixvar16x16_h_armv6.asm
diff --git a/libvpx/source/libvpx/vp8/encoder/arm/armv6/vp8_variance_halfpixvar16x16_h_armv6.asm b/libvpx/source/libvpx/vp8/encoder/arm/armv6/vp8_variance_halfpixvar16x16_h_armv6.asm
index 1b5489795634c2e6d9ad8f1d06498a2fbed28d5a..2350f3e8b086d8e061e22cf9264bad7b6e713882 100644
--- a/libvpx/source/libvpx/vp8/encoder/arm/armv6/vp8_variance_halfpixvar16x16_h_armv6.asm
+++ b/libvpx/source/libvpx/vp8/encoder/arm/armv6/vp8_variance_halfpixvar16x16_h_armv6.asm
@@ -25,10 +25,6 @@
 |vp8_variance_halfpixvar16x16_h_armv6| PROC
     stmfd   sp!, {r4-r12, lr}
-
-    pld     [r0, r1, lsl #0]
-    pld     [r2, r3, lsl #0]
-
     mov     r8, #0              ; initialize sum = 0
     ldr     r10, c80808080
     mov     r11, #0             ; initialize sse = 0
@@ -46,10 +42,8 @@ loop
     eor     r4, r4, r10
     usub8   r6, r4, r5          ; calculate difference
-    pld     [r0, r1, lsl #1]
     sel     r7, r6, lr          ; select bytes with positive difference
     usub8   r6, r5, r4          ; calculate difference with reversed operands
-    pld     [r2, r3, lsl #1]
     sel     r6, r6, lr          ; select bytes with negative difference
     ; calculate partial sums