Index: source/libvpx/third_party/libyuv/source/scale_neon.cc
===================================================================
--- source/libvpx/third_party/libyuv/source/scale_neon.cc (revision 290053)
+++ source/libvpx/third_party/libyuv/source/scale_neon.cc (working copy)
@@ -8,7 +8,7 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "third_party/libyuv/include/libyuv/row.h"
+#include "libyuv/row.h"
#ifdef __cplusplus
namespace libyuv {
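
The MEMACCESS(n) annotations added throughout this patch come from libyuv's
row.h. A sketch of the assumed definition (worth verifying against the row.h
revision in this tree):

    /* Assumed shape of the macro in libyuv/row.h -- not part of this patch. */
    #if defined(__native_client__)
    /* Mask the base register so the load/store stays inside the NaCl sandbox. */
    #define MEMACCESS(base) ".p2align 3\nbic %" #base ", #0xc0000000\n"
    #else
    #define MEMACCESS(base)  /* expands to nothing on normal ARM builds */
    #endif

Under the Native Client ARM sandbox every memory access must mask the upper
bits of its base register, so each vld/vst below gains an annotation naming
the operand index it dereferences; outside NaCl the generated code is
unchanged.
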
@@ -28,8 +28,10 @@
".p2align 2 \n"
"1: \n"
// load even pixels into q0, odd into q1
+ MEMACCESS(0)
"vld2.8 {q0, q1}, [%0]! \n"
"subs %2, %2, #16 \n" // 16 processed per loop
+ MEMACCESS(1)
"vst1.8 {q1}, [%1]! \n" // store odd pixels
"bgt 1b \n"
: "+r"(src_ptr), // %0
@@ -48,7 +50,9 @@
"add %1, %0 \n"
".p2align 2 \n"
"1: \n"
+ MEMACCESS(0)
"vld1.8 {q0, q1}, [%0]! \n" // load row 1 and post inc
+ MEMACCESS(1)
"vld1.8 {q2, q3}, [%1]! \n" // load row 2 and post inc
"subs %3, %3, #16 \n" // 16 processed per loop
"vpaddl.u8 q0, q0 \n" // row 1 add adjacent
@@ -57,6 +61,7 @@
"vpadal.u8 q1, q3 \n"
"vrshrn.u16 d0, q0, #2 \n" // downshift, round and pack
"vrshrn.u16 d1, q1, #2 \n"
+ MEMACCESS(2)
"vst1.8 {q0}, [%2]! \n"
"bgt 1b \n"
: "+r"(src_ptr), // %0
@@ -73,8 +78,10 @@
asm volatile (
".p2align 2 \n"
"1: \n"
+ MEMACCESS(0)
"vld4.8 {d0, d1, d2, d3}, [%0]! \n" // src line 0
"subs %2, %2, #8 \n" // 8 processed per loop
+ MEMACCESS(1)
"vst1.8 {d2}, [%1]! \n"
"bgt 1b \n"
: "+r"(src_ptr), // %0
@@ -87,16 +94,20 @@
void ScaleRowDown4Box_NEON(const uint8* src_ptr, ptrdiff_t src_stride,
uint8* dst_ptr, int dst_width) {
- asm volatile (
- "add r4, %0, %3 \n"
- "add r5, r4, %3 \n"
- "add %3, r5, %3 \n"
+ const uint8* src_ptr1 = src_ptr + src_stride;
+ const uint8* src_ptr2 = src_ptr + src_stride * 2;
+ const uint8* src_ptr3 = src_ptr + src_stride * 3;
+ asm volatile (
".p2align 2 \n" |
"1: \n" |
+ MEMACCESS(0) |
"vld1.8 {q0}, [%0]! \n" // load up 16x4 |
- "vld1.8 {q1}, [r4]! \n" |
- "vld1.8 {q2}, [r5]! \n" |
- "vld1.8 {q3}, [%3]! \n" |
+ MEMACCESS(3) |
+ "vld1.8 {q1}, [%3]! \n" |
+ MEMACCESS(4) |
+ "vld1.8 {q2}, [%4]! \n" |
+ MEMACCESS(5) |
+ "vld1.8 {q3}, [%5]! \n" |
"subs %2, %2, #4 \n" |
"vpaddl.u8 q0, q0 \n" |
"vpadal.u8 q0, q1 \n" |
@@ -105,13 +116,17 @@ |
"vpaddl.u16 q0, q0 \n" |
"vrshrn.u32 d0, q0, #4 \n" // divide by 16 w/rounding |
"vmovn.u16 d0, q0 \n" |
+ MEMACCESS(1) |
"vst1.32 {d0[0]}, [%1]! \n" |
"bgt 1b \n" |
- : "+r"(src_ptr), // %0 |
- "+r"(dst_ptr), // %1 |
- "+r"(dst_width) // %2 |
- : "r"(src_stride) // %3 |
- : "r4", "r5", "q0", "q1", "q2", "q3", "memory", "cc" |
+ : "+r"(src_ptr), // %0 |
+ "+r"(dst_ptr), // %1 |
+ "+r"(dst_width), // %2 |
+ "+r"(src_ptr1), // %3 |
+ "+r"(src_ptr2), // %4 |
+ "+r"(src_ptr3) // %5 |
+ : |
+ : "q0", "q1", "q2", "q3", "memory", "cc" |
); |
} |
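
For reference, the vpaddl/vpadal chain above sums each 4x4 block of source
pixels and vrshrn.u32 #4 divides by 16 with rounding. A scalar sketch of the
same arithmetic (hypothetical helper, not part of libyuv):

    static void ScaleRowDown4Box_ref(const uint8* src_ptr, ptrdiff_t src_stride,
                                     uint8* dst_ptr, int dst_width) {
      int x, i, j;
      for (x = 0; x < dst_width; ++x) {
        uint32 sum = 0;
        for (j = 0; j < 4; ++j) {    /* four source rows */
          for (i = 0; i < 4; ++i) {  /* four source columns */
            sum += src_ptr[j * src_stride + x * 4 + i];
          }
        }
        dst_ptr[x] = (uint8)((sum + 8) >> 4);  /* /16 with rounding */
      }
    }
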
@@ -124,9 +139,11 @@
asm volatile (
".p2align 2 \n"
"1: \n"
+ MEMACCESS(0)
"vld4.8 {d0, d1, d2, d3}, [%0]! \n" // src line 0
"subs %2, %2, #24 \n"
"vmov d2, d3 \n" // order d0, d1, d2
+ MEMACCESS(1)
"vst3.8 {d0, d1, d2}, [%1]! \n"
"bgt 1b \n"
: "+r"(src_ptr), // %0
@@ -145,7 +162,9 @@
"add %3, %0 \n"
".p2align 2 \n"
"1: \n"
+ MEMACCESS(0)
"vld4.8 {d0, d1, d2, d3}, [%0]! \n" // src line 0
+ MEMACCESS(3)
"vld4.8 {d4, d5, d6, d7}, [%3]! \n" // src line 1
"subs %2, %2, #24 \n"
@@ -182,6 +201,7 @@
"vmlal.u8 q8, d3, d24 \n"
"vqrshrn.u16 d2, q8, #2 \n"
+ MEMACCESS(1)
"vst3.8 {d0, d1, d2}, [%1]! \n"
"bgt 1b \n"
@@ -202,7 +222,9 @@
"add %3, %0 \n"
".p2align 2 \n"
"1: \n"
+ MEMACCESS(0)
"vld4.8 {d0, d1, d2, d3}, [%0]! \n" // src line 0
+ MEMACCESS(3)
"vld4.8 {d4, d5, d6, d7}, [%3]! \n" // src line 1
"subs %2, %2, #24 \n"
// average src line 0 with src line 1
@@ -222,6 +244,7 @@
"vmlal.u8 q3, d3, d24 \n"
"vqrshrn.u16 d2, q3, #2 \n"
+ MEMACCESS(1)
"vst3.8 {d0, d1, d2}, [%1]! \n"
"bgt 1b \n"
: "+r"(src_ptr), // %0
@@ -250,14 +273,18 @@
ptrdiff_t src_stride,
uint8* dst_ptr, int dst_width) {
asm volatile (
+ MEMACCESS(3)
"vld1.8 {q3}, [%3] \n"
".p2align 2 \n"
"1: \n"
+ MEMACCESS(0)
"vld1.8 {d0, d1, d2, d3}, [%0]! \n"
"subs %2, %2, #12 \n"
"vtbl.u8 d4, {d0, d1, d2, d3}, d6 \n"
"vtbl.u8 d5, {d0, d1, d2, d3}, d7 \n"
+ MEMACCESS(1)
"vst1.8 {d4}, [%1]! \n"
+ MEMACCESS(1)
"vst1.32 {d5[0]}, [%1]! \n"
"bgt 1b \n"
: "+r"(src_ptr), // %0
@@ -272,11 +299,15 @@
void OMITFP ScaleRowDown38_3_Box_NEON(const uint8* src_ptr,
ptrdiff_t src_stride,
uint8* dst_ptr, int dst_width) {
+ const uint8* src_ptr1 = src_ptr + src_stride * 2;
+
asm volatile (
- "vld1.16 {q13}, [%4] \n"
- "vld1.8 {q14}, [%5] \n"
- "vld1.8 {q15}, [%6] \n"
- "add r4, %0, %3, lsl #1 \n"
+ MEMACCESS(5)
+ "vld1.16 {q13}, [%5] \n"
+ MEMACCESS(6)
+ "vld1.8 {q14}, [%6] \n"
+ MEMACCESS(7)
+ "vld1.8 {q15}, [%7] \n"
"add %3, %0 \n"
".p2align 2 \n"
"1: \n"
@@ -285,9 +316,12 @@
// d1 = 10 50 11 51 12 52 13 53
// d2 = 20 60 21 61 22 62 23 63
// d3 = 30 70 31 71 32 72 33 73
+ MEMACCESS(0)
"vld4.8 {d0, d1, d2, d3}, [%0]! \n"
+ MEMACCESS(3)
"vld4.8 {d4, d5, d6, d7}, [%3]! \n"
- "vld4.8 {d16, d17, d18, d19}, [r4]! \n"
+ MEMACCESS(4)
+ "vld4.8 {d16, d17, d18, d19}, [%4]! \n"
"subs %2, %2, #12 \n"
// Shuffle the input data around to align the data
@@ -364,18 +398,20 @@
"vtbl.u8 d3, {d0, d1, d2}, d28 \n"
"vtbl.u8 d4, {d0, d1, d2}, d29 \n"
+ MEMACCESS(1)
"vst1.8 {d3}, [%1]! \n"
+ MEMACCESS(1)
"vst1.32 {d4[0]}, [%1]! \n"
"bgt 1b \n"
: "+r"(src_ptr), // %0
"+r"(dst_ptr), // %1
"+r"(dst_width), // %2
- "+r"(src_stride) // %3
- : "r"(&kMult38_Div6), // %4
- "r"(&kShuf38_2), // %5
- "r"(&kMult38_Div9) // %6
- : "r4", "q0", "q1", "q2", "q3", "q8", "q9",
- "q13", "q14", "q15", "memory", "cc"
+ "+r"(src_stride), // %3
+ "+r"(src_ptr1) // %4
+ : "r"(&kMult38_Div6), // %5
+ "r"(&kShuf38_2), // %6
+ "r"(&kMult38_Div9) // %7
+ : "q0", "q1", "q2", "q3", "q8", "q9", "q13", "q14", "q15", "memory", "cc"
);
}
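
The same restructuring recurs throughout this patch: row pointers that the
old code derived inside the asm with scratch registers (r4, r5) are now
computed in C and passed as "+r" operands, so the compiler allocates the
registers instead of the asm pinning fixed ones, and the MEMACCESS
annotations can name a stable operand index. A minimal sketch of the
pattern (hypothetical, simplified):

    const uint8* row1 = src_ptr + src_stride;  /* was: "add r4, %0, %3" */
    asm volatile (
      MEMACCESS(0)
      "vld1.8 {q0}, [%0]! \n"
      MEMACCESS(1)
      "vld1.8 {q1}, [%1]! \n"
      : "+r"(src_ptr), "+r"(row1)
      :
      : "q0", "q1", "memory", "cc"
    );
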
@@ -384,7 +420,9 @@
ptrdiff_t src_stride,
uint8* dst_ptr, int dst_width) {
asm volatile (
+ MEMACCESS(4)
"vld1.16 {q13}, [%4] \n"
+ MEMACCESS(5)
"vld1.8 {q14}, [%5] \n"
"add %3, %0 \n"
".p2align 2 \n"
@@ -394,7 +432,9 @@
// d1 = 10 50 11 51 12 52 13 53
// d2 = 20 60 21 61 22 62 23 63
// d3 = 30 70 31 71 32 72 33 73
+ MEMACCESS(0)
"vld4.8 {d0, d1, d2, d3}, [%0]! \n"
+ MEMACCESS(3)
"vld4.8 {d4, d5, d6, d7}, [%3]! \n"
"subs %2, %2, #12 \n"
@@ -461,7 +501,9 @@
"vtbl.u8 d3, {d0, d1, d2}, d28 \n"
"vtbl.u8 d4, {d0, d1, d2}, d29 \n"
+ MEMACCESS(1)
"vst1.8 {d3}, [%1]! \n"
+ MEMACCESS(1)
"vst1.32 {d4[0]}, [%1]! \n"
"bgt 1b \n"
: "+r"(src_ptr), // %0
@@ -494,7 +536,9 @@
"vdup.8 d4, %4 \n"
// General purpose row blend.
"1: \n"
+ MEMACCESS(1)
"vld1.8 {q0}, [%1]! \n"
+ MEMACCESS(2)
"vld1.8 {q1}, [%2]! \n"
"subs %3, %3, #16 \n"
"vmull.u8 q13, d0, d4 \n"
@@ -503,50 +547,63 @@
"vmlal.u8 q14, d3, d5 \n"
"vrshrn.u16 d0, q13, #8 \n"
"vrshrn.u16 d1, q14, #8 \n"
+ MEMACCESS(0)
"vst1.8 {q0}, [%0]! \n"
"bgt 1b \n"
"b 99f \n"
// Blend 25 / 75.
"25: \n"
+ MEMACCESS(1)
"vld1.8 {q0}, [%1]! \n"
+ MEMACCESS(2)
"vld1.8 {q1}, [%2]! \n"
"subs %3, %3, #16 \n"
"vrhadd.u8 q0, q1 \n"
"vrhadd.u8 q0, q1 \n"
+ MEMACCESS(0)
"vst1.8 {q0}, [%0]! \n"
"bgt 25b \n"
"b 99f \n"
// Blend 50 / 50.
"50: \n"
+ MEMACCESS(1)
"vld1.8 {q0}, [%1]! \n"
+ MEMACCESS(2)
"vld1.8 {q1}, [%2]! \n"
"subs %3, %3, #16 \n"
"vrhadd.u8 q0, q1 \n"
+ MEMACCESS(0)
"vst1.8 {q0}, [%0]! \n"
"bgt 50b \n"
"b 99f \n"
// Blend 75 / 25.
"75: \n"
+ MEMACCESS(1)
"vld1.8 {q1}, [%1]! \n"
+ MEMACCESS(2)
"vld1.8 {q0}, [%2]! \n"
"subs %3, %3, #16 \n"
"vrhadd.u8 q0, q1 \n"
"vrhadd.u8 q0, q1 \n"
+ MEMACCESS(0)
"vst1.8 {q0}, [%0]! \n"
"bgt 75b \n"
"b 99f \n"
// Blend 100 / 0 - Copy row unchanged.
"100: \n"
+ MEMACCESS(1)
"vld1.8 {q0}, [%1]! \n"
"subs %3, %3, #16 \n"
+ MEMACCESS(0)
"vst1.8 {q0}, [%0]! \n"
"bgt 100b \n"
"99: \n"
+ MEMACCESS(0)
"vst1.8 {d1[7]}, [%0] \n"
: "+r"(dst_ptr), // %0
"+r"(src_ptr), // %1
@@ -564,10 +621,14 @@
".p2align 2 \n"
"1: \n"
// load even pixels into q0, odd into q1
+ MEMACCESS(0)
"vld2.32 {q0, q1}, [%0]! \n"
+ MEMACCESS(0)
"vld2.32 {q2, q3}, [%0]! \n"
"subs %2, %2, #8 \n" // 8 processed per loop
+ MEMACCESS(1)
"vst1.8 {q1}, [%1]! \n" // store odd pixels
+ MEMACCESS(1)
"vst1.8 {q3}, [%1]! \n"
"bgt 1b \n"
: "+r"(src_ptr), // %0
@@ -585,14 +646,18 @@
"add %1, %1, %0 \n"
".p2align 2 \n"
"1: \n"
+ MEMACCESS(0)
"vld4.8 {d0, d2, d4, d6}, [%0]! \n" // load 8 ARGB pixels.
+ MEMACCESS(0)
"vld4.8 {d1, d3, d5, d7}, [%0]! \n" // load next 8 ARGB pixels.
"subs %3, %3, #8 \n" // 8 processed per loop.
"vpaddl.u8 q0, q0 \n" // B 16 bytes -> 8 shorts.
"vpaddl.u8 q1, q1 \n" // G 16 bytes -> 8 shorts.
"vpaddl.u8 q2, q2 \n" // R 16 bytes -> 8 shorts.
"vpaddl.u8 q3, q3 \n" // A 16 bytes -> 8 shorts.
+ MEMACCESS(1)
"vld4.8 {d16, d18, d20, d22}, [%1]! \n" // load 8 more ARGB pixels.
+ MEMACCESS(1)
"vld4.8 {d17, d19, d21, d23}, [%1]! \n" // load last 8 ARGB pixels.
"vpadal.u8 q0, q8 \n" // B 16 bytes -> 8 shorts.
"vpadal.u8 q1, q9 \n" // G 16 bytes -> 8 shorts.
@@ -602,6 +667,7 @@
"vrshrn.u16 d1, q1, #2 \n"
"vrshrn.u16 d2, q2, #2 \n"
"vrshrn.u16 d3, q3, #2 \n"
+ MEMACCESS(2)
"vst4.8 {d0, d1, d2, d3}, [%2]! \n"
"bgt 1b \n"
: "+r"(src_ptr), // %0
@@ -621,11 +687,16 @@
"mov r12, %3, lsl #2 \n"
".p2align 2 \n"
"1: \n"
+ MEMACCESS(0)
"vld1.32 {d0[0]}, [%0], r12 \n"
+ MEMACCESS(0)
"vld1.32 {d0[1]}, [%0], r12 \n"
+ MEMACCESS(0)
"vld1.32 {d1[0]}, [%0], r12 \n"
+ MEMACCESS(0)
"vld1.32 {d1[1]}, [%0], r12 \n"
"subs %2, %2, #4 \n" // 4 pixels per loop.
+ MEMACCESS(1)
"vst1.8 {q0}, [%1]! \n"
"bgt 1b \n"
: "+r"(src_argb), // %0
@@ -646,13 +717,21 @@
"add %1, %1, %0 \n"
".p2align 2 \n"
"1: \n"
+ MEMACCESS(0)
"vld1.8 {d0}, [%0], r12 \n" // Read 4 2x2 blocks -> 2x1
+ MEMACCESS(1)
"vld1.8 {d1}, [%1], r12 \n"
+ MEMACCESS(0)
"vld1.8 {d2}, [%0], r12 \n"
+ MEMACCESS(1)
"vld1.8 {d3}, [%1], r12 \n"
+ MEMACCESS(0)
"vld1.8 {d4}, [%0], r12 \n"
+ MEMACCESS(1)
"vld1.8 {d5}, [%1], r12 \n"
+ MEMACCESS(0)
"vld1.8 {d6}, [%0], r12 \n"
+ MEMACCESS(1)
"vld1.8 {d7}, [%1], r12 \n"
"vaddl.u8 q0, d0, d1 \n"
"vaddl.u8 q1, d2, d3 \n"
@@ -665,6 +744,7 @@
"vrshrn.u16 d0, q0, #2 \n" // first 2 pixels.
"vrshrn.u16 d1, q2, #2 \n" // next 2 pixels.
"subs %3, %3, #4 \n" // 4 pixels per loop.
+ MEMACCESS(2)
"vst1.8 {q0}, [%2]! \n"
"bgt 1b \n"
: "+r"(src_argb), // %0