Index: source/libvpx/vpx_dsp/x86/sad4d_avx2.c
diff --git a/source/libvpx/vpx_dsp/x86/sad4d_avx2.c b/source/libvpx/vpx_dsp/x86/sad4d_avx2.c
index 4128f2ac37c6652c171e439254d1db17333afa0e..793658f9ea937098aa17394e0de65d0ce7485ce9 100644
--- a/source/libvpx/vpx_dsp/x86/sad4d_avx2.c
+++ b/source/libvpx/vpx_dsp/x86/sad4d_avx2.c
@@ -8,18 +8,19 @@
  * be found in the AUTHORS file in the root of the source tree.
  */
 #include <immintrin.h> // AVX2
+#include "./vpx_dsp_rtcd.h"
 #include "vpx/vpx_integer.h"
 
-void vpx_sad32x32x4d_avx2(uint8_t *src,
+void vpx_sad32x32x4d_avx2(const uint8_t *src,
                           int src_stride,
-                          uint8_t *ref[4],
+                          const uint8_t *const ref[4],
                           int ref_stride,
                           uint32_t res[4]) {
   __m256i src_reg, ref0_reg, ref1_reg, ref2_reg, ref3_reg;
   __m256i sum_ref0, sum_ref1, sum_ref2, sum_ref3;
   __m256i sum_mlow, sum_mhigh;
   int i;
-  uint8_t *ref0, *ref1, *ref2, *ref3;
+  const uint8_t *ref0, *ref1, *ref2, *ref3;
 
   ref0 = ref[0];
   ref1 = ref[1];
@@ -31,11 +32,11 @@ void vpx_sad32x32x4d_avx2(uint8_t *src,
   sum_ref3 = _mm256_set1_epi16(0);
   for (i = 0; i < 32 ; i++) {
     // load src and all refs
-    src_reg = _mm256_loadu_si256((__m256i *)(src));
-    ref0_reg = _mm256_loadu_si256((__m256i *) (ref0));
-    ref1_reg = _mm256_loadu_si256((__m256i *) (ref1));
-    ref2_reg = _mm256_loadu_si256((__m256i *) (ref2));
-    ref3_reg = _mm256_loadu_si256((__m256i *) (ref3));
+    src_reg = _mm256_loadu_si256((const __m256i *)src);
+    ref0_reg = _mm256_loadu_si256((const __m256i *)ref0);
+    ref1_reg = _mm256_loadu_si256((const __m256i *)ref1);
+    ref2_reg = _mm256_loadu_si256((const __m256i *)ref2);
+    ref3_reg = _mm256_loadu_si256((const __m256i *)ref3);
     // sum of the absolute differences between every ref-i to src
     ref0_reg = _mm256_sad_epu8(ref0_reg, src_reg);
     ref1_reg = _mm256_sad_epu8(ref1_reg, src_reg);
@@ -80,9 +81,9 @@ void vpx_sad32x32x4d_avx2(uint8_t *src,
   }
 }
 
-void vpx_sad64x64x4d_avx2(uint8_t *src,
+void vpx_sad64x64x4d_avx2(const uint8_t *src,
                           int src_stride,
-                          uint8_t *ref[4],
+                          const uint8_t *const ref[4],
                           int ref_stride,
                           uint32_t res[4]) {
   __m256i src_reg, srcnext_reg, ref0_reg, ref0next_reg;
@@ -91,7 +92,7 @@ void vpx_sad64x64x4d_avx2(uint8_t *src,
   __m256i sum_ref0, sum_ref1, sum_ref2, sum_ref3;
   __m256i sum_mlow, sum_mhigh;
   int i;
-  uint8_t *ref0, *ref1, *ref2, *ref3;
+  const uint8_t *ref0, *ref1, *ref2, *ref3;
 
   ref0 = ref[0];
   ref1 = ref[1];
@@ -103,16 +104,16 @@ void vpx_sad64x64x4d_avx2(uint8_t *src,
   sum_ref3 = _mm256_set1_epi16(0);
   for (i = 0; i < 64 ; i++) {
     // load 64 bytes from src and all refs
-    src_reg = _mm256_loadu_si256((__m256i *)(src));
-    srcnext_reg = _mm256_loadu_si256((__m256i *)(src + 32));
-    ref0_reg = _mm256_loadu_si256((__m256i *) (ref0));
-    ref0next_reg = _mm256_loadu_si256((__m256i *) (ref0 + 32));
-    ref1_reg = _mm256_loadu_si256((__m256i *) (ref1));
-    ref1next_reg = _mm256_loadu_si256((__m256i *) (ref1 + 32));
-    ref2_reg = _mm256_loadu_si256((__m256i *) (ref2));
-    ref2next_reg = _mm256_loadu_si256((__m256i *) (ref2 + 32));
-    ref3_reg = _mm256_loadu_si256((__m256i *) (ref3));
-    ref3next_reg = _mm256_loadu_si256((__m256i *) (ref3 + 32));
+    src_reg = _mm256_loadu_si256((const __m256i *)src);
+    srcnext_reg = _mm256_loadu_si256((const __m256i *)(src + 32));
+    ref0_reg = _mm256_loadu_si256((const __m256i *)ref0);
+    ref0next_reg = _mm256_loadu_si256((const __m256i *)(ref0 + 32));
+    ref1_reg = _mm256_loadu_si256((const __m256i *)ref1);
+    ref1next_reg = _mm256_loadu_si256((const __m256i *)(ref1 + 32));
+    ref2_reg = _mm256_loadu_si256((const __m256i *)ref2);
+    ref2next_reg = _mm256_loadu_si256((const __m256i *)(ref2 + 32));
+    ref3_reg = _mm256_loadu_si256((const __m256i *)ref3);
+    ref3next_reg = _mm256_loadu_si256((const __m256i *)(ref3 + 32));
     // sum of the absolute differences between every ref-i to src
     ref0_reg = _mm256_sad_epu8(ref0_reg, src_reg);
     ref1_reg = _mm256_sad_epu8(ref1_reg, src_reg);