Index: source/libvpx/vpx_dsp/x86/inv_txfm_sse2.c
diff --git a/source/libvpx/vpx_dsp/x86/inv_txfm_sse2.c b/source/libvpx/vpx_dsp/x86/inv_txfm_sse2.c
index f894ee6789ffa98a134c1329360fc6520a7d7982..f3af68f08af1bfb72150cfdd57172427dbdb87b9 100644
--- a/source/libvpx/vpx_dsp/x86/inv_txfm_sse2.c
+++ b/source/libvpx/vpx_dsp/x86/inv_txfm_sse2.c
@@ -8,6 +8,7 @@
* be found in the AUTHORS file in the root of the source tree.
*/
+#include "./vpx_dsp_rtcd.h"
#include "vpx_dsp/x86/inv_txfm_sse2.h"
#include "vpx_dsp/x86/txfm_common_sse2.h"
@@ -20,7 +21,7 @@
*(int *)(dest) = _mm_cvtsi128_si32(d0); \
}
-void vp9_idct4x4_16_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
+void vpx_idct4x4_16_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
const __m128i zero = _mm_setzero_si128();
const __m128i eight = _mm_set1_epi16(8);
const __m128i cst = _mm_setr_epi16(
@@ -150,7 +151,7 @@ void vp9_idct4x4_16_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
}
}
-void vp9_idct4x4_1_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
+void vpx_idct4x4_1_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
__m128i dc_value;
const __m128i zero = _mm_setzero_si128();
int a;
@@ -448,7 +449,7 @@ void iadst4_sse2(__m128i *in) {
out7 = _mm_subs_epi16(stp1_0, stp2_7); \
}
-void vp9_idct8x8_64_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
+void vpx_idct8x8_64_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
const __m128i zero = _mm_setzero_si128();
const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING);
const __m128i final_rounding = _mm_set1_epi16(1 << 4);
@@ -479,7 +480,7 @@ void vp9_idct8x8_64_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
// 2-D
for (i = 0; i < 2; i++) {
- // 8x8 Transpose is copied from vp9_fdct8x8_sse2()
+ // 8x8 Transpose is copied from vpx_fdct8x8_sse2()
TRANSPOSE_8X8(in0, in1, in2, in3, in4, in5, in6, in7,
in0, in1, in2, in3, in4, in5, in6, in7);
@@ -517,7 +518,7 @@ void vp9_idct8x8_64_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
RECON_AND_STORE(dest + 7 * stride, in7);
}
-void vp9_idct8x8_1_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
+void vpx_idct8x8_1_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
__m128i dc_value;
const __m128i zero = _mm_setzero_si128();
int a;
@@ -554,7 +555,7 @@ void idct8_sse2(__m128i *in) {
__m128i stp2_0, stp2_1, stp2_2, stp2_3, stp2_4, stp2_5, stp2_6, stp2_7;
__m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
- // 8x8 Transpose is copied from vp9_fdct8x8_sse2()
+ // 8x8 Transpose is copied from vpx_fdct8x8_sse2()
TRANSPOSE_8X8(in[0], in[1], in[2], in[3], in[4], in[5], in[6], in[7],
in0, in1, in2, in3, in4, in5, in6, in7);
@@ -791,7 +792,7 @@ void iadst8_sse2(__m128i *in) {
in[7] = _mm_sub_epi16(k__const_0, s1);
}
-void vp9_idct8x8_12_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
+void vpx_idct8x8_12_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
const __m128i zero = _mm_setzero_si128();
const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING);
const __m128i final_rounding = _mm_set1_epi16(1 << 4);
@@ -1168,7 +1169,7 @@ void vp9_idct8x8_12_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
stp2_10, stp2_13, stp2_11, stp2_12) \
}
-void vp9_idct16x16_256_add_sse2(const int16_t *input, uint8_t *dest,
+void vpx_idct16x16_256_add_sse2(const int16_t *input, uint8_t *dest,
int stride) {
const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING);
const __m128i final_rounding = _mm_set1_epi16(1 << 5);
@@ -1293,7 +1294,7 @@ void vp9_idct16x16_256_add_sse2(const int16_t *input, uint8_t *dest,
}
}
-void vp9_idct16x16_1_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
+void vpx_idct16x16_1_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
__m128i dc_value;
const __m128i zero = _mm_setzero_si128();
int a, i;
@@ -2151,7 +2152,7 @@ void iadst16_sse2(__m128i *in0, __m128i *in1) {
iadst16_8col(in1);
}
-void vp9_idct16x16_10_add_sse2(const int16_t *input, uint8_t *dest,
+void vpx_idct16x16_10_add_sse2(const int16_t *input, uint8_t *dest,
int stride) {
const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING);
const __m128i final_rounding = _mm_set1_epi16(1 << 5);
@@ -3028,7 +3029,7 @@ void vp9_idct16x16_10_add_sse2(const int16_t *input, uint8_t *dest,
}
// Only upper-left 8x8 has non-zero coeff
-void vp9_idct32x32_34_add_sse2(const int16_t *input, uint8_t *dest,
+void vpx_idct32x32_34_add_sse2(const int16_t *input, uint8_t *dest,
int stride) {
const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING);
const __m128i final_rounding = _mm_set1_epi16(1<<5);
@@ -3187,7 +3188,7 @@ void vp9_idct32x32_34_add_sse2(const int16_t *input, uint8_t *dest,
}
}
-void vp9_idct32x32_1024_add_sse2(const int16_t *input, uint8_t *dest,
+void vpx_idct32x32_1024_add_sse2(const int16_t *input, uint8_t *dest,
int stride) {
const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING);
const __m128i final_rounding = _mm_set1_epi16(1 << 5);
@@ -3463,7 +3464,7 @@ void vp9_idct32x32_1024_add_sse2(const int16_t *input, uint8_t *dest,
}
}
-void vp9_idct32x32_1_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
+void vpx_idct32x32_1_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
__m128i dc_value;
const __m128i zero = _mm_setzero_si128();
int a, i;
@@ -3497,7 +3498,7 @@ static INLINE __m128i clamp_high_sse2(__m128i value, int bd) {
return retval;
}
-void vp9_highbd_idct4x4_16_add_sse2(const tran_low_t *input, uint8_t *dest8,
+void vpx_highbd_idct4x4_16_add_sse2(const tran_low_t *input, uint8_t *dest8,
int stride, int bd) {
tran_low_t out[4 * 4];
tran_low_t *outptr = out;
@@ -3560,7 +3561,7 @@ void vp9_highbd_idct4x4_16_add_sse2(const tran_low_t *input, uint8_t *dest8,
} else {
// Run the un-optimised row transform
for (i = 0; i < 4; ++i) {
- vp9_highbd_idct4_c(input, outptr, bd);
+ vpx_highbd_idct4_c(input, outptr, bd);
input += 4;
outptr += 4;
}
@@ -3604,7 +3605,7 @@ void vp9_highbd_idct4x4_16_add_sse2(const tran_low_t *input, uint8_t *dest8,
for (i = 0; i < 4; ++i) {
for (j = 0; j < 4; ++j)
temp_in[j] = out[j * 4 + i];
- vp9_highbd_idct4_c(temp_in, temp_out, bd);
+ vpx_highbd_idct4_c(temp_in, temp_out, bd);
for (j = 0; j < 4; ++j) {
dest[j * stride + i] = highbd_clip_pixel_add(
dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 4), bd);
@@ -3613,7 +3614,7 @@ void vp9_highbd_idct4x4_16_add_sse2(const tran_low_t *input, uint8_t *dest8,
}
}
-void vp9_highbd_idct8x8_64_add_sse2(const tran_low_t *input, uint8_t *dest8,
+void vpx_highbd_idct8x8_64_add_sse2(const tran_low_t *input, uint8_t *dest8,
int stride, int bd) {
tran_low_t out[8 * 8];
tran_low_t *outptr = out;
@@ -3678,7 +3679,7 @@ void vp9_highbd_idct8x8_64_add_sse2(const tran_low_t *input, uint8_t *dest8,
} else {
// Run the un-optimised row transform
for (i = 0; i < 8; ++i) {
- vp9_highbd_idct8_c(input, outptr, bd);
+ vpx_highbd_idct8_c(input, outptr, bd);
input += 8;
outptr += 8;
}
@@ -3705,7 +3706,7 @@ void vp9_highbd_idct8x8_64_add_sse2(const tran_low_t *input, uint8_t *dest8,
for (i = 0; i < 8; ++i) {
for (j = 0; j < 8; ++j)
temp_in[j] = out[j * 8 + i];
- vp9_highbd_idct8_c(temp_in, temp_out, bd);
+ vpx_highbd_idct8_c(temp_in, temp_out, bd);
for (j = 0; j < 8; ++j) {
dest[j * stride + i] = highbd_clip_pixel_add(
dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 5), bd);
@@ -3714,7 +3715,7 @@ void vp9_highbd_idct8x8_64_add_sse2(const tran_low_t *input, uint8_t *dest8,
}
}
-void vp9_highbd_idct8x8_10_add_sse2(const tran_low_t *input, uint8_t *dest8,
+void vpx_highbd_idct8x8_10_add_sse2(const tran_low_t *input, uint8_t *dest8,
int stride, int bd) {
tran_low_t out[8 * 8] = { 0 };
tran_low_t *outptr = out;
@@ -3782,7 +3783,7 @@ void vp9_highbd_idct8x8_10_add_sse2(const tran_low_t *input, uint8_t *dest8,
} else {
// Run the un-optimised row transform
for (i = 0; i < 4; ++i) {
- vp9_highbd_idct8_c(input, outptr, bd);
+ vpx_highbd_idct8_c(input, outptr, bd);
input += 8;
outptr += 8;
}
@@ -3809,7 +3810,7 @@ void vp9_highbd_idct8x8_10_add_sse2(const tran_low_t *input, uint8_t *dest8,
for (i = 0; i < 8; ++i) {
for (j = 0; j < 8; ++j)
temp_in[j] = out[j * 8 + i];
- vp9_highbd_idct8_c(temp_in, temp_out, bd);
+ vpx_highbd_idct8_c(temp_in, temp_out, bd);
for (j = 0; j < 8; ++j) {
dest[j * stride + i] = highbd_clip_pixel_add(
dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 5), bd);
@@ -3818,7 +3819,7 @@ void vp9_highbd_idct8x8_10_add_sse2(const tran_low_t *input, uint8_t *dest8,
}
}
-void vp9_highbd_idct16x16_256_add_sse2(const tran_low_t *input, uint8_t *dest8,
+void vpx_highbd_idct16x16_256_add_sse2(const tran_low_t *input, uint8_t *dest8,
int stride, int bd) {
tran_low_t out[16 * 16];
tran_low_t *outptr = out;
@@ -3891,7 +3892,7 @@ void vp9_highbd_idct16x16_256_add_sse2(const tran_low_t *input, uint8_t *dest8,
} else {
// Run the un-optimised row transform
for (i = 0; i < 16; ++i) {
- vp9_highbd_idct16_c(input, outptr, bd);
+ vpx_highbd_idct16_c(input, outptr, bd);
input += 16;
outptr += 16;
}
@@ -3923,7 +3924,7 @@ void vp9_highbd_idct16x16_256_add_sse2(const tran_low_t *input, uint8_t *dest8,
for (i = 0; i < 16; ++i) {
for (j = 0; j < 16; ++j)
temp_in[j] = out[j * 16 + i];
- vp9_highbd_idct16_c(temp_in, temp_out, bd);
+ vpx_highbd_idct16_c(temp_in, temp_out, bd);
for (j = 0; j < 16; ++j) {
dest[j * stride + i] = highbd_clip_pixel_add(
dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 6), bd);
@@ -3932,7 +3933,7 @@ void vp9_highbd_idct16x16_256_add_sse2(const tran_low_t *input, uint8_t *dest8,
}
}
-void vp9_highbd_idct16x16_10_add_sse2(const tran_low_t *input, uint8_t *dest8,
+void vpx_highbd_idct16x16_10_add_sse2(const tran_low_t *input, uint8_t *dest8,
int stride, int bd) {
tran_low_t out[16 * 16] = { 0 };
tran_low_t *outptr = out;
@@ -4010,7 +4011,7 @@ void vp9_highbd_idct16x16_10_add_sse2(const tran_low_t *input, uint8_t *dest8,
} else {
// Run the un-optimised row transform
for (i = 0; i < 4; ++i) {
- vp9_highbd_idct16_c(input, outptr, bd);
+ vpx_highbd_idct16_c(input, outptr, bd);
input += 16;
outptr += 16;
}
@@ -4042,7 +4043,7 @@ void vp9_highbd_idct16x16_10_add_sse2(const tran_low_t *input, uint8_t *dest8,
for (i = 0; i < 16; ++i) {
for (j = 0; j < 16; ++j)
temp_in[j] = out[j * 16 + i];
- vp9_highbd_idct16_c(temp_in, temp_out, bd);
+ vpx_highbd_idct16_c(temp_in, temp_out, bd);
for (j = 0; j < 16; ++j) {
dest[j * stride + i] = highbd_clip_pixel_add(
dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 6), bd); |