Index: source/libvpx/vpx_dsp/mips/idct16x16_msa.c
diff --git a/source/libvpx/vpx_dsp/mips/idct16x16_msa.c b/source/libvpx/vpx_dsp/mips/idct16x16_msa.c
index 3da0edccabf718e1c53f86758e4e8e77c04dace0..5faac715e8ceb29e6942e63224886676f19f9ae3 100644
--- a/source/libvpx/vpx_dsp/mips/idct16x16_msa.c
+++ b/source/libvpx/vpx_dsp/mips/idct16x16_msa.c
@@ -10,7 +10,7 @@

 #include "vpx_dsp/mips/inv_txfm_msa.h"

-void vp9_idct16_1d_rows_msa(const int16_t *input, int16_t *output) {
+void vpx_idct16_1d_rows_msa(const int16_t *input, int16_t *output) {
   v8i16 loc0, loc1, loc2, loc3;
   v8i16 reg0, reg2, reg4, reg6, reg8, reg10, reg12, reg14;
   v8i16 reg3, reg13, reg11, reg5, reg7, reg9, reg1, reg15;
@@ -103,7 +103,7 @@ void vp9_idct16_1d_rows_msa(const int16_t *input, int16_t *output) {
   ST_SH8(reg3, reg13, reg11, reg5, reg7, reg9, reg1, reg15, (output + 8), 16);
 }

-void vp9_idct16_1d_columns_addblk_msa(int16_t *input, uint8_t *dst,
+void vpx_idct16_1d_columns_addblk_msa(int16_t *input, uint8_t *dst,
                                       int32_t dst_stride) {
   v8i16 loc0, loc1, loc2, loc3;
   v8i16 reg0, reg2, reg4, reg6, reg8, reg10, reg12, reg14;
@@ -201,7 +201,7 @@ void vp9_idct16_1d_columns_addblk_msa(int16_t *input, uint8_t *dst,
   VP9_ADDBLK_ST8x4_UB(dst, dst_stride, reg7, reg9, reg1, reg15);
 }

-void vp9_idct16x16_256_add_msa(const int16_t *input, uint8_t *dst,
+void vpx_idct16x16_256_add_msa(const int16_t *input, uint8_t *dst,
                                int32_t dst_stride) {
   int32_t i;
   DECLARE_ALIGNED(32, int16_t, out_arr[16 * 16]);
@@ -210,25 +210,25 @@ void vp9_idct16x16_256_add_msa(const int16_t *input, uint8_t *dst,
   /* transform rows */
   for (i = 0; i < 2; ++i) {
     /* process 16 * 8 block */
-    vp9_idct16_1d_rows_msa((input + (i << 7)), (out + (i << 7)));
+    vpx_idct16_1d_rows_msa((input + (i << 7)), (out + (i << 7)));
   }

   /* transform columns */
   for (i = 0; i < 2; ++i) {
     /* process 8 * 16 block */
-    vp9_idct16_1d_columns_addblk_msa((out + (i << 3)), (dst + (i << 3)),
+    vpx_idct16_1d_columns_addblk_msa((out + (i << 3)), (dst + (i << 3)),
                                      dst_stride);
   }
 }

-void vp9_idct16x16_10_add_msa(const int16_t *input, uint8_t *dst,
+void vpx_idct16x16_10_add_msa(const int16_t *input, uint8_t *dst,
                               int32_t dst_stride) {
   uint8_t i;
   DECLARE_ALIGNED(32, int16_t, out_arr[16 * 16]);
   int16_t *out = out_arr;

   /* process 16 * 8 block */
-  vp9_idct16_1d_rows_msa(input, out);
+  vpx_idct16_1d_rows_msa(input, out);

   /* short case just considers top 4 rows as valid output */
   out += 4 * 16;
@@ -255,12 +255,12 @@ void vp9_idct16x16_10_add_msa(const int16_t *input, uint8_t *dst,
   /* transform columns */
   for (i = 0; i < 2; ++i) {
     /* process 8 * 16 block */
-    vp9_idct16_1d_columns_addblk_msa((out + (i << 3)), (dst + (i << 3)),
+    vpx_idct16_1d_columns_addblk_msa((out + (i << 3)), (dst + (i << 3)),
                                      dst_stride);
   }
 }

-void vp9_idct16x16_1_add_msa(const int16_t *input, uint8_t *dst,
+void vpx_idct16x16_1_add_msa(const int16_t *input, uint8_t *dst,
                              int32_t dst_stride) {
   uint8_t i;
   int16_t out;
@@ -290,7 +290,7 @@ void vp9_idct16x16_1_add_msa(const int16_t *input, uint8_t *dst,
   }
 }

-void vp9_iadst16_1d_rows_msa(const int16_t *input, int16_t *output) {
+void vpx_iadst16_1d_rows_msa(const int16_t *input, int16_t *output) {
   v8i16 r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, r13, r14, r15;
   v8i16 l0, l1, l2, l3, l4, l5, l6, l7, l8, l9, l10, l11, l12, l13, l14, l15;

@@ -321,7 +321,7 @@ void vp9_iadst16_1d_rows_msa(const int16_t *input, int16_t *output) {
   ST_SH8(l8, l9, l10, l11, l12, l13, l14, l15, (output + 8), 16);
 }

-void vp9_iadst16_1d_columns_addblk_msa(int16_t *input, uint8_t *dst,
+void vpx_iadst16_1d_columns_addblk_msa(int16_t *input, uint8_t *dst,
                                        int32_t dst_stride) {
   v8i16 v0, v2, v4, v6, k0, k1, k2, k3;
   v8i16 r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, r13, r14, r15;
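
Note: this patch is a pure rename; every vp9_-prefixed entry point in this file becomes vpx_-prefixed with its signature unchanged, matching the move of these inverse transforms into vpx_dsp. A minimal caller sketch for the renamed top-level function follows. The prototype is copied from the diff; the buffer setup and main() harness are illustrative assumptions, in-tree the declaration comes from the libvpx headers rather than being repeated by hand, and the sketch assumes a MIPS build with MSA enabled, since that is the only configuration in which the _msa specializations exist.

#include <stdint.h>
#include <string.h>

/* Prototype as it reads after this patch (repeated here only to keep
 * the sketch self-contained; in-tree it comes from the libvpx headers). */
void vpx_idct16x16_256_add_msa(const int16_t *input, uint8_t *dst,
                               int32_t dst_stride);

int main(void) {
  /* 16x16 block of dequantized coefficients, row-major; the row pass in
   * the diff consumes it as two 16x8 halves (offsets 0 and 1 << 7). */
  int16_t coeffs[16 * 16] = { 0 };
  /* Destination pixels; the inverse transform adds its output into these. */
  uint8_t dst[16 * 16];

  memset(dst, 128, sizeof(dst));
  coeffs[0] = 64; /* DC-only input makes a simple smoke test */

  /* dst_stride is in pixels: 16 for a packed 16x16 destination. */
  vpx_idct16x16_256_add_msa(coeffs, dst, 16);
  return 0;
}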
|