Index: source/libvpx/vpx_dsp/mips/vpx_convolve8_avg_vert_msa.c
diff --git a/source/libvpx/vpx_dsp/mips/vpx_convolve8_avg_vert_msa.c b/source/libvpx/vpx_dsp/mips/vpx_convolve8_avg_vert_msa.c
index 4d4b3d9e353864f9e897ce681fcb7639e71d69a1..0164e41aa161a3c5614b86c2ac71687089a29083 100644
--- a/source/libvpx/vpx_dsp/mips/vpx_convolve8_avg_vert_msa.c
+++ b/source/libvpx/vpx_dsp/mips/vpx_convolve8_avg_vert_msa.c
@@ -8,6 +8,7 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */

+#include <assert.h>
 #include "./vpx_dsp_rtcd.h"
 #include "vpx_dsp/mips/vpx_convolve_msa.h"

@@ -283,7 +284,6 @@ static void common_vt_2t_and_aver_dst_4x4_msa(const uint8_t *src,
   ILVR_D2_UB(src21_r, src10_r, src43_r, src32_r, src2110, src4332);
   DOTP_UB2_UH(src2110, src4332, filt0, filt0, tmp0, tmp1);
   SRARI_H2_UH(tmp0, tmp1, FILTER_BITS);
-  SAT_UH2_UH(tmp0, tmp1, 7);

   out = (v16u8)__msa_pckev_b((v16i8)tmp1, (v16i8)tmp0);
   out = __msa_aver_u_b(out, dst0);
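Note on the SAT_UH removals (this hunk and all the ones that follow): each deleted SAT_UH*_UH immediately follows an SRARI_H*_UH by FILTER_BITS in a 2-tap (bilinear) path. The following standalone scalar sketch of the per-lane arithmetic — not part of the patch, and assuming FILTER_BITS == 7 and two non-negative taps summing to 1 << FILTER_BITS, both of which hold for the vpx_dsp bilinear filters — shows why the saturation was a no-op:

/* Standalone sketch: worst-case value analysis for one lane of the
 * 2-tap vertical filter. */
#include <assert.h>
#include <stdint.h>

#define FILTER_BITS 7 /* matches vpx_dsp/vpx_filter.h */

/* Scalar equivalent of DOTP_UB2_UH followed by SRARI_H2_UH. */
static uint16_t filt_2t_round(uint8_t s0, uint8_t s1, uint8_t f0, uint8_t f1) {
  const uint32_t dot = (uint32_t)s0 * f0 + (uint32_t)s1 * f1;
  return (uint16_t)((dot + (1 << (FILTER_BITS - 1))) >> FILTER_BITS);
}

int main(void) {
  /* Worst case: both pixels at 255, taps summing to 1 << FILTER_BITS.
   * dot <= 255 * 128 = 32640, so after rounding and shifting the lane
   * is at most 255 and SAT_UH*_UH(..., 7), which clamps unsigned
   * halfwords to 255, could never clip anything. */
  assert(filt_2t_round(255, 255, 64, 64) == 255);
  return 0;
}

The same bound covers every variant below, which is why the patch drops the saturation uniformly from the 4-, 8-, 16-, 32- and 64-wide paths.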
@@ -323,7 +323,6 @@ static void common_vt_2t_and_aver_dst_4x8_msa(const uint8_t *src,
   DOTP_UB4_UH(src2110, src4332, src6554, src8776, filt0, filt0, filt0, filt0,
               tmp0, tmp1, tmp2, tmp3);
   SRARI_H4_UH(tmp0, tmp1, tmp2, tmp3, FILTER_BITS);
-  SAT_UH4_UH(tmp0, tmp1, tmp2, tmp3, 7);
   PCKEV_B2_UB(tmp1, tmp0, tmp3, tmp2, src2110, src4332);
   AVER_UB2_UB(src2110, dst0, src4332, dst1, src2110, src4332);
   ST4x4_UB(src2110, src2110, 0, 1, 2, 3, dst, dst_stride);
@@ -365,7 +364,6 @@ static void common_vt_2t_and_aver_dst_8x4_msa(const uint8_t *src,
   DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, tmp0, tmp1,
               tmp2, tmp3);
   SRARI_H4_UH(tmp0, tmp1, tmp2, tmp3, FILTER_BITS);
-  SAT_UH4_UH(tmp0, tmp1, tmp2, tmp3, 7);
   PCKEV_AVG_ST8x4_UB(tmp0, dst0, tmp1, dst1, tmp2, dst2, tmp3, dst3,
                      dst, dst_stride);
 }
@@ -402,7 +400,6 @@ static void common_vt_2t_and_aver_dst_8x8mult_msa(const uint8_t *src,
     DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, tmp0, tmp1,
                 tmp2, tmp3);
     SRARI_H4_UH(tmp0, tmp1, tmp2, tmp3, FILTER_BITS);
-    SAT_UH4_UH(tmp0, tmp1, tmp2, tmp3, 7);
     PCKEV_AVG_ST8x4_UB(tmp0, dst1, tmp1, dst2, tmp2, dst3, tmp3, dst4,
                        dst, dst_stride);
     dst += (4 * dst_stride);
@@ -410,7 +407,6 @@ static void common_vt_2t_and_aver_dst_8x8mult_msa(const uint8_t *src,
     DOTP_UB4_UH(vec4, vec5, vec6, vec7, filt0, filt0, filt0, filt0, tmp0, tmp1,
                 tmp2, tmp3);
     SRARI_H4_UH(tmp0, tmp1, tmp2, tmp3, FILTER_BITS);
-    SAT_UH4_UH(tmp0, tmp1, tmp2, tmp3, 7);
     PCKEV_AVG_ST8x4_UB(tmp0, dst5, tmp1, dst6, tmp2, dst7, tmp3, dst8,
                        dst, dst_stride);
     dst += (4 * dst_stride);
@@ -460,7 +456,6 @@ static void common_vt_2t_and_aver_dst_16w_msa(const uint8_t *src,
     ILVL_B2_UB(src1, src0, src2, src1, vec1, vec3);
     DOTP_UB2_UH(vec0, vec1, filt0, filt0, tmp0, tmp1);
     SRARI_H2_UH(tmp0, tmp1, FILTER_BITS);
-    SAT_UH2_UH(tmp0, tmp1, 7);
     PCKEV_AVG_ST_UB(tmp1, tmp0, dst0, dst);
     dst += dst_stride;

@@ -468,19 +463,16 @@ static void common_vt_2t_and_aver_dst_16w_msa(const uint8_t *src,
     ILVL_B2_UB(src3, src2, src4, src3, vec5, vec7);
     DOTP_UB2_UH(vec2, vec3, filt0, filt0, tmp2, tmp3);
     SRARI_H2_UH(tmp2, tmp3, FILTER_BITS);
-    SAT_UH2_UH(tmp2, tmp3, 7);
     PCKEV_AVG_ST_UB(tmp3, tmp2, dst1, dst);
     dst += dst_stride;

     DOTP_UB2_UH(vec4, vec5, filt0, filt0, tmp0, tmp1);
     SRARI_H2_UH(tmp0, tmp1, FILTER_BITS);
-    SAT_UH2_UH(tmp0, tmp1, 7);
     PCKEV_AVG_ST_UB(tmp1, tmp0, dst2, dst);
     dst += dst_stride;

     DOTP_UB2_UH(vec6, vec7, filt0, filt0, tmp2, tmp3);
     SRARI_H2_UH(tmp2, tmp3, FILTER_BITS);
-    SAT_UH2_UH(tmp2, tmp3, 7);
     PCKEV_AVG_ST_UB(tmp3, tmp2, dst3, dst);
     dst += dst_stride;

@@ -519,48 +511,40 @@ static void common_vt_2t_and_aver_dst_32w_msa(const uint8_t *src,

     DOTP_UB2_UH(vec0, vec1, filt0, filt0, tmp0, tmp1);
     SRARI_H2_UH(tmp0, tmp1, FILTER_BITS);
-    SAT_UH2_UH(tmp0, tmp1, 7);
     PCKEV_AVG_ST_UB(tmp1, tmp0, dst0, dst);

     DOTP_UB2_UH(vec2, vec3, filt0, filt0, tmp2, tmp3);
     SRARI_H2_UH(tmp2, tmp3, FILTER_BITS);
-    SAT_UH2_UH(tmp2, tmp3, 7);
     PCKEV_AVG_ST_UB(tmp3, tmp2, dst1, dst + dst_stride);

     ILVR_B2_UB(src3, src2, src4, src3, vec4, vec6);
     ILVL_B2_UB(src3, src2, src4, src3, vec5, vec7);
     DOTP_UB2_UH(vec4, vec5, filt0, filt0, tmp0, tmp1);
     SRARI_H2_UH(tmp0, tmp1, FILTER_BITS);
-    SAT_UH2_UH(tmp0, tmp1, 7);
     PCKEV_AVG_ST_UB(tmp1, tmp0, dst2, dst + 2 * dst_stride);

     DOTP_UB2_UH(vec6, vec7, filt0, filt0, tmp2, tmp3);
     SRARI_H2_UH(tmp2, tmp3, FILTER_BITS);
-    SAT_UH2_UH(tmp2, tmp3, 7);
     PCKEV_AVG_ST_UB(tmp3, tmp2, dst3, dst + 3 * dst_stride);

     ILVR_B2_UB(src6, src5, src7, src6, vec0, vec2);
     ILVL_B2_UB(src6, src5, src7, src6, vec1, vec3);
     DOTP_UB2_UH(vec0, vec1, filt0, filt0, tmp0, tmp1);
     SRARI_H2_UH(tmp0, tmp1, FILTER_BITS);
-    SAT_UH2_UH(tmp0, tmp1, 7);
     PCKEV_AVG_ST_UB(tmp1, tmp0, dst4, dst + 16);

     DOTP_UB2_UH(vec2, vec3, filt0, filt0, tmp2, tmp3);
     SRARI_H2_UH(tmp2, tmp3, FILTER_BITS);
-    SAT_UH2_UH(tmp2, tmp3, 7);
     PCKEV_AVG_ST_UB(tmp3, tmp2, dst5, dst + 16 + dst_stride);

     ILVR_B2_UB(src8, src7, src9, src8, vec4, vec6);
     ILVL_B2_UB(src8, src7, src9, src8, vec5, vec7);
     DOTP_UB2_UH(vec4, vec5, filt0, filt0, tmp0, tmp1);
     SRARI_H2_UH(tmp0, tmp1, FILTER_BITS);
-    SAT_UH2_UH(tmp0, tmp1, 7);
     PCKEV_AVG_ST_UB(tmp1, tmp0, dst6, dst + 16 + 2 * dst_stride);

     DOTP_UB2_UH(vec6, vec7, filt0, filt0, tmp2, tmp3);
     SRARI_H2_UH(tmp2, tmp3, FILTER_BITS);
-    SAT_UH2_UH(tmp2, tmp3, 7);
     PCKEV_AVG_ST_UB(tmp3, tmp2, dst7, dst + 16 + 3 * dst_stride);
     dst += (4 * dst_stride);

@@ -605,48 +589,40 @@ static void common_vt_2t_and_aver_dst_64w_msa(const uint8_t *src,
     ILVL_B2_UB(src1, src0, src2, src1, vec1, vec3);
     DOTP_UB2_UH(vec0, vec1, filt0, filt0, tmp0, tmp1);
     SRARI_H2_UH(tmp0, tmp1, FILTER_BITS);
-    SAT_UH2_UH(tmp0, tmp1, 7);
     PCKEV_AVG_ST_UB(tmp1, tmp0, dst0, dst);

     DOTP_UB2_UH(vec2, vec3, filt0, filt0, tmp2, tmp3);
     SRARI_H2_UH(tmp2, tmp3, FILTER_BITS);
-    SAT_UH2_UH(tmp2, tmp3, 7);
     PCKEV_AVG_ST_UB(tmp3, tmp2, dst1, dst + dst_stride);

     ILVR_B2_UB(src4, src3, src5, src4, vec4, vec6);
     ILVL_B2_UB(src4, src3, src5, src4, vec5, vec7);
     DOTP_UB2_UH(vec4, vec5, filt0, filt0, tmp4, tmp5);
     SRARI_H2_UH(tmp4, tmp5, FILTER_BITS);
-    SAT_UH2_UH(tmp4, tmp5, 7);
     PCKEV_AVG_ST_UB(tmp5, tmp4, dst2, dst + 16);

     DOTP_UB2_UH(vec6, vec7, filt0, filt0, tmp6, tmp7);
     SRARI_H2_UH(tmp6, tmp7, FILTER_BITS);
-    SAT_UH2_UH(tmp6, tmp7, 7);
     PCKEV_AVG_ST_UB(tmp7, tmp6, dst3, dst + 16 + dst_stride);

     ILVR_B2_UB(src7, src6, src8, src7, vec0, vec2);
     ILVL_B2_UB(src7, src6, src8, src7, vec1, vec3);
     DOTP_UB2_UH(vec0, vec1, filt0, filt0, tmp0, tmp1);
     SRARI_H2_UH(tmp0, tmp1, FILTER_BITS);
-    SAT_UH2_UH(tmp0, tmp1, 7);
     PCKEV_AVG_ST_UB(tmp1, tmp0, dst4, dst + 32);

     DOTP_UB2_UH(vec2, vec3, filt0, filt0, tmp2, tmp3);
     SRARI_H2_UH(tmp2, tmp3, FILTER_BITS);
-    SAT_UH2_UH(tmp2, tmp3, 7);
     PCKEV_AVG_ST_UB(tmp3, tmp2, dst5, dst + 32 + dst_stride);

     ILVR_B2_UB(src10, src9, src11, src10, vec4, vec6);
     ILVL_B2_UB(src10, src9, src11, src10, vec5, vec7);
     DOTP_UB2_UH(vec4, vec5, filt0, filt0, tmp4, tmp5);
     SRARI_H2_UH(tmp4, tmp5, FILTER_BITS);
-    SAT_UH2_UH(tmp4, tmp5, 7);
     PCKEV_AVG_ST_UB(tmp5, tmp4, dst6, (dst + 48));

     DOTP_UB2_UH(vec6, vec7, filt0, filt0, tmp6, tmp7);
     SRARI_H2_UH(tmp6, tmp7, FILTER_BITS);
-    SAT_UH2_UH(tmp6, tmp7, 7);
     PCKEV_AVG_ST_UB(tmp7, tmp6, dst7, dst + 48 + dst_stride);
     dst += (2 * dst_stride);

@@ -664,19 +640,8 @@ void vpx_convolve8_avg_vert_msa(const uint8_t *src, ptrdiff_t src_stride,
                                 int w, int h) {
   int8_t cnt, filt_ver[8];

-  if (16 != y_step_q4) {
-    vpx_convolve8_avg_vert_c(src, src_stride, dst, dst_stride,
-                             filter_x, x_step_q4, filter_y, y_step_q4,
-                             w, h);
-    return;
-  }
-
-  if (((const int32_t *)filter_y)[1] == 0x800000) {
-    vpx_convolve_avg(src, src_stride, dst, dst_stride,
-                     filter_x, x_step_q4, filter_y, y_step_q4,
-                     w, h);
-    return;
-  }
+  assert(y_step_q4 == 16);
+  assert(((const int32_t *)filter_y)[1] != 0x800000);

   for (cnt = 0; cnt < 8; ++cnt) {
     filt_ver[cnt] = filter_y[cnt];
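With the runtime fallbacks replaced by asserts, the contract moves to the callers: they must invoke the MSA version only when no vertical scaling is requested (y_step_q4 == 16) and filter_y is not the pure copy filter, routing those cases to vpx_convolve8_avg_vert_c and vpx_convolve_avg respectively, exactly as the removed code did. The 0x800000 test reads taps 2 and 3 of the int16_t filter as one 32-bit word; on a little-endian target that value means tap 3 == 128 (the full 1 << FILTER_BITS weight) with tap 2 == 0, i.e. the identity filter. A standalone sketch of that test — is_copy_filter is an illustrative name, not a libvpx function, and the filter values below are examples in the vpx 8-tap layout:

/* Standalone sketch, not part of the patch: what the 0x800000
 * comparison means. Assumes a little-endian target, as the original
 * pointer cast does. */
#include <assert.h>
#include <stdint.h>
#include <string.h>

static int is_copy_filter(const int16_t *filter_y) {
  int32_t word1;
  memcpy(&word1, filter_y + 2, sizeof(word1)); /* taps 2 and 3 */
  /* (tap3 << 16) | tap2 == 0x800000 means tap3 == 128, tap2 == 0. */
  return word1 == 0x800000;
}

int main(void) {
  static const int16_t identity[8] = { 0, 0, 0, 128, 0, 0, 0, 0 };
  static const int16_t bilinear[8] = { 0, 0, 0, 96, 32, 0, 0, 0 };
  assert(is_copy_filter(identity));  /* old code routed this to vpx_convolve_avg */
  assert(!is_copy_filter(bilinear)); /* MSA path; the new assert enforces it */
  return 0;
}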