Chromium Code Reviews

Unified Diff: source/libvpx/vp9/common/mips/msa/vp9_convolve8_vert_msa.c

Issue 1169543007: libvpx: Pull from upstream (Closed)
Base URL: https://chromium.googlesource.com/chromium/deps/libvpx.git@master
Patch Set: Created 5 years, 6 months ago
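
Note on the pattern throughout this file: the old file-local MSA helpers (LOAD_SH, LOAD_7VECS_SB, ILVR_B_6VECS_SB, SRARI_SATURATE_SIGNED_H, ...) are replaced with the grouped macros that upstream libvpx shares across its MIPS MSA code (LD_SH, LD_SB7, ILVR_B4_SB, SRARI_H4_SH plus SAT_SH4_SH, ...). The filtering math is unchanged; each grouped macro folds several one-intrinsic statements into a single call. For example, judging from the four __msa_splati_h statements it replaces below, SPLATI_H4_SB presumably expands along these lines (a sketch, not the verbatim upstream definition):

/* Sketch of the grouped splat: broadcast four halfword lanes of `in`
   into four byte vectors. The real definition lives in the shared MSA
   macros header pulled in by this change. */
#define SPLATI_H4_SB(in, idx0, idx1, idx2, idx3, out0, out1, out2, out3) \
  {                                                                      \
    out0 = (v16i8)__msa_splati_h((v8i16)(in), (idx0));                   \
    out1 = (v16i8)__msa_splati_h((v8i16)(in), (idx1));                   \
    out2 = (v16i8)__msa_splati_h((v8i16)(in), (idx2));                   \
    out3 = (v16i8)__msa_splati_h((v8i16)(in), (idx3));                   \
  }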
Index: source/libvpx/vp9/common/mips/msa/vp9_convolve8_vert_msa.c
diff --git a/source/libvpx/vp9/common/mips/msa/vp9_convolve8_vert_msa.c b/source/libvpx/vp9/common/mips/msa/vp9_convolve8_vert_msa.c
index 6b71ec1c0e42ec9a31490e65c80eb016241c8a68..e9ec2507a0a3b8ca5d20a95ef1ef795737a5d158 100644
--- a/source/libvpx/vp9/common/mips/msa/vp9_convolve8_vert_msa.c
+++ b/source/libvpx/vp9/common/mips/msa/vp9_convolve8_vert_msa.c
@@ -16,58 +16,48 @@ static void common_vt_8t_4w_msa(const uint8_t *src, int32_t src_stride,
int8_t *filter, int32_t height) {
uint32_t loop_cnt;
v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
- v16i8 src10_r, src32_r, src54_r, src76_r, src98_r;
- v16i8 src21_r, src43_r, src65_r, src87_r, src109_r;
- v16i8 src2110, src4332, src6554, src8776, src10998;
+ v16i8 src10_r, src32_r, src54_r, src76_r, src98_r, src21_r, src43_r;
+ v16i8 src65_r, src87_r, src109_r, src2110, src4332, src6554, src8776;
+ v16i8 src10998, filt0, filt1, filt2, filt3;
+ v16u8 out;
v8i16 filt, out10, out32;
- v16i8 filt0, filt1, filt2, filt3;
src -= (3 * src_stride);
- filt = LOAD_SH(filter);
- filt0 = (v16i8)__msa_splati_h(filt, 0);
- filt1 = (v16i8)__msa_splati_h(filt, 1);
- filt2 = (v16i8)__msa_splati_h(filt, 2);
- filt3 = (v16i8)__msa_splati_h(filt, 3);
+ filt = LD_SH(filter);
+ SPLATI_H4_SB(filt, 0, 1, 2, 3, filt0, filt1, filt2, filt3);
- LOAD_7VECS_SB(src, src_stride, src0, src1, src2, src3, src4, src5, src6);
+ LD_SB7(src, src_stride, src0, src1, src2, src3, src4, src5, src6);
src += (7 * src_stride);
- ILVR_B_6VECS_SB(src0, src2, src4, src1, src3, src5,
- src1, src3, src5, src2, src4, src6,
- src10_r, src32_r, src54_r, src21_r, src43_r, src65_r);
-
- ILVR_D_3VECS_SB(src2110, src21_r, src10_r, src4332, src43_r, src32_r,
- src6554, src65_r, src54_r);
-
- XORI_B_3VECS_SB(src2110, src4332, src6554, src2110, src4332, src6554, 128);
+ ILVR_B4_SB(src1, src0, src3, src2, src5, src4, src2, src1, src10_r, src32_r,
+ src54_r, src21_r);
+ ILVR_B2_SB(src4, src3, src6, src5, src43_r, src65_r);
+ ILVR_D3_SB(src21_r, src10_r, src43_r, src32_r, src65_r, src54_r, src2110,
+ src4332, src6554);
+ XORI_B3_128_SB(src2110, src4332, src6554);
for (loop_cnt = (height >> 2); loop_cnt--;) {
- LOAD_4VECS_SB(src, src_stride, src7, src8, src9, src10);
+ LD_SB4(src, src_stride, src7, src8, src9, src10);
src += (4 * src_stride);
- ILVR_B_4VECS_SB(src6, src7, src8, src9, src7, src8, src9, src10,
- src76_r, src87_r, src98_r, src109_r);
-
- ILVR_D_2VECS_SB(src8776, src87_r, src76_r, src10998, src109_r, src98_r);
-
- XORI_B_2VECS_SB(src8776, src10998, src8776, src10998, 128);
-
- out10 = FILT_8TAP_DPADD_S_H(src2110, src4332, src6554, src8776,
- filt0, filt1, filt2, filt3);
- out32 = FILT_8TAP_DPADD_S_H(src4332, src6554, src8776, src10998,
- filt0, filt1, filt2, filt3);
-
- out10 = SRARI_SATURATE_SIGNED_H(out10, FILTER_BITS, 7);
- out32 = SRARI_SATURATE_SIGNED_H(out32, FILTER_BITS, 7);
-
- PCKEV_2B_XORI128_STORE_4_BYTES_4(out10, out32, dst, dst_stride);
+ ILVR_B4_SB(src7, src6, src8, src7, src9, src8, src10, src9, src76_r,
+ src87_r, src98_r, src109_r);
+ ILVR_D2_SB(src87_r, src76_r, src109_r, src98_r, src8776, src10998);
+ XORI_B2_128_SB(src8776, src10998);
+ out10 = FILT_8TAP_DPADD_S_H(src2110, src4332, src6554, src8776, filt0,
+ filt1, filt2, filt3);
+ out32 = FILT_8TAP_DPADD_S_H(src4332, src6554, src8776, src10998, filt0,
+ filt1, filt2, filt3);
+ SRARI_H2_SH(out10, out32, FILTER_BITS);
+ SAT_SH2_SH(out10, out32, 7);
+ out = PCKEV_XORI128_UB(out10, out32);
+ ST4x4_UB(out, out, 0, 1, 2, 3, dst, dst_stride);
dst += (4 * dst_stride);
src2110 = src6554;
src4332 = src8776;
src6554 = src10998;
-
src6 = src10;
}
}
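
The interleave groups follow the same convention: operands come in (high, low) pairs, so ILVR_B4_SB(src1, src0, ...) yields src10_r and so on, matching the removed ILVR_B_6VECS_SB ordering. A minimal sketch of the two-output byte and doubleword variants, assuming one __msa_ilvr_* per pair:

/* Sketch: right-interleave of each (hi, lo) pair. ILVR_B merges the
   low 8 bytes of both operands byte by byte; ILVR_D concatenates the
   low doublewords. */
#define ILVR_B2_SB(in0, in1, in2, in3, out0, out1)   \
  {                                                  \
    out0 = __msa_ilvr_b((v16i8)(in0), (v16i8)(in1)); \
    out1 = __msa_ilvr_b((v16i8)(in2), (v16i8)(in3)); \
  }
#define ILVR_D2_SB(in0, in1, in2, in3, out0, out1)          \
  {                                                         \
    out0 = (v16i8)__msa_ilvr_d((v2i64)(in0), (v2i64)(in1)); \
    out1 = (v16i8)__msa_ilvr_d((v2i64)(in2), (v2i64)(in3)); \
  }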
@@ -77,54 +67,115 @@ static void common_vt_8t_8w_msa(const uint8_t *src, int32_t src_stride,
int8_t *filter, int32_t height) {
uint32_t loop_cnt;
v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
- v16i8 src10_r, src32_r, src54_r, src76_r, src98_r;
- v16i8 src21_r, src43_r, src65_r, src87_r, src109_r;
- v16i8 filt0, filt1, filt2, filt3;
+ v16i8 src10_r, src32_r, src54_r, src76_r, src98_r, src21_r, src43_r;
+ v16i8 src65_r, src87_r, src109_r, filt0, filt1, filt2, filt3;
+ v16u8 tmp0, tmp1;
v8i16 filt, out0_r, out1_r, out2_r, out3_r;
src -= (3 * src_stride);
- filt = LOAD_SH(filter);
- filt0 = (v16i8)__msa_splati_h(filt, 0);
- filt1 = (v16i8)__msa_splati_h(filt, 1);
- filt2 = (v16i8)__msa_splati_h(filt, 2);
- filt3 = (v16i8)__msa_splati_h(filt, 3);
+ filt = LD_SH(filter);
+ SPLATI_H4_SB(filt, 0, 1, 2, 3, filt0, filt1, filt2, filt3);
- LOAD_7VECS_SB(src, src_stride, src0, src1, src2, src3, src4, src5, src6);
+ LD_SB7(src, src_stride, src0, src1, src2, src3, src4, src5, src6);
+ XORI_B7_128_SB(src0, src1, src2, src3, src4, src5, src6);
src += (7 * src_stride);
-
- XORI_B_7VECS_SB(src0, src1, src2, src3, src4, src5, src6,
- src0, src1, src2, src3, src4, src5, src6, 128);
-
- ILVR_B_6VECS_SB(src0, src2, src4, src1, src3, src5,
- src1, src3, src5, src2, src4, src6,
- src10_r, src32_r, src54_r, src21_r, src43_r, src65_r);
+ ILVR_B4_SB(src1, src0, src3, src2, src5, src4, src2, src1, src10_r, src32_r,
+ src54_r, src21_r);
+ ILVR_B2_SB(src4, src3, src6, src5, src43_r, src65_r);
for (loop_cnt = (height >> 2); loop_cnt--;) {
- LOAD_4VECS_SB(src, src_stride, src7, src8, src9, src10);
+ LD_SB4(src, src_stride, src7, src8, src9, src10);
+ XORI_B4_128_SB(src7, src8, src9, src10);
src += (4 * src_stride);
- XORI_B_4VECS_SB(src7, src8, src9, src10, src7, src8, src9, src10, 128);
+ ILVR_B4_SB(src7, src6, src8, src7, src9, src8, src10, src9, src76_r,
+ src87_r, src98_r, src109_r);
+ out0_r = FILT_8TAP_DPADD_S_H(src10_r, src32_r, src54_r, src76_r, filt0,
+ filt1, filt2, filt3);
+ out1_r = FILT_8TAP_DPADD_S_H(src21_r, src43_r, src65_r, src87_r, filt0,
+ filt1, filt2, filt3);
+ out2_r = FILT_8TAP_DPADD_S_H(src32_r, src54_r, src76_r, src98_r, filt0,
+ filt1, filt2, filt3);
+ out3_r = FILT_8TAP_DPADD_S_H(src43_r, src65_r, src87_r, src109_r, filt0,
+ filt1, filt2, filt3);
+ SRARI_H4_SH(out0_r, out1_r, out2_r, out3_r, FILTER_BITS);
+ SAT_SH4_SH(out0_r, out1_r, out2_r, out3_r, 7);
+ tmp0 = PCKEV_XORI128_UB(out0_r, out1_r);
+ tmp1 = PCKEV_XORI128_UB(out2_r, out3_r);
+ ST8x4_UB(tmp0, tmp1, dst, dst_stride);
+ dst += (4 * dst_stride);
- ILVR_B_4VECS_SB(src6, src7, src8, src9, src7, src8, src9, src10,
- src76_r, src87_r, src98_r, src109_r);
+ src10_r = src54_r;
+ src32_r = src76_r;
+ src54_r = src98_r;
+ src21_r = src65_r;
+ src43_r = src87_r;
+ src65_r = src109_r;
+ src6 = src10;
+ }
+}
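
FILT_8TAP_DPADD_S_H itself is untouched by this patch; it is defined outside this file. For review context, the usual shape of such an 8-tap step is two signed dot products plus two dot-product accumulates combined with a saturating add, roughly as below (a sketch only; it relies on GNU statement expressions, which this MSA code already assumes):

/* Sketch: 8-tap filter as four byte-pair dot products against the
   four splatted filter-coefficient pairs, accumulated with
   saturation. */
#define FILT_8TAP_DPADD_S_H(vec0, vec1, vec2, vec3,                  \
                            filt0, filt1, filt2, filt3)              \
  ({                                                                 \
    v8i16 tmp0_m, tmp1_m;                                            \
    tmp0_m = __msa_dotp_s_h((v16i8)(vec0), (v16i8)(filt0));          \
    tmp0_m = __msa_dpadd_s_h(tmp0_m, (v16i8)(vec1), (v16i8)(filt1)); \
    tmp1_m = __msa_dotp_s_h((v16i8)(vec2), (v16i8)(filt2));          \
    tmp1_m = __msa_dpadd_s_h(tmp1_m, (v16i8)(vec3), (v16i8)(filt3)); \
    __msa_adds_s_h(tmp0_m, tmp1_m);                                  \
  })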
- out0_r = FILT_8TAP_DPADD_S_H(src10_r, src32_r, src54_r, src76_r,
- filt0, filt1, filt2, filt3);
- out1_r = FILT_8TAP_DPADD_S_H(src21_r, src43_r, src65_r, src87_r,
- filt0, filt1, filt2, filt3);
- out2_r = FILT_8TAP_DPADD_S_H(src32_r, src54_r, src76_r, src98_r,
- filt0, filt1, filt2, filt3);
- out3_r = FILT_8TAP_DPADD_S_H(src43_r, src65_r, src87_r, src109_r,
- filt0, filt1, filt2, filt3);
+static void common_vt_8t_16w_msa(const uint8_t *src, int32_t src_stride,
+ uint8_t *dst, int32_t dst_stride,
+ int8_t *filter, int32_t height) {
+ uint32_t loop_cnt;
+ v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
+ v16i8 filt0, filt1, filt2, filt3;
+ v16i8 src10_r, src32_r, src54_r, src76_r, src98_r, src21_r, src43_r;
+ v16i8 src65_r, src87_r, src109_r, src10_l, src32_l, src54_l, src76_l;
+ v16i8 src98_l, src21_l, src43_l, src65_l, src87_l, src109_l;
+ v16u8 tmp0, tmp1, tmp2, tmp3;
+ v8i16 filt, out0_r, out1_r, out2_r, out3_r, out0_l, out1_l, out2_l, out3_l;
+
+ src -= (3 * src_stride);
- out0_r = SRARI_SATURATE_SIGNED_H(out0_r, FILTER_BITS, 7);
- out1_r = SRARI_SATURATE_SIGNED_H(out1_r, FILTER_BITS, 7);
- out2_r = SRARI_SATURATE_SIGNED_H(out2_r, FILTER_BITS, 7);
- out3_r = SRARI_SATURATE_SIGNED_H(out3_r, FILTER_BITS, 7);
+ filt = LD_SH(filter);
+ SPLATI_H4_SB(filt, 0, 1, 2, 3, filt0, filt1, filt2, filt3);
+
+ LD_SB7(src, src_stride, src0, src1, src2, src3, src4, src5, src6);
+ XORI_B7_128_SB(src0, src1, src2, src3, src4, src5, src6);
+ src += (7 * src_stride);
+ ILVR_B4_SB(src1, src0, src3, src2, src5, src4, src2, src1, src10_r, src32_r,
+ src54_r, src21_r);
+ ILVR_B2_SB(src4, src3, src6, src5, src43_r, src65_r);
+ ILVL_B4_SB(src1, src0, src3, src2, src5, src4, src2, src1, src10_l, src32_l,
+ src54_l, src21_l);
+ ILVL_B2_SB(src4, src3, src6, src5, src43_l, src65_l);
+
+ for (loop_cnt = (height >> 2); loop_cnt--;) {
+ LD_SB4(src, src_stride, src7, src8, src9, src10);
+ XORI_B4_128_SB(src7, src8, src9, src10);
+ src += (4 * src_stride);
- PCKEV_B_4_XORI128_STORE_8_BYTES_4(out0_r, out1_r, out2_r, out3_r,
- dst, dst_stride);
+ ILVR_B4_SB(src7, src6, src8, src7, src9, src8, src10, src9, src76_r,
+ src87_r, src98_r, src109_r);
+ ILVL_B4_SB(src7, src6, src8, src7, src9, src8, src10, src9, src76_l,
+ src87_l, src98_l, src109_l);
+ out0_r = FILT_8TAP_DPADD_S_H(src10_r, src32_r, src54_r, src76_r, filt0,
+ filt1, filt2, filt3);
+ out1_r = FILT_8TAP_DPADD_S_H(src21_r, src43_r, src65_r, src87_r, filt0,
+ filt1, filt2, filt3);
+ out2_r = FILT_8TAP_DPADD_S_H(src32_r, src54_r, src76_r, src98_r, filt0,
+ filt1, filt2, filt3);
+ out3_r = FILT_8TAP_DPADD_S_H(src43_r, src65_r, src87_r, src109_r, filt0,
+ filt1, filt2, filt3);
+ out0_l = FILT_8TAP_DPADD_S_H(src10_l, src32_l, src54_l, src76_l, filt0,
+ filt1, filt2, filt3);
+ out1_l = FILT_8TAP_DPADD_S_H(src21_l, src43_l, src65_l, src87_l, filt0,
+ filt1, filt2, filt3);
+ out2_l = FILT_8TAP_DPADD_S_H(src32_l, src54_l, src76_l, src98_l, filt0,
+ filt1, filt2, filt3);
+ out3_l = FILT_8TAP_DPADD_S_H(src43_l, src65_l, src87_l, src109_l, filt0,
+ filt1, filt2, filt3);
+ SRARI_H4_SH(out0_r, out1_r, out2_r, out3_r, FILTER_BITS);
+ SRARI_H4_SH(out0_l, out1_l, out2_l, out3_l, FILTER_BITS);
+ SAT_SH4_SH(out0_r, out1_r, out2_r, out3_r, 7);
+ SAT_SH4_SH(out0_l, out1_l, out2_l, out3_l, 7);
+ PCKEV_B4_UB(out0_l, out0_r, out1_l, out1_r, out2_l, out2_r, out3_l, out3_r,
+ tmp0, tmp1, tmp2, tmp3);
+ XORI_B4_128_UB(tmp0, tmp1, tmp2, tmp3);
+ ST_UB4(tmp0, tmp1, tmp2, tmp3, dst, dst_stride);
dst += (4 * dst_stride);
src10_r = src54_r;
@@ -133,7 +184,12 @@ static void common_vt_8t_8w_msa(const uint8_t *src, int32_t src_stride,
src21_r = src65_r;
src43_r = src87_r;
src65_r = src109_r;
-
+ src10_l = src54_l;
+ src32_l = src76_l;
+ src54_l = src98_l;
+ src21_l = src65_l;
+ src43_l = src87_l;
+ src65_l = src109_l;
src6 = src10;
}
}
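
The old SRARI_SATURATE_SIGNED_H helper is split into its two halves in this patch: a rounding arithmetic right shift by FILTER_BITS (SRARI_H*) followed by a signed saturate with n = 7, which clamps each halfword to [-128, 127] ahead of the byte pack. A sketch of the two-register forms used above, assuming one intrinsic per register as in the removed helper; the four-register variants (SRARI_H4_SH, SAT_SH4_SH) just repeat the same step over two more registers:

/* Sketch: round-and-shift, then saturate, two vectors at a time. */
#define SRARI_H2_SH(in0, in1, shift)            \
  {                                             \
    in0 = __msa_srari_h((v8i16)(in0), (shift)); \
    in1 = __msa_srari_h((v8i16)(in1), (shift)); \
  }
#define SAT_SH2_SH(in0, in1, sat_val)             \
  {                                               \
    in0 = __msa_sat_s_h((v8i16)(in0), (sat_val)); \
    in1 = __msa_sat_s_h((v8i16)(in1), (sat_val)); \
  }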
@@ -147,89 +203,63 @@ static void common_vt_8t_16w_mult_msa(const uint8_t *src, int32_t src_stride,
uint32_t loop_cnt, cnt;
v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
v16i8 filt0, filt1, filt2, filt3;
- v16i8 src10_r, src32_r, src54_r, src76_r, src98_r;
- v16i8 src21_r, src43_r, src65_r, src87_r, src109_r;
- v16i8 src10_l, src32_l, src54_l, src76_l, src98_l;
- v16i8 src21_l, src43_l, src65_l, src87_l, src109_l;
- v8i16 out0_r, out1_r, out2_r, out3_r, out0_l, out1_l, out2_l, out3_l;
- v8i16 filt;
+ v16i8 src10_r, src32_r, src54_r, src76_r, src98_r, src21_r, src43_r;
+ v16i8 src65_r, src87_r, src109_r, src10_l, src32_l, src54_l, src76_l;
+ v16i8 src98_l, src21_l, src43_l, src65_l, src87_l, src109_l;
v16u8 tmp0, tmp1, tmp2, tmp3;
+ v8i16 filt, out0_r, out1_r, out2_r, out3_r, out0_l, out1_l, out2_l, out3_l;
src -= (3 * src_stride);
- filt = LOAD_SH(filter);
- filt0 = (v16i8)__msa_splati_h(filt, 0);
- filt1 = (v16i8)__msa_splati_h(filt, 1);
- filt2 = (v16i8)__msa_splati_h(filt, 2);
- filt3 = (v16i8)__msa_splati_h(filt, 3);
+ filt = LD_SH(filter);
+ SPLATI_H4_SB(filt, 0, 1, 2, 3, filt0, filt1, filt2, filt3);
for (cnt = (width >> 4); cnt--;) {
src_tmp = src;
dst_tmp = dst;
- LOAD_7VECS_SB(src_tmp, src_stride,
- src0, src1, src2, src3, src4, src5, src6);
+ LD_SB7(src_tmp, src_stride, src0, src1, src2, src3, src4, src5, src6);
+ XORI_B7_128_SB(src0, src1, src2, src3, src4, src5, src6);
src_tmp += (7 * src_stride);
-
- XORI_B_7VECS_SB(src0, src1, src2, src3, src4, src5, src6,
- src0, src1, src2, src3, src4, src5, src6, 128);
-
- ILVR_B_6VECS_SB(src0, src2, src4, src1, src3, src5,
- src1, src3, src5, src2, src4, src6,
- src10_r, src32_r, src54_r, src21_r, src43_r, src65_r);
-
- ILVL_B_6VECS_SB(src0, src2, src4, src1, src3, src5,
- src1, src3, src5, src2, src4, src6,
- src10_l, src32_l, src54_l, src21_l, src43_l, src65_l);
+ ILVR_B4_SB(src1, src0, src3, src2, src5, src4, src2, src1, src10_r,
+ src32_r, src54_r, src21_r);
+ ILVR_B2_SB(src4, src3, src6, src5, src43_r, src65_r);
+ ILVL_B4_SB(src1, src0, src3, src2, src5, src4, src2, src1, src10_l,
+ src32_l, src54_l, src21_l);
+ ILVL_B2_SB(src4, src3, src6, src5, src43_l, src65_l);
for (loop_cnt = (height >> 2); loop_cnt--;) {
- LOAD_4VECS_SB(src_tmp, src_stride, src7, src8, src9, src10);
+ LD_SB4(src_tmp, src_stride, src7, src8, src9, src10);
+ XORI_B4_128_SB(src7, src8, src9, src10);
src_tmp += (4 * src_stride);
-
- XORI_B_4VECS_SB(src7, src8, src9, src10, src7, src8, src9, src10, 128);
-
- ILVR_B_4VECS_SB(src6, src7, src8, src9, src7, src8, src9, src10,
- src76_r, src87_r, src98_r, src109_r);
-
- ILVL_B_4VECS_SB(src6, src7, src8, src9, src7, src8, src9, src10,
- src76_l, src87_l, src98_l, src109_l);
-
- out0_r = FILT_8TAP_DPADD_S_H(src10_r, src32_r, src54_r, src76_r,
- filt0, filt1, filt2, filt3);
- out1_r = FILT_8TAP_DPADD_S_H(src21_r, src43_r, src65_r, src87_r,
- filt0, filt1, filt2, filt3);
- out2_r = FILT_8TAP_DPADD_S_H(src32_r, src54_r, src76_r, src98_r,
- filt0, filt1, filt2, filt3);
- out3_r = FILT_8TAP_DPADD_S_H(src43_r, src65_r, src87_r, src109_r,
- filt0, filt1, filt2, filt3);
-
- out0_l = FILT_8TAP_DPADD_S_H(src10_l, src32_l, src54_l, src76_l,
- filt0, filt1, filt2, filt3);
- out1_l = FILT_8TAP_DPADD_S_H(src21_l, src43_l, src65_l, src87_l,
- filt0, filt1, filt2, filt3);
- out2_l = FILT_8TAP_DPADD_S_H(src32_l, src54_l, src76_l, src98_l,
- filt0, filt1, filt2, filt3);
- out3_l = FILT_8TAP_DPADD_S_H(src43_l, src65_l, src87_l, src109_l,
- filt0, filt1, filt2, filt3);
-
- out0_r = SRARI_SATURATE_SIGNED_H(out0_r, FILTER_BITS, 7);
- out1_r = SRARI_SATURATE_SIGNED_H(out1_r, FILTER_BITS, 7);
- out2_r = SRARI_SATURATE_SIGNED_H(out2_r, FILTER_BITS, 7);
- out3_r = SRARI_SATURATE_SIGNED_H(out3_r, FILTER_BITS, 7);
- out0_l = SRARI_SATURATE_SIGNED_H(out0_l, FILTER_BITS, 7);
- out1_l = SRARI_SATURATE_SIGNED_H(out1_l, FILTER_BITS, 7);
- out2_l = SRARI_SATURATE_SIGNED_H(out2_l, FILTER_BITS, 7);
- out3_l = SRARI_SATURATE_SIGNED_H(out3_l, FILTER_BITS, 7);
-
- out0_r = (v8i16)__msa_pckev_b((v16i8)out0_l, (v16i8)out0_r);
- out1_r = (v8i16)__msa_pckev_b((v16i8)out1_l, (v16i8)out1_r);
- out2_r = (v8i16)__msa_pckev_b((v16i8)out2_l, (v16i8)out2_r);
- out3_r = (v8i16)__msa_pckev_b((v16i8)out3_l, (v16i8)out3_r);
-
- XORI_B_4VECS_UB(out0_r, out1_r, out2_r, out3_r,
- tmp0, tmp1, tmp2, tmp3, 128);
-
- STORE_4VECS_UB(dst_tmp, dst_stride, tmp0, tmp1, tmp2, tmp3);
+ ILVR_B4_SB(src7, src6, src8, src7, src9, src8, src10, src9, src76_r,
+ src87_r, src98_r, src109_r);
+ ILVL_B4_SB(src7, src6, src8, src7, src9, src8, src10, src9, src76_l,
+ src87_l, src98_l, src109_l);
+ out0_r = FILT_8TAP_DPADD_S_H(src10_r, src32_r, src54_r, src76_r, filt0,
+ filt1, filt2, filt3);
+ out1_r = FILT_8TAP_DPADD_S_H(src21_r, src43_r, src65_r, src87_r, filt0,
+ filt1, filt2, filt3);
+ out2_r = FILT_8TAP_DPADD_S_H(src32_r, src54_r, src76_r, src98_r, filt0,
+ filt1, filt2, filt3);
+ out3_r = FILT_8TAP_DPADD_S_H(src43_r, src65_r, src87_r, src109_r, filt0,
+ filt1, filt2, filt3);
+ out0_l = FILT_8TAP_DPADD_S_H(src10_l, src32_l, src54_l, src76_l, filt0,
+ filt1, filt2, filt3);
+ out1_l = FILT_8TAP_DPADD_S_H(src21_l, src43_l, src65_l, src87_l, filt0,
+ filt1, filt2, filt3);
+ out2_l = FILT_8TAP_DPADD_S_H(src32_l, src54_l, src76_l, src98_l, filt0,
+ filt1, filt2, filt3);
+ out3_l = FILT_8TAP_DPADD_S_H(src43_l, src65_l, src87_l, src109_l, filt0,
+ filt1, filt2, filt3);
+ SRARI_H4_SH(out0_r, out1_r, out2_r, out3_r, FILTER_BITS);
+ SRARI_H4_SH(out0_l, out1_l, out2_l, out3_l, FILTER_BITS);
+ SAT_SH4_SH(out0_r, out1_r, out2_r, out3_r, 7);
+ SAT_SH4_SH(out0_l, out1_l, out2_l, out3_l, 7);
+ PCKEV_B4_UB(out0_l, out0_r, out1_l, out1_r, out2_l, out2_r, out3_l,
+ out3_r, tmp0, tmp1, tmp2, tmp3);
+ XORI_B4_128_UB(tmp0, tmp1, tmp2, tmp3);
+ ST_UB4(tmp0, tmp1, tmp2, tmp3, dst_tmp, dst_stride);
dst_tmp += (4 * dst_stride);
src10_r = src54_r;
@@ -238,14 +268,12 @@ static void common_vt_8t_16w_mult_msa(const uint8_t *src, int32_t src_stride,
src21_r = src65_r;
src43_r = src87_r;
src65_r = src109_r;
-
src10_l = src54_l;
src32_l = src76_l;
src54_l = src98_l;
src21_l = src65_l;
src43_l = src87_l;
src65_l = src109_l;
-
src6 = src10;
}
@@ -254,134 +282,77 @@ static void common_vt_8t_16w_mult_msa(const uint8_t *src, int32_t src_stride,
}
}
-static void common_vt_8t_16w_msa(const uint8_t *src, int32_t src_stride,
- uint8_t *dst, int32_t dst_stride,
- int8_t *filter, int32_t height) {
- common_vt_8t_16w_mult_msa(src, src_stride, dst, dst_stride,
- filter, height, 16);
-}
-
static void common_vt_8t_32w_msa(const uint8_t *src, int32_t src_stride,
uint8_t *dst, int32_t dst_stride,
int8_t *filter, int32_t height) {
- common_vt_8t_16w_mult_msa(src, src_stride, dst, dst_stride,
- filter, height, 32);
+ common_vt_8t_16w_mult_msa(src, src_stride, dst, dst_stride, filter, height,
+ 32);
}
static void common_vt_8t_64w_msa(const uint8_t *src, int32_t src_stride,
uint8_t *dst, int32_t dst_stride,
int8_t *filter, int32_t height) {
- common_vt_8t_16w_mult_msa(src, src_stride, dst, dst_stride,
- filter, height, 64);
+ common_vt_8t_16w_mult_msa(src, src_stride, dst, dst_stride, filter, height,
+ 64);
}
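
Two things worth noting at this boundary. First, common_vt_8t_16w_msa is no longer a thin wrapper over common_vt_8t_16w_mult_msa; it received its own body earlier in this diff, while the 32- and 64-wide wrappers still delegate. Second, everything below implements the 2-tap (bilinear) path, which filters unsigned pixels directly and therefore drops the XORI_B*_128 sign-bit flips that bracket the signed 8-tap path above. Judging from the removed XORI_B_*VECS_SB calls, those flips presumably reduce to one __msa_xori_b per register; a sketch:

/* Sketch: toggle bit 7 of every byte (i.e. add/subtract 128), mapping
   unsigned pixels into the signed domain of the 8-tap dot product and
   back again after packing. */
#define XORI_B2_128_SB(in0, in1)                  \
  {                                               \
    in0 = (v16i8)__msa_xori_b((v16u8)(in0), 128); \
    in1 = (v16i8)__msa_xori_b((v16u8)(in1), 128); \
  }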
static void common_vt_2t_4x4_msa(const uint8_t *src, int32_t src_stride,
uint8_t *dst, int32_t dst_stride,
int8_t *filter) {
- uint32_t out0, out1, out2, out3;
v16i8 src0, src1, src2, src3, src4;
v16i8 src10_r, src32_r, src21_r, src43_r, src2110, src4332;
- v16i8 filt0;
- v8u16 filt;
+ v16u8 filt0;
+ v8i16 filt;
+ v8u16 tmp0, tmp1;
- filt = LOAD_UH(filter);
- filt0 = (v16i8)__msa_splati_h((v8i16)filt, 0);
+ filt = LD_SH(filter);
+ filt0 = (v16u8)__msa_splati_h(filt, 0);
- LOAD_5VECS_SB(src, src_stride, src0, src1, src2, src3, src4);
+ LD_SB5(src, src_stride, src0, src1, src2, src3, src4);
src += (5 * src_stride);
- ILVR_B_4VECS_SB(src0, src1, src2, src3, src1, src2, src3, src4,
- src10_r, src21_r, src32_r, src43_r);
-
- ILVR_D_2VECS_SB(src2110, src21_r, src10_r, src4332, src43_r, src32_r);
-
- src2110 = (v16i8)__msa_dotp_u_h((v16u8)src2110, (v16u8)filt0);
- src4332 = (v16i8)__msa_dotp_u_h((v16u8)src4332, (v16u8)filt0);
-
- src2110 = (v16i8)SRARI_SATURATE_UNSIGNED_H(src2110, FILTER_BITS, 7);
- src4332 = (v16i8)SRARI_SATURATE_UNSIGNED_H(src4332, FILTER_BITS, 7);
-
- src2110 = (v16i8)__msa_pckev_b((v16i8)src4332, (v16i8)src2110);
-
- out0 = __msa_copy_u_w((v4i32)src2110, 0);
- out1 = __msa_copy_u_w((v4i32)src2110, 1);
- out2 = __msa_copy_u_w((v4i32)src2110, 2);
- out3 = __msa_copy_u_w((v4i32)src2110, 3);
-
- STORE_WORD(dst, out0);
- dst += dst_stride;
- STORE_WORD(dst, out1);
- dst += dst_stride;
- STORE_WORD(dst, out2);
- dst += dst_stride;
- STORE_WORD(dst, out3);
+ ILVR_B4_SB(src1, src0, src2, src1, src3, src2, src4, src3, src10_r, src21_r,
+ src32_r, src43_r);
+ ILVR_D2_SB(src21_r, src10_r, src43_r, src32_r, src2110, src4332);
+ DOTP_UB2_UH(src2110, src4332, filt0, filt0, tmp0, tmp1);
+ SRARI_H2_UH(tmp0, tmp1, FILTER_BITS);
+ SAT_UH2_UH(tmp0, tmp1, 7);
+ src2110 = __msa_pckev_b((v16i8)tmp1, (v16i8)tmp0);
+ ST4x4_UB(src2110, src2110, 0, 1, 2, 3, dst, dst_stride);
}
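
The 2-tap kernels use the unsigned dot product, and the old per-word store tail (four __msa_copy_u_w extractions plus four STORE_WORD calls, removed above) is folded into ST4x4_UB. Sketches of both, with memcpy standing in for the header's unaligned word-store helper (an assumption; the upstream helper may differ):

/* Sketch: unsigned byte dot products, two registers at a time. */
#define DOTP_UB2_UH(mult0, mult1, cnst0, cnst1, out0, out1) \
  {                                                         \
    out0 = __msa_dotp_u_h((v16u8)(mult0), (v16u8)(cnst0));  \
    out1 = __msa_dotp_u_h((v16u8)(mult1), (v16u8)(cnst1));  \
  }

/* Sketch: store word lanes idx0/idx1 of in0 and idx2/idx3 of in1 to
   four consecutive rows of pdst (needs <string.h>). */
#define ST4x4_UB(in0, in1, idx0, idx1, idx2, idx3, pdst, stride) \
  {                                                              \
    uint32_t w0_m = __msa_copy_u_w((v4i32)(in0), (idx0));        \
    uint32_t w1_m = __msa_copy_u_w((v4i32)(in0), (idx1));        \
    uint32_t w2_m = __msa_copy_u_w((v4i32)(in1), (idx2));        \
    uint32_t w3_m = __msa_copy_u_w((v4i32)(in1), (idx3));        \
    uint8_t *p_m = (uint8_t *)(pdst);                            \
    memcpy(p_m, &w0_m, 4);                                       \
    memcpy(p_m + (stride), &w1_m, 4);                            \
    memcpy(p_m + 2 * (stride), &w2_m, 4);                        \
    memcpy(p_m + 3 * (stride), &w3_m, 4);                        \
  }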
static void common_vt_2t_4x8_msa(const uint8_t *src, int32_t src_stride,
uint8_t *dst, int32_t dst_stride,
int8_t *filter) {
- uint32_t out0, out1, out2, out3, out4, out5, out6, out7;
v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8;
v16i8 src10_r, src32_r, src54_r, src76_r, src21_r, src43_r;
v16i8 src65_r, src87_r, src2110, src4332, src6554, src8776;
- v16i8 filt0;
- v8u16 filt;
+ v8u16 tmp0, tmp1, tmp2, tmp3;
+ v16u8 filt0;
+ v8i16 filt;
- filt = LOAD_UH(filter);
- filt0 = (v16i8)__msa_splati_h((v8i16)filt, 0);
+ filt = LD_SH(filter);
+ filt0 = (v16u8)__msa_splati_h(filt, 0);
- LOAD_8VECS_SB(src, src_stride,
- src0, src1, src2, src3, src4, src5, src6, src7);
+ LD_SB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7);
src += (8 * src_stride);
- src8 = LOAD_SB(src);
+ src8 = LD_SB(src);
src += src_stride;
- ILVR_B_8VECS_SB(src0, src1, src2, src3, src4, src5, src6, src7,
- src1, src2, src3, src4, src5, src6, src7, src8,
- src10_r, src21_r, src32_r, src43_r,
- src54_r, src65_r, src76_r, src87_r);
-
- ILVR_D_4VECS_SB(src2110, src21_r, src10_r, src4332, src43_r, src32_r,
- src6554, src65_r, src54_r, src8776, src87_r, src76_r);
-
- src2110 = (v16i8)__msa_dotp_u_h((v16u8)src2110, (v16u8)filt0);
- src4332 = (v16i8)__msa_dotp_u_h((v16u8)src4332, (v16u8)filt0);
- src6554 = (v16i8)__msa_dotp_u_h((v16u8)src6554, (v16u8)filt0);
- src8776 = (v16i8)__msa_dotp_u_h((v16u8)src8776, (v16u8)filt0);
-
- src2110 = (v16i8)SRARI_SATURATE_UNSIGNED_H(src2110, FILTER_BITS, 7);
- src4332 = (v16i8)SRARI_SATURATE_UNSIGNED_H(src4332, FILTER_BITS, 7);
- src6554 = (v16i8)SRARI_SATURATE_UNSIGNED_H(src6554, FILTER_BITS, 7);
- src8776 = (v16i8)SRARI_SATURATE_UNSIGNED_H(src8776, FILTER_BITS, 7);
-
- src2110 = (v16i8)__msa_pckev_b((v16i8)src4332, (v16i8)src2110);
- src4332 = (v16i8)__msa_pckev_b((v16i8)src8776, (v16i8)src6554);
-
- out0 = __msa_copy_u_w((v4i32)src2110, 0);
- out1 = __msa_copy_u_w((v4i32)src2110, 1);
- out2 = __msa_copy_u_w((v4i32)src2110, 2);
- out3 = __msa_copy_u_w((v4i32)src2110, 3);
- out4 = __msa_copy_u_w((v4i32)src4332, 0);
- out5 = __msa_copy_u_w((v4i32)src4332, 1);
- out6 = __msa_copy_u_w((v4i32)src4332, 2);
- out7 = __msa_copy_u_w((v4i32)src4332, 3);
-
- STORE_WORD(dst, out0);
- dst += dst_stride;
- STORE_WORD(dst, out1);
- dst += dst_stride;
- STORE_WORD(dst, out2);
- dst += dst_stride;
- STORE_WORD(dst, out3);
- dst += dst_stride;
- STORE_WORD(dst, out4);
- dst += dst_stride;
- STORE_WORD(dst, out5);
- dst += dst_stride;
- STORE_WORD(dst, out6);
- dst += dst_stride;
- STORE_WORD(dst, out7);
+ ILVR_B4_SB(src1, src0, src2, src1, src3, src2, src4, src3, src10_r, src21_r,
+ src32_r, src43_r);
+ ILVR_B4_SB(src5, src4, src6, src5, src7, src6, src8, src7, src54_r, src65_r,
+ src76_r, src87_r);
+ ILVR_D4_SB(src21_r, src10_r, src43_r, src32_r, src65_r, src54_r,
+ src87_r, src76_r, src2110, src4332, src6554, src8776);
+ DOTP_UB4_UH(src2110, src4332, src6554, src8776, filt0, filt0, filt0, filt0,
+ tmp0, tmp1, tmp2, tmp3);
+ SRARI_H4_UH(tmp0, tmp1, tmp2, tmp3, FILTER_BITS);
+ SAT_UH4_UH(tmp0, tmp1, tmp2, tmp3, 7);
+ PCKEV_B2_SB(tmp1, tmp0, tmp3, tmp2, src2110, src4332);
+ ST4x4_UB(src2110, src2110, 0, 1, 2, 3, dst, dst_stride);
+ ST4x4_UB(src4332, src4332, 0, 1, 2, 3, dst + 4 * dst_stride, dst_stride);
}
static void common_vt_2t_4w_msa(const uint8_t *src, int32_t src_stride,
@@ -397,32 +368,24 @@ static void common_vt_2t_4w_msa(const uint8_t *src, int32_t src_stride,
static void common_vt_2t_8x4_msa(const uint8_t *src, int32_t src_stride,
uint8_t *dst, int32_t dst_stride,
int8_t *filter) {
- v16u8 src0, src1, src2, src3, src4;
- v16u8 vec0, vec1, vec2, vec3, filt0;
+ v16u8 src0, src1, src2, src3, src4, vec0, vec1, vec2, vec3, filt0;
+ v16i8 out0, out1;
v8u16 tmp0, tmp1, tmp2, tmp3;
- v8u16 filt;
+ v8i16 filt;
/* rearranging filter_y */
- filt = LOAD_UH(filter);
- filt0 = (v16u8)__msa_splati_h((v8i16)filt, 0);
-
- LOAD_5VECS_UB(src, src_stride, src0, src1, src2, src3, src4);
-
- ILVR_B_2VECS_UB(src0, src1, src1, src2, vec0, vec1);
- ILVR_B_2VECS_UB(src2, src3, src3, src4, vec2, vec3);
-
- /* filter calc */
- tmp0 = __msa_dotp_u_h(vec0, filt0);
- tmp1 = __msa_dotp_u_h(vec1, filt0);
- tmp2 = __msa_dotp_u_h(vec2, filt0);
- tmp3 = __msa_dotp_u_h(vec3, filt0);
-
- tmp0 = SRARI_SATURATE_UNSIGNED_H(tmp0, FILTER_BITS, 7);
- tmp1 = SRARI_SATURATE_UNSIGNED_H(tmp1, FILTER_BITS, 7);
- tmp2 = SRARI_SATURATE_UNSIGNED_H(tmp2, FILTER_BITS, 7);
- tmp3 = SRARI_SATURATE_UNSIGNED_H(tmp3, FILTER_BITS, 7);
-
- PCKEV_B_STORE_8_BYTES_4(tmp0, tmp1, tmp2, tmp3, dst, dst_stride);
+ filt = LD_SH(filter);
+ filt0 = (v16u8)__msa_splati_h(filt, 0);
+
+ LD_UB5(src, src_stride, src0, src1, src2, src3, src4);
+ ILVR_B2_UB(src1, src0, src2, src1, vec0, vec1);
+ ILVR_B2_UB(src3, src2, src4, src3, vec2, vec3);
+ DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, tmp0, tmp1,
+ tmp2, tmp3);
+ SRARI_H4_UH(tmp0, tmp1, tmp2, tmp3, FILTER_BITS);
+ SAT_UH4_UH(tmp0, tmp1, tmp2, tmp3, 7);
+ PCKEV_B2_SB(tmp1, tmp0, tmp3, tmp2, out0, out1);
+ ST8x4_UB(out0, out1, dst, dst_stride);
}
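
Likewise for the 8-wide store path: the old PCKEV_B_STORE_8_BYTES_4 is split into an explicit pack (PCKEV_B2_SB) plus an 8-bytes-per-row store (ST8x4_UB). The pack is grounded in the removed __msa_pckev_b lines elsewhere in this diff; the store sketch assumes a doubleword extraction per row, which is not shown in the removed code here:

/* Sketch: pack even bytes of each (hi, lo) pair of 16-bit results
   back down to bytes. */
#define PCKEV_B2_SB(in0, in1, in2, in3, out0, out1)   \
  {                                                   \
    out0 = __msa_pckev_b((v16i8)(in0), (v16i8)(in1)); \
    out1 = __msa_pckev_b((v16i8)(in2), (v16i8)(in3)); \
  }

/* Sketch: store the two doublewords of in0, then of in1, down four
   rows; memcpy again stands in for the unaligned store helper. */
#define ST8x4_UB(in0, in1, pdst, stride)             \
  {                                                  \
    uint64_t d0_m = __msa_copy_u_d((v2i64)(in0), 0); \
    uint64_t d1_m = __msa_copy_u_d((v2i64)(in0), 1); \
    uint64_t d2_m = __msa_copy_u_d((v2i64)(in1), 0); \
    uint64_t d3_m = __msa_copy_u_d((v2i64)(in1), 1); \
    uint8_t *p_m = (uint8_t *)(pdst);                \
    memcpy(p_m, &d0_m, 8);                           \
    memcpy(p_m + (stride), &d1_m, 8);                \
    memcpy(p_m + 2 * (stride), &d2_m, 8);            \
    memcpy(p_m + 3 * (stride), &d3_m, 8);            \
  }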
static void common_vt_2t_8x8mult_msa(const uint8_t *src, int32_t src_stride,
@@ -431,51 +394,39 @@ static void common_vt_2t_8x8mult_msa(const uint8_t *src, int32_t src_stride,
uint32_t loop_cnt;
v16u8 src0, src1, src2, src3, src4, src5, src6, src7, src8;
v16u8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, filt0;
+ v16i8 out0, out1;
v8u16 tmp0, tmp1, tmp2, tmp3;
- v8u16 filt;
+ v8i16 filt;
/* rearranging filter_y */
- filt = LOAD_UH(filter);
- filt0 = (v16u8)__msa_splati_h((v8i16)filt, 0);
+ filt = LD_SH(filter);
+ filt0 = (v16u8)__msa_splati_h(filt, 0);
- src0 = LOAD_UB(src);
+ src0 = LD_UB(src);
src += src_stride;
for (loop_cnt = (height >> 3); loop_cnt--;) {
- LOAD_8VECS_UB(src, src_stride,
- src1, src2, src3, src4, src5, src6, src7, src8);
+ LD_UB8(src, src_stride, src1, src2, src3, src4, src5, src6, src7, src8);
src += (8 * src_stride);
- ILVR_B_4VECS_UB(src0, src1, src2, src3, src1, src2, src3, src4,
- vec0, vec1, vec2, vec3);
-
- ILVR_B_4VECS_UB(src4, src5, src6, src7, src5, src6, src7, src8,
- vec4, vec5, vec6, vec7);
-
- tmp0 = __msa_dotp_u_h(vec0, filt0);
- tmp1 = __msa_dotp_u_h(vec1, filt0);
- tmp2 = __msa_dotp_u_h(vec2, filt0);
- tmp3 = __msa_dotp_u_h(vec3, filt0);
-
- tmp0 = SRARI_SATURATE_UNSIGNED_H(tmp0, FILTER_BITS, 7);
- tmp1 = SRARI_SATURATE_UNSIGNED_H(tmp1, FILTER_BITS, 7);
- tmp2 = SRARI_SATURATE_UNSIGNED_H(tmp2, FILTER_BITS, 7);
- tmp3 = SRARI_SATURATE_UNSIGNED_H(tmp3, FILTER_BITS, 7);
-
- PCKEV_B_STORE_8_BYTES_4(tmp0, tmp1, tmp2, tmp3, dst, dst_stride);
+ ILVR_B4_UB(src1, src0, src2, src1, src3, src2, src4, src3, vec0, vec1,
+ vec2, vec3);
+ ILVR_B4_UB(src5, src4, src6, src5, src7, src6, src8, src7, vec4, vec5,
+ vec6, vec7);
+ DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, tmp0, tmp1,
+ tmp2, tmp3);
+ SRARI_H4_UH(tmp0, tmp1, tmp2, tmp3, FILTER_BITS);
+ SAT_UH4_UH(tmp0, tmp1, tmp2, tmp3, 7);
+ PCKEV_B2_SB(tmp1, tmp0, tmp3, tmp2, out0, out1);
+ ST8x4_UB(out0, out1, dst, dst_stride);
dst += (4 * dst_stride);
- tmp0 = __msa_dotp_u_h(vec4, filt0);
- tmp1 = __msa_dotp_u_h(vec5, filt0);
- tmp2 = __msa_dotp_u_h(vec6, filt0);
- tmp3 = __msa_dotp_u_h(vec7, filt0);
-
- tmp0 = SRARI_SATURATE_UNSIGNED_H(tmp0, FILTER_BITS, 7);
- tmp1 = SRARI_SATURATE_UNSIGNED_H(tmp1, FILTER_BITS, 7);
- tmp2 = SRARI_SATURATE_UNSIGNED_H(tmp2, FILTER_BITS, 7);
- tmp3 = SRARI_SATURATE_UNSIGNED_H(tmp3, FILTER_BITS, 7);
-
- PCKEV_B_STORE_8_BYTES_4(tmp0, tmp1, tmp2, tmp3, dst, dst_stride);
+ DOTP_UB4_UH(vec4, vec5, vec6, vec7, filt0, filt0, filt0, filt0, tmp0, tmp1,
+ tmp2, tmp3);
+ SRARI_H4_UH(tmp0, tmp1, tmp2, tmp3, FILTER_BITS);
+ SAT_UH4_UH(tmp0, tmp1, tmp2, tmp3, 7);
+ PCKEV_B2_SB(tmp1, tmp0, tmp3, tmp2, out0, out1);
+ ST8x4_UB(out0, out1, dst, dst_stride);
dst += (4 * dst_stride);
src0 = src8;
@@ -499,57 +450,45 @@ static void common_vt_2t_16w_msa(const uint8_t *src, int32_t src_stride,
v16u8 src0, src1, src2, src3, src4;
v16u8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, filt0;
v8u16 tmp0, tmp1, tmp2, tmp3;
- v8u16 filt;
+ v8i16 filt;
/* rearranging filter_y */
- filt = LOAD_UH(filter);
- filt0 = (v16u8)__msa_splati_h((v8i16)filt, 0);
+ filt = LD_SH(filter);
+ filt0 = (v16u8)__msa_splati_h(filt, 0);
- src0 = LOAD_UB(src);
+ src0 = LD_UB(src);
src += src_stride;
for (loop_cnt = (height >> 2); loop_cnt--;) {
- LOAD_4VECS_UB(src, src_stride, src1, src2, src3, src4);
+ LD_UB4(src, src_stride, src1, src2, src3, src4);
src += (4 * src_stride);
- ILV_B_LRLR_UB(src0, src1, src1, src2, vec1, vec0, vec3, vec2);
-
- tmp0 = __msa_dotp_u_h(vec0, filt0);
- tmp1 = __msa_dotp_u_h(vec1, filt0);
-
- tmp0 = SRARI_SATURATE_UNSIGNED_H(tmp0, FILTER_BITS, 7);
- tmp1 = SRARI_SATURATE_UNSIGNED_H(tmp1, FILTER_BITS, 7);
-
- PCKEV_B_STORE_VEC(tmp1, tmp0, dst);
+ ILVR_B2_UB(src1, src0, src2, src1, vec0, vec2);
+ ILVL_B2_UB(src1, src0, src2, src1, vec1, vec3);
+ DOTP_UB2_UH(vec0, vec1, filt0, filt0, tmp0, tmp1);
+ SRARI_H2_UH(tmp0, tmp1, FILTER_BITS);
+ SAT_UH2_UH(tmp0, tmp1, 7);
+ PCKEV_ST_SB(tmp0, tmp1, dst);
dst += dst_stride;
- ILV_B_LRLR_UB(src2, src3, src3, src4, vec5, vec4, vec7, vec6);
-
- tmp2 = __msa_dotp_u_h(vec2, filt0);
- tmp3 = __msa_dotp_u_h(vec3, filt0);
-
- tmp3 = SRARI_SATURATE_UNSIGNED_H(tmp3, FILTER_BITS, 7);
- tmp2 = SRARI_SATURATE_UNSIGNED_H(tmp2, FILTER_BITS, 7);
-
- PCKEV_B_STORE_VEC(tmp3, tmp2, dst);
+ ILVR_B2_UB(src3, src2, src4, src3, vec4, vec6);
+ ILVL_B2_UB(src3, src2, src4, src3, vec5, vec7);
+ DOTP_UB2_UH(vec2, vec3, filt0, filt0, tmp2, tmp3);
+ SRARI_H2_UH(tmp2, tmp3, FILTER_BITS);
+ SAT_UH2_UH(tmp2, tmp3, 7);
+ PCKEV_ST_SB(tmp2, tmp3, dst);
dst += dst_stride;
- tmp0 = __msa_dotp_u_h(vec4, filt0);
- tmp1 = __msa_dotp_u_h(vec5, filt0);
-
- tmp0 = SRARI_SATURATE_UNSIGNED_H(tmp0, FILTER_BITS, 7);
- tmp1 = SRARI_SATURATE_UNSIGNED_H(tmp1, FILTER_BITS, 7);
-
- PCKEV_B_STORE_VEC(tmp1, tmp0, dst);
+ DOTP_UB2_UH(vec4, vec5, filt0, filt0, tmp0, tmp1);
+ SRARI_H2_UH(tmp0, tmp1, FILTER_BITS);
+ SAT_UH2_UH(tmp0, tmp1, 7);
+ PCKEV_ST_SB(tmp0, tmp1, dst);
dst += dst_stride;
- tmp2 = __msa_dotp_u_h(vec6, filt0);
- tmp3 = __msa_dotp_u_h(vec7, filt0);
-
- tmp2 = SRARI_SATURATE_UNSIGNED_H(tmp2, FILTER_BITS, 7);
- tmp3 = SRARI_SATURATE_UNSIGNED_H(tmp3, FILTER_BITS, 7);
-
- PCKEV_B_STORE_VEC(tmp3, tmp2, dst);
+ DOTP_UB2_UH(vec6, vec7, filt0, filt0, tmp2, tmp3);
+ SRARI_H2_UH(tmp2, tmp3, FILTER_BITS);
+ SAT_UH2_UH(tmp2, tmp3, 7);
+ PCKEV_ST_SB(tmp2, tmp3, dst);
dst += dst_stride;
src0 = src4;
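
For the 16-wide-and-up kernels the pack and the store fuse into PCKEV_ST_SB(right, left, dst). Note the operand order relative to the removed PCKEV_B_STORE_VEC(left, right, dst) calls: the new macro takes the right (even-lane) half first, so it presumably packs (in1, in0) internally, along these lines:

/* Sketch: pack even bytes of (in1, in0) and store the 16-byte result
   with a plain vector store to pdst. */
#define PCKEV_ST_SB(in0, in1, pdst)                          \
  {                                                          \
    v16i8 tmp_m = __msa_pckev_b((v16i8)(in1), (v16i8)(in0)); \
    *(v16i8 *)(pdst) = tmp_m;                                \
  }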
@@ -563,93 +502,68 @@ static void common_vt_2t_32w_msa(const uint8_t *src, int32_t src_stride,
v16u8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9;
v16u8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, filt0;
v8u16 tmp0, tmp1, tmp2, tmp3;
- v8u16 filt;
+ v8i16 filt;
/* rearranging filter_y */
- filt = LOAD_UH(filter);
- filt0 = (v16u8)__msa_splati_h((v8i16)filt, 0);
+ filt = LD_SH(filter);
+ filt0 = (v16u8)__msa_splati_h(filt, 0);
- src0 = LOAD_UB(src);
- src5 = LOAD_UB(src + 16);
+ src0 = LD_UB(src);
+ src5 = LD_UB(src + 16);
src += src_stride;
for (loop_cnt = (height >> 2); loop_cnt--;) {
- LOAD_4VECS_UB(src, src_stride, src1, src2, src3, src4);
+ LD_UB4(src, src_stride, src1, src2, src3, src4);
+ ILVR_B2_UB(src1, src0, src2, src1, vec0, vec2);
+ ILVL_B2_UB(src1, src0, src2, src1, vec1, vec3);
- ILV_B_LRLR_UB(src0, src1, src1, src2, vec1, vec0, vec3, vec2);
-
- LOAD_4VECS_UB(src + 16, src_stride, src6, src7, src8, src9);
+ LD_UB4(src + 16, src_stride, src6, src7, src8, src9);
src += (4 * src_stride);
- tmp0 = __msa_dotp_u_h(vec0, filt0);
- tmp1 = __msa_dotp_u_h(vec1, filt0);
-
- tmp0 = SRARI_SATURATE_UNSIGNED_H(tmp0, FILTER_BITS, 7);
- tmp1 = SRARI_SATURATE_UNSIGNED_H(tmp1, FILTER_BITS, 7);
-
- PCKEV_B_STORE_VEC(tmp1, tmp0, dst);
-
- tmp2 = __msa_dotp_u_h(vec2, filt0);
- tmp3 = __msa_dotp_u_h(vec3, filt0);
-
- tmp2 = SRARI_SATURATE_UNSIGNED_H(tmp2, FILTER_BITS, 7);
- tmp3 = SRARI_SATURATE_UNSIGNED_H(tmp3, FILTER_BITS, 7);
-
- PCKEV_B_STORE_VEC(tmp3, tmp2, dst + dst_stride);
-
- ILV_B_LRLR_UB(src2, src3, src3, src4, vec5, vec4, vec7, vec6);
-
- tmp0 = __msa_dotp_u_h(vec4, filt0);
- tmp1 = __msa_dotp_u_h(vec5, filt0);
-
- tmp0 = SRARI_SATURATE_UNSIGNED_H(tmp0, FILTER_BITS, 7);
- tmp1 = SRARI_SATURATE_UNSIGNED_H(tmp1, FILTER_BITS, 7);
-
- PCKEV_B_STORE_VEC(tmp1, tmp0, dst + 2 * dst_stride);
-
- tmp2 = __msa_dotp_u_h(vec6, filt0);
- tmp3 = __msa_dotp_u_h(vec7, filt0);
-
- tmp2 = SRARI_SATURATE_UNSIGNED_H(tmp2, FILTER_BITS, 7);
- tmp3 = SRARI_SATURATE_UNSIGNED_H(tmp3, FILTER_BITS, 7);
-
- PCKEV_B_STORE_VEC(tmp3, tmp2, dst + 3 * dst_stride);
-
- ILV_B_LRLR_UB(src5, src6, src6, src7, vec1, vec0, vec3, vec2);
-
- tmp0 = __msa_dotp_u_h(vec0, filt0);
- tmp1 = __msa_dotp_u_h(vec1, filt0);
-
- tmp0 = SRARI_SATURATE_UNSIGNED_H(tmp0, FILTER_BITS, 7);
- tmp1 = SRARI_SATURATE_UNSIGNED_H(tmp1, FILTER_BITS, 7);
-
- PCKEV_B_STORE_VEC(tmp1, tmp0, dst + 16);
-
- tmp2 = __msa_dotp_u_h(vec2, filt0);
- tmp3 = __msa_dotp_u_h(vec3, filt0);
-
- tmp2 = SRARI_SATURATE_UNSIGNED_H(tmp2, FILTER_BITS, 7);
- tmp3 = SRARI_SATURATE_UNSIGNED_H(tmp3, FILTER_BITS, 7);
-
- PCKEV_B_STORE_VEC(tmp3, tmp2, dst + 16 + dst_stride);
-
- ILV_B_LRLR_UB(src7, src8, src8, src9, vec5, vec4, vec7, vec6);
-
- tmp0 = __msa_dotp_u_h(vec4, filt0);
- tmp1 = __msa_dotp_u_h(vec5, filt0);
-
- tmp0 = SRARI_SATURATE_UNSIGNED_H(tmp0, FILTER_BITS, 7);
- tmp1 = SRARI_SATURATE_UNSIGNED_H(tmp1, FILTER_BITS, 7);
-
- PCKEV_B_STORE_VEC(tmp1, tmp0, dst + 16 + 2 * dst_stride);
-
- tmp2 = __msa_dotp_u_h(vec6, filt0);
- tmp3 = __msa_dotp_u_h(vec7, filt0);
-
- tmp2 = SRARI_SATURATE_UNSIGNED_H(tmp2, FILTER_BITS, 7);
- tmp3 = SRARI_SATURATE_UNSIGNED_H(tmp3, FILTER_BITS, 7);
-
- PCKEV_B_STORE_VEC(tmp3, tmp2, dst + 16 + 3 * dst_stride);
+ DOTP_UB2_UH(vec0, vec1, filt0, filt0, tmp0, tmp1);
+ SRARI_H2_UH(tmp0, tmp1, FILTER_BITS);
+ SAT_UH2_UH(tmp0, tmp1, 7);
+ PCKEV_ST_SB(tmp0, tmp1, dst);
+ DOTP_UB2_UH(vec2, vec3, filt0, filt0, tmp2, tmp3);
+ SRARI_H2_UH(tmp2, tmp3, FILTER_BITS);
+ SAT_UH2_UH(tmp2, tmp3, 7);
+ PCKEV_ST_SB(tmp2, tmp3, dst + dst_stride);
+
+ ILVR_B2_UB(src3, src2, src4, src3, vec4, vec6);
+ ILVL_B2_UB(src3, src2, src4, src3, vec5, vec7);
+ DOTP_UB2_UH(vec4, vec5, filt0, filt0, tmp0, tmp1);
+ SRARI_H2_UH(tmp0, tmp1, FILTER_BITS);
+ SAT_UH2_UH(tmp0, tmp1, 7);
+ PCKEV_ST_SB(tmp0, tmp1, dst + 2 * dst_stride);
+
+ DOTP_UB2_UH(vec6, vec7, filt0, filt0, tmp2, tmp3);
+ SRARI_H2_UH(tmp2, tmp3, FILTER_BITS);
+ SAT_UH2_UH(tmp2, tmp3, 7);
+ PCKEV_ST_SB(tmp2, tmp3, dst + 3 * dst_stride);
+
+ ILVR_B2_UB(src6, src5, src7, src6, vec0, vec2);
+ ILVL_B2_UB(src6, src5, src7, src6, vec1, vec3);
+ DOTP_UB2_UH(vec0, vec1, filt0, filt0, tmp0, tmp1);
+ SRARI_H2_UH(tmp0, tmp1, FILTER_BITS);
+ SAT_UH2_UH(tmp0, tmp1, 7);
+ PCKEV_ST_SB(tmp0, tmp1, dst + 16);
+
+ DOTP_UB2_UH(vec2, vec3, filt0, filt0, tmp2, tmp3);
+ SRARI_H2_UH(tmp2, tmp3, FILTER_BITS);
+ SAT_UH2_UH(tmp2, tmp3, 7);
+ PCKEV_ST_SB(tmp2, tmp3, dst + 16 + dst_stride);
+
+ ILVR_B2_UB(src8, src7, src9, src8, vec4, vec6);
+ ILVL_B2_UB(src8, src7, src9, src8, vec5, vec7);
+ DOTP_UB2_UH(vec4, vec5, filt0, filt0, tmp0, tmp1);
+ SRARI_H2_UH(tmp0, tmp1, FILTER_BITS);
+ SAT_UH2_UH(tmp0, tmp1, 7);
+ PCKEV_ST_SB(tmp0, tmp1, dst + 16 + 2 * dst_stride);
+
+ DOTP_UB2_UH(vec6, vec7, filt0, filt0, tmp2, tmp3);
+ SRARI_H2_UH(tmp2, tmp3, FILTER_BITS);
+ SAT_UH2_UH(tmp2, tmp3, 7);
+ PCKEV_ST_SB(tmp2, tmp3, dst + 16 + 3 * dst_stride);
dst += (4 * dst_stride);
src0 = src4;
@@ -661,97 +575,72 @@ static void common_vt_2t_64w_msa(const uint8_t *src, int32_t src_stride,
uint8_t *dst, int32_t dst_stride,
int8_t *filter, int32_t height) {
uint32_t loop_cnt;
- v16u8 src0, src1, src2, src3, src4, src5, src6, src7;
- v16u8 src8, src9, src10, src11;
- v16u8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, filt0;
+ v16u8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
+ v16u8 src11, vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, filt0;
v8u16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
- v8u16 filt;
+ v8i16 filt;
/* rearranging filter_y */
- filt = LOAD_UH(filter);
- filt0 = (v16u8)__msa_splati_h((v8i16)filt, 0);
+ filt = LD_SH(filter);
+ filt0 = (v16u8)__msa_splati_h(filt, 0);
- LOAD_4VECS_UB(src, 16, src0, src3, src6, src9);
+ LD_UB4(src, 16, src0, src3, src6, src9);
src += src_stride;
for (loop_cnt = (height >> 1); loop_cnt--;) {
- LOAD_2VECS_UB(src, src_stride, src1, src2);
- LOAD_2VECS_UB(src + 16, src_stride, src4, src5);
- LOAD_2VECS_UB(src + 32, src_stride, src7, src8);
- LOAD_2VECS_UB(src + 48, src_stride, src10, src11);
+ LD_UB2(src, src_stride, src1, src2);
+ LD_UB2(src + 16, src_stride, src4, src5);
+ LD_UB2(src + 32, src_stride, src7, src8);
+ LD_UB2(src + 48, src_stride, src10, src11);
src += (2 * src_stride);
- ILV_B_LRLR_UB(src0, src1, src1, src2, vec1, vec0, vec3, vec2);
-
- tmp0 = __msa_dotp_u_h(vec0, filt0);
- tmp1 = __msa_dotp_u_h(vec1, filt0);
-
- tmp0 = SRARI_SATURATE_UNSIGNED_H(tmp0, FILTER_BITS, 7);
- tmp1 = SRARI_SATURATE_UNSIGNED_H(tmp1, FILTER_BITS, 7);
-
- PCKEV_B_STORE_VEC(tmp1, tmp0, dst);
-
- tmp2 = __msa_dotp_u_h(vec2, filt0);
- tmp3 = __msa_dotp_u_h(vec3, filt0);
-
- tmp3 = SRARI_SATURATE_UNSIGNED_H(tmp3, FILTER_BITS, 7);
- tmp2 = SRARI_SATURATE_UNSIGNED_H(tmp2, FILTER_BITS, 7);
-
- PCKEV_B_STORE_VEC(tmp3, tmp2, dst + dst_stride);
-
- ILV_B_LRLR_UB(src3, src4, src4, src5, vec5, vec4, vec7, vec6);
-
- tmp4 = __msa_dotp_u_h(vec4, filt0);
- tmp5 = __msa_dotp_u_h(vec5, filt0);
-
- tmp4 = SRARI_SATURATE_UNSIGNED_H(tmp4, FILTER_BITS, 7);
- tmp5 = SRARI_SATURATE_UNSIGNED_H(tmp5, FILTER_BITS, 7);
-
- PCKEV_B_STORE_VEC(tmp5, tmp4, dst + 16);
-
- tmp6 = __msa_dotp_u_h(vec6, filt0);
- tmp7 = __msa_dotp_u_h(vec7, filt0);
-
- tmp6 = SRARI_SATURATE_UNSIGNED_H(tmp6, FILTER_BITS, 7);
- tmp7 = SRARI_SATURATE_UNSIGNED_H(tmp7, FILTER_BITS, 7);
-
- PCKEV_B_STORE_VEC(tmp7, tmp6, dst + 16 + dst_stride);
-
- ILV_B_LRLR_UB(src6, src7, src7, src8, vec1, vec0, vec3, vec2);
-
- tmp0 = __msa_dotp_u_h(vec0, filt0);
- tmp1 = __msa_dotp_u_h(vec1, filt0);
-
- tmp0 = SRARI_SATURATE_UNSIGNED_H(tmp0, FILTER_BITS, 7);
- tmp1 = SRARI_SATURATE_UNSIGNED_H(tmp1, FILTER_BITS, 7);
-
- PCKEV_B_STORE_VEC(tmp1, tmp0, dst + 32);
-
- tmp2 = __msa_dotp_u_h(vec2, filt0);
- tmp3 = __msa_dotp_u_h(vec3, filt0);
-
- tmp2 = SRARI_SATURATE_UNSIGNED_H(tmp2, FILTER_BITS, 7);
- tmp3 = SRARI_SATURATE_UNSIGNED_H(tmp3, FILTER_BITS, 7);
-
- PCKEV_B_STORE_VEC(tmp3, tmp2, dst + 32 + dst_stride);
-
- ILV_B_LRLR_UB(src9, src10, src10, src11, vec5, vec4, vec7, vec6);
-
- tmp4 = __msa_dotp_u_h(vec4, filt0);
- tmp5 = __msa_dotp_u_h(vec5, filt0);
-
- tmp4 = SRARI_SATURATE_UNSIGNED_H(tmp4, FILTER_BITS, 7);
- tmp5 = SRARI_SATURATE_UNSIGNED_H(tmp5, FILTER_BITS, 7);
-
- PCKEV_B_STORE_VEC(tmp5, tmp4, dst + 48);
-
- tmp6 = __msa_dotp_u_h(vec6, filt0);
- tmp7 = __msa_dotp_u_h(vec7, filt0);
-
- tmp6 = SRARI_SATURATE_UNSIGNED_H(tmp6, FILTER_BITS, 7);
- tmp7 = SRARI_SATURATE_UNSIGNED_H(tmp7, FILTER_BITS, 7);
-
- PCKEV_B_STORE_VEC(tmp7, tmp6, dst + 48 + dst_stride);
+ ILVR_B2_UB(src1, src0, src2, src1, vec0, vec2);
+ ILVL_B2_UB(src1, src0, src2, src1, vec1, vec3);
+ DOTP_UB2_UH(vec0, vec1, filt0, filt0, tmp0, tmp1);
+ SRARI_H2_UH(tmp0, tmp1, FILTER_BITS);
+ SAT_UH2_UH(tmp0, tmp1, 7);
+ PCKEV_ST_SB(tmp0, tmp1, dst);
+
+ DOTP_UB2_UH(vec2, vec3, filt0, filt0, tmp2, tmp3);
+ SRARI_H2_UH(tmp2, tmp3, FILTER_BITS);
+ SAT_UH2_UH(tmp2, tmp3, 7);
+ PCKEV_ST_SB(tmp2, tmp3, dst + dst_stride);
+
+ ILVR_B2_UB(src4, src3, src5, src4, vec4, vec6);
+ ILVL_B2_UB(src4, src3, src5, src4, vec5, vec7);
+ DOTP_UB2_UH(vec4, vec5, filt0, filt0, tmp4, tmp5);
+ SRARI_H2_UH(tmp4, tmp5, FILTER_BITS);
+ SAT_UH2_UH(tmp4, tmp5, 7);
+ PCKEV_ST_SB(tmp4, tmp5, dst + 16);
+
+ DOTP_UB2_UH(vec6, vec7, filt0, filt0, tmp6, tmp7);
+ SRARI_H2_UH(tmp6, tmp7, FILTER_BITS);
+ SAT_UH2_UH(tmp6, tmp7, 7);
+ PCKEV_ST_SB(tmp6, tmp7, dst + 16 + dst_stride);
+
+ ILVR_B2_UB(src7, src6, src8, src7, vec0, vec2);
+ ILVL_B2_UB(src7, src6, src8, src7, vec1, vec3);
+ DOTP_UB2_UH(vec0, vec1, filt0, filt0, tmp0, tmp1);
+ SRARI_H2_UH(tmp0, tmp1, FILTER_BITS);
+ SAT_UH2_UH(tmp0, tmp1, 7);
+ PCKEV_ST_SB(tmp0, tmp1, dst + 32);
+
+ DOTP_UB2_UH(vec2, vec3, filt0, filt0, tmp2, tmp3);
+ SRARI_H2_UH(tmp2, tmp3, FILTER_BITS);
+ SAT_UH2_UH(tmp2, tmp3, 7);
+ PCKEV_ST_SB(tmp2, tmp3, dst + 32 + dst_stride);
+
+ ILVR_B2_UB(src10, src9, src11, src10, vec4, vec6);
+ ILVL_B2_UB(src10, src9, src11, src10, vec5, vec7);
+ DOTP_UB2_UH(vec4, vec5, filt0, filt0, tmp4, tmp5);
+ SRARI_H2_UH(tmp4, tmp5, FILTER_BITS);
+ SAT_UH2_UH(tmp4, tmp5, 7);
+ PCKEV_ST_SB(tmp4, tmp5, dst + 48);
+
+ DOTP_UB2_UH(vec6, vec7, filt0, filt0, tmp6, tmp7);
+ SRARI_H2_UH(tmp6, tmp7, FILTER_BITS);
+ SAT_UH2_UH(tmp6, tmp7, 7);
+ PCKEV_ST_SB(tmp6, tmp7, dst + 48 + dst_stride);
dst += (2 * dst_stride);
src0 = src2;