Chromium Code Reviews

Unified Diff: source/libvpx/vp9/common/mips/msa/vp9_convolve8_horiz_msa.c

Issue 1169543007: libvpx: Pull from upstream (Closed)
Base URL: https://chromium.googlesource.com/chromium/deps/libvpx.git@master
Patch Set: Created 5 years, 6 months ago
Index: source/libvpx/vp9/common/mips/msa/vp9_convolve8_horiz_msa.c
diff --git a/source/libvpx/vp9/common/mips/msa/vp9_convolve8_horiz_msa.c b/source/libvpx/vp9/common/mips/msa/vp9_convolve8_horiz_msa.c
index e2247435e88da3564c8baba8f9604198e12a52c6..f175bf9b66398aa1af5475a860bf02f1bae9a8e7 100644
--- a/source/libvpx/vp9/common/mips/msa/vp9_convolve8_horiz_msa.c
+++ b/source/libvpx/vp9/common/mips/msa/vp9_convolve8_horiz_msa.c
@@ -14,37 +14,29 @@
static void common_hz_8t_4x4_msa(const uint8_t *src, int32_t src_stride,
uint8_t *dst, int32_t dst_stride,
int8_t *filter) {
- v16i8 filt0, filt1, filt2, filt3;
- v16i8 src0, src1, src2, src3;
- v16u8 mask0, mask1, mask2, mask3;
+ v16u8 mask0, mask1, mask2, mask3, out;
+ v16i8 src0, src1, src2, src3, filt0, filt1, filt2, filt3;
v8i16 filt, out0, out1;
- mask0 = LOAD_UB(&mc_filt_mask_arr[16]);
-
+ mask0 = LD_UB(&mc_filt_mask_arr[16]);
src -= 3;
/* rearranging filter */
- filt = LOAD_SH(filter);
- filt0 = (v16i8)__msa_splati_h(filt, 0);
- filt1 = (v16i8)__msa_splati_h(filt, 1);
- filt2 = (v16i8)__msa_splati_h(filt, 2);
- filt3 = (v16i8)__msa_splati_h(filt, 3);
+ filt = LD_SH(filter);
+ SPLATI_H4_SB(filt, 0, 1, 2, 3, filt0, filt1, filt2, filt3);
mask1 = mask0 + 2;
mask2 = mask0 + 4;
mask3 = mask0 + 6;
- LOAD_4VECS_SB(src, src_stride, src0, src1, src2, src3);
-
- XORI_B_4VECS_SB(src0, src1, src2, src3, src0, src1, src2, src3, 128);
-
+ LD_SB4(src, src_stride, src0, src1, src2, src3);
+ XORI_B4_128_SB(src0, src1, src2, src3);
HORIZ_8TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2, mask3,
filt0, filt1, filt2, filt3, out0, out1);
-
- out0 = SRARI_SATURATE_SIGNED_H(out0, FILTER_BITS, 7);
- out1 = SRARI_SATURATE_SIGNED_H(out1, FILTER_BITS, 7);
-
- PCKEV_2B_XORI128_STORE_4_BYTES_4(out0, out1, dst, dst_stride);
+ SRARI_H2_SH(out0, out1, FILTER_BITS);
+ SAT_SH2_SH(out0, out1, 7);
+ out = PCKEV_XORI128_UB(out0, out1);
+ ST4x4_UB(out, out, 0, 1, 2, 3, dst, dst_stride);
}
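
The new epilogue (SRARI_H2_SH, SAT_SH2_SH, PCKEV_XORI128_UB) has the same per-pixel effect as the removed SRARI_SATURATE_SIGNED_H / PCKEV_2B_XORI128 sequence. A scalar C sketch of the assumed semantics, not the macro definitions themselves:

    #include <stdint.h>

    /* One output pixel through the new epilogue: SRARI rounds and
     * arithmetic-shifts right by FILTER_BITS, SAT_SH*(.., 7) clamps to the
     * signed 8-bit range, and PCKEV_XORI128_UB packs the low byte of each
     * halfword and XORs with 128, undoing the XORI_B4_128_SB applied to the
     * sources so that signed multiplies could be used. */
    static uint8_t round_sat_pack(int16_t acc, int shift) {
      int32_t r = (acc + (1 << (shift - 1))) >> shift;  /* srari */
      if (r > 127) r = 127;                             /* sat_s, 7 */
      if (r < -128) r = -128;
      return (uint8_t)((r & 0xff) ^ 128);               /* pckev + xori 128 */
    }
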
static void common_hz_8t_4x8_msa(const uint8_t *src, int32_t src_stride,
@@ -52,47 +44,36 @@ static void common_hz_8t_4x8_msa(const uint8_t *src, int32_t src_stride,
int8_t *filter) {
v16i8 filt0, filt1, filt2, filt3;
v16i8 src0, src1, src2, src3;
- v16u8 mask0, mask1, mask2, mask3;
+ v16u8 mask0, mask1, mask2, mask3, out;
v8i16 filt, out0, out1, out2, out3;
- mask0 = LOAD_UB(&mc_filt_mask_arr[16]);
-
+ mask0 = LD_UB(&mc_filt_mask_arr[16]);
src -= 3;
/* rearranging filter */
- filt = LOAD_SH(filter);
- filt0 = (v16i8)__msa_splati_h(filt, 0);
- filt1 = (v16i8)__msa_splati_h(filt, 1);
- filt2 = (v16i8)__msa_splati_h(filt, 2);
- filt3 = (v16i8)__msa_splati_h(filt, 3);
+ filt = LD_SH(filter);
+ SPLATI_H4_SB(filt, 0, 1, 2, 3, filt0, filt1, filt2, filt3);
mask1 = mask0 + 2;
mask2 = mask0 + 4;
mask3 = mask0 + 6;
- LOAD_4VECS_SB(src, src_stride, src0, src1, src2, src3);
+ LD_SB4(src, src_stride, src0, src1, src2, src3);
+ XORI_B4_128_SB(src0, src1, src2, src3);
src += (4 * src_stride);
-
- XORI_B_4VECS_SB(src0, src1, src2, src3, src0, src1, src2, src3, 128);
-
HORIZ_8TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2, mask3,
filt0, filt1, filt2, filt3, out0, out1);
-
- LOAD_4VECS_SB(src, src_stride, src0, src1, src2, src3);
-
- XORI_B_4VECS_SB(src0, src1, src2, src3, src0, src1, src2, src3, 128);
-
+ LD_SB4(src, src_stride, src0, src1, src2, src3);
+ XORI_B4_128_SB(src0, src1, src2, src3);
HORIZ_8TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2, mask3,
filt0, filt1, filt2, filt3, out2, out3);
-
- out0 = SRARI_SATURATE_SIGNED_H(out0, FILTER_BITS, 7);
- out1 = SRARI_SATURATE_SIGNED_H(out1, FILTER_BITS, 7);
- out2 = SRARI_SATURATE_SIGNED_H(out2, FILTER_BITS, 7);
- out3 = SRARI_SATURATE_SIGNED_H(out3, FILTER_BITS, 7);
-
- PCKEV_2B_XORI128_STORE_4_BYTES_4(out0, out1, dst, dst_stride);
+ SRARI_H4_SH(out0, out1, out2, out3, FILTER_BITS);
+ SAT_SH4_SH(out0, out1, out2, out3, 7);
+ out = PCKEV_XORI128_UB(out0, out1);
+ ST4x4_UB(out, out, 0, 1, 2, 3, dst, dst_stride);
dst += (4 * dst_stride);
- PCKEV_2B_XORI128_STORE_4_BYTES_4(out2, out3, dst, dst_stride);
+ out = PCKEV_XORI128_UB(out2, out3);
+ ST4x4_UB(out, out, 0, 1, 2, 3, dst, dst_stride);
}
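
ST4x4_UB replaces the per-word __msa_copy_u_w + STORE_WORD stores (the removed form is still visible in the 2t hunks below). A scalar sketch of the assumed lane selection:

    #include <stdint.h>
    #include <string.h>

    /* Sketch of ST4x4_UB(in0, in1, i0, i1, i2, i3, dst, stride): four 32-bit
     * lanes, two chosen from each input vector, become four 4-pixel rows.
     * ST4x4_UB(out, out, 0, 1, 2, 3, ...) above thus writes lanes 0..3 of a
     * single vector to four consecutive rows. */
    static void st4x4_model(const uint32_t in0[4], const uint32_t in1[4],
                            int i0, int i1, int i2, int i3,
                            uint8_t *dst, int32_t dst_stride) {
      memcpy(dst + 0 * dst_stride, &in0[i0], 4);
      memcpy(dst + 1 * dst_stride, &in0[i1], 4);
      memcpy(dst + 2 * dst_stride, &in1[i2], 4);
      memcpy(dst + 3 * dst_stride, &in1[i3], 4);
    }
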
static void common_hz_8t_4w_msa(const uint8_t *src, int32_t src_stride,
@@ -108,82 +89,64 @@ static void common_hz_8t_4w_msa(const uint8_t *src, int32_t src_stride,
static void common_hz_8t_8x4_msa(const uint8_t *src, int32_t src_stride,
uint8_t *dst, int32_t dst_stride,
int8_t *filter) {
- v16i8 filt0, filt1, filt2, filt3;
- v16i8 src0, src1, src2, src3;
- v16u8 mask0, mask1, mask2, mask3;
+ v16i8 src0, src1, src2, src3, filt0, filt1, filt2, filt3;
+ v16u8 mask0, mask1, mask2, mask3, tmp0, tmp1;
v8i16 filt, out0, out1, out2, out3;
- mask0 = LOAD_UB(&mc_filt_mask_arr[0]);
-
+ mask0 = LD_UB(&mc_filt_mask_arr[0]);
src -= 3;
/* rearranging filter */
- filt = LOAD_SH(filter);
- filt0 = (v16i8)__msa_splati_h(filt, 0);
- filt1 = (v16i8)__msa_splati_h(filt, 1);
- filt2 = (v16i8)__msa_splati_h(filt, 2);
- filt3 = (v16i8)__msa_splati_h(filt, 3);
+ filt = LD_SH(filter);
+ SPLATI_H4_SB(filt, 0, 1, 2, 3, filt0, filt1, filt2, filt3);
mask1 = mask0 + 2;
mask2 = mask0 + 4;
mask3 = mask0 + 6;
- LOAD_4VECS_SB(src, src_stride, src0, src1, src2, src3);
-
- XORI_B_4VECS_SB(src0, src1, src2, src3, src0, src1, src2, src3, 128);
-
+ LD_SB4(src, src_stride, src0, src1, src2, src3);
+ XORI_B4_128_SB(src0, src1, src2, src3);
HORIZ_8TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2, mask3,
filt0, filt1, filt2, filt3, out0, out1, out2,
out3);
-
- out0 = SRARI_SATURATE_SIGNED_H(out0, FILTER_BITS, 7);
- out1 = SRARI_SATURATE_SIGNED_H(out1, FILTER_BITS, 7);
- out2 = SRARI_SATURATE_SIGNED_H(out2, FILTER_BITS, 7);
- out3 = SRARI_SATURATE_SIGNED_H(out3, FILTER_BITS, 7);
-
- PCKEV_B_4_XORI128_STORE_8_BYTES_4(out0, out1, out2, out3, dst, dst_stride);
+ SRARI_H4_SH(out0, out1, out2, out3, FILTER_BITS);
+ SAT_SH4_SH(out0, out1, out2, out3, 7);
+ tmp0 = PCKEV_XORI128_UB(out0, out1);
+ tmp1 = PCKEV_XORI128_UB(out2, out3);
+ ST8x4_UB(tmp0, tmp1, dst, dst_stride);
}
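
Likewise for the 8-wide stores: a scalar sketch of the assumed ST8x4_UB semantics, where the two 8-byte halves of each packed vector become four 8-pixel rows:

    #include <stdint.h>
    #include <string.h>

    /* tmp0 holds packed rows 0 and 1, tmp1 holds packed rows 2 and 3. */
    static void st8x4_model(const uint8_t in0[16], const uint8_t in1[16],
                            uint8_t *dst, int32_t dst_stride) {
      memcpy(dst + 0 * dst_stride, in0 + 0, 8);
      memcpy(dst + 1 * dst_stride, in0 + 8, 8);
      memcpy(dst + 2 * dst_stride, in1 + 0, 8);
      memcpy(dst + 3 * dst_stride, in1 + 8, 8);
    }
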
static void common_hz_8t_8x8mult_msa(const uint8_t *src, int32_t src_stride,
uint8_t *dst, int32_t dst_stride,
int8_t *filter, int32_t height) {
uint32_t loop_cnt;
- v16i8 filt0, filt1, filt2, filt3;
- v16i8 src0, src1, src2, src3;
- v16u8 mask0, mask1, mask2, mask3;
+ v16i8 src0, src1, src2, src3, filt0, filt1, filt2, filt3;
+ v16u8 mask0, mask1, mask2, mask3, tmp0, tmp1;
v8i16 filt, out0, out1, out2, out3;
- mask0 = LOAD_UB(&mc_filt_mask_arr[0]);
-
+ mask0 = LD_UB(&mc_filt_mask_arr[0]);
src -= 3;
/* rearranging filter */
- filt = LOAD_SH(filter);
- filt0 = (v16i8)__msa_splati_h(filt, 0);
- filt1 = (v16i8)__msa_splati_h(filt, 1);
- filt2 = (v16i8)__msa_splati_h(filt, 2);
- filt3 = (v16i8)__msa_splati_h(filt, 3);
+ filt = LD_SH(filter);
+ SPLATI_H4_SB(filt, 0, 1, 2, 3, filt0, filt1, filt2, filt3);
mask1 = mask0 + 2;
mask2 = mask0 + 4;
mask3 = mask0 + 6;
for (loop_cnt = (height >> 2); loop_cnt--;) {
- LOAD_4VECS_SB(src, src_stride, src0, src1, src2, src3);
+ LD_SB4(src, src_stride, src0, src1, src2, src3);
+ XORI_B4_128_SB(src0, src1, src2, src3);
src += (4 * src_stride);
-
- XORI_B_4VECS_SB(src0, src1, src2, src3, src0, src1, src2, src3, 128);
-
HORIZ_8TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2,
mask3, filt0, filt1, filt2, filt3, out0, out1,
out2, out3);
-
- out0 = SRARI_SATURATE_SIGNED_H(out0, FILTER_BITS, 7);
- out1 = SRARI_SATURATE_SIGNED_H(out1, FILTER_BITS, 7);
- out2 = SRARI_SATURATE_SIGNED_H(out2, FILTER_BITS, 7);
- out3 = SRARI_SATURATE_SIGNED_H(out3, FILTER_BITS, 7);
-
- PCKEV_B_4_XORI128_STORE_8_BYTES_4(out0, out1, out2, out3, dst, dst_stride);
+ SRARI_H4_SH(out0, out1, out2, out3, FILTER_BITS);
+ SAT_SH4_SH(out0, out1, out2, out3, 7);
+ tmp0 = PCKEV_XORI128_UB(out0, out1);
+ tmp1 = PCKEV_XORI128_UB(out2, out3);
+ ST8x4_UB(tmp0, tmp1, dst, dst_stride);
dst += (4 * dst_stride);
}
}
@@ -202,48 +165,36 @@ static void common_hz_8t_16w_msa(const uint8_t *src, int32_t src_stride,
uint8_t *dst, int32_t dst_stride,
int8_t *filter, int32_t height) {
uint32_t loop_cnt;
- v16i8 src0, src1, src2, src3;
- v16i8 filt0, filt1, filt2, filt3;
- v16u8 mask0, mask1, mask2, mask3;
+ v16i8 src0, src1, src2, src3, filt0, filt1, filt2, filt3;
+ v16u8 mask0, mask1, mask2, mask3, out;
v8i16 filt, out0, out1, out2, out3;
- mask0 = LOAD_UB(&mc_filt_mask_arr[0]);
-
+ mask0 = LD_UB(&mc_filt_mask_arr[0]);
src -= 3;
/* rearranging filter */
- filt = LOAD_SH(filter);
- filt0 = (v16i8)__msa_splati_h(filt, 0);
- filt1 = (v16i8)__msa_splati_h(filt, 1);
- filt2 = (v16i8)__msa_splati_h(filt, 2);
- filt3 = (v16i8)__msa_splati_h(filt, 3);
+ filt = LD_SH(filter);
+ SPLATI_H4_SB(filt, 0, 1, 2, 3, filt0, filt1, filt2, filt3);
mask1 = mask0 + 2;
mask2 = mask0 + 4;
mask3 = mask0 + 6;
for (loop_cnt = (height >> 1); loop_cnt--;) {
- src0 = LOAD_SB(src);
- src1 = LOAD_SB(src + 8);
- src += src_stride;
- src2 = LOAD_SB(src);
- src3 = LOAD_SB(src + 8);
- src += src_stride;
-
- XORI_B_4VECS_SB(src0, src1, src2, src3, src0, src1, src2, src3, 128);
-
+ LD_SB2(src, src_stride, src0, src2);
+ LD_SB2(src + 8, src_stride, src1, src3);
+ XORI_B4_128_SB(src0, src1, src2, src3);
+ src += (2 * src_stride);
HORIZ_8TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2,
mask3, filt0, filt1, filt2, filt3, out0, out1,
out2, out3);
-
- out0 = SRARI_SATURATE_SIGNED_H(out0, FILTER_BITS, 7);
- out1 = SRARI_SATURATE_SIGNED_H(out1, FILTER_BITS, 7);
- out2 = SRARI_SATURATE_SIGNED_H(out2, FILTER_BITS, 7);
- out3 = SRARI_SATURATE_SIGNED_H(out3, FILTER_BITS, 7);
-
- PCKEV_B_XORI128_STORE_VEC(out1, out0, dst);
+ SRARI_H4_SH(out0, out1, out2, out3, FILTER_BITS);
+ SAT_SH4_SH(out0, out1, out2, out3, 7);
+ out = PCKEV_XORI128_UB(out0, out1);
+ ST_UB(out, dst);
dst += dst_stride;
- PCKEV_B_XORI128_STORE_VEC(out3, out2, dst);
+ out = PCKEV_XORI128_UB(out2, out3);
+ ST_UB(out, dst);
dst += dst_stride;
}
}
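
Here the four sequential loads per row pair collapse into two strided LD_SB2 calls, the second pair offset by 8 bytes so the 8-tap window for output columns 8..15 stays in-register. A scalar sketch of the access pattern (src already carries the -3 tap adjustment):

    #include <stdint.h>
    #include <string.h>

    /* s0/s2 hold bytes 0..15 of two consecutive rows; s1/s3 hold bytes
     * 8..23, overlapping so an 8-tap filter (reading bytes c..c+7 of the
     * adjusted row for output column c) has all taps for columns 8..15. */
    static void ld_sb2_pair_model(const uint8_t *src, int32_t src_stride,
                                  uint8_t s0[16], uint8_t s1[16],
                                  uint8_t s2[16], uint8_t s3[16]) {
      memcpy(s0, src + 0 * src_stride + 0, 16);  /* row 0, bytes 0..15 */
      memcpy(s1, src + 0 * src_stride + 8, 16);  /* row 0, bytes 8..23 */
      memcpy(s2, src + 1 * src_stride + 0, 16);  /* row 1, bytes 0..15 */
      memcpy(s3, src + 1 * src_stride + 8, 16);  /* row 1, bytes 8..23 */
    }
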
@@ -252,68 +203,56 @@ static void common_hz_8t_32w_msa(const uint8_t *src, int32_t src_stride,
uint8_t *dst, int32_t dst_stride,
int8_t *filter, int32_t height) {
uint32_t loop_cnt;
- v16i8 src0, src1, src2, src3;
- v16i8 filt0, filt1, filt2, filt3;
- v16u8 mask0, mask1, mask2, mask3;
+ v16i8 src0, src1, src2, src3, filt0, filt1, filt2, filt3;
+ v16u8 mask0, mask1, mask2, mask3, out;
v8i16 filt, out0, out1, out2, out3;
- mask0 = LOAD_UB(&mc_filt_mask_arr[0]);
-
+ mask0 = LD_UB(&mc_filt_mask_arr[0]);
src -= 3;
/* rearranging filter */
- filt = LOAD_SH(filter);
- filt0 = (v16i8)__msa_splati_h(filt, 0);
- filt1 = (v16i8)__msa_splati_h(filt, 1);
- filt2 = (v16i8)__msa_splati_h(filt, 2);
- filt3 = (v16i8)__msa_splati_h(filt, 3);
+ filt = LD_SH(filter);
+ SPLATI_H4_SB(filt, 0, 1, 2, 3, filt0, filt1, filt2, filt3);
mask1 = mask0 + 2;
mask2 = mask0 + 4;
mask3 = mask0 + 6;
for (loop_cnt = (height >> 1); loop_cnt--;) {
- src0 = LOAD_SB(src);
- src2 = LOAD_SB(src + 16);
- src3 = LOAD_SB(src + 24);
- src1 = __msa_sld_b((v16i8)src2, (v16i8)src0, 8);
+ src0 = LD_SB(src);
+ src2 = LD_SB(src + 16);
+ src3 = LD_SB(src + 24);
+ src1 = __msa_sldi_b(src2, src0, 8);
src += src_stride;
-
- XORI_B_4VECS_SB(src0, src1, src2, src3, src0, src1, src2, src3, 128);
-
+ XORI_B4_128_SB(src0, src1, src2, src3);
HORIZ_8TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2,
mask3, filt0, filt1, filt2, filt3, out0, out1,
out2, out3);
+ SRARI_H4_SH(out0, out1, out2, out3, FILTER_BITS);
+ SAT_SH4_SH(out0, out1, out2, out3, 7);
- out0 = SRARI_SATURATE_SIGNED_H(out0, FILTER_BITS, 7);
- out1 = SRARI_SATURATE_SIGNED_H(out1, FILTER_BITS, 7);
- out2 = SRARI_SATURATE_SIGNED_H(out2, FILTER_BITS, 7);
- out3 = SRARI_SATURATE_SIGNED_H(out3, FILTER_BITS, 7);
-
- src0 = LOAD_SB(src);
- src2 = LOAD_SB(src + 16);
- src3 = LOAD_SB(src + 24);
- src1 = __msa_sld_b((v16i8)src2, (v16i8)src0, 8);
+ src0 = LD_SB(src);
+ src2 = LD_SB(src + 16);
+ src3 = LD_SB(src + 24);
+ src1 = __msa_sldi_b(src2, src0, 8);
+ src += src_stride;
- PCKEV_B_XORI128_STORE_VEC(out1, out0, dst);
- PCKEV_B_XORI128_STORE_VEC(out3, out2, (dst + 16));
+ out = PCKEV_XORI128_UB(out0, out1);
+ ST_UB(out, dst);
+ out = PCKEV_XORI128_UB(out2, out3);
+ ST_UB(out, dst + 16);
dst += dst_stride;
- XORI_B_4VECS_SB(src0, src1, src2, src3, src0, src1, src2, src3, 128);
-
+ XORI_B4_128_SB(src0, src1, src2, src3);
HORIZ_8TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2,
mask3, filt0, filt1, filt2, filt3, out0, out1,
out2, out3);
-
- out0 = SRARI_SATURATE_SIGNED_H(out0, FILTER_BITS, 7);
- out1 = SRARI_SATURATE_SIGNED_H(out1, FILTER_BITS, 7);
- out2 = SRARI_SATURATE_SIGNED_H(out2, FILTER_BITS, 7);
- out3 = SRARI_SATURATE_SIGNED_H(out3, FILTER_BITS, 7);
-
- PCKEV_B_XORI128_STORE_VEC(out1, out0, dst);
- PCKEV_B_XORI128_STORE_VEC(out3, out2, (dst + 16));
-
- src += src_stride;
+ SRARI_H4_SH(out0, out1, out2, out3, FILTER_BITS);
+ SAT_SH4_SH(out0, out1, out2, out3, 7);
+ out = PCKEV_XORI128_UB(out0, out1);
+ ST_UB(out, dst);
+ out = PCKEV_XORI128_UB(out2, out3);
+ ST_UB(out, dst + 16);
dst += dst_stride;
}
}
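
__msa_sldi_b(src2, src0, 8) splices the middle 16 bytes out of two aligned loads instead of issuing a third overlapping load (and drops the casts the old __msa_sld_b call needed). A scalar sketch of the assumed byte ordering:

    #include <stdint.h>

    /* src1 = __msa_sldi_b(src2, src0, 8): with src0 holding row bytes 0..15
     * and src2 holding bytes 16..31, the result holds bytes 8..23. */
    static void sldi_b8_model(const uint8_t hi[16] /* src2 */,
                              const uint8_t lo[16] /* src0 */,
                              uint8_t out[16]      /* src1 */) {
      int i;
      for (i = 0; i < 8; ++i) {
        out[i] = lo[i + 8];
        out[i + 8] = hi[i];
      }
    }
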
@@ -321,50 +260,55 @@ static void common_hz_8t_32w_msa(const uint8_t *src, int32_t src_stride,
static void common_hz_8t_64w_msa(const uint8_t *src, int32_t src_stride,
uint8_t *dst, int32_t dst_stride,
int8_t *filter, int32_t height) {
- uint32_t loop_cnt, cnt;
- v16i8 src0, src1, src2, src3;
- v16i8 filt0, filt1, filt2, filt3;
- v16u8 mask0, mask1, mask2, mask3;
+ int32_t loop_cnt;
+ v16i8 src0, src1, src2, src3, filt0, filt1, filt2, filt3;
+ v16u8 mask0, mask1, mask2, mask3, out;
v8i16 filt, out0, out1, out2, out3;
- mask0 = LOAD_UB(&mc_filt_mask_arr[0]);
-
+ mask0 = LD_UB(&mc_filt_mask_arr[0]);
src -= 3;
/* rearranging filter */
- filt = LOAD_SH(filter);
- filt0 = (v16i8)__msa_splati_h(filt, 0);
- filt1 = (v16i8)__msa_splati_h(filt, 1);
- filt2 = (v16i8)__msa_splati_h(filt, 2);
- filt3 = (v16i8)__msa_splati_h(filt, 3);
+ filt = LD_SH(filter);
+ SPLATI_H4_SB(filt, 0, 1, 2, 3, filt0, filt1, filt2, filt3);
mask1 = mask0 + 2;
mask2 = mask0 + 4;
mask3 = mask0 + 6;
for (loop_cnt = height; loop_cnt--;) {
- for (cnt = 0; cnt < 2; ++cnt) {
- src0 = LOAD_SB(&src[cnt << 5]);
- src2 = LOAD_SB(&src[16 + (cnt << 5)]);
- src3 = LOAD_SB(&src[24 + (cnt << 5)]);
- src1 = __msa_sld_b((v16i8)src2, (v16i8)src0, 8);
-
- XORI_B_4VECS_SB(src0, src1, src2, src3, src0, src1, src2, src3, 128);
-
- HORIZ_8TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2,
- mask3, filt0, filt1, filt2, filt3, out0, out1,
- out2, out3);
-
- out0 = SRARI_SATURATE_SIGNED_H(out0, FILTER_BITS, 7);
- out1 = SRARI_SATURATE_SIGNED_H(out1, FILTER_BITS, 7);
- out2 = SRARI_SATURATE_SIGNED_H(out2, FILTER_BITS, 7);
- out3 = SRARI_SATURATE_SIGNED_H(out3, FILTER_BITS, 7);
-
- PCKEV_B_XORI128_STORE_VEC(out1, out0, &dst[cnt << 5]);
- PCKEV_B_XORI128_STORE_VEC(out3, out2, &dst[16 + (cnt << 5)]);
- }
+ src0 = LD_SB(src);
+ src2 = LD_SB(src + 16);
+ src3 = LD_SB(src + 24);
+ src1 = __msa_sldi_b(src2, src0, 8);
+ XORI_B4_128_SB(src0, src1, src2, src3);
+ HORIZ_8TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2,
+ mask3, filt0, filt1, filt2, filt3, out0, out1,
+ out2, out3);
+ SRARI_H4_SH(out0, out1, out2, out3, FILTER_BITS);
+ SAT_SH4_SH(out0, out1, out2, out3, 7);
+ out = PCKEV_XORI128_UB(out0, out1);
+ ST_UB(out, dst);
+ out = PCKEV_XORI128_UB(out2, out3);
+ ST_UB(out, dst + 16);
+
+ src0 = LD_SB(src + 32);
+ src2 = LD_SB(src + 48);
+ src3 = LD_SB(src + 56);
+ src1 = __msa_sldi_b(src2, src0, 8);
src += src_stride;
+
+ XORI_B4_128_SB(src0, src1, src2, src3);
+ HORIZ_8TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2,
+ mask3, filt0, filt1, filt2, filt3, out0, out1,
+ out2, out3);
+ SRARI_H4_SH(out0, out1, out2, out3, FILTER_BITS);
+ SAT_SH4_SH(out0, out1, out2, out3, 7);
+ out = PCKEV_XORI128_UB(out0, out1);
+ ST_UB(out, dst + 32);
+ out = PCKEV_XORI128_UB(out2, out3);
+ ST_UB(out, dst + 48);
dst += dst_stride;
}
}
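
Structurally, the 64-wide rewrite unrolls the old two-iteration cnt loop into straight-line halves. The sketch below shows the shape only; filter_32_columns is a hypothetical stand-in for the load/filter/round/store body above:

    #include <stdint.h>

    /* Hypothetical helper standing in for one 32-column slice (loads,
     * HORIZ_8TAP_8WID_4VECS_FILT, SRARI/SAT, PCKEV + ST_UB); body elided. */
    static void filter_32_columns(const uint8_t *src, uint8_t *dst) {
      (void)src;
      (void)dst;
    }

    static void hz_8t_64w_shape(const uint8_t *src, int32_t src_stride,
                                uint8_t *dst, int32_t dst_stride,
                                int32_t height) {
      int32_t loop_cnt;
      for (loop_cnt = height; loop_cnt--;) {
        filter_32_columns(src, dst);            /* was the cnt == 0 pass */
        filter_32_columns(src + 32, dst + 32);  /* was the cnt == 1 pass */
        src += src_stride;
        dst += dst_stride;
      }
    }
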
@@ -372,124 +316,55 @@ static void common_hz_8t_64w_msa(const uint8_t *src, int32_t src_stride,
static void common_hz_2t_4x4_msa(const uint8_t *src, int32_t src_stride,
uint8_t *dst, int32_t dst_stride,
int8_t *filter) {
- uint32_t out0, out1, out2, out3;
v16i8 src0, src1, src2, src3, mask;
- v16u8 vec0, vec1, filt0;
- v16i8 res0, res1;
+ v16u8 filt0, vec0, vec1, res0, res1;
v8u16 vec2, vec3, filt, const255;
- mask = LOAD_SB(&mc_filt_mask_arr[16]);
+ mask = LD_SB(&mc_filt_mask_arr[16]);
/* rearranging filter */
- filt = LOAD_UH(filter);
- filt0 = (v16u8)__msa_splati_h((v8i16)filt, 0);
-
- const255 = (v8u16)__msa_ldi_h(255);
-
- LOAD_4VECS_SB(src, src_stride, src0, src1, src2, src3);
-
- vec0 = (v16u8)__msa_vshf_b(mask, src1, src0);
- vec1 = (v16u8)__msa_vshf_b(mask, src3, src2);
-
- vec2 = __msa_dotp_u_h(vec0, filt0);
- vec3 = __msa_dotp_u_h(vec1, filt0);
-
- vec2 = (v8u16)__msa_srari_h((v8i16)vec2, FILTER_BITS);
- vec3 = (v8u16)__msa_srari_h((v8i16)vec3, FILTER_BITS);
-
- vec2 = __msa_min_u_h(vec2, const255);
- vec3 = __msa_min_u_h(vec3, const255);
-
- res0 = __msa_pckev_b((v16i8)vec2, (v16i8)vec2);
- res1 = __msa_pckev_b((v16i8)vec3, (v16i8)vec3);
-
- out0 = __msa_copy_u_w((v4i32)res0, 0);
- out1 = __msa_copy_u_w((v4i32)res0, 1);
- out2 = __msa_copy_u_w((v4i32)res1, 0);
- out3 = __msa_copy_u_w((v4i32)res1, 1);
-
- STORE_WORD(dst, out0);
- dst += dst_stride;
- STORE_WORD(dst, out1);
- dst += dst_stride;
- STORE_WORD(dst, out2);
- dst += dst_stride;
- STORE_WORD(dst, out3);
+ filt = LD_UH(filter);
+ filt0 = (v16u8) __msa_splati_h((v8i16) filt, 0);
+
+ const255 = (v8u16) __msa_ldi_h(255);
+
+ LD_SB4(src, src_stride, src0, src1, src2, src3);
+ VSHF_B2_UB(src0, src1, src2, src3, mask, mask, vec0, vec1);
+ DOTP_UB2_UH(vec0, vec1, filt0, filt0, vec2, vec3);
+ SRARI_H2_UH(vec2, vec3, FILTER_BITS);
+ MIN_UH2_UH(vec2, vec3, const255);
+ PCKEV_B2_UB(vec2, vec2, vec3, vec3, res0, res1);
+ ST4x4_UB(res0, res1, 0, 1, 0, 1, dst, dst_stride);
}
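
The 2-tap (bilinear) path stays in the unsigned domain: VSHF_B2_UB gathers adjacent byte pairs, DOTP_UB2_UH takes an unsigned dot product against the splatted tap pair, and the rounded result is clamped with a min against 255 rather than signed saturation. A per-pixel scalar sketch (taps assumed non-negative, as bilinear taps are):

    #include <stdint.h>

    static uint8_t bilinear_pixel(const uint8_t *p, uint8_t tap0,
                                  uint8_t tap1, int filter_bits) {
      /* dotp_u_h on one byte pair, srari_h, then min_u_h against 255 */
      uint32_t acc = (uint32_t)p[0] * tap0 + (uint32_t)p[1] * tap1;
      uint32_t r = (acc + (1u << (filter_bits - 1))) >> filter_bits;
      return (uint8_t)(r > 255 ? 255 : r);
    }
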
static void common_hz_2t_4x8_msa(const uint8_t *src, int32_t src_stride,
uint8_t *dst, int32_t dst_stride,
int8_t *filter) {
- uint32_t out0, out1, out2, out3;
- v16u8 filt0;
+ v16u8 vec0, vec1, vec2, vec3, filt0;
v16i8 src0, src1, src2, src3, src4, src5, src6, src7, mask;
- v16u8 vec0, vec1, vec2, vec3;
- v8u16 vec4, vec5, vec6, vec7;
v16i8 res0, res1, res2, res3;
- v8u16 filt, const255;
+ v8u16 vec4, vec5, vec6, vec7, filt, const255;
- mask = LOAD_SB(&mc_filt_mask_arr[16]);
+ mask = LD_SB(&mc_filt_mask_arr[16]);
/* rearranging filter */
- filt = LOAD_UH(filter);
- filt0 = (v16u8)__msa_splati_h((v8i16)filt, 0);
-
- const255 = (v8u16)__msa_ldi_h(255);
-
- LOAD_8VECS_SB(src, src_stride,
- src0, src1, src2, src3, src4, src5, src6, src7);
-
- vec0 = (v16u8)__msa_vshf_b(mask, src1, src0);
- vec1 = (v16u8)__msa_vshf_b(mask, src3, src2);
- vec2 = (v16u8)__msa_vshf_b(mask, src5, src4);
- vec3 = (v16u8)__msa_vshf_b(mask, src7, src6);
-
- vec4 = __msa_dotp_u_h(vec0, filt0);
- vec5 = __msa_dotp_u_h(vec1, filt0);
- vec6 = __msa_dotp_u_h(vec2, filt0);
- vec7 = __msa_dotp_u_h(vec3, filt0);
-
- vec4 = (v8u16)__msa_srari_h((v8i16)vec4, FILTER_BITS);
- vec5 = (v8u16)__msa_srari_h((v8i16)vec5, FILTER_BITS);
- vec6 = (v8u16)__msa_srari_h((v8i16)vec6, FILTER_BITS);
- vec7 = (v8u16)__msa_srari_h((v8i16)vec7, FILTER_BITS);
-
- vec4 = __msa_min_u_h(vec4, const255);
- vec5 = __msa_min_u_h(vec5, const255);
- vec6 = __msa_min_u_h(vec6, const255);
- vec7 = __msa_min_u_h(vec7, const255);
-
- res0 = __msa_pckev_b((v16i8)vec4, (v16i8)vec4);
- res1 = __msa_pckev_b((v16i8)vec5, (v16i8)vec5);
- res2 = __msa_pckev_b((v16i8)vec6, (v16i8)vec6);
- res3 = __msa_pckev_b((v16i8)vec7, (v16i8)vec7);
-
- out0 = __msa_copy_u_w((v4i32)res0, 0);
- out1 = __msa_copy_u_w((v4i32)res0, 1);
- out2 = __msa_copy_u_w((v4i32)res1, 0);
- out3 = __msa_copy_u_w((v4i32)res1, 1);
-
- STORE_WORD(dst, out0);
- dst += dst_stride;
- STORE_WORD(dst, out1);
- dst += dst_stride;
- STORE_WORD(dst, out2);
- dst += dst_stride;
- STORE_WORD(dst, out3);
- dst += dst_stride;
-
- out0 = __msa_copy_u_w((v4i32)res2, 0);
- out1 = __msa_copy_u_w((v4i32)res2, 1);
- out2 = __msa_copy_u_w((v4i32)res3, 0);
- out3 = __msa_copy_u_w((v4i32)res3, 1);
-
- STORE_WORD(dst, out0);
- dst += dst_stride;
- STORE_WORD(dst, out1);
- dst += dst_stride;
- STORE_WORD(dst, out2);
- dst += dst_stride;
- STORE_WORD(dst, out3);
+ filt = LD_UH(filter);
+ filt0 = (v16u8) __msa_splati_h((v8i16) filt, 0);
+
+ const255 = (v8u16) __msa_ldi_h(255);
+
+ LD_SB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7);
+ VSHF_B2_UB(src0, src1, src2, src3, mask, mask, vec0, vec1);
+ VSHF_B2_UB(src4, src5, src6, src7, mask, mask, vec2, vec3);
+ DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, vec4, vec5,
+ vec6, vec7);
+ SRARI_H4_UH(vec4, vec5, vec6, vec7, FILTER_BITS);
+ MIN_UH4_UH(vec4, vec5, vec6, vec7, const255);
+ PCKEV_B4_SB(vec4, vec4, vec5, vec5, vec6, vec6, vec7, vec7, res0, res1,
+ res2, res3);
+ ST4x4_UB(res0, res1, 0, 1, 0, 1, dst, dst_stride);
+ dst += (4 * dst_stride);
+ ST4x4_UB(res2, res3, 0, 1, 0, 1, dst, dst_stride);
}
static void common_hz_2t_4w_msa(const uint8_t *src, int32_t src_stride,
@@ -507,149 +382,93 @@ static void common_hz_2t_8x4_msa(const uint8_t *src, int32_t src_stride,
int8_t *filter) {
v16u8 filt0;
v16i8 src0, src1, src2, src3, mask;
- v8u16 vec0, vec1, vec2, vec3;
- v8u16 out0, out1, out2, out3;
- v8u16 const255, filt;
+ v8u16 vec0, vec1, vec2, vec3, const255, filt;
- mask = LOAD_SB(&mc_filt_mask_arr[0]);
+ mask = LD_SB(&mc_filt_mask_arr[0]);
/* rearranging filter */
- filt = LOAD_UH(filter);
- filt0 = (v16u8)__msa_splati_h((v8i16)filt, 0);
-
- const255 = (v8u16)__msa_ldi_h(255);
-
- LOAD_4VECS_SB(src, src_stride, src0, src1, src2, src3);
-
- vec0 = (v8u16)__msa_vshf_b(mask, src0, src0);
- vec1 = (v8u16)__msa_vshf_b(mask, src1, src1);
- vec2 = (v8u16)__msa_vshf_b(mask, src2, src2);
- vec3 = (v8u16)__msa_vshf_b(mask, src3, src3);
-
- vec0 = __msa_dotp_u_h((v16u8)vec0, filt0);
- vec1 = __msa_dotp_u_h((v16u8)vec1, filt0);
- vec2 = __msa_dotp_u_h((v16u8)vec2, filt0);
- vec3 = __msa_dotp_u_h((v16u8)vec3, filt0);
-
- SRARI_H_4VECS_UH(vec0, vec1, vec2, vec3, vec0, vec1, vec2, vec3, FILTER_BITS);
-
- out0 = __msa_min_u_h(vec0, const255);
- out1 = __msa_min_u_h(vec1, const255);
- out2 = __msa_min_u_h(vec2, const255);
- out3 = __msa_min_u_h(vec3, const255);
-
- PCKEV_B_STORE_8_BYTES_4(out0, out1, out2, out3, dst, dst_stride);
+ filt = LD_UH(filter);
+ filt0 = (v16u8) __msa_splati_h((v8i16) filt, 0);
+
+ const255 = (v8u16) __msa_ldi_h(255);
+
+ LD_SB4(src, src_stride, src0, src1, src2, src3);
+ VSHF_B2_UH(src0, src0, src1, src1, mask, mask, vec0, vec1);
+ VSHF_B2_UH(src2, src2, src3, src3, mask, mask, vec2, vec3);
+ DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, vec0, vec1,
+ vec2, vec3);
+ SRARI_H4_UH(vec0, vec1, vec2, vec3, FILTER_BITS);
+ MIN_UH4_UH(vec0, vec1, vec2, vec3, const255);
+ PCKEV_B2_SB(vec1, vec0, vec3, vec2, src0, src1);
+ ST8x4_UB(src0, src1, dst, dst_stride);
}
static void common_hz_2t_8x8mult_msa(const uint8_t *src, int32_t src_stride,
uint8_t *dst, int32_t dst_stride,
int8_t *filter, int32_t height) {
v16u8 filt0;
- v16i8 src0, src1, src2, src3, mask;
- v8u16 vec0, vec1, vec2, vec3;
- v8u16 filt, const255;
+ v16i8 src0, src1, src2, src3, mask, out0, out1;
+ v8u16 vec0, vec1, vec2, vec3, filt, const255;
- mask = LOAD_SB(&mc_filt_mask_arr[0]);
+ mask = LD_SB(&mc_filt_mask_arr[0]);
/* rearranging filter */
- filt = LOAD_UH(filter);
- filt0 = (v16u8)__msa_splati_h((v8i16)filt, 0);
+ filt = LD_UH(filter);
+ filt0 = (v16u8) __msa_splati_h((v8i16) filt, 0);
- const255 = (v8u16)__msa_ldi_h(255);
+ const255 = (v8u16) __msa_ldi_h(255);
- LOAD_4VECS_SB(src, src_stride, src0, src1, src2, src3);
+ LD_SB4(src, src_stride, src0, src1, src2, src3);
src += (4 * src_stride);
- vec0 = (v8u16)__msa_vshf_b(mask, src0, src0);
- vec1 = (v8u16)__msa_vshf_b(mask, src1, src1);
- vec2 = (v8u16)__msa_vshf_b(mask, src2, src2);
- vec3 = (v8u16)__msa_vshf_b(mask, src3, src3);
-
- vec0 = __msa_dotp_u_h((v16u8)vec0, filt0);
- vec1 = __msa_dotp_u_h((v16u8)vec1, filt0);
- vec2 = __msa_dotp_u_h((v16u8)vec2, filt0);
- vec3 = __msa_dotp_u_h((v16u8)vec3, filt0);
+ VSHF_B2_UH(src0, src0, src1, src1, mask, mask, vec0, vec1);
+ VSHF_B2_UH(src2, src2, src3, src3, mask, mask, vec2, vec3);
+ DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, vec0, vec1,
+ vec2, vec3);
+ SRARI_H4_UH(vec0, vec1, vec2, vec3, FILTER_BITS);
+ MIN_UH4_UH(vec0, vec1, vec2, vec3, const255);
- SRARI_H_4VECS_UH(vec0, vec1, vec2, vec3, vec0, vec1, vec2, vec3, FILTER_BITS);
-
- vec0 = __msa_min_u_h(vec0, const255);
- vec1 = __msa_min_u_h(vec1, const255);
- vec2 = __msa_min_u_h(vec2, const255);
- vec3 = __msa_min_u_h(vec3, const255);
-
- LOAD_4VECS_SB(src, src_stride, src0, src1, src2, src3);
+ LD_SB4(src, src_stride, src0, src1, src2, src3);
src += (4 * src_stride);
- PCKEV_B_STORE_8_BYTES_4(vec0, vec1, vec2, vec3, dst, dst_stride);
+ PCKEV_B2_SB(vec1, vec0, vec3, vec2, out0, out1);
+ ST8x4_UB(out0, out1, dst, dst_stride);
dst += (4 * dst_stride);
- vec0 = (v8u16)__msa_vshf_b(mask, src0, src0);
- vec1 = (v8u16)__msa_vshf_b(mask, src1, src1);
- vec2 = (v8u16)__msa_vshf_b(mask, src2, src2);
- vec3 = (v8u16)__msa_vshf_b(mask, src3, src3);
-
- vec0 = __msa_dotp_u_h((v16u8)vec0, filt0);
- vec1 = __msa_dotp_u_h((v16u8)vec1, filt0);
- vec2 = __msa_dotp_u_h((v16u8)vec2, filt0);
- vec3 = __msa_dotp_u_h((v16u8)vec3, filt0);
-
- SRARI_H_4VECS_UH(vec0, vec1, vec2, vec3, vec0, vec1, vec2, vec3, FILTER_BITS);
-
- vec0 = __msa_min_u_h(vec0, const255);
- vec1 = __msa_min_u_h(vec1, const255);
- vec2 = __msa_min_u_h(vec2, const255);
- vec3 = __msa_min_u_h(vec3, const255);
-
- PCKEV_B_STORE_8_BYTES_4(vec0, vec1, vec2, vec3, dst, dst_stride);
+ VSHF_B2_UH(src0, src0, src1, src1, mask, mask, vec0, vec1);
+ VSHF_B2_UH(src2, src2, src3, src3, mask, mask, vec2, vec3);
+ DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, vec0, vec1,
+ vec2, vec3);
+ SRARI_H4_UH(vec0, vec1, vec2, vec3, FILTER_BITS);
+ MIN_UH4_UH(vec0, vec1, vec2, vec3, const255);
+ PCKEV_B2_SB(vec1, vec0, vec3, vec2, out0, out1);
+ ST8x4_UB(out0, out1, dst, dst_stride);
dst += (4 * dst_stride);
if (16 == height) {
- LOAD_4VECS_SB(src, src_stride, src0, src1, src2, src3);
+ LD_SB4(src, src_stride, src0, src1, src2, src3);
src += (4 * src_stride);
- vec0 = (v8u16)__msa_vshf_b(mask, src0, src0);
- vec1 = (v8u16)__msa_vshf_b(mask, src1, src1);
- vec2 = (v8u16)__msa_vshf_b(mask, src2, src2);
- vec3 = (v8u16)__msa_vshf_b(mask, src3, src3);
-
- vec0 = __msa_dotp_u_h((v16u8)vec0, filt0);
- vec1 = __msa_dotp_u_h((v16u8)vec1, filt0);
- vec2 = __msa_dotp_u_h((v16u8)vec2, filt0);
- vec3 = __msa_dotp_u_h((v16u8)vec3, filt0);
-
- SRARI_H_4VECS_UH(vec0, vec1, vec2, vec3,
- vec0, vec1, vec2, vec3, FILTER_BITS);
-
- vec0 = __msa_min_u_h(vec0, const255);
- vec1 = __msa_min_u_h(vec1, const255);
- vec2 = __msa_min_u_h(vec2, const255);
- vec3 = __msa_min_u_h(vec3, const255);
-
- LOAD_4VECS_SB(src, src_stride, src0, src1, src2, src3);
+ VSHF_B2_UH(src0, src0, src1, src1, mask, mask, vec0, vec1);
+ VSHF_B2_UH(src2, src2, src3, src3, mask, mask, vec2, vec3);
+ DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, vec0, vec1,
+ vec2, vec3);
+ SRARI_H4_UH(vec0, vec1, vec2, vec3, FILTER_BITS);
+ MIN_UH4_UH(vec0, vec1, vec2, vec3, const255);
+ LD_SB4(src, src_stride, src0, src1, src2, src3);
src += (4 * src_stride);
- PCKEV_B_STORE_8_BYTES_4(vec0, vec1, vec2, vec3, dst, dst_stride);
- dst += (4 * dst_stride);
-
- vec0 = (v8u16)__msa_vshf_b(mask, src0, src0);
- vec1 = (v8u16)__msa_vshf_b(mask, src1, src1);
- vec2 = (v8u16)__msa_vshf_b(mask, src2, src2);
- vec3 = (v8u16)__msa_vshf_b(mask, src3, src3);
-
- vec0 = __msa_dotp_u_h((v16u8)vec0, filt0);
- vec1 = __msa_dotp_u_h((v16u8)vec1, filt0);
- vec2 = __msa_dotp_u_h((v16u8)vec2, filt0);
- vec3 = __msa_dotp_u_h((v16u8)vec3, filt0);
-
- SRARI_H_4VECS_UH(vec0, vec1, vec2, vec3,
- vec0, vec1, vec2, vec3, FILTER_BITS);
-
- vec0 = __msa_min_u_h(vec0, const255);
- vec1 = __msa_min_u_h(vec1, const255);
- vec2 = __msa_min_u_h(vec2, const255);
- vec3 = __msa_min_u_h(vec3, const255);
-
- PCKEV_B_STORE_8_BYTES_4(vec0, vec1, vec2, vec3, dst, dst_stride);
+ PCKEV_B2_SB(vec1, vec0, vec3, vec2, out0, out1);
+ ST8x4_UB(out0, out1, dst, dst_stride);
+
+ VSHF_B2_UH(src0, src0, src1, src1, mask, mask, vec0, vec1);
+ VSHF_B2_UH(src2, src2, src3, src3, mask, mask, vec2, vec3);
+ DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, vec0, vec1,
+ vec2, vec3);
+ SRARI_H4_UH(vec0, vec1, vec2, vec3, FILTER_BITS);
+ MIN_UH4_UH(vec0, vec1, vec2, vec3, const255);
+ PCKEV_B2_SB(vec1, vec0, vec3, vec2, out0, out1);
+ ST8x4_UB(out0, out1, dst + 4 * dst_stride, dst_stride);
}
}
@@ -668,136 +487,68 @@ static void common_hz_2t_16w_msa(const uint8_t *src, int32_t src_stride,
int8_t *filter, int32_t height) {
uint32_t loop_cnt;
v16i8 src0, src1, src2, src3, src4, src5, src6, src7, mask;
- v16u8 filt0;
- v16u8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
- v8u16 out0, out1, out2, out3, out4, out5, out6, out7;
- v8u16 filt, const255;
+ v16u8 filt0, vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
+ v8u16 out0, out1, out2, out3, out4, out5, out6, out7, filt, const255;
- mask = LOAD_SB(&mc_filt_mask_arr[0]);
+ mask = LD_SB(&mc_filt_mask_arr[0]);
loop_cnt = (height >> 2) - 1;
/* rearranging filter */
- filt = LOAD_UH(filter);
- filt0 = (v16u8)__msa_splati_h((v8i16)filt, 0);
-
- const255 = (v8u16)__msa_ldi_h(255);
-
- src0 = LOAD_SB(src);
- src1 = LOAD_SB(src + 8);
- src += src_stride;
- src2 = LOAD_SB(src);
- src3 = LOAD_SB(src + 8);
- src += src_stride;
- src4 = LOAD_SB(src);
- src5 = LOAD_SB(src + 8);
- src += src_stride;
- src6 = LOAD_SB(src);
- src7 = LOAD_SB(src + 8);
- src += src_stride;
-
- vec0 = (v16u8)__msa_vshf_b(mask, src0, src0);
- vec1 = (v16u8)__msa_vshf_b(mask, src1, src1);
- vec2 = (v16u8)__msa_vshf_b(mask, src2, src2);
- vec3 = (v16u8)__msa_vshf_b(mask, src3, src3);
- vec4 = (v16u8)__msa_vshf_b(mask, src4, src4);
- vec5 = (v16u8)__msa_vshf_b(mask, src5, src5);
- vec6 = (v16u8)__msa_vshf_b(mask, src6, src6);
- vec7 = (v16u8)__msa_vshf_b(mask, src7, src7);
-
- out0 = __msa_dotp_u_h(vec0, filt0);
- out1 = __msa_dotp_u_h(vec1, filt0);
- out2 = __msa_dotp_u_h(vec2, filt0);
- out3 = __msa_dotp_u_h(vec3, filt0);
- out4 = __msa_dotp_u_h(vec4, filt0);
- out5 = __msa_dotp_u_h(vec5, filt0);
- out6 = __msa_dotp_u_h(vec6, filt0);
- out7 = __msa_dotp_u_h(vec7, filt0);
-
- out0 = (v8u16)__msa_srari_h((v8i16)out0, FILTER_BITS);
- out1 = (v8u16)__msa_srari_h((v8i16)out1, FILTER_BITS);
- out2 = (v8u16)__msa_srari_h((v8i16)out2, FILTER_BITS);
- out3 = (v8u16)__msa_srari_h((v8i16)out3, FILTER_BITS);
- out4 = (v8u16)__msa_srari_h((v8i16)out4, FILTER_BITS);
- out5 = (v8u16)__msa_srari_h((v8i16)out5, FILTER_BITS);
- out6 = (v8u16)__msa_srari_h((v8i16)out6, FILTER_BITS);
- out7 = (v8u16)__msa_srari_h((v8i16)out7, FILTER_BITS);
-
- out0 = __msa_min_u_h(out0, const255);
- out1 = __msa_min_u_h(out1, const255);
- out2 = __msa_min_u_h(out2, const255);
- out3 = __msa_min_u_h(out3, const255);
- out4 = __msa_min_u_h(out4, const255);
- out5 = __msa_min_u_h(out5, const255);
- out6 = __msa_min_u_h(out6, const255);
- out7 = __msa_min_u_h(out7, const255);
-
- PCKEV_B_STORE_VEC(out1, out0, dst);
+ filt = LD_UH(filter);
+ filt0 = (v16u8) __msa_splati_h((v8i16) filt, 0);
+
+ const255 = (v8u16) __msa_ldi_h(255);
+
+ LD_SB4(src, src_stride, src0, src2, src4, src6);
+ LD_SB4(src + 8, src_stride, src1, src3, src5, src7);
+ src += (4 * src_stride);
+
+ VSHF_B2_UB(src0, src0, src1, src1, mask, mask, vec0, vec1);
+ VSHF_B2_UB(src2, src2, src3, src3, mask, mask, vec2, vec3);
+ VSHF_B2_UB(src4, src4, src5, src5, mask, mask, vec4, vec5);
+ VSHF_B2_UB(src6, src6, src7, src7, mask, mask, vec6, vec7);
+ DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, out0, out1,
+ out2, out3);
+ DOTP_UB4_UH(vec4, vec5, vec6, vec7, filt0, filt0, filt0, filt0, out4, out5,
+ out6, out7);
+ SRARI_H4_UH(out0, out1, out2, out3, FILTER_BITS);
+ SRARI_H4_UH(out4, out5, out6, out7, FILTER_BITS);
+ MIN_UH4_UH(out0, out1, out2, out3, const255);
+ MIN_UH4_UH(out4, out5, out6, out7, const255);
+ PCKEV_ST_SB(out0, out1, dst);
dst += dst_stride;
- PCKEV_B_STORE_VEC(out3, out2, dst);
+ PCKEV_ST_SB(out2, out3, dst);
dst += dst_stride;
- PCKEV_B_STORE_VEC(out5, out4, dst);
+ PCKEV_ST_SB(out4, out5, dst);
dst += dst_stride;
- PCKEV_B_STORE_VEC(out7, out6, dst);
+ PCKEV_ST_SB(out6, out7, dst);
dst += dst_stride;
for (; loop_cnt--;) {
- src0 = LOAD_SB(src);
- src1 = LOAD_SB(src + 8);
- src += src_stride;
- src2 = LOAD_SB(src);
- src3 = LOAD_SB(src + 8);
- src += src_stride;
- src4 = LOAD_SB(src);
- src5 = LOAD_SB(src + 8);
- src += src_stride;
- src6 = LOAD_SB(src);
- src7 = LOAD_SB(src + 8);
- src += src_stride;
+ LD_SB4(src, src_stride, src0, src2, src4, src6);
+ LD_SB4(src + 8, src_stride, src1, src3, src5, src7);
+ src += (4 * src_stride);
- vec0 = (v16u8)__msa_vshf_b(mask, src0, src0);
- vec1 = (v16u8)__msa_vshf_b(mask, src1, src1);
- vec2 = (v16u8)__msa_vshf_b(mask, src2, src2);
- vec3 = (v16u8)__msa_vshf_b(mask, src3, src3);
- vec4 = (v16u8)__msa_vshf_b(mask, src4, src4);
- vec5 = (v16u8)__msa_vshf_b(mask, src5, src5);
- vec6 = (v16u8)__msa_vshf_b(mask, src6, src6);
- vec7 = (v16u8)__msa_vshf_b(mask, src7, src7);
-
- out0 = __msa_dotp_u_h(vec0, filt0);
- out1 = __msa_dotp_u_h(vec1, filt0);
- out2 = __msa_dotp_u_h(vec2, filt0);
- out3 = __msa_dotp_u_h(vec3, filt0);
- out4 = __msa_dotp_u_h(vec4, filt0);
- out5 = __msa_dotp_u_h(vec5, filt0);
- out6 = __msa_dotp_u_h(vec6, filt0);
- out7 = __msa_dotp_u_h(vec7, filt0);
-
- out0 = (v8u16)__msa_srari_h((v8i16)out0, FILTER_BITS);
- out1 = (v8u16)__msa_srari_h((v8i16)out1, FILTER_BITS);
- out2 = (v8u16)__msa_srari_h((v8i16)out2, FILTER_BITS);
- out3 = (v8u16)__msa_srari_h((v8i16)out3, FILTER_BITS);
- out4 = (v8u16)__msa_srari_h((v8i16)out4, FILTER_BITS);
- out5 = (v8u16)__msa_srari_h((v8i16)out5, FILTER_BITS);
- out6 = (v8u16)__msa_srari_h((v8i16)out6, FILTER_BITS);
- out7 = (v8u16)__msa_srari_h((v8i16)out7, FILTER_BITS);
-
- out0 = __msa_min_u_h(out0, const255);
- out1 = __msa_min_u_h(out1, const255);
- out2 = __msa_min_u_h(out2, const255);
- out3 = __msa_min_u_h(out3, const255);
- out4 = __msa_min_u_h(out4, const255);
- out5 = __msa_min_u_h(out5, const255);
- out6 = __msa_min_u_h(out6, const255);
- out7 = __msa_min_u_h(out7, const255);
-
- PCKEV_B_STORE_VEC(out1, out0, dst);
+ VSHF_B2_UB(src0, src0, src1, src1, mask, mask, vec0, vec1);
+ VSHF_B2_UB(src2, src2, src3, src3, mask, mask, vec2, vec3);
+ VSHF_B2_UB(src4, src4, src5, src5, mask, mask, vec4, vec5);
+ VSHF_B2_UB(src6, src6, src7, src7, mask, mask, vec6, vec7);
+ DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, out0, out1,
+ out2, out3);
+ DOTP_UB4_UH(vec4, vec5, vec6, vec7, filt0, filt0, filt0, filt0, out4, out5,
+ out6, out7);
+ SRARI_H4_UH(out0, out1, out2, out3, FILTER_BITS);
+ SRARI_H4_UH(out4, out5, out6, out7, FILTER_BITS);
+ MIN_UH4_UH(out0, out1, out2, out3, const255);
+ MIN_UH4_UH(out4, out5, out6, out7, const255);
+ PCKEV_ST_SB(out0, out1, dst);
dst += dst_stride;
- PCKEV_B_STORE_VEC(out3, out2, dst);
+ PCKEV_ST_SB(out2, out3, dst);
dst += dst_stride;
- PCKEV_B_STORE_VEC(out5, out4, dst);
+ PCKEV_ST_SB(out4, out5, dst);
dst += dst_stride;
- PCKEV_B_STORE_VEC(out7, out6, dst);
+ PCKEV_ST_SB(out6, out7, dst);
dst += dst_stride;
}
}
@@ -807,72 +558,46 @@ static void common_hz_2t_32w_msa(const uint8_t *src, int32_t src_stride,
int8_t *filter, int32_t height) {
uint32_t loop_cnt;
v16i8 src0, src1, src2, src3, src4, src5, src6, src7, mask;
- v16u8 filt0;
- v16u8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
- v8u16 out0, out1, out2, out3, out4, out5, out6, out7;
- v8u16 filt, const255;
+ v16u8 filt0, vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
+ v8u16 out0, out1, out2, out3, out4, out5, out6, out7, filt, const255;
- mask = LOAD_SB(&mc_filt_mask_arr[0]);
+ mask = LD_SB(&mc_filt_mask_arr[0]);
/* rearranging filter */
- filt = LOAD_UH(filter);
- filt0 = (v16u8)__msa_splati_h((v8i16)filt, 0);
+ filt = LD_UH(filter);
+ filt0 = (v16u8) __msa_splati_h((v8i16) filt, 0);
- const255 = (v8u16)__msa_ldi_h(255);
+ const255 = (v8u16) __msa_ldi_h(255);
for (loop_cnt = height >> 1; loop_cnt--;) {
- src0 = LOAD_SB(src);
- src2 = LOAD_SB(src + 16);
- src3 = LOAD_SB(src + 24);
- src1 = __msa_sld_b(src2, src0, 8);
+ src0 = LD_SB(src);
+ src2 = LD_SB(src + 16);
+ src3 = LD_SB(src + 24);
+ src1 = __msa_sldi_b(src2, src0, 8);
src += src_stride;
- src4 = LOAD_SB(src);
- src6 = LOAD_SB(src + 16);
- src7 = LOAD_SB(src + 24);
- src5 = __msa_sld_b(src6, src4, 8);
+ src4 = LD_SB(src);
+ src6 = LD_SB(src + 16);
+ src7 = LD_SB(src + 24);
+ src5 = __msa_sldi_b(src6, src4, 8);
src += src_stride;
- vec0 = (v16u8)__msa_vshf_b(mask, src0, src0);
- vec1 = (v16u8)__msa_vshf_b(mask, src1, src1);
- vec2 = (v16u8)__msa_vshf_b(mask, src2, src2);
- vec3 = (v16u8)__msa_vshf_b(mask, src3, src3);
- vec4 = (v16u8)__msa_vshf_b(mask, src4, src4);
- vec5 = (v16u8)__msa_vshf_b(mask, src5, src5);
- vec6 = (v16u8)__msa_vshf_b(mask, src6, src6);
- vec7 = (v16u8)__msa_vshf_b(mask, src7, src7);
-
- out0 = __msa_dotp_u_h(vec0, filt0);
- out1 = __msa_dotp_u_h(vec1, filt0);
- out2 = __msa_dotp_u_h(vec2, filt0);
- out3 = __msa_dotp_u_h(vec3, filt0);
- out4 = __msa_dotp_u_h(vec4, filt0);
- out5 = __msa_dotp_u_h(vec5, filt0);
- out6 = __msa_dotp_u_h(vec6, filt0);
- out7 = __msa_dotp_u_h(vec7, filt0);
-
- out0 = (v8u16)__msa_srari_h((v8i16)out0, FILTER_BITS);
- out1 = (v8u16)__msa_srari_h((v8i16)out1, FILTER_BITS);
- out2 = (v8u16)__msa_srari_h((v8i16)out2, FILTER_BITS);
- out3 = (v8u16)__msa_srari_h((v8i16)out3, FILTER_BITS);
- out4 = (v8u16)__msa_srari_h((v8i16)out4, FILTER_BITS);
- out5 = (v8u16)__msa_srari_h((v8i16)out5, FILTER_BITS);
- out6 = (v8u16)__msa_srari_h((v8i16)out6, FILTER_BITS);
- out7 = (v8u16)__msa_srari_h((v8i16)out7, FILTER_BITS);
-
- out0 = __msa_min_u_h(out0, const255);
- out1 = __msa_min_u_h(out1, const255);
- out2 = __msa_min_u_h(out2, const255);
- out3 = __msa_min_u_h(out3, const255);
- out4 = __msa_min_u_h(out4, const255);
- out5 = __msa_min_u_h(out5, const255);
- out6 = __msa_min_u_h(out6, const255);
- out7 = __msa_min_u_h(out7, const255);
-
- PCKEV_B_STORE_VEC(out1, out0, dst);
- PCKEV_B_STORE_VEC(out3, out2, dst + 16);
+ VSHF_B2_UB(src0, src0, src1, src1, mask, mask, vec0, vec1);
+ VSHF_B2_UB(src2, src2, src3, src3, mask, mask, vec2, vec3);
+ VSHF_B2_UB(src4, src4, src5, src5, mask, mask, vec4, vec5);
+ VSHF_B2_UB(src6, src6, src7, src7, mask, mask, vec6, vec7);
+ DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, out0, out1,
+ out2, out3);
+ DOTP_UB4_UH(vec4, vec5, vec6, vec7, filt0, filt0, filt0, filt0, out4, out5,
+ out6, out7);
+ SRARI_H4_UH(out0, out1, out2, out3, FILTER_BITS);
+ SRARI_H4_UH(out4, out5, out6, out7, FILTER_BITS);
+ MIN_UH4_UH(out0, out1, out2, out3, const255);
+ MIN_UH4_UH(out4, out5, out6, out7, const255);
+ PCKEV_ST_SB(out0, out1, dst);
+ PCKEV_ST_SB(out2, out3, dst + 16);
dst += dst_stride;
- PCKEV_B_STORE_VEC(out5, out4, dst);
- PCKEV_B_STORE_VEC(out7, out6, dst + 16);
+ PCKEV_ST_SB(out4, out5, dst);
+ PCKEV_ST_SB(out6, out7, dst + 16);
dst += dst_stride;
}
}
@@ -882,70 +607,42 @@ static void common_hz_2t_64w_msa(const uint8_t *src, int32_t src_stride,
int8_t *filter, int32_t height) {
uint32_t loop_cnt;
v16i8 src0, src1, src2, src3, src4, src5, src6, src7, mask;
- v16u8 filt0;
- v16u8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
- v8u16 out0, out1, out2, out3, out4, out5, out6, out7;
- v8u16 filt, const255;
+ v16u8 filt0, vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
+ v8u16 out0, out1, out2, out3, out4, out5, out6, out7, filt, const255;
- mask = LOAD_SB(&mc_filt_mask_arr[0]);
+ mask = LD_SB(&mc_filt_mask_arr[0]);
/* rearranging filter */
- filt = LOAD_UH(filter);
- filt0 = (v16u8)__msa_splati_h((v8i16)filt, 0);
+ filt = LD_UH(filter);
+ filt0 = (v16u8) __msa_splati_h((v8i16) filt, 0);
- const255 = (v8u16)__msa_ldi_h(255);
+ const255 = (v8u16) __msa_ldi_h(255);
for (loop_cnt = height; loop_cnt--;) {
- src0 = LOAD_SB(src);
- src2 = LOAD_SB(src + 16);
- src4 = LOAD_SB(src + 32);
- src6 = LOAD_SB(src + 48);
- src7 = LOAD_SB(src + 56);
- src1 = __msa_sld_b(src2, src0, 8);
- src3 = __msa_sld_b(src4, src2, 8);
- src5 = __msa_sld_b(src6, src4, 8);
+ src0 = LD_SB(src);
+ src2 = LD_SB(src + 16);
+ src4 = LD_SB(src + 32);
+ src6 = LD_SB(src + 48);
+ src7 = LD_SB(src + 56);
+ SLDI_B3_SB(src2, src4, src6, src0, src2, src4, src1, src3, src5, 8);
src += src_stride;
- vec0 = (v16u8)__msa_vshf_b(mask, src0, src0);
- vec1 = (v16u8)__msa_vshf_b(mask, src1, src1);
- vec2 = (v16u8)__msa_vshf_b(mask, src2, src2);
- vec3 = (v16u8)__msa_vshf_b(mask, src3, src3);
- vec4 = (v16u8)__msa_vshf_b(mask, src4, src4);
- vec5 = (v16u8)__msa_vshf_b(mask, src5, src5);
- vec6 = (v16u8)__msa_vshf_b(mask, src6, src6);
- vec7 = (v16u8)__msa_vshf_b(mask, src7, src7);
-
- out0 = __msa_dotp_u_h(vec0, filt0);
- out1 = __msa_dotp_u_h(vec1, filt0);
- out2 = __msa_dotp_u_h(vec2, filt0);
- out3 = __msa_dotp_u_h(vec3, filt0);
- out4 = __msa_dotp_u_h(vec4, filt0);
- out5 = __msa_dotp_u_h(vec5, filt0);
- out6 = __msa_dotp_u_h(vec6, filt0);
- out7 = __msa_dotp_u_h(vec7, filt0);
-
- out0 = (v8u16)__msa_srari_h((v8i16)out0, FILTER_BITS);
- out1 = (v8u16)__msa_srari_h((v8i16)out1, FILTER_BITS);
- out2 = (v8u16)__msa_srari_h((v8i16)out2, FILTER_BITS);
- out3 = (v8u16)__msa_srari_h((v8i16)out3, FILTER_BITS);
- out4 = (v8u16)__msa_srari_h((v8i16)out4, FILTER_BITS);
- out5 = (v8u16)__msa_srari_h((v8i16)out5, FILTER_BITS);
- out6 = (v8u16)__msa_srari_h((v8i16)out6, FILTER_BITS);
- out7 = (v8u16)__msa_srari_h((v8i16)out7, FILTER_BITS);
-
- out0 = __msa_min_u_h(out0, const255);
- out1 = __msa_min_u_h(out1, const255);
- out2 = __msa_min_u_h(out2, const255);
- out3 = __msa_min_u_h(out3, const255);
- out4 = __msa_min_u_h(out4, const255);
- out5 = __msa_min_u_h(out5, const255);
- out6 = __msa_min_u_h(out6, const255);
- out7 = __msa_min_u_h(out7, const255);
-
- PCKEV_B_STORE_VEC(out1, out0, dst);
- PCKEV_B_STORE_VEC(out3, out2, dst + 16);
- PCKEV_B_STORE_VEC(out5, out4, dst + 32);
- PCKEV_B_STORE_VEC(out7, out6, dst + 48);
+ VSHF_B2_UB(src0, src0, src1, src1, mask, mask, vec0, vec1);
+ VSHF_B2_UB(src2, src2, src3, src3, mask, mask, vec2, vec3);
+ VSHF_B2_UB(src4, src4, src5, src5, mask, mask, vec4, vec5);
+ VSHF_B2_UB(src6, src6, src7, src7, mask, mask, vec6, vec7);
+ DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, out0, out1,
+ out2, out3);
+ DOTP_UB4_UH(vec4, vec5, vec6, vec7, filt0, filt0, filt0, filt0, out4, out5,
+ out6, out7);
+ SRARI_H4_UH(out0, out1, out2, out3, FILTER_BITS);
+ SRARI_H4_UH(out4, out5, out6, out7, FILTER_BITS);
+ MIN_UH4_UH(out0, out1, out2, out3, const255);
+ MIN_UH4_UH(out4, out5, out6, out7, const255);
+ PCKEV_ST_SB(out0, out1, dst);
+ PCKEV_ST_SB(out2, out3, dst + 16);
+ PCKEV_ST_SB(out4, out5, dst + 32);
+ PCKEV_ST_SB(out6, out7, dst + 48);
dst += dst_stride;
}
}
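
Finally, SLDI_B3_SB folds the three splices of the removed code into one macro call. A scalar sketch of the assumed effect, with src0/src2/src4/src6 holding row bytes 0..15, 16..31, 32..47, 48..63:

    #include <stdint.h>

    /* The three "offset 8" vectors cover row bytes 8..23, 24..39, 40..55. */
    static void sldi_b3_model(const uint8_t s0[16], const uint8_t s2[16],
                              const uint8_t s4[16], const uint8_t s6[16],
                              uint8_t s1[16], uint8_t s3[16], uint8_t s5[16]) {
      int i;
      for (i = 0; i < 8; ++i) {
        s1[i] = s0[i + 8]; s1[i + 8] = s2[i];  /* bytes  8..23 */
        s3[i] = s2[i + 8]; s3[i + 8] = s4[i];  /* bytes 24..39 */
        s5[i] = s4[i + 8]; s5[i + 8] = s6[i];  /* bytes 40..55 */
      }
    }
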
