Chromium Code Reviews
| Index: source/row_msa.cc |
| diff --git a/source/row_msa.cc b/source/row_msa.cc |
| index acc60520af6dc8a5b1ada6fade21e7f391cea9a9..e885f8d8cdd7df0922e97052aaad70f7b51ad11e 100644 |
| --- a/source/row_msa.cc |
| +++ b/source/row_msa.cc |
| @@ -221,6 +221,192 @@ void UYVYToUV422Row_MSA(const uint8* src_uyvy, uint8* dst_u, uint8* dst_v, |
| } |
| } |
| +void ARGB4444ToYRow_MSA(const uint8* src_argb4444, uint8* dst_y, int width) { |
| + int x; |
| + const uint16* src_argb4444_u16 = (const uint16*) src_argb4444; |
| + v8u16 src0, src1; |
| + v8u16 vec0, vec1, vec2, vec3, vec4, vec5; |
| + v16u8 dst0; |
| + v8u16 const_0x19 = (v8u16) __msa_ldi_h(0x19); |
| + v8u16 const_0x81 = (v8u16) __msa_ldi_h(0x81); |
| + v8u16 const_0x42 = (v8u16) __msa_ldi_h(0x42); |
| + v8u16 const_0x1080 = (v8u16) __msa_fill_h(0x1080); |
| + v8u16 const_0x0F = (v8u16) __msa_ldi_h(0x0F); |
| + |
| + for (x = 0; x < width; x += 16) { |
| + LD_UH2(src_argb4444_u16, 8, src0, src1); |
| + vec0 = src0 & const_0x0F; |
| + vec1 = src1 & const_0x0F; |
| + src0 = (v8u16) __msa_srai_h((v8i16) src0, 4); |
| + src1 = (v8u16) __msa_srai_h((v8i16) src1, 4); |
| + vec2 = src0 & const_0x0F; |
| + vec3 = src1 & const_0x0F; |
| + src0 = (v8u16) __msa_srai_h((v8i16) src0, 4); |
| + src1 = (v8u16) __msa_srai_h((v8i16) src1, 4); |
| + vec4 = src0 & const_0x0F; |
| + vec5 = src1 & const_0x0F; |
| + vec0 |= (v8u16) __msa_slli_h((v8i16) vec0, 4); |
| + vec1 |= (v8u16) __msa_slli_h((v8i16) vec1, 4); |
| + vec2 |= (v8u16) __msa_slli_h((v8i16) vec2, 4); |
| + vec3 |= (v8u16) __msa_slli_h((v8i16) vec3, 4); |
| + vec4 |= (v8u16) __msa_slli_h((v8i16) vec4, 4); |
| + vec5 |= (v8u16) __msa_slli_h((v8i16) vec5, 4); |
| + vec0 *= const_0x19; |
|
fbarchard1
2016/10/14 21:35:16
FYI The YUV to RGB functions now take constants as
manojkumar.bhosale
2016/10/19 11:56:27
OK. Will fix them then, when that change happens.
|
| + vec1 *= const_0x19; |
| + vec2 *= const_0x81; |
| + vec3 *= const_0x81; |
| + vec4 *= const_0x42; |
| + vec5 *= const_0x42; |
| + vec0 += vec2; |
| + vec1 += vec3; |
| + vec0 += vec4; |
| + vec1 += vec5; |
| + vec0 += const_0x1080; |
| + vec1 += const_0x1080; |
| + vec0 = (v8u16) __msa_srai_h((v8i16) vec0, 8); |
| + vec1 = (v8u16) __msa_srai_h((v8i16) vec1, 8); |
| + dst0 = (v16u8) __msa_pckev_b((v16i8) vec1, (v16i8) vec0); |
| + ST_UB(dst0, dst_y); |
| + src_argb4444_u16 += 16; |
| + dst_y += 16; |
| + } |
| +} |
| + |
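For reference, a minimal scalar sketch of the per-pixel math the loop above vectorizes. The 0x19/0x81/0x42 coefficients and the 0x1080 bias are taken directly from the code; the helper name and the assumption that B occupies the low nibble of each 16-bit ARGB4444 value are illustrative only, not part of the patch.

#include <stdint.h>

/* Scalar reference for ARGB4444ToYRow_MSA: what the vector loop computes
 * for one pixel.  Illustrative sketch only, not part of the patch. */
static uint8_t ARGB4444PixelToY(uint16_t pixel) {
  uint16_t b = (pixel >> 0) & 0x0F;   /* 4-bit blue  */
  uint16_t g = (pixel >> 4) & 0x0F;   /* 4-bit green */
  uint16_t r = (pixel >> 8) & 0x0F;   /* 4-bit red   */
  /* Expand 4 bits to 8 bits by nibble replication (the vec |= vec << 4 step). */
  uint16_t b8 = b * 0x11, g8 = g * 0x11, r8 = r * 0x11;
  /* Same weighted sum as the vector code: 0x19*B + 0x81*G + 0x42*R + 0x1080. */
  return (uint8_t)((b8 * 0x19 + g8 * 0x81 + r8 * 0x42 + 0x1080) >> 8);
}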
| +void ARGB4444ToUVRow_MSA(const uint8* src_argb4444, |
| + int src_stride_argb4444, |
| + uint8* dst_u, uint8* dst_v, int width) { |
| + int x; |
| + const uint8* src_argb4444_next = src_argb4444 + src_stride_argb4444; |
| + v16u8 src0, src1, src2, src3, src4, src5, src6, src7; |
| + v16u8 reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7, reg8, reg9; |
| + v8u16 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, vec8, vec9; |
| + v16u8 dst0, dst1; |
| + v8u16 const_0x70 = (v8u16) __msa_ldi_h(0x70); |
| + v8u16 const_0x4A = (v8u16) __msa_ldi_h(0x4A); |
| + v8u16 const_0x26 = (v8u16) __msa_ldi_h(0x26); |
| + v8u16 const_0x5E = (v8u16) __msa_ldi_h(0x5E); |
| + v8u16 const_0x12 = (v8u16) __msa_ldi_h(0x12); |
| + v8u16 const_0x8080 = (v8u16) __msa_fill_h(0x8080); |
| + |
| + for (x = 0; x < width; x += 32) { |
| + LD_UB4(src_argb4444, 16, src0, src1, src2, src3); |
| + LD_UB4(src_argb4444_next, 16, src4, src5, src6, src7); |
| + reg0 = __msa_andi_b(src0, 0x0F); |
| + reg1 = __msa_andi_b(src1, 0x0F); |
| + reg2 = __msa_andi_b(src2, 0x0F); |
| + reg3 = __msa_andi_b(src3, 0x0F); |
| + reg0 += __msa_andi_b(src4, 0x0F); |
| + reg1 += __msa_andi_b(src5, 0x0F); |
| + reg2 += __msa_andi_b(src6, 0x0F); |
| + reg3 += __msa_andi_b(src7, 0x0F); |
| + src0 = __msa_andi_b(src0, 0xF0); |
| + src1 = __msa_andi_b(src1, 0xF0); |
| + src2 = __msa_andi_b(src2, 0xF0); |
| + src3 = __msa_andi_b(src3, 0xF0); |
| + src4 = __msa_andi_b(src4, 0xF0); |
| + src5 = __msa_andi_b(src5, 0xF0); |
| + src6 = __msa_andi_b(src6, 0xF0); |
| + src7 = __msa_andi_b(src7, 0xF0); |
| + reg4 = (v16u8) __msa_srli_b((v16i8) src0, 4); |
| + reg5 = (v16u8) __msa_srli_b((v16i8) src1, 4); |
| + reg6 = (v16u8) __msa_srli_b((v16i8) src2, 4); |
| + reg7 = (v16u8) __msa_srli_b((v16i8) src3, 4); |
| + reg4 += (v16u8) __msa_srli_b((v16i8) src4, 4); |
| + reg5 += (v16u8) __msa_srli_b((v16i8) src5, 4); |
| + reg6 += (v16u8) __msa_srli_b((v16i8) src6, 4); |
| + reg7 += (v16u8) __msa_srli_b((v16i8) src7, 4); |
| + reg8 = (v16u8) __msa_pckod_b((v16i8) reg1, (v16i8) reg0); |
| + reg9 = (v16u8) __msa_pckod_b((v16i8) reg3, (v16i8) reg2); |
| + reg0 = (v16u8) __msa_pckev_b((v16i8) reg1, (v16i8) reg0); |
| + reg1 = (v16u8) __msa_pckev_b((v16i8) reg3, (v16i8) reg2); |
| + reg2 = (v16u8) __msa_pckev_b((v16i8) reg5, (v16i8) reg4); |
| + reg3 = (v16u8) __msa_pckev_b((v16i8) reg7, (v16i8) reg6); |
| + vec0 = __msa_hadd_u_h(reg0, reg0); |
| + vec1 = __msa_hadd_u_h(reg1, reg1); |
| + vec2 = __msa_hadd_u_h(reg2, reg2); |
| + vec3 = __msa_hadd_u_h(reg3, reg3); |
| + vec4 = __msa_hadd_u_h(reg8, reg8); |
| + vec5 = __msa_hadd_u_h(reg9, reg9); |
| + vec0 = (v8u16) __msa_slli_h((v8i16) vec0, 2); |
| + vec1 = (v8u16) __msa_slli_h((v8i16) vec1, 2); |
| + vec2 = (v8u16) __msa_slli_h((v8i16) vec2, 2); |
| + vec3 = (v8u16) __msa_slli_h((v8i16) vec3, 2); |
| + vec4 = (v8u16) __msa_slli_h((v8i16) vec4, 2); |
| + vec5 = (v8u16) __msa_slli_h((v8i16) vec5, 2); |
| + vec0 |= (v8u16) __msa_srai_h((v8i16) vec0, 6); |
| + vec1 |= (v8u16) __msa_srai_h((v8i16) vec1, 6); |
| + vec2 |= (v8u16) __msa_srai_h((v8i16) vec2, 6); |
| + vec3 |= (v8u16) __msa_srai_h((v8i16) vec3, 6); |
| + vec4 |= (v8u16) __msa_srai_h((v8i16) vec4, 6); |
| + vec5 |= (v8u16) __msa_srai_h((v8i16) vec5, 6); |
|
fbarchard1
2016/10/14 21:35:16
I'm concerned that this is a lot of code for a for
manojkumar.bhosale
2016/10/19 11:56:27
Done.
|
| + vec6 = vec0 * const_0x70; |
| + vec7 = vec1 * const_0x70; |
| + vec8 = vec2 * const_0x4A; |
| + vec9 = vec3 * const_0x4A; |
| + vec0 *= const_0x12; |
| + vec1 *= const_0x12; |
| + vec2 *= const_0x5E; |
| + vec3 *= const_0x5E; |
| + vec6 += const_0x8080; |
| + vec7 += const_0x8080; |
| + vec8 += vec4 * const_0x26; |
| + vec9 += vec5 * const_0x26; |
| + vec4 *= const_0x70; |
| + vec5 *= const_0x70; |
| + vec2 += vec0; |
| + vec3 += vec1; |
| + vec4 += const_0x8080; |
| + vec5 += const_0x8080; |
| + vec0 = vec6 - vec8; |
| + vec1 = vec7 - vec9; |
| + vec2 = vec4 - vec2; |
| + vec3 = vec5 - vec3; |
| + vec0 = (v8u16) __msa_srli_h((v8i16) vec0, 8); |
| + vec1 = (v8u16) __msa_srli_h((v8i16) vec1, 8); |
| + vec2 = (v8u16) __msa_srli_h((v8i16) vec2, 8); |
| + vec3 = (v8u16) __msa_srli_h((v8i16) vec3, 8); |
| + dst0 = (v16u8) __msa_pckev_b((v16i8) vec1, (v16i8) vec0); |
| + dst1 = (v16u8) __msa_pckev_b((v16i8) vec3, (v16i8) vec2); |
| + ST_UB(dst0, dst_u); |
| + ST_UB(dst1, dst_v); |
| + src_argb4444 += 64; |
| + src_argb4444_next += 64; |
|
fbarchard1
2016/10/14 21:35:16
on other platforms I'd typically unroll less than
|
| + dst_u += 16; |
| + dst_v += 16; |
| + } |
| +} |
| + |
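For reference only, a scalar sketch of what the loop above computes for a single 2x2 block: the 4-bit B, G and R samples of two adjacent pixels on each of the two rows are summed, widened to roughly 8-bit averages (the slli/srai pair in the code), and run through the 0x70/0x4A/0x26 and 0x70/0x5E/0x12 weightings with the 0x8080 bias. The helper below and its name are illustrative, not part of the patch.

#include <stdint.h>

/* Scalar reference for ARGB4444ToUVRow_MSA on one 2x2 block.  p00/p01 are
 * adjacent pixels in the top row, p10/p11 the pixels below them. */
static void ARGB4444BlockToUV(uint16_t p00, uint16_t p01,
                              uint16_t p10, uint16_t p11,
                              uint8_t* u, uint8_t* v) {
  uint16_t sb = (p00 & 0xF) + (p01 & 0xF) + (p10 & 0xF) + (p11 & 0xF);
  uint16_t sg = ((p00 >> 4) & 0xF) + ((p01 >> 4) & 0xF) +
                ((p10 >> 4) & 0xF) + ((p11 >> 4) & 0xF);
  uint16_t sr = ((p00 >> 8) & 0xF) + ((p01 >> 8) & 0xF) +
                ((p10 >> 8) & 0xF) + ((p11 >> 8) & 0xF);
  /* Widen the 6-bit sums to ~8-bit averages, as the slli/srai pair does. */
  uint16_t b = (sb << 2) | (sb >> 4);
  uint16_t g = (sg << 2) | (sg >> 4);
  uint16_t r = (sr << 2) | (sr >> 4);
  *u = (uint8_t)((b * 0x70 - g * 0x4A - r * 0x26 + 0x8080) >> 8);
  *v = (uint8_t)((r * 0x70 - g * 0x5E - b * 0x12 + 0x8080) >> 8);
}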
| +void ARGB4444ToARGBRow_MSA(const uint8* src_argb4444, uint8* dst_argb, |
| + int width) { |
| + int x; |
| + v16u8 src0, src1; |
| + v8u16 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7; |
| + v16u8 dst0, dst1, dst2, dst3; |
| + |
| + for (x = 0; x < width; x += 16) { |
| + LD_UB2(src_argb4444, 16, src0, src1); |
| + vec0 = (v8u16) __msa_andi_b(src0, 0x0F); |
| + vec1 = (v8u16) __msa_andi_b(src1, 0x0F); |
| + vec2 = (v8u16) __msa_andi_b(src0, 0xF0); |
| + vec3 = (v8u16) __msa_andi_b(src1, 0xF0); |
| + vec4 = (v8u16) __msa_slli_b((v16i8) vec0, 4); |
| + vec5 = (v8u16) __msa_slli_b((v16i8) vec1, 4); |
| + vec6 = (v8u16) __msa_srli_b((v16i8) vec2, 4); |
| + vec7 = (v8u16) __msa_srli_b((v16i8) vec3, 4); |
| + vec0 |= vec4; |
| + vec1 |= vec5; |
| + vec2 |= vec6; |
| + vec3 |= vec7; |
| + dst0 = (v16u8) __msa_ilvr_b((v16i8) vec2, (v16i8) vec0); |
| + dst1 = (v16u8) __msa_ilvl_b((v16i8) vec2, (v16i8) vec0); |
| + dst2 = (v16u8) __msa_ilvr_b((v16i8) vec3, (v16i8) vec1); |
| + dst3 = (v16u8) __msa_ilvl_b((v16i8) vec3, (v16i8) vec1); |
| + ST_UB4(dst0, dst1, dst2, dst3, dst_argb, 16); |
| + src_argb4444 += 32; |
| + dst_argb += 64; |
| + } |
| +} |
| + |
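For reference, a scalar sketch of the nibble-replication expansion that the andi/slli/srli steps and the ilvr/ilvl interleaves above produce, writing bytes in the B, G, R, A memory order. The helper name is illustrative and not part of the patch.

#include <stdint.h>

/* Scalar reference for ARGB4444ToARGBRow_MSA: expand one ARGB4444 pixel to
 * four 8-bit channels by nibble replication.  Illustrative sketch only. */
static void ARGB4444PixelToARGB(uint16_t pixel, uint8_t dst[4]) {
  uint8_t b = pixel & 0x0F;
  uint8_t g = (pixel >> 4) & 0x0F;
  uint8_t r = (pixel >> 8) & 0x0F;
  uint8_t a = (pixel >> 12) & 0x0F;
  dst[0] = (uint8_t)(b * 0x11);  /* B: n | (n << 4) */
  dst[1] = (uint8_t)(g * 0x11);  /* G */
  dst[2] = (uint8_t)(r * 0x11);  /* R */
  dst[3] = (uint8_t)(a * 0x11);  /* A */
}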
| #ifdef __cplusplus |
| } // extern "C" |
| } // namespace libyuv |