/*
 * Copyright (c) 2015 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include <assert.h>

#include "vp9/common/mips/msa/vp9_idct_msa.h"

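/* Full 8x8 inverse DCT/add: all 64 coefficients may be non-zero. The 2-D
 * transform is separable, so it is computed as two 1-D passes with a
 * transpose before each pass, letting the SIMD code always operate on
 * rows of the 8x8 block. */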
void vp9_idct8x8_64_add_msa(const int16_t *input, uint8_t *dst,
                            int32_t dst_stride) {
  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;

  /* load vector elements of 8x8 block */
  LD_SH8(input, 8, in0, in1, in2, in3, in4, in5, in6, in7);

  /* rows transform */
  TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,
                     in0, in1, in2, in3, in4, in5, in6, in7);
  /* 1D idct8x8 */
  VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7,
                 in0, in1, in2, in3, in4, in5, in6, in7);
  /* columns transform */
  TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,
                     in0, in1, in2, in3, in4, in5, in6, in7);
  /* 1D idct8x8 */
  VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7,
                 in0, in1, in2, in3, in4, in5, in6, in7);
  /* final rounding (add 2^4, divide by 2^5) and shift */
  SRARI_H4_SH(in0, in1, in2, in3, 5);
  SRARI_H4_SH(in4, in5, in6, in7, 5);
  /* add block and store 8x8 */
  VP9_ADDBLK_ST8x4_UB(dst, dst_stride, in0, in1, in2, in3);
  dst += (4 * dst_stride);
  VP9_ADDBLK_ST8x4_UB(dst, dst_stride, in4, in5, in6, in7);
}

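/* Short-cut 8x8 inverse DCT/add for mostly-zero blocks (eob <= 12), where
 * the non-zero coefficients are confined to the upper-left 4x4 corner of
 * the block, so the first 1-D pass only has to process four rows.
 *
 * Dispatch sketch (an assumption, mirroring vp9_idct8x8_add() in
 * vp9/common/vp9_idct.c):
 *   if (eob == 1)       vp9_idct8x8_1_add_msa(input, dst, dst_stride);
 *   else if (eob <= 12) vp9_idct8x8_12_add_msa(input, dst, dst_stride);
 *   else                vp9_idct8x8_64_add_msa(input, dst, dst_stride);
 */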
void vp9_idct8x8_12_add_msa(const int16_t *input, uint8_t *dst,
                            int32_t dst_stride) {
  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;
  v8i16 s0, s1, s2, s3, s4, s5, s6, s7, k0, k1, k2, k3, m0, m1, m2, m3;
  v4i32 tmp0, tmp1, tmp2, tmp3;
  v8i16 zero = { 0 };

  /* load vector elements of 8x8 block */
  LD_SH8(input, 8, in0, in1, in2, in3, in4, in5, in6, in7);
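  /* with eob <= 12 only the upper-left 4x4 of the block can hold non-zero
   * coefficients, so just the first four rows feed the first 1-D pass */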
  TRANSPOSE8X4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);

  /* stage1 */
  ILVL_H2_SH(in3, in0, in2, in1, s0, s1);
  k0 = VP9_SET_COSPI_PAIR(cospi_28_64, -cospi_4_64);
  k1 = VP9_SET_COSPI_PAIR(cospi_4_64, cospi_28_64);
  k2 = VP9_SET_COSPI_PAIR(-cospi_20_64, cospi_12_64);
  k3 = VP9_SET_COSPI_PAIR(cospi_12_64, cospi_20_64);
  DOTP_SH4_SW(s0, s0, s1, s1, k0, k1, k2, k3, tmp0, tmp1, tmp2, tmp3);
  SRARI_W4_SW(tmp0, tmp1, tmp2, tmp3, DCT_CONST_BITS);
  PCKEV_H2_SH(zero, tmp0, zero, tmp1, s0, s1);
  PCKEV_H2_SH(zero, tmp2, zero, tmp3, s2, s3);
  BUTTERFLY_4(s0, s1, s3, s2, s4, s7, s6, s5);

  /* stage2 */
  ILVR_H2_SH(in3, in1, in2, in0, s1, s0);
  k0 = VP9_SET_COSPI_PAIR(cospi_16_64, cospi_16_64);
  k1 = VP9_SET_COSPI_PAIR(cospi_16_64, -cospi_16_64);
  k2 = VP9_SET_COSPI_PAIR(cospi_24_64, -cospi_8_64);
  k3 = VP9_SET_COSPI_PAIR(cospi_8_64, cospi_24_64);
  DOTP_SH4_SW(s0, s0, s1, s1, k0, k1, k2, k3, tmp0, tmp1, tmp2, tmp3);
  SRARI_W4_SW(tmp0, tmp1, tmp2, tmp3, DCT_CONST_BITS);
  PCKEV_H2_SH(zero, tmp0, zero, tmp1, s0, s1);
  PCKEV_H2_SH(zero, tmp2, zero, tmp3, s2, s3);
  BUTTERFLY_4(s0, s1, s2, s3, m0, m1, m2, m3);

  /* stage3 */
  s0 = __msa_ilvr_h(s6, s5);

  k1 = VP9_SET_COSPI_PAIR(-cospi_16_64, cospi_16_64);
  DOTP_SH2_SW(s0, s0, k1, k0, tmp0, tmp1);
  SRARI_W2_SW(tmp0, tmp1, DCT_CONST_BITS);
  PCKEV_H2_SH(zero, tmp0, zero, tmp1, s2, s3);

  /* stage4 */
  BUTTERFLY_8(m0, m1, m2, m3, s4, s2, s3, s7,
              in0, in1, in2, in3, in4, in5, in6, in7);
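  /* transpose the 4-wide first-pass result back into eight full rows for
   * the second 1-D pass over the columns */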
  TRANSPOSE4X8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,
                     in0, in1, in2, in3, in4, in5, in6, in7);
  VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7,
                 in0, in1, in2, in3, in4, in5, in6, in7);

  /* final rounding (add 2^4, divide by 2^5) and shift */
  SRARI_H4_SH(in0, in1, in2, in3, 5);
  SRARI_H4_SH(in4, in5, in6, in7, 5);

  /* add block and store 8x8 */
  VP9_ADDBLK_ST8x4_UB(dst, dst_stride, in0, in1, in2, in3);
  dst += (4 * dst_stride);
  VP9_ADDBLK_ST8x4_UB(dst, dst_stride, in4, in5, in6, in7);
}

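/* DC-only inverse transform/add (eob == 1): each 1-D pass reduces to
 * scaling the DC coefficient by cospi_16_64, so the rounded result is
 * splatted into one vector and added to the entire 8x8 destination block. */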
void vp9_idct8x8_1_add_msa(const int16_t *input, uint8_t *dst,
                           int32_t dst_stride) {
  int16_t out;
  int32_t val;
  v8i16 vec;

  out = ROUND_POWER_OF_TWO((input[0] * cospi_16_64), DCT_CONST_BITS);
  out = ROUND_POWER_OF_TWO((out * cospi_16_64), DCT_CONST_BITS);
  val = ROUND_POWER_OF_TWO(out, 5);
  vec = __msa_fill_h(val);

  VP9_ADDBLK_ST8x4_UB(dst, dst_stride, vec, vec, vec, vec);
  dst += (4 * dst_stride);
  VP9_ADDBLK_ST8x4_UB(dst, dst_stride, vec, vec, vec, vec);
}

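/* 8x8 inverse hybrid transform/add: tx_type picks the 1-D transform for
 * each direction (DCT_DCT, ADST_DCT, DCT_ADST, ADST_ADST from libvpx's
 * TX_TYPE enum), mixing inverse DCT and inverse ADST passes. In the
 * surrounding decoder, pure DCT_DCT blocks are normally routed to the
 * vp9_idct8x8_*_add variants instead (an assumption about the caller,
 * mirroring vp9_iht8x8_add() in vp9/common/vp9_idct.c). */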
void vp9_iht8x8_64_add_msa(const int16_t *input, uint8_t *dst,
                           int32_t dst_stride, int32_t tx_type) {
  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;

  /* load vector elements of 8x8 block */
  LD_SH8(input, 8, in0, in1, in2, in3, in4, in5, in6, in7);

  TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,
                     in0, in1, in2, in3, in4, in5, in6, in7);

  switch (tx_type) {
    case DCT_DCT:
      /* DCT in horizontal */
      VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7,
                     in0, in1, in2, in3, in4, in5, in6, in7);
      /* DCT in vertical */
      TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,
                         in0, in1, in2, in3, in4, in5, in6, in7);
      VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7,
                     in0, in1, in2, in3, in4, in5, in6, in7);
      break;
    case ADST_DCT:
      /* DCT in horizontal */
      VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7,
                     in0, in1, in2, in3, in4, in5, in6, in7);
      /* ADST in vertical */
      TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,
                         in0, in1, in2, in3, in4, in5, in6, in7);
      VP9_ADST8(in0, in1, in2, in3, in4, in5, in6, in7,
                in0, in1, in2, in3, in4, in5, in6, in7);
      break;
    case DCT_ADST:
      /* ADST in horizontal */
      VP9_ADST8(in0, in1, in2, in3, in4, in5, in6, in7,
                in0, in1, in2, in3, in4, in5, in6, in7);
      /* DCT in vertical */
      TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,
                         in0, in1, in2, in3, in4, in5, in6, in7);
      VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7,
                     in0, in1, in2, in3, in4, in5, in6, in7);
      break;
    case ADST_ADST:
      /* ADST in horizontal */
      VP9_ADST8(in0, in1, in2, in3, in4, in5, in6, in7,
                in0, in1, in2, in3, in4, in5, in6, in7);
      /* ADST in vertical */
      TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,
                         in0, in1, in2, in3, in4, in5, in6, in7);
      VP9_ADST8(in0, in1, in2, in3, in4, in5, in6, in7,
                in0, in1, in2, in3, in4, in5, in6, in7);
      break;
    default:
      assert(0);
      break;
  }

  /* final rounding (add 2^4, divide by 2^5) and shift */
  SRARI_H4_SH(in0, in1, in2, in3, 5);
  SRARI_H4_SH(in4, in5, in6, in7, 5);

  /* add block and store 8x8 */
  VP9_ADDBLK_ST8x4_UB(dst, dst_stride, in0, in1, in2, in3);
  dst += (4 * dst_stride);
  VP9_ADDBLK_ST8x4_UB(dst, dst_stride, in4, in5, in6, in7);
}