OLD | NEW |
(Empty) | |
| 1 /* |
| 2 * Copyright (c) 2015 The WebM project authors. All Rights Reserved. |
| 3 * |
| 4 * Use of this source code is governed by a BSD-style license |
| 5 * that can be found in the LICENSE file in the root of the source |
| 6 * tree. An additional intellectual property rights grant can be found |
| 7 * in the file PATENTS. All contributing project authors may |
| 8 * be found in the AUTHORS file in the root of the source tree. |
| 9 */ |
| 10 |
| 11 #include "vpx_ports/mem.h" |
| 12 #include "vp9/common/vp9_idct.h" |
| 13 #include "vp9/common/mips/msa/vp9_macros_msa.h" |
| 14 |
| 15 #define DOTP_CONST_PAIR(reg0, reg1, const0, const1, out0, out1) { \ |
| 16 v8i16 k0_m = __msa_fill_h(const0); \ |
| 17 v8i16 s0_m, s1_m, s2_m, s3_m; \ |
| 18 \ |
| 19 s0_m = __msa_fill_h(const1); \ |
| 20 k0_m = __msa_ilvev_h(s0_m, k0_m); \ |
| 21 \ |
| 22 s0_m = __msa_ilvl_h(-reg1, reg0); \ |
| 23 s1_m = __msa_ilvr_h(-reg1, reg0); \ |
| 24 s2_m = __msa_ilvl_h(reg0, reg1); \ |
| 25 s3_m = __msa_ilvr_h(reg0, reg1); \ |
| 26 s1_m = (v8i16)__msa_dotp_s_w(s1_m, k0_m); \ |
| 27 s0_m = (v8i16)__msa_dotp_s_w(s0_m, k0_m); \ |
| 28 s1_m = (v8i16)__msa_srari_w((v4i32)s1_m, DCT_CONST_BITS); \ |
| 29 s0_m = (v8i16)__msa_srari_w((v4i32)s0_m, DCT_CONST_BITS); \ |
| 30 out0 = __msa_pckev_h(s0_m, s1_m); \ |
| 31 \ |
| 32 s1_m = (v8i16)__msa_dotp_s_w(s3_m, k0_m); \ |
| 33 s0_m = (v8i16)__msa_dotp_s_w(s2_m, k0_m); \ |
| 34 s1_m = (v8i16)__msa_srari_w((v4i32)s1_m, DCT_CONST_BITS); \ |
| 35 s0_m = (v8i16)__msa_srari_w((v4i32)s0_m, DCT_CONST_BITS); \ |
| 36 out1 = __msa_pckev_h(s0_m, s1_m); \ |
| 37 } |
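| /* Per 16-bit lane, DOTP_CONST_PAIR is the usual IDCT pair rotation; as a |
|  * scalar sketch (DCT_CONST_BITS is 14, from vp9_idct.h): |
|  * |
|  * out0 = dct_const_round_shift(reg0 * const0 - reg1 * const1); |
|  * out1 = dct_const_round_shift(reg1 * const0 + reg0 * const1); |
|  * |
|  * The ilvl_h/ilvr_h calls interleave {reg0, -reg1} and {reg1, reg0} pairs |
|  * so each __msa_dotp_s_w lane forms one 32-bit product sum directly. |
|  */ |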
| 38 |
| 39 #define VP9_ADDBLK_CLIP_AND_STORE_OFF_4H_VECS(dest, dest_stride, \ |
| 40 in0, in1, in2, in3) { \ |
| 41 uint64_t out0_m, out1_m, out2_m, out3_m; \ |
| 42 v8i16 res0_m, res1_m, res2_m, res3_m; \ |
| 43 v16u8 dest0_m, dest1_m, dest2_m, dest3_m; \ |
| 44 v16i8 tmp0_m, tmp1_m; \ |
| 45 v16i8 zero_m = { 0 }; \ |
| 46 uint8_t *dst_m = (uint8_t *)(dest); \ |
| 47 \ |
| 48 dest0_m = LOAD_UB(dst_m); \ |
| 49 dest1_m = LOAD_UB(dst_m + 4 * dest_stride); \ |
| 50 dest2_m = LOAD_UB(dst_m + 8 * dest_stride); \ |
| 51 dest3_m = LOAD_UB(dst_m + 12 * dest_stride); \ |
| 52 \ |
| 53 res0_m = (v8i16)__msa_ilvr_b(zero_m, (v16i8)dest0_m); \ |
| 54 res1_m = (v8i16)__msa_ilvr_b(zero_m, (v16i8)dest1_m); \ |
| 55 res2_m = (v8i16)__msa_ilvr_b(zero_m, (v16i8)dest2_m); \ |
| 56 res3_m = (v8i16)__msa_ilvr_b(zero_m, (v16i8)dest3_m); \ |
| 57 \ |
| 58 res0_m += (v8i16)(in0); \ |
| 59 res1_m += (v8i16)(in1); \ |
| 60 res2_m += (v8i16)(in2); \ |
| 61 res3_m += (v8i16)(in3); \ |
| 62 \ |
| 63 res0_m = CLIP_UNSIGNED_CHAR_H(res0_m); \ |
| 64 res1_m = CLIP_UNSIGNED_CHAR_H(res1_m); \ |
| 65 res2_m = CLIP_UNSIGNED_CHAR_H(res2_m); \ |
| 66 res3_m = CLIP_UNSIGNED_CHAR_H(res3_m); \ |
| 67 \ |
| 68 tmp0_m = __msa_pckev_b((v16i8)res1_m, (v16i8)res0_m); \ |
| 69 tmp1_m = __msa_pckev_b((v16i8)res3_m, (v16i8)res2_m); \ |
| 70 \ |
| 71 out0_m = __msa_copy_u_d((v2i64)tmp0_m, 0); \ |
| 72 out1_m = __msa_copy_u_d((v2i64)tmp0_m, 1); \ |
| 73 out2_m = __msa_copy_u_d((v2i64)tmp1_m, 0); \ |
| 74 out3_m = __msa_copy_u_d((v2i64)tmp1_m, 1); \ |
| 75 \ |
| 76 STORE_DWORD(dst_m, out0_m); \ |
| 77 dst_m += (4 * dest_stride); \ |
| 78 STORE_DWORD(dst_m, out1_m); \ |
| 79 dst_m += (4 * dest_stride); \ |
| 80 STORE_DWORD(dst_m, out2_m); \ |
| 81 dst_m += (4 * dest_stride); \ |
| 82 STORE_DWORD(dst_m, out3_m); \ |
| 83 } |
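| /* The macro above fuses reconstruction with the store: it zero-extends |
|  * eight predictor bytes per row, adds the in0..in3 residue vectors, clips |
|  * the sums to [0, 255], packs back to bytes, and writes the four 8-pixel |
|  * rows at offsets of 4 * dest_stride. Roughly, per pixel: |
|  * |
|  * dest[x] = clip_pixel(dest[x] + residue[x]); |
|  */ |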
| 84 |
| 85 static void vp9_idct32x8_row_transpose_store(const int16_t *input, |
| 86 int16_t *tmp_buf) { |
| 87 v8i16 m0, m1, m2, m3, m4, m5, m6, m7; |
| 88 v8i16 n0, n1, n2, n3, n4, n5, n6, n7; |
| 89 |
| 90 /* 1st & 2nd 8x8 */ |
| 91 LOAD_8VECS_SH(input, 32, m0, n0, m1, n1, m2, n2, m3, n3); |
| 92 LOAD_8VECS_SH((input + 8), 32, m4, n4, m5, n5, m6, n6, m7, n7); |
| 93 TRANSPOSE8x8_H_SH(m0, n0, m1, n1, m2, n2, m3, n3, |
| 94 m0, n0, m1, n1, m2, n2, m3, n3); |
| 95 TRANSPOSE8x8_H_SH(m4, n4, m5, n5, m6, n6, m7, n7, |
| 96 m4, n4, m5, n5, m6, n6, m7, n7); |
| 97 STORE_4VECS_SH((tmp_buf), 8, m0, n0, m1, n1); |
| 98 STORE_4VECS_SH((tmp_buf + 4 * 8), 8, m2, n2, m3, n3); |
| 99 STORE_4VECS_SH((tmp_buf + 8 * 8), 8, m4, n4, m5, n5); |
| 100 STORE_4VECS_SH((tmp_buf + 12 * 8), 8, m6, n6, m7, n7); |
| 101 |
| 102 /* 3rd & 4th 8x8 */ |
| 103 LOAD_8VECS_SH((input + 16), 32, m0, n0, m1, n1, m2, n2, m3, n3); |
| 104 LOAD_8VECS_SH((input + 24), 32, m4, n4, m5, n5, m6, n6, m7, n7); |
| 105 TRANSPOSE8x8_H_SH(m0, n0, m1, n1, m2, n2, m3, n3, |
| 106 m0, n0, m1, n1, m2, n2, m3, n3); |
| 107 TRANSPOSE8x8_H_SH(m4, n4, m5, n5, m6, n6, m7, n7, |
| 108 m4, n4, m5, n5, m6, n6, m7, n7); |
| 109 STORE_4VECS_SH((tmp_buf + 16 * 8), 8, m0, n0, m1, n1); |
| 110 STORE_4VECS_SH((tmp_buf + 20 * 8), 8, m2, n2, m3, n3); |
| 111 STORE_4VECS_SH((tmp_buf + 24 * 8), 8, m4, n4, m5, n5); |
| 112 STORE_4VECS_SH((tmp_buf + 28 * 8), 8, m6, n6, m7, n7); |
| 113 } |
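| /* After the transposes above, vector k of tmp_buf holds coefficient k of |
|  * the eight rows in flight, so the butterfly stages can apply the 32-point |
|  * IDCT to eight rows at once. */ |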
| 114 |
| 115 static void vp9_idct32x8_row_even_process_store(int16_t *tmp_buf, |
| 116 int16_t *tmp_eve_buf) { |
| 117 v8i16 vec0, vec1, vec2, vec3, loc0, loc1, loc2, loc3; |
| 118 v8i16 reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7; |
| 119 v8i16 stp0, stp1, stp2, stp3, stp4, stp5, stp6, stp7; |
| 120 |
| 121 /* Even stage 1 */ |
| 122 LOAD_8VECS_SH(tmp_buf, 32, reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7); |
| 123 |
| 124 DOTP_CONST_PAIR(reg1, reg7, cospi_28_64, cospi_4_64, reg1, reg7); |
| 125 DOTP_CONST_PAIR(reg5, reg3, cospi_12_64, cospi_20_64, reg5, reg3); |
| 126 |
| 127 vec0 = reg1 - reg5; |
| 128 vec1 = reg1 + reg5; |
| 129 vec2 = reg7 - reg3; |
| 130 vec3 = reg7 + reg3; |
| 131 |
| 132 DOTP_CONST_PAIR(vec2, vec0, cospi_16_64, cospi_16_64, loc2, loc3); |
| 133 |
| 134 loc1 = vec3; |
| 135 loc0 = vec1; |
| 136 |
| 137 DOTP_CONST_PAIR(reg0, reg4, cospi_16_64, cospi_16_64, reg0, reg4); |
| 138 DOTP_CONST_PAIR(reg2, reg6, cospi_24_64, cospi_8_64, reg2, reg6); |
| 139 |
| 140 vec0 = reg4 - reg6; |
| 141 vec1 = reg4 + reg6; |
| 142 vec2 = reg0 - reg2; |
| 143 vec3 = reg0 + reg2; |
| 144 |
| 145 stp4 = vec0 - loc0; |
| 146 stp3 = vec0 + loc0; |
| 147 stp7 = vec1 - loc1; |
| 148 stp0 = vec1 + loc1; |
| 149 stp5 = vec2 - loc2; |
| 150 stp2 = vec2 + loc2; |
| 151 stp6 = vec3 - loc3; |
| 152 stp1 = vec3 + loc3; |
| 153 |
| 154 /* Even stage 2 */ |
| 155 LOAD_8VECS_SH((tmp_buf + 16), 32, |
| 156 reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7); |
| 157 |
| 158 DOTP_CONST_PAIR(reg0, reg7, cospi_30_64, cospi_2_64, reg0, reg7); |
| 159 DOTP_CONST_PAIR(reg4, reg3, cospi_14_64, cospi_18_64, reg4, reg3); |
| 160 DOTP_CONST_PAIR(reg2, reg5, cospi_22_64, cospi_10_64, reg2, reg5); |
| 161 DOTP_CONST_PAIR(reg6, reg1, cospi_6_64, cospi_26_64, reg6, reg1); |
| 162 |
| 163 vec0 = reg0 + reg4; |
| 164 reg0 = reg0 - reg4; |
| 165 reg4 = reg6 + reg2; |
| 166 reg6 = reg6 - reg2; |
| 167 reg2 = reg1 + reg5; |
| 168 reg1 = reg1 - reg5; |
| 169 reg5 = reg7 + reg3; |
| 170 reg7 = reg7 - reg3; |
| 171 reg3 = vec0; |
| 172 |
| 173 vec1 = reg2; |
| 174 reg2 = reg3 + reg4; |
| 175 reg3 = reg3 - reg4; |
| 176 reg4 = reg5 - vec1; |
| 177 reg5 = reg5 + vec1; |
| 178 |
| 179 DOTP_CONST_PAIR(reg7, reg0, cospi_24_64, cospi_8_64, reg0, reg7); |
| 180 DOTP_CONST_PAIR((-reg6), reg1, cospi_24_64, cospi_8_64, reg6, reg1); |
| 181 |
| 182 vec0 = reg0 - reg6; |
| 183 reg0 = reg0 + reg6; |
| 184 vec1 = reg7 - reg1; |
| 185 reg7 = reg7 + reg1; |
| 186 |
| 187 DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, reg6, reg1); |
| 188 DOTP_CONST_PAIR(reg4, reg3, cospi_16_64, cospi_16_64, reg3, reg4); |
| 189 |
| 190 /* Even stage 3: Dependency on Even stage 1 & Even stage 2 */ |
| 191 loc0 = stp0 - reg5; |
| 192 loc1 = stp0 + reg5; |
| 193 loc2 = stp1 - reg7; |
| 194 loc3 = stp1 + reg7; |
| 195 STORE_SH(loc0, (tmp_eve_buf + 15 * 8)); |
| 196 STORE_SH(loc1, (tmp_eve_buf)); |
| 197 STORE_SH(loc2, (tmp_eve_buf + 14 * 8)); |
| 198 STORE_SH(loc3, (tmp_eve_buf + 8)); |
| 199 |
| 200 loc0 = stp2 - reg1; |
| 201 loc1 = stp2 + reg1; |
| 202 loc2 = stp3 - reg4; |
| 203 loc3 = stp3 + reg4; |
| 204 STORE_SH(loc0, (tmp_eve_buf + 13 * 8)); |
| 205 STORE_SH(loc1, (tmp_eve_buf + 2 * 8)); |
| 206 STORE_SH(loc2, (tmp_eve_buf + 12 * 8)); |
| 207 STORE_SH(loc3, (tmp_eve_buf + 3 * 8)); |
| 208 |
| 209 /* Store 8 */ |
| 210 loc0 = stp4 - reg3; |
| 211 loc1 = stp4 + reg3; |
| 212 loc2 = stp5 - reg6; |
| 213 loc3 = stp5 + reg6; |
| 214 STORE_SH(loc0, (tmp_eve_buf + 11 * 8)); |
| 215 STORE_SH(loc1, (tmp_eve_buf + 4 * 8)); |
| 216 STORE_SH(loc2, (tmp_eve_buf + 10 * 8)); |
| 217 STORE_SH(loc3, (tmp_eve_buf + 5 * 8)); |
| 218 |
| 219 loc0 = stp6 - reg0; |
| 220 loc1 = stp6 + reg0; |
| 221 loc2 = stp7 - reg2; |
| 222 loc3 = stp7 + reg2; |
| 223 STORE_SH(loc0, (tmp_eve_buf + 9 * 8)); |
| 224 STORE_SH(loc1, (tmp_eve_buf + 6 * 8)); |
| 225 STORE_SH(loc2, (tmp_eve_buf + 8 * 8)); |
| 226 STORE_SH(loc3, (tmp_eve_buf + 7 * 8)); |
| 227 } |
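| /* The even half leaves its 16 results in tmp_eve_buf in natural order |
|  * e[0]..e[15]; the final butterfly later forms out[k] = e[k] + odd and |
|  * out[31 - k] = e[k] - odd with the matching odd-half terms. */ |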
| 228 |
| 229 static void vp9_idct32x8_row_odd_process_store(int16_t *tmp_buf, |
| 230 int16_t *tmp_odd_buf) { |
| 231 v8i16 vec0, vec1, vec2, vec3, loc0, loc1, loc2, loc3; |
| 232 v8i16 reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7; |
| 233 |
| 234 /* Odd stage 1 */ |
| 235 reg0 = LOAD_SH(tmp_buf + 8); |
| 236 reg1 = LOAD_SH(tmp_buf + 7 * 8); |
| 237 reg2 = LOAD_SH(tmp_buf + 9 * 8); |
| 238 reg3 = LOAD_SH(tmp_buf + 15 * 8); |
| 239 reg4 = LOAD_SH(tmp_buf + 17 * 8); |
| 240 reg5 = LOAD_SH(tmp_buf + 23 * 8); |
| 241 reg6 = LOAD_SH(tmp_buf + 25 * 8); |
| 242 reg7 = LOAD_SH(tmp_buf + 31 * 8); |
| 243 |
| 244 DOTP_CONST_PAIR(reg0, reg7, cospi_31_64, cospi_1_64, reg0, reg7); |
| 245 DOTP_CONST_PAIR(reg4, reg3, cospi_15_64, cospi_17_64, reg3, reg4); |
| 246 DOTP_CONST_PAIR(reg2, reg5, cospi_23_64, cospi_9_64, reg2, reg5); |
| 247 DOTP_CONST_PAIR(reg6, reg1, cospi_7_64, cospi_25_64, reg1, reg6); |
| 248 |
| 249 vec0 = reg0 + reg3; |
| 250 reg0 = reg0 - reg3; |
| 251 reg3 = reg7 + reg4; |
| 252 reg7 = reg7 - reg4; |
| 253 reg4 = reg1 + reg2; |
| 254 reg1 = reg1 - reg2; |
| 255 reg2 = reg6 + reg5; |
| 256 reg6 = reg6 - reg5; |
| 257 reg5 = vec0; |
| 258 |
| 259 /* 4 Stores */ |
| 260 vec0 = reg5 + reg4; |
| 261 vec1 = reg3 + reg2; |
| 262 STORE_SH(vec0, (tmp_odd_buf + 4 * 8)); |
| 263 STORE_SH(vec1, (tmp_odd_buf + 5 * 8)); |
| 264 |
| 265 vec0 = reg5 - reg4; |
| 266 vec1 = reg3 - reg2; |
| 267 DOTP_CONST_PAIR(vec1, vec0, cospi_24_64, cospi_8_64, vec0, vec1); |
| 268 STORE_SH(vec0, (tmp_odd_buf)); |
| 269 STORE_SH(vec1, (tmp_odd_buf + 8)); |
| 270 |
| 271 /* 4 Stores */ |
| 272 DOTP_CONST_PAIR(reg7, reg0, cospi_28_64, cospi_4_64, reg0, reg7); |
| 273 DOTP_CONST_PAIR(reg6, reg1, -cospi_4_64, cospi_28_64, reg1, reg6); |
| 274 |
| 275 vec0 = reg0 + reg1; |
| 276 vec2 = reg7 - reg6; |
| 277 vec1 = reg7 + reg6; |
| 278 vec3 = reg0 - reg1; |
| 279 STORE_SH(vec0, (tmp_odd_buf + 6 * 8)); |
| 280 STORE_SH(vec1, (tmp_odd_buf + 7 * 8)); |
| 281 |
| 282 DOTP_CONST_PAIR(vec2, vec3, cospi_24_64, cospi_8_64, vec2, vec3); |
| 283 STORE_SH(vec2, (tmp_odd_buf + 2 * 8)); |
| 284 STORE_SH(vec3, (tmp_odd_buf + 3 * 8)); |
| 285 |
| 286 /* Odd stage 2 */ |
| 287 |
| 288 /* 8 loads */ |
| 289 reg0 = LOAD_SH(tmp_buf + 3 * 8); |
| 290 reg1 = LOAD_SH(tmp_buf + 5 * 8); |
| 291 reg2 = LOAD_SH(tmp_buf + 11 * 8); |
| 292 reg3 = LOAD_SH(tmp_buf + 13 * 8); |
| 293 reg4 = LOAD_SH(tmp_buf + 19 * 8); |
| 294 reg5 = LOAD_SH(tmp_buf + 21 * 8); |
| 295 reg6 = LOAD_SH(tmp_buf + 27 * 8); |
| 296 reg7 = LOAD_SH(tmp_buf + 29 * 8); |
| 297 |
| 298 DOTP_CONST_PAIR(reg1, reg6, cospi_27_64, cospi_5_64, reg1, reg6); |
| 299 DOTP_CONST_PAIR(reg5, reg2, cospi_11_64, cospi_21_64, reg2, reg5); |
| 300 DOTP_CONST_PAIR(reg3, reg4, cospi_19_64, cospi_13_64, reg3, reg4); |
| 301 DOTP_CONST_PAIR(reg7, reg0, cospi_3_64, cospi_29_64, reg0, reg7); |
| 302 |
| 303 /* 4 Stores */ |
| 304 vec0 = reg1 - reg2; |
| 305 vec1 = reg6 - reg5; |
| 306 vec2 = reg0 - reg3; |
| 307 vec3 = reg7 - reg4; |
| 308 DOTP_CONST_PAIR(vec1, vec0, cospi_12_64, cospi_20_64, loc0, loc1); |
| 309 DOTP_CONST_PAIR(vec3, vec2, -cospi_20_64, cospi_12_64, loc2, loc3); |
| 310 |
| 311 vec2 = loc2 - loc0; |
| 312 vec3 = loc3 - loc1; |
| 313 vec0 = loc2 + loc0; |
| 314 vec1 = loc3 + loc1; |
| 315 STORE_SH(vec0, (tmp_odd_buf + 12 * 8)); |
| 316 STORE_SH(vec1, (tmp_odd_buf + 15 * 8)); |
| 317 |
| 318 DOTP_CONST_PAIR(vec3, vec2, -cospi_8_64, cospi_24_64, vec0, vec1); |
| 319 |
| 320 STORE_SH(vec0, (tmp_odd_buf + 10 * 8)); |
| 321 STORE_SH(vec1, (tmp_odd_buf + 11 * 8)); |
| 322 |
| 323 /* 4 Stores */ |
| 324 vec0 = reg0 + reg3; |
| 325 vec1 = reg1 + reg2; |
| 326 vec2 = reg6 + reg5; |
| 327 vec3 = reg7 + reg4; |
| 328 reg0 = vec0 + vec1; |
| 329 reg1 = vec3 + vec2; |
| 330 reg2 = vec0 - vec1; |
| 331 reg3 = vec3 - vec2; |
| 332 STORE_SH(reg0, (tmp_odd_buf + 13 * 8)); |
| 333 STORE_SH(reg1, (tmp_odd_buf + 14 * 8)); |
| 334 |
| 335 DOTP_CONST_PAIR(reg3, reg2, -cospi_8_64, cospi_24_64, reg0, reg1); |
| 336 |
| 337 STORE_SH(reg0, (tmp_odd_buf + 8 * 8)); |
| 338 STORE_SH(reg1, (tmp_odd_buf + 9 * 8)); |
| 339 |
| 340 /* Odd stage 3: Dependency on Odd stage 1 & Odd stage 2 */ |
| 341 |
| 342 /* Load 8 & Store 8 */ |
| 343 reg0 = LOAD_SH(tmp_odd_buf); |
| 344 reg1 = LOAD_SH(tmp_odd_buf + 1 * 8); |
| 345 reg2 = LOAD_SH(tmp_odd_buf + 2 * 8); |
| 346 reg3 = LOAD_SH(tmp_odd_buf + 3 * 8); |
| 347 reg4 = LOAD_SH(tmp_odd_buf + 8 * 8); |
| 348 reg5 = LOAD_SH(tmp_odd_buf + 9 * 8); |
| 349 reg6 = LOAD_SH(tmp_odd_buf + 10 * 8); |
| 350 reg7 = LOAD_SH(tmp_odd_buf + 11 * 8); |
| 351 |
| 352 loc0 = reg0 + reg4; |
| 353 loc1 = reg1 + reg5; |
| 354 loc2 = reg2 + reg6; |
| 355 loc3 = reg3 + reg7; |
| 356 STORE_SH(loc0, (tmp_odd_buf)); |
| 357 STORE_SH(loc1, (tmp_odd_buf + 1 * 8)); |
| 358 STORE_SH(loc2, (tmp_odd_buf + 2 * 8)); |
| 359 STORE_SH(loc3, (tmp_odd_buf + 3 * 8)); |
| 360 |
| 361 vec0 = reg0 - reg4; |
| 362 vec1 = reg1 - reg5; |
| 363 DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc0, loc1); |
| 364 |
| 365 vec0 = reg2 - reg6; |
| 366 vec1 = reg3 - reg7; |
| 367 DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc2, loc3); |
| 368 |
| 369 STORE_SH(loc0, (tmp_odd_buf + 8 * 8)); |
| 370 STORE_SH(loc1, (tmp_odd_buf + 9 * 8)); |
| 371 STORE_SH(loc2, (tmp_odd_buf + 10 * 8)); |
| 372 STORE_SH(loc3, (tmp_odd_buf + 11 * 8)); |
| 373 |
| 374 /* Load 8 & Store 8 */ |
| 375 reg1 = LOAD_SH(tmp_odd_buf + 4 * 8); |
| 376 reg2 = LOAD_SH(tmp_odd_buf + 5 * 8); |
| 377 reg0 = LOAD_SH(tmp_odd_buf + 6 * 8); |
| 378 reg3 = LOAD_SH(tmp_odd_buf + 7 * 8); |
| 379 reg4 = LOAD_SH(tmp_odd_buf + 12 * 8); |
| 380 reg5 = LOAD_SH(tmp_odd_buf + 13 * 8); |
| 381 reg6 = LOAD_SH(tmp_odd_buf + 14 * 8); |
| 382 reg7 = LOAD_SH(tmp_odd_buf + 15 * 8); |
| 383 |
| 384 loc0 = reg0 + reg4; |
| 385 loc1 = reg1 + reg5; |
| 386 loc2 = reg2 + reg6; |
| 387 loc3 = reg3 + reg7; |
| 388 STORE_SH(loc0, (tmp_odd_buf + 4 * 8)); |
| 389 STORE_SH(loc1, (tmp_odd_buf + 5 * 8)); |
| 390 STORE_SH(loc2, (tmp_odd_buf + 6 * 8)); |
| 391 STORE_SH(loc3, (tmp_odd_buf + 7 * 8)); |
| 392 |
| 393 vec0 = reg0 - reg4; |
| 394 vec1 = reg3 - reg7; |
| 395 DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc0, loc1); |
| 396 |
| 397 vec0 = reg1 - reg5; |
| 398 vec1 = reg2 - reg6; |
| 399 DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc2, loc3); |
| 400 |
| 401 STORE_SH(loc0, (tmp_odd_buf + 12 * 8)); |
| 402 STORE_SH(loc1, (tmp_odd_buf + 13 * 8)); |
| 403 STORE_SH(loc2, (tmp_odd_buf + 14 * 8)); |
| 404 STORE_SH(loc3, (tmp_odd_buf + 15 * 8)); |
| 405 } |
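| /* The odd half (odd-indexed inputs of the 32-point transform) writes its |
|  * 16 results to tmp_odd_buf in the permuted order that the final butterfly |
|  * consumes directly, which avoids another shuffle pass. */ |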
| 406 |
| 407 static void vp9_idct_butterfly_transpose_store(int16_t *tmp_buf, |
| 408 int16_t *tmp_eve_buf, |
| 409 int16_t *tmp_odd_buf, |
| 410 int16_t *dest) { |
| 411 v8i16 vec0, vec1, vec2, vec3, loc0, loc1, loc2, loc3; |
| 412 v8i16 m0, m1, m2, m3, m4, m5, m6, m7; |
| 413 v8i16 n0, n1, n2, n3, n4, n5, n6, n7; |
| 414 |
| 415 /* FINAL BUTTERFLY: Dependency on Even & Odd */ |
| 416 /* Total: 32 loads, 32 stores */ |
| 417 vec0 = LOAD_SH(tmp_odd_buf); |
| 418 vec1 = LOAD_SH(tmp_odd_buf + 9 * 8); |
| 419 vec2 = LOAD_SH(tmp_odd_buf + 14 * 8); |
| 420 vec3 = LOAD_SH(tmp_odd_buf + 6 * 8); |
| 421 loc0 = LOAD_SH(tmp_eve_buf); |
| 422 loc1 = LOAD_SH(tmp_eve_buf + 8 * 8); |
| 423 loc2 = LOAD_SH(tmp_eve_buf + 4 * 8); |
| 424 loc3 = LOAD_SH(tmp_eve_buf + 12 * 8); |
| 425 |
| 426 m0 = (loc0 + vec3); |
| 427 STORE_SH((loc0 - vec3), (tmp_buf + 31 * 8)); |
| 428 STORE_SH((loc1 - vec2), (tmp_buf + 23 * 8)); |
| 429 m4 = (loc1 + vec2); |
| 430 STORE_SH((loc2 - vec1), (tmp_buf + 27 * 8)); |
| 431 m2 = (loc2 + vec1); |
| 432 STORE_SH((loc3 - vec0), (tmp_buf + 19 * 8)); |
| 433 m6 = (loc3 + vec0); |
| 434 |
| 435 /* Load 8 & Store 8 */ |
| 436 vec0 = LOAD_SH(tmp_odd_buf + 4 * 8); |
| 437 vec1 = LOAD_SH(tmp_odd_buf + 13 * 8); |
| 438 vec2 = LOAD_SH(tmp_odd_buf + 10 * 8); |
| 439 vec3 = LOAD_SH(tmp_odd_buf + 3 * 8); |
| 440 loc0 = LOAD_SH(tmp_eve_buf + 2 * 8); |
| 441 loc1 = LOAD_SH(tmp_eve_buf + 10 * 8); |
| 442 loc2 = LOAD_SH(tmp_eve_buf + 6 * 8); |
| 443 loc3 = LOAD_SH(tmp_eve_buf + 14 * 8); |
| 444 |
| 445 m1 = (loc0 + vec3); |
| 446 STORE_SH((loc0 - vec3), (tmp_buf + 29 * 8)); |
| 447 STORE_SH((loc1 - vec2), (tmp_buf + 21 * 8)); |
| 448 m5 = (loc1 + vec2); |
| 449 STORE_SH((loc2 - vec1), (tmp_buf + 25 * 8)); |
| 450 m3 = (loc2 + vec1); |
| 451 STORE_SH((loc3 - vec0), (tmp_buf + 17 * 8)); |
| 452 m7 = (loc3 + vec0); |
| 453 |
| 454 /* Load 8 & Store 8 */ |
| 455 vec0 = LOAD_SH(tmp_odd_buf + 2 * 8); |
| 456 vec1 = LOAD_SH(tmp_odd_buf + 11 * 8); |
| 457 vec2 = LOAD_SH(tmp_odd_buf + 12 * 8); |
| 458 vec3 = LOAD_SH(tmp_odd_buf + 7 * 8); |
| 459 loc0 = LOAD_SH(tmp_eve_buf + 1 * 8); |
| 460 loc1 = LOAD_SH(tmp_eve_buf + 9 * 8); |
| 461 loc2 = LOAD_SH(tmp_eve_buf + 5 * 8); |
| 462 loc3 = LOAD_SH(tmp_eve_buf + 13 * 8); |
| 463 |
| 464 n0 = (loc0 + vec3); |
| 465 STORE_SH((loc0 - vec3), (tmp_buf + 30 * 8)); |
| 466 STORE_SH((loc1 - vec2), (tmp_buf + 22 * 8)); |
| 467 n4 = (loc1 + vec2); |
| 468 STORE_SH((loc2 - vec1), (tmp_buf + 26 * 8)); |
| 469 n2 = (loc2 + vec1); |
| 470 STORE_SH((loc3 - vec0), (tmp_buf + 18 * 8)); |
| 471 n6 = (loc3 + vec0); |
| 472 |
| 473 /* Load 8 & Store 8 */ |
| 474 vec0 = LOAD_SH(tmp_odd_buf + 5 * 8); |
| 475 vec1 = LOAD_SH(tmp_odd_buf + 15 * 8); |
| 476 vec2 = LOAD_SH(tmp_odd_buf + 8 * 8); |
| 477 vec3 = LOAD_SH(tmp_odd_buf + 1 * 8); |
| 478 loc0 = LOAD_SH(tmp_eve_buf + 3 * 8); |
| 479 loc1 = LOAD_SH(tmp_eve_buf + 11 * 8); |
| 480 loc2 = LOAD_SH(tmp_eve_buf + 7 * 8); |
| 481 loc3 = LOAD_SH(tmp_eve_buf + 15 * 8); |
| 482 |
| 483 n1 = (loc0 + vec3); |
| 484 STORE_SH((loc0 - vec3), (tmp_buf + 28 * 8)); |
| 485 STORE_SH((loc1 - vec2), (tmp_buf + 20 * 8)); |
| 486 n5 = (loc1 + vec2); |
| 487 STORE_SH((loc2 - vec1), (tmp_buf + 24 * 8)); |
| 488 n3 = (loc2 + vec1); |
| 489 STORE_SH((loc3 - vec0), (tmp_buf + 16 * 8)); |
| 490 n7 = (loc3 + vec0); |
| 491 |
| 492 /* Transpose : 16 vectors */ |
| 493 /* 1st & 2nd 8x8 */ |
| 494 TRANSPOSE8x8_H_SH(m0, n0, m1, n1, m2, n2, m3, n3, |
| 495 m0, n0, m1, n1, m2, n2, m3, n3); |
| 496 STORE_4VECS_SH((dest + 0), 32, m0, n0, m1, n1); |
| 497 STORE_4VECS_SH((dest + 4 * 32), 32, m2, n2, m3, n3); |
| 498 |
| 499 TRANSPOSE8x8_H_SH(m4, n4, m5, n5, m6, n6, m7, n7, |
| 500 m4, n4, m5, n5, m6, n6, m7, n7); |
| 501 STORE_4VECS_SH((dest + 8), 32, m4, n4, m5, n5); |
| 502 STORE_4VECS_SH((dest + 8 + 4 * 32), 32, m6, n6, m7, n7); |
| 503 |
| 504 /* 3rd & 4th 8x8 */ |
| 505 LOAD_8VECS_SH((tmp_buf + 8 * 16), 8, m0, n0, m1, n1, m2, n2, m3, n3); |
| 506 LOAD_8VECS_SH((tmp_buf + 12 * 16), 8, m4, n4, m5, n5, m6, n6, m7, n7); |
| 507 TRANSPOSE8x8_H_SH(m0, n0, m1, n1, m2, n2, m3, n3, |
| 508 m0, n0, m1, n1, m2, n2, m3, n3); |
| 509 STORE_4VECS_SH((dest + 16), 32, m0, n0, m1, n1); |
| 510 STORE_4VECS_SH((dest + 16 + 4 * 32), 32, m2, n2, m3, n3); |
| 511 |
| 512 TRANSPOSE8x8_H_SH(m4, n4, m5, n5, m6, n6, m7, n7, |
| 513 m4, n4, m5, n5, m6, n6, m7, n7); |
| 514 STORE_4VECS_SH((dest + 24), 32, m4, n4, m5, n5); |
| 515 STORE_4VECS_SH((dest + 24 + 4 * 32), 32, m6, n6, m7, n7); |
| 516 } |
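| /* The function above combines the halves (sums kept in registers, |
|  * differences staged through tmp_buf), then re-transposes the four 8x8 |
|  * tiles so the 32x8 row-pass result lands in raster order in dest. */ |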
| 517 |
| 518 static void vp9_idct32x8_1d_rows_msa(const int16_t *input, int16_t *output) { |
| 519 DECLARE_ALIGNED(32, int16_t, tmp_buf[8 * 32]); |
| 520 DECLARE_ALIGNED(32, int16_t, tmp_odd_buf[16 * 8]); |
| 521 DECLARE_ALIGNED(32, int16_t, tmp_eve_buf[16 * 8]); |
| 522 |
| 523 vp9_idct32x8_row_transpose_store(input, &tmp_buf[0]); |
| 524 |
| 525 vp9_idct32x8_row_even_process_store(&tmp_buf[0], &tmp_eve_buf[0]); |
| 526 |
| 527 vp9_idct32x8_row_odd_process_store(&tmp_buf[0], &tmp_odd_buf[0]); |
| 528 |
| 529 vp9_idct_butterfly_transpose_store(&tmp_buf[0], &tmp_eve_buf[0], |
| 530 &tmp_odd_buf[0], output); |
| 531 } |
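| /* One row pass: transpose an 8-row slice, run the even and odd halves of |
|  * the 32-point IDCT, then butterfly and transpose back; the scratch |
|  * buffers hold each stage's results as aligned 8-element vectors. */ |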
| 532 |
| 533 static void vp9_idct8x32_column_even_process_store(int16_t *tmp_buf, |
| 534 int16_t *tmp_eve_buf) { |
| 535 v8i16 vec0, vec1, vec2, vec3, loc0, loc1, loc2, loc3; |
| 536 v8i16 reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7; |
| 537 v8i16 stp0, stp1, stp2, stp3, stp4, stp5, stp6, stp7; |
| 538 |
| 539 /* Even stage 1 */ |
| 540 LOAD_8VECS_SH(tmp_buf, (4 * 32), |
| 541 reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7); |
| 542 |
| 543 DOTP_CONST_PAIR(reg1, reg7, cospi_28_64, cospi_4_64, reg1, reg7); |
| 544 DOTP_CONST_PAIR(reg5, reg3, cospi_12_64, cospi_20_64, reg5, reg3); |
| 545 |
| 546 vec0 = reg1 - reg5; |
| 547 vec1 = reg1 + reg5; |
| 548 vec2 = reg7 - reg3; |
| 549 vec3 = reg7 + reg3; |
| 550 |
| 551 DOTP_CONST_PAIR(vec2, vec0, cospi_16_64, cospi_16_64, loc2, loc3); |
| 552 |
| 553 loc1 = vec3; |
| 554 loc0 = vec1; |
| 555 |
| 556 DOTP_CONST_PAIR(reg0, reg4, cospi_16_64, cospi_16_64, reg0, reg4); |
| 557 DOTP_CONST_PAIR(reg2, reg6, cospi_24_64, cospi_8_64, reg2, reg6); |
| 558 |
| 559 vec0 = reg4 - reg6; |
| 560 vec1 = reg4 + reg6; |
| 561 vec2 = reg0 - reg2; |
| 562 vec3 = reg0 + reg2; |
| 563 |
| 564 stp4 = vec0 - loc0; |
| 565 stp3 = vec0 + loc0; |
| 566 stp7 = vec1 - loc1; |
| 567 stp0 = vec1 + loc1; |
| 568 stp5 = vec2 - loc2; |
| 569 stp2 = vec2 + loc2; |
| 570 stp6 = vec3 - loc3; |
| 571 stp1 = vec3 + loc3; |
| 572 |
| 573 /* Even stage 2 */ |
| 574 /* Load 8 */ |
| 575 LOAD_8VECS_SH((tmp_buf + 2 * 32), (4 * 32), |
| 576 reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7); |
| 577 |
| 578 DOTP_CONST_PAIR(reg0, reg7, cospi_30_64, cospi_2_64, reg0, reg7); |
| 579 DOTP_CONST_PAIR(reg4, reg3, cospi_14_64, cospi_18_64, reg4, reg3); |
| 580 DOTP_CONST_PAIR(reg2, reg5, cospi_22_64, cospi_10_64, reg2, reg5); |
| 581 DOTP_CONST_PAIR(reg6, reg1, cospi_6_64, cospi_26_64, reg6, reg1); |
| 582 |
| 583 vec0 = reg0 + reg4; |
| 584 reg0 = reg0 - reg4; |
| 585 reg4 = reg6 + reg2; |
| 586 reg6 = reg6 - reg2; |
| 587 reg2 = reg1 + reg5; |
| 588 reg1 = reg1 - reg5; |
| 589 reg5 = reg7 + reg3; |
| 590 reg7 = reg7 - reg3; |
| 591 reg3 = vec0; |
| 592 |
| 593 vec1 = reg2; |
| 594 reg2 = reg3 + reg4; |
| 595 reg3 = reg3 - reg4; |
| 596 reg4 = reg5 - vec1; |
| 597 reg5 = reg5 + vec1; |
| 598 |
| 599 DOTP_CONST_PAIR(reg7, reg0, cospi_24_64, cospi_8_64, reg0, reg7); |
| 600 DOTP_CONST_PAIR((-reg6), reg1, cospi_24_64, cospi_8_64, reg6, reg1); |
| 601 |
| 602 vec0 = reg0 - reg6; |
| 603 reg0 = reg0 + reg6; |
| 604 vec1 = reg7 - reg1; |
| 605 reg7 = reg7 + reg1; |
| 606 |
| 607 DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, reg6, reg1); |
| 608 DOTP_CONST_PAIR(reg4, reg3, cospi_16_64, cospi_16_64, reg3, reg4); |
| 609 |
| 610 /* Even stage 3: Dependency on Even stage 1 & Even stage 2 */ |
| 611 /* Store 8 */ |
| 612 loc0 = stp0 - reg5; |
| 613 loc1 = stp0 + reg5; |
| 614 loc2 = stp1 - reg7; |
| 615 loc3 = stp1 + reg7; |
| 616 STORE_SH(loc0, (tmp_eve_buf + 15 * 8)); |
| 617 STORE_SH(loc1, (tmp_eve_buf)); |
| 618 STORE_SH(loc2, (tmp_eve_buf + 14 * 8)); |
| 619 STORE_SH(loc3, (tmp_eve_buf + 1 * 8)); |
| 620 |
| 621 loc0 = stp2 - reg1; |
| 622 loc1 = stp2 + reg1; |
| 623 loc2 = stp3 - reg4; |
| 624 loc3 = stp3 + reg4; |
| 625 STORE_SH(loc0, (tmp_eve_buf + 13 * 8)); |
| 626 STORE_SH(loc1, (tmp_eve_buf + 2 * 8)); |
| 627 STORE_SH(loc2, (tmp_eve_buf + 12 * 8)); |
| 628 STORE_SH(loc3, (tmp_eve_buf + 3 * 8)); |
| 629 |
| 630 /* Store 8 */ |
| 631 loc0 = stp4 - reg3; |
| 632 loc1 = stp4 + reg3; |
| 633 loc2 = stp5 - reg6; |
| 634 loc3 = stp5 + reg6; |
| 635 STORE_SH(loc0, (tmp_eve_buf + 11 * 8)); |
| 636 STORE_SH(loc1, (tmp_eve_buf + 4 * 8)); |
| 637 STORE_SH(loc2, (tmp_eve_buf + 10 * 8)); |
| 638 STORE_SH(loc3, (tmp_eve_buf + 5 * 8)); |
| 639 |
| 640 loc0 = stp6 - reg0; |
| 641 loc1 = stp6 + reg0; |
| 642 loc2 = stp7 - reg2; |
| 643 loc3 = stp7 + reg2; |
| 644 STORE_SH(loc0, (tmp_eve_buf + 9 * 8)); |
| 645 STORE_SH(loc1, (tmp_eve_buf + 6 * 8)); |
| 646 STORE_SH(loc2, (tmp_eve_buf + 8 * 8)); |
| 647 STORE_SH(loc3, (tmp_eve_buf + 7 * 8)); |
| 648 } |
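| /* Same arithmetic as the row-pass even half; only the addressing differs. |
|  * The column pass reads the 32x32 intermediate buffer in place, so loads |
|  * step by multiples of 32 int16 elements (whole rows) instead of 8. */ |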
| 649 |
| 650 static void vp9_idct8x32_column_odd_process_store(int16_t *tmp_buf, |
| 651 int16_t *tmp_odd_buf) { |
| 652 v8i16 vec0, vec1, vec2, vec3, loc0, loc1, loc2, loc3; |
| 653 v8i16 reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7; |
| 654 |
| 655 /* Odd stage 1 */ |
| 656 reg0 = LOAD_SH(tmp_buf + 32); |
| 657 reg1 = LOAD_SH(tmp_buf + 7 * 32); |
| 658 reg2 = LOAD_SH(tmp_buf + 9 * 32); |
| 659 reg3 = LOAD_SH(tmp_buf + 15 * 32); |
| 660 reg4 = LOAD_SH(tmp_buf + 17 * 32); |
| 661 reg5 = LOAD_SH(tmp_buf + 23 * 32); |
| 662 reg6 = LOAD_SH(tmp_buf + 25 * 32); |
| 663 reg7 = LOAD_SH(tmp_buf + 31 * 32); |
| 664 |
| 665 DOTP_CONST_PAIR(reg0, reg7, cospi_31_64, cospi_1_64, reg0, reg7); |
| 666 DOTP_CONST_PAIR(reg4, reg3, cospi_15_64, cospi_17_64, reg3, reg4); |
| 667 DOTP_CONST_PAIR(reg2, reg5, cospi_23_64, cospi_9_64, reg2, reg5); |
| 668 DOTP_CONST_PAIR(reg6, reg1, cospi_7_64, cospi_25_64, reg1, reg6); |
| 669 |
| 670 vec0 = reg0 + reg3; |
| 671 reg0 = reg0 - reg3; |
| 672 reg3 = reg7 + reg4; |
| 673 reg7 = reg7 - reg4; |
| 674 reg4 = reg1 + reg2; |
| 675 reg1 = reg1 - reg2; |
| 676 reg2 = reg6 + reg5; |
| 677 reg6 = reg6 - reg5; |
| 678 reg5 = vec0; |
| 679 |
| 680 /* 4 Stores */ |
| 681 vec0 = reg5 + reg4; |
| 682 vec1 = reg3 + reg2; |
| 683 STORE_SH(vec0, (tmp_odd_buf + 4 * 8)); |
| 684 STORE_SH(vec1, (tmp_odd_buf + 5 * 8)); |
| 685 |
| 686 vec0 = reg5 - reg4; |
| 687 vec1 = reg3 - reg2; |
| 688 DOTP_CONST_PAIR(vec1, vec0, cospi_24_64, cospi_8_64, vec0, vec1); |
| 689 STORE_SH(vec0, (tmp_odd_buf)); |
| 690 STORE_SH(vec1, (tmp_odd_buf + 1 * 8)); |
| 691 |
| 692 /* 4 Stores */ |
| 693 DOTP_CONST_PAIR(reg7, reg0, cospi_28_64, cospi_4_64, reg0, reg7); |
| 694 DOTP_CONST_PAIR(reg6, reg1, -cospi_4_64, cospi_28_64, reg1, reg6); |
| 695 |
| 696 vec0 = reg0 + reg1; |
| 697 vec2 = reg7 - reg6; |
| 698 vec1 = reg7 + reg6; |
| 699 vec3 = reg0 - reg1; |
| 700 STORE_SH(vec0, (tmp_odd_buf + 6 * 8)); |
| 701 STORE_SH(vec1, (tmp_odd_buf + 7 * 8)); |
| 702 |
| 703 DOTP_CONST_PAIR(vec2, vec3, cospi_24_64, cospi_8_64, vec2, vec3); |
| 704 STORE_SH(vec2, (tmp_odd_buf + 2 * 8)); |
| 705 STORE_SH(vec3, (tmp_odd_buf + 3 * 8)); |
| 706 |
| 707 /* Odd stage 2 */ |
| 708 /* 8 loads */ |
| 709 reg0 = LOAD_SH(tmp_buf + 3 * 32); |
| 710 reg1 = LOAD_SH(tmp_buf + 5 * 32); |
| 711 reg2 = LOAD_SH(tmp_buf + 11 * 32); |
| 712 reg3 = LOAD_SH(tmp_buf + 13 * 32); |
| 713 reg4 = LOAD_SH(tmp_buf + 19 * 32); |
| 714 reg5 = LOAD_SH(tmp_buf + 21 * 32); |
| 715 reg6 = LOAD_SH(tmp_buf + 27 * 32); |
| 716 reg7 = LOAD_SH(tmp_buf + 29 * 32); |
| 717 |
| 718 DOTP_CONST_PAIR(reg1, reg6, cospi_27_64, cospi_5_64, reg1, reg6); |
| 719 DOTP_CONST_PAIR(reg5, reg2, cospi_11_64, cospi_21_64, reg2, reg5); |
| 720 DOTP_CONST_PAIR(reg3, reg4, cospi_19_64, cospi_13_64, reg3, reg4); |
| 721 DOTP_CONST_PAIR(reg7, reg0, cospi_3_64, cospi_29_64, reg0, reg7); |
| 722 |
| 723 /* 4 Stores */ |
| 724 vec0 = reg1 - reg2; |
| 725 vec1 = reg6 - reg5; |
| 726 vec2 = reg0 - reg3; |
| 727 vec3 = reg7 - reg4; |
| 728 DOTP_CONST_PAIR(vec1, vec0, cospi_12_64, cospi_20_64, loc0, loc1); |
| 729 DOTP_CONST_PAIR(vec3, vec2, -cospi_20_64, cospi_12_64, loc2, loc3); |
| 730 |
| 731 vec2 = loc2 - loc0; |
| 732 vec3 = loc3 - loc1; |
| 733 vec0 = loc2 + loc0; |
| 734 vec1 = loc3 + loc1; |
| 735 STORE_SH(vec0, (tmp_odd_buf + 12 * 8)); |
| 736 STORE_SH(vec1, (tmp_odd_buf + 15 * 8)); |
| 737 |
| 738 DOTP_CONST_PAIR(vec3, vec2, -cospi_8_64, cospi_24_64, vec0, vec1); |
| 739 |
| 740 STORE_SH(vec0, (tmp_odd_buf + 10 * 8)); |
| 741 STORE_SH(vec1, (tmp_odd_buf + 11 * 8)); |
| 742 |
| 743 /* 4 Stores */ |
| 744 vec0 = reg0 + reg3; |
| 745 vec1 = reg1 + reg2; |
| 746 vec2 = reg6 + reg5; |
| 747 vec3 = reg7 + reg4; |
| 748 reg0 = vec0 + vec1; |
| 749 reg1 = vec3 + vec2; |
| 750 reg2 = vec0 - vec1; |
| 751 reg3 = vec3 - vec2; |
| 752 STORE_SH(reg0, (tmp_odd_buf + 13 * 8)); |
| 753 STORE_SH(reg1, (tmp_odd_buf + 14 * 8)); |
| 754 |
| 755 DOTP_CONST_PAIR(reg3, reg2, -cospi_8_64, cospi_24_64, reg0, reg1); |
| 756 |
| 757 STORE_SH(reg0, (tmp_odd_buf + 8 * 8)); |
| 758 STORE_SH(reg1, (tmp_odd_buf + 9 * 8)); |
| 759 |
| 760 /* Odd stage 3: Dependency on Odd stage 1 & Odd stage 2 */ |
| 761 /* Load 8 & Store 8 */ |
| 762 reg0 = LOAD_SH(tmp_odd_buf); |
| 763 reg1 = LOAD_SH(tmp_odd_buf + 1 * 8); |
| 764 reg2 = LOAD_SH(tmp_odd_buf + 2 * 8); |
| 765 reg3 = LOAD_SH(tmp_odd_buf + 3 * 8); |
| 766 reg4 = LOAD_SH(tmp_odd_buf + 8 * 8); |
| 767 reg5 = LOAD_SH(tmp_odd_buf + 9 * 8); |
| 768 reg6 = LOAD_SH(tmp_odd_buf + 10 * 8); |
| 769 reg7 = LOAD_SH(tmp_odd_buf + 11 * 8); |
| 770 |
| 771 loc0 = reg0 + reg4; |
| 772 loc1 = reg1 + reg5; |
| 773 loc2 = reg2 + reg6; |
| 774 loc3 = reg3 + reg7; |
| 775 STORE_SH(loc0, (tmp_odd_buf)); |
| 776 STORE_SH(loc1, (tmp_odd_buf + 1 * 8)); |
| 777 STORE_SH(loc2, (tmp_odd_buf + 2 * 8)); |
| 778 STORE_SH(loc3, (tmp_odd_buf + 3 * 8)); |
| 779 |
| 780 vec0 = reg0 - reg4; |
| 781 vec1 = reg1 - reg5; |
| 782 DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc0, loc1); |
| 783 |
| 784 vec0 = reg2 - reg6; |
| 785 vec1 = reg3 - reg7; |
| 786 DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc2, loc3); |
| 787 |
| 788 STORE_SH(loc0, (tmp_odd_buf + 8 * 8)); |
| 789 STORE_SH(loc1, (tmp_odd_buf + 9 * 8)); |
| 790 STORE_SH(loc2, (tmp_odd_buf + 10 * 8)); |
| 791 STORE_SH(loc3, (tmp_odd_buf + 11 * 8)); |
| 792 |
| 793 /* Load 8 & Store 8 */ |
| 794 reg1 = LOAD_SH(tmp_odd_buf + 4 * 8); |
| 795 reg2 = LOAD_SH(tmp_odd_buf + 5 * 8); |
| 796 reg0 = LOAD_SH(tmp_odd_buf + 6 * 8); |
| 797 reg3 = LOAD_SH(tmp_odd_buf + 7 * 8); |
| 798 reg4 = LOAD_SH(tmp_odd_buf + 12 * 8); |
| 799 reg5 = LOAD_SH(tmp_odd_buf + 13 * 8); |
| 800 reg6 = LOAD_SH(tmp_odd_buf + 14 * 8); |
| 801 reg7 = LOAD_SH(tmp_odd_buf + 15 * 8); |
| 802 |
| 803 loc0 = reg0 + reg4; |
| 804 loc1 = reg1 + reg5; |
| 805 loc2 = reg2 + reg6; |
| 806 loc3 = reg3 + reg7; |
| 807 STORE_SH(loc0, (tmp_odd_buf + 4 * 8)); |
| 808 STORE_SH(loc1, (tmp_odd_buf + 5 * 8)); |
| 809 STORE_SH(loc2, (tmp_odd_buf + 6 * 8)); |
| 810 STORE_SH(loc3, (tmp_odd_buf + 7 * 8)); |
| 811 |
| 812 vec0 = reg0 - reg4; |
| 813 vec1 = reg3 - reg7; |
| 814 DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc0, loc1); |
| 815 |
| 816 vec0 = reg1 - reg5; |
| 817 vec1 = reg2 - reg6; |
| 818 DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc2, loc3); |
| 819 |
| 820 STORE_SH(loc0, (tmp_odd_buf + 12 * 8)); |
| 821 STORE_SH(loc1, (tmp_odd_buf + 13 * 8)); |
| 822 STORE_SH(loc2, (tmp_odd_buf + 14 * 8)); |
| 823 STORE_SH(loc3, (tmp_odd_buf + 15 * 8)); |
| 824 } |
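| /* Likewise the row-pass odd half with 32-element row strides; it produces |
|  * the same tmp_odd_buf layout, so the butterfly/add-block stage below |
|  * uses the same indexing as the row version. */ |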
| 825 |
| 826 static void vp9_idct8x32_column_butterfly_addblk(int16_t *tmp_eve_buf, |
| 827 int16_t *tmp_odd_buf, |
| 828 uint8_t *dest, |
| 829 int32_t dest_stride) { |
| 830 v8i16 vec0, vec1, vec2, vec3, loc0, loc1, loc2, loc3; |
| 831 v8i16 m0, m1, m2, m3, m4, m5, m6, m7; |
| 832 v8i16 n0, n1, n2, n3, n4, n5, n6, n7; |
| 833 |
| 834 /* FINAL BUTTERFLY: Dependency on Even & Odd */ |
| 835 vec0 = LOAD_SH(tmp_odd_buf); |
| 836 vec1 = LOAD_SH(tmp_odd_buf + 9 * 8); |
| 837 vec2 = LOAD_SH(tmp_odd_buf + 14 * 8); |
| 838 vec3 = LOAD_SH(tmp_odd_buf + 6 * 8); |
| 839 loc0 = LOAD_SH(tmp_eve_buf); |
| 840 loc1 = LOAD_SH(tmp_eve_buf + 8 * 8); |
| 841 loc2 = LOAD_SH(tmp_eve_buf + 4 * 8); |
| 842 loc3 = LOAD_SH(tmp_eve_buf + 12 * 8); |
| 843 |
| 844 m0 = (loc0 + vec3); |
| 845 m4 = (loc1 + vec2); |
| 846 m2 = (loc2 + vec1); |
| 847 m6 = (loc3 + vec0); |
| 848 SRARI_H_4VECS_SH(m0, m2, m4, m6, m0, m2, m4, m6, 6); |
| 849 VP9_ADDBLK_CLIP_AND_STORE_OFF_4H_VECS(dest, dest_stride, m0, m2, m4, m6); |
| 850 |
| 851 m6 = (loc0 - vec3); |
| 852 m2 = (loc1 - vec2); |
| 853 m4 = (loc2 - vec1); |
| 854 m0 = (loc3 - vec0); |
| 855 SRARI_H_4VECS_SH(m0, m2, m4, m6, m0, m2, m4, m6, 6); |
| 856 VP9_ADDBLK_CLIP_AND_STORE_OFF_4H_VECS((dest + 19 * dest_stride), |
| 857 dest_stride, m0, m2, m4, m6); |
| 858 |
| 859 /* Load 8 & Store 8 */ |
| 860 vec0 = LOAD_SH(tmp_odd_buf + 4 * 8); |
| 861 vec1 = LOAD_SH(tmp_odd_buf + 13 * 8); |
| 862 vec2 = LOAD_SH(tmp_odd_buf + 10 * 8); |
| 863 vec3 = LOAD_SH(tmp_odd_buf + 3 * 8); |
| 864 loc0 = LOAD_SH(tmp_eve_buf + 2 * 8); |
| 865 loc1 = LOAD_SH(tmp_eve_buf + 10 * 8); |
| 866 loc2 = LOAD_SH(tmp_eve_buf + 6 * 8); |
| 867 loc3 = LOAD_SH(tmp_eve_buf + 14 * 8); |
| 868 |
| 869 m1 = (loc0 + vec3); |
| 870 m5 = (loc1 + vec2); |
| 871 m3 = (loc2 + vec1); |
| 872 m7 = (loc3 + vec0); |
| 873 SRARI_H_4VECS_SH(m1, m3, m5, m7, m1, m3, m5, m7, 6); |
| 874 VP9_ADDBLK_CLIP_AND_STORE_OFF_4H_VECS((dest + 2 * dest_stride), |
| 875 dest_stride, m1, m3, m5, m7); |
| 876 |
| 877 m7 = (loc0 - vec3); |
| 878 m3 = (loc1 - vec2); |
| 879 m5 = (loc2 - vec1); |
| 880 m1 = (loc3 - vec0); |
| 881 SRARI_H_4VECS_SH(m1, m3, m5, m7, m1, m3, m5, m7, 6); |
| 882 VP9_ADDBLK_CLIP_AND_STORE_OFF_4H_VECS((dest + 17 * dest_stride), |
| 883 dest_stride, m1, m3, m5, m7); |
| 884 |
| 885 /* Load 8 & Store 8 */ |
| 886 vec0 = LOAD_SH(tmp_odd_buf + 2 * 8); |
| 887 vec1 = LOAD_SH(tmp_odd_buf + 11 * 8); |
| 888 vec2 = LOAD_SH(tmp_odd_buf + 12 * 8); |
| 889 vec3 = LOAD_SH(tmp_odd_buf + 7 * 8); |
| 890 loc0 = LOAD_SH(tmp_eve_buf + 1 * 8); |
| 891 loc1 = LOAD_SH(tmp_eve_buf + 9 * 8); |
| 892 loc2 = LOAD_SH(tmp_eve_buf + 5 * 8); |
| 893 loc3 = LOAD_SH(tmp_eve_buf + 13 * 8); |
| 894 |
| 895 n0 = (loc0 + vec3); |
| 896 n4 = (loc1 + vec2); |
| 897 n2 = (loc2 + vec1); |
| 898 n6 = (loc3 + vec0); |
| 899 SRARI_H_4VECS_SH(n0, n2, n4, n6, n0, n2, n4, n6, 6); |
| 900 VP9_ADDBLK_CLIP_AND_STORE_OFF_4H_VECS((dest + 1 * dest_stride), |
| 901 dest_stride, n0, n2, n4, n6); |
| 902 |
| 903 n6 = (loc0 - vec3); |
| 904 n2 = (loc1 - vec2); |
| 905 n4 = (loc2 - vec1); |
| 906 n0 = (loc3 - vec0); |
| 907 SRARI_H_4VECS_SH(n0, n2, n4, n6, n0, n2, n4, n6, 6); |
| 908 VP9_ADDBLK_CLIP_AND_STORE_OFF_4H_VECS((dest + 18 * dest_stride), |
| 909 dest_stride, n0, n2, n4, n6); |
| 910 |
| 911 /* Load 8 & Store 8 */ |
| 912 vec0 = LOAD_SH(tmp_odd_buf + 5 * 8); |
| 913 vec1 = LOAD_SH(tmp_odd_buf + 15 * 8); |
| 914 vec2 = LOAD_SH(tmp_odd_buf + 8 * 8); |
| 915 vec3 = LOAD_SH(tmp_odd_buf + 1 * 8); |
| 916 loc0 = LOAD_SH(tmp_eve_buf + 3 * 8); |
| 917 loc1 = LOAD_SH(tmp_eve_buf + 11 * 8); |
| 918 loc2 = LOAD_SH(tmp_eve_buf + 7 * 8); |
| 919 loc3 = LOAD_SH(tmp_eve_buf + 15 * 8); |
| 920 |
| 921 n1 = (loc0 + vec3); |
| 922 n5 = (loc1 + vec2); |
| 923 n3 = (loc2 + vec1); |
| 924 n7 = (loc3 + vec0); |
| 925 SRARI_H_4VECS_SH(n1, n3, n5, n7, n1, n3, n5, n7, 6); |
| 926 VP9_ADDBLK_CLIP_AND_STORE_OFF_4H_VECS((dest + 3 * dest_stride), |
| 927 dest_stride, n1, n3, n5, n7); |
| 928 |
| 929 n7 = (loc0 - vec3); |
| 930 n3 = (loc1 - vec2); |
| 931 n5 = (loc2 - vec1); |
| 932 n1 = (loc3 - vec0); |
| 933 SRARI_H_4VECS_SH(n1, n3, n5, n7, n1, n3, n5, n7, 6); |
| 934 VP9_ADDBLK_CLIP_AND_STORE_OFF_4H_VECS((dest + 16 * dest_stride), |
| 935 dest_stride, n1, n3, n5, n7); |
| 936 } |
| 937 |
| 938 static void vp9_idct8x32_1d_columns_addblk_msa(int16_t *input, uint8_t *dest, |
| 939 int32_t dest_stride) { |
| 940 DECLARE_ALIGNED(32, int16_t, tmp_odd_buf[16 * 8]); |
| 941 DECLARE_ALIGNED(32, int16_t, tmp_eve_buf[16 * 8]); |
| 942 |
| 943 vp9_idct8x32_column_even_process_store(input, &tmp_eve_buf[0]); |
| 944 |
| 945 vp9_idct8x32_column_odd_process_store(input, &tmp_odd_buf[0]); |
| 946 |
| 947 vp9_idct8x32_column_butterfly_addblk(&tmp_eve_buf[0], &tmp_odd_buf[0], |
| 948 dest, dest_stride); |
| 949 } |
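| /* One column pass: the final butterfly is fused with reconstruction, so |
|  * results are rounded (SRARI by 6) and added straight into the predictor |
|  * block instead of being written to another intermediate buffer. */ |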
| 950 |
| 951 void vp9_idct32x32_1024_add_msa(const int16_t *input, uint8_t *dest, |
| 952 int32_t dest_stride) { |
| 953 int32_t i; |
| 954 DECLARE_ALIGNED(32, int16_t, out_arr[32 * 32]); |
| 955 int16_t *out_ptr = out_arr; |
| 956 |
| 957 /* transform rows */ |
| 958 for (i = 0; i < 4; ++i) { |
| 959 /* process a 32x8 block; i << 8 advances 8 rows of 32 coefficients */ |
| 960 vp9_idct32x8_1d_rows_msa((input + (i << 8)), (out_ptr + (i << 8))); |
| 961 } |
| 962 |
| 963 /* transform columns */ |
| 964 for (i = 0; i < 4; ++i) { |
| 965 /* process an 8x32 block; i << 3 selects the next 8 columns */ |
| 966 vp9_idct8x32_1d_columns_addblk_msa((out_ptr + (i << 3)), (dest + (i << 3)), |
| 967 dest_stride); |
| 968 } |
| 969 } |
| 970 |
| 971 void vp9_idct32x32_34_add_msa(const int16_t *input, uint8_t *dest, |
| 972 int32_t dest_stride) { |
| 973 int32_t i; |
| 974 DECLARE_ALIGNED(32, int16_t, out_arr[32 * 32]); |
| 975 int16_t *out_ptr = out_arr; |
| 976 |
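| /* Zero the 32x32 intermediate buffer first: each pass of the inline asm |
|  * below stores $zero to 64 bytes (16 sw), i.e. one 32-coefficient row, |
|  * and the loop runs 32 times. */ |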
| 977 for (i = 32; i--;) { |
| 978 __asm__ __volatile__ ( |
| 979 "sw $zero, 0(%[out_ptr]) \n\t" |
| 980 "sw $zero, 4(%[out_ptr]) \n\t" |
| 981 "sw $zero, 8(%[out_ptr]) \n\t" |
| 982 "sw $zero, 12(%[out_ptr]) \n\t" |
| 983 "sw $zero, 16(%[out_ptr]) \n\t" |
| 984 "sw $zero, 20(%[out_ptr]) \n\t" |
| 985 "sw $zero, 24(%[out_ptr]) \n\t" |
| 986 "sw $zero, 28(%[out_ptr]) \n\t" |
| 987 "sw $zero, 32(%[out_ptr]) \n\t" |
| 988 "sw $zero, 36(%[out_ptr]) \n\t" |
| 989 "sw $zero, 40(%[out_ptr]) \n\t" |
| 990 "sw $zero, 44(%[out_ptr]) \n\t" |
| 991 "sw $zero, 48(%[out_ptr]) \n\t" |
| 992 "sw $zero, 52(%[out_ptr]) \n\t" |
| 993 "sw $zero, 56(%[out_ptr]) \n\t" |
| 994 "sw $zero, 60(%[out_ptr]) \n\t" |
| 995 |
| 996 : |
| 997 : [out_ptr] "r" (out_ptr) |
| 998 ); |
| 999 |
| 1000 out_ptr += 32; |
| 1001 } |
| 1002 |
| 1003 out_ptr = out_arr; |
| 1004 |
| 1005 /* rows: with eob <= 34, only the upper-left 8x8 block has non-zero coefficients */ |
| 1006 vp9_idct32x8_1d_rows_msa(input, out_ptr); |
| 1007 |
| 1008 /* transform columns */ |
| 1009 for (i = 0; i < 4; ++i) { |
| 1010 /* process an 8x32 block; i << 3 selects the next 8 columns */ |
| 1011 vp9_idct8x32_1d_columns_addblk_msa((out_ptr + (i << 3)), (dest + (i << 3)), |
| 1012 dest_stride); |
| 1013 } |
| 1014 } |
| 1015 |
| 1016 void vp9_idct32x32_1_add_msa(const int16_t *input, uint8_t *dest, |
| 1017 int32_t dest_stride) { |
| 1018 int32_t i, const1; |
| 1019 v8i16 const2; |
| 1020 int16_t out; |
| 1021 v8i16 res0, res1, res2, res3, res4, res5, res6, res7; |
| 1022 v16u8 dest0, dest1, dest2, dest3; |
| 1023 v16u8 tmp0, tmp1, tmp2, tmp3; |
| 1024 v16i8 zero = { 0 }; |
| 1025 |
| 1026 out = dct_const_round_shift(input[0] * cospi_16_64); |
| 1027 out = dct_const_round_shift(out * cospi_16_64); |
| 1028 const1 = ROUND_POWER_OF_TWO(out, 6); |
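| /* DC-only shortcut: both 1-D passes reduce to a multiply by cospi_16_64 |
|  * with dct_const_round_shift, and ROUND_POWER_OF_TWO(out, 6) applies the |
|  * same final rounding shift as the full transform. */ |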
| 1029 |
| 1030 const2 = __msa_fill_h(const1); |
| 1031 |
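| /* Each iteration adds the DC value to two full 32-pixel rows (left and |
|  * right 16-byte halves), so 16 iterations cover all 32 rows. */ |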
| 1032 for (i = 0; i < 16; ++i) { |
| 1033 dest0 = LOAD_UB(dest); |
| 1034 dest1 = LOAD_UB(dest + 16); |
| 1035 dest2 = LOAD_UB(dest + dest_stride); |
| 1036 dest3 = LOAD_UB(dest + dest_stride + 16); |
| 1037 |
| 1038 res0 = (v8i16)__msa_ilvr_b(zero, (v16i8)dest0); |
| 1039 res1 = (v8i16)__msa_ilvr_b(zero, (v16i8)dest1); |
| 1040 res2 = (v8i16)__msa_ilvr_b(zero, (v16i8)dest2); |
| 1041 res3 = (v8i16)__msa_ilvr_b(zero, (v16i8)dest3); |
| 1042 res4 = (v8i16)__msa_ilvl_b(zero, (v16i8)dest0); |
| 1043 res5 = (v8i16)__msa_ilvl_b(zero, (v16i8)dest1); |
| 1044 res6 = (v8i16)__msa_ilvl_b(zero, (v16i8)dest2); |
| 1045 res7 = (v8i16)__msa_ilvl_b(zero, (v16i8)dest3); |
| 1046 |
| 1047 res0 += const2; |
| 1048 res1 += const2; |
| 1049 res2 += const2; |
| 1050 res3 += const2; |
| 1051 res4 += const2; |
| 1052 res5 += const2; |
| 1053 res6 += const2; |
| 1054 res7 += const2; |
| 1055 |
| 1056 res0 = CLIP_UNSIGNED_CHAR_H(res0); |
| 1057 res1 = CLIP_UNSIGNED_CHAR_H(res1); |
| 1058 res2 = CLIP_UNSIGNED_CHAR_H(res2); |
| 1059 res3 = CLIP_UNSIGNED_CHAR_H(res3); |
| 1060 res4 = CLIP_UNSIGNED_CHAR_H(res4); |
| 1061 res5 = CLIP_UNSIGNED_CHAR_H(res5); |
| 1062 res6 = CLIP_UNSIGNED_CHAR_H(res6); |
| 1063 res7 = CLIP_UNSIGNED_CHAR_H(res7); |
| 1064 |
| 1065 tmp0 = (v16u8)__msa_pckev_b((v16i8)res4, (v16i8)res0); |
| 1066 tmp1 = (v16u8)__msa_pckev_b((v16i8)res5, (v16i8)res1); |
| 1067 tmp2 = (v16u8)__msa_pckev_b((v16i8)res6, (v16i8)res2); |
| 1068 tmp3 = (v16u8)__msa_pckev_b((v16i8)res7, (v16i8)res3); |
| 1069 |
| 1070 STORE_UB(tmp0, dest); |
| 1071 STORE_UB(tmp1, dest + 16); |
| 1072 dest += dest_stride; |
| 1073 STORE_UB(tmp2, dest); |
| 1074 STORE_UB(tmp3, dest + 16); |
| 1075 dest += dest_stride; |
| 1076 } |
| 1077 } |