Chromium Code Reviews

Side by Side Diff: source/libvpx/vp9/common/mips/msa/vp9_idct32x32_msa.c

Issue 1162573005: libvpx: Pull from upstream (Closed) Base URL: https://chromium.googlesource.com/chromium/deps/libvpx.git@master
Patch Set: Created 5 years, 6 months ago
1 /* 1 /*
2 * Copyright (c) 2015 The WebM project authors. All Rights Reserved. 2 * Copyright (c) 2015 The WebM project authors. All Rights Reserved.
3 * 3 *
4 * Use of this source code is governed by a BSD-style license 4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source 5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found 6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may 7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree. 8 * be found in the AUTHORS file in the root of the source tree.
9 */ 9 */
10 10
11 #include "vpx_ports/mem.h" 11 #include "vp9/common/mips/msa/vp9_idct_msa.h"
12 #include "vp9/common/vp9_idct.h"
13 #include "vp9/common/mips/msa/vp9_macros_msa.h"
14
15 #define DOTP_CONST_PAIR(reg0, reg1, const0, const1, out0, out1) { \
16 v8i16 k0_m = __msa_fill_h(const0); \
17 v8i16 s0_m, s1_m, s2_m, s3_m; \
18 \
19 s0_m = __msa_fill_h(const1); \
20 k0_m = __msa_ilvev_h(s0_m, k0_m); \
21 \
22 s0_m = __msa_ilvl_h(-reg1, reg0); \
23 s1_m = __msa_ilvr_h(-reg1, reg0); \
24 s2_m = __msa_ilvl_h(reg0, reg1); \
25 s3_m = __msa_ilvr_h(reg0, reg1); \
26 s1_m = (v8i16)__msa_dotp_s_w(s1_m, k0_m); \
27 s0_m = (v8i16)__msa_dotp_s_w(s0_m, k0_m); \
28 s1_m = (v8i16)__msa_srari_w((v4i32)s1_m, DCT_CONST_BITS); \
29 s0_m = (v8i16)__msa_srari_w((v4i32)s0_m, DCT_CONST_BITS); \
30 out0 = __msa_pckev_h(s0_m, s1_m); \
31 \
32 s1_m = (v8i16)__msa_dotp_s_w(s3_m, k0_m); \
33 s0_m = (v8i16)__msa_dotp_s_w(s2_m, k0_m); \
34 s1_m = (v8i16)__msa_srari_w((v4i32)s1_m, DCT_CONST_BITS); \
35 s0_m = (v8i16)__msa_srari_w((v4i32)s0_m, DCT_CONST_BITS); \
36 out1 = __msa_pckev_h(s0_m, s1_m); \
37 }
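Note for reviewers: DOTP_CONST_PAIR above (dropped here in favour of VP9_DOTP_CONST_PAIR from the new vp9_idct_msa.h header) is the standard IDCT butterfly rotation applied to eight int16 lanes at once. A scalar sketch of one lane, assuming DCT_CONST_BITS is 14 as in vp9/common/vp9_idct.h; the helper name is illustrative only:

/* One-lane equivalent of DOTP_CONST_PAIR(reg0, reg1, c0, c1, out0, out1). */
static void dotp_const_pair_scalar(int16_t reg0, int16_t reg1,
                                   int16_t c0, int16_t c1,
                                   int16_t *out0, int16_t *out1) {
  /* dct_const_round_shift(): add 2^(DCT_CONST_BITS - 1), then shift by 14. */
  int32_t t0 = reg0 * c0 - reg1 * c1;
  int32_t t1 = reg1 * c0 + reg0 * c1;
  *out0 = (int16_t)((t0 + (1 << 13)) >> 14);
  *out1 = (int16_t)((t1 + (1 << 13)) >> 14);
}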
38
39 #define VP9_ADDBLK_CLIP_AND_STORE_OFF_4H_VECS(dest, dest_stride, \
40 in0, in1, in2, in3) { \
41 uint64_t out0_m, out1_m, out2_m, out3_m; \
42 v8i16 res0_m, res1_m, res2_m, res3_m; \
43 v16u8 dest0_m, dest1_m, dest2_m, dest3_m; \
44 v16i8 tmp0_m, tmp1_m; \
45 v16i8 zero_m = { 0 }; \
46 uint8_t *dst_m = (uint8_t *)(dest); \
47 \
48 dest0_m = LOAD_UB(dst_m); \
49 dest1_m = LOAD_UB(dst_m + 4 * dest_stride); \
50 dest2_m = LOAD_UB(dst_m + 8 * dest_stride); \
51 dest3_m = LOAD_UB(dst_m + 12 * dest_stride); \
52 \
53 res0_m = (v8i16)__msa_ilvr_b(zero_m, (v16i8)dest0_m); \
54 res1_m = (v8i16)__msa_ilvr_b(zero_m, (v16i8)dest1_m); \
55 res2_m = (v8i16)__msa_ilvr_b(zero_m, (v16i8)dest2_m); \
56 res3_m = (v8i16)__msa_ilvr_b(zero_m, (v16i8)dest3_m); \
57 \
58 res0_m += (v8i16)(in0); \
59 res1_m += (v8i16)(in1); \
60 res2_m += (v8i16)(in2); \
61 res3_m += (v8i16)(in3); \
62 \
63 res0_m = CLIP_UNSIGNED_CHAR_H(res0_m); \
64 res1_m = CLIP_UNSIGNED_CHAR_H(res1_m); \
65 res2_m = CLIP_UNSIGNED_CHAR_H(res2_m); \
66 res3_m = CLIP_UNSIGNED_CHAR_H(res3_m); \
67 \
68 tmp0_m = __msa_pckev_b((v16i8)res1_m, (v16i8)res0_m); \
69 tmp1_m = __msa_pckev_b((v16i8)res3_m, (v16i8)res2_m); \
70 \
71 out0_m = __msa_copy_u_d((v2i64)tmp0_m, 0); \
72 out1_m = __msa_copy_u_d((v2i64)tmp0_m, 1); \
73 out2_m = __msa_copy_u_d((v2i64)tmp1_m, 0); \
74 out3_m = __msa_copy_u_d((v2i64)tmp1_m, 1); \
75 \
76 STORE_DWORD(dst_m, out0_m); \
77 dst_m += (4 * dest_stride); \
78 STORE_DWORD(dst_m, out1_m); \
79 dst_m += (4 * dest_stride); \
80 STORE_DWORD(dst_m, out2_m); \
81 dst_m += (4 * dest_stride); \
82 STORE_DWORD(dst_m, out3_m); \
83 }
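Note for reviewers: the reconstruction macro above (replaced by the shared VP9_ADDBLK_ST8x4_UB later in this patch) adds an 8-wide residual row to the destination pixels, clips to [0, 255] and stores the result, for four rows spaced 4 * dest_stride apart. A per-row scalar sketch; the function name is illustrative:

/* Sketch: add one 8-wide residual row to dst and clip to 8 bits. */
static void addblk_clip_store_row(uint8_t *dst, const int16_t *res) {
  int j;
  for (j = 0; j < 8; ++j) {
    int val = dst[j] + res[j];
    dst[j] = (uint8_t)(val < 0 ? 0 : (val > 255 ? 255 : val));
  }
}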
84 12
85 static void vp9_idct32x8_row_transpose_store(const int16_t *input, 13 static void vp9_idct32x8_row_transpose_store(const int16_t *input,
86 int16_t *tmp_buf) { 14 int16_t *tmp_buf) {
87 v8i16 m0, m1, m2, m3, m4, m5, m6, m7; 15 v8i16 m0, m1, m2, m3, m4, m5, m6, m7, n0, n1, n2, n3, n4, n5, n6, n7;
88 v8i16 n0, n1, n2, n3, n4, n5, n6, n7;
89 16
90 /* 1st & 2nd 8x8 */ 17 /* 1st & 2nd 8x8 */
91 LOAD_8VECS_SH(input, 32, m0, n0, m1, n1, m2, n2, m3, n3); 18 LD_SH8(input, 32, m0, n0, m1, n1, m2, n2, m3, n3);
92 LOAD_8VECS_SH((input + 8), 32, m4, n4, m5, n5, m6, n6, m7, n7); 19 LD_SH8((input + 8), 32, m4, n4, m5, n5, m6, n6, m7, n7);
93 TRANSPOSE8x8_H_SH(m0, n0, m1, n1, m2, n2, m3, n3, 20 TRANSPOSE8x8_SH_SH(m0, n0, m1, n1, m2, n2, m3, n3,
94 m0, n0, m1, n1, m2, n2, m3, n3); 21 m0, n0, m1, n1, m2, n2, m3, n3);
95 TRANSPOSE8x8_H_SH(m4, n4, m5, n5, m6, n6, m7, n7, 22 TRANSPOSE8x8_SH_SH(m4, n4, m5, n5, m6, n6, m7, n7,
96 m4, n4, m5, n5, m6, n6, m7, n7); 23 m4, n4, m5, n5, m6, n6, m7, n7);
97 STORE_4VECS_SH((tmp_buf), 8, m0, n0, m1, n1); 24 ST_SH8(m0, n0, m1, n1, m2, n2, m3, n3, (tmp_buf), 8);
98 STORE_4VECS_SH((tmp_buf + 4 * 8), 8, m2, n2, m3, n3); 25 ST_SH4(m4, n4, m5, n5, (tmp_buf + 8 * 8), 8);
99 STORE_4VECS_SH((tmp_buf + 8 * 8), 8, m4, n4, m5, n5); 26 ST_SH4(m6, n6, m7, n7, (tmp_buf + 12 * 8), 8);
100 STORE_4VECS_SH((tmp_buf + 12 * 8), 8, m6, n6, m7, n7);
101 27
102 /* 3rd & 4th 8x8 */ 28 /* 3rd & 4th 8x8 */
103 LOAD_8VECS_SH((input + 16), 32, m0, n0, m1, n1, m2, n2, m3, n3); 29 LD_SH8((input + 16), 32, m0, n0, m1, n1, m2, n2, m3, n3);
104 LOAD_8VECS_SH((input + 24), 32, m4, n4, m5, n5, m6, n6, m7, n7); 30 LD_SH8((input + 24), 32, m4, n4, m5, n5, m6, n6, m7, n7);
105 TRANSPOSE8x8_H_SH(m0, n0, m1, n1, m2, n2, m3, n3, 31 TRANSPOSE8x8_SH_SH(m0, n0, m1, n1, m2, n2, m3, n3,
106 m0, n0, m1, n1, m2, n2, m3, n3); 32 m0, n0, m1, n1, m2, n2, m3, n3);
107 TRANSPOSE8x8_H_SH(m4, n4, m5, n5, m6, n6, m7, n7, 33 TRANSPOSE8x8_SH_SH(m4, n4, m5, n5, m6, n6, m7, n7,
108 m4, n4, m5, n5, m6, n6, m7, n7); 34 m4, n4, m5, n5, m6, n6, m7, n7);
109 STORE_4VECS_SH((tmp_buf + 16 * 8), 8, m0, n0, m1, n1); 35 ST_SH4(m0, n0, m1, n1, (tmp_buf + 16 * 8), 8);
110 STORE_4VECS_SH((tmp_buf + 20 * 8), 8, m2, n2, m3, n3); 36 ST_SH4(m2, n2, m3, n3, (tmp_buf + 20 * 8), 8);
111 STORE_4VECS_SH((tmp_buf + 24 * 8), 8, m4, n4, m5, n5); 37 ST_SH4(m4, n4, m5, n5, (tmp_buf + 24 * 8), 8);
112 STORE_4VECS_SH((tmp_buf + 28 * 8), 8, m6, n6, m7, n7); 38 ST_SH4(m6, n6, m7, n7, (tmp_buf + 28 * 8), 8);
113 } 39 }
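Note for reviewers: both versions of this helper only transpose an 8x32 strip of row coefficients into a 32x8 scratch buffer so the 1-D transforms below can operate on contiguous 8-element vectors. The data movement, as a scalar sketch:

/* tmp_buf[j * 8 + i] = input[i * 32 + j] for i in [0,8), j in [0,32). */
static void transpose_8x32_sketch(const int16_t *input, int16_t *tmp_buf) {
  int i, j;
  for (i = 0; i < 8; ++i)
    for (j = 0; j < 32; ++j)
      tmp_buf[j * 8 + i] = input[i * 32 + j];
}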
114 40
115 static void vp9_idct32x8_row_even_process_store(int16_t *tmp_buf, 41 static void vp9_idct32x8_row_even_process_store(int16_t *tmp_buf,
116 int16_t *tmp_eve_buf) { 42 int16_t *tmp_eve_buf) {
117 v8i16 vec0, vec1, vec2, vec3, loc0, loc1, loc2, loc3; 43 v8i16 vec0, vec1, vec2, vec3, loc0, loc1, loc2, loc3;
118 v8i16 reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7; 44 v8i16 reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7;
119 v8i16 stp0, stp1, stp2, stp3, stp4, stp5, stp6, stp7; 45 v8i16 stp0, stp1, stp2, stp3, stp4, stp5, stp6, stp7;
120 46
121 /* Even stage 1 */ 47 /* Even stage 1 */
122 LOAD_8VECS_SH(tmp_buf, 32, reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7); 48 LD_SH8(tmp_buf, 32, reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7);
123 49
124 DOTP_CONST_PAIR(reg1, reg7, cospi_28_64, cospi_4_64, reg1, reg7); 50 VP9_DOTP_CONST_PAIR(reg1, reg7, cospi_28_64, cospi_4_64, reg1, reg7);
125 DOTP_CONST_PAIR(reg5, reg3, cospi_12_64, cospi_20_64, reg5, reg3); 51 VP9_DOTP_CONST_PAIR(reg5, reg3, cospi_12_64, cospi_20_64, reg5, reg3);
126 52 BUTTERFLY_4(reg1, reg7, reg3, reg5, vec1, vec3, vec2, vec0);
127 vec0 = reg1 - reg5; 53 VP9_DOTP_CONST_PAIR(vec2, vec0, cospi_16_64, cospi_16_64, loc2, loc3);
128 vec1 = reg1 + reg5;
129 vec2 = reg7 - reg3;
130 vec3 = reg7 + reg3;
131
132 DOTP_CONST_PAIR(vec2, vec0, cospi_16_64, cospi_16_64, loc2, loc3);
133 54
134 loc1 = vec3; 55 loc1 = vec3;
135 loc0 = vec1; 56 loc0 = vec1;
136 57
137 DOTP_CONST_PAIR(reg0, reg4, cospi_16_64, cospi_16_64, reg0, reg4); 58 VP9_DOTP_CONST_PAIR(reg0, reg4, cospi_16_64, cospi_16_64, reg0, reg4);
138 DOTP_CONST_PAIR(reg2, reg6, cospi_24_64, cospi_8_64, reg2, reg6); 59 VP9_DOTP_CONST_PAIR(reg2, reg6, cospi_24_64, cospi_8_64, reg2, reg6);
139 60 BUTTERFLY_4(reg4, reg0, reg2, reg6, vec1, vec3, vec2, vec0);
140 vec0 = reg4 - reg6; 61 BUTTERFLY_4(vec0, vec1, loc1, loc0, stp3, stp0, stp7, stp4);
141 vec1 = reg4 + reg6; 62 BUTTERFLY_4(vec2, vec3, loc3, loc2, stp2, stp1, stp6, stp5);
142 vec2 = reg0 - reg2;
143 vec3 = reg0 + reg2;
144
145 stp4 = vec0 - loc0;
146 stp3 = vec0 + loc0;
147 stp7 = vec1 - loc1;
148 stp0 = vec1 + loc1;
149 stp5 = vec2 - loc2;
150 stp2 = vec2 + loc2;
151 stp6 = vec3 - loc3;
152 stp1 = vec3 + loc3;
153 63
154 /* Even stage 2 */ 64 /* Even stage 2 */
155 LOAD_8VECS_SH((tmp_buf + 16), 32, 65 LD_SH8((tmp_buf + 16), 32, reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7);
156 reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7); 66 VP9_DOTP_CONST_PAIR(reg0, reg7, cospi_30_64, cospi_2_64, reg0, reg7);
157 67 VP9_DOTP_CONST_PAIR(reg4, reg3, cospi_14_64, cospi_18_64, reg4, reg3);
158 DOTP_CONST_PAIR(reg0, reg7, cospi_30_64, cospi_2_64, reg0, reg7); 68 VP9_DOTP_CONST_PAIR(reg2, reg5, cospi_22_64, cospi_10_64, reg2, reg5);
159 DOTP_CONST_PAIR(reg4, reg3, cospi_14_64, cospi_18_64, reg4, reg3); 69 VP9_DOTP_CONST_PAIR(reg6, reg1, cospi_6_64, cospi_26_64, reg6, reg1);
160 DOTP_CONST_PAIR(reg2, reg5, cospi_22_64, cospi_10_64, reg2, reg5);
161 DOTP_CONST_PAIR(reg6, reg1, cospi_6_64, cospi_26_64, reg6, reg1);
162 70
163 vec0 = reg0 + reg4; 71 vec0 = reg0 + reg4;
164 reg0 = reg0 - reg4; 72 reg0 = reg0 - reg4;
165 reg4 = reg6 + reg2; 73 reg4 = reg6 + reg2;
166 reg6 = reg6 - reg2; 74 reg6 = reg6 - reg2;
167 reg2 = reg1 + reg5; 75 reg2 = reg1 + reg5;
168 reg1 = reg1 - reg5; 76 reg1 = reg1 - reg5;
169 reg5 = reg7 + reg3; 77 reg5 = reg7 + reg3;
170 reg7 = reg7 - reg3; 78 reg7 = reg7 - reg3;
171 reg3 = vec0; 79 reg3 = vec0;
172 80
173 vec1 = reg2; 81 vec1 = reg2;
174 reg2 = reg3 + reg4; 82 reg2 = reg3 + reg4;
175 reg3 = reg3 - reg4; 83 reg3 = reg3 - reg4;
176 reg4 = reg5 - vec1; 84 reg4 = reg5 - vec1;
177 reg5 = reg5 + vec1; 85 reg5 = reg5 + vec1;
178 86
179 DOTP_CONST_PAIR(reg7, reg0, cospi_24_64, cospi_8_64, reg0, reg7); 87 VP9_DOTP_CONST_PAIR(reg7, reg0, cospi_24_64, cospi_8_64, reg0, reg7);
180 DOTP_CONST_PAIR((-reg6), reg1, cospi_24_64, cospi_8_64, reg6, reg1); 88 VP9_DOTP_CONST_PAIR((-reg6), reg1, cospi_24_64, cospi_8_64, reg6, reg1);
181 89
182 vec0 = reg0 - reg6; 90 vec0 = reg0 - reg6;
183 reg0 = reg0 + reg6; 91 reg0 = reg0 + reg6;
184 vec1 = reg7 - reg1; 92 vec1 = reg7 - reg1;
185 reg7 = reg7 + reg1; 93 reg7 = reg7 + reg1;
186 94
187 DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, reg6, reg1); 95 VP9_DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, reg6, reg1);
188 DOTP_CONST_PAIR(reg4, reg3, cospi_16_64, cospi_16_64, reg3, reg4); 96 VP9_DOTP_CONST_PAIR(reg4, reg3, cospi_16_64, cospi_16_64, reg3, reg4);
189 97
190 /* Even stage 3 : Dependency on Even stage 1 & Even stage 2 */ 98 /* Even stage 3 : Dependency on Even stage 1 & Even stage 2 */
191 loc0 = stp0 - reg5; 99 BUTTERFLY_4(stp0, stp1, reg7, reg5, loc1, loc3, loc2, loc0);
192 loc1 = stp0 + reg5; 100 ST_SH(loc0, (tmp_eve_buf + 15 * 8));
193 loc2 = stp1 - reg7; 101 ST_SH(loc1, (tmp_eve_buf));
194 loc3 = stp1 + reg7; 102 ST_SH(loc2, (tmp_eve_buf + 14 * 8));
195 STORE_SH(loc0, (tmp_eve_buf + 15 * 8)); 103 ST_SH(loc3, (tmp_eve_buf + 8));
196 STORE_SH(loc1, (tmp_eve_buf));
197 STORE_SH(loc2, (tmp_eve_buf + 14 * 8));
198 STORE_SH(loc3, (tmp_eve_buf + 8));
199 104
200 loc0 = stp2 - reg1; 105 BUTTERFLY_4(stp2, stp3, reg4, reg1, loc1, loc3, loc2, loc0);
201 loc1 = stp2 + reg1; 106 ST_SH(loc0, (tmp_eve_buf + 13 * 8));
202 loc2 = stp3 - reg4; 107 ST_SH(loc1, (tmp_eve_buf + 2 * 8));
203 loc3 = stp3 + reg4; 108 ST_SH(loc2, (tmp_eve_buf + 12 * 8));
204 STORE_SH(loc0, (tmp_eve_buf + 13 * 8)); 109 ST_SH(loc3, (tmp_eve_buf + 3 * 8));
205 STORE_SH(loc1, (tmp_eve_buf + 2 * 8));
206 STORE_SH(loc2, (tmp_eve_buf + 12 * 8));
207 STORE_SH(loc3, (tmp_eve_buf + 3 * 8));
208 110
209 /* Store 8 */ 111 /* Store 8 */
210 loc0 = stp4 - reg3; 112 BUTTERFLY_4(stp4, stp5, reg6, reg3, loc1, loc3, loc2, loc0);
211 loc1 = stp4 + reg3; 113 ST_SH(loc0, (tmp_eve_buf + 11 * 8));
212 loc2 = stp5 - reg6; 114 ST_SH(loc1, (tmp_eve_buf + 4 * 8));
213 loc3 = stp5 + reg6; 115 ST_SH(loc2, (tmp_eve_buf + 10 * 8));
214 STORE_SH(loc0, (tmp_eve_buf + 11 * 8)); 116 ST_SH(loc3, (tmp_eve_buf + 5 * 8));
215 STORE_SH(loc1, (tmp_eve_buf + 4 * 8));
216 STORE_SH(loc2, (tmp_eve_buf + 10 * 8));
217 STORE_SH(loc3, (tmp_eve_buf + 5 * 8));
218 117
219 loc0 = stp6 - reg0; 118 BUTTERFLY_4(stp6, stp7, reg2, reg0, loc1, loc3, loc2, loc0);
220 loc1 = stp6 + reg0; 119 ST_SH(loc0, (tmp_eve_buf + 9 * 8));
221 loc2 = stp7 - reg2; 120 ST_SH(loc1, (tmp_eve_buf + 6 * 8));
222 loc3 = stp7 + reg2; 121 ST_SH(loc2, (tmp_eve_buf + 8 * 8));
223 STORE_SH(loc0, (tmp_eve_buf + 9 * 8)); 122 ST_SH(loc3, (tmp_eve_buf + 7 * 8));
224 STORE_SH(loc1, (tmp_eve_buf + 6 * 8));
225 STORE_SH(loc2, (tmp_eve_buf + 8 * 8));
226 STORE_SH(loc3, (tmp_eve_buf + 7 * 8));
227 } 123 }
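Note for reviewers: most of the line savings in this function come from BUTTERFLY_4, which folds each explicit add/sub group on the left into one macro call. Matching the removed arithmetic against the new calls, it behaves as sketched below (the real macro lives in the shared MSA macros header):

/* Sketch of BUTTERFLY_4 as implied by the removed code, e.g.
 * BUTTERFLY_4(reg1, reg7, reg3, reg5, vec1, vec3, vec2, vec0) reproduces
 * vec1 = reg1 + reg5; vec3 = reg7 + reg3; vec2 = reg7 - reg3; vec0 = reg1 - reg5. */
#define BUTTERFLY_4(in0, in1, in2, in3, out0, out1, out2, out3) { \
  out0 = in0 + in3;  out1 = in1 + in2;                            \
  out2 = in1 - in2;  out3 = in0 - in3;                            \
}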
228 124
229 static void vp9_idct32x8_row_odd_process_store(int16_t *tmp_buf, 125 static void vp9_idct32x8_row_odd_process_store(int16_t *tmp_buf,
230 int16_t *tmp_odd_buf) { 126 int16_t *tmp_odd_buf) {
231 v8i16 vec0, vec1, vec2, vec3, loc0, loc1, loc2, loc3; 127 v8i16 vec0, vec1, vec2, vec3, loc0, loc1, loc2, loc3;
232 v8i16 reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7; 128 v8i16 reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7;
233 129
234 /* Odd stage 1 */ 130 /* Odd stage 1 */
235 reg0 = LOAD_SH(tmp_buf + 8); 131 reg0 = LD_SH(tmp_buf + 8);
236 reg1 = LOAD_SH(tmp_buf + 7 * 8); 132 reg1 = LD_SH(tmp_buf + 7 * 8);
237 reg2 = LOAD_SH(tmp_buf + 9 * 8); 133 reg2 = LD_SH(tmp_buf + 9 * 8);
238 reg3 = LOAD_SH(tmp_buf + 15 * 8); 134 reg3 = LD_SH(tmp_buf + 15 * 8);
239 reg4 = LOAD_SH(tmp_buf + 17 * 8); 135 reg4 = LD_SH(tmp_buf + 17 * 8);
240 reg5 = LOAD_SH(tmp_buf + 23 * 8); 136 reg5 = LD_SH(tmp_buf + 23 * 8);
241 reg6 = LOAD_SH(tmp_buf + 25 * 8); 137 reg6 = LD_SH(tmp_buf + 25 * 8);
242 reg7 = LOAD_SH(tmp_buf + 31 * 8); 138 reg7 = LD_SH(tmp_buf + 31 * 8);
243 139
244 DOTP_CONST_PAIR(reg0, reg7, cospi_31_64, cospi_1_64, reg0, reg7); 140 VP9_DOTP_CONST_PAIR(reg0, reg7, cospi_31_64, cospi_1_64, reg0, reg7);
245 DOTP_CONST_PAIR(reg4, reg3, cospi_15_64, cospi_17_64, reg3, reg4); 141 VP9_DOTP_CONST_PAIR(reg4, reg3, cospi_15_64, cospi_17_64, reg3, reg4);
246 DOTP_CONST_PAIR(reg2, reg5, cospi_23_64, cospi_9_64, reg2, reg5); 142 VP9_DOTP_CONST_PAIR(reg2, reg5, cospi_23_64, cospi_9_64, reg2, reg5);
247 DOTP_CONST_PAIR(reg6, reg1, cospi_7_64, cospi_25_64, reg1, reg6); 143 VP9_DOTP_CONST_PAIR(reg6, reg1, cospi_7_64, cospi_25_64, reg1, reg6);
248 144
249 vec0 = reg0 + reg3; 145 vec0 = reg0 + reg3;
250 reg0 = reg0 - reg3; 146 reg0 = reg0 - reg3;
251 reg3 = reg7 + reg4; 147 reg3 = reg7 + reg4;
252 reg7 = reg7 - reg4; 148 reg7 = reg7 - reg4;
253 reg4 = reg1 + reg2; 149 reg4 = reg1 + reg2;
254 reg1 = reg1 - reg2; 150 reg1 = reg1 - reg2;
255 reg2 = reg6 + reg5; 151 reg2 = reg6 + reg5;
256 reg6 = reg6 - reg5; 152 reg6 = reg6 - reg5;
257 reg5 = vec0; 153 reg5 = vec0;
258 154
259 /* 4 Stores */ 155 /* 4 Stores */
260 vec0 = reg5 + reg4; 156 ADD2(reg5, reg4, reg3, reg2, vec0, vec1);
261 vec1 = reg3 + reg2; 157 ST_SH2(vec0, vec1, (tmp_odd_buf + 4 * 8), 8);
262 STORE_SH(vec0, (tmp_odd_buf + 4 * 8));
263 STORE_SH(vec1, (tmp_odd_buf + 5 * 8));
264 158
265 vec0 = reg5 - reg4; 159 SUB2(reg5, reg4, reg3, reg2, vec0, vec1);
266 vec1 = reg3 - reg2; 160 VP9_DOTP_CONST_PAIR(vec1, vec0, cospi_24_64, cospi_8_64, vec0, vec1);
267 DOTP_CONST_PAIR(vec1, vec0, cospi_24_64, cospi_8_64, vec0, vec1); 161 ST_SH2(vec0, vec1, (tmp_odd_buf), 8);
268 STORE_SH(vec0, (tmp_odd_buf));
269 STORE_SH(vec1, (tmp_odd_buf + 8));
270 162
271 /* 4 Stores */ 163 /* 4 Stores */
272 DOTP_CONST_PAIR(reg7, reg0, cospi_28_64, cospi_4_64, reg0, reg7); 164 VP9_DOTP_CONST_PAIR(reg7, reg0, cospi_28_64, cospi_4_64, reg0, reg7);
273 DOTP_CONST_PAIR(reg6, reg1, -cospi_4_64, cospi_28_64, reg1, reg6); 165 VP9_DOTP_CONST_PAIR(reg6, reg1, -cospi_4_64, cospi_28_64, reg1, reg6);
166 BUTTERFLY_4(reg0, reg7, reg6, reg1, vec0, vec1, vec2, vec3);
167 ST_SH2(vec0, vec1, (tmp_odd_buf + 6 * 8), 8);
274 168
275 vec0 = reg0 + reg1; 169 VP9_DOTP_CONST_PAIR(vec2, vec3, cospi_24_64, cospi_8_64, vec2, vec3);
276 vec2 = reg7 - reg6; 170 ST_SH2(vec2, vec3, (tmp_odd_buf + 2 * 8), 8);
277 vec1 = reg7 + reg6;
278 vec3 = reg0 - reg1;
279 STORE_SH(vec0, (tmp_odd_buf + 6 * 8));
280 STORE_SH(vec1, (tmp_odd_buf + 7 * 8));
281
282 DOTP_CONST_PAIR(vec2, vec3, cospi_24_64, cospi_8_64, vec2, vec3);
283 STORE_SH(vec2, (tmp_odd_buf + 2 * 8));
284 STORE_SH(vec3, (tmp_odd_buf + 3 * 8));
285 171
286 /* Odd stage 2 */ 172 /* Odd stage 2 */
173 /* 8 loads */
174 reg0 = LD_SH(tmp_buf + 3 * 8);
175 reg1 = LD_SH(tmp_buf + 5 * 8);
176 reg2 = LD_SH(tmp_buf + 11 * 8);
177 reg3 = LD_SH(tmp_buf + 13 * 8);
178 reg4 = LD_SH(tmp_buf + 19 * 8);
179 reg5 = LD_SH(tmp_buf + 21 * 8);
180 reg6 = LD_SH(tmp_buf + 27 * 8);
181 reg7 = LD_SH(tmp_buf + 29 * 8);
287 182
288 /* 8 loads */ 183 VP9_DOTP_CONST_PAIR(reg1, reg6, cospi_27_64, cospi_5_64, reg1, reg6);
289 reg0 = LOAD_SH(tmp_buf + 3 * 8); 184 VP9_DOTP_CONST_PAIR(reg5, reg2, cospi_11_64, cospi_21_64, reg2, reg5);
290 reg1 = LOAD_SH(tmp_buf + 5 * 8); 185 VP9_DOTP_CONST_PAIR(reg3, reg4, cospi_19_64, cospi_13_64, reg3, reg4);
291 reg2 = LOAD_SH(tmp_buf + 11 * 8); 186 VP9_DOTP_CONST_PAIR(reg7, reg0, cospi_3_64, cospi_29_64, reg0, reg7);
292 reg3 = LOAD_SH(tmp_buf + 13 * 8);
293 reg4 = LOAD_SH(tmp_buf + 19 * 8);
294 reg5 = LOAD_SH(tmp_buf + 21 * 8);
295 reg6 = LOAD_SH(tmp_buf + 27 * 8);
296 reg7 = LOAD_SH(tmp_buf + 29 * 8);
297
298 DOTP_CONST_PAIR(reg1, reg6, cospi_27_64, cospi_5_64, reg1, reg6);
299 DOTP_CONST_PAIR(reg5, reg2, cospi_11_64, cospi_21_64, reg2, reg5);
300 DOTP_CONST_PAIR(reg3, reg4, cospi_19_64, cospi_13_64, reg3, reg4);
301 DOTP_CONST_PAIR(reg7, reg0, cospi_3_64, cospi_29_64, reg0, reg7);
302 187
303 /* 4 Stores */ 188 /* 4 Stores */
304 vec0 = reg1 - reg2; 189 SUB4(reg1, reg2, reg6, reg5, reg0, reg3, reg7, reg4,
305 vec1 = reg6 - reg5; 190 vec0, vec1, vec2, vec3);
306 vec2 = reg0 - reg3; 191 VP9_DOTP_CONST_PAIR(vec1, vec0, cospi_12_64, cospi_20_64, loc0, loc1);
307 vec3 = reg7 - reg4; 192 VP9_DOTP_CONST_PAIR(vec3, vec2, -cospi_20_64, cospi_12_64, loc2, loc3);
308 DOTP_CONST_PAIR(vec1, vec0, cospi_12_64, cospi_20_64, loc0, loc1);
309 DOTP_CONST_PAIR(vec3, vec2, -cospi_20_64, cospi_12_64, loc2, loc3);
310 193
311 vec2 = loc2 - loc0; 194 BUTTERFLY_4(loc3, loc2, loc0, loc1, vec1, vec0, vec2, vec3);
312 vec3 = loc3 - loc1; 195 ST_SH2(vec0, vec1, (tmp_odd_buf + 12 * 8), 3 * 8);
313 vec0 = loc2 + loc0;
314 vec1 = loc3 + loc1;
315 STORE_SH(vec0, (tmp_odd_buf + 12 * 8));
316 STORE_SH(vec1, (tmp_odd_buf + 15 * 8));
317 196
318 DOTP_CONST_PAIR(vec3, vec2, -cospi_8_64, cospi_24_64, vec0, vec1); 197 VP9_DOTP_CONST_PAIR(vec3, vec2, -cospi_8_64, cospi_24_64, vec0, vec1);
319 198 ST_SH2(vec0, vec1, (tmp_odd_buf + 10 * 8), 8);
320 STORE_SH(vec0, (tmp_odd_buf + 10 * 8));
321 STORE_SH(vec1, (tmp_odd_buf + 11 * 8));
322 199
323 /* 4 Stores */ 200 /* 4 Stores */
324 vec0 = reg0 + reg3; 201 ADD4(reg1, reg2, reg6, reg5, reg0, reg3, reg7, reg4,
325 vec1 = reg1 + reg2; 202 vec1, vec2, vec0, vec3);
326 vec2 = reg6 + reg5; 203 BUTTERFLY_4(vec0, vec3, vec2, vec1, reg0, reg1, reg3, reg2);
327 vec3 = reg7 + reg4; 204 ST_SH(reg0, (tmp_odd_buf + 13 * 8));
328 reg0 = vec0 + vec1; 205 ST_SH(reg1, (tmp_odd_buf + 14 * 8));
329 reg1 = vec3 + vec2;
330 reg2 = vec0 - vec1;
331 reg3 = vec3 - vec2;
332 STORE_SH(reg0, (tmp_odd_buf + 13 * 8));
333 STORE_SH(reg1, (tmp_odd_buf + 14 * 8));
334 206
335 DOTP_CONST_PAIR(reg3, reg2, -cospi_8_64, cospi_24_64, reg0, reg1); 207 VP9_DOTP_CONST_PAIR(reg3, reg2, -cospi_8_64, cospi_24_64, reg0, reg1);
336 208 ST_SH2(reg0, reg1, (tmp_odd_buf + 8 * 8), 8);
337 STORE_SH(reg0, (tmp_odd_buf + 8 * 8));
338 STORE_SH(reg1, (tmp_odd_buf + 9 * 8));
339 209
340 /* Odd stage 3 : Dependency on Odd stage 1 & Odd stage 2 */ 210 /* Odd stage 3 : Dependency on Odd stage 1 & Odd stage 2 */
341 211
342 /* Load 8 & Store 8 */ 212 /* Load 8 & Store 8 */
343 reg0 = LOAD_SH(tmp_odd_buf); 213 LD_SH4(tmp_odd_buf, 8, reg0, reg1, reg2, reg3);
344 reg1 = LOAD_SH(tmp_odd_buf + 1 * 8); 214 LD_SH4((tmp_odd_buf + 8 * 8), 8, reg4, reg5, reg6, reg7);
345 reg2 = LOAD_SH(tmp_odd_buf + 2 * 8);
346 reg3 = LOAD_SH(tmp_odd_buf + 3 * 8);
347 reg4 = LOAD_SH(tmp_odd_buf + 8 * 8);
348 reg5 = LOAD_SH(tmp_odd_buf + 9 * 8);
349 reg6 = LOAD_SH(tmp_odd_buf + 10 * 8);
350 reg7 = LOAD_SH(tmp_odd_buf + 11 * 8);
351 215
352 loc0 = reg0 + reg4; 216 ADD4(reg0, reg4, reg1, reg5, reg2, reg6, reg3, reg7,
353 loc1 = reg1 + reg5; 217 loc0, loc1, loc2, loc3);
354 loc2 = reg2 + reg6; 218 ST_SH4(loc0, loc1, loc2, loc3, tmp_odd_buf, 8);
355 loc3 = reg3 + reg7;
356 STORE_SH(loc0, (tmp_odd_buf));
357 STORE_SH(loc1, (tmp_odd_buf + 1 * 8));
358 STORE_SH(loc2, (tmp_odd_buf + 2 * 8));
359 STORE_SH(loc3, (tmp_odd_buf + 3 * 8));
360 219
361 vec0 = reg0 - reg4; 220 SUB2(reg0, reg4, reg1, reg5, vec0, vec1);
362 vec1 = reg1 - reg5; 221 VP9_DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc0, loc1);
363 DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc0, loc1);
364 222
365 vec0 = reg2 - reg6; 223 SUB2(reg2, reg6, reg3, reg7, vec0, vec1);
366 vec1 = reg3 - reg7; 224 VP9_DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc2, loc3);
367 DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc2, loc3); 225 ST_SH4(loc0, loc1, loc2, loc3, (tmp_odd_buf + 8 * 8), 8);
368
369 STORE_SH(loc0, (tmp_odd_buf + 8 * 8));
370 STORE_SH(loc1, (tmp_odd_buf + 9 * 8));
371 STORE_SH(loc2, (tmp_odd_buf + 10 * 8));
372 STORE_SH(loc3, (tmp_odd_buf + 11 * 8));
373 226
374 /* Load 8 & Store 8 */ 227 /* Load 8 & Store 8 */
375 reg1 = LOAD_SH(tmp_odd_buf + 4 * 8); 228 LD_SH4((tmp_odd_buf + 4 * 8), 8, reg1, reg2, reg0, reg3);
376 reg2 = LOAD_SH(tmp_odd_buf + 5 * 8); 229 LD_SH4((tmp_odd_buf + 12 * 8), 8, reg4, reg5, reg6, reg7);
377 reg0 = LOAD_SH(tmp_odd_buf + 6 * 8);
378 reg3 = LOAD_SH(tmp_odd_buf + 7 * 8);
379 reg4 = LOAD_SH(tmp_odd_buf + 12 * 8);
380 reg5 = LOAD_SH(tmp_odd_buf + 13 * 8);
381 reg6 = LOAD_SH(tmp_odd_buf + 14 * 8);
382 reg7 = LOAD_SH(tmp_odd_buf + 15 * 8);
383 230
384 loc0 = reg0 + reg4; 231 ADD4(reg0, reg4, reg1, reg5, reg2, reg6, reg3, reg7,
385 loc1 = reg1 + reg5; 232 loc0, loc1, loc2, loc3);
386 loc2 = reg2 + reg6; 233 ST_SH4(loc0, loc1, loc2, loc3, (tmp_odd_buf + 4 * 8), 8);
387 loc3 = reg3 + reg7;
388 STORE_SH(loc0, (tmp_odd_buf + 4 * 8));
389 STORE_SH(loc1, (tmp_odd_buf + 5 * 8));
390 STORE_SH(loc2, (tmp_odd_buf + 6 * 8));
391 STORE_SH(loc3, (tmp_odd_buf + 7 * 8));
392 234
393 vec0 = reg0 - reg4; 235 SUB2(reg0, reg4, reg3, reg7, vec0, vec1);
394 vec1 = reg3 - reg7; 236 VP9_DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc0, loc1);
395 DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc0, loc1);
396 237
397 vec0 = reg1 - reg5; 238 SUB2(reg1, reg5, reg2, reg6, vec0, vec1);
398 vec1 = reg2 - reg6; 239 VP9_DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc2, loc3);
399 DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc2, loc3); 240 ST_SH4(loc0, loc1, loc2, loc3, (tmp_odd_buf + 12 * 8), 8);
400
401 STORE_SH(loc0, (tmp_odd_buf + 12 * 8));
402 STORE_SH(loc1, (tmp_odd_buf + 13 * 8));
403 STORE_SH(loc2, (tmp_odd_buf + 14 * 8));
404 STORE_SH(loc3, (tmp_odd_buf + 15 * 8));
405 } 241 }
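Note for reviewers: ADD2/SUB2 (and the four-output ADD4/SUB4) used above are plain element-wise pair helpers; reading the removed lines against the new calls they reduce to the following sketch:

/* Sketch implied by the diff, e.g. SUB2(reg5, reg4, reg3, reg2, vec0, vec1)
 * reproduces vec0 = reg5 - reg4; vec1 = reg3 - reg2. */
#define ADD2(in0, in1, in2, in3, out0, out1) { \
  out0 = in0 + in1;  out1 = in2 + in3;         \
}
#define SUB2(in0, in1, in2, in3, out0, out1) { \
  out0 = in0 - in1;  out1 = in2 - in3;         \
}
/* ADD4/SUB4 apply the same pattern across four output vectors. */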
406 242
407 static void vp9_idct_butterfly_transpose_store(int16_t *tmp_buf, 243 static void vp9_idct_butterfly_transpose_store(int16_t *tmp_buf,
408 int16_t *tmp_eve_buf, 244 int16_t *tmp_eve_buf,
409 int16_t *tmp_odd_buf, 245 int16_t *tmp_odd_buf,
410 int16_t *dest) { 246 int16_t *dst) {
411 v8i16 vec0, vec1, vec2, vec3, loc0, loc1, loc2, loc3; 247 v8i16 vec0, vec1, vec2, vec3, loc0, loc1, loc2, loc3;
412 v8i16 m0, m1, m2, m3, m4, m5, m6, m7; 248 v8i16 m0, m1, m2, m3, m4, m5, m6, m7, n0, n1, n2, n3, n4, n5, n6, n7;
413 v8i16 n0, n1, n2, n3, n4, n5, n6, n7;
414 249
415 /* FINAL BUTTERFLY : Dependency on Even & Odd */ 250 /* FINAL BUTTERFLY : Dependency on Even & Odd */
416 /* Total: 32 loads, 32 stores */ 251 vec0 = LD_SH(tmp_odd_buf);
417 vec0 = LOAD_SH(tmp_odd_buf); 252 vec1 = LD_SH(tmp_odd_buf + 9 * 8);
418 vec1 = LOAD_SH(tmp_odd_buf + 9 * 8); 253 vec2 = LD_SH(tmp_odd_buf + 14 * 8);
419 vec2 = LOAD_SH(tmp_odd_buf + 14 * 8); 254 vec3 = LD_SH(tmp_odd_buf + 6 * 8);
420 vec3 = LOAD_SH(tmp_odd_buf + 6 * 8); 255 loc0 = LD_SH(tmp_eve_buf);
421 loc0 = LOAD_SH(tmp_eve_buf); 256 loc1 = LD_SH(tmp_eve_buf + 8 * 8);
422 loc1 = LOAD_SH(tmp_eve_buf + 8 * 8); 257 loc2 = LD_SH(tmp_eve_buf + 4 * 8);
423 loc2 = LOAD_SH(tmp_eve_buf + 4 * 8); 258 loc3 = LD_SH(tmp_eve_buf + 12 * 8);
424 loc3 = LOAD_SH(tmp_eve_buf + 12 * 8);
425 259
426 m0 = (loc0 + vec3); 260 ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, m0, m4, m2, m6);
427 STORE_SH((loc0 - vec3), (tmp_buf + 31 * 8)); 261
428 STORE_SH((loc1 - vec2), (tmp_buf + 23 * 8)); 262 ST_SH((loc0 - vec3), (tmp_buf + 31 * 8));
429 m4 = (loc1 + vec2); 263 ST_SH((loc1 - vec2), (tmp_buf + 23 * 8));
430 STORE_SH((loc2 - vec1), (tmp_buf + 27 * 8)); 264 ST_SH((loc2 - vec1), (tmp_buf + 27 * 8));
431 m2 = (loc2 + vec1); 265 ST_SH((loc3 - vec0), (tmp_buf + 19 * 8));
432 STORE_SH((loc3 - vec0), (tmp_buf + 19 * 8));
433 m6 = (loc3 + vec0);
434 266
435 /* Load 8 & Store 8 */ 267 /* Load 8 & Store 8 */
436 vec0 = LOAD_SH(tmp_odd_buf + 4 * 8); 268 vec0 = LD_SH(tmp_odd_buf + 4 * 8);
437 vec1 = LOAD_SH(tmp_odd_buf + 13 * 8); 269 vec1 = LD_SH(tmp_odd_buf + 13 * 8);
438 vec2 = LOAD_SH(tmp_odd_buf + 10 * 8); 270 vec2 = LD_SH(tmp_odd_buf + 10 * 8);
439 vec3 = LOAD_SH(tmp_odd_buf + 3 * 8); 271 vec3 = LD_SH(tmp_odd_buf + 3 * 8);
440 loc0 = LOAD_SH(tmp_eve_buf + 2 * 8); 272 loc0 = LD_SH(tmp_eve_buf + 2 * 8);
441 loc1 = LOAD_SH(tmp_eve_buf + 10 * 8); 273 loc1 = LD_SH(tmp_eve_buf + 10 * 8);
442 loc2 = LOAD_SH(tmp_eve_buf + 6 * 8); 274 loc2 = LD_SH(tmp_eve_buf + 6 * 8);
443 loc3 = LOAD_SH(tmp_eve_buf + 14 * 8); 275 loc3 = LD_SH(tmp_eve_buf + 14 * 8);
444 276
445 m1 = (loc0 + vec3); 277 ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, m1, m5, m3, m7);
446 STORE_SH((loc0 - vec3), (tmp_buf + 29 * 8)); 278
447 STORE_SH((loc1 - vec2), (tmp_buf + 21 * 8)); 279 ST_SH((loc0 - vec3), (tmp_buf + 29 * 8));
448 m5 = (loc1 + vec2); 280 ST_SH((loc1 - vec2), (tmp_buf + 21 * 8));
449 STORE_SH((loc2 - vec1), (tmp_buf + 25 * 8)); 281 ST_SH((loc2 - vec1), (tmp_buf + 25 * 8));
450 m3 = (loc2 + vec1); 282 ST_SH((loc3 - vec0), (tmp_buf + 17 * 8));
451 STORE_SH((loc3 - vec0), (tmp_buf + 17 * 8));
452 m7 = (loc3 + vec0);
453 283
454 /* Load 8 & Store 8 */ 284 /* Load 8 & Store 8 */
455 vec0 = LOAD_SH(tmp_odd_buf + 2 * 8); 285 vec0 = LD_SH(tmp_odd_buf + 2 * 8);
456 vec1 = LOAD_SH(tmp_odd_buf + 11 * 8); 286 vec1 = LD_SH(tmp_odd_buf + 11 * 8);
457 vec2 = LOAD_SH(tmp_odd_buf + 12 * 8); 287 vec2 = LD_SH(tmp_odd_buf + 12 * 8);
458 vec3 = LOAD_SH(tmp_odd_buf + 7 * 8); 288 vec3 = LD_SH(tmp_odd_buf + 7 * 8);
459 loc0 = LOAD_SH(tmp_eve_buf + 1 * 8); 289 loc0 = LD_SH(tmp_eve_buf + 1 * 8);
460 loc1 = LOAD_SH(tmp_eve_buf + 9 * 8); 290 loc1 = LD_SH(tmp_eve_buf + 9 * 8);
461 loc2 = LOAD_SH(tmp_eve_buf + 5 * 8); 291 loc2 = LD_SH(tmp_eve_buf + 5 * 8);
462 loc3 = LOAD_SH(tmp_eve_buf + 13 * 8); 292 loc3 = LD_SH(tmp_eve_buf + 13 * 8);
463 293
464 n0 = (loc0 + vec3); 294 ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, n0, n4, n2, n6);
465 STORE_SH((loc0 - vec3), (tmp_buf + 30 * 8)); 295
466 STORE_SH((loc1 - vec2), (tmp_buf + 22 * 8)); 296 ST_SH((loc0 - vec3), (tmp_buf + 30 * 8));
467 n4 = (loc1 + vec2); 297 ST_SH((loc1 - vec2), (tmp_buf + 22 * 8));
468 STORE_SH((loc2 - vec1), (tmp_buf + 26 * 8)); 298 ST_SH((loc2 - vec1), (tmp_buf + 26 * 8));
469 n2 = (loc2 + vec1); 299 ST_SH((loc3 - vec0), (tmp_buf + 18 * 8));
470 STORE_SH((loc3 - vec0), (tmp_buf + 18 * 8));
471 n6 = (loc3 + vec0);
472 300
473 /* Load 8 & Store 8 */ 301 /* Load 8 & Store 8 */
474 vec0 = LOAD_SH(tmp_odd_buf + 5 * 8); 302 vec0 = LD_SH(tmp_odd_buf + 5 * 8);
475 vec1 = LOAD_SH(tmp_odd_buf + 15 * 8); 303 vec1 = LD_SH(tmp_odd_buf + 15 * 8);
476 vec2 = LOAD_SH(tmp_odd_buf + 8 * 8); 304 vec2 = LD_SH(tmp_odd_buf + 8 * 8);
477 vec3 = LOAD_SH(tmp_odd_buf + 1 * 8); 305 vec3 = LD_SH(tmp_odd_buf + 1 * 8);
478 loc0 = LOAD_SH(tmp_eve_buf + 3 * 8); 306 loc0 = LD_SH(tmp_eve_buf + 3 * 8);
479 loc1 = LOAD_SH(tmp_eve_buf + 11 * 8); 307 loc1 = LD_SH(tmp_eve_buf + 11 * 8);
480 loc2 = LOAD_SH(tmp_eve_buf + 7 * 8); 308 loc2 = LD_SH(tmp_eve_buf + 7 * 8);
481 loc3 = LOAD_SH(tmp_eve_buf + 15 * 8); 309 loc3 = LD_SH(tmp_eve_buf + 15 * 8);
482 310
483 n1 = (loc0 + vec3); 311 ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, n1, n5, n3, n7);
484 STORE_SH((loc0 - vec3), (tmp_buf + 28 * 8)); 312
485 STORE_SH((loc1 - vec2), (tmp_buf + 20 * 8)); 313 ST_SH((loc0 - vec3), (tmp_buf + 28 * 8));
486 n5 = (loc1 + vec2); 314 ST_SH((loc1 - vec2), (tmp_buf + 20 * 8));
487 STORE_SH((loc2 - vec1), (tmp_buf + 24 * 8)); 315 ST_SH((loc2 - vec1), (tmp_buf + 24 * 8));
488 n3 = (loc2 + vec1); 316 ST_SH((loc3 - vec0), (tmp_buf + 16 * 8));
489 STORE_SH((loc3 - vec0), (tmp_buf + 16 * 8));
490 n7 = (loc3 + vec0);
491 317
492 /* Transpose : 16 vectors */ 318 /* Transpose : 16 vectors */
493 /* 1st & 2nd 8x8 */ 319 /* 1st & 2nd 8x8 */
494 TRANSPOSE8x8_H_SH(m0, n0, m1, n1, m2, n2, m3, n3, 320 TRANSPOSE8x8_SH_SH(m0, n0, m1, n1, m2, n2, m3, n3,
495 m0, n0, m1, n1, m2, n2, m3, n3); 321 m0, n0, m1, n1, m2, n2, m3, n3);
496 STORE_4VECS_SH((dest + 0), 32, m0, n0, m1, n1); 322 ST_SH4(m0, n0, m1, n1, (dst + 0), 32);
497 STORE_4VECS_SH((dest + 4 * 32), 32, m2, n2, m3, n3); 323 ST_SH4(m2, n2, m3, n3, (dst + 4 * 32), 32);
498 324
499 TRANSPOSE8x8_H_SH(m4, n4, m5, n5, m6, n6, m7, n7, 325 TRANSPOSE8x8_SH_SH(m4, n4, m5, n5, m6, n6, m7, n7,
500 m4, n4, m5, n5, m6, n6, m7, n7); 326 m4, n4, m5, n5, m6, n6, m7, n7);
501 STORE_4VECS_SH((dest + 8), 32, m4, n4, m5, n5); 327 ST_SH4(m4, n4, m5, n5, (dst + 8), 32);
502 STORE_4VECS_SH((dest + 8 + 4 * 32), 32, m6, n6, m7, n7); 328 ST_SH4(m6, n6, m7, n7, (dst + 8 + 4 * 32), 32);
503 329
504 /* 3rd & 4th 8x8 */ 330 /* 3rd & 4th 8x8 */
505 LOAD_8VECS_SH((tmp_buf + 8 * 16), 8, m0, n0, m1, n1, m2, n2, m3, n3); 331 LD_SH8((tmp_buf + 8 * 16), 8, m0, n0, m1, n1, m2, n2, m3, n3);
506 LOAD_8VECS_SH((tmp_buf + 12 * 16), 8, m4, n4, m5, n5, m6, n6, m7, n7); 332 LD_SH8((tmp_buf + 12 * 16), 8, m4, n4, m5, n5, m6, n6, m7, n7);
507 TRANSPOSE8x8_H_SH(m0, n0, m1, n1, m2, n2, m3, n3, 333 TRANSPOSE8x8_SH_SH(m0, n0, m1, n1, m2, n2, m3, n3,
508 m0, n0, m1, n1, m2, n2, m3, n3); 334 m0, n0, m1, n1, m2, n2, m3, n3);
509 STORE_4VECS_SH((dest + 16), 32, m0, n0, m1, n1); 335 ST_SH4(m0, n0, m1, n1, (dst + 16), 32);
510 STORE_4VECS_SH((dest + 16 + 4 * 32), 32, m2, n2, m3, n3); 336 ST_SH4(m2, n2, m3, n3, (dst + 16 + 4 * 32), 32);
511 337
512 TRANSPOSE8x8_H_SH(m4, n4, m5, n5, m6, n6, m7, n7, 338 TRANSPOSE8x8_SH_SH(m4, n4, m5, n5, m6, n6, m7, n7,
513 m4, n4, m5, n5, m6, n6, m7, n7); 339 m4, n4, m5, n5, m6, n6, m7, n7);
514 STORE_4VECS_SH((dest + 24), 32, m4, n4, m5, n5); 340 ST_SH4(m4, n4, m5, n5, (dst + 24), 32);
515 STORE_4VECS_SH((dest + 24 + 4 * 32), 32, m6, n6, m7, n7); 341 ST_SH4(m6, n6, m7, n7, (dst + 24 + 4 * 32), 32);
516 } 342 }
517 343
518 static void vp9_idct32x8_1d_rows_msa(const int16_t *input, int16_t *output) { 344 static void vp9_idct32x8_1d_rows_msa(const int16_t *input, int16_t *output) {
519 DECLARE_ALIGNED(32, int16_t, tmp_buf[8 * 32]); 345 DECLARE_ALIGNED(32, int16_t, tmp_buf[8 * 32]);
520 DECLARE_ALIGNED(32, int16_t, tmp_odd_buf[16 * 8]); 346 DECLARE_ALIGNED(32, int16_t, tmp_odd_buf[16 * 8]);
521 DECLARE_ALIGNED(32, int16_t, tmp_eve_buf[16 * 8]); 347 DECLARE_ALIGNED(32, int16_t, tmp_eve_buf[16 * 8]);
522 348
523 vp9_idct32x8_row_transpose_store(input, &tmp_buf[0]); 349 vp9_idct32x8_row_transpose_store(input, &tmp_buf[0]);
524
525 vp9_idct32x8_row_even_process_store(&tmp_buf[0], &tmp_eve_buf[0]); 350 vp9_idct32x8_row_even_process_store(&tmp_buf[0], &tmp_eve_buf[0]);
526
527 vp9_idct32x8_row_odd_process_store(&tmp_buf[0], &tmp_odd_buf[0]); 351 vp9_idct32x8_row_odd_process_store(&tmp_buf[0], &tmp_odd_buf[0]);
528
529 vp9_idct_butterfly_transpose_store(&tmp_buf[0], &tmp_eve_buf[0], 352 vp9_idct_butterfly_transpose_store(&tmp_buf[0], &tmp_eve_buf[0],
530 &tmp_odd_buf[0], output); 353 &tmp_odd_buf[0], output);
531 } 354 }
532 355
533 static void vp9_idct8x32_column_even_process_store(int16_t *tmp_buf, 356 static void vp9_idct8x32_column_even_process_store(int16_t *tmp_buf,
534 int16_t *tmp_eve_buf) { 357 int16_t *tmp_eve_buf) {
535 v8i16 vec0, vec1, vec2, vec3, loc0, loc1, loc2, loc3; 358 v8i16 vec0, vec1, vec2, vec3, loc0, loc1, loc2, loc3;
536 v8i16 reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7; 359 v8i16 reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7;
537 v8i16 stp0, stp1, stp2, stp3, stp4, stp5, stp6, stp7; 360 v8i16 stp0, stp1, stp2, stp3, stp4, stp5, stp6, stp7;
538 361
539 /* Even stage 1 */ 362 /* Even stage 1 */
540 LOAD_8VECS_SH(tmp_buf, (4 * 32), 363 LD_SH8(tmp_buf, (4 * 32), reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7);
541 reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7); 364 tmp_buf += (2 * 32);
542 365
543 DOTP_CONST_PAIR(reg1, reg7, cospi_28_64, cospi_4_64, reg1, reg7); 366 VP9_DOTP_CONST_PAIR(reg1, reg7, cospi_28_64, cospi_4_64, reg1, reg7);
544 DOTP_CONST_PAIR(reg5, reg3, cospi_12_64, cospi_20_64, reg5, reg3); 367 VP9_DOTP_CONST_PAIR(reg5, reg3, cospi_12_64, cospi_20_64, reg5, reg3);
545 368 BUTTERFLY_4(reg1, reg7, reg3, reg5, vec1, vec3, vec2, vec0);
546 vec0 = reg1 - reg5; 369 VP9_DOTP_CONST_PAIR(vec2, vec0, cospi_16_64, cospi_16_64, loc2, loc3);
547 vec1 = reg1 + reg5;
548 vec2 = reg7 - reg3;
549 vec3 = reg7 + reg3;
550
551 DOTP_CONST_PAIR(vec2, vec0, cospi_16_64, cospi_16_64, loc2, loc3);
552 370
553 loc1 = vec3; 371 loc1 = vec3;
554 loc0 = vec1; 372 loc0 = vec1;
555 373
556 DOTP_CONST_PAIR(reg0, reg4, cospi_16_64, cospi_16_64, reg0, reg4); 374 VP9_DOTP_CONST_PAIR(reg0, reg4, cospi_16_64, cospi_16_64, reg0, reg4);
557 DOTP_CONST_PAIR(reg2, reg6, cospi_24_64, cospi_8_64, reg2, reg6); 375 VP9_DOTP_CONST_PAIR(reg2, reg6, cospi_24_64, cospi_8_64, reg2, reg6);
558 376 BUTTERFLY_4(reg4, reg0, reg2, reg6, vec1, vec3, vec2, vec0);
559 vec0 = reg4 - reg6; 377 BUTTERFLY_4(vec0, vec1, loc1, loc0, stp3, stp0, stp7, stp4);
560 vec1 = reg4 + reg6; 378 BUTTERFLY_4(vec2, vec3, loc3, loc2, stp2, stp1, stp6, stp5);
561 vec2 = reg0 - reg2;
562 vec3 = reg0 + reg2;
563
564 stp4 = vec0 - loc0;
565 stp3 = vec0 + loc0;
566 stp7 = vec1 - loc1;
567 stp0 = vec1 + loc1;
568 stp5 = vec2 - loc2;
569 stp2 = vec2 + loc2;
570 stp6 = vec3 - loc3;
571 stp1 = vec3 + loc3;
572 379
573 /* Even stage 2 */ 380 /* Even stage 2 */
574 /* Load 8 */ 381 /* Load 8 */
575 LOAD_8VECS_SH((tmp_buf + 2 * 32), (4 * 32), 382 LD_SH8(tmp_buf, (4 * 32), reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7);
576 reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7);
577 383
578 DOTP_CONST_PAIR(reg0, reg7, cospi_30_64, cospi_2_64, reg0, reg7); 384 VP9_DOTP_CONST_PAIR(reg0, reg7, cospi_30_64, cospi_2_64, reg0, reg7);
579 DOTP_CONST_PAIR(reg4, reg3, cospi_14_64, cospi_18_64, reg4, reg3); 385 VP9_DOTP_CONST_PAIR(reg4, reg3, cospi_14_64, cospi_18_64, reg4, reg3);
580 DOTP_CONST_PAIR(reg2, reg5, cospi_22_64, cospi_10_64, reg2, reg5); 386 VP9_DOTP_CONST_PAIR(reg2, reg5, cospi_22_64, cospi_10_64, reg2, reg5);
581 DOTP_CONST_PAIR(reg6, reg1, cospi_6_64, cospi_26_64, reg6, reg1); 387 VP9_DOTP_CONST_PAIR(reg6, reg1, cospi_6_64, cospi_26_64, reg6, reg1);
582 388
583 vec0 = reg0 + reg4; 389 vec0 = reg0 + reg4;
584 reg0 = reg0 - reg4; 390 reg0 = reg0 - reg4;
585 reg4 = reg6 + reg2; 391 reg4 = reg6 + reg2;
586 reg6 = reg6 - reg2; 392 reg6 = reg6 - reg2;
587 reg2 = reg1 + reg5; 393 reg2 = reg1 + reg5;
588 reg1 = reg1 - reg5; 394 reg1 = reg1 - reg5;
589 reg5 = reg7 + reg3; 395 reg5 = reg7 + reg3;
590 reg7 = reg7 - reg3; 396 reg7 = reg7 - reg3;
591 reg3 = vec0; 397 reg3 = vec0;
592 398
593 vec1 = reg2; 399 vec1 = reg2;
594 reg2 = reg3 + reg4; 400 reg2 = reg3 + reg4;
595 reg3 = reg3 - reg4; 401 reg3 = reg3 - reg4;
596 reg4 = reg5 - vec1; 402 reg4 = reg5 - vec1;
597 reg5 = reg5 + vec1; 403 reg5 = reg5 + vec1;
598 404
599 DOTP_CONST_PAIR(reg7, reg0, cospi_24_64, cospi_8_64, reg0, reg7); 405 VP9_DOTP_CONST_PAIR(reg7, reg0, cospi_24_64, cospi_8_64, reg0, reg7);
600 DOTP_CONST_PAIR((-reg6), reg1, cospi_24_64, cospi_8_64, reg6, reg1); 406 VP9_DOTP_CONST_PAIR((-reg6), reg1, cospi_24_64, cospi_8_64, reg6, reg1);
601 407
602 vec0 = reg0 - reg6; 408 vec0 = reg0 - reg6;
603 reg0 = reg0 + reg6; 409 reg0 = reg0 + reg6;
604 vec1 = reg7 - reg1; 410 vec1 = reg7 - reg1;
605 reg7 = reg7 + reg1; 411 reg7 = reg7 + reg1;
606 412
607 DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, reg6, reg1); 413 VP9_DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, reg6, reg1);
608 DOTP_CONST_PAIR(reg4, reg3, cospi_16_64, cospi_16_64, reg3, reg4); 414 VP9_DOTP_CONST_PAIR(reg4, reg3, cospi_16_64, cospi_16_64, reg3, reg4);
609 415
610 /* Even stage 3 : Dependency on Even stage 1 & Even stage 2 */ 416 /* Even stage 3 : Dependency on Even stage 1 & Even stage 2 */
611 /* Store 8 */ 417 /* Store 8 */
612 loc0 = stp0 - reg5; 418 BUTTERFLY_4(stp0, stp1, reg7, reg5, loc1, loc3, loc2, loc0);
613 loc1 = stp0 + reg5; 419 ST_SH2(loc1, loc3, tmp_eve_buf, 8);
614 loc2 = stp1 - reg7; 420 ST_SH2(loc2, loc0, (tmp_eve_buf + 14 * 8), 8);
615 loc3 = stp1 + reg7;
616 STORE_SH(loc0, (tmp_eve_buf + 15 * 8));
617 STORE_SH(loc1, (tmp_eve_buf));
618 STORE_SH(loc2, (tmp_eve_buf + 14 * 8));
619 STORE_SH(loc3, (tmp_eve_buf + 1 * 8));
620 421
621 loc0 = stp2 - reg1; 422 BUTTERFLY_4(stp2, stp3, reg4, reg1, loc1, loc3, loc2, loc0);
622 loc1 = stp2 + reg1; 423 ST_SH2(loc1, loc3, (tmp_eve_buf + 2 * 8), 8);
623 loc2 = stp3 - reg4; 424 ST_SH2(loc2, loc0, (tmp_eve_buf + 12 * 8), 8);
624 loc3 = stp3 + reg4;
625 STORE_SH(loc0, (tmp_eve_buf + 13 * 8));
626 STORE_SH(loc1, (tmp_eve_buf + 2 * 8));
627 STORE_SH(loc2, (tmp_eve_buf + 12 * 8));
628 STORE_SH(loc3, (tmp_eve_buf + 3 * 8));
629 425
630 /* Store 8 */ 426 /* Store 8 */
631 loc0 = stp4 - reg3; 427 BUTTERFLY_4(stp4, stp5, reg6, reg3, loc1, loc3, loc2, loc0);
632 loc1 = stp4 + reg3; 428 ST_SH2(loc1, loc3, (tmp_eve_buf + 4 * 8), 8);
633 loc2 = stp5 - reg6; 429 ST_SH2(loc2, loc0, (tmp_eve_buf + 10 * 8), 8);
634 loc3 = stp5 + reg6;
635 STORE_SH(loc0, (tmp_eve_buf + 11 * 8));
636 STORE_SH(loc1, (tmp_eve_buf + 4 * 8));
637 STORE_SH(loc2, (tmp_eve_buf + 10 * 8));
638 STORE_SH(loc3, (tmp_eve_buf + 5 * 8));
639 430
640 loc0 = stp6 - reg0; 431 BUTTERFLY_4(stp6, stp7, reg2, reg0, loc1, loc3, loc2, loc0);
641 loc1 = stp6 + reg0; 432 ST_SH2(loc1, loc3, (tmp_eve_buf + 6 * 8), 8);
642 loc2 = stp7 - reg2; 433 ST_SH2(loc2, loc0, (tmp_eve_buf + 8 * 8), 8);
643 loc3 = stp7 + reg2;
644 STORE_SH(loc0, (tmp_eve_buf + 9 * 8));
645 STORE_SH(loc1, (tmp_eve_buf + 6 * 8));
646 STORE_SH(loc2, (tmp_eve_buf + 8 * 8));
647 STORE_SH(loc3, (tmp_eve_buf + 7 * 8));
648 } 434 }
649 435
650 static void vp9_idct8x32_column_odd_process_store(int16_t *tmp_buf, 436 static void vp9_idct8x32_column_odd_process_store(int16_t *tmp_buf,
651 int16_t *tmp_odd_buf) { 437 int16_t *tmp_odd_buf) {
652 v8i16 vec0, vec1, vec2, vec3, loc0, loc1, loc2, loc3; 438 v8i16 vec0, vec1, vec2, vec3, loc0, loc1, loc2, loc3;
653 v8i16 reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7; 439 v8i16 reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7;
654 440
655 /* Odd stage 1 */ 441 /* Odd stage 1 */
656 reg0 = LOAD_SH(tmp_buf + 32); 442 reg0 = LD_SH(tmp_buf + 32);
657 reg1 = LOAD_SH(tmp_buf + 7 * 32); 443 reg1 = LD_SH(tmp_buf + 7 * 32);
658 reg2 = LOAD_SH(tmp_buf + 9 * 32); 444 reg2 = LD_SH(tmp_buf + 9 * 32);
659 reg3 = LOAD_SH(tmp_buf + 15 * 32); 445 reg3 = LD_SH(tmp_buf + 15 * 32);
660 reg4 = LOAD_SH(tmp_buf + 17 * 32); 446 reg4 = LD_SH(tmp_buf + 17 * 32);
661 reg5 = LOAD_SH(tmp_buf + 23 * 32); 447 reg5 = LD_SH(tmp_buf + 23 * 32);
662 reg6 = LOAD_SH(tmp_buf + 25 * 32); 448 reg6 = LD_SH(tmp_buf + 25 * 32);
663 reg7 = LOAD_SH(tmp_buf + 31 * 32); 449 reg7 = LD_SH(tmp_buf + 31 * 32);
664 450
665 DOTP_CONST_PAIR(reg0, reg7, cospi_31_64, cospi_1_64, reg0, reg7); 451 VP9_DOTP_CONST_PAIR(reg0, reg7, cospi_31_64, cospi_1_64, reg0, reg7);
666 DOTP_CONST_PAIR(reg4, reg3, cospi_15_64, cospi_17_64, reg3, reg4); 452 VP9_DOTP_CONST_PAIR(reg4, reg3, cospi_15_64, cospi_17_64, reg3, reg4);
667 DOTP_CONST_PAIR(reg2, reg5, cospi_23_64, cospi_9_64, reg2, reg5); 453 VP9_DOTP_CONST_PAIR(reg2, reg5, cospi_23_64, cospi_9_64, reg2, reg5);
668 DOTP_CONST_PAIR(reg6, reg1, cospi_7_64, cospi_25_64, reg1, reg6); 454 VP9_DOTP_CONST_PAIR(reg6, reg1, cospi_7_64, cospi_25_64, reg1, reg6);
669 455
670 vec0 = reg0 + reg3; 456 vec0 = reg0 + reg3;
671 reg0 = reg0 - reg3; 457 reg0 = reg0 - reg3;
672 reg3 = reg7 + reg4; 458 reg3 = reg7 + reg4;
673 reg7 = reg7 - reg4; 459 reg7 = reg7 - reg4;
674 reg4 = reg1 + reg2; 460 reg4 = reg1 + reg2;
675 reg1 = reg1 - reg2; 461 reg1 = reg1 - reg2;
676 reg2 = reg6 + reg5; 462 reg2 = reg6 + reg5;
677 reg6 = reg6 - reg5; 463 reg6 = reg6 - reg5;
678 reg5 = vec0; 464 reg5 = vec0;
679 465
680 /* 4 Stores */ 466 /* 4 Stores */
681 vec0 = reg5 + reg4; 467 ADD2(reg5, reg4, reg3, reg2, vec0, vec1);
682 vec1 = reg3 + reg2; 468 ST_SH2(vec0, vec1, (tmp_odd_buf + 4 * 8), 8);
683 STORE_SH(vec0, (tmp_odd_buf + 4 * 8)); 469 SUB2(reg5, reg4, reg3, reg2, vec0, vec1);
684 STORE_SH(vec1, (tmp_odd_buf + 5 * 8)); 470 VP9_DOTP_CONST_PAIR(vec1, vec0, cospi_24_64, cospi_8_64, vec0, vec1);
685 471 ST_SH2(vec0, vec1, tmp_odd_buf, 8);
686 vec0 = reg5 - reg4;
687 vec1 = reg3 - reg2;
688 DOTP_CONST_PAIR(vec1, vec0, cospi_24_64, cospi_8_64, vec0, vec1);
689 STORE_SH(vec0, (tmp_odd_buf));
690 STORE_SH(vec1, (tmp_odd_buf + 1 * 8));
691 472
692 /* 4 Stores */ 473 /* 4 Stores */
693 DOTP_CONST_PAIR(reg7, reg0, cospi_28_64, cospi_4_64, reg0, reg7); 474 VP9_DOTP_CONST_PAIR(reg7, reg0, cospi_28_64, cospi_4_64, reg0, reg7);
694 DOTP_CONST_PAIR(reg6, reg1, -cospi_4_64, cospi_28_64, reg1, reg6); 475 VP9_DOTP_CONST_PAIR(reg6, reg1, -cospi_4_64, cospi_28_64, reg1, reg6);
695 476 BUTTERFLY_4(reg0, reg7, reg6, reg1, vec0, vec1, vec2, vec3);
696 vec0 = reg0 + reg1; 477 ST_SH2(vec0, vec1, (tmp_odd_buf + 6 * 8), 8);
697 vec2 = reg7 - reg6; 478 VP9_DOTP_CONST_PAIR(vec2, vec3, cospi_24_64, cospi_8_64, vec2, vec3);
698 vec1 = reg7 + reg6; 479 ST_SH2(vec2, vec3, (tmp_odd_buf + 2 * 8), 8);
699 vec3 = reg0 - reg1;
700 STORE_SH(vec0, (tmp_odd_buf + 6 * 8));
701 STORE_SH(vec1, (tmp_odd_buf + 7 * 8));
702
703 DOTP_CONST_PAIR(vec2, vec3, cospi_24_64, cospi_8_64, vec2, vec3);
704 STORE_SH(vec2, (tmp_odd_buf + 2 * 8));
705 STORE_SH(vec3, (tmp_odd_buf + 3 * 8));
706 480
707 /* Odd stage 2 */ 481 /* Odd stage 2 */
708 /* 8 loads */ 482 /* 8 loads */
709 reg0 = LOAD_SH(tmp_buf + 3 * 32); 483 reg0 = LD_SH(tmp_buf + 3 * 32);
710 reg1 = LOAD_SH(tmp_buf + 5 * 32); 484 reg1 = LD_SH(tmp_buf + 5 * 32);
711 reg2 = LOAD_SH(tmp_buf + 11 * 32); 485 reg2 = LD_SH(tmp_buf + 11 * 32);
712 reg3 = LOAD_SH(tmp_buf + 13 * 32); 486 reg3 = LD_SH(tmp_buf + 13 * 32);
713 reg4 = LOAD_SH(tmp_buf + 19 * 32); 487 reg4 = LD_SH(tmp_buf + 19 * 32);
714 reg5 = LOAD_SH(tmp_buf + 21 * 32); 488 reg5 = LD_SH(tmp_buf + 21 * 32);
715 reg6 = LOAD_SH(tmp_buf + 27 * 32); 489 reg6 = LD_SH(tmp_buf + 27 * 32);
716 reg7 = LOAD_SH(tmp_buf + 29 * 32); 490 reg7 = LD_SH(tmp_buf + 29 * 32);
717 491
718 DOTP_CONST_PAIR(reg1, reg6, cospi_27_64, cospi_5_64, reg1, reg6); 492 VP9_DOTP_CONST_PAIR(reg1, reg6, cospi_27_64, cospi_5_64, reg1, reg6);
719 DOTP_CONST_PAIR(reg5, reg2, cospi_11_64, cospi_21_64, reg2, reg5); 493 VP9_DOTP_CONST_PAIR(reg5, reg2, cospi_11_64, cospi_21_64, reg2, reg5);
720 DOTP_CONST_PAIR(reg3, reg4, cospi_19_64, cospi_13_64, reg3, reg4); 494 VP9_DOTP_CONST_PAIR(reg3, reg4, cospi_19_64, cospi_13_64, reg3, reg4);
721 DOTP_CONST_PAIR(reg7, reg0, cospi_3_64, cospi_29_64, reg0, reg7); 495 VP9_DOTP_CONST_PAIR(reg7, reg0, cospi_3_64, cospi_29_64, reg0, reg7);
722 496
723 /* 4 Stores */ 497 /* 4 Stores */
724 vec0 = reg1 - reg2; 498 SUB4(reg1, reg2, reg6, reg5, reg0, reg3, reg7, reg4, vec0, vec1, vec2, vec3);
725 vec1 = reg6 - reg5; 499 VP9_DOTP_CONST_PAIR(vec1, vec0, cospi_12_64, cospi_20_64, loc0, loc1);
726 vec2 = reg0 - reg3; 500 VP9_DOTP_CONST_PAIR(vec3, vec2, -cospi_20_64, cospi_12_64, loc2, loc3);
727 vec3 = reg7 - reg4; 501 BUTTERFLY_4(loc2, loc3, loc1, loc0, vec0, vec1, vec3, vec2);
728 DOTP_CONST_PAIR(vec1, vec0, cospi_12_64, cospi_20_64, loc0, loc1); 502 ST_SH2(vec0, vec1, (tmp_odd_buf + 12 * 8), 3 * 8);
729 DOTP_CONST_PAIR(vec3, vec2, -cospi_20_64, cospi_12_64, loc2, loc3); 503 VP9_DOTP_CONST_PAIR(vec3, vec2, -cospi_8_64, cospi_24_64, vec0, vec1);
730 504 ST_SH2(vec0, vec1, (tmp_odd_buf + 10 * 8), 8);
731 vec2 = loc2 - loc0;
732 vec3 = loc3 - loc1;
733 vec0 = loc2 + loc0;
734 vec1 = loc3 + loc1;
735 STORE_SH(vec0, (tmp_odd_buf + 12 * 8));
736 STORE_SH(vec1, (tmp_odd_buf + 15 * 8));
737
738 DOTP_CONST_PAIR(vec3, vec2, -cospi_8_64, cospi_24_64, vec0, vec1);
739
740 STORE_SH(vec0, (tmp_odd_buf + 10 * 8));
741 STORE_SH(vec1, (tmp_odd_buf + 11 * 8));
742 505
743 /* 4 Stores */ 506 /* 4 Stores */
744 vec0 = reg0 + reg3; 507 ADD4(reg0, reg3, reg1, reg2, reg5, reg6, reg4, reg7, vec0, vec1, vec2, vec3);
745 vec1 = reg1 + reg2; 508 BUTTERFLY_4(vec0, vec3, vec2, vec1, reg0, reg1, reg3, reg2);
746 vec2 = reg6 + reg5; 509 ST_SH2(reg0, reg1, (tmp_odd_buf + 13 * 8), 8);
747 vec3 = reg7 + reg4; 510 VP9_DOTP_CONST_PAIR(reg3, reg2, -cospi_8_64, cospi_24_64, reg0, reg1);
748 reg0 = vec0 + vec1; 511 ST_SH2(reg0, reg1, (tmp_odd_buf + 8 * 8), 8);
749 reg1 = vec3 + vec2;
750 reg2 = vec0 - vec1;
751 reg3 = vec3 - vec2;
752 STORE_SH(reg0, (tmp_odd_buf + 13 * 8));
753 STORE_SH(reg1, (tmp_odd_buf + 14 * 8));
754
755 DOTP_CONST_PAIR(reg3, reg2, -cospi_8_64, cospi_24_64, reg0, reg1);
756
757 STORE_SH(reg0, (tmp_odd_buf + 8 * 8));
758 STORE_SH(reg1, (tmp_odd_buf + 9 * 8));
759 512
760 /* Odd stage 3 : Dependency on Odd stage 1 & Odd stage 2 */ 513 /* Odd stage 3 : Dependency on Odd stage 1 & Odd stage 2 */
761 /* Load 8 & Store 8 */ 514 /* Load 8 & Store 8 */
762 reg0 = LOAD_SH(tmp_odd_buf); 515 LD_SH4(tmp_odd_buf, 8, reg0, reg1, reg2, reg3);
763 reg1 = LOAD_SH(tmp_odd_buf + 1 * 8); 516 LD_SH4((tmp_odd_buf + 8 * 8), 8, reg4, reg5, reg6, reg7);
764 reg2 = LOAD_SH(tmp_odd_buf + 2 * 8);
765 reg3 = LOAD_SH(tmp_odd_buf + 3 * 8);
766 reg4 = LOAD_SH(tmp_odd_buf + 8 * 8);
767 reg5 = LOAD_SH(tmp_odd_buf + 9 * 8);
768 reg6 = LOAD_SH(tmp_odd_buf + 10 * 8);
769 reg7 = LOAD_SH(tmp_odd_buf + 11 * 8);
770 517
771 loc0 = reg0 + reg4; 518 ADD4(reg0, reg4, reg1, reg5, reg2, reg6, reg3, reg7, loc0, loc1, loc2, loc3);
772 loc1 = reg1 + reg5; 519 ST_SH4(loc0, loc1, loc2, loc3, tmp_odd_buf, 8);
773 loc2 = reg2 + reg6;
774 loc3 = reg3 + reg7;
775 STORE_SH(loc0, (tmp_odd_buf));
776 STORE_SH(loc1, (tmp_odd_buf + 1 * 8));
777 STORE_SH(loc2, (tmp_odd_buf + 2 * 8));
778 STORE_SH(loc3, (tmp_odd_buf + 3 * 8));
779 520
780 vec0 = reg0 - reg4; 521 SUB2(reg0, reg4, reg1, reg5, vec0, vec1);
781 vec1 = reg1 - reg5; 522 VP9_DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc0, loc1);
782 DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc0, loc1);
783 523
784 vec0 = reg2 - reg6; 524 SUB2(reg2, reg6, reg3, reg7, vec0, vec1);
785 vec1 = reg3 - reg7; 525 VP9_DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc2, loc3);
786 DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc2, loc3); 526 ST_SH4(loc0, loc1, loc2, loc3, (tmp_odd_buf + 8 * 8), 8);
787
788 STORE_SH(loc0, (tmp_odd_buf + 8 * 8));
789 STORE_SH(loc1, (tmp_odd_buf + 9 * 8));
790 STORE_SH(loc2, (tmp_odd_buf + 10 * 8));
791 STORE_SH(loc3, (tmp_odd_buf + 11 * 8));
792 527
793 /* Load 8 & Store 8 */ 528 /* Load 8 & Store 8 */
794 reg1 = LOAD_SH(tmp_odd_buf + 4 * 8); 529 LD_SH4((tmp_odd_buf + 4 * 8), 8, reg1, reg2, reg0, reg3);
795 reg2 = LOAD_SH(tmp_odd_buf + 5 * 8); 530 LD_SH4((tmp_odd_buf + 12 * 8), 8, reg4, reg5, reg6, reg7);
796 reg0 = LOAD_SH(tmp_odd_buf + 6 * 8);
797 reg3 = LOAD_SH(tmp_odd_buf + 7 * 8);
798 reg4 = LOAD_SH(tmp_odd_buf + 12 * 8);
799 reg5 = LOAD_SH(tmp_odd_buf + 13 * 8);
800 reg6 = LOAD_SH(tmp_odd_buf + 14 * 8);
801 reg7 = LOAD_SH(tmp_odd_buf + 15 * 8);
802 531
803 loc0 = reg0 + reg4; 532 ADD4(reg0, reg4, reg1, reg5, reg2, reg6, reg3, reg7, loc0, loc1, loc2, loc3);
804 loc1 = reg1 + reg5; 533 ST_SH4(loc0, loc1, loc2, loc3, (tmp_odd_buf + 4 * 8), 8);
805 loc2 = reg2 + reg6;
806 loc3 = reg3 + reg7;
807 STORE_SH(loc0, (tmp_odd_buf + 4 * 8));
808 STORE_SH(loc1, (tmp_odd_buf + 5 * 8));
809 STORE_SH(loc2, (tmp_odd_buf + 6 * 8));
810 STORE_SH(loc3, (tmp_odd_buf + 7 * 8));
811 534
812 vec0 = reg0 - reg4; 535 SUB2(reg0, reg4, reg3, reg7, vec0, vec1);
813 vec1 = reg3 - reg7; 536 VP9_DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc0, loc1);
814 DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc0, loc1);
815 537
816 vec0 = reg1 - reg5; 538 SUB2(reg1, reg5, reg2, reg6, vec0, vec1);
817 vec1 = reg2 - reg6; 539 VP9_DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc2, loc3);
818 DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc2, loc3); 540 ST_SH4(loc0, loc1, loc2, loc3, (tmp_odd_buf + 12 * 8), 8);
819
820 STORE_SH(loc0, (tmp_odd_buf + 12 * 8));
821 STORE_SH(loc1, (tmp_odd_buf + 13 * 8));
822 STORE_SH(loc2, (tmp_odd_buf + 14 * 8));
823 STORE_SH(loc3, (tmp_odd_buf + 15 * 8));
824 } 541 }
825 542
826 static void vp9_idct8x32_column_butterfly_addblk(int16_t *tmp_eve_buf, 543 static void vp9_idct8x32_column_butterfly_addblk(int16_t *tmp_eve_buf,
827 int16_t *tmp_odd_buf, 544 int16_t *tmp_odd_buf,
828 uint8_t *dest, 545 uint8_t *dst,
829 int32_t dest_stride) { 546 int32_t dst_stride) {
830 v8i16 vec0, vec1, vec2, vec3, loc0, loc1, loc2, loc3; 547 v8i16 vec0, vec1, vec2, vec3, loc0, loc1, loc2, loc3;
831 v8i16 m0, m1, m2, m3, m4, m5, m6, m7; 548 v8i16 m0, m1, m2, m3, m4, m5, m6, m7, n0, n1, n2, n3, n4, n5, n6, n7;
832 v8i16 n0, n1, n2, n3, n4, n5, n6, n7;
833 549
834 /* FINAL BUTTERFLY : Dependency on Even & Odd */ 550 /* FINAL BUTTERFLY : Dependency on Even & Odd */
835 vec0 = LOAD_SH(tmp_odd_buf); 551 vec0 = LD_SH(tmp_odd_buf);
836 vec1 = LOAD_SH(tmp_odd_buf + 9 * 8); 552 vec1 = LD_SH(tmp_odd_buf + 9 * 8);
837 vec2 = LOAD_SH(tmp_odd_buf + 14 * 8); 553 vec2 = LD_SH(tmp_odd_buf + 14 * 8);
838 vec3 = LOAD_SH(tmp_odd_buf + 6 * 8); 554 vec3 = LD_SH(tmp_odd_buf + 6 * 8);
839 loc0 = LOAD_SH(tmp_eve_buf); 555 loc0 = LD_SH(tmp_eve_buf);
840 loc1 = LOAD_SH(tmp_eve_buf + 8 * 8); 556 loc1 = LD_SH(tmp_eve_buf + 8 * 8);
841 loc2 = LOAD_SH(tmp_eve_buf + 4 * 8); 557 loc2 = LD_SH(tmp_eve_buf + 4 * 8);
842 loc3 = LOAD_SH(tmp_eve_buf + 12 * 8); 558 loc3 = LD_SH(tmp_eve_buf + 12 * 8);
843 559
844 m0 = (loc0 + vec3); 560 ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, m0, m4, m2, m6);
845 m4 = (loc1 + vec2); 561 SRARI_H4_SH(m0, m2, m4, m6, 6);
846 m2 = (loc2 + vec1); 562 VP9_ADDBLK_ST8x4_UB(dst, (4 * dst_stride), m0, m2, m4, m6);
847 m6 = (loc3 + vec0);
848 SRARI_H_4VECS_SH(m0, m2, m4, m6, m0, m2, m4, m6, 6);
849 VP9_ADDBLK_CLIP_AND_STORE_OFF_4H_VECS(dest, dest_stride, m0, m2, m4, m6);
850 563
851 m6 = (loc0 - vec3); 564 SUB4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, m6, m2, m4, m0);
852 m2 = (loc1 - vec2); 565 SRARI_H4_SH(m0, m2, m4, m6, 6);
853 m4 = (loc2 - vec1); 566 VP9_ADDBLK_ST8x4_UB((dst + 19 * dst_stride), (4 * dst_stride),
854 m0 = (loc3 - vec0); 567 m0, m2, m4, m6);
855 SRARI_H_4VECS_SH(m0, m2, m4, m6, m0, m2, m4, m6, 6);
856 VP9_ADDBLK_CLIP_AND_STORE_OFF_4H_VECS((dest + 19 * dest_stride),
857 dest_stride, m0, m2, m4, m6);
858 568
859 /* Load 8 & Store 8 */ 569 /* Load 8 & Store 8 */
860 vec0 = LOAD_SH(tmp_odd_buf + 4 * 8); 570 vec0 = LD_SH(tmp_odd_buf + 4 * 8);
861 vec1 = LOAD_SH(tmp_odd_buf + 13 * 8); 571 vec1 = LD_SH(tmp_odd_buf + 13 * 8);
862 vec2 = LOAD_SH(tmp_odd_buf + 10 * 8); 572 vec2 = LD_SH(tmp_odd_buf + 10 * 8);
863 vec3 = LOAD_SH(tmp_odd_buf + 3 * 8); 573 vec3 = LD_SH(tmp_odd_buf + 3 * 8);
864 loc0 = LOAD_SH(tmp_eve_buf + 2 * 8); 574 loc0 = LD_SH(tmp_eve_buf + 2 * 8);
865 loc1 = LOAD_SH(tmp_eve_buf + 10 * 8); 575 loc1 = LD_SH(tmp_eve_buf + 10 * 8);
866 loc2 = LOAD_SH(tmp_eve_buf + 6 * 8); 576 loc2 = LD_SH(tmp_eve_buf + 6 * 8);
867 loc3 = LOAD_SH(tmp_eve_buf + 14 * 8); 577 loc3 = LD_SH(tmp_eve_buf + 14 * 8);
868 578
869 m1 = (loc0 + vec3); 579 ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, m1, m5, m3, m7);
870 m5 = (loc1 + vec2); 580 SRARI_H4_SH(m1, m3, m5, m7, 6);
871 m3 = (loc2 + vec1); 581 VP9_ADDBLK_ST8x4_UB((dst + 2 * dst_stride), (4 * dst_stride),
872 m7 = (loc3 + vec0); 582 m1, m3, m5, m7);
873 SRARI_H_4VECS_SH(m1, m3, m5, m7, m1, m3, m5, m7, 6);
874 VP9_ADDBLK_CLIP_AND_STORE_OFF_4H_VECS((dest + 2 * dest_stride),
875 dest_stride, m1, m3, m5, m7);
876 583
877 m7 = (loc0 - vec3); 584 SUB4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, m7, m3, m5, m1);
878 m3 = (loc1 - vec2); 585 SRARI_H4_SH(m1, m3, m5, m7, 6);
879 m5 = (loc2 - vec1); 586 VP9_ADDBLK_ST8x4_UB((dst + 17 * dst_stride), (4 * dst_stride),
880 m1 = (loc3 - vec0); 587 m1, m3, m5, m7);
881 SRARI_H_4VECS_SH(m1, m3, m5, m7, m1, m3, m5, m7, 6);
882 VP9_ADDBLK_CLIP_AND_STORE_OFF_4H_VECS((dest + 17 * dest_stride),
883 dest_stride, m1, m3, m5, m7);
884 588
885 /* Load 8 & Store 8 */ 589 /* Load 8 & Store 8 */
886 vec0 = LOAD_SH(tmp_odd_buf + 2 * 8); 590 vec0 = LD_SH(tmp_odd_buf + 2 * 8);
887 vec1 = LOAD_SH(tmp_odd_buf + 11 * 8); 591 vec1 = LD_SH(tmp_odd_buf + 11 * 8);
888 vec2 = LOAD_SH(tmp_odd_buf + 12 * 8); 592 vec2 = LD_SH(tmp_odd_buf + 12 * 8);
889 vec3 = LOAD_SH(tmp_odd_buf + 7 * 8); 593 vec3 = LD_SH(tmp_odd_buf + 7 * 8);
890 loc0 = LOAD_SH(tmp_eve_buf + 1 * 8); 594 loc0 = LD_SH(tmp_eve_buf + 1 * 8);
891 loc1 = LOAD_SH(tmp_eve_buf + 9 * 8); 595 loc1 = LD_SH(tmp_eve_buf + 9 * 8);
892 loc2 = LOAD_SH(tmp_eve_buf + 5 * 8); 596 loc2 = LD_SH(tmp_eve_buf + 5 * 8);
893 loc3 = LOAD_SH(tmp_eve_buf + 13 * 8); 597 loc3 = LD_SH(tmp_eve_buf + 13 * 8);
894 598
895 n0 = (loc0 + vec3); 599 ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, n0, n4, n2, n6);
896 n4 = (loc1 + vec2); 600 SRARI_H4_SH(n0, n2, n4, n6, 6);
897 n2 = (loc2 + vec1); 601 VP9_ADDBLK_ST8x4_UB((dst + 1 * dst_stride), (4 * dst_stride),
898 n6 = (loc3 + vec0); 602 n0, n2, n4, n6);
899 SRARI_H_4VECS_SH(n0, n2, n4, n6, n0, n2, n4, n6, 6);
900 VP9_ADDBLK_CLIP_AND_STORE_OFF_4H_VECS((dest + 1 * dest_stride),
901 dest_stride, n0, n2, n4, n6);
902 603
903 n6 = (loc0 - vec3); 604 SUB4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, n6, n2, n4, n0);
904 n2 = (loc1 - vec2); 605 SRARI_H4_SH(n0, n2, n4, n6, 6);
905 n4 = (loc2 - vec1); 606 VP9_ADDBLK_ST8x4_UB((dst + 18 * dst_stride), (4 * dst_stride),
906 n0 = (loc3 - vec0); 607 n0, n2, n4, n6);
907 SRARI_H_4VECS_SH(n0, n2, n4, n6, n0, n2, n4, n6, 6);
908 VP9_ADDBLK_CLIP_AND_STORE_OFF_4H_VECS((dest + 18 * dest_stride),
909 dest_stride, n0, n2, n4, n6);
910 608
911 /* Load 8 & Store 8 */ 609 /* Load 8 & Store 8 */
912 vec0 = LOAD_SH(tmp_odd_buf + 5 * 8); 610 vec0 = LD_SH(tmp_odd_buf + 5 * 8);
913 vec1 = LOAD_SH(tmp_odd_buf + 15 * 8); 611 vec1 = LD_SH(tmp_odd_buf + 15 * 8);
914 vec2 = LOAD_SH(tmp_odd_buf + 8 * 8); 612 vec2 = LD_SH(tmp_odd_buf + 8 * 8);
915 vec3 = LOAD_SH(tmp_odd_buf + 1 * 8); 613 vec3 = LD_SH(tmp_odd_buf + 1 * 8);
916 loc0 = LOAD_SH(tmp_eve_buf + 3 * 8); 614 loc0 = LD_SH(tmp_eve_buf + 3 * 8);
917 loc1 = LOAD_SH(tmp_eve_buf + 11 * 8); 615 loc1 = LD_SH(tmp_eve_buf + 11 * 8);
918 loc2 = LOAD_SH(tmp_eve_buf + 7 * 8); 616 loc2 = LD_SH(tmp_eve_buf + 7 * 8);
919 loc3 = LOAD_SH(tmp_eve_buf + 15 * 8); 617 loc3 = LD_SH(tmp_eve_buf + 15 * 8);
920 618
921 n1 = (loc0 + vec3); 619 ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, n1, n5, n3, n7);
922 n5 = (loc1 + vec2); 620 SRARI_H4_SH(n1, n3, n5, n7, 6);
923 n3 = (loc2 + vec1); 621 VP9_ADDBLK_ST8x4_UB((dst + 3 * dst_stride), (4 * dst_stride),
924 n7 = (loc3 + vec0); 622 n1, n3, n5, n7);
925 SRARI_H_4VECS_SH(n1, n3, n5, n7, n1, n3, n5, n7, 6);
926 VP9_ADDBLK_CLIP_AND_STORE_OFF_4H_VECS((dest + 3 * dest_stride),
927 dest_stride, n1, n3, n5, n7);
928 623
929 n7 = (loc0 - vec3); 624 SUB4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, n7, n3, n5, n1);
930 n3 = (loc1 - vec2); 625 SRARI_H4_SH(n1, n3, n5, n7, 6);
931 n5 = (loc2 - vec1); 626 VP9_ADDBLK_ST8x4_UB((dst + 16 * dst_stride), (4 * dst_stride),
932 n1 = (loc3 - vec0); 627 n1, n3, n5, n7);
933 SRARI_H_4VECS_SH(n1, n3, n5, n7, n1, n3, n5, n7, 6);
934 VP9_ADDBLK_CLIP_AND_STORE_OFF_4H_VECS((dest + 16 * dest_stride),
935 dest_stride, n1, n3, n5, n7);
936 } 628 }
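Note for reviewers: the column pass applies the final 2-D IDCT scaling before reconstruction: SRARI_H4_SH(..., 6) rounds and arithmetic-shifts four vectors right by 6, and VP9_ADDBLK_ST8x4_UB takes over from the local add/clip/store macro deleted at the top of this file. Per int16 lane the scaling is simply (sketch):

/* Per-lane effect of SRARI_H4_SH(..., 6): round to nearest, then shift. */
static int16_t round_shift_6(int16_t x) {
  return (int16_t)((x + 32) >> 6);
}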
937 629
938 static void vp9_idct8x32_1d_columns_addblk_msa(int16_t *input, uint8_t *dest, 630 static void vp9_idct8x32_1d_columns_addblk_msa(int16_t *input, uint8_t *dst,
939 int32_t dest_stride) { 631 int32_t dst_stride) {
940 DECLARE_ALIGNED(32, int16_t, tmp_odd_buf[16 * 8]); 632 DECLARE_ALIGNED(32, int16_t, tmp_odd_buf[16 * 8]);
941 DECLARE_ALIGNED(32, int16_t, tmp_eve_buf[16 * 8]); 633 DECLARE_ALIGNED(32, int16_t, tmp_eve_buf[16 * 8]);
942 634
943 vp9_idct8x32_column_even_process_store(input, &tmp_eve_buf[0]); 635 vp9_idct8x32_column_even_process_store(input, &tmp_eve_buf[0]);
944
945 vp9_idct8x32_column_odd_process_store(input, &tmp_odd_buf[0]); 636 vp9_idct8x32_column_odd_process_store(input, &tmp_odd_buf[0]);
946
947 vp9_idct8x32_column_butterfly_addblk(&tmp_eve_buf[0], &tmp_odd_buf[0], 637 vp9_idct8x32_column_butterfly_addblk(&tmp_eve_buf[0], &tmp_odd_buf[0],
948 dest, dest_stride); 638 dst, dst_stride);
949 } 639 }
950 640
951 void vp9_idct32x32_1024_add_msa(const int16_t *input, uint8_t *dest, 641 void vp9_idct32x32_1024_add_msa(const int16_t *input, uint8_t *dst,
952 int32_t dest_stride) { 642 int32_t dst_stride) {
953 int32_t i; 643 int32_t i;
954 DECLARE_ALIGNED(32, int16_t, out_arr[32 * 32]); 644 DECLARE_ALIGNED(32, int16_t, out_arr[32 * 32]);
955 int16_t *out_ptr = out_arr; 645 int16_t *out_ptr = out_arr;
956 646
957 /* transform rows */ 647 /* transform rows */
958 for (i = 0; i < 4; ++i) { 648 for (i = 0; i < 4; ++i) {
959 /* process 32 * 8 block */ 649 /* process 32 * 8 block */
960 vp9_idct32x8_1d_rows_msa((input + (i << 8)), (out_ptr + (i << 8))); 650 vp9_idct32x8_1d_rows_msa((input + (i << 8)), (out_ptr + (i << 8)));
961 } 651 }
962 652
963 /* transform columns */ 653 /* transform columns */
964 for (i = 0; i < 4; ++i) { 654 for (i = 0; i < 4; ++i) {
965 /* process 8 * 32 block */ 655 /* process 8 * 32 block */
966 vp9_idct8x32_1d_columns_addblk_msa((out_ptr + (i << 3)), (dest + (i << 3)), 656 vp9_idct8x32_1d_columns_addblk_msa((out_ptr + (i << 3)), (dst + (i << 3)),
967 dest_stride); 657 dst_stride);
968 } 658 }
969 } 659 }
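Note for reviewers: the driver keeps the usual separable structure, four 32x8 row passes into the 32x32 intermediate buffer followed by four 8x32 column passes that also add into the destination. The shift arithmetic is just block offsets (sketch of the offsets, not new code):

/* (i << 8) == i * 256 == i-th group of 8 rows (8 * 32 coefficients).
 * (i << 3) == i * 8   == i-th strip of 8 columns.                    */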
970 660
971 void vp9_idct32x32_34_add_msa(const int16_t *input, uint8_t *dest, 661 void vp9_idct32x32_34_add_msa(const int16_t *input, uint8_t *dst,
972 int32_t dest_stride) { 662 int32_t dst_stride) {
973 int32_t i; 663 int32_t i;
974 DECLARE_ALIGNED(32, int16_t, out_arr[32 * 32]); 664 DECLARE_ALIGNED(32, int16_t, out_arr[32 * 32]);
975 int16_t *out_ptr = out_arr; 665 int16_t *out_ptr = out_arr;
976 666
977 for (i = 32; i--;) { 667 for (i = 32; i--;) {
978 __asm__ __volatile__ ( 668 __asm__ __volatile__ (
979 "sw $zero, 0(%[out_ptr]) \n\t" 669 "sw $zero, 0(%[out_ptr]) \n\t"
980 "sw $zero, 4(%[out_ptr]) \n\t" 670 "sw $zero, 4(%[out_ptr]) \n\t"
981 "sw $zero, 8(%[out_ptr]) \n\t" 671 "sw $zero, 8(%[out_ptr]) \n\t"
982 "sw $zero, 12(%[out_ptr]) \n\t" 672 "sw $zero, 12(%[out_ptr]) \n\t"
(...skipping 18 matching lines...)
1001 } 691 }
1002 692
1003 out_ptr = out_arr; 693 out_ptr = out_arr;
1004 694
1005 /* rows: only upper-left 8x8 has non-zero coeff */ 695 /* rows: only upper-left 8x8 has non-zero coeff */
1006 vp9_idct32x8_1d_rows_msa(input, out_ptr); 696 vp9_idct32x8_1d_rows_msa(input, out_ptr);
1007 697
1008 /* transform columns */ 698 /* transform columns */
1009 for (i = 0; i < 4; ++i) { 699 for (i = 0; i < 4; ++i) {
1010 /* process 8 * 32 block */ 700 /* process 8 * 32 block */
1011 vp9_idct8x32_1d_columns_addblk_msa((out_ptr + (i << 3)), (dest + (i << 3)), 701 vp9_idct8x32_1d_columns_addblk_msa((out_ptr + (i << 3)), (dst + (i << 3)),
1012 dest_stride); 702 dst_stride);
1013 } 703 }
1014 } 704 }
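Note for reviewers: in the 34-coefficient case only the top-left 8x8 of the coefficient block can be non-zero, so a single 32x8 row pass is enough; the inline assembly above just zero-fills the intermediate buffer first (each iteration stores 64 zero bytes). Functionally the loop amounts to (sketch):

/* Equivalent of the zero-fill loop above. */
memset(out_arr, 0, sizeof(int16_t) * 32 * 32);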
1015 705
1016 void vp9_idct32x32_1_add_msa(const int16_t *input, uint8_t *dest, 706 void vp9_idct32x32_1_add_msa(const int16_t *input, uint8_t *dst,
1017 int32_t dest_stride) { 707 int32_t dst_stride) {
1018 int32_t i, const1; 708 int32_t i;
1019 v8i16 const2;
1020 int16_t out; 709 int16_t out;
1021 v8i16 res0, res1, res2, res3, res4, res5, res6, res7; 710 v16u8 dst0, dst1, dst2, dst3, tmp0, tmp1, tmp2, tmp3;
1022 v16u8 dest0, dest1, dest2, dest3; 711 v8i16 res0, res1, res2, res3, res4, res5, res6, res7, vec;
1023 v16u8 tmp0, tmp1, tmp2, tmp3;
1024 v16i8 zero = { 0 };
1025 712
1026 out = dct_const_round_shift(input[0] * cospi_16_64); 713 out = ROUND_POWER_OF_TWO((input[0] * cospi_16_64), DCT_CONST_BITS);
1027 out = dct_const_round_shift(out * cospi_16_64); 714 out = ROUND_POWER_OF_TWO((out * cospi_16_64), DCT_CONST_BITS);
1028 const1 = ROUND_POWER_OF_TWO(out, 6); 715 out = ROUND_POWER_OF_TWO(out, 6);
1029 716
1030 const2 = __msa_fill_h(const1); 717 vec = __msa_fill_h(out);
1031 718
1032 for (i = 0; i < 16; ++i) { 719 for (i = 16; i--;) {
1033 dest0 = LOAD_UB(dest); 720 LD_UB2(dst, 16, dst0, dst1);
1034 dest1 = LOAD_UB(dest + 16); 721 LD_UB2(dst + dst_stride, 16, dst2, dst3);
1035 dest2 = LOAD_UB(dest + dest_stride);
1036 dest3 = LOAD_UB(dest + dest_stride + 16);
1037 722
1038 res0 = (v8i16)__msa_ilvr_b(zero, (v16i8)dest0); 723 UNPCK_UB_SH(dst0, res0, res4);
1039 res1 = (v8i16)__msa_ilvr_b(zero, (v16i8)dest1); 724 UNPCK_UB_SH(dst1, res1, res5);
1040 res2 = (v8i16)__msa_ilvr_b(zero, (v16i8)dest2); 725 UNPCK_UB_SH(dst2, res2, res6);
1041 res3 = (v8i16)__msa_ilvr_b(zero, (v16i8)dest3); 726 UNPCK_UB_SH(dst3, res3, res7);
1042 res4 = (v8i16)__msa_ilvl_b(zero, (v16i8)dest0); 727 ADD4(res0, vec, res1, vec, res2, vec, res3, vec, res0, res1, res2, res3);
1043 res5 = (v8i16)__msa_ilvl_b(zero, (v16i8)dest1); 728 ADD4(res4, vec, res5, vec, res6, vec, res7, vec, res4, res5, res6, res7);
1044 res6 = (v8i16)__msa_ilvl_b(zero, (v16i8)dest2); 729 CLIP_SH4_0_255(res0, res1, res2, res3);
1045 res7 = (v8i16)__msa_ilvl_b(zero, (v16i8)dest3); 730 CLIP_SH4_0_255(res4, res5, res6, res7);
731 PCKEV_B4_UB(res4, res0, res5, res1, res6, res2, res7, res3,
732 tmp0, tmp1, tmp2, tmp3);
1046 733
1047 res0 += const2; 734 ST_UB2(tmp0, tmp1, dst, 16);
1048 res1 += const2; 735 dst += dst_stride;
1049 res2 += const2; 736 ST_UB2(tmp2, tmp3, dst, 16);
1050 res3 += const2; 737 dst += dst_stride;
1051 res4 += const2;
1052 res5 += const2;
1053 res6 += const2;
1054 res7 += const2;
1055
1056 res0 = CLIP_UNSIGNED_CHAR_H(res0);
1057 res1 = CLIP_UNSIGNED_CHAR_H(res1);
1058 res2 = CLIP_UNSIGNED_CHAR_H(res2);
1059 res3 = CLIP_UNSIGNED_CHAR_H(res3);
1060 res4 = CLIP_UNSIGNED_CHAR_H(res4);
1061 res5 = CLIP_UNSIGNED_CHAR_H(res5);
1062 res6 = CLIP_UNSIGNED_CHAR_H(res6);
1063 res7 = CLIP_UNSIGNED_CHAR_H(res7);
1064
1065 tmp0 = (v16u8)__msa_pckev_b((v16i8)res4, (v16i8)res0);
1066 tmp1 = (v16u8)__msa_pckev_b((v16i8)res5, (v16i8)res1);
1067 tmp2 = (v16u8)__msa_pckev_b((v16i8)res6, (v16i8)res2);
1068 tmp3 = (v16u8)__msa_pckev_b((v16i8)res7, (v16i8)res3);
1069
1070 STORE_UB(tmp0, dest);
1071 STORE_UB(tmp1, dest + 16);
1072 dest += dest_stride;
1073 STORE_UB(tmp2, dest);
1074 STORE_UB(tmp3, dest + 16);
1075 dest += dest_stride;
1076 } 738 }
1077 } 739 }
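Note for reviewers: the DC-only path derives one reconstructed value from input[0] and splats it across the full 32x32 block. Written out as scalar arithmetic (a sketch; cospi_16_64 and DCT_CONST_BITS = 14 as defined in vp9_idct.h):

/* DC value added to every destination pixel. */
int32_t a1 = (input[0] * cospi_16_64 + (1 << 13)) >> 14;  /* dct_const_round_shift */
a1 = (a1 * cospi_16_64 + (1 << 13)) >> 14;                /* second 1-D pass */
a1 = (a1 + (1 << 5)) >> 6;                                /* ROUND_POWER_OF_TWO(out, 6) */
/* then per pixel: dst[x] = clip_to_uint8(dst[x] + a1); */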