OLD | NEW |
1 /* | 1 /* |
2 * Copyright (c) 2012 The WebM project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebM project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
11 #include <immintrin.h> // AVX2 | 11 #include <immintrin.h> // AVX2 |
12 #include "vp9/common/vp9_idct.h" // for cospi constants | 12 #include "vp9/common/vp9_idct.h" // for cospi constants |
13 #include "vpx_ports/mem.h" | 13 #include "vpx_ports/mem.h" |
14 | 14 |
15 void vp9_fdct4x4_avx2(const int16_t *input, int16_t *output, int stride) { | |
16 // The 2D transform is done with two passes which are actually pretty | |
17 // similar. In the first one, we transform the columns and transpose | |
18 // the results. In the second one, we transform the rows. To achieve that, | |
19 // as the first pass results are transposed, we transform the columns (that |
20 // is, the transposed rows) and transpose the results (so that they go back |
21 // in normal/row positions). | |
22 int pass; | |
23 // Constants | |
24 // In one case the constants used are all the same; in all the others we |
25 // need a pair of them repeated four times. This is done by constructing |
26 // the 32-bit constant corresponding to that pair. |
27 const __m128i k__cospi_p16_p16 = _mm_set1_epi16(cospi_16_64); | |
28 const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64); | |
29 const __m128i k__cospi_p24_p08 = pair_set_epi16(cospi_24_64, cospi_8_64); | |
30 const __m128i k__cospi_m08_p24 = pair_set_epi16(-cospi_8_64, cospi_24_64); | |
31 const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING); | |
32 const __m128i k__nonzero_bias_a = _mm_setr_epi16(0, 1, 1, 1, 1, 1, 1, 1); | |
33 const __m128i k__nonzero_bias_b = _mm_setr_epi16(1, 0, 0, 0, 0, 0, 0, 0); | |
34 const __m128i kOne = _mm_set1_epi16(1); | |
35 __m128i in0, in1, in2, in3; | |
36 // Load inputs. | |
37 { | |
38 in0 = _mm_loadl_epi64((const __m128i *)(input + 0 * stride)); | |
39 in1 = _mm_loadl_epi64((const __m128i *)(input + 1 * stride)); | |
40 in2 = _mm_loadl_epi64((const __m128i *)(input + 2 * stride)); | |
41 in3 = _mm_loadl_epi64((const __m128i *)(input + 3 * stride)); | |
42 // x = x << 4 | |
43 in0 = _mm_slli_epi16(in0, 4); | |
44 in1 = _mm_slli_epi16(in1, 4); | |
45 in2 = _mm_slli_epi16(in2, 4); | |
46 in3 = _mm_slli_epi16(in3, 4); | |
47 // if (i == 0 && input[0]) input[0] += 1; | |
48 { | |
49 // The mask will only indicate whether the first value is zero; all |
50 // other comparisons will fail as something shifted by 4 (above << 4) |
51 // can never be equal to one. To increment in the non-zero case, we | |
52 // add the mask and one for the first element: | |
53 // - if zero, mask = -1, v = v - 1 + 1 = v | |
54 // - if non-zero, mask = 0, v = v + 0 + 1 = v + 1 | |
55 __m128i mask = _mm_cmpeq_epi16(in0, k__nonzero_bias_a); | |
56 in0 = _mm_add_epi16(in0, mask); | |
57 in0 = _mm_add_epi16(in0, k__nonzero_bias_b); | |
58 } | |
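      // Worked example of the bias above: a DC sample of 3 becomes 48 after
      // the << 4; the compare against lane 0 of k__nonzero_bias_a (which is
      // 0) gives mask = 0, so 48 + 0 + 1 = 49. A DC sample of 0 gives
      // mask = -1 (0xffff), so 0 + (-1) + 1 = 0 is left untouched. Lanes
      // 1..7 compare against 1, which a value shifted left by 4 can never
      // equal, so they only see the +0 from k__nonzero_bias_b.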
59 } | |
60 // Do the two transform/transpose passes | |
61 for (pass = 0; pass < 2; ++pass) { | |
62 // Transform 1/2: Add/subtract | |
63 const __m128i r0 = _mm_add_epi16(in0, in3); | |
64 const __m128i r1 = _mm_add_epi16(in1, in2); | |
65 const __m128i r2 = _mm_sub_epi16(in1, in2); | |
66 const __m128i r3 = _mm_sub_epi16(in0, in3); | |
67 // Transform 1/2: Interleave to do the multiply by constants which gets us | |
68 // into 32 bits. | |
69 const __m128i t0 = _mm_unpacklo_epi16(r0, r1); | |
70 const __m128i t2 = _mm_unpacklo_epi16(r2, r3); | |
71 const __m128i u0 = _mm_madd_epi16(t0, k__cospi_p16_p16); | |
72 const __m128i u2 = _mm_madd_epi16(t0, k__cospi_p16_m16); | |
73 const __m128i u4 = _mm_madd_epi16(t2, k__cospi_p24_p08); | |
74 const __m128i u6 = _mm_madd_epi16(t2, k__cospi_m08_p24); | |
75 const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING); | |
76 const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING); | |
77 const __m128i v4 = _mm_add_epi32(u4, k__DCT_CONST_ROUNDING); | |
78 const __m128i v6 = _mm_add_epi32(u6, k__DCT_CONST_ROUNDING); | |
79 const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS); | |
80 const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS); | |
81 const __m128i w4 = _mm_srai_epi32(v4, DCT_CONST_BITS); | |
82 const __m128i w6 = _mm_srai_epi32(v6, DCT_CONST_BITS); | |
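    // The interleave/madd/round/shift sequence above is the fixed-point
    // butterfly used throughout this file: after the unpack each 32-bit lane
    // of t0/t2 holds an (r0, r1) or (r2, r3) pair, _mm_madd_epi16 yields
    // r0 * c0 + r1 * c1 per lane, and adding DCT_CONST_ROUNDING
    // (1 << (DCT_CONST_BITS - 1), per the definitions in vp9_idct.h) before
    // the arithmetic shift by DCT_CONST_BITS rounds the product back down.
    // For example, assuming cospi_16_64 == 11585 and DCT_CONST_BITS == 14,
    // r0 = r1 = 2 gives (2 * 11585 + 2 * 11585 + 8192) >> 14 = 3.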
83 // Combine and transpose | |
84 const __m128i res0 = _mm_packs_epi32(w0, w2); | |
85 const __m128i res1 = _mm_packs_epi32(w4, w6); | |
86 // 00 01 02 03 20 21 22 23 | |
87 // 10 11 12 13 30 31 32 33 | |
88 const __m128i tr0_0 = _mm_unpacklo_epi16(res0, res1); | |
89 const __m128i tr0_1 = _mm_unpackhi_epi16(res0, res1); | |
90 // 00 10 01 11 02 12 03 13 | |
91 // 20 30 21 31 22 32 23 33 | |
92 in0 = _mm_unpacklo_epi32(tr0_0, tr0_1); | |
93 in2 = _mm_unpackhi_epi32(tr0_0, tr0_1); | |
94 // 00 10 20 30 01 11 21 31 in0 contains 0 followed by 1 | |
95 // 02 12 22 32 03 13 23 33 in2 contains 2 followed by 3 | |
96 if (0 == pass) { | |
97 // Extract values in the high part for second pass as transform code | |
98 // only uses the first four values. | |
99 in1 = _mm_unpackhi_epi64(in0, in0); | |
100 in3 = _mm_unpackhi_epi64(in2, in2); | |
101 } else { | |
102 // Post-condition the output as (v + 1) >> 2 and store it, taking |
103 // advantage of the fact that rows 1/3 are stored just after rows 0/2. |
104 __m128i out01 = _mm_add_epi16(in0, kOne); | |
105 __m128i out23 = _mm_add_epi16(in2, kOne); | |
106 out01 = _mm_srai_epi16(out01, 2); | |
107 out23 = _mm_srai_epi16(out23, 2); | |
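      // (v + 1) >> 2 rounds the final coefficients, e.g. 5 -> 1, 7 -> 2 and
      // -5 -> -1 (the arithmetic shift preserves the sign).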
108 _mm_storeu_si128((__m128i *)(output + 0 * 4), out01); | |
109 _mm_storeu_si128((__m128i *)(output + 2 * 4), out23); | |
110 } | |
111 } | |
112 } | |
113 | |
114 static INLINE void load_buffer_4x4_avx2(const int16_t *input, __m128i *in, | |
115 int stride) { | |
116 const __m128i k__nonzero_bias_a = _mm_setr_epi16(0, 1, 1, 1, 1, 1, 1, 1); | |
117 const __m128i k__nonzero_bias_b = _mm_setr_epi16(1, 0, 0, 0, 0, 0, 0, 0); | |
118 __m128i mask; | |
119 | |
120 in[0] = _mm_loadl_epi64((const __m128i *)(input + 0 * stride)); | |
121 in[1] = _mm_loadl_epi64((const __m128i *)(input + 1 * stride)); | |
122 in[2] = _mm_loadl_epi64((const __m128i *)(input + 2 * stride)); | |
123 in[3] = _mm_loadl_epi64((const __m128i *)(input + 3 * stride)); | |
124 | |
125 in[0] = _mm_slli_epi16(in[0], 4); | |
126 in[1] = _mm_slli_epi16(in[1], 4); | |
127 in[2] = _mm_slli_epi16(in[2], 4); | |
128 in[3] = _mm_slli_epi16(in[3], 4); | |
129 | |
130 mask = _mm_cmpeq_epi16(in[0], k__nonzero_bias_a); | |
131 in[0] = _mm_add_epi16(in[0], mask); | |
132 in[0] = _mm_add_epi16(in[0], k__nonzero_bias_b); | |
133 } | |
134 | |
135 static INLINE void write_buffer_4x4_avx2(int16_t *output, __m128i *res) { | |
136 const __m128i kOne = _mm_set1_epi16(1); | |
137 __m128i in01 = _mm_unpacklo_epi64(res[0], res[1]); | |
138 __m128i in23 = _mm_unpacklo_epi64(res[2], res[3]); | |
139 __m128i out01 = _mm_add_epi16(in01, kOne); | |
140 __m128i out23 = _mm_add_epi16(in23, kOne); | |
141 out01 = _mm_srai_epi16(out01, 2); | |
142 out23 = _mm_srai_epi16(out23, 2); | |
143 _mm_store_si128((__m128i *)(output + 0 * 8), out01); | |
144 _mm_store_si128((__m128i *)(output + 1 * 8), out23); | |
145 } | |
146 | |
147 static INLINE void transpose_4x4_avx2(__m128i *res) { | |
148 // Combine and transpose | |
149 // 00 01 02 03 20 21 22 23 | |
150 // 10 11 12 13 30 31 32 33 | |
151 const __m128i tr0_0 = _mm_unpacklo_epi16(res[0], res[1]); | |
152 const __m128i tr0_1 = _mm_unpackhi_epi16(res[0], res[1]); | |
153 | |
154 // 00 10 01 11 02 12 03 13 | |
155 // 20 30 21 31 22 32 23 33 | |
156 res[0] = _mm_unpacklo_epi32(tr0_0, tr0_1); | |
157 res[2] = _mm_unpackhi_epi32(tr0_0, tr0_1); | |
158 | |
159 // 00 10 20 30 01 11 21 31 | |
160 // 02 12 22 32 03 13 23 33 | |
161 // only use the first 4 16-bit integers | |
162 res[1] = _mm_unpackhi_epi64(res[0], res[0]); | |
163 res[3] = _mm_unpackhi_epi64(res[2], res[2]); | |
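  // res[1] and res[3] end up with the high halves of res[0]/res[2]
  // duplicated into both 64-bit lanes; fdct4_avx2, fadst4_avx2 and
  // write_buffer_4x4_avx2 only read the low 64 bits of each register.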
164 } | |
165 | |
166 void fdct4_avx2(__m128i *in) { | |
167 const __m128i k__cospi_p16_p16 = _mm_set1_epi16(cospi_16_64); | |
168 const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64); | |
169 const __m128i k__cospi_p08_p24 = pair_set_epi16(cospi_8_64, cospi_24_64); | |
170 const __m128i k__cospi_p24_m08 = pair_set_epi16(cospi_24_64, -cospi_8_64); | |
171 const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING); | |
172 | |
173 __m128i u[4], v[4]; | |
174 u[0] = _mm_unpacklo_epi16(in[0], in[1]); |
175 u[1] = _mm_unpacklo_epi16(in[3], in[2]); |
176 | |
177 v[0] = _mm_add_epi16(u[0], u[1]); | |
178 v[1] = _mm_sub_epi16(u[0], u[1]); | |
179 | |
180 u[0] = _mm_madd_epi16(v[0], k__cospi_p16_p16); // 0 | |
181 u[1] = _mm_madd_epi16(v[0], k__cospi_p16_m16); // 2 | |
182 u[2] = _mm_madd_epi16(v[1], k__cospi_p08_p24); // 1 | |
183 u[3] = _mm_madd_epi16(v[1], k__cospi_p24_m08); // 3 | |
184 | |
185 v[0] = _mm_add_epi32(u[0], k__DCT_CONST_ROUNDING); | |
186 v[1] = _mm_add_epi32(u[1], k__DCT_CONST_ROUNDING); | |
187 v[2] = _mm_add_epi32(u[2], k__DCT_CONST_ROUNDING); | |
188 v[3] = _mm_add_epi32(u[3], k__DCT_CONST_ROUNDING); | |
189 u[0] = _mm_srai_epi32(v[0], DCT_CONST_BITS); | |
190 u[1] = _mm_srai_epi32(v[1], DCT_CONST_BITS); | |
191 u[2] = _mm_srai_epi32(v[2], DCT_CONST_BITS); | |
192 u[3] = _mm_srai_epi32(v[3], DCT_CONST_BITS); | |
193 | |
194 in[0] = _mm_packs_epi32(u[0], u[1]); | |
195 in[1] = _mm_packs_epi32(u[2], u[3]); | |
196 transpose_4x4_avx2(in); | |
197 } | |
198 | |
199 void fadst4_avx2(__m128i *in) { | |
200 const __m128i k__sinpi_p01_p02 = pair_set_epi16(sinpi_1_9, sinpi_2_9); | |
201 const __m128i k__sinpi_p04_m01 = pair_set_epi16(sinpi_4_9, -sinpi_1_9); | |
202 const __m128i k__sinpi_p03_p04 = pair_set_epi16(sinpi_3_9, sinpi_4_9); | |
203 const __m128i k__sinpi_m03_p02 = pair_set_epi16(-sinpi_3_9, sinpi_2_9); | |
204 const __m128i k__sinpi_p03_p03 = _mm_set1_epi16(sinpi_3_9); | |
205 const __m128i kZero = _mm_set1_epi16(0); | |
206 const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING); | |
207 __m128i u[8], v[8]; | |
208 __m128i in7 = _mm_add_epi16(in[0], in[1]); | |
209 | |
210 u[0] = _mm_unpacklo_epi16(in[0], in[1]); | |
211 u[1] = _mm_unpacklo_epi16(in[2], in[3]); | |
212 u[2] = _mm_unpacklo_epi16(in7, kZero); | |
213 u[3] = _mm_unpacklo_epi16(in[2], kZero); | |
214 u[4] = _mm_unpacklo_epi16(in[3], kZero); | |
215 | |
216 v[0] = _mm_madd_epi16(u[0], k__sinpi_p01_p02); // s0 + s2 | |
217 v[1] = _mm_madd_epi16(u[1], k__sinpi_p03_p04); // s4 + s5 | |
218 v[2] = _mm_madd_epi16(u[2], k__sinpi_p03_p03); // x1 | |
219 v[3] = _mm_madd_epi16(u[0], k__sinpi_p04_m01); // s1 - s3 | |
220 v[4] = _mm_madd_epi16(u[1], k__sinpi_m03_p02); // -s4 + s6 | |
221 v[5] = _mm_madd_epi16(u[3], k__sinpi_p03_p03); // s4 | |
222 v[6] = _mm_madd_epi16(u[4], k__sinpi_p03_p03); | |
223 | |
224 u[0] = _mm_add_epi32(v[0], v[1]); | |
225 u[1] = _mm_sub_epi32(v[2], v[6]); | |
226 u[2] = _mm_add_epi32(v[3], v[4]); | |
227 u[3] = _mm_sub_epi32(u[2], u[0]); | |
228 u[4] = _mm_slli_epi32(v[5], 2); | |
229 u[5] = _mm_sub_epi32(u[4], v[5]); | |
230 u[6] = _mm_add_epi32(u[3], u[5]); | |
231 | |
232 v[0] = _mm_add_epi32(u[0], k__DCT_CONST_ROUNDING); | |
233 v[1] = _mm_add_epi32(u[1], k__DCT_CONST_ROUNDING); | |
234 v[2] = _mm_add_epi32(u[2], k__DCT_CONST_ROUNDING); | |
235 v[3] = _mm_add_epi32(u[6], k__DCT_CONST_ROUNDING); | |
236 | |
237 u[0] = _mm_srai_epi32(v[0], DCT_CONST_BITS); | |
238 u[1] = _mm_srai_epi32(v[1], DCT_CONST_BITS); | |
239 u[2] = _mm_srai_epi32(v[2], DCT_CONST_BITS); | |
240 u[3] = _mm_srai_epi32(v[3], DCT_CONST_BITS); | |
241 | |
242 in[0] = _mm_packs_epi32(u[0], u[2]); | |
243 in[1] = _mm_packs_epi32(u[1], u[3]); | |
244 transpose_4x4_avx2(in); | |
245 } | |
246 | |
247 void vp9_fht4x4_avx2(const int16_t *input, int16_t *output, | |
248 int stride, int tx_type) { | |
249 __m128i in[4]; | |
250 | |
251 switch (tx_type) { | |
252 case DCT_DCT: | |
253 vp9_fdct4x4_avx2(input, output, stride); | |
254 break; | |
255 case ADST_DCT: | |
256 load_buffer_4x4_avx2(input, in, stride); | |
257 fadst4_avx2(in); | |
258 fdct4_avx2(in); | |
259 write_buffer_4x4_avx2(output, in); | |
260 break; | |
261 case DCT_ADST: | |
262 load_buffer_4x4_avx2(input, in, stride); | |
263 fdct4_avx2(in); | |
264 fadst4_avx2(in); | |
265 write_buffer_4x4_avx2(output, in); | |
266 break; | |
267 case ADST_ADST: | |
268 load_buffer_4x4_avx2(input, in, stride); | |
269 fadst4_avx2(in); | |
270 fadst4_avx2(in); | |
271 write_buffer_4x4_avx2(output, in); | |
272 break; | |
273 default: | |
274 assert(0); | |
275 break; | |
276 } | |
277 } | |
278 | |
279 void vp9_fdct8x8_avx2(const int16_t *input, int16_t *output, int stride) { | |
280 int pass; | |
281 // Constants | |
282 // In one case the constants used are all the same; in all the others we |
283 // need a pair of them repeated four times. This is done by constructing |
284 // the 32-bit constant corresponding to that pair. |
285 const __m128i k__cospi_p16_p16 = _mm_set1_epi16(cospi_16_64); | |
286 const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64); | |
287 const __m128i k__cospi_p24_p08 = pair_set_epi16(cospi_24_64, cospi_8_64); | |
288 const __m128i k__cospi_m08_p24 = pair_set_epi16(-cospi_8_64, cospi_24_64); | |
289 const __m128i k__cospi_p28_p04 = pair_set_epi16(cospi_28_64, cospi_4_64); | |
290 const __m128i k__cospi_m04_p28 = pair_set_epi16(-cospi_4_64, cospi_28_64); | |
291 const __m128i k__cospi_p12_p20 = pair_set_epi16(cospi_12_64, cospi_20_64); | |
292 const __m128i k__cospi_m20_p12 = pair_set_epi16(-cospi_20_64, cospi_12_64); | |
293 const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING); | |
294 // Load input | |
295 __m128i in0 = _mm_load_si128((const __m128i *)(input + 0 * stride)); | |
296 __m128i in1 = _mm_load_si128((const __m128i *)(input + 1 * stride)); | |
297 __m128i in2 = _mm_load_si128((const __m128i *)(input + 2 * stride)); | |
298 __m128i in3 = _mm_load_si128((const __m128i *)(input + 3 * stride)); | |
299 __m128i in4 = _mm_load_si128((const __m128i *)(input + 4 * stride)); | |
300 __m128i in5 = _mm_load_si128((const __m128i *)(input + 5 * stride)); | |
301 __m128i in6 = _mm_load_si128((const __m128i *)(input + 6 * stride)); | |
302 __m128i in7 = _mm_load_si128((const __m128i *)(input + 7 * stride)); | |
303 // Pre-condition input (shift by two) | |
304 in0 = _mm_slli_epi16(in0, 2); | |
305 in1 = _mm_slli_epi16(in1, 2); | |
306 in2 = _mm_slli_epi16(in2, 2); | |
307 in3 = _mm_slli_epi16(in3, 2); | |
308 in4 = _mm_slli_epi16(in4, 2); | |
309 in5 = _mm_slli_epi16(in5, 2); | |
310 in6 = _mm_slli_epi16(in6, 2); | |
311 in7 = _mm_slli_epi16(in7, 2); | |
312 | |
313 // We do two passes, first the columns, then the rows. The results of the | |
314 // first pass are transposed so that the same column code can be reused. The | |
315 // results of the second pass are also transposed so that the rows (processed | |
316 // as columns) are put back in row positions. | |
317 for (pass = 0; pass < 2; pass++) { | |
318 // To store results of each pass before the transpose. | |
319 __m128i res0, res1, res2, res3, res4, res5, res6, res7; | |
320 // Add/subtract | |
321 const __m128i q0 = _mm_add_epi16(in0, in7); | |
322 const __m128i q1 = _mm_add_epi16(in1, in6); | |
323 const __m128i q2 = _mm_add_epi16(in2, in5); | |
324 const __m128i q3 = _mm_add_epi16(in3, in4); | |
325 const __m128i q4 = _mm_sub_epi16(in3, in4); | |
326 const __m128i q5 = _mm_sub_epi16(in2, in5); | |
327 const __m128i q6 = _mm_sub_epi16(in1, in6); | |
328 const __m128i q7 = _mm_sub_epi16(in0, in7); | |
329 // Work on first four results | |
330 { | |
331 // Add/subtract | |
332 const __m128i r0 = _mm_add_epi16(q0, q3); | |
333 const __m128i r1 = _mm_add_epi16(q1, q2); | |
334 const __m128i r2 = _mm_sub_epi16(q1, q2); | |
335 const __m128i r3 = _mm_sub_epi16(q0, q3); | |
336 // Interleave to do the multiply by constants which gets us into 32bits | |
337 const __m128i t0 = _mm_unpacklo_epi16(r0, r1); | |
338 const __m128i t1 = _mm_unpackhi_epi16(r0, r1); | |
339 const __m128i t2 = _mm_unpacklo_epi16(r2, r3); | |
340 const __m128i t3 = _mm_unpackhi_epi16(r2, r3); | |
341 const __m128i u0 = _mm_madd_epi16(t0, k__cospi_p16_p16); | |
342 const __m128i u1 = _mm_madd_epi16(t1, k__cospi_p16_p16); | |
343 const __m128i u2 = _mm_madd_epi16(t0, k__cospi_p16_m16); | |
344 const __m128i u3 = _mm_madd_epi16(t1, k__cospi_p16_m16); | |
345 const __m128i u4 = _mm_madd_epi16(t2, k__cospi_p24_p08); | |
346 const __m128i u5 = _mm_madd_epi16(t3, k__cospi_p24_p08); | |
347 const __m128i u6 = _mm_madd_epi16(t2, k__cospi_m08_p24); | |
348 const __m128i u7 = _mm_madd_epi16(t3, k__cospi_m08_p24); | |
349 // dct_const_round_shift | |
350 const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING); | |
351 const __m128i v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING); | |
352 const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING); | |
353 const __m128i v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING); | |
354 const __m128i v4 = _mm_add_epi32(u4, k__DCT_CONST_ROUNDING); | |
355 const __m128i v5 = _mm_add_epi32(u5, k__DCT_CONST_ROUNDING); | |
356 const __m128i v6 = _mm_add_epi32(u6, k__DCT_CONST_ROUNDING); | |
357 const __m128i v7 = _mm_add_epi32(u7, k__DCT_CONST_ROUNDING); | |
358 const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS); | |
359 const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS); | |
360 const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS); | |
361 const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS); | |
362 const __m128i w4 = _mm_srai_epi32(v4, DCT_CONST_BITS); | |
363 const __m128i w5 = _mm_srai_epi32(v5, DCT_CONST_BITS); | |
364 const __m128i w6 = _mm_srai_epi32(v6, DCT_CONST_BITS); | |
365 const __m128i w7 = _mm_srai_epi32(v7, DCT_CONST_BITS); | |
366 // Combine | |
367 res0 = _mm_packs_epi32(w0, w1); | |
368 res4 = _mm_packs_epi32(w2, w3); | |
369 res2 = _mm_packs_epi32(w4, w5); | |
370 res6 = _mm_packs_epi32(w6, w7); | |
371 } | |
372 // Work on next four results | |
373 { | |
374 // Interleave to do the multiply by constants which gets us into 32bits | |
375 const __m128i d0 = _mm_unpacklo_epi16(q6, q5); | |
376 const __m128i d1 = _mm_unpackhi_epi16(q6, q5); | |
377 const __m128i e0 = _mm_madd_epi16(d0, k__cospi_p16_m16); | |
378 const __m128i e1 = _mm_madd_epi16(d1, k__cospi_p16_m16); | |
379 const __m128i e2 = _mm_madd_epi16(d0, k__cospi_p16_p16); | |
380 const __m128i e3 = _mm_madd_epi16(d1, k__cospi_p16_p16); | |
381 // dct_const_round_shift | |
382 const __m128i f0 = _mm_add_epi32(e0, k__DCT_CONST_ROUNDING); | |
383 const __m128i f1 = _mm_add_epi32(e1, k__DCT_CONST_ROUNDING); | |
384 const __m128i f2 = _mm_add_epi32(e2, k__DCT_CONST_ROUNDING); | |
385 const __m128i f3 = _mm_add_epi32(e3, k__DCT_CONST_ROUNDING); | |
386 const __m128i s0 = _mm_srai_epi32(f0, DCT_CONST_BITS); | |
387 const __m128i s1 = _mm_srai_epi32(f1, DCT_CONST_BITS); | |
388 const __m128i s2 = _mm_srai_epi32(f2, DCT_CONST_BITS); | |
389 const __m128i s3 = _mm_srai_epi32(f3, DCT_CONST_BITS); | |
390 // Combine | |
391 const __m128i r0 = _mm_packs_epi32(s0, s1); | |
392 const __m128i r1 = _mm_packs_epi32(s2, s3); | |
393 // Add/subtract | |
394 const __m128i x0 = _mm_add_epi16(q4, r0); | |
395 const __m128i x1 = _mm_sub_epi16(q4, r0); | |
396 const __m128i x2 = _mm_sub_epi16(q7, r1); | |
397 const __m128i x3 = _mm_add_epi16(q7, r1); | |
398 // Interleave to do the multiply by constants which gets us into 32bits | |
399 const __m128i t0 = _mm_unpacklo_epi16(x0, x3); | |
400 const __m128i t1 = _mm_unpackhi_epi16(x0, x3); | |
401 const __m128i t2 = _mm_unpacklo_epi16(x1, x2); | |
402 const __m128i t3 = _mm_unpackhi_epi16(x1, x2); | |
403 const __m128i u0 = _mm_madd_epi16(t0, k__cospi_p28_p04); | |
404 const __m128i u1 = _mm_madd_epi16(t1, k__cospi_p28_p04); | |
405 const __m128i u2 = _mm_madd_epi16(t0, k__cospi_m04_p28); | |
406 const __m128i u3 = _mm_madd_epi16(t1, k__cospi_m04_p28); | |
407 const __m128i u4 = _mm_madd_epi16(t2, k__cospi_p12_p20); | |
408 const __m128i u5 = _mm_madd_epi16(t3, k__cospi_p12_p20); | |
409 const __m128i u6 = _mm_madd_epi16(t2, k__cospi_m20_p12); | |
410 const __m128i u7 = _mm_madd_epi16(t3, k__cospi_m20_p12); | |
411 // dct_const_round_shift | |
412 const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING); | |
413 const __m128i v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING); | |
414 const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING); | |
415 const __m128i v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING); | |
416 const __m128i v4 = _mm_add_epi32(u4, k__DCT_CONST_ROUNDING); | |
417 const __m128i v5 = _mm_add_epi32(u5, k__DCT_CONST_ROUNDING); | |
418 const __m128i v6 = _mm_add_epi32(u6, k__DCT_CONST_ROUNDING); | |
419 const __m128i v7 = _mm_add_epi32(u7, k__DCT_CONST_ROUNDING); | |
420 const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS); | |
421 const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS); | |
422 const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS); | |
423 const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS); | |
424 const __m128i w4 = _mm_srai_epi32(v4, DCT_CONST_BITS); | |
425 const __m128i w5 = _mm_srai_epi32(v5, DCT_CONST_BITS); | |
426 const __m128i w6 = _mm_srai_epi32(v6, DCT_CONST_BITS); | |
427 const __m128i w7 = _mm_srai_epi32(v7, DCT_CONST_BITS); | |
428 // Combine | |
429 res1 = _mm_packs_epi32(w0, w1); | |
430 res7 = _mm_packs_epi32(w2, w3); | |
431 res5 = _mm_packs_epi32(w4, w5); | |
432 res3 = _mm_packs_epi32(w6, w7); | |
433 } | |
434 // Transpose the 8x8. | |
435 { | |
436 // 00 01 02 03 04 05 06 07 | |
437 // 10 11 12 13 14 15 16 17 | |
438 // 20 21 22 23 24 25 26 27 | |
439 // 30 31 32 33 34 35 36 37 | |
440 // 40 41 42 43 44 45 46 47 | |
441 // 50 51 52 53 54 55 56 57 | |
442 // 60 61 62 63 64 65 66 67 | |
443 // 70 71 72 73 74 75 76 77 | |
444 const __m128i tr0_0 = _mm_unpacklo_epi16(res0, res1); | |
445 const __m128i tr0_1 = _mm_unpacklo_epi16(res2, res3); | |
446 const __m128i tr0_2 = _mm_unpackhi_epi16(res0, res1); | |
447 const __m128i tr0_3 = _mm_unpackhi_epi16(res2, res3); | |
448 const __m128i tr0_4 = _mm_unpacklo_epi16(res4, res5); | |
449 const __m128i tr0_5 = _mm_unpacklo_epi16(res6, res7); | |
450 const __m128i tr0_6 = _mm_unpackhi_epi16(res4, res5); | |
451 const __m128i tr0_7 = _mm_unpackhi_epi16(res6, res7); | |
452 // 00 10 01 11 02 12 03 13 | |
453 // 20 30 21 31 22 32 23 33 | |
454 // 04 14 05 15 06 16 07 17 | |
455 // 24 34 25 35 26 36 27 37 | |
456 // 40 50 41 51 42 52 43 53 | |
457 // 60 70 61 71 62 72 63 73 | |
458 // 44 54 45 55 46 56 47 57 |
459 // 64 74 65 75 66 76 67 77 | |
460 const __m128i tr1_0 = _mm_unpacklo_epi32(tr0_0, tr0_1); | |
461 const __m128i tr1_1 = _mm_unpacklo_epi32(tr0_2, tr0_3); | |
462 const __m128i tr1_2 = _mm_unpackhi_epi32(tr0_0, tr0_1); | |
463 const __m128i tr1_3 = _mm_unpackhi_epi32(tr0_2, tr0_3); | |
464 const __m128i tr1_4 = _mm_unpacklo_epi32(tr0_4, tr0_5); | |
465 const __m128i tr1_5 = _mm_unpacklo_epi32(tr0_6, tr0_7); | |
466 const __m128i tr1_6 = _mm_unpackhi_epi32(tr0_4, tr0_5); | |
467 const __m128i tr1_7 = _mm_unpackhi_epi32(tr0_6, tr0_7); | |
468 // 00 10 20 30 01 11 21 31 | |
469 // 40 50 60 70 41 51 61 71 | |
470 // 02 12 22 32 03 13 23 33 | |
471 // 42 52 62 72 43 53 63 73 | |
472 // 04 14 24 34 05 15 25 35 |
473 // 44 54 64 74 45 55 65 75 |
474 // 06 16 26 36 07 17 27 37 | |
475 // 46 56 66 76 47 57 67 77 | |
476 in0 = _mm_unpacklo_epi64(tr1_0, tr1_4); | |
477 in1 = _mm_unpackhi_epi64(tr1_0, tr1_4); | |
478 in2 = _mm_unpacklo_epi64(tr1_2, tr1_6); | |
479 in3 = _mm_unpackhi_epi64(tr1_2, tr1_6); | |
480 in4 = _mm_unpacklo_epi64(tr1_1, tr1_5); | |
481 in5 = _mm_unpackhi_epi64(tr1_1, tr1_5); | |
482 in6 = _mm_unpacklo_epi64(tr1_3, tr1_7); | |
483 in7 = _mm_unpackhi_epi64(tr1_3, tr1_7); | |
484 // 00 10 20 30 40 50 60 70 | |
485 // 01 11 21 31 41 51 61 71 | |
486 // 02 12 22 32 42 52 62 72 | |
487 // 03 13 23 33 43 53 63 73 | |
488 // 04 14 24 34 44 54 64 74 | |
489 // 05 15 25 35 45 55 65 75 | |
490 // 06 16 26 36 46 56 66 76 | |
491 // 07 17 27 37 47 57 67 77 | |
492 } | |
493 } | |
494 // Post-condition output and store it | |
495 { | |
496 // Post-condition (division by two) | |
497 // division of two 16 bits signed numbers using shifts | |
498 // n / 2 = (n - (n >> 15)) >> 1 | |
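    // For n >= 0 the correction term (n >> 15) is 0 and this is a plain
    // n >> 1; for n < 0 it is -1, so the result is (n + 1) >> 1, which
    // truncates toward zero like C division, e.g. n = -3:
    // (-3 - (-1)) >> 1 = -1.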
499 const __m128i sign_in0 = _mm_srai_epi16(in0, 15); | |
500 const __m128i sign_in1 = _mm_srai_epi16(in1, 15); | |
501 const __m128i sign_in2 = _mm_srai_epi16(in2, 15); | |
502 const __m128i sign_in3 = _mm_srai_epi16(in3, 15); | |
503 const __m128i sign_in4 = _mm_srai_epi16(in4, 15); | |
504 const __m128i sign_in5 = _mm_srai_epi16(in5, 15); | |
505 const __m128i sign_in6 = _mm_srai_epi16(in6, 15); | |
506 const __m128i sign_in7 = _mm_srai_epi16(in7, 15); | |
507 in0 = _mm_sub_epi16(in0, sign_in0); | |
508 in1 = _mm_sub_epi16(in1, sign_in1); | |
509 in2 = _mm_sub_epi16(in2, sign_in2); | |
510 in3 = _mm_sub_epi16(in3, sign_in3); | |
511 in4 = _mm_sub_epi16(in4, sign_in4); | |
512 in5 = _mm_sub_epi16(in5, sign_in5); | |
513 in6 = _mm_sub_epi16(in6, sign_in6); | |
514 in7 = _mm_sub_epi16(in7, sign_in7); | |
515 in0 = _mm_srai_epi16(in0, 1); | |
516 in1 = _mm_srai_epi16(in1, 1); | |
517 in2 = _mm_srai_epi16(in2, 1); | |
518 in3 = _mm_srai_epi16(in3, 1); | |
519 in4 = _mm_srai_epi16(in4, 1); | |
520 in5 = _mm_srai_epi16(in5, 1); | |
521 in6 = _mm_srai_epi16(in6, 1); | |
522 in7 = _mm_srai_epi16(in7, 1); | |
523 // store results | |
524 _mm_store_si128((__m128i *)(output + 0 * 8), in0); | |
525 _mm_store_si128((__m128i *)(output + 1 * 8), in1); | |
526 _mm_store_si128((__m128i *)(output + 2 * 8), in2); | |
527 _mm_store_si128((__m128i *)(output + 3 * 8), in3); | |
528 _mm_store_si128((__m128i *)(output + 4 * 8), in4); | |
529 _mm_store_si128((__m128i *)(output + 5 * 8), in5); | |
530 _mm_store_si128((__m128i *)(output + 6 * 8), in6); | |
531 _mm_store_si128((__m128i *)(output + 7 * 8), in7); | |
532 } | |
533 } | |
534 | |
535 // load 8x8 array | |
536 static INLINE void load_buffer_8x8_avx2(const int16_t *input, __m128i *in, | |
537 int stride) { | |
538 in[0] = _mm_load_si128((const __m128i *)(input + 0 * stride)); | |
539 in[1] = _mm_load_si128((const __m128i *)(input + 1 * stride)); | |
540 in[2] = _mm_load_si128((const __m128i *)(input + 2 * stride)); | |
541 in[3] = _mm_load_si128((const __m128i *)(input + 3 * stride)); | |
542 in[4] = _mm_load_si128((const __m128i *)(input + 4 * stride)); | |
543 in[5] = _mm_load_si128((const __m128i *)(input + 5 * stride)); | |
544 in[6] = _mm_load_si128((const __m128i *)(input + 6 * stride)); | |
545 in[7] = _mm_load_si128((const __m128i *)(input + 7 * stride)); | |
546 | |
547 in[0] = _mm_slli_epi16(in[0], 2); | |
548 in[1] = _mm_slli_epi16(in[1], 2); | |
549 in[2] = _mm_slli_epi16(in[2], 2); | |
550 in[3] = _mm_slli_epi16(in[3], 2); | |
551 in[4] = _mm_slli_epi16(in[4], 2); | |
552 in[5] = _mm_slli_epi16(in[5], 2); | |
553 in[6] = _mm_slli_epi16(in[6], 2); | |
554 in[7] = _mm_slli_epi16(in[7], 2); | |
555 } | |
556 | |
557 // right shift and rounding | |
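// With bit == 1, as used by vp9_fht8x8_avx2 below, bit_m02 is negative, the
// rounding add is skipped and the result is (x - (x >> 15)) >> 1, the same
// divide-by-two-toward-zero used in the vp9_fdct8x8_avx2 post-condition
// above. For bit >= 2 a rounding constant of 1 << (bit - 2) is added first.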
558 static INLINE void right_shift_8x8_avx2(__m128i *res, int const bit) { | |
559 const __m128i kOne = _mm_set1_epi16(1); | |
560 const int bit_m02 = bit - 2; | |
561 __m128i sign0 = _mm_srai_epi16(res[0], 15); | |
562 __m128i sign1 = _mm_srai_epi16(res[1], 15); | |
563 __m128i sign2 = _mm_srai_epi16(res[2], 15); | |
564 __m128i sign3 = _mm_srai_epi16(res[3], 15); | |
565 __m128i sign4 = _mm_srai_epi16(res[4], 15); | |
566 __m128i sign5 = _mm_srai_epi16(res[5], 15); | |
567 __m128i sign6 = _mm_srai_epi16(res[6], 15); | |
568 __m128i sign7 = _mm_srai_epi16(res[7], 15); | |
569 | |
570 if (bit_m02 >= 0) { | |
571 __m128i k_const_rounding = _mm_slli_epi16(kOne, bit_m02); | |
572 res[0] = _mm_add_epi16(res[0], k_const_rounding); | |
573 res[1] = _mm_add_epi16(res[1], k_const_rounding); | |
574 res[2] = _mm_add_epi16(res[2], k_const_rounding); | |
575 res[3] = _mm_add_epi16(res[3], k_const_rounding); | |
576 res[4] = _mm_add_epi16(res[4], k_const_rounding); | |
577 res[5] = _mm_add_epi16(res[5], k_const_rounding); | |
578 res[6] = _mm_add_epi16(res[6], k_const_rounding); | |
579 res[7] = _mm_add_epi16(res[7], k_const_rounding); | |
580 } | |
581 | |
582 res[0] = _mm_sub_epi16(res[0], sign0); | |
583 res[1] = _mm_sub_epi16(res[1], sign1); | |
584 res[2] = _mm_sub_epi16(res[2], sign2); | |
585 res[3] = _mm_sub_epi16(res[3], sign3); | |
586 res[4] = _mm_sub_epi16(res[4], sign4); | |
587 res[5] = _mm_sub_epi16(res[5], sign5); | |
588 res[6] = _mm_sub_epi16(res[6], sign6); | |
589 res[7] = _mm_sub_epi16(res[7], sign7); | |
590 | |
591 res[0] = _mm_srai_epi16(res[0], bit); | |
592 res[1] = _mm_srai_epi16(res[1], bit); | |
593 res[2] = _mm_srai_epi16(res[2], bit); | |
594 res[3] = _mm_srai_epi16(res[3], bit); | |
595 res[4] = _mm_srai_epi16(res[4], bit); | |
596 res[5] = _mm_srai_epi16(res[5], bit); | |
597 res[6] = _mm_srai_epi16(res[6], bit); | |
598 res[7] = _mm_srai_epi16(res[7], bit); | |
599 } | |
600 | |
601 // write 8x8 array | |
602 static INLINE void write_buffer_8x8_avx2(int16_t *output, __m128i *res, int stride) { |
603 _mm_store_si128((__m128i *)(output + 0 * stride), res[0]); | |
604 _mm_store_si128((__m128i *)(output + 1 * stride), res[1]); | |
605 _mm_store_si128((__m128i *)(output + 2 * stride), res[2]); | |
606 _mm_store_si128((__m128i *)(output + 3 * stride), res[3]); | |
607 _mm_store_si128((__m128i *)(output + 4 * stride), res[4]); | |
608 _mm_store_si128((__m128i *)(output + 5 * stride), res[5]); | |
609 _mm_store_si128((__m128i *)(output + 6 * stride), res[6]); | |
610 _mm_store_si128((__m128i *)(output + 7 * stride), res[7]); | |
611 } | |
612 | |
613 // perform in-place transpose | |
614 static INLINE void array_transpose_8x8_avx2(__m128i *in, __m128i *res) { | |
615 const __m128i tr0_0 = _mm_unpacklo_epi16(in[0], in[1]); | |
616 const __m128i tr0_1 = _mm_unpacklo_epi16(in[2], in[3]); | |
617 const __m128i tr0_2 = _mm_unpackhi_epi16(in[0], in[1]); | |
618 const __m128i tr0_3 = _mm_unpackhi_epi16(in[2], in[3]); | |
619 const __m128i tr0_4 = _mm_unpacklo_epi16(in[4], in[5]); | |
620 const __m128i tr0_5 = _mm_unpacklo_epi16(in[6], in[7]); | |
621 const __m128i tr0_6 = _mm_unpackhi_epi16(in[4], in[5]); | |
622 const __m128i tr0_7 = _mm_unpackhi_epi16(in[6], in[7]); | |
623 // 00 10 01 11 02 12 03 13 | |
624 // 20 30 21 31 22 32 23 33 | |
625 // 04 14 05 15 06 16 07 17 | |
626 // 24 34 25 35 26 36 27 37 | |
627 // 40 50 41 51 42 52 43 53 | |
628 // 60 70 61 71 62 72 63 73 | |
629 // 44 54 45 55 46 56 47 57 | |
630 // 64 74 65 75 66 76 67 77 | |
631 const __m128i tr1_0 = _mm_unpacklo_epi32(tr0_0, tr0_1); | |
632 const __m128i tr1_1 = _mm_unpacklo_epi32(tr0_4, tr0_5); | |
633 const __m128i tr1_2 = _mm_unpackhi_epi32(tr0_0, tr0_1); | |
634 const __m128i tr1_3 = _mm_unpackhi_epi32(tr0_4, tr0_5); | |
635 const __m128i tr1_4 = _mm_unpacklo_epi32(tr0_2, tr0_3); | |
636 const __m128i tr1_5 = _mm_unpacklo_epi32(tr0_6, tr0_7); | |
637 const __m128i tr1_6 = _mm_unpackhi_epi32(tr0_2, tr0_3); | |
638 const __m128i tr1_7 = _mm_unpackhi_epi32(tr0_6, tr0_7); | |
639 // 00 10 20 30 01 11 21 31 | |
640 // 40 50 60 70 41 51 61 71 | |
641 // 02 12 22 32 03 13 23 33 | |
642 // 42 52 62 72 43 53 63 73 | |
643 // 04 14 24 34 05 15 25 35 | |
644 // 44 54 64 74 45 55 65 75 | |
645 // 06 16 26 36 07 17 27 37 | |
646 // 46 56 66 76 47 57 67 77 | |
647 res[0] = _mm_unpacklo_epi64(tr1_0, tr1_1); | |
648 res[1] = _mm_unpackhi_epi64(tr1_0, tr1_1); | |
649 res[2] = _mm_unpacklo_epi64(tr1_2, tr1_3); | |
650 res[3] = _mm_unpackhi_epi64(tr1_2, tr1_3); | |
651 res[4] = _mm_unpacklo_epi64(tr1_4, tr1_5); | |
652 res[5] = _mm_unpackhi_epi64(tr1_4, tr1_5); | |
653 res[6] = _mm_unpacklo_epi64(tr1_6, tr1_7); | |
654 res[7] = _mm_unpackhi_epi64(tr1_6, tr1_7); | |
655 // 00 10 20 30 40 50 60 70 | |
656 // 01 11 21 31 41 51 61 71 | |
657 // 02 12 22 32 42 52 62 72 | |
658 // 03 13 23 33 43 53 63 73 | |
659 // 04 14 24 34 44 54 64 74 | |
660 // 05 15 25 35 45 55 65 75 | |
661 // 06 16 26 36 46 56 66 76 | |
662 // 07 17 27 37 47 57 67 77 | |
663 } | |
664 | |
665 void fdct8_avx2(__m128i *in) { | |
666 // constants | |
667 const __m128i k__cospi_p16_p16 = _mm_set1_epi16(cospi_16_64); | |
668 const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64); | |
669 const __m128i k__cospi_p24_p08 = pair_set_epi16(cospi_24_64, cospi_8_64); | |
670 const __m128i k__cospi_m08_p24 = pair_set_epi16(-cospi_8_64, cospi_24_64); | |
671 const __m128i k__cospi_p28_p04 = pair_set_epi16(cospi_28_64, cospi_4_64); | |
672 const __m128i k__cospi_m04_p28 = pair_set_epi16(-cospi_4_64, cospi_28_64); | |
673 const __m128i k__cospi_p12_p20 = pair_set_epi16(cospi_12_64, cospi_20_64); | |
674 const __m128i k__cospi_m20_p12 = pair_set_epi16(-cospi_20_64, cospi_12_64); | |
675 const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING); | |
676 __m128i u0, u1, u2, u3, u4, u5, u6, u7; | |
677 __m128i v0, v1, v2, v3, v4, v5, v6, v7; | |
678 __m128i s0, s1, s2, s3, s4, s5, s6, s7; | |
679 | |
680 // stage 1 | |
681 s0 = _mm_add_epi16(in[0], in[7]); | |
682 s1 = _mm_add_epi16(in[1], in[6]); | |
683 s2 = _mm_add_epi16(in[2], in[5]); | |
684 s3 = _mm_add_epi16(in[3], in[4]); | |
685 s4 = _mm_sub_epi16(in[3], in[4]); | |
686 s5 = _mm_sub_epi16(in[2], in[5]); | |
687 s6 = _mm_sub_epi16(in[1], in[6]); | |
688 s7 = _mm_sub_epi16(in[0], in[7]); | |
689 | |
690 u0 = _mm_add_epi16(s0, s3); | |
691 u1 = _mm_add_epi16(s1, s2); | |
692 u2 = _mm_sub_epi16(s1, s2); | |
693 u3 = _mm_sub_epi16(s0, s3); | |
694 // interleave and perform butterfly multiplication/addition | |
695 v0 = _mm_unpacklo_epi16(u0, u1); | |
696 v1 = _mm_unpackhi_epi16(u0, u1); | |
697 v2 = _mm_unpacklo_epi16(u2, u3); | |
698 v3 = _mm_unpackhi_epi16(u2, u3); | |
699 | |
700 u0 = _mm_madd_epi16(v0, k__cospi_p16_p16); | |
701 u1 = _mm_madd_epi16(v1, k__cospi_p16_p16); | |
702 u2 = _mm_madd_epi16(v0, k__cospi_p16_m16); | |
703 u3 = _mm_madd_epi16(v1, k__cospi_p16_m16); | |
704 u4 = _mm_madd_epi16(v2, k__cospi_p24_p08); | |
705 u5 = _mm_madd_epi16(v3, k__cospi_p24_p08); | |
706 u6 = _mm_madd_epi16(v2, k__cospi_m08_p24); | |
707 u7 = _mm_madd_epi16(v3, k__cospi_m08_p24); | |
708 | |
709 // shift and rounding | |
710 v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING); | |
711 v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING); | |
712 v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING); | |
713 v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING); | |
714 v4 = _mm_add_epi32(u4, k__DCT_CONST_ROUNDING); | |
715 v5 = _mm_add_epi32(u5, k__DCT_CONST_ROUNDING); | |
716 v6 = _mm_add_epi32(u6, k__DCT_CONST_ROUNDING); | |
717 v7 = _mm_add_epi32(u7, k__DCT_CONST_ROUNDING); | |
718 | |
719 u0 = _mm_srai_epi32(v0, DCT_CONST_BITS); | |
720 u1 = _mm_srai_epi32(v1, DCT_CONST_BITS); | |
721 u2 = _mm_srai_epi32(v2, DCT_CONST_BITS); | |
722 u3 = _mm_srai_epi32(v3, DCT_CONST_BITS); | |
723 u4 = _mm_srai_epi32(v4, DCT_CONST_BITS); | |
724 u5 = _mm_srai_epi32(v5, DCT_CONST_BITS); | |
725 u6 = _mm_srai_epi32(v6, DCT_CONST_BITS); | |
726 u7 = _mm_srai_epi32(v7, DCT_CONST_BITS); | |
727 | |
728 in[0] = _mm_packs_epi32(u0, u1); | |
729 in[2] = _mm_packs_epi32(u4, u5); | |
730 in[4] = _mm_packs_epi32(u2, u3); | |
731 in[6] = _mm_packs_epi32(u6, u7); | |
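  // The p16 butterflies above produce outputs 0 and 4 and the 24/8
  // butterflies outputs 2 and 6, hence the even-index stores; the odd
  // outputs 1, 3, 5 and 7 are produced by stages 2-4 below.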
732 | |
733 // stage 2 | |
734 // interleave and perform butterfly multiplication/addition | |
735 u0 = _mm_unpacklo_epi16(s6, s5); | |
736 u1 = _mm_unpackhi_epi16(s6, s5); | |
737 v0 = _mm_madd_epi16(u0, k__cospi_p16_m16); | |
738 v1 = _mm_madd_epi16(u1, k__cospi_p16_m16); | |
739 v2 = _mm_madd_epi16(u0, k__cospi_p16_p16); | |
740 v3 = _mm_madd_epi16(u1, k__cospi_p16_p16); | |
741 | |
742 // shift and rounding | |
743 u0 = _mm_add_epi32(v0, k__DCT_CONST_ROUNDING); | |
744 u1 = _mm_add_epi32(v1, k__DCT_CONST_ROUNDING); | |
745 u2 = _mm_add_epi32(v2, k__DCT_CONST_ROUNDING); | |
746 u3 = _mm_add_epi32(v3, k__DCT_CONST_ROUNDING); | |
747 | |
748 v0 = _mm_srai_epi32(u0, DCT_CONST_BITS); | |
749 v1 = _mm_srai_epi32(u1, DCT_CONST_BITS); | |
750 v2 = _mm_srai_epi32(u2, DCT_CONST_BITS); | |
751 v3 = _mm_srai_epi32(u3, DCT_CONST_BITS); | |
752 | |
753 u0 = _mm_packs_epi32(v0, v1); | |
754 u1 = _mm_packs_epi32(v2, v3); | |
755 | |
756 // stage 3 | |
757 s0 = _mm_add_epi16(s4, u0); | |
758 s1 = _mm_sub_epi16(s4, u0); | |
759 s2 = _mm_sub_epi16(s7, u1); | |
760 s3 = _mm_add_epi16(s7, u1); | |
761 | |
762 // stage 4 | |
763 u0 = _mm_unpacklo_epi16(s0, s3); | |
764 u1 = _mm_unpackhi_epi16(s0, s3); | |
765 u2 = _mm_unpacklo_epi16(s1, s2); | |
766 u3 = _mm_unpackhi_epi16(s1, s2); | |
767 | |
768 v0 = _mm_madd_epi16(u0, k__cospi_p28_p04); | |
769 v1 = _mm_madd_epi16(u1, k__cospi_p28_p04); | |
770 v2 = _mm_madd_epi16(u2, k__cospi_p12_p20); | |
771 v3 = _mm_madd_epi16(u3, k__cospi_p12_p20); | |
772 v4 = _mm_madd_epi16(u2, k__cospi_m20_p12); | |
773 v5 = _mm_madd_epi16(u3, k__cospi_m20_p12); | |
774 v6 = _mm_madd_epi16(u0, k__cospi_m04_p28); | |
775 v7 = _mm_madd_epi16(u1, k__cospi_m04_p28); | |
776 | |
777 // shift and rounding | |
778 u0 = _mm_add_epi32(v0, k__DCT_CONST_ROUNDING); | |
779 u1 = _mm_add_epi32(v1, k__DCT_CONST_ROUNDING); | |
780 u2 = _mm_add_epi32(v2, k__DCT_CONST_ROUNDING); | |
781 u3 = _mm_add_epi32(v3, k__DCT_CONST_ROUNDING); | |
782 u4 = _mm_add_epi32(v4, k__DCT_CONST_ROUNDING); | |
783 u5 = _mm_add_epi32(v5, k__DCT_CONST_ROUNDING); | |
784 u6 = _mm_add_epi32(v6, k__DCT_CONST_ROUNDING); | |
785 u7 = _mm_add_epi32(v7, k__DCT_CONST_ROUNDING); | |
786 | |
787 v0 = _mm_srai_epi32(u0, DCT_CONST_BITS); | |
788 v1 = _mm_srai_epi32(u1, DCT_CONST_BITS); | |
789 v2 = _mm_srai_epi32(u2, DCT_CONST_BITS); | |
790 v3 = _mm_srai_epi32(u3, DCT_CONST_BITS); | |
791 v4 = _mm_srai_epi32(u4, DCT_CONST_BITS); | |
792 v5 = _mm_srai_epi32(u5, DCT_CONST_BITS); | |
793 v6 = _mm_srai_epi32(u6, DCT_CONST_BITS); | |
794 v7 = _mm_srai_epi32(u7, DCT_CONST_BITS); | |
795 | |
796 in[1] = _mm_packs_epi32(v0, v1); | |
797 in[3] = _mm_packs_epi32(v4, v5); | |
798 in[5] = _mm_packs_epi32(v2, v3); | |
799 in[7] = _mm_packs_epi32(v6, v7); | |
800 | |
801 // transpose | |
802 array_transpose_8x8_avx2(in, in); | |
803 } | |
804 | |
805 void fadst8_avx2(__m128i *in) { | |
806 // Constants | |
807 const __m128i k__cospi_p02_p30 = pair_set_epi16(cospi_2_64, cospi_30_64); | |
808 const __m128i k__cospi_p30_m02 = pair_set_epi16(cospi_30_64, -cospi_2_64); | |
809 const __m128i k__cospi_p10_p22 = pair_set_epi16(cospi_10_64, cospi_22_64); | |
810 const __m128i k__cospi_p22_m10 = pair_set_epi16(cospi_22_64, -cospi_10_64); | |
811 const __m128i k__cospi_p18_p14 = pair_set_epi16(cospi_18_64, cospi_14_64); | |
812 const __m128i k__cospi_p14_m18 = pair_set_epi16(cospi_14_64, -cospi_18_64); | |
813 const __m128i k__cospi_p26_p06 = pair_set_epi16(cospi_26_64, cospi_6_64); | |
814 const __m128i k__cospi_p06_m26 = pair_set_epi16(cospi_6_64, -cospi_26_64); | |
815 const __m128i k__cospi_p08_p24 = pair_set_epi16(cospi_8_64, cospi_24_64); | |
816 const __m128i k__cospi_p24_m08 = pair_set_epi16(cospi_24_64, -cospi_8_64); | |
817 const __m128i k__cospi_m24_p08 = pair_set_epi16(-cospi_24_64, cospi_8_64); | |
818 const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64); | |
819 const __m128i k__cospi_p16_p16 = _mm_set1_epi16(cospi_16_64); | |
820 const __m128i k__const_0 = _mm_set1_epi16(0); | |
821 const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING); | |
822 | |
823 __m128i u0, u1, u2, u3, u4, u5, u6, u7, u8, u9, u10, u11, u12, u13, u14, u15; | |
824 __m128i v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15; | |
825 __m128i w0, w1, w2, w3, w4, w5, w6, w7, w8, w9, w10, w11, w12, w13, w14, w15; | |
826 __m128i s0, s1, s2, s3, s4, s5, s6, s7; | |
827 __m128i in0, in1, in2, in3, in4, in5, in6, in7; | |
828 | |
829 // reorder the inputs for the butterfly stages |
830 in0 = in[7]; | |
831 in1 = in[0]; | |
832 in2 = in[5]; | |
833 in3 = in[2]; | |
834 in4 = in[3]; | |
835 in5 = in[4]; | |
836 in6 = in[1]; | |
837 in7 = in[6]; | |
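  // This is the same input permutation the scalar fadst8 uses:
  // x0 = input[7], x1 = input[0], x2 = input[5], x3 = input[2],
  // x4 = input[3], x5 = input[4], x6 = input[1], x7 = input[6].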
838 | |
839 // column transformation | |
840 // stage 1 | |
841 // interleave and multiply/add into 32-bit integer | |
842 s0 = _mm_unpacklo_epi16(in0, in1); | |
843 s1 = _mm_unpackhi_epi16(in0, in1); | |
844 s2 = _mm_unpacklo_epi16(in2, in3); | |
845 s3 = _mm_unpackhi_epi16(in2, in3); | |
846 s4 = _mm_unpacklo_epi16(in4, in5); | |
847 s5 = _mm_unpackhi_epi16(in4, in5); | |
848 s6 = _mm_unpacklo_epi16(in6, in7); | |
849 s7 = _mm_unpackhi_epi16(in6, in7); | |
850 | |
851 u0 = _mm_madd_epi16(s0, k__cospi_p02_p30); | |
852 u1 = _mm_madd_epi16(s1, k__cospi_p02_p30); | |
853 u2 = _mm_madd_epi16(s0, k__cospi_p30_m02); | |
854 u3 = _mm_madd_epi16(s1, k__cospi_p30_m02); | |
855 u4 = _mm_madd_epi16(s2, k__cospi_p10_p22); | |
856 u5 = _mm_madd_epi16(s3, k__cospi_p10_p22); | |
857 u6 = _mm_madd_epi16(s2, k__cospi_p22_m10); | |
858 u7 = _mm_madd_epi16(s3, k__cospi_p22_m10); | |
859 u8 = _mm_madd_epi16(s4, k__cospi_p18_p14); | |
860 u9 = _mm_madd_epi16(s5, k__cospi_p18_p14); | |
861 u10 = _mm_madd_epi16(s4, k__cospi_p14_m18); | |
862 u11 = _mm_madd_epi16(s5, k__cospi_p14_m18); | |
863 u12 = _mm_madd_epi16(s6, k__cospi_p26_p06); | |
864 u13 = _mm_madd_epi16(s7, k__cospi_p26_p06); | |
865 u14 = _mm_madd_epi16(s6, k__cospi_p06_m26); | |
866 u15 = _mm_madd_epi16(s7, k__cospi_p06_m26); | |
867 | |
868 // addition | |
869 w0 = _mm_add_epi32(u0, u8); | |
870 w1 = _mm_add_epi32(u1, u9); | |
871 w2 = _mm_add_epi32(u2, u10); | |
872 w3 = _mm_add_epi32(u3, u11); | |
873 w4 = _mm_add_epi32(u4, u12); | |
874 w5 = _mm_add_epi32(u5, u13); | |
875 w6 = _mm_add_epi32(u6, u14); | |
876 w7 = _mm_add_epi32(u7, u15); | |
877 w8 = _mm_sub_epi32(u0, u8); | |
878 w9 = _mm_sub_epi32(u1, u9); | |
879 w10 = _mm_sub_epi32(u2, u10); | |
880 w11 = _mm_sub_epi32(u3, u11); | |
881 w12 = _mm_sub_epi32(u4, u12); | |
882 w13 = _mm_sub_epi32(u5, u13); | |
883 w14 = _mm_sub_epi32(u6, u14); | |
884 w15 = _mm_sub_epi32(u7, u15); | |
885 | |
886 // shift and rounding | |
887 v0 = _mm_add_epi32(w0, k__DCT_CONST_ROUNDING); | |
888 v1 = _mm_add_epi32(w1, k__DCT_CONST_ROUNDING); | |
889 v2 = _mm_add_epi32(w2, k__DCT_CONST_ROUNDING); | |
890 v3 = _mm_add_epi32(w3, k__DCT_CONST_ROUNDING); | |
891 v4 = _mm_add_epi32(w4, k__DCT_CONST_ROUNDING); | |
892 v5 = _mm_add_epi32(w5, k__DCT_CONST_ROUNDING); | |
893 v6 = _mm_add_epi32(w6, k__DCT_CONST_ROUNDING); | |
894 v7 = _mm_add_epi32(w7, k__DCT_CONST_ROUNDING); | |
895 v8 = _mm_add_epi32(w8, k__DCT_CONST_ROUNDING); | |
896 v9 = _mm_add_epi32(w9, k__DCT_CONST_ROUNDING); | |
897 v10 = _mm_add_epi32(w10, k__DCT_CONST_ROUNDING); | |
898 v11 = _mm_add_epi32(w11, k__DCT_CONST_ROUNDING); | |
899 v12 = _mm_add_epi32(w12, k__DCT_CONST_ROUNDING); | |
900 v13 = _mm_add_epi32(w13, k__DCT_CONST_ROUNDING); | |
901 v14 = _mm_add_epi32(w14, k__DCT_CONST_ROUNDING); | |
902 v15 = _mm_add_epi32(w15, k__DCT_CONST_ROUNDING); | |
903 | |
904 u0 = _mm_srai_epi32(v0, DCT_CONST_BITS); | |
905 u1 = _mm_srai_epi32(v1, DCT_CONST_BITS); | |
906 u2 = _mm_srai_epi32(v2, DCT_CONST_BITS); | |
907 u3 = _mm_srai_epi32(v3, DCT_CONST_BITS); | |
908 u4 = _mm_srai_epi32(v4, DCT_CONST_BITS); | |
909 u5 = _mm_srai_epi32(v5, DCT_CONST_BITS); | |
910 u6 = _mm_srai_epi32(v6, DCT_CONST_BITS); | |
911 u7 = _mm_srai_epi32(v7, DCT_CONST_BITS); | |
912 u8 = _mm_srai_epi32(v8, DCT_CONST_BITS); | |
913 u9 = _mm_srai_epi32(v9, DCT_CONST_BITS); | |
914 u10 = _mm_srai_epi32(v10, DCT_CONST_BITS); | |
915 u11 = _mm_srai_epi32(v11, DCT_CONST_BITS); | |
916 u12 = _mm_srai_epi32(v12, DCT_CONST_BITS); | |
917 u13 = _mm_srai_epi32(v13, DCT_CONST_BITS); | |
918 u14 = _mm_srai_epi32(v14, DCT_CONST_BITS); | |
919 u15 = _mm_srai_epi32(v15, DCT_CONST_BITS); | |
920 | |
921 // back to 16-bit and pack 8 integers into __m128i | |
922 in[0] = _mm_packs_epi32(u0, u1); | |
923 in[1] = _mm_packs_epi32(u2, u3); | |
924 in[2] = _mm_packs_epi32(u4, u5); | |
925 in[3] = _mm_packs_epi32(u6, u7); | |
926 in[4] = _mm_packs_epi32(u8, u9); | |
927 in[5] = _mm_packs_epi32(u10, u11); | |
928 in[6] = _mm_packs_epi32(u12, u13); | |
929 in[7] = _mm_packs_epi32(u14, u15); | |
930 | |
931 // stage 2 | |
932 s0 = _mm_add_epi16(in[0], in[2]); | |
933 s1 = _mm_add_epi16(in[1], in[3]); | |
934 s2 = _mm_sub_epi16(in[0], in[2]); | |
935 s3 = _mm_sub_epi16(in[1], in[3]); | |
936 u0 = _mm_unpacklo_epi16(in[4], in[5]); | |
937 u1 = _mm_unpackhi_epi16(in[4], in[5]); | |
938 u2 = _mm_unpacklo_epi16(in[6], in[7]); | |
939 u3 = _mm_unpackhi_epi16(in[6], in[7]); | |
940 | |
941 v0 = _mm_madd_epi16(u0, k__cospi_p08_p24); | |
942 v1 = _mm_madd_epi16(u1, k__cospi_p08_p24); | |
943 v2 = _mm_madd_epi16(u0, k__cospi_p24_m08); | |
944 v3 = _mm_madd_epi16(u1, k__cospi_p24_m08); | |
945 v4 = _mm_madd_epi16(u2, k__cospi_m24_p08); | |
946 v5 = _mm_madd_epi16(u3, k__cospi_m24_p08); | |
947 v6 = _mm_madd_epi16(u2, k__cospi_p08_p24); | |
948 v7 = _mm_madd_epi16(u3, k__cospi_p08_p24); | |
949 | |
950 w0 = _mm_add_epi32(v0, v4); | |
951 w1 = _mm_add_epi32(v1, v5); | |
952 w2 = _mm_add_epi32(v2, v6); | |
953 w3 = _mm_add_epi32(v3, v7); | |
954 w4 = _mm_sub_epi32(v0, v4); | |
955 w5 = _mm_sub_epi32(v1, v5); | |
956 w6 = _mm_sub_epi32(v2, v6); | |
957 w7 = _mm_sub_epi32(v3, v7); | |
958 | |
959 v0 = _mm_add_epi32(w0, k__DCT_CONST_ROUNDING); | |
960 v1 = _mm_add_epi32(w1, k__DCT_CONST_ROUNDING); | |
961 v2 = _mm_add_epi32(w2, k__DCT_CONST_ROUNDING); | |
962 v3 = _mm_add_epi32(w3, k__DCT_CONST_ROUNDING); | |
963 v4 = _mm_add_epi32(w4, k__DCT_CONST_ROUNDING); | |
964 v5 = _mm_add_epi32(w5, k__DCT_CONST_ROUNDING); | |
965 v6 = _mm_add_epi32(w6, k__DCT_CONST_ROUNDING); | |
966 v7 = _mm_add_epi32(w7, k__DCT_CONST_ROUNDING); | |
967 | |
968 u0 = _mm_srai_epi32(v0, DCT_CONST_BITS); | |
969 u1 = _mm_srai_epi32(v1, DCT_CONST_BITS); | |
970 u2 = _mm_srai_epi32(v2, DCT_CONST_BITS); | |
971 u3 = _mm_srai_epi32(v3, DCT_CONST_BITS); | |
972 u4 = _mm_srai_epi32(v4, DCT_CONST_BITS); | |
973 u5 = _mm_srai_epi32(v5, DCT_CONST_BITS); | |
974 u6 = _mm_srai_epi32(v6, DCT_CONST_BITS); | |
975 u7 = _mm_srai_epi32(v7, DCT_CONST_BITS); | |
976 | |
977 // back to 16-bit integers |
978 s4 = _mm_packs_epi32(u0, u1); | |
979 s5 = _mm_packs_epi32(u2, u3); | |
980 s6 = _mm_packs_epi32(u4, u5); | |
981 s7 = _mm_packs_epi32(u6, u7); | |
982 | |
983 // stage 3 | |
984 u0 = _mm_unpacklo_epi16(s2, s3); | |
985 u1 = _mm_unpackhi_epi16(s2, s3); | |
986 u2 = _mm_unpacklo_epi16(s6, s7); | |
987 u3 = _mm_unpackhi_epi16(s6, s7); | |
988 | |
989 v0 = _mm_madd_epi16(u0, k__cospi_p16_p16); | |
990 v1 = _mm_madd_epi16(u1, k__cospi_p16_p16); | |
991 v2 = _mm_madd_epi16(u0, k__cospi_p16_m16); | |
992 v3 = _mm_madd_epi16(u1, k__cospi_p16_m16); | |
993 v4 = _mm_madd_epi16(u2, k__cospi_p16_p16); | |
994 v5 = _mm_madd_epi16(u3, k__cospi_p16_p16); | |
995 v6 = _mm_madd_epi16(u2, k__cospi_p16_m16); | |
996 v7 = _mm_madd_epi16(u3, k__cospi_p16_m16); | |
997 | |
998 u0 = _mm_add_epi32(v0, k__DCT_CONST_ROUNDING); | |
999 u1 = _mm_add_epi32(v1, k__DCT_CONST_ROUNDING); | |
1000 u2 = _mm_add_epi32(v2, k__DCT_CONST_ROUNDING); | |
1001 u3 = _mm_add_epi32(v3, k__DCT_CONST_ROUNDING); | |
1002 u4 = _mm_add_epi32(v4, k__DCT_CONST_ROUNDING); | |
1003 u5 = _mm_add_epi32(v5, k__DCT_CONST_ROUNDING); | |
1004 u6 = _mm_add_epi32(v6, k__DCT_CONST_ROUNDING); | |
1005 u7 = _mm_add_epi32(v7, k__DCT_CONST_ROUNDING); | |
1006 | |
1007 v0 = _mm_srai_epi32(u0, DCT_CONST_BITS); | |
1008 v1 = _mm_srai_epi32(u1, DCT_CONST_BITS); | |
1009 v2 = _mm_srai_epi32(u2, DCT_CONST_BITS); | |
1010 v3 = _mm_srai_epi32(u3, DCT_CONST_BITS); | |
1011 v4 = _mm_srai_epi32(u4, DCT_CONST_BITS); | |
1012 v5 = _mm_srai_epi32(u5, DCT_CONST_BITS); | |
1013 v6 = _mm_srai_epi32(u6, DCT_CONST_BITS); | |
1014 v7 = _mm_srai_epi32(u7, DCT_CONST_BITS); | |
1015 | |
1016 s2 = _mm_packs_epi32(v0, v1); | |
1017 s3 = _mm_packs_epi32(v2, v3); | |
1018 s6 = _mm_packs_epi32(v4, v5); | |
1019 s7 = _mm_packs_epi32(v6, v7); | |
1020 | |
1021 // FIXME(jingning): do subtract using bit inversion? | |
1022 in[0] = s0; | |
1023 in[1] = _mm_sub_epi16(k__const_0, s4); | |
1024 in[2] = s6; | |
1025 in[3] = _mm_sub_epi16(k__const_0, s2); | |
1026 in[4] = s3; | |
1027 in[5] = _mm_sub_epi16(k__const_0, s7); | |
1028 in[6] = s5; | |
1029 in[7] = _mm_sub_epi16(k__const_0, s1); | |
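  // The odd output positions are negated, matching the scalar fadst8
  // output ordering x0, -x4, x6, -x2, x3, -x7, x5, -x1.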
1030 | |
1031 // transpose | |
1032 array_transpose_8x8_avx2(in, in); | |
1033 } | |
1034 | |
1035 void vp9_fht8x8_avx2(const int16_t *input, int16_t *output, | |
1036 int stride, int tx_type) { | |
1037 __m128i in[8]; | |
1038 | |
1039 switch (tx_type) { | |
1040 case DCT_DCT: | |
1041 vp9_fdct8x8_avx2(input, output, stride); | |
1042 break; | |
1043 case ADST_DCT: | |
1044 load_buffer_8x8_avx2(input, in, stride); | |
1045 fadst8_avx2(in); | |
1046 fdct8_avx2(in); | |
1047 right_shift_8x8_avx2(in, 1); | |
1048 write_buffer_8x8_avx2(output, in, 8); | |
1049 break; | |
1050 case DCT_ADST: | |
1051 load_buffer_8x8_avx2(input, in, stride); | |
1052 fdct8_avx2(in); | |
1053 fadst8_avx2(in); | |
1054 right_shift_8x8_avx2(in, 1); | |
1055 write_buffer_8x8_avx2(output, in, 8); | |
1056 break; | |
1057 case ADST_ADST: | |
1058 load_buffer_8x8_avx2(input, in, stride); | |
1059 fadst8_avx2(in); | |
1060 fadst8_avx2(in); | |
1061 right_shift_8x8_avx2(in, 1); | |
1062 write_buffer_8x8_avx2(output, in, 8); | |
1063 break; | |
1064 default: | |
1065 assert(0); | |
1066 break; | |
1067 } | |
1068 } | |
1069 | |
1070 void vp9_fdct16x16_avx2(const int16_t *input, int16_t *output, int stride) { | |
1071 // The 2D transform is done with two passes which are actually pretty | |
1072 // similar. In the first one, we transform the columns and transpose | |
1073 // the results. In the second one, we transform the rows. To achieve that, | |
1074 // as the first pass results are transposed, we transform the columns (that |
1075 // is, the transposed rows) and transpose the results (so that they go back |
1076 // in normal/row positions). | |
1077 int pass; | |
1078 // We need an intermediate buffer between passes. | |
1079 DECLARE_ALIGNED_ARRAY(16, int16_t, intermediate, 256); | |
1080 const int16_t *in = input; | |
1081 int16_t *out = intermediate; | |
1082 // Constants | |
1083 // In one case the constants used are all the same; in all the others we |
1084 // need a pair of them repeated four times. This is done by constructing |
1085 // the 32-bit constant corresponding to that pair. |
1086 const __m128i k__cospi_p16_p16 = _mm_set1_epi16(cospi_16_64); | |
1087 const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64); | |
1088 const __m128i k__cospi_p24_p08 = pair_set_epi16(cospi_24_64, cospi_8_64); | |
1089 const __m128i k__cospi_m24_m08 = pair_set_epi16(-cospi_24_64, -cospi_8_64); | |
1090 const __m128i k__cospi_m08_p24 = pair_set_epi16(-cospi_8_64, cospi_24_64); | |
1091 const __m128i k__cospi_p28_p04 = pair_set_epi16(cospi_28_64, cospi_4_64); | |
1092 const __m128i k__cospi_m04_p28 = pair_set_epi16(-cospi_4_64, cospi_28_64); | |
1093 const __m128i k__cospi_p12_p20 = pair_set_epi16(cospi_12_64, cospi_20_64); | |
1094 const __m128i k__cospi_m20_p12 = pair_set_epi16(-cospi_20_64, cospi_12_64); | |
1095 const __m128i k__cospi_p30_p02 = pair_set_epi16(cospi_30_64, cospi_2_64); | |
1096 const __m128i k__cospi_p14_p18 = pair_set_epi16(cospi_14_64, cospi_18_64); | |
1097 const __m128i k__cospi_m02_p30 = pair_set_epi16(-cospi_2_64, cospi_30_64); | |
1098 const __m128i k__cospi_m18_p14 = pair_set_epi16(-cospi_18_64, cospi_14_64); | |
1099 const __m128i k__cospi_p22_p10 = pair_set_epi16(cospi_22_64, cospi_10_64); | |
1100 const __m128i k__cospi_p06_p26 = pair_set_epi16(cospi_6_64, cospi_26_64); | |
1101 const __m128i k__cospi_m10_p22 = pair_set_epi16(-cospi_10_64, cospi_22_64); | |
1102 const __m128i k__cospi_m26_p06 = pair_set_epi16(-cospi_26_64, cospi_6_64); | |
1103 const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING); | |
1104 const __m128i kOne = _mm_set1_epi16(1); | |
1105 // Do the two transform/transpose passes | |
1106 for (pass = 0; pass < 2; ++pass) { | |
1107 // We process eight columns (transposed rows in second pass) at a time. | |
1108 int column_start; | |
1109 for (column_start = 0; column_start < 16; column_start += 8) { | |
1110 __m128i in00, in01, in02, in03, in04, in05, in06, in07; | |
1111 __m128i in08, in09, in10, in11, in12, in13, in14, in15; | |
1112 __m128i input0, input1, input2, input3, input4, input5, input6, input7; | |
1113 __m128i step1_0, step1_1, step1_2, step1_3; | |
1114 __m128i step1_4, step1_5, step1_6, step1_7; | |
1115 __m128i step2_1, step2_2, step2_3, step2_4, step2_5, step2_6; | |
1116 __m128i step3_0, step3_1, step3_2, step3_3; | |
1117 __m128i step3_4, step3_5, step3_6, step3_7; | |
1118 __m128i res00, res01, res02, res03, res04, res05, res06, res07; | |
1119 __m128i res08, res09, res10, res11, res12, res13, res14, res15; | |
1120 // Load and pre-condition input. | |
1121 if (0 == pass) { | |
1122 in00 = _mm_load_si128((const __m128i *)(in + 0 * stride)); | |
1123 in01 = _mm_load_si128((const __m128i *)(in + 1 * stride)); | |
1124 in02 = _mm_load_si128((const __m128i *)(in + 2 * stride)); | |
1125 in03 = _mm_load_si128((const __m128i *)(in + 3 * stride)); | |
1126 in04 = _mm_load_si128((const __m128i *)(in + 4 * stride)); | |
1127 in05 = _mm_load_si128((const __m128i *)(in + 5 * stride)); | |
1128 in06 = _mm_load_si128((const __m128i *)(in + 6 * stride)); | |
1129 in07 = _mm_load_si128((const __m128i *)(in + 7 * stride)); | |
1130 in08 = _mm_load_si128((const __m128i *)(in + 8 * stride)); | |
1131 in09 = _mm_load_si128((const __m128i *)(in + 9 * stride)); | |
1132 in10 = _mm_load_si128((const __m128i *)(in + 10 * stride)); | |
1133 in11 = _mm_load_si128((const __m128i *)(in + 11 * stride)); | |
1134 in12 = _mm_load_si128((const __m128i *)(in + 12 * stride)); | |
1135 in13 = _mm_load_si128((const __m128i *)(in + 13 * stride)); | |
1136 in14 = _mm_load_si128((const __m128i *)(in + 14 * stride)); | |
1137 in15 = _mm_load_si128((const __m128i *)(in + 15 * stride)); | |
1138 // x = x << 2 | |
1139 in00 = _mm_slli_epi16(in00, 2); | |
1140 in01 = _mm_slli_epi16(in01, 2); | |
1141 in02 = _mm_slli_epi16(in02, 2); | |
1142 in03 = _mm_slli_epi16(in03, 2); | |
1143 in04 = _mm_slli_epi16(in04, 2); | |
1144 in05 = _mm_slli_epi16(in05, 2); | |
1145 in06 = _mm_slli_epi16(in06, 2); | |
1146 in07 = _mm_slli_epi16(in07, 2); | |
1147 in08 = _mm_slli_epi16(in08, 2); | |
1148 in09 = _mm_slli_epi16(in09, 2); | |
1149 in10 = _mm_slli_epi16(in10, 2); | |
1150 in11 = _mm_slli_epi16(in11, 2); | |
1151 in12 = _mm_slli_epi16(in12, 2); | |
1152 in13 = _mm_slli_epi16(in13, 2); | |
1153 in14 = _mm_slli_epi16(in14, 2); | |
1154 in15 = _mm_slli_epi16(in15, 2); | |
1155 } else { | |
1156 in00 = _mm_load_si128((const __m128i *)(in + 0 * 16)); | |
1157 in01 = _mm_load_si128((const __m128i *)(in + 1 * 16)); | |
1158 in02 = _mm_load_si128((const __m128i *)(in + 2 * 16)); | |
1159 in03 = _mm_load_si128((const __m128i *)(in + 3 * 16)); | |
1160 in04 = _mm_load_si128((const __m128i *)(in + 4 * 16)); | |
1161 in05 = _mm_load_si128((const __m128i *)(in + 5 * 16)); | |
1162 in06 = _mm_load_si128((const __m128i *)(in + 6 * 16)); | |
1163 in07 = _mm_load_si128((const __m128i *)(in + 7 * 16)); | |
1164 in08 = _mm_load_si128((const __m128i *)(in + 8 * 16)); | |
1165 in09 = _mm_load_si128((const __m128i *)(in + 9 * 16)); | |
1166 in10 = _mm_load_si128((const __m128i *)(in + 10 * 16)); | |
1167 in11 = _mm_load_si128((const __m128i *)(in + 11 * 16)); | |
1168 in12 = _mm_load_si128((const __m128i *)(in + 12 * 16)); | |
1169 in13 = _mm_load_si128((const __m128i *)(in + 13 * 16)); | |
1170 in14 = _mm_load_si128((const __m128i *)(in + 14 * 16)); | |
1171 in15 = _mm_load_si128((const __m128i *)(in + 15 * 16)); | |
1172 // x = (x + 1) >> 2 | |
1173 in00 = _mm_add_epi16(in00, kOne); | |
1174 in01 = _mm_add_epi16(in01, kOne); | |
1175 in02 = _mm_add_epi16(in02, kOne); | |
1176 in03 = _mm_add_epi16(in03, kOne); | |
1177 in04 = _mm_add_epi16(in04, kOne); | |
1178 in05 = _mm_add_epi16(in05, kOne); | |
1179 in06 = _mm_add_epi16(in06, kOne); | |
1180 in07 = _mm_add_epi16(in07, kOne); | |
1181 in08 = _mm_add_epi16(in08, kOne); | |
1182 in09 = _mm_add_epi16(in09, kOne); | |
1183 in10 = _mm_add_epi16(in10, kOne); | |
1184 in11 = _mm_add_epi16(in11, kOne); | |
1185 in12 = _mm_add_epi16(in12, kOne); | |
1186 in13 = _mm_add_epi16(in13, kOne); | |
1187 in14 = _mm_add_epi16(in14, kOne); | |
1188 in15 = _mm_add_epi16(in15, kOne); | |
1189 in00 = _mm_srai_epi16(in00, 2); | |
1190 in01 = _mm_srai_epi16(in01, 2); | |
1191 in02 = _mm_srai_epi16(in02, 2); | |
1192 in03 = _mm_srai_epi16(in03, 2); | |
1193 in04 = _mm_srai_epi16(in04, 2); | |
1194 in05 = _mm_srai_epi16(in05, 2); | |
1195 in06 = _mm_srai_epi16(in06, 2); | |
1196 in07 = _mm_srai_epi16(in07, 2); | |
1197 in08 = _mm_srai_epi16(in08, 2); | |
1198 in09 = _mm_srai_epi16(in09, 2); | |
1199 in10 = _mm_srai_epi16(in10, 2); | |
1200 in11 = _mm_srai_epi16(in11, 2); | |
1201 in12 = _mm_srai_epi16(in12, 2); | |
1202 in13 = _mm_srai_epi16(in13, 2); | |
1203 in14 = _mm_srai_epi16(in14, 2); | |
1204 in15 = _mm_srai_epi16(in15, 2); | |
1205 } | |
1206 in += 8; | |
1207 // Calculate input for the first 8 results. | |
1208 { | |
1209 input0 = _mm_add_epi16(in00, in15); | |
1210 input1 = _mm_add_epi16(in01, in14); | |
1211 input2 = _mm_add_epi16(in02, in13); | |
1212 input3 = _mm_add_epi16(in03, in12); | |
1213 input4 = _mm_add_epi16(in04, in11); | |
1214 input5 = _mm_add_epi16(in05, in10); | |
1215 input6 = _mm_add_epi16(in06, in09); | |
1216 input7 = _mm_add_epi16(in07, in08); | |
1217 } | |
1218 // Calculate input for the next 8 results. | |
1219 { | |
1220 step1_0 = _mm_sub_epi16(in07, in08); | |
1221 step1_1 = _mm_sub_epi16(in06, in09); | |
1222 step1_2 = _mm_sub_epi16(in05, in10); | |
1223 step1_3 = _mm_sub_epi16(in04, in11); | |
1224 step1_4 = _mm_sub_epi16(in03, in12); | |
1225 step1_5 = _mm_sub_epi16(in02, in13); | |
1226 step1_6 = _mm_sub_epi16(in01, in14); | |
1227 step1_7 = _mm_sub_epi16(in00, in15); | |
1228 } | |
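// The 16 rows are now split in half: input0..input7 hold the symmetric sums
// in[k] + in[15 - k], which feed an 8-point DCT producing the even output
// rows (res00, res02, ..., res14), while step1_0..step1_7 hold the
// antisymmetric differences of the same pairs and produce the odd output
// rows below.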
1229 // Work on the first eight values; fdct8(input, even_results); | |
1230 { | |
1231 // Add/subtract | |
1232 const __m128i q0 = _mm_add_epi16(input0, input7); | |
1233 const __m128i q1 = _mm_add_epi16(input1, input6); | |
1234 const __m128i q2 = _mm_add_epi16(input2, input5); | |
1235 const __m128i q3 = _mm_add_epi16(input3, input4); | |
1236 const __m128i q4 = _mm_sub_epi16(input3, input4); | |
1237 const __m128i q5 = _mm_sub_epi16(input2, input5); | |
1238 const __m128i q6 = _mm_sub_epi16(input1, input6); | |
1239 const __m128i q7 = _mm_sub_epi16(input0, input7); | |
1240 // Work on first four results | |
1241 { | |
1242 // Add/subtract | |
1243 const __m128i r0 = _mm_add_epi16(q0, q3); | |
1244 const __m128i r1 = _mm_add_epi16(q1, q2); | |
1245 const __m128i r2 = _mm_sub_epi16(q1, q2); | |
1246 const __m128i r3 = _mm_sub_epi16(q0, q3); | |
1247 // Interleave to do the multiply by constants which gets us | |
1248 // into 32 bits. | |
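// Each _mm_madd_epi16 below computes, per 32-bit lane, a two-term dot
// product with the packed pair constant, e.g. for u0:
//   u0 = r0 * cospi_16_64 + r1 * cospi_16_64
// i.e. one butterfly multiply-accumulate carried out at 32-bit precision.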
1249 const __m128i t0 = _mm_unpacklo_epi16(r0, r1); | |
1250 const __m128i t1 = _mm_unpackhi_epi16(r0, r1); | |
1251 const __m128i t2 = _mm_unpacklo_epi16(r2, r3); | |
1252 const __m128i t3 = _mm_unpackhi_epi16(r2, r3); | |
1253 const __m128i u0 = _mm_madd_epi16(t0, k__cospi_p16_p16); | |
1254 const __m128i u1 = _mm_madd_epi16(t1, k__cospi_p16_p16); | |
1255 const __m128i u2 = _mm_madd_epi16(t0, k__cospi_p16_m16); | |
1256 const __m128i u3 = _mm_madd_epi16(t1, k__cospi_p16_m16); | |
1257 const __m128i u4 = _mm_madd_epi16(t2, k__cospi_p24_p08); | |
1258 const __m128i u5 = _mm_madd_epi16(t3, k__cospi_p24_p08); | |
1259 const __m128i u6 = _mm_madd_epi16(t2, k__cospi_m08_p24); | |
1260 const __m128i u7 = _mm_madd_epi16(t3, k__cospi_m08_p24); | |
1261 // dct_const_round_shift | |
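// k__DCT_CONST_ROUNDING is 1 << (DCT_CONST_BITS - 1) (see vp9_idct.h), so
// the add followed by the arithmetic shift right by DCT_CONST_BITS performs
// ROUND_POWER_OF_TWO() on every 32-bit lane; the _mm_packs_epi32 calls that
// follow then narrow the results back to int16 with saturation.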
1262 const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING); | |
1263 const __m128i v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING); | |
1264 const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING); | |
1265 const __m128i v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING); | |
1266 const __m128i v4 = _mm_add_epi32(u4, k__DCT_CONST_ROUNDING); | |
1267 const __m128i v5 = _mm_add_epi32(u5, k__DCT_CONST_ROUNDING); | |
1268 const __m128i v6 = _mm_add_epi32(u6, k__DCT_CONST_ROUNDING); | |
1269 const __m128i v7 = _mm_add_epi32(u7, k__DCT_CONST_ROUNDING); | |
1270 const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS); | |
1271 const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS); | |
1272 const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS); | |
1273 const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS); | |
1274 const __m128i w4 = _mm_srai_epi32(v4, DCT_CONST_BITS); | |
1275 const __m128i w5 = _mm_srai_epi32(v5, DCT_CONST_BITS); | |
1276 const __m128i w6 = _mm_srai_epi32(v6, DCT_CONST_BITS); | |
1277 const __m128i w7 = _mm_srai_epi32(v7, DCT_CONST_BITS); | |
1278 // Combine | |
1279 res00 = _mm_packs_epi32(w0, w1); | |
1280 res08 = _mm_packs_epi32(w2, w3); | |
1281 res04 = _mm_packs_epi32(w4, w5); | |
1282 res12 = _mm_packs_epi32(w6, w7); | |
1283 } | |
1284 // Work on next four results | |
1285 { | |
1286 // Interleave to do the multiply by constants which gets us | |
1287 // into 32 bits. | |
1288 const __m128i d0 = _mm_unpacklo_epi16(q6, q5); | |
1289 const __m128i d1 = _mm_unpackhi_epi16(q6, q5); | |
1290 const __m128i e0 = _mm_madd_epi16(d0, k__cospi_p16_m16); | |
1291 const __m128i e1 = _mm_madd_epi16(d1, k__cospi_p16_m16); | |
1292 const __m128i e2 = _mm_madd_epi16(d0, k__cospi_p16_p16); | |
1293 const __m128i e3 = _mm_madd_epi16(d1, k__cospi_p16_p16); | |
1294 // dct_const_round_shift | |
1295 const __m128i f0 = _mm_add_epi32(e0, k__DCT_CONST_ROUNDING); | |
1296 const __m128i f1 = _mm_add_epi32(e1, k__DCT_CONST_ROUNDING); | |
1297 const __m128i f2 = _mm_add_epi32(e2, k__DCT_CONST_ROUNDING); | |
1298 const __m128i f3 = _mm_add_epi32(e3, k__DCT_CONST_ROUNDING); | |
1299 const __m128i s0 = _mm_srai_epi32(f0, DCT_CONST_BITS); | |
1300 const __m128i s1 = _mm_srai_epi32(f1, DCT_CONST_BITS); | |
1301 const __m128i s2 = _mm_srai_epi32(f2, DCT_CONST_BITS); | |
1302 const __m128i s3 = _mm_srai_epi32(f3, DCT_CONST_BITS); | |
1303 // Combine | |
1304 const __m128i r0 = _mm_packs_epi32(s0, s1); | |
1305 const __m128i r1 = _mm_packs_epi32(s2, s3); | |
1306 // Add/subtract | |
1307 const __m128i x0 = _mm_add_epi16(q4, r0); | |
1308 const __m128i x1 = _mm_sub_epi16(q4, r0); | |
1309 const __m128i x2 = _mm_sub_epi16(q7, r1); | |
1310 const __m128i x3 = _mm_add_epi16(q7, r1); | |
1311 // Interleave to do the multiply by constants which gets us | |
1312 // into 32 bits. | |
1313 const __m128i t0 = _mm_unpacklo_epi16(x0, x3); | |
1314 const __m128i t1 = _mm_unpackhi_epi16(x0, x3); | |
1315 const __m128i t2 = _mm_unpacklo_epi16(x1, x2); | |
1316 const __m128i t3 = _mm_unpackhi_epi16(x1, x2); | |
1317 const __m128i u0 = _mm_madd_epi16(t0, k__cospi_p28_p04); | |
1318 const __m128i u1 = _mm_madd_epi16(t1, k__cospi_p28_p04); | |
1319 const __m128i u2 = _mm_madd_epi16(t0, k__cospi_m04_p28); | |
1320 const __m128i u3 = _mm_madd_epi16(t1, k__cospi_m04_p28); | |
1321 const __m128i u4 = _mm_madd_epi16(t2, k__cospi_p12_p20); | |
1322 const __m128i u5 = _mm_madd_epi16(t3, k__cospi_p12_p20); | |
1323 const __m128i u6 = _mm_madd_epi16(t2, k__cospi_m20_p12); | |
1324 const __m128i u7 = _mm_madd_epi16(t3, k__cospi_m20_p12); | |
1325 // dct_const_round_shift | |
1326 const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING); | |
1327 const __m128i v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING); | |
1328 const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING); | |
1329 const __m128i v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING); | |
1330 const __m128i v4 = _mm_add_epi32(u4, k__DCT_CONST_ROUNDING); | |
1331 const __m128i v5 = _mm_add_epi32(u5, k__DCT_CONST_ROUNDING); | |
1332 const __m128i v6 = _mm_add_epi32(u6, k__DCT_CONST_ROUNDING); | |
1333 const __m128i v7 = _mm_add_epi32(u7, k__DCT_CONST_ROUNDING); | |
1334 const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS); | |
1335 const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS); | |
1336 const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS); | |
1337 const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS); | |
1338 const __m128i w4 = _mm_srai_epi32(v4, DCT_CONST_BITS); | |
1339 const __m128i w5 = _mm_srai_epi32(v5, DCT_CONST_BITS); | |
1340 const __m128i w6 = _mm_srai_epi32(v6, DCT_CONST_BITS); | |
1341 const __m128i w7 = _mm_srai_epi32(v7, DCT_CONST_BITS); | |
1342 // Combine | |
1343 res02 = _mm_packs_epi32(w0, w1); | |
1344 res14 = _mm_packs_epi32(w2, w3); | |
1345 res10 = _mm_packs_epi32(w4, w5); | |
1346 res06 = _mm_packs_epi32(w6, w7); | |
1347 } | |
1348 } | |
1349 // Work on the next eight values; step1 -> odd_results | |
1350 { | |
1351 // step 2 | |
1352 { | |
1353 const __m128i t0 = _mm_unpacklo_epi16(step1_5, step1_2); | |
1354 const __m128i t1 = _mm_unpackhi_epi16(step1_5, step1_2); | |
1355 const __m128i t2 = _mm_unpacklo_epi16(step1_4, step1_3); | |
1356 const __m128i t3 = _mm_unpackhi_epi16(step1_4, step1_3); | |
1357 const __m128i u0 = _mm_madd_epi16(t0, k__cospi_p16_m16); | |
1358 const __m128i u1 = _mm_madd_epi16(t1, k__cospi_p16_m16); | |
1359 const __m128i u2 = _mm_madd_epi16(t2, k__cospi_p16_m16); | |
1360 const __m128i u3 = _mm_madd_epi16(t3, k__cospi_p16_m16); | |
1361 // dct_const_round_shift | |
1362 const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING); | |
1363 const __m128i v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING); | |
1364 const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING); | |
1365 const __m128i v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING); | |
1366 const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS); | |
1367 const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS); | |
1368 const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS); | |
1369 const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS); | |
1370 // Combine | |
1371 step2_2 = _mm_packs_epi32(w0, w1); | |
1372 step2_3 = _mm_packs_epi32(w2, w3); | |
1373 } | |
1374 { | |
1375 const __m128i t0 = _mm_unpacklo_epi16(step1_5, step1_2); | |
1376 const __m128i t1 = _mm_unpackhi_epi16(step1_5, step1_2); | |
1377 const __m128i t2 = _mm_unpacklo_epi16(step1_4, step1_3); | |
1378 const __m128i t3 = _mm_unpackhi_epi16(step1_4, step1_3); | |
1379 const __m128i u0 = _mm_madd_epi16(t0, k__cospi_p16_p16); | |
1380 const __m128i u1 = _mm_madd_epi16(t1, k__cospi_p16_p16); | |
1381 const __m128i u2 = _mm_madd_epi16(t2, k__cospi_p16_p16); | |
1382 const __m128i u3 = _mm_madd_epi16(t3, k__cospi_p16_p16); | |
1383 // dct_const_round_shift | |
1384 const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING); | |
1385 const __m128i v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING); | |
1386 const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING); | |
1387 const __m128i v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING); | |
1388 const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS); | |
1389 const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS); | |
1390 const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS); | |
1391 const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS); | |
1392 // Combine | |
1393 step2_5 = _mm_packs_epi32(w0, w1); | |
1394 step2_4 = _mm_packs_epi32(w2, w3); | |
1395 } | |
1396 // step 3 | |
1397 { | |
1398 step3_0 = _mm_add_epi16(step1_0, step2_3); | |
1399 step3_1 = _mm_add_epi16(step1_1, step2_2); | |
1400 step3_2 = _mm_sub_epi16(step1_1, step2_2); | |
1401 step3_3 = _mm_sub_epi16(step1_0, step2_3); | |
1402 step3_4 = _mm_sub_epi16(step1_7, step2_4); | |
1403 step3_5 = _mm_sub_epi16(step1_6, step2_5); | |
1404 step3_6 = _mm_add_epi16(step1_6, step2_5); | |
1405 step3_7 = _mm_add_epi16(step1_7, step2_4); | |
1406 } | |
1407 // step 4 | |
1408 { | |
1409 const __m128i t0 = _mm_unpacklo_epi16(step3_1, step3_6); | |
1410 const __m128i t1 = _mm_unpackhi_epi16(step3_1, step3_6); | |
1411 const __m128i t2 = _mm_unpacklo_epi16(step3_2, step3_5); | |
1412 const __m128i t3 = _mm_unpackhi_epi16(step3_2, step3_5); | |
1413 const __m128i u0 = _mm_madd_epi16(t0, k__cospi_m08_p24); | |
1414 const __m128i u1 = _mm_madd_epi16(t1, k__cospi_m08_p24); | |
1415 const __m128i u2 = _mm_madd_epi16(t2, k__cospi_m24_m08); | |
1416 const __m128i u3 = _mm_madd_epi16(t3, k__cospi_m24_m08); | |
1417 // dct_const_round_shift | |
1418 const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING); | |
1419 const __m128i v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING); | |
1420 const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING); | |
1421 const __m128i v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING); | |
1422 const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS); | |
1423 const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS); | |
1424 const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS); | |
1425 const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS); | |
1426 // Combine | |
1427 step2_1 = _mm_packs_epi32(w0, w1); | |
1428 step2_2 = _mm_packs_epi32(w2, w3); | |
1429 } | |
1430 { | |
1431 const __m128i t0 = _mm_unpacklo_epi16(step3_1, step3_6); | |
1432 const __m128i t1 = _mm_unpackhi_epi16(step3_1, step3_6); | |
1433 const __m128i t2 = _mm_unpacklo_epi16(step3_2, step3_5); | |
1434 const __m128i t3 = _mm_unpackhi_epi16(step3_2, step3_5); | |
1435 const __m128i u0 = _mm_madd_epi16(t0, k__cospi_p24_p08); | |
1436 const __m128i u1 = _mm_madd_epi16(t1, k__cospi_p24_p08); | |
1437 const __m128i u2 = _mm_madd_epi16(t2, k__cospi_m08_p24); | |
1438 const __m128i u3 = _mm_madd_epi16(t3, k__cospi_m08_p24); | |
1439 // dct_const_round_shift | |
1440 const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING); | |
1441 const __m128i v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING); | |
1442 const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING); | |
1443 const __m128i v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING); | |
1444 const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS); | |
1445 const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS); | |
1446 const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS); | |
1447 const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS); | |
1448 // Combine | |
1449 step2_6 = _mm_packs_epi32(w0, w1); | |
1450 step2_5 = _mm_packs_epi32(w2, w3); | |
1451 } | |
1452 // step 5 | |
1453 { | |
1454 step1_0 = _mm_add_epi16(step3_0, step2_1); | |
1455 step1_1 = _mm_sub_epi16(step3_0, step2_1); | |
1456 step1_2 = _mm_sub_epi16(step3_3, step2_2); | |
1457 step1_3 = _mm_add_epi16(step3_3, step2_2); | |
1458 step1_4 = _mm_add_epi16(step3_4, step2_5); | |
1459 step1_5 = _mm_sub_epi16(step3_4, step2_5); | |
1460 step1_6 = _mm_sub_epi16(step3_7, step2_6); | |
1461 step1_7 = _mm_add_epi16(step3_7, step2_6); | |
1462 } | |
1463 // step 6 | |
1464 { | |
1465 const __m128i t0 = _mm_unpacklo_epi16(step1_0, step1_7); | |
1466 const __m128i t1 = _mm_unpackhi_epi16(step1_0, step1_7); | |
1467 const __m128i t2 = _mm_unpacklo_epi16(step1_1, step1_6); | |
1468 const __m128i t3 = _mm_unpackhi_epi16(step1_1, step1_6); | |
1469 const __m128i u0 = _mm_madd_epi16(t0, k__cospi_p30_p02); | |
1470 const __m128i u1 = _mm_madd_epi16(t1, k__cospi_p30_p02); | |
1471 const __m128i u2 = _mm_madd_epi16(t2, k__cospi_p14_p18); | |
1472 const __m128i u3 = _mm_madd_epi16(t3, k__cospi_p14_p18); | |
1473 // dct_const_round_shift | |
1474 const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING); | |
1475 const __m128i v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING); | |
1476 const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING); | |
1477 const __m128i v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING); | |
1478 const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS); | |
1479 const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS); | |
1480 const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS); | |
1481 const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS); | |
1482 // Combine | |
1483 res01 = _mm_packs_epi32(w0, w1); | |
1484 res09 = _mm_packs_epi32(w2, w3); | |
1485 } | |
1486 { | |
1487 const __m128i t0 = _mm_unpacklo_epi16(step1_2, step1_5); | |
1488 const __m128i t1 = _mm_unpackhi_epi16(step1_2, step1_5); | |
1489 const __m128i t2 = _mm_unpacklo_epi16(step1_3, step1_4); | |
1490 const __m128i t3 = _mm_unpackhi_epi16(step1_3, step1_4); | |
1491 const __m128i u0 = _mm_madd_epi16(t0, k__cospi_p22_p10); | |
1492 const __m128i u1 = _mm_madd_epi16(t1, k__cospi_p22_p10); | |
1493 const __m128i u2 = _mm_madd_epi16(t2, k__cospi_p06_p26); | |
1494 const __m128i u3 = _mm_madd_epi16(t3, k__cospi_p06_p26); | |
1495 // dct_const_round_shift | |
1496 const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING); | |
1497 const __m128i v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING); | |
1498 const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING); | |
1499 const __m128i v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING); | |
1500 const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS); | |
1501 const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS); | |
1502 const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS); | |
1503 const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS); | |
1504 // Combine | |
1505 res05 = _mm_packs_epi32(w0, w1); | |
1506 res13 = _mm_packs_epi32(w2, w3); | |
1507 } | |
1508 { | |
1509 const __m128i t0 = _mm_unpacklo_epi16(step1_2, step1_5); | |
1510 const __m128i t1 = _mm_unpackhi_epi16(step1_2, step1_5); | |
1511 const __m128i t2 = _mm_unpacklo_epi16(step1_3, step1_4); | |
1512 const __m128i t3 = _mm_unpackhi_epi16(step1_3, step1_4); | |
1513 const __m128i u0 = _mm_madd_epi16(t0, k__cospi_m10_p22); | |
1514 const __m128i u1 = _mm_madd_epi16(t1, k__cospi_m10_p22); | |
1515 const __m128i u2 = _mm_madd_epi16(t2, k__cospi_m26_p06); | |
1516 const __m128i u3 = _mm_madd_epi16(t3, k__cospi_m26_p06); | |
1517 // dct_const_round_shift | |
1518 const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING); | |
1519 const __m128i v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING); | |
1520 const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING); | |
1521 const __m128i v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING); | |
1522 const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS); | |
1523 const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS); | |
1524 const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS); | |
1525 const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS); | |
1526 // Combine | |
1527 res11 = _mm_packs_epi32(w0, w1); | |
1528 res03 = _mm_packs_epi32(w2, w3); | |
1529 } | |
1530 { | |
1531 const __m128i t0 = _mm_unpacklo_epi16(step1_0, step1_7); | |
1532 const __m128i t1 = _mm_unpackhi_epi16(step1_0, step1_7); | |
1533 const __m128i t2 = _mm_unpacklo_epi16(step1_1, step1_6); | |
1534 const __m128i t3 = _mm_unpackhi_epi16(step1_1, step1_6); | |
1535 const __m128i u0 = _mm_madd_epi16(t0, k__cospi_m02_p30); | |
1536 const __m128i u1 = _mm_madd_epi16(t1, k__cospi_m02_p30); | |
1537 const __m128i u2 = _mm_madd_epi16(t2, k__cospi_m18_p14); | |
1538 const __m128i u3 = _mm_madd_epi16(t3, k__cospi_m18_p14); | |
1539 // dct_const_round_shift | |
1540 const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING); | |
1541 const __m128i v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING); | |
1542 const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING); | |
1543 const __m128i v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING); | |
1544 const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS); | |
1545 const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS); | |
1546 const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS); | |
1547 const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS); | |
1548 // Combine | |
1549 res15 = _mm_packs_epi32(w0, w1); | |
1550 res07 = _mm_packs_epi32(w2, w3); | |
1551 } | |
1552 } | |
1553 // Transpose the results, do it as two 8x8 transposes. | |
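// Each 8x8 transpose is the usual three rounds of interleaves (epi16, then
// epi32, then epi64); the numbered comments inside each block track where
// the elements land after every round.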
1554 { | |
1555 // 00 01 02 03 04 05 06 07 | |
1556 // 10 11 12 13 14 15 16 17 | |
1557 // 20 21 22 23 24 25 26 27 | |
1558 // 30 31 32 33 34 35 36 37 | |
1559 // 40 41 42 43 44 45 46 47 | |
1560 // 50 51 52 53 54 55 56 57 | |
1561 // 60 61 62 63 64 65 66 67 | |
1562 // 70 71 72 73 74 75 76 77 | |
1563 const __m128i tr0_0 = _mm_unpacklo_epi16(res00, res01); | |
1564 const __m128i tr0_1 = _mm_unpacklo_epi16(res02, res03); | |
1565 const __m128i tr0_2 = _mm_unpackhi_epi16(res00, res01); | |
1566 const __m128i tr0_3 = _mm_unpackhi_epi16(res02, res03); | |
1567 const __m128i tr0_4 = _mm_unpacklo_epi16(res04, res05); | |
1568 const __m128i tr0_5 = _mm_unpacklo_epi16(res06, res07); | |
1569 const __m128i tr0_6 = _mm_unpackhi_epi16(res04, res05); | |
1570 const __m128i tr0_7 = _mm_unpackhi_epi16(res06, res07); | |
1571 // 00 10 01 11 02 12 03 13 | |
1572 // 20 30 21 31 22 32 23 33 | |
1573 // 04 14 05 15 06 16 07 17 | |
1574 // 24 34 25 35 26 36 27 37 | |
1575 // 40 50 41 51 42 52 43 53 | |
1576 // 60 70 61 71 62 72 63 73 | |
1577 // 44 54 45 55 46 56 47 57 | |
1578 // 64 74 65 75 66 76 67 77 | |
1579 const __m128i tr1_0 = _mm_unpacklo_epi32(tr0_0, tr0_1); | |
1580 const __m128i tr1_1 = _mm_unpacklo_epi32(tr0_2, tr0_3); | |
1581 const __m128i tr1_2 = _mm_unpackhi_epi32(tr0_0, tr0_1); | |
1582 const __m128i tr1_3 = _mm_unpackhi_epi32(tr0_2, tr0_3); | |
1583 const __m128i tr1_4 = _mm_unpacklo_epi32(tr0_4, tr0_5); | |
1584 const __m128i tr1_5 = _mm_unpacklo_epi32(tr0_6, tr0_7); | |
1585 const __m128i tr1_6 = _mm_unpackhi_epi32(tr0_4, tr0_5); | |
1586 const __m128i tr1_7 = _mm_unpackhi_epi32(tr0_6, tr0_7); | |
1587 // 00 10 20 30 01 11 21 31 | |
1588 // 40 50 60 70 41 51 61 71 | |
1589 // 02 12 22 32 03 13 23 33 | |
1590 // 42 52 62 72 43 53 63 73 | |
1591 // 04 14 24 34 05 15 25 35 | |
1592 // 44 54 64 74 45 55 65 75 | |
1593 // 06 16 26 36 07 17 27 37 | |
1594 // 46 56 66 76 47 57 67 77 | |
1595 const __m128i tr2_0 = _mm_unpacklo_epi64(tr1_0, tr1_4); | |
1596 const __m128i tr2_1 = _mm_unpackhi_epi64(tr1_0, tr1_4); | |
1597 const __m128i tr2_2 = _mm_unpacklo_epi64(tr1_2, tr1_6); | |
1598 const __m128i tr2_3 = _mm_unpackhi_epi64(tr1_2, tr1_6); | |
1599 const __m128i tr2_4 = _mm_unpacklo_epi64(tr1_1, tr1_5); | |
1600 const __m128i tr2_5 = _mm_unpackhi_epi64(tr1_1, tr1_5); | |
1601 const __m128i tr2_6 = _mm_unpacklo_epi64(tr1_3, tr1_7); | |
1602 const __m128i tr2_7 = _mm_unpackhi_epi64(tr1_3, tr1_7); | |
1603 // 00 10 20 30 40 50 60 70 | |
1604 // 01 11 21 31 41 51 61 71 | |
1605 // 02 12 22 32 42 52 62 72 | |
1606 // 03 13 23 33 43 53 63 73 | |
1607 // 04 14 24 34 44 54 64 74 | |
1608 // 05 15 25 35 45 55 65 75 | |
1609 // 06 16 26 36 46 56 66 76 | |
1610 // 07 17 27 37 47 57 67 77 | |
1611 _mm_storeu_si128((__m128i *)(out + 0 * 16), tr2_0); | |
1612 _mm_storeu_si128((__m128i *)(out + 1 * 16), tr2_1); | |
1613 _mm_storeu_si128((__m128i *)(out + 2 * 16), tr2_2); | |
1614 _mm_storeu_si128((__m128i *)(out + 3 * 16), tr2_3); | |
1615 _mm_storeu_si128((__m128i *)(out + 4 * 16), tr2_4); | |
1616 _mm_storeu_si128((__m128i *)(out + 5 * 16), tr2_5); | |
1617 _mm_storeu_si128((__m128i *)(out + 6 * 16), tr2_6); | |
1618 _mm_storeu_si128((__m128i *)(out + 7 * 16), tr2_7); | |
1619 } | |
1620 { | |
1621 // 00 01 02 03 04 05 06 07 | |
1622 // 10 11 12 13 14 15 16 17 | |
1623 // 20 21 22 23 24 25 26 27 | |
1624 // 30 31 32 33 34 35 36 37 | |
1625 // 40 41 42 43 44 45 46 47 | |
1626 // 50 51 52 53 54 55 56 57 | |
1627 // 60 61 62 63 64 65 66 67 | |
1628 // 70 71 72 73 74 75 76 77 | |
1629 const __m128i tr0_0 = _mm_unpacklo_epi16(res08, res09); | |
1630 const __m128i tr0_1 = _mm_unpacklo_epi16(res10, res11); | |
1631 const __m128i tr0_2 = _mm_unpackhi_epi16(res08, res09); | |
1632 const __m128i tr0_3 = _mm_unpackhi_epi16(res10, res11); | |
1633 const __m128i tr0_4 = _mm_unpacklo_epi16(res12, res13); | |
1634 const __m128i tr0_5 = _mm_unpacklo_epi16(res14, res15); | |
1635 const __m128i tr0_6 = _mm_unpackhi_epi16(res12, res13); | |
1636 const __m128i tr0_7 = _mm_unpackhi_epi16(res14, res15); | |
1637 // 00 10 01 11 02 12 03 13 | |
1638 // 20 30 21 31 22 32 23 33 | |
1639 // 04 14 05 15 06 16 07 17 | |
1640 // 24 34 25 35 26 36 27 37 | |
1641 // 40 50 41 51 42 52 43 53 | |
1642 // 60 70 61 71 62 72 63 73 | |
1643 // 44 54 45 55 46 56 47 57 | |
1644 // 64 74 65 75 66 76 67 77 | |
1645 const __m128i tr1_0 = _mm_unpacklo_epi32(tr0_0, tr0_1); | |
1646 const __m128i tr1_1 = _mm_unpacklo_epi32(tr0_2, tr0_3); | |
1647 const __m128i tr1_2 = _mm_unpackhi_epi32(tr0_0, tr0_1); | |
1648 const __m128i tr1_3 = _mm_unpackhi_epi32(tr0_2, tr0_3); | |
1649 const __m128i tr1_4 = _mm_unpacklo_epi32(tr0_4, tr0_5); | |
1650 const __m128i tr1_5 = _mm_unpacklo_epi32(tr0_6, tr0_7); | |
1651 const __m128i tr1_6 = _mm_unpackhi_epi32(tr0_4, tr0_5); | |
1652 const __m128i tr1_7 = _mm_unpackhi_epi32(tr0_6, tr0_7); | |
1653 // 00 10 20 30 01 11 21 31 | |
1654 // 40 50 60 70 41 51 61 71 | |
1655 // 02 12 22 32 03 13 23 33 | |
1656 // 42 52 62 72 43 53 63 73 | |
1657 // 04 14 24 34 05 15 25 35 | |
1658 // 44 54 64 74 45 55 65 75 | |
1659 // 06 16 26 36 07 17 27 37 | |
1660 // 46 56 66 76 47 57 67 77 | |
1661 const __m128i tr2_0 = _mm_unpacklo_epi64(tr1_0, tr1_4); | |
1662 const __m128i tr2_1 = _mm_unpackhi_epi64(tr1_0, tr1_4); | |
1663 const __m128i tr2_2 = _mm_unpacklo_epi64(tr1_2, tr1_6); | |
1664 const __m128i tr2_3 = _mm_unpackhi_epi64(tr1_2, tr1_6); | |
1665 const __m128i tr2_4 = _mm_unpacklo_epi64(tr1_1, tr1_5); | |
1666 const __m128i tr2_5 = _mm_unpackhi_epi64(tr1_1, tr1_5); | |
1667 const __m128i tr2_6 = _mm_unpacklo_epi64(tr1_3, tr1_7); | |
1668 const __m128i tr2_7 = _mm_unpackhi_epi64(tr1_3, tr1_7); | |
1669 // 00 10 20 30 40 50 60 70 | |
1670 // 01 11 21 31 41 51 61 71 | |
1671 // 02 12 22 32 42 52 62 72 | |
1672 // 03 13 23 33 43 53 63 73 | |
1673 // 04 14 24 34 44 54 64 74 | |
1674 // 05 15 25 35 45 55 65 75 | |
1675 // 06 16 26 36 46 56 66 76 | |
1676 // 07 17 27 37 47 57 67 77 | |
1677 // Store results | |
1678 _mm_store_si128((__m128i *)(out + 8 + 0 * 16), tr2_0); | |
1679 _mm_store_si128((__m128i *)(out + 8 + 1 * 16), tr2_1); | |
1680 _mm_store_si128((__m128i *)(out + 8 + 2 * 16), tr2_2); | |
1681 _mm_store_si128((__m128i *)(out + 8 + 3 * 16), tr2_3); | |
1682 _mm_store_si128((__m128i *)(out + 8 + 4 * 16), tr2_4); | |
1683 _mm_store_si128((__m128i *)(out + 8 + 5 * 16), tr2_5); | |
1684 _mm_store_si128((__m128i *)(out + 8 + 6 * 16), tr2_6); | |
1685 _mm_store_si128((__m128i *)(out + 8 + 7 * 16), tr2_7); | |
1686 } | |
1687 out += 8 * 16; | |
1688 } | |
1689 // Setup in/out for next pass. | |
1690 in = intermediate; | |
1691 out = output; | |
1692 } | |
1693 } | |
1694 | |
1695 static INLINE void load_buffer_16x16_avx2(const int16_t* input, __m128i *in0, | |
1696 __m128i *in1, int stride) { | |
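// in0 receives the left 16x8 half of the block and in1 the right 16x8 half,
// each as sixteen __m128i rows of eight int16 coefficients.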
1697 // load first 8 columns | |
1698 load_buffer_8x8_avx2(input, in0, stride); | |
1699 load_buffer_8x8_avx2(input + 8 * stride, in0 + 8, stride); | |
1700 | |
1701 input += 8; | |
1702 // load second 8 columns | |
1703 load_buffer_8x8_avx2(input, in1, stride); | |
1704 load_buffer_8x8_avx2(input + 8 * stride, in1 + 8, stride); | |
1705 } | |
1706 | |
1707 static INLINE void write_buffer_16x16_avx2(int16_t *output, __m128i *in0, | |
1708 __m128i *in1, int stride) { | |
1709 // write first 8 columns | |
1710 write_buffer_8x8_avx2(output, in0, stride); | |
1711 write_buffer_8x8_avx2(output + 8 * stride, in0 + 8, stride); | |
1712 // write second 8 columns | |
1713 output += 8; | |
1714 write_buffer_8x8_avx2(output, in1, stride); | |
1715 write_buffer_8x8_avx2(output + 8 * stride, in1 + 8, stride); | |
1716 } | |
1717 | |
1718 static INLINE void array_transpose_16x16_avx2(__m128i *res0, __m128i *res1) { | |
1719 __m128i tbuf[8]; | |
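// Four 8x8 transposes: the two diagonal blocks are transposed in place and
// the two off-diagonal blocks are transposed and swapped; tbuf holds the
// transposed top-right block so it is not clobbered when the new top-right
// (the old bottom-left) is written into res1.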
1720 array_transpose_8x8_avx2(res0, res0); | |
1721 array_transpose_8x8_avx2(res1, tbuf); | |
1722 array_transpose_8x8_avx2(res0 + 8, res1); | |
1723 array_transpose_8x8_avx2(res1 + 8, res1 + 8); | |
1724 | |
1725 res0[8] = tbuf[0]; | |
1726 res0[9] = tbuf[1]; | |
1727 res0[10] = tbuf[2]; | |
1728 res0[11] = tbuf[3]; | |
1729 res0[12] = tbuf[4]; | |
1730 res0[13] = tbuf[5]; | |
1731 res0[14] = tbuf[6]; | |
1732 res0[15] = tbuf[7]; | |
1733 } | |
1734 | |
1735 static INLINE void right_shift_16x16_avx2(__m128i *res0, __m128i *res1) { | |
1736 // perform rounding operations | |
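// Between the two 1-D passes every coefficient is scaled down by four;
// right_shift_8x8_avx2(..., 2) applies that two-bit shift (with the rounding
// implemented in that helper) to each group of eight rows.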
1737 right_shift_8x8_avx2(res0, 2); | |
1738 right_shift_8x8_avx2(res0 + 8, 2); | |
1739 right_shift_8x8_avx2(res1, 2); | |
1740 right_shift_8x8_avx2(res1 + 8, 2); | |
1741 } | |
1742 | |
1743 void fdct16_8col_avx2(__m128i *in) { | |
1744 // perform 16x16 1-D DCT for 8 columns | |
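// in[] holds sixteen __m128i rows of eight int16 lanes, so one call
// transforms eight columns at once; the stages below mirror the scalar
// fdct16() reference, using the 14-bit fixed-point (DCT_CONST_BITS) cospi
// constants.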
1745 __m128i i[8], s[8], p[8], t[8], u[16], v[16]; | |
1746 const __m128i k__cospi_p16_p16 = _mm_set1_epi16(cospi_16_64); | |
1747 const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64); | |
1748 const __m128i k__cospi_m16_p16 = pair_set_epi16(-cospi_16_64, cospi_16_64); | |
1749 const __m128i k__cospi_p24_p08 = pair_set_epi16(cospi_24_64, cospi_8_64); | |
1750 const __m128i k__cospi_m24_m08 = pair_set_epi16(-cospi_24_64, -cospi_8_64); | |
1751 const __m128i k__cospi_m08_p24 = pair_set_epi16(-cospi_8_64, cospi_24_64); | |
1752 const __m128i k__cospi_p28_p04 = pair_set_epi16(cospi_28_64, cospi_4_64); | |
1753 const __m128i k__cospi_m04_p28 = pair_set_epi16(-cospi_4_64, cospi_28_64); | |
1754 const __m128i k__cospi_p12_p20 = pair_set_epi16(cospi_12_64, cospi_20_64); | |
1755 const __m128i k__cospi_m20_p12 = pair_set_epi16(-cospi_20_64, cospi_12_64); | |
1756 const __m128i k__cospi_p30_p02 = pair_set_epi16(cospi_30_64, cospi_2_64); | |
1757 const __m128i k__cospi_p14_p18 = pair_set_epi16(cospi_14_64, cospi_18_64); | |
1758 const __m128i k__cospi_m02_p30 = pair_set_epi16(-cospi_2_64, cospi_30_64); | |
1759 const __m128i k__cospi_m18_p14 = pair_set_epi16(-cospi_18_64, cospi_14_64); | |
1760 const __m128i k__cospi_p22_p10 = pair_set_epi16(cospi_22_64, cospi_10_64); | |
1761 const __m128i k__cospi_p06_p26 = pair_set_epi16(cospi_6_64, cospi_26_64); | |
1762 const __m128i k__cospi_m10_p22 = pair_set_epi16(-cospi_10_64, cospi_22_64); | |
1763 const __m128i k__cospi_m26_p06 = pair_set_epi16(-cospi_26_64, cospi_6_64); | |
1764 const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING); | |
1765 | |
1766 // stage 1 | |
1767 i[0] = _mm_add_epi16(in[0], in[15]); | |
1768 i[1] = _mm_add_epi16(in[1], in[14]); | |
1769 i[2] = _mm_add_epi16(in[2], in[13]); | |
1770 i[3] = _mm_add_epi16(in[3], in[12]); | |
1771 i[4] = _mm_add_epi16(in[4], in[11]); | |
1772 i[5] = _mm_add_epi16(in[5], in[10]); | |
1773 i[6] = _mm_add_epi16(in[6], in[9]); | |
1774 i[7] = _mm_add_epi16(in[7], in[8]); | |
1775 | |
1776 s[0] = _mm_sub_epi16(in[7], in[8]); | |
1777 s[1] = _mm_sub_epi16(in[6], in[9]); | |
1778 s[2] = _mm_sub_epi16(in[5], in[10]); | |
1779 s[3] = _mm_sub_epi16(in[4], in[11]); | |
1780 s[4] = _mm_sub_epi16(in[3], in[12]); | |
1781 s[5] = _mm_sub_epi16(in[2], in[13]); | |
1782 s[6] = _mm_sub_epi16(in[1], in[14]); | |
1783 s[7] = _mm_sub_epi16(in[0], in[15]); | |
1784 | |
1785 p[0] = _mm_add_epi16(i[0], i[7]); | |
1786 p[1] = _mm_add_epi16(i[1], i[6]); | |
1787 p[2] = _mm_add_epi16(i[2], i[5]); | |
1788 p[3] = _mm_add_epi16(i[3], i[4]); | |
1789 p[4] = _mm_sub_epi16(i[3], i[4]); | |
1790 p[5] = _mm_sub_epi16(i[2], i[5]); | |
1791 p[6] = _mm_sub_epi16(i[1], i[6]); | |
1792 p[7] = _mm_sub_epi16(i[0], i[7]); | |
1793 | |
1794 u[0] = _mm_add_epi16(p[0], p[3]); | |
1795 u[1] = _mm_add_epi16(p[1], p[2]); | |
1796 u[2] = _mm_sub_epi16(p[1], p[2]); | |
1797 u[3] = _mm_sub_epi16(p[0], p[3]); | |
1798 | |
1799 v[0] = _mm_unpacklo_epi16(u[0], u[1]); | |
1800 v[1] = _mm_unpackhi_epi16(u[0], u[1]); | |
1801 v[2] = _mm_unpacklo_epi16(u[2], u[3]); | |
1802 v[3] = _mm_unpackhi_epi16(u[2], u[3]); | |
1803 | |
1804 u[0] = _mm_madd_epi16(v[0], k__cospi_p16_p16); | |
1805 u[1] = _mm_madd_epi16(v[1], k__cospi_p16_p16); | |
1806 u[2] = _mm_madd_epi16(v[0], k__cospi_p16_m16); | |
1807 u[3] = _mm_madd_epi16(v[1], k__cospi_p16_m16); | |
1808 u[4] = _mm_madd_epi16(v[2], k__cospi_p24_p08); | |
1809 u[5] = _mm_madd_epi16(v[3], k__cospi_p24_p08); | |
1810 u[6] = _mm_madd_epi16(v[2], k__cospi_m08_p24); | |
1811 u[7] = _mm_madd_epi16(v[3], k__cospi_m08_p24); | |
1812 | |
1813 v[0] = _mm_add_epi32(u[0], k__DCT_CONST_ROUNDING); | |
1814 v[1] = _mm_add_epi32(u[1], k__DCT_CONST_ROUNDING); | |
1815 v[2] = _mm_add_epi32(u[2], k__DCT_CONST_ROUNDING); | |
1816 v[3] = _mm_add_epi32(u[3], k__DCT_CONST_ROUNDING); | |
1817 v[4] = _mm_add_epi32(u[4], k__DCT_CONST_ROUNDING); | |
1818 v[5] = _mm_add_epi32(u[5], k__DCT_CONST_ROUNDING); | |
1819 v[6] = _mm_add_epi32(u[6], k__DCT_CONST_ROUNDING); | |
1820 v[7] = _mm_add_epi32(u[7], k__DCT_CONST_ROUNDING); | |
1821 | |
1822 u[0] = _mm_srai_epi32(v[0], DCT_CONST_BITS); | |
1823 u[1] = _mm_srai_epi32(v[1], DCT_CONST_BITS); | |
1824 u[2] = _mm_srai_epi32(v[2], DCT_CONST_BITS); | |
1825 u[3] = _mm_srai_epi32(v[3], DCT_CONST_BITS); | |
1826 u[4] = _mm_srai_epi32(v[4], DCT_CONST_BITS); | |
1827 u[5] = _mm_srai_epi32(v[5], DCT_CONST_BITS); | |
1828 u[6] = _mm_srai_epi32(v[6], DCT_CONST_BITS); | |
1829 u[7] = _mm_srai_epi32(v[7], DCT_CONST_BITS); | |
1830 | |
1831 in[0] = _mm_packs_epi32(u[0], u[1]); | |
1832 in[4] = _mm_packs_epi32(u[4], u[5]); | |
1833 in[8] = _mm_packs_epi32(u[2], u[3]); | |
1834 in[12] = _mm_packs_epi32(u[6], u[7]); | |
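// Outputs 0, 4, 8 and 12 of the column DCT are now complete; the remaining
// even outputs 2, 6, 10 and 14 come from the p[4..7] path below.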
1835 | |
1836 u[0] = _mm_unpacklo_epi16(p[5], p[6]); | |
1837 u[1] = _mm_unpackhi_epi16(p[5], p[6]); | |
1838 v[0] = _mm_madd_epi16(u[0], k__cospi_m16_p16); | |
1839 v[1] = _mm_madd_epi16(u[1], k__cospi_m16_p16); | |
1840 v[2] = _mm_madd_epi16(u[0], k__cospi_p16_p16); | |
1841 v[3] = _mm_madd_epi16(u[1], k__cospi_p16_p16); | |
1842 | |
1843 u[0] = _mm_add_epi32(v[0], k__DCT_CONST_ROUNDING); | |
1844 u[1] = _mm_add_epi32(v[1], k__DCT_CONST_ROUNDING); | |
1845 u[2] = _mm_add_epi32(v[2], k__DCT_CONST_ROUNDING); | |
1846 u[3] = _mm_add_epi32(v[3], k__DCT_CONST_ROUNDING); | |
1847 | |
1848 v[0] = _mm_srai_epi32(u[0], DCT_CONST_BITS); | |
1849 v[1] = _mm_srai_epi32(u[1], DCT_CONST_BITS); | |
1850 v[2] = _mm_srai_epi32(u[2], DCT_CONST_BITS); | |
1851 v[3] = _mm_srai_epi32(u[3], DCT_CONST_BITS); | |
1852 | |
1853 u[0] = _mm_packs_epi32(v[0], v[1]); | |
1854 u[1] = _mm_packs_epi32(v[2], v[3]); | |
1855 | |
1856 t[0] = _mm_add_epi16(p[4], u[0]); | |
1857 t[1] = _mm_sub_epi16(p[4], u[0]); | |
1858 t[2] = _mm_sub_epi16(p[7], u[1]); | |
1859 t[3] = _mm_add_epi16(p[7], u[1]); | |
1860 | |
1861 u[0] = _mm_unpacklo_epi16(t[0], t[3]); | |
1862 u[1] = _mm_unpackhi_epi16(t[0], t[3]); | |
1863 u[2] = _mm_unpacklo_epi16(t[1], t[2]); | |
1864 u[3] = _mm_unpackhi_epi16(t[1], t[2]); | |
1865 | |
1866 v[0] = _mm_madd_epi16(u[0], k__cospi_p28_p04); | |
1867 v[1] = _mm_madd_epi16(u[1], k__cospi_p28_p04); | |
1868 v[2] = _mm_madd_epi16(u[2], k__cospi_p12_p20); | |
1869 v[3] = _mm_madd_epi16(u[3], k__cospi_p12_p20); | |
1870 v[4] = _mm_madd_epi16(u[2], k__cospi_m20_p12); | |
1871 v[5] = _mm_madd_epi16(u[3], k__cospi_m20_p12); | |
1872 v[6] = _mm_madd_epi16(u[0], k__cospi_m04_p28); | |
1873 v[7] = _mm_madd_epi16(u[1], k__cospi_m04_p28); | |
1874 | |
1875 u[0] = _mm_add_epi32(v[0], k__DCT_CONST_ROUNDING); | |
1876 u[1] = _mm_add_epi32(v[1], k__DCT_CONST_ROUNDING); | |
1877 u[2] = _mm_add_epi32(v[2], k__DCT_CONST_ROUNDING); | |
1878 u[3] = _mm_add_epi32(v[3], k__DCT_CONST_ROUNDING); | |
1879 u[4] = _mm_add_epi32(v[4], k__DCT_CONST_ROUNDING); | |
1880 u[5] = _mm_add_epi32(v[5], k__DCT_CONST_ROUNDING); | |
1881 u[6] = _mm_add_epi32(v[6], k__DCT_CONST_ROUNDING); | |
1882 u[7] = _mm_add_epi32(v[7], k__DCT_CONST_ROUNDING); | |
1883 | |
1884 v[0] = _mm_srai_epi32(u[0], DCT_CONST_BITS); | |
1885 v[1] = _mm_srai_epi32(u[1], DCT_CONST_BITS); | |
1886 v[2] = _mm_srai_epi32(u[2], DCT_CONST_BITS); | |
1887 v[3] = _mm_srai_epi32(u[3], DCT_CONST_BITS); | |
1888 v[4] = _mm_srai_epi32(u[4], DCT_CONST_BITS); | |
1889 v[5] = _mm_srai_epi32(u[5], DCT_CONST_BITS); | |
1890 v[6] = _mm_srai_epi32(u[6], DCT_CONST_BITS); | |
1891 v[7] = _mm_srai_epi32(u[7], DCT_CONST_BITS); | |
1892 | |
1893 in[2] = _mm_packs_epi32(v[0], v[1]); | |
1894 in[6] = _mm_packs_epi32(v[4], v[5]); | |
1895 in[10] = _mm_packs_epi32(v[2], v[3]); | |
1896 in[14] = _mm_packs_epi32(v[6], v[7]); | |
1897 | |
1898 // stage 2 | |
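// stage 2 rotates the middle four odd-path terms through cospi_16_64:
//   t[2] = (s[5] - s[2]) * cospi_16_64, t[3] = (s[4] - s[3]) * cospi_16_64,
//   t[4] = (s[4] + s[3]) * cospi_16_64, t[5] = (s[5] + s[2]) * cospi_16_64,
// each followed by dct_const_round_shift.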
1899 u[0] = _mm_unpacklo_epi16(s[2], s[5]); | |
1900 u[1] = _mm_unpackhi_epi16(s[2], s[5]); | |
1901 u[2] = _mm_unpacklo_epi16(s[3], s[4]); | |
1902 u[3] = _mm_unpackhi_epi16(s[3], s[4]); | |
1903 | |
1904 v[0] = _mm_madd_epi16(u[0], k__cospi_m16_p16); | |
1905 v[1] = _mm_madd_epi16(u[1], k__cospi_m16_p16); | |
1906 v[2] = _mm_madd_epi16(u[2], k__cospi_m16_p16); | |
1907 v[3] = _mm_madd_epi16(u[3], k__cospi_m16_p16); | |
1908 v[4] = _mm_madd_epi16(u[2], k__cospi_p16_p16); | |
1909 v[5] = _mm_madd_epi16(u[3], k__cospi_p16_p16); | |
1910 v[6] = _mm_madd_epi16(u[0], k__cospi_p16_p16); | |
1911 v[7] = _mm_madd_epi16(u[1], k__cospi_p16_p16); | |
1912 | |
1913 u[0] = _mm_add_epi32(v[0], k__DCT_CONST_ROUNDING); | |
1914 u[1] = _mm_add_epi32(v[1], k__DCT_CONST_ROUNDING); | |
1915 u[2] = _mm_add_epi32(v[2], k__DCT_CONST_ROUNDING); | |
1916 u[3] = _mm_add_epi32(v[3], k__DCT_CONST_ROUNDING); | |
1917 u[4] = _mm_add_epi32(v[4], k__DCT_CONST_ROUNDING); | |
1918 u[5] = _mm_add_epi32(v[5], k__DCT_CONST_ROUNDING); | |
1919 u[6] = _mm_add_epi32(v[6], k__DCT_CONST_ROUNDING); | |
1920 u[7] = _mm_add_epi32(v[7], k__DCT_CONST_ROUNDING); | |
1921 | |
1922 v[0] = _mm_srai_epi32(u[0], DCT_CONST_BITS); | |
1923 v[1] = _mm_srai_epi32(u[1], DCT_CONST_BITS); | |
1924 v[2] = _mm_srai_epi32(u[2], DCT_CONST_BITS); | |
1925 v[3] = _mm_srai_epi32(u[3], DCT_CONST_BITS); | |
1926 v[4] = _mm_srai_epi32(u[4], DCT_CONST_BITS); | |
1927 v[5] = _mm_srai_epi32(u[5], DCT_CONST_BITS); | |
1928 v[6] = _mm_srai_epi32(u[6], DCT_CONST_BITS); | |
1929 v[7] = _mm_srai_epi32(u[7], DCT_CONST_BITS); | |
1930 | |
1931 t[2] = _mm_packs_epi32(v[0], v[1]); | |
1932 t[3] = _mm_packs_epi32(v[2], v[3]); | |
1933 t[4] = _mm_packs_epi32(v[4], v[5]); | |
1934 t[5] = _mm_packs_epi32(v[6], v[7]); | |
1935 | |
1936 // stage 3 | |
1937 p[0] = _mm_add_epi16(s[0], t[3]); | |
1938 p[1] = _mm_add_epi16(s[1], t[2]); | |
1939 p[2] = _mm_sub_epi16(s[1], t[2]); | |
1940 p[3] = _mm_sub_epi16(s[0], t[3]); | |
1941 p[4] = _mm_sub_epi16(s[7], t[4]); | |
1942 p[5] = _mm_sub_epi16(s[6], t[5]); | |
1943 p[6] = _mm_add_epi16(s[6], t[5]); | |
1944 p[7] = _mm_add_epi16(s[7], t[4]); | |
1945 | |
1946 // stage 4 | |
1947 u[0] = _mm_unpacklo_epi16(p[1], p[6]); | |
1948 u[1] = _mm_unpackhi_epi16(p[1], p[6]); | |
1949 u[2] = _mm_unpacklo_epi16(p[2], p[5]); | |
1950 u[3] = _mm_unpackhi_epi16(p[2], p[5]); | |
1951 | |
1952 v[0] = _mm_madd_epi16(u[0], k__cospi_m08_p24); | |
1953 v[1] = _mm_madd_epi16(u[1], k__cospi_m08_p24); | |
1954 v[2] = _mm_madd_epi16(u[2], k__cospi_m24_m08); | |
1955 v[3] = _mm_madd_epi16(u[3], k__cospi_m24_m08); | |
1956 v[4] = _mm_madd_epi16(u[2], k__cospi_m08_p24); | |
1957 v[5] = _mm_madd_epi16(u[3], k__cospi_m08_p24); | |
1958 v[6] = _mm_madd_epi16(u[0], k__cospi_p24_p08); | |
1959 v[7] = _mm_madd_epi16(u[1], k__cospi_p24_p08); | |
1960 | |
1961 u[0] = _mm_add_epi32(v[0], k__DCT_CONST_ROUNDING); | |
1962 u[1] = _mm_add_epi32(v[1], k__DCT_CONST_ROUNDING); | |
1963 u[2] = _mm_add_epi32(v[2], k__DCT_CONST_ROUNDING); | |
1964 u[3] = _mm_add_epi32(v[3], k__DCT_CONST_ROUNDING); | |
1965 u[4] = _mm_add_epi32(v[4], k__DCT_CONST_ROUNDING); | |
1966 u[5] = _mm_add_epi32(v[5], k__DCT_CONST_ROUNDING); | |
1967 u[6] = _mm_add_epi32(v[6], k__DCT_CONST_ROUNDING); | |
1968 u[7] = _mm_add_epi32(v[7], k__DCT_CONST_ROUNDING); | |
1969 | |
1970 v[0] = _mm_srai_epi32(u[0], DCT_CONST_BITS); | |
1971 v[1] = _mm_srai_epi32(u[1], DCT_CONST_BITS); | |
1972 v[2] = _mm_srai_epi32(u[2], DCT_CONST_BITS); | |
1973 v[3] = _mm_srai_epi32(u[3], DCT_CONST_BITS); | |
1974 v[4] = _mm_srai_epi32(u[4], DCT_CONST_BITS); | |
1975 v[5] = _mm_srai_epi32(u[5], DCT_CONST_BITS); | |
1976 v[6] = _mm_srai_epi32(u[6], DCT_CONST_BITS); | |
1977 v[7] = _mm_srai_epi32(u[7], DCT_CONST_BITS); | |
1978 | |
1979 t[1] = _mm_packs_epi32(v[0], v[1]); | |
1980 t[2] = _mm_packs_epi32(v[2], v[3]); | |
1981 t[5] = _mm_packs_epi32(v[4], v[5]); | |
1982 t[6] = _mm_packs_epi32(v[6], v[7]); | |
1983 | |
1984 // stage 5 | |
1985 s[0] = _mm_add_epi16(p[0], t[1]); | |
1986 s[1] = _mm_sub_epi16(p[0], t[1]); | |
1987 s[2] = _mm_sub_epi16(p[3], t[2]); | |
1988 s[3] = _mm_add_epi16(p[3], t[2]); | |
1989 s[4] = _mm_add_epi16(p[4], t[5]); | |
1990 s[5] = _mm_sub_epi16(p[4], t[5]); | |
1991 s[6] = _mm_sub_epi16(p[7], t[6]); | |
1992 s[7] = _mm_add_epi16(p[7], t[6]); | |
1993 | |
1994 // stage 6 | |
1995 u[0] = _mm_unpacklo_epi16(s[0], s[7]); | |
1996 u[1] = _mm_unpackhi_epi16(s[0], s[7]); | |
1997 u[2] = _mm_unpacklo_epi16(s[1], s[6]); | |
1998 u[3] = _mm_unpackhi_epi16(s[1], s[6]); | |
1999 u[4] = _mm_unpacklo_epi16(s[2], s[5]); | |
2000 u[5] = _mm_unpackhi_epi16(s[2], s[5]); | |
2001 u[6] = _mm_unpacklo_epi16(s[3], s[4]); | |
2002 u[7] = _mm_unpackhi_epi16(s[3], s[4]); | |
2003 | |
2004 v[0] = _mm_madd_epi16(u[0], k__cospi_p30_p02); | |
2005 v[1] = _mm_madd_epi16(u[1], k__cospi_p30_p02); | |
2006 v[2] = _mm_madd_epi16(u[2], k__cospi_p14_p18); | |
2007 v[3] = _mm_madd_epi16(u[3], k__cospi_p14_p18); | |
2008 v[4] = _mm_madd_epi16(u[4], k__cospi_p22_p10); | |
2009 v[5] = _mm_madd_epi16(u[5], k__cospi_p22_p10); | |
2010 v[6] = _mm_madd_epi16(u[6], k__cospi_p06_p26); | |
2011 v[7] = _mm_madd_epi16(u[7], k__cospi_p06_p26); | |
2012 v[8] = _mm_madd_epi16(u[6], k__cospi_m26_p06); | |
2013 v[9] = _mm_madd_epi16(u[7], k__cospi_m26_p06); | |
2014 v[10] = _mm_madd_epi16(u[4], k__cospi_m10_p22); | |
2015 v[11] = _mm_madd_epi16(u[5], k__cospi_m10_p22); | |
2016 v[12] = _mm_madd_epi16(u[2], k__cospi_m18_p14); | |
2017 v[13] = _mm_madd_epi16(u[3], k__cospi_m18_p14); | |
2018 v[14] = _mm_madd_epi16(u[0], k__cospi_m02_p30); | |
2019 v[15] = _mm_madd_epi16(u[1], k__cospi_m02_p30); | |
2020 | |
2021 u[0] = _mm_add_epi32(v[0], k__DCT_CONST_ROUNDING); | |
2022 u[1] = _mm_add_epi32(v[1], k__DCT_CONST_ROUNDING); | |
2023 u[2] = _mm_add_epi32(v[2], k__DCT_CONST_ROUNDING); | |
2024 u[3] = _mm_add_epi32(v[3], k__DCT_CONST_ROUNDING); | |
2025 u[4] = _mm_add_epi32(v[4], k__DCT_CONST_ROUNDING); | |
2026 u[5] = _mm_add_epi32(v[5], k__DCT_CONST_ROUNDING); | |
2027 u[6] = _mm_add_epi32(v[6], k__DCT_CONST_ROUNDING); | |
2028 u[7] = _mm_add_epi32(v[7], k__DCT_CONST_ROUNDING); | |
2029 u[8] = _mm_add_epi32(v[8], k__DCT_CONST_ROUNDING); | |
2030 u[9] = _mm_add_epi32(v[9], k__DCT_CONST_ROUNDING); | |
2031 u[10] = _mm_add_epi32(v[10], k__DCT_CONST_ROUNDING); | |
2032 u[11] = _mm_add_epi32(v[11], k__DCT_CONST_ROUNDING); | |
2033 u[12] = _mm_add_epi32(v[12], k__DCT_CONST_ROUNDING); | |
2034 u[13] = _mm_add_epi32(v[13], k__DCT_CONST_ROUNDING); | |
2035 u[14] = _mm_add_epi32(v[14], k__DCT_CONST_ROUNDING); | |
2036 u[15] = _mm_add_epi32(v[15], k__DCT_CONST_ROUNDING); | |
2037 | |
2038 v[0] = _mm_srai_epi32(u[0], DCT_CONST_BITS); | |
2039 v[1] = _mm_srai_epi32(u[1], DCT_CONST_BITS); | |
2040 v[2] = _mm_srai_epi32(u[2], DCT_CONST_BITS); | |
2041 v[3] = _mm_srai_epi32(u[3], DCT_CONST_BITS); | |
2042 v[4] = _mm_srai_epi32(u[4], DCT_CONST_BITS); | |
2043 v[5] = _mm_srai_epi32(u[5], DCT_CONST_BITS); | |
2044 v[6] = _mm_srai_epi32(u[6], DCT_CONST_BITS); | |
2045 v[7] = _mm_srai_epi32(u[7], DCT_CONST_BITS); | |
2046 v[8] = _mm_srai_epi32(u[8], DCT_CONST_BITS); | |
2047 v[9] = _mm_srai_epi32(u[9], DCT_CONST_BITS); | |
2048 v[10] = _mm_srai_epi32(u[10], DCT_CONST_BITS); | |
2049 v[11] = _mm_srai_epi32(u[11], DCT_CONST_BITS); | |
2050 v[12] = _mm_srai_epi32(u[12], DCT_CONST_BITS); | |
2051 v[13] = _mm_srai_epi32(u[13], DCT_CONST_BITS); | |
2052 v[14] = _mm_srai_epi32(u[14], DCT_CONST_BITS); | |
2053 v[15] = _mm_srai_epi32(u[15], DCT_CONST_BITS); | |
2054 | |
2055 in[1] = _mm_packs_epi32(v[0], v[1]); | |
2056 in[9] = _mm_packs_epi32(v[2], v[3]); | |
2057 in[5] = _mm_packs_epi32(v[4], v[5]); | |
2058 in[13] = _mm_packs_epi32(v[6], v[7]); | |
2059 in[3] = _mm_packs_epi32(v[8], v[9]); | |
2060 in[11] = _mm_packs_epi32(v[10], v[11]); | |
2061 in[7] = _mm_packs_epi32(v[12], v[13]); | |
2062 in[15] = _mm_packs_epi32(v[14], v[15]); | |
2063 } | |
2064 | |
2065 void fadst16_8col_avx2(__m128i *in) { | |
2066 // perform 16x16 1-D ADST for 8 columns | |
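// Same register layout as fdct16_8col_avx2: sixteen rows of eight int16
// lanes, i.e. eight columns per call. The structure mirrors the scalar
// fadst16() reference, with each stage built from paired-constant
// _mm_madd_epi16 rotations and dct_const_round_shift.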
2067 __m128i s[16], x[16], u[32], v[32]; | |
2068 const __m128i k__cospi_p01_p31 = pair_set_epi16(cospi_1_64, cospi_31_64); | |
2069 const __m128i k__cospi_p31_m01 = pair_set_epi16(cospi_31_64, -cospi_1_64); | |
2070 const __m128i k__cospi_p05_p27 = pair_set_epi16(cospi_5_64, cospi_27_64); | |
2071 const __m128i k__cospi_p27_m05 = pair_set_epi16(cospi_27_64, -cospi_5_64); | |
2072 const __m128i k__cospi_p09_p23 = pair_set_epi16(cospi_9_64, cospi_23_64); | |
2073 const __m128i k__cospi_p23_m09 = pair_set_epi16(cospi_23_64, -cospi_9_64); | |
2074 const __m128i k__cospi_p13_p19 = pair_set_epi16(cospi_13_64, cospi_19_64); | |
2075 const __m128i k__cospi_p19_m13 = pair_set_epi16(cospi_19_64, -cospi_13_64); | |
2076 const __m128i k__cospi_p17_p15 = pair_set_epi16(cospi_17_64, cospi_15_64); | |
2077 const __m128i k__cospi_p15_m17 = pair_set_epi16(cospi_15_64, -cospi_17_64); | |
2078 const __m128i k__cospi_p21_p11 = pair_set_epi16(cospi_21_64, cospi_11_64); | |
2079 const __m128i k__cospi_p11_m21 = pair_set_epi16(cospi_11_64, -cospi_21_64); | |
2080 const __m128i k__cospi_p25_p07 = pair_set_epi16(cospi_25_64, cospi_7_64); | |
2081 const __m128i k__cospi_p07_m25 = pair_set_epi16(cospi_7_64, -cospi_25_64); | |
2082 const __m128i k__cospi_p29_p03 = pair_set_epi16(cospi_29_64, cospi_3_64); | |
2083 const __m128i k__cospi_p03_m29 = pair_set_epi16(cospi_3_64, -cospi_29_64); | |
2084 const __m128i k__cospi_p04_p28 = pair_set_epi16(cospi_4_64, cospi_28_64); | |
2085 const __m128i k__cospi_p28_m04 = pair_set_epi16(cospi_28_64, -cospi_4_64); | |
2086 const __m128i k__cospi_p20_p12 = pair_set_epi16(cospi_20_64, cospi_12_64); | |
2087 const __m128i k__cospi_p12_m20 = pair_set_epi16(cospi_12_64, -cospi_20_64); | |
2088 const __m128i k__cospi_m28_p04 = pair_set_epi16(-cospi_28_64, cospi_4_64); | |
2089 const __m128i k__cospi_m12_p20 = pair_set_epi16(-cospi_12_64, cospi_20_64); | |
2090 const __m128i k__cospi_p08_p24 = pair_set_epi16(cospi_8_64, cospi_24_64); | |
2091 const __m128i k__cospi_p24_m08 = pair_set_epi16(cospi_24_64, -cospi_8_64); | |
2092 const __m128i k__cospi_m24_p08 = pair_set_epi16(-cospi_24_64, cospi_8_64); | |
2093 const __m128i k__cospi_m16_m16 = _mm_set1_epi16(-cospi_16_64); | |
2094 const __m128i k__cospi_p16_p16 = _mm_set1_epi16(cospi_16_64); | |
2095 const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64); | |
2096 const __m128i k__cospi_m16_p16 = pair_set_epi16(-cospi_16_64, cospi_16_64); | |
2097 const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING); | |
2098 const __m128i kZero = _mm_set1_epi16(0); | |
2099 | |
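// stage 1: rotate the pairs (in[15], in[0]), (in[13], in[2]), ...,
// (in[1], in[14]) by the odd cospi constants, then form s[0..7] as the sums
// and s[8..15] as the differences of the corresponding rotated terms, as in
// the scalar fadst16() reference.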
2100 u[0] = _mm_unpacklo_epi16(in[15], in[0]); | |
2101 u[1] = _mm_unpackhi_epi16(in[15], in[0]); | |
2102 u[2] = _mm_unpacklo_epi16(in[13], in[2]); | |
2103 u[3] = _mm_unpackhi_epi16(in[13], in[2]); | |
2104 u[4] = _mm_unpacklo_epi16(in[11], in[4]); | |
2105 u[5] = _mm_unpackhi_epi16(in[11], in[4]); | |
2106 u[6] = _mm_unpacklo_epi16(in[9], in[6]); | |
2107 u[7] = _mm_unpackhi_epi16(in[9], in[6]); | |
2108 u[8] = _mm_unpacklo_epi16(in[7], in[8]); | |
2109 u[9] = _mm_unpackhi_epi16(in[7], in[8]); | |
2110 u[10] = _mm_unpacklo_epi16(in[5], in[10]); | |
2111 u[11] = _mm_unpackhi_epi16(in[5], in[10]); | |
2112 u[12] = _mm_unpacklo_epi16(in[3], in[12]); | |
2113 u[13] = _mm_unpackhi_epi16(in[3], in[12]); | |
2114 u[14] = _mm_unpacklo_epi16(in[1], in[14]); | |
2115 u[15] = _mm_unpackhi_epi16(in[1], in[14]); | |
2116 | |
2117 v[0] = _mm_madd_epi16(u[0], k__cospi_p01_p31); | |
2118 v[1] = _mm_madd_epi16(u[1], k__cospi_p01_p31); | |
2119 v[2] = _mm_madd_epi16(u[0], k__cospi_p31_m01); | |
2120 v[3] = _mm_madd_epi16(u[1], k__cospi_p31_m01); | |
2121 v[4] = _mm_madd_epi16(u[2], k__cospi_p05_p27); | |
2122 v[5] = _mm_madd_epi16(u[3], k__cospi_p05_p27); | |
2123 v[6] = _mm_madd_epi16(u[2], k__cospi_p27_m05); | |
2124 v[7] = _mm_madd_epi16(u[3], k__cospi_p27_m05); | |
2125 v[8] = _mm_madd_epi16(u[4], k__cospi_p09_p23); | |
2126 v[9] = _mm_madd_epi16(u[5], k__cospi_p09_p23); | |
2127 v[10] = _mm_madd_epi16(u[4], k__cospi_p23_m09); | |
2128 v[11] = _mm_madd_epi16(u[5], k__cospi_p23_m09); | |
2129 v[12] = _mm_madd_epi16(u[6], k__cospi_p13_p19); | |
2130 v[13] = _mm_madd_epi16(u[7], k__cospi_p13_p19); | |
2131 v[14] = _mm_madd_epi16(u[6], k__cospi_p19_m13); | |
2132 v[15] = _mm_madd_epi16(u[7], k__cospi_p19_m13); | |
2133 v[16] = _mm_madd_epi16(u[8], k__cospi_p17_p15); | |
2134 v[17] = _mm_madd_epi16(u[9], k__cospi_p17_p15); | |
2135 v[18] = _mm_madd_epi16(u[8], k__cospi_p15_m17); | |
2136 v[19] = _mm_madd_epi16(u[9], k__cospi_p15_m17); | |
2137 v[20] = _mm_madd_epi16(u[10], k__cospi_p21_p11); | |
2138 v[21] = _mm_madd_epi16(u[11], k__cospi_p21_p11); | |
2139 v[22] = _mm_madd_epi16(u[10], k__cospi_p11_m21); | |
2140 v[23] = _mm_madd_epi16(u[11], k__cospi_p11_m21); | |
2141 v[24] = _mm_madd_epi16(u[12], k__cospi_p25_p07); | |
2142 v[25] = _mm_madd_epi16(u[13], k__cospi_p25_p07); | |
2143 v[26] = _mm_madd_epi16(u[12], k__cospi_p07_m25); | |
2144 v[27] = _mm_madd_epi16(u[13], k__cospi_p07_m25); | |
2145 v[28] = _mm_madd_epi16(u[14], k__cospi_p29_p03); | |
2146 v[29] = _mm_madd_epi16(u[15], k__cospi_p29_p03); | |
2147 v[30] = _mm_madd_epi16(u[14], k__cospi_p03_m29); | |
2148 v[31] = _mm_madd_epi16(u[15], k__cospi_p03_m29); | |
2149 | |
2150 u[0] = _mm_add_epi32(v[0], v[16]); | |
2151 u[1] = _mm_add_epi32(v[1], v[17]); | |
2152 u[2] = _mm_add_epi32(v[2], v[18]); | |
2153 u[3] = _mm_add_epi32(v[3], v[19]); | |
2154 u[4] = _mm_add_epi32(v[4], v[20]); | |
2155 u[5] = _mm_add_epi32(v[5], v[21]); | |
2156 u[6] = _mm_add_epi32(v[6], v[22]); | |
2157 u[7] = _mm_add_epi32(v[7], v[23]); | |
2158 u[8] = _mm_add_epi32(v[8], v[24]); | |
2159 u[9] = _mm_add_epi32(v[9], v[25]); | |
2160 u[10] = _mm_add_epi32(v[10], v[26]); | |
2161 u[11] = _mm_add_epi32(v[11], v[27]); | |
2162 u[12] = _mm_add_epi32(v[12], v[28]); | |
2163 u[13] = _mm_add_epi32(v[13], v[29]); | |
2164 u[14] = _mm_add_epi32(v[14], v[30]); | |
2165 u[15] = _mm_add_epi32(v[15], v[31]); | |
2166 u[16] = _mm_sub_epi32(v[0], v[16]); | |
2167 u[17] = _mm_sub_epi32(v[1], v[17]); | |
2168 u[18] = _mm_sub_epi32(v[2], v[18]); | |
2169 u[19] = _mm_sub_epi32(v[3], v[19]); | |
2170 u[20] = _mm_sub_epi32(v[4], v[20]); | |
2171 u[21] = _mm_sub_epi32(v[5], v[21]); | |
2172 u[22] = _mm_sub_epi32(v[6], v[22]); | |
2173 u[23] = _mm_sub_epi32(v[7], v[23]); | |
2174 u[24] = _mm_sub_epi32(v[8], v[24]); | |
2175 u[25] = _mm_sub_epi32(v[9], v[25]); | |
2176 u[26] = _mm_sub_epi32(v[10], v[26]); | |
2177 u[27] = _mm_sub_epi32(v[11], v[27]); | |
2178 u[28] = _mm_sub_epi32(v[12], v[28]); | |
2179 u[29] = _mm_sub_epi32(v[13], v[29]); | |
2180 u[30] = _mm_sub_epi32(v[14], v[30]); | |
2181 u[31] = _mm_sub_epi32(v[15], v[31]); | |
2182 | |
2183 v[0] = _mm_add_epi32(u[0], k__DCT_CONST_ROUNDING); | |
2184 v[1] = _mm_add_epi32(u[1], k__DCT_CONST_ROUNDING); | |
2185 v[2] = _mm_add_epi32(u[2], k__DCT_CONST_ROUNDING); | |
2186 v[3] = _mm_add_epi32(u[3], k__DCT_CONST_ROUNDING); | |
2187 v[4] = _mm_add_epi32(u[4], k__DCT_CONST_ROUNDING); | |
2188 v[5] = _mm_add_epi32(u[5], k__DCT_CONST_ROUNDING); | |
2189 v[6] = _mm_add_epi32(u[6], k__DCT_CONST_ROUNDING); | |
2190 v[7] = _mm_add_epi32(u[7], k__DCT_CONST_ROUNDING); | |
2191 v[8] = _mm_add_epi32(u[8], k__DCT_CONST_ROUNDING); | |
2192 v[9] = _mm_add_epi32(u[9], k__DCT_CONST_ROUNDING); | |
2193 v[10] = _mm_add_epi32(u[10], k__DCT_CONST_ROUNDING); | |
2194 v[11] = _mm_add_epi32(u[11], k__DCT_CONST_ROUNDING); | |
2195 v[12] = _mm_add_epi32(u[12], k__DCT_CONST_ROUNDING); | |
2196 v[13] = _mm_add_epi32(u[13], k__DCT_CONST_ROUNDING); | |
2197 v[14] = _mm_add_epi32(u[14], k__DCT_CONST_ROUNDING); | |
2198 v[15] = _mm_add_epi32(u[15], k__DCT_CONST_ROUNDING); | |
2199 v[16] = _mm_add_epi32(u[16], k__DCT_CONST_ROUNDING); | |
2200 v[17] = _mm_add_epi32(u[17], k__DCT_CONST_ROUNDING); | |
2201 v[18] = _mm_add_epi32(u[18], k__DCT_CONST_ROUNDING); | |
2202 v[19] = _mm_add_epi32(u[19], k__DCT_CONST_ROUNDING); | |
2203 v[20] = _mm_add_epi32(u[20], k__DCT_CONST_ROUNDING); | |
2204 v[21] = _mm_add_epi32(u[21], k__DCT_CONST_ROUNDING); | |
2205 v[22] = _mm_add_epi32(u[22], k__DCT_CONST_ROUNDING); | |
2206 v[23] = _mm_add_epi32(u[23], k__DCT_CONST_ROUNDING); | |
2207 v[24] = _mm_add_epi32(u[24], k__DCT_CONST_ROUNDING); | |
2208 v[25] = _mm_add_epi32(u[25], k__DCT_CONST_ROUNDING); | |
2209 v[26] = _mm_add_epi32(u[26], k__DCT_CONST_ROUNDING); | |
2210 v[27] = _mm_add_epi32(u[27], k__DCT_CONST_ROUNDING); | |
2211 v[28] = _mm_add_epi32(u[28], k__DCT_CONST_ROUNDING); | |
2212 v[29] = _mm_add_epi32(u[29], k__DCT_CONST_ROUNDING); | |
2213 v[30] = _mm_add_epi32(u[30], k__DCT_CONST_ROUNDING); | |
2214 v[31] = _mm_add_epi32(u[31], k__DCT_CONST_ROUNDING); | |
2215 | |
2216 u[0] = _mm_srai_epi32(v[0], DCT_CONST_BITS); | |
2217 u[1] = _mm_srai_epi32(v[1], DCT_CONST_BITS); | |
2218 u[2] = _mm_srai_epi32(v[2], DCT_CONST_BITS); | |
2219 u[3] = _mm_srai_epi32(v[3], DCT_CONST_BITS); | |
2220 u[4] = _mm_srai_epi32(v[4], DCT_CONST_BITS); | |
2221 u[5] = _mm_srai_epi32(v[5], DCT_CONST_BITS); | |
2222 u[6] = _mm_srai_epi32(v[6], DCT_CONST_BITS); | |
2223 u[7] = _mm_srai_epi32(v[7], DCT_CONST_BITS); | |
2224 u[8] = _mm_srai_epi32(v[8], DCT_CONST_BITS); | |
2225 u[9] = _mm_srai_epi32(v[9], DCT_CONST_BITS); | |
2226 u[10] = _mm_srai_epi32(v[10], DCT_CONST_BITS); | |
2227 u[11] = _mm_srai_epi32(v[11], DCT_CONST_BITS); | |
2228 u[12] = _mm_srai_epi32(v[12], DCT_CONST_BITS); | |
2229 u[13] = _mm_srai_epi32(v[13], DCT_CONST_BITS); | |
2230 u[14] = _mm_srai_epi32(v[14], DCT_CONST_BITS); | |
2231 u[15] = _mm_srai_epi32(v[15], DCT_CONST_BITS); | |
2232 u[16] = _mm_srai_epi32(v[16], DCT_CONST_BITS); | |
2233 u[17] = _mm_srai_epi32(v[17], DCT_CONST_BITS); | |
2234 u[18] = _mm_srai_epi32(v[18], DCT_CONST_BITS); | |
2235 u[19] = _mm_srai_epi32(v[19], DCT_CONST_BITS); | |
2236 u[20] = _mm_srai_epi32(v[20], DCT_CONST_BITS); | |
2237 u[21] = _mm_srai_epi32(v[21], DCT_CONST_BITS); | |
2238 u[22] = _mm_srai_epi32(v[22], DCT_CONST_BITS); | |
2239 u[23] = _mm_srai_epi32(v[23], DCT_CONST_BITS); | |
2240 u[24] = _mm_srai_epi32(v[24], DCT_CONST_BITS); | |
2241 u[25] = _mm_srai_epi32(v[25], DCT_CONST_BITS); | |
2242 u[26] = _mm_srai_epi32(v[26], DCT_CONST_BITS); | |
2243 u[27] = _mm_srai_epi32(v[27], DCT_CONST_BITS); | |
2244 u[28] = _mm_srai_epi32(v[28], DCT_CONST_BITS); | |
2245 u[29] = _mm_srai_epi32(v[29], DCT_CONST_BITS); | |
2246 u[30] = _mm_srai_epi32(v[30], DCT_CONST_BITS); | |
2247 u[31] = _mm_srai_epi32(v[31], DCT_CONST_BITS); | |
2248 | |
2249 s[0] = _mm_packs_epi32(u[0], u[1]); | |
2250 s[1] = _mm_packs_epi32(u[2], u[3]); | |
2251 s[2] = _mm_packs_epi32(u[4], u[5]); | |
2252 s[3] = _mm_packs_epi32(u[6], u[7]); | |
2253 s[4] = _mm_packs_epi32(u[8], u[9]); | |
2254 s[5] = _mm_packs_epi32(u[10], u[11]); | |
2255 s[6] = _mm_packs_epi32(u[12], u[13]); | |
2256 s[7] = _mm_packs_epi32(u[14], u[15]); | |
2257 s[8] = _mm_packs_epi32(u[16], u[17]); | |
2258 s[9] = _mm_packs_epi32(u[18], u[19]); | |
2259 s[10] = _mm_packs_epi32(u[20], u[21]); | |
2260 s[11] = _mm_packs_epi32(u[22], u[23]); | |
2261 s[12] = _mm_packs_epi32(u[24], u[25]); | |
2262 s[13] = _mm_packs_epi32(u[26], u[27]); | |
2263 s[14] = _mm_packs_epi32(u[28], u[29]); | |
2264 s[15] = _mm_packs_epi32(u[30], u[31]); | |
2265 | |
2266 // stage 2 | |
2267 u[0] = _mm_unpacklo_epi16(s[8], s[9]); | |
2268 u[1] = _mm_unpackhi_epi16(s[8], s[9]); | |
2269 u[2] = _mm_unpacklo_epi16(s[10], s[11]); | |
2270 u[3] = _mm_unpackhi_epi16(s[10], s[11]); | |
2271 u[4] = _mm_unpacklo_epi16(s[12], s[13]); | |
2272 u[5] = _mm_unpackhi_epi16(s[12], s[13]); | |
2273 u[6] = _mm_unpacklo_epi16(s[14], s[15]); | |
2274 u[7] = _mm_unpackhi_epi16(s[14], s[15]); | |
2275 | |
2276 v[0] = _mm_madd_epi16(u[0], k__cospi_p04_p28); | |
2277 v[1] = _mm_madd_epi16(u[1], k__cospi_p04_p28); | |
2278 v[2] = _mm_madd_epi16(u[0], k__cospi_p28_m04); | |
2279 v[3] = _mm_madd_epi16(u[1], k__cospi_p28_m04); | |
2280 v[4] = _mm_madd_epi16(u[2], k__cospi_p20_p12); | |
2281 v[5] = _mm_madd_epi16(u[3], k__cospi_p20_p12); | |
2282 v[6] = _mm_madd_epi16(u[2], k__cospi_p12_m20); | |
2283 v[7] = _mm_madd_epi16(u[3], k__cospi_p12_m20); | |
2284 v[8] = _mm_madd_epi16(u[4], k__cospi_m28_p04); | |
2285 v[9] = _mm_madd_epi16(u[5], k__cospi_m28_p04); | |
2286 v[10] = _mm_madd_epi16(u[4], k__cospi_p04_p28); | |
2287 v[11] = _mm_madd_epi16(u[5], k__cospi_p04_p28); | |
2288 v[12] = _mm_madd_epi16(u[6], k__cospi_m12_p20); | |
2289 v[13] = _mm_madd_epi16(u[7], k__cospi_m12_p20); | |
2290 v[14] = _mm_madd_epi16(u[6], k__cospi_p20_p12); | |
2291 v[15] = _mm_madd_epi16(u[7], k__cospi_p20_p12); | |
2292 | |
2293 u[0] = _mm_add_epi32(v[0], v[8]); | |
2294 u[1] = _mm_add_epi32(v[1], v[9]); | |
2295 u[2] = _mm_add_epi32(v[2], v[10]); | |
2296 u[3] = _mm_add_epi32(v[3], v[11]); | |
2297 u[4] = _mm_add_epi32(v[4], v[12]); | |
2298 u[5] = _mm_add_epi32(v[5], v[13]); | |
2299 u[6] = _mm_add_epi32(v[6], v[14]); | |
2300 u[7] = _mm_add_epi32(v[7], v[15]); | |
2301 u[8] = _mm_sub_epi32(v[0], v[8]); | |
2302 u[9] = _mm_sub_epi32(v[1], v[9]); | |
2303 u[10] = _mm_sub_epi32(v[2], v[10]); | |
2304 u[11] = _mm_sub_epi32(v[3], v[11]); | |
2305 u[12] = _mm_sub_epi32(v[4], v[12]); | |
2306 u[13] = _mm_sub_epi32(v[5], v[13]); | |
2307 u[14] = _mm_sub_epi32(v[6], v[14]); | |
2308 u[15] = _mm_sub_epi32(v[7], v[15]); | |
2309 | |
2310 v[0] = _mm_add_epi32(u[0], k__DCT_CONST_ROUNDING); | |
2311 v[1] = _mm_add_epi32(u[1], k__DCT_CONST_ROUNDING); | |
2312 v[2] = _mm_add_epi32(u[2], k__DCT_CONST_ROUNDING); | |
2313 v[3] = _mm_add_epi32(u[3], k__DCT_CONST_ROUNDING); | |
2314 v[4] = _mm_add_epi32(u[4], k__DCT_CONST_ROUNDING); | |
2315 v[5] = _mm_add_epi32(u[5], k__DCT_CONST_ROUNDING); | |
2316 v[6] = _mm_add_epi32(u[6], k__DCT_CONST_ROUNDING); | |
2317 v[7] = _mm_add_epi32(u[7], k__DCT_CONST_ROUNDING); | |
2318 v[8] = _mm_add_epi32(u[8], k__DCT_CONST_ROUNDING); | |
2319 v[9] = _mm_add_epi32(u[9], k__DCT_CONST_ROUNDING); | |
2320 v[10] = _mm_add_epi32(u[10], k__DCT_CONST_ROUNDING); | |
2321 v[11] = _mm_add_epi32(u[11], k__DCT_CONST_ROUNDING); | |
2322 v[12] = _mm_add_epi32(u[12], k__DCT_CONST_ROUNDING); | |
2323 v[13] = _mm_add_epi32(u[13], k__DCT_CONST_ROUNDING); | |
2324 v[14] = _mm_add_epi32(u[14], k__DCT_CONST_ROUNDING); | |
2325 v[15] = _mm_add_epi32(u[15], k__DCT_CONST_ROUNDING); | |
2326 | |
2327 u[0] = _mm_srai_epi32(v[0], DCT_CONST_BITS); | |
2328 u[1] = _mm_srai_epi32(v[1], DCT_CONST_BITS); | |
2329 u[2] = _mm_srai_epi32(v[2], DCT_CONST_BITS); | |
2330 u[3] = _mm_srai_epi32(v[3], DCT_CONST_BITS); | |
2331 u[4] = _mm_srai_epi32(v[4], DCT_CONST_BITS); | |
2332 u[5] = _mm_srai_epi32(v[5], DCT_CONST_BITS); | |
2333 u[6] = _mm_srai_epi32(v[6], DCT_CONST_BITS); | |
2334 u[7] = _mm_srai_epi32(v[7], DCT_CONST_BITS); | |
2335 u[8] = _mm_srai_epi32(v[8], DCT_CONST_BITS); | |
2336 u[9] = _mm_srai_epi32(v[9], DCT_CONST_BITS); | |
2337 u[10] = _mm_srai_epi32(v[10], DCT_CONST_BITS); | |
2338 u[11] = _mm_srai_epi32(v[11], DCT_CONST_BITS); | |
2339 u[12] = _mm_srai_epi32(v[12], DCT_CONST_BITS); | |
2340 u[13] = _mm_srai_epi32(v[13], DCT_CONST_BITS); | |
2341 u[14] = _mm_srai_epi32(v[14], DCT_CONST_BITS); | |
2342 u[15] = _mm_srai_epi32(v[15], DCT_CONST_BITS); | |
2343 | |
2344 x[0] = _mm_add_epi16(s[0], s[4]); | |
2345 x[1] = _mm_add_epi16(s[1], s[5]); | |
2346 x[2] = _mm_add_epi16(s[2], s[6]); | |
2347 x[3] = _mm_add_epi16(s[3], s[7]); | |
2348 x[4] = _mm_sub_epi16(s[0], s[4]); | |
2349 x[5] = _mm_sub_epi16(s[1], s[5]); | |
2350 x[6] = _mm_sub_epi16(s[2], s[6]); | |
2351 x[7] = _mm_sub_epi16(s[3], s[7]); | |
2352 x[8] = _mm_packs_epi32(u[0], u[1]); | |
2353 x[9] = _mm_packs_epi32(u[2], u[3]); | |
2354 x[10] = _mm_packs_epi32(u[4], u[5]); | |
2355 x[11] = _mm_packs_epi32(u[6], u[7]); | |
2356 x[12] = _mm_packs_epi32(u[8], u[9]); | |
2357 x[13] = _mm_packs_epi32(u[10], u[11]); | |
2358 x[14] = _mm_packs_epi32(u[12], u[13]); | |
2359 x[15] = _mm_packs_epi32(u[14], u[15]); | |
2360 | |
2361       // stage 3: cospi(8,24) rotations on x[4..7] and x[12..15]; plain butterflies on x[0..3] and x[8..11] | |
2362 u[0] = _mm_unpacklo_epi16(x[4], x[5]); | |
2363 u[1] = _mm_unpackhi_epi16(x[4], x[5]); | |
2364 u[2] = _mm_unpacklo_epi16(x[6], x[7]); | |
2365 u[3] = _mm_unpackhi_epi16(x[6], x[7]); | |
2366 u[4] = _mm_unpacklo_epi16(x[12], x[13]); | |
2367 u[5] = _mm_unpackhi_epi16(x[12], x[13]); | |
2368 u[6] = _mm_unpacklo_epi16(x[14], x[15]); | |
2369 u[7] = _mm_unpackhi_epi16(x[14], x[15]); | |
2370 | |
2371 v[0] = _mm_madd_epi16(u[0], k__cospi_p08_p24); | |
2372 v[1] = _mm_madd_epi16(u[1], k__cospi_p08_p24); | |
2373 v[2] = _mm_madd_epi16(u[0], k__cospi_p24_m08); | |
2374 v[3] = _mm_madd_epi16(u[1], k__cospi_p24_m08); | |
2375 v[4] = _mm_madd_epi16(u[2], k__cospi_m24_p08); | |
2376 v[5] = _mm_madd_epi16(u[3], k__cospi_m24_p08); | |
2377 v[6] = _mm_madd_epi16(u[2], k__cospi_p08_p24); | |
2378 v[7] = _mm_madd_epi16(u[3], k__cospi_p08_p24); | |
2379 v[8] = _mm_madd_epi16(u[4], k__cospi_p08_p24); | |
2380 v[9] = _mm_madd_epi16(u[5], k__cospi_p08_p24); | |
2381 v[10] = _mm_madd_epi16(u[4], k__cospi_p24_m08); | |
2382 v[11] = _mm_madd_epi16(u[5], k__cospi_p24_m08); | |
2383 v[12] = _mm_madd_epi16(u[6], k__cospi_m24_p08); | |
2384 v[13] = _mm_madd_epi16(u[7], k__cospi_m24_p08); | |
2385 v[14] = _mm_madd_epi16(u[6], k__cospi_p08_p24); | |
2386 v[15] = _mm_madd_epi16(u[7], k__cospi_p08_p24); | |
2387 | |
2388 u[0] = _mm_add_epi32(v[0], v[4]); | |
2389 u[1] = _mm_add_epi32(v[1], v[5]); | |
2390 u[2] = _mm_add_epi32(v[2], v[6]); | |
2391 u[3] = _mm_add_epi32(v[3], v[7]); | |
2392 u[4] = _mm_sub_epi32(v[0], v[4]); | |
2393 u[5] = _mm_sub_epi32(v[1], v[5]); | |
2394 u[6] = _mm_sub_epi32(v[2], v[6]); | |
2395 u[7] = _mm_sub_epi32(v[3], v[7]); | |
2396 u[8] = _mm_add_epi32(v[8], v[12]); | |
2397 u[9] = _mm_add_epi32(v[9], v[13]); | |
2398 u[10] = _mm_add_epi32(v[10], v[14]); | |
2399 u[11] = _mm_add_epi32(v[11], v[15]); | |
2400 u[12] = _mm_sub_epi32(v[8], v[12]); | |
2401 u[13] = _mm_sub_epi32(v[9], v[13]); | |
2402 u[14] = _mm_sub_epi32(v[10], v[14]); | |
2403 u[15] = _mm_sub_epi32(v[11], v[15]); | |
2404 | |
2405 u[0] = _mm_add_epi32(u[0], k__DCT_CONST_ROUNDING); | |
2406 u[1] = _mm_add_epi32(u[1], k__DCT_CONST_ROUNDING); | |
2407 u[2] = _mm_add_epi32(u[2], k__DCT_CONST_ROUNDING); | |
2408 u[3] = _mm_add_epi32(u[3], k__DCT_CONST_ROUNDING); | |
2409 u[4] = _mm_add_epi32(u[4], k__DCT_CONST_ROUNDING); | |
2410 u[5] = _mm_add_epi32(u[5], k__DCT_CONST_ROUNDING); | |
2411 u[6] = _mm_add_epi32(u[6], k__DCT_CONST_ROUNDING); | |
2412 u[7] = _mm_add_epi32(u[7], k__DCT_CONST_ROUNDING); | |
2413 u[8] = _mm_add_epi32(u[8], k__DCT_CONST_ROUNDING); | |
2414 u[9] = _mm_add_epi32(u[9], k__DCT_CONST_ROUNDING); | |
2415 u[10] = _mm_add_epi32(u[10], k__DCT_CONST_ROUNDING); | |
2416 u[11] = _mm_add_epi32(u[11], k__DCT_CONST_ROUNDING); | |
2417 u[12] = _mm_add_epi32(u[12], k__DCT_CONST_ROUNDING); | |
2418 u[13] = _mm_add_epi32(u[13], k__DCT_CONST_ROUNDING); | |
2419 u[14] = _mm_add_epi32(u[14], k__DCT_CONST_ROUNDING); | |
2420 u[15] = _mm_add_epi32(u[15], k__DCT_CONST_ROUNDING); | |
2421 | |
2422 v[0] = _mm_srai_epi32(u[0], DCT_CONST_BITS); | |
2423 v[1] = _mm_srai_epi32(u[1], DCT_CONST_BITS); | |
2424 v[2] = _mm_srai_epi32(u[2], DCT_CONST_BITS); | |
2425 v[3] = _mm_srai_epi32(u[3], DCT_CONST_BITS); | |
2426 v[4] = _mm_srai_epi32(u[4], DCT_CONST_BITS); | |
2427 v[5] = _mm_srai_epi32(u[5], DCT_CONST_BITS); | |
2428 v[6] = _mm_srai_epi32(u[6], DCT_CONST_BITS); | |
2429 v[7] = _mm_srai_epi32(u[7], DCT_CONST_BITS); | |
2430 v[8] = _mm_srai_epi32(u[8], DCT_CONST_BITS); | |
2431 v[9] = _mm_srai_epi32(u[9], DCT_CONST_BITS); | |
2432 v[10] = _mm_srai_epi32(u[10], DCT_CONST_BITS); | |
2433 v[11] = _mm_srai_epi32(u[11], DCT_CONST_BITS); | |
2434 v[12] = _mm_srai_epi32(u[12], DCT_CONST_BITS); | |
2435 v[13] = _mm_srai_epi32(u[13], DCT_CONST_BITS); | |
2436 v[14] = _mm_srai_epi32(u[14], DCT_CONST_BITS); | |
2437 v[15] = _mm_srai_epi32(u[15], DCT_CONST_BITS); | |
2438 | |
2439 s[0] = _mm_add_epi16(x[0], x[2]); | |
2440 s[1] = _mm_add_epi16(x[1], x[3]); | |
2441 s[2] = _mm_sub_epi16(x[0], x[2]); | |
2442 s[3] = _mm_sub_epi16(x[1], x[3]); | |
2443 s[4] = _mm_packs_epi32(v[0], v[1]); | |
2444 s[5] = _mm_packs_epi32(v[2], v[3]); | |
2445 s[6] = _mm_packs_epi32(v[4], v[5]); | |
2446 s[7] = _mm_packs_epi32(v[6], v[7]); | |
2447 s[8] = _mm_add_epi16(x[8], x[10]); | |
2448 s[9] = _mm_add_epi16(x[9], x[11]); | |
2449 s[10] = _mm_sub_epi16(x[8], x[10]); | |
2450 s[11] = _mm_sub_epi16(x[9], x[11]); | |
2451 s[12] = _mm_packs_epi32(v[8], v[9]); | |
2452 s[13] = _mm_packs_epi32(v[10], v[11]); | |
2453 s[14] = _mm_packs_epi32(v[12], v[13]); | |
2454 s[15] = _mm_packs_epi32(v[14], v[15]); | |
2455 | |
2456       // stage 4: cospi(16,16) rotations on the s[2]/s[3], s[6]/s[7], s[10]/s[11] and s[14]/s[15] pairs, then reorder with sign flips for the final output | |
2457 u[0] = _mm_unpacklo_epi16(s[2], s[3]); | |
2458 u[1] = _mm_unpackhi_epi16(s[2], s[3]); | |
2459 u[2] = _mm_unpacklo_epi16(s[6], s[7]); | |
2460 u[3] = _mm_unpackhi_epi16(s[6], s[7]); | |
2461 u[4] = _mm_unpacklo_epi16(s[10], s[11]); | |
2462 u[5] = _mm_unpackhi_epi16(s[10], s[11]); | |
2463 u[6] = _mm_unpacklo_epi16(s[14], s[15]); | |
2464 u[7] = _mm_unpackhi_epi16(s[14], s[15]); | |
2465 | |
2466 v[0] = _mm_madd_epi16(u[0], k__cospi_m16_m16); | |
2467 v[1] = _mm_madd_epi16(u[1], k__cospi_m16_m16); | |
2468 v[2] = _mm_madd_epi16(u[0], k__cospi_p16_m16); | |
2469 v[3] = _mm_madd_epi16(u[1], k__cospi_p16_m16); | |
2470 v[4] = _mm_madd_epi16(u[2], k__cospi_p16_p16); | |
2471 v[5] = _mm_madd_epi16(u[3], k__cospi_p16_p16); | |
2472 v[6] = _mm_madd_epi16(u[2], k__cospi_m16_p16); | |
2473 v[7] = _mm_madd_epi16(u[3], k__cospi_m16_p16); | |
2474 v[8] = _mm_madd_epi16(u[4], k__cospi_p16_p16); | |
2475 v[9] = _mm_madd_epi16(u[5], k__cospi_p16_p16); | |
2476 v[10] = _mm_madd_epi16(u[4], k__cospi_m16_p16); | |
2477 v[11] = _mm_madd_epi16(u[5], k__cospi_m16_p16); | |
2478 v[12] = _mm_madd_epi16(u[6], k__cospi_m16_m16); | |
2479 v[13] = _mm_madd_epi16(u[7], k__cospi_m16_m16); | |
2480 v[14] = _mm_madd_epi16(u[6], k__cospi_p16_m16); | |
2481 v[15] = _mm_madd_epi16(u[7], k__cospi_p16_m16); | |
2482 | |
2483 u[0] = _mm_add_epi32(v[0], k__DCT_CONST_ROUNDING); | |
2484 u[1] = _mm_add_epi32(v[1], k__DCT_CONST_ROUNDING); | |
2485 u[2] = _mm_add_epi32(v[2], k__DCT_CONST_ROUNDING); | |
2486 u[3] = _mm_add_epi32(v[3], k__DCT_CONST_ROUNDING); | |
2487 u[4] = _mm_add_epi32(v[4], k__DCT_CONST_ROUNDING); | |
2488 u[5] = _mm_add_epi32(v[5], k__DCT_CONST_ROUNDING); | |
2489 u[6] = _mm_add_epi32(v[6], k__DCT_CONST_ROUNDING); | |
2490 u[7] = _mm_add_epi32(v[7], k__DCT_CONST_ROUNDING); | |
2491 u[8] = _mm_add_epi32(v[8], k__DCT_CONST_ROUNDING); | |
2492 u[9] = _mm_add_epi32(v[9], k__DCT_CONST_ROUNDING); | |
2493 u[10] = _mm_add_epi32(v[10], k__DCT_CONST_ROUNDING); | |
2494 u[11] = _mm_add_epi32(v[11], k__DCT_CONST_ROUNDING); | |
2495 u[12] = _mm_add_epi32(v[12], k__DCT_CONST_ROUNDING); | |
2496 u[13] = _mm_add_epi32(v[13], k__DCT_CONST_ROUNDING); | |
2497 u[14] = _mm_add_epi32(v[14], k__DCT_CONST_ROUNDING); | |
2498 u[15] = _mm_add_epi32(v[15], k__DCT_CONST_ROUNDING); | |
2499 | |
2500 v[0] = _mm_srai_epi32(u[0], DCT_CONST_BITS); | |
2501 v[1] = _mm_srai_epi32(u[1], DCT_CONST_BITS); | |
2502 v[2] = _mm_srai_epi32(u[2], DCT_CONST_BITS); | |
2503 v[3] = _mm_srai_epi32(u[3], DCT_CONST_BITS); | |
2504 v[4] = _mm_srai_epi32(u[4], DCT_CONST_BITS); | |
2505 v[5] = _mm_srai_epi32(u[5], DCT_CONST_BITS); | |
2506 v[6] = _mm_srai_epi32(u[6], DCT_CONST_BITS); | |
2507 v[7] = _mm_srai_epi32(u[7], DCT_CONST_BITS); | |
2508 v[8] = _mm_srai_epi32(u[8], DCT_CONST_BITS); | |
2509 v[9] = _mm_srai_epi32(u[9], DCT_CONST_BITS); | |
2510 v[10] = _mm_srai_epi32(u[10], DCT_CONST_BITS); | |
2511 v[11] = _mm_srai_epi32(u[11], DCT_CONST_BITS); | |
2512 v[12] = _mm_srai_epi32(u[12], DCT_CONST_BITS); | |
2513 v[13] = _mm_srai_epi32(u[13], DCT_CONST_BITS); | |
2514 v[14] = _mm_srai_epi32(u[14], DCT_CONST_BITS); | |
2515 v[15] = _mm_srai_epi32(u[15], DCT_CONST_BITS); | |
2516 | |
2517 in[0] = s[0]; | |
2518 in[1] = _mm_sub_epi16(kZero, s[8]); | |
2519 in[2] = s[12]; | |
2520 in[3] = _mm_sub_epi16(kZero, s[4]); | |
2521 in[4] = _mm_packs_epi32(v[4], v[5]); | |
2522 in[5] = _mm_packs_epi32(v[12], v[13]); | |
2523 in[6] = _mm_packs_epi32(v[8], v[9]); | |
2524 in[7] = _mm_packs_epi32(v[0], v[1]); | |
2525 in[8] = _mm_packs_epi32(v[2], v[3]); | |
2526 in[9] = _mm_packs_epi32(v[10], v[11]); | |
2527 in[10] = _mm_packs_epi32(v[14], v[15]); | |
2528 in[11] = _mm_packs_epi32(v[6], v[7]); | |
2529 in[12] = s[5]; | |
2530 in[13] = _mm_sub_epi16(kZero, s[13]); | |
2531 in[14] = s[9]; | |
2532 in[15] = _mm_sub_epi16(kZero, s[1]); | |
2533 } | |
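The madd / round / shift / pack sequence that repeats throughout the stages above is the vector form of the fixed-point rotation used by the scalar reference transform: each output is round((a*c0 + b*c1) / 2^DCT_CONST_BITS). A minimal scalar sketch of one such butterfly, reusing the DCT_CONST_ROUNDING and DCT_CONST_BITS constants already available in this translation unit (the helper name is illustrative only, not part of the library):

// One scalar butterfly: what _mm_madd_epi16 with pair(c0, c1) / pair(c1, -c0)
// followed by the rounding add, arithmetic shift and packs computes per lane.
static void butterfly_rotation_sketch(int16_t a, int16_t b, int c0, int c1,
                                      int16_t *out0, int16_t *out1) {
  const int t0 = a * c0 + b * c1;   // madd with pair_set_epi16(c0, c1)
  const int t1 = a * c1 - b * c0;   // madd with pair_set_epi16(c1, -c0)
  *out0 = (int16_t)((t0 + DCT_CONST_ROUNDING) >> DCT_CONST_BITS);
  *out1 = (int16_t)((t1 + DCT_CONST_ROUNDING) >> DCT_CONST_BITS);
}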
2534 | |
2535 void fdct16_avx2(__m128i *in0, __m128i *in1) { | |
2536 fdct16_8col_avx2(in0); | |
2537 fdct16_8col_avx2(in1); | |
2538 array_transpose_16x16_avx2(in0, in1); | |
2539 } | |
2540 | |
2541 void fadst16_avx2(__m128i *in0, __m128i *in1) { | |
2542 fadst16_8col_avx2(in0); | |
2543 fadst16_8col_avx2(in1); | |
2544 array_transpose_16x16_avx2(in0, in1); | |
2545 } | |
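fdct16_avx2 and fadst16_avx2 share one shape: run the 8-column kernel (fdct16_8col_avx2 or fadst16_8col_avx2) on each half of the 16x16 block, then transpose so that the next pass again works along the same register direction. Assuming the usual split where in0 carries columns 0-7 and in1 carries columns 8-15, one row per __m128i, a hypothetical wrapper expressing that pattern would look like this (illustration only, not used by the file):

// Hypothetical illustration of the two-half + transpose structure shared by
// fdct16_avx2 and fadst16_avx2 above.
static void txfm16_two_halves_sketch(__m128i *in0, __m128i *in1,
                                     void (*kernel_8col)(__m128i *)) {
  kernel_8col(in0);                      // 1-D transform of columns 0-7
  kernel_8col(in1);                      // 1-D transform of columns 8-15
  array_transpose_16x16_avx2(in0, in1);  // swap rows/columns for the next pass
}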
2546 | |
2547 void vp9_fht16x16_avx2(const int16_t *input, int16_t *output, | |
2548 int stride, int tx_type) { | |
2549 __m128i in0[16], in1[16]; | |
2550 | |
2551 switch (tx_type) { | |
2552 case DCT_DCT: | |
2553 vp9_fdct16x16_avx2(input, output, stride); | |
2554 break; | |
2555 case ADST_DCT: | |
2556 load_buffer_16x16_avx2(input, in0, in1, stride); | |
2557 fadst16_avx2(in0, in1); | |
2558 right_shift_16x16_avx2(in0, in1); | |
2559 fdct16_avx2(in0, in1); | |
2560 write_buffer_16x16_avx2(output, in0, in1, 16); | |
2561 break; | |
2562 case DCT_ADST: | |
2563 load_buffer_16x16_avx2(input, in0, in1, stride); | |
2564 fdct16_avx2(in0, in1); | |
2565 right_shift_16x16_avx2(in0, in1); | |
2566 fadst16_avx2(in0, in1); | |
2567 write_buffer_16x16_avx2(output, in0, in1, 16); | |
2568 break; | |
2569 case ADST_ADST: | |
2570 load_buffer_16x16_avx2(input, in0, in1, stride); | |
2571 fadst16_avx2(in0, in1); | |
2572 right_shift_16x16_avx2(in0, in1); | |
2573 fadst16_avx2(in0, in1); | |
2574 write_buffer_16x16_avx2(output, in0, in1, 16); | |
2575 break; | |
2576 default: | |
2577 assert(0); | |
2578 break; | |
2579 } | |
2580 } | |
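The switch above amounts to selecting the order of the two 1-D passes for each hybrid transform type, with a rounding right shift between the passes and DCT_DCT special-cased to the dedicated vp9_fdct16x16_avx2 path. Assuming the usual tx_type ordering DCT_DCT = 0, ADST_DCT = 1, DCT_ADST = 2, ADST_ADST = 3, the same dispatch could be written as a table; the sketch below only restates the cases and is not used by the file:

// First / second 1-D pass per tx_type, as dispatched by vp9_fht16x16_avx2.
typedef void (*txfm16_fn)(__m128i *, __m128i *);
static const txfm16_fn fht16_pass_order_sketch[4][2] = {
  { fdct16_avx2,  fdct16_avx2  },   // DCT_DCT (handled by vp9_fdct16x16_avx2)
  { fadst16_avx2, fdct16_avx2  },   // ADST_DCT
  { fdct16_avx2,  fadst16_avx2 },   // DCT_ADST
  { fadst16_avx2, fadst16_avx2 },   // ADST_ADST
};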
2581 | 15 |
2582 #define FDCT32x32_2D_AVX2 vp9_fdct32x32_rd_avx2 | 16 #define FDCT32x32_2D_AVX2 vp9_fdct32x32_rd_avx2 |
2583 #define FDCT32x32_HIGH_PRECISION 0 | 17 #define FDCT32x32_HIGH_PRECISION 0 |
2584 #include "vp9/encoder/x86/vp9_dct32x32_avx2.c" | 18 #include "vp9/encoder/x86/vp9_dct32x32_avx2.c" |
2585 #undef FDCT32x32_2D_AVX2 | 19 #undef FDCT32x32_2D_AVX2 |
2586 #undef FDCT32x32_HIGH_PRECISION | 20 #undef FDCT32x32_HIGH_PRECISION |
2587 | 21 |
2588 #define FDCT32x32_2D_AVX2 vp9_fdct32x32_avx2 | 22 #define FDCT32x32_2D_AVX2 vp9_fdct32x32_avx2 |
2589 #define FDCT32x32_HIGH_PRECISION 1 | 23 #define FDCT32x32_HIGH_PRECISION 1 |
2590 #include "vp9/encoder/x86/vp9_dct32x32_avx2.c" // NOLINT | 24 #include "vp9/encoder/x86/vp9_dct32x32_avx2.c" // NOLINT |
2591 #undef FDCT32x32_2D_AVX2 | 25 #undef FDCT32x32_2D_AVX2 |
2592 #undef FDCT32x32_HIGH_PRECISION | 26 #undef FDCT32x32_HIGH_PRECISION |
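The two #include blocks above instantiate vp9_dct32x32_avx2.c twice: the shared implementation is written against the FDCT32x32_2D_AVX2 and FDCT32x32_HIGH_PRECISION macros, so re-including it with different settings emits both the reduced-precision vp9_fdct32x32_rd_avx2 and the full-precision vp9_fdct32x32_avx2 from one body. A self-contained sketch of the same idea, using a macro instead of a re-included file (names and the rounding step are illustrative only):

// Same body expanded twice: a low-precision variant that rounds intermediate
// values early and a high-precision variant that keeps them.
#define DEFINE_ROUND_STAGE_SKETCH(name, high_precision) \
  static int name(int value) {                          \
    if (high_precision) return value;                   \
    return (value + 2) >> 2;                            \
  }
DEFINE_ROUND_STAGE_SKETCH(round_stage_rd_sketch, 0)
DEFINE_ROUND_STAGE_SKETCH(round_stage_hp_sketch, 1)
#undef DEFINE_ROUND_STAGE_SKETCH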