/*
 * Copyright (c) 2012 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include <emmintrin.h>  // SSE2
#include "vp9/common/vp9_idct.h"  // for cospi constants
#include "vp9/encoder/x86/vp9_dct_sse2.h"
#include "vp9/encoder/vp9_dct.h"
#include "vpx_ports/mem.h"

#if DCT_HIGH_BIT_DEPTH
#define ADD_EPI16 _mm_adds_epi16
#define SUB_EPI16 _mm_subs_epi16
#if FDCT32x32_HIGH_PRECISION
void vp9_fdct32x32_rows_c(const int16_t *intermediate, tran_low_t *out) {
  int i, j;
  for (i = 0; i < 32; ++i) {
    tran_high_t temp_in[32], temp_out[32];
    for (j = 0; j < 32; ++j)
      temp_in[j] = intermediate[j * 32 + i];
    vp9_fdct32(temp_in, temp_out, 0);
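    // The +1 (plus an extra +1 for negative values) makes the arithmetic
    // shift below round to nearest, symmetrically for positive and negative
    // inputs: (x + 1 + (x < 0)) >> 2.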
    for (j = 0; j < 32; ++j)
      out[j + i * 32] = (temp_out[j] + 1 + (temp_out[j] < 0)) >> 2;
  }
}
#define HIGH_FDCT32x32_2D_C vp9_highbd_fdct32x32_c
#define HIGH_FDCT32x32_2D_ROWS_C vp9_fdct32x32_rows_c
#else
void vp9_fdct32x32_rd_rows_c(const int16_t *intermediate, tran_low_t *out) {
  int i, j;
  for (i = 0; i < 32; ++i) {
    tran_high_t temp_in[32], temp_out[32];
    for (j = 0; j < 32; ++j)
      temp_in[j] = intermediate[j * 32 + i];
    vp9_fdct32(temp_in, temp_out, 1);
    for (j = 0; j < 32; ++j)
      out[j + i * 32] = temp_out[j];
  }
}
#define HIGH_FDCT32x32_2D_C vp9_highbd_fdct32x32_rd_c
#define HIGH_FDCT32x32_2D_ROWS_C vp9_fdct32x32_rd_rows_c
#endif  // FDCT32x32_HIGH_PRECISION
#else
#define ADD_EPI16 _mm_add_epi16
#define SUB_EPI16 _mm_sub_epi16
#endif  // DCT_HIGH_BIT_DEPTH
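// In high-bit-depth builds ADD_EPI16/SUB_EPI16 map to the saturating
// adds/subs variants, so a 16-bit overflow shows up as a saturated lane that
// the check_epi16_overflow_* calls below can detect before falling back to
// the C transform; the normal build uses the plain wrapping variants.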
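// Two-pass 2-D 32x32 forward DCT: pass 0 transforms `input` into the
// `intermediate` buffer and pass 1 transforms `intermediate` into
// output_org; each pass transposes its output, so both rows and columns
// get transformed.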
void FDCT32x32_2D(const int16_t *input,
                  tran_low_t *output_org, int stride) {
  // Calculate pre-multiplied strides
  const int str1 = stride;
  const int str2 = 2 * stride;
  const int str3 = 2 * stride + str1;
  // We need an intermediate buffer between passes.
  DECLARE_ALIGNED(16, int16_t, intermediate[32 * 32]);
  // Constants
  // In one case all eight lanes hold the same value; in all other cases a
  // pair of cosine constants is repeated four times, which is done by
  // building the 32-bit constant corresponding to that pair.
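  // e.g. pair_set_epi16(a, b) yields {a, b, a, b, a, b, a, b}; multiplying
  // an unpacked pair (x, y) by it with _mm_madd_epi16 gives a * x + b * y
  // in each 32-bit lane -- the rotation at the heart of every butterfly
  // stage below.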
  const __m128i k__cospi_p16_p16 = _mm_set1_epi16((int16_t)cospi_16_64);
  const __m128i k__cospi_p16_m16 = pair_set_epi16(+cospi_16_64, -cospi_16_64);
  const __m128i k__cospi_m08_p24 = pair_set_epi16(-cospi_8_64, cospi_24_64);
  const __m128i k__cospi_m24_m08 = pair_set_epi16(-cospi_24_64, -cospi_8_64);
  const __m128i k__cospi_p24_p08 = pair_set_epi16(+cospi_24_64, cospi_8_64);
  const __m128i k__cospi_p12_p20 = pair_set_epi16(+cospi_12_64, cospi_20_64);
  const __m128i k__cospi_m20_p12 = pair_set_epi16(-cospi_20_64, cospi_12_64);
  const __m128i k__cospi_m04_p28 = pair_set_epi16(-cospi_4_64, cospi_28_64);
  const __m128i k__cospi_p28_p04 = pair_set_epi16(+cospi_28_64, cospi_4_64);
  const __m128i k__cospi_m28_m04 = pair_set_epi16(-cospi_28_64, -cospi_4_64);
  const __m128i k__cospi_m12_m20 = pair_set_epi16(-cospi_12_64, -cospi_20_64);
  const __m128i k__cospi_p30_p02 = pair_set_epi16(+cospi_30_64, cospi_2_64);
  const __m128i k__cospi_p14_p18 = pair_set_epi16(+cospi_14_64, cospi_18_64);
  const __m128i k__cospi_p22_p10 = pair_set_epi16(+cospi_22_64, cospi_10_64);
  const __m128i k__cospi_p06_p26 = pair_set_epi16(+cospi_6_64, cospi_26_64);
  const __m128i k__cospi_m26_p06 = pair_set_epi16(-cospi_26_64, cospi_6_64);
  const __m128i k__cospi_m10_p22 = pair_set_epi16(-cospi_10_64, cospi_22_64);
  const __m128i k__cospi_m18_p14 = pair_set_epi16(-cospi_18_64, cospi_14_64);
  const __m128i k__cospi_m02_p30 = pair_set_epi16(-cospi_2_64, cospi_30_64);
  const __m128i k__cospi_p31_p01 = pair_set_epi16(+cospi_31_64, cospi_1_64);
  const __m128i k__cospi_p15_p17 = pair_set_epi16(+cospi_15_64, cospi_17_64);
  const __m128i k__cospi_p23_p09 = pair_set_epi16(+cospi_23_64, cospi_9_64);
  const __m128i k__cospi_p07_p25 = pair_set_epi16(+cospi_7_64, cospi_25_64);
  const __m128i k__cospi_m25_p07 = pair_set_epi16(-cospi_25_64, cospi_7_64);
  const __m128i k__cospi_m09_p23 = pair_set_epi16(-cospi_9_64, cospi_23_64);
  const __m128i k__cospi_m17_p15 = pair_set_epi16(-cospi_17_64, cospi_15_64);
  const __m128i k__cospi_m01_p31 = pair_set_epi16(-cospi_1_64, cospi_31_64);
  const __m128i k__cospi_p27_p05 = pair_set_epi16(+cospi_27_64, cospi_5_64);
  const __m128i k__cospi_p11_p21 = pair_set_epi16(+cospi_11_64, cospi_21_64);
  const __m128i k__cospi_p19_p13 = pair_set_epi16(+cospi_19_64, cospi_13_64);
  const __m128i k__cospi_p03_p29 = pair_set_epi16(+cospi_3_64, cospi_29_64);
  const __m128i k__cospi_m29_p03 = pair_set_epi16(-cospi_29_64, cospi_3_64);
  const __m128i k__cospi_m13_p19 = pair_set_epi16(-cospi_13_64, cospi_19_64);
  const __m128i k__cospi_m21_p11 = pair_set_epi16(-cospi_21_64, cospi_11_64);
  const __m128i k__cospi_m05_p27 = pair_set_epi16(-cospi_5_64, cospi_27_64);
  const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING);
  const __m128i kZero = _mm_set1_epi16(0);
  const __m128i kOne = _mm_set1_epi16(1);
  // Do the two transform/transpose passes
  int pass;
#if DCT_HIGH_BIT_DEPTH
  int overflow;
#endif
  for (pass = 0; pass < 2; ++pass) {
    // We process eight columns (transposed rows in second pass) at a time.
    int column_start;
    for (column_start = 0; column_start < 32; column_start += 8) {
      __m128i step1[32];
      __m128i step2[32];
      __m128i step3[32];
      __m128i out[32];
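      // The stages below mirror the butterfly network of the scalar
      // vp9_fdct32; step1/step2/step3 hold the outputs of successive
      // butterfly stages for the current batch of eight columns.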
      // Stage 1
      // Note: even though all the loads below are aligned, using the aligned
      // intrinsic makes the code slightly slower.
      if (0 == pass) {
        const int16_t *in = &input[column_start];
        // step1[i] = (in[ 0 * stride] + in[(32 - 1) * stride]) << 2;
        // Note: the next four blocks could be in a loop. That would help the
        // instruction cache but is actually slower.
        {
          const int16_t *ina = in + 0 * str1;
          const int16_t *inb = in + 31 * str1;
          __m128i *step1a = &step1[ 0];
          __m128i *step1b = &step1[31];
          const __m128i ina0 = _mm_loadu_si128((const __m128i *)(ina));
          const __m128i ina1 = _mm_loadu_si128((const __m128i *)(ina + str1));
          const __m128i ina2 = _mm_loadu_si128((const __m128i *)(ina + str2));
          const __m128i ina3 = _mm_loadu_si128((const __m128i *)(ina + str3));
          const __m128i inb3 = _mm_loadu_si128((const __m128i *)(inb - str3));
          const __m128i inb2 = _mm_loadu_si128((const __m128i *)(inb - str2));
          const __m128i inb1 = _mm_loadu_si128((const __m128i *)(inb - str1));
          const __m128i inb0 = _mm_loadu_si128((const __m128i *)(inb));
          step1a[ 0] = _mm_add_epi16(ina0, inb0);
          step1a[ 1] = _mm_add_epi16(ina1, inb1);
          step1a[ 2] = _mm_add_epi16(ina2, inb2);
          step1a[ 3] = _mm_add_epi16(ina3, inb3);
          step1b[-3] = _mm_sub_epi16(ina3, inb3);
          step1b[-2] = _mm_sub_epi16(ina2, inb2);
          step1b[-1] = _mm_sub_epi16(ina1, inb1);
          step1b[-0] = _mm_sub_epi16(ina0, inb0);
          step1a[ 0] = _mm_slli_epi16(step1a[ 0], 2);
          step1a[ 1] = _mm_slli_epi16(step1a[ 1], 2);
          step1a[ 2] = _mm_slli_epi16(step1a[ 2], 2);
          step1a[ 3] = _mm_slli_epi16(step1a[ 3], 2);
          step1b[-3] = _mm_slli_epi16(step1b[-3], 2);
          step1b[-2] = _mm_slli_epi16(step1b[-2], 2);
          step1b[-1] = _mm_slli_epi16(step1b[-1], 2);
          step1b[-0] = _mm_slli_epi16(step1b[-0], 2);
        }
        {
          const int16_t *ina = in + 4 * str1;
          const int16_t *inb = in + 27 * str1;
          __m128i *step1a = &step1[ 4];
          __m128i *step1b = &step1[27];
          const __m128i ina0 = _mm_loadu_si128((const __m128i *)(ina));
          const __m128i ina1 = _mm_loadu_si128((const __m128i *)(ina + str1));
          const __m128i ina2 = _mm_loadu_si128((const __m128i *)(ina + str2));
          const __m128i ina3 = _mm_loadu_si128((const __m128i *)(ina + str3));
          const __m128i inb3 = _mm_loadu_si128((const __m128i *)(inb - str3));
          const __m128i inb2 = _mm_loadu_si128((const __m128i *)(inb - str2));
          const __m128i inb1 = _mm_loadu_si128((const __m128i *)(inb - str1));
          const __m128i inb0 = _mm_loadu_si128((const __m128i *)(inb));
          step1a[ 0] = _mm_add_epi16(ina0, inb0);
          step1a[ 1] = _mm_add_epi16(ina1, inb1);
          step1a[ 2] = _mm_add_epi16(ina2, inb2);
          step1a[ 3] = _mm_add_epi16(ina3, inb3);
          step1b[-3] = _mm_sub_epi16(ina3, inb3);
          step1b[-2] = _mm_sub_epi16(ina2, inb2);
          step1b[-1] = _mm_sub_epi16(ina1, inb1);
          step1b[-0] = _mm_sub_epi16(ina0, inb0);
          step1a[ 0] = _mm_slli_epi16(step1a[ 0], 2);
          step1a[ 1] = _mm_slli_epi16(step1a[ 1], 2);
          step1a[ 2] = _mm_slli_epi16(step1a[ 2], 2);
          step1a[ 3] = _mm_slli_epi16(step1a[ 3], 2);
          step1b[-3] = _mm_slli_epi16(step1b[-3], 2);
          step1b[-2] = _mm_slli_epi16(step1b[-2], 2);
          step1b[-1] = _mm_slli_epi16(step1b[-1], 2);
          step1b[-0] = _mm_slli_epi16(step1b[-0], 2);
        }
        {
          const int16_t *ina = in + 8 * str1;
          const int16_t *inb = in + 23 * str1;
          __m128i *step1a = &step1[ 8];
          __m128i *step1b = &step1[23];
          const __m128i ina0 = _mm_loadu_si128((const __m128i *)(ina));
          const __m128i ina1 = _mm_loadu_si128((const __m128i *)(ina + str1));
          const __m128i ina2 = _mm_loadu_si128((const __m128i *)(ina + str2));
          const __m128i ina3 = _mm_loadu_si128((const __m128i *)(ina + str3));
          const __m128i inb3 = _mm_loadu_si128((const __m128i *)(inb - str3));
          const __m128i inb2 = _mm_loadu_si128((const __m128i *)(inb - str2));
          const __m128i inb1 = _mm_loadu_si128((const __m128i *)(inb - str1));
          const __m128i inb0 = _mm_loadu_si128((const __m128i *)(inb));
          step1a[ 0] = _mm_add_epi16(ina0, inb0);
          step1a[ 1] = _mm_add_epi16(ina1, inb1);
          step1a[ 2] = _mm_add_epi16(ina2, inb2);
          step1a[ 3] = _mm_add_epi16(ina3, inb3);
          step1b[-3] = _mm_sub_epi16(ina3, inb3);
          step1b[-2] = _mm_sub_epi16(ina2, inb2);
          step1b[-1] = _mm_sub_epi16(ina1, inb1);
          step1b[-0] = _mm_sub_epi16(ina0, inb0);
          step1a[ 0] = _mm_slli_epi16(step1a[ 0], 2);
          step1a[ 1] = _mm_slli_epi16(step1a[ 1], 2);
          step1a[ 2] = _mm_slli_epi16(step1a[ 2], 2);
          step1a[ 3] = _mm_slli_epi16(step1a[ 3], 2);
          step1b[-3] = _mm_slli_epi16(step1b[-3], 2);
          step1b[-2] = _mm_slli_epi16(step1b[-2], 2);
          step1b[-1] = _mm_slli_epi16(step1b[-1], 2);
          step1b[-0] = _mm_slli_epi16(step1b[-0], 2);
        }
        {
          const int16_t *ina = in + 12 * str1;
          const int16_t *inb = in + 19 * str1;
          __m128i *step1a = &step1[12];
          __m128i *step1b = &step1[19];
          const __m128i ina0 = _mm_loadu_si128((const __m128i *)(ina));
          const __m128i ina1 = _mm_loadu_si128((const __m128i *)(ina + str1));
          const __m128i ina2 = _mm_loadu_si128((const __m128i *)(ina + str2));
          const __m128i ina3 = _mm_loadu_si128((const __m128i *)(ina + str3));
          const __m128i inb3 = _mm_loadu_si128((const __m128i *)(inb - str3));
          const __m128i inb2 = _mm_loadu_si128((const __m128i *)(inb - str2));
          const __m128i inb1 = _mm_loadu_si128((const __m128i *)(inb - str1));
          const __m128i inb0 = _mm_loadu_si128((const __m128i *)(inb));
          step1a[ 0] = _mm_add_epi16(ina0, inb0);
          step1a[ 1] = _mm_add_epi16(ina1, inb1);
          step1a[ 2] = _mm_add_epi16(ina2, inb2);
          step1a[ 3] = _mm_add_epi16(ina3, inb3);
          step1b[-3] = _mm_sub_epi16(ina3, inb3);
          step1b[-2] = _mm_sub_epi16(ina2, inb2);
          step1b[-1] = _mm_sub_epi16(ina1, inb1);
          step1b[-0] = _mm_sub_epi16(ina0, inb0);
          step1a[ 0] = _mm_slli_epi16(step1a[ 0], 2);
          step1a[ 1] = _mm_slli_epi16(step1a[ 1], 2);
          step1a[ 2] = _mm_slli_epi16(step1a[ 2], 2);
          step1a[ 3] = _mm_slli_epi16(step1a[ 3], 2);
          step1b[-3] = _mm_slli_epi16(step1b[-3], 2);
          step1b[-2] = _mm_slli_epi16(step1b[-2], 2);
          step1b[-1] = _mm_slli_epi16(step1b[-1], 2);
          step1b[-0] = _mm_slli_epi16(step1b[-0], 2);
        }
      } else {
        int16_t *in = &intermediate[column_start];
        // step1[i] = in[ 0 * 32] + in[(32 - 1) * 32];
        // Note: using the same pointer-offset approach as above is
        // counter-productive here, since all the offsets can be computed at
        // compile time.
        // Note: the next four blocks could be in a loop. That would help the
        // instruction cache but is actually slower.
        {
          __m128i in00 = _mm_loadu_si128((const __m128i *)(in + 0 * 32));
          __m128i in01 = _mm_loadu_si128((const __m128i *)(in + 1 * 32));
          __m128i in02 = _mm_loadu_si128((const __m128i *)(in + 2 * 32));
          __m128i in03 = _mm_loadu_si128((const __m128i *)(in + 3 * 32));
          __m128i in28 = _mm_loadu_si128((const __m128i *)(in + 28 * 32));
          __m128i in29 = _mm_loadu_si128((const __m128i *)(in + 29 * 32));
          __m128i in30 = _mm_loadu_si128((const __m128i *)(in + 30 * 32));
          __m128i in31 = _mm_loadu_si128((const __m128i *)(in + 31 * 32));
          step1[0] = ADD_EPI16(in00, in31);
          step1[1] = ADD_EPI16(in01, in30);
          step1[2] = ADD_EPI16(in02, in29);
          step1[3] = ADD_EPI16(in03, in28);
          step1[28] = SUB_EPI16(in03, in28);
          step1[29] = SUB_EPI16(in02, in29);
          step1[30] = SUB_EPI16(in01, in30);
          step1[31] = SUB_EPI16(in00, in31);
#if DCT_HIGH_BIT_DEPTH
          overflow = check_epi16_overflow_x8(&step1[0], &step1[1], &step1[2],
                                             &step1[3], &step1[28], &step1[29],
                                             &step1[30], &step1[31]);
          if (overflow) {
            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
            return;
          }
#endif  // DCT_HIGH_BIT_DEPTH
        }
        {
          __m128i in04 = _mm_loadu_si128((const __m128i *)(in + 4 * 32));
          __m128i in05 = _mm_loadu_si128((const __m128i *)(in + 5 * 32));
          __m128i in06 = _mm_loadu_si128((const __m128i *)(in + 6 * 32));
          __m128i in07 = _mm_loadu_si128((const __m128i *)(in + 7 * 32));
          __m128i in24 = _mm_loadu_si128((const __m128i *)(in + 24 * 32));
          __m128i in25 = _mm_loadu_si128((const __m128i *)(in + 25 * 32));
          __m128i in26 = _mm_loadu_si128((const __m128i *)(in + 26 * 32));
          __m128i in27 = _mm_loadu_si128((const __m128i *)(in + 27 * 32));
          step1[4] = ADD_EPI16(in04, in27);
          step1[5] = ADD_EPI16(in05, in26);
          step1[6] = ADD_EPI16(in06, in25);
          step1[7] = ADD_EPI16(in07, in24);
          step1[24] = SUB_EPI16(in07, in24);
          step1[25] = SUB_EPI16(in06, in25);
          step1[26] = SUB_EPI16(in05, in26);
          step1[27] = SUB_EPI16(in04, in27);
#if DCT_HIGH_BIT_DEPTH
          overflow = check_epi16_overflow_x8(&step1[4], &step1[5], &step1[6],
                                             &step1[7], &step1[24], &step1[25],
                                             &step1[26], &step1[27]);
          if (overflow) {
            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
            return;
          }
#endif  // DCT_HIGH_BIT_DEPTH
        }
        {
          __m128i in08 = _mm_loadu_si128((const __m128i *)(in + 8 * 32));
          __m128i in09 = _mm_loadu_si128((const __m128i *)(in + 9 * 32));
          __m128i in10 = _mm_loadu_si128((const __m128i *)(in + 10 * 32));
          __m128i in11 = _mm_loadu_si128((const __m128i *)(in + 11 * 32));
          __m128i in20 = _mm_loadu_si128((const __m128i *)(in + 20 * 32));
          __m128i in21 = _mm_loadu_si128((const __m128i *)(in + 21 * 32));
          __m128i in22 = _mm_loadu_si128((const __m128i *)(in + 22 * 32));
          __m128i in23 = _mm_loadu_si128((const __m128i *)(in + 23 * 32));
          step1[8] = ADD_EPI16(in08, in23);
          step1[9] = ADD_EPI16(in09, in22);
          step1[10] = ADD_EPI16(in10, in21);
          step1[11] = ADD_EPI16(in11, in20);
          step1[20] = SUB_EPI16(in11, in20);
          step1[21] = SUB_EPI16(in10, in21);
          step1[22] = SUB_EPI16(in09, in22);
          step1[23] = SUB_EPI16(in08, in23);
#if DCT_HIGH_BIT_DEPTH
          overflow = check_epi16_overflow_x8(&step1[8], &step1[9], &step1[10],
                                             &step1[11], &step1[20], &step1[21],
                                             &step1[22], &step1[23]);
          if (overflow) {
            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
            return;
          }
#endif  // DCT_HIGH_BIT_DEPTH
        }
        {
          __m128i in12 = _mm_loadu_si128((const __m128i *)(in + 12 * 32));
          __m128i in13 = _mm_loadu_si128((const __m128i *)(in + 13 * 32));
          __m128i in14 = _mm_loadu_si128((const __m128i *)(in + 14 * 32));
          __m128i in15 = _mm_loadu_si128((const __m128i *)(in + 15 * 32));
          __m128i in16 = _mm_loadu_si128((const __m128i *)(in + 16 * 32));
          __m128i in17 = _mm_loadu_si128((const __m128i *)(in + 17 * 32));
          __m128i in18 = _mm_loadu_si128((const __m128i *)(in + 18 * 32));
          __m128i in19 = _mm_loadu_si128((const __m128i *)(in + 19 * 32));
          step1[12] = ADD_EPI16(in12, in19);
          step1[13] = ADD_EPI16(in13, in18);
          step1[14] = ADD_EPI16(in14, in17);
          step1[15] = ADD_EPI16(in15, in16);
          step1[16] = SUB_EPI16(in15, in16);
          step1[17] = SUB_EPI16(in14, in17);
          step1[18] = SUB_EPI16(in13, in18);
          step1[19] = SUB_EPI16(in12, in19);
#if DCT_HIGH_BIT_DEPTH
          overflow = check_epi16_overflow_x8(&step1[12], &step1[13], &step1[14],
                                             &step1[15], &step1[16], &step1[17],
                                             &step1[18], &step1[19]);
          if (overflow) {
            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
            return;
          }
#endif  // DCT_HIGH_BIT_DEPTH
        }
      }
      // Stage 2
      {
        step2[0] = ADD_EPI16(step1[0], step1[15]);
        step2[1] = ADD_EPI16(step1[1], step1[14]);
        step2[2] = ADD_EPI16(step1[2], step1[13]);
        step2[3] = ADD_EPI16(step1[3], step1[12]);
        step2[4] = ADD_EPI16(step1[4], step1[11]);
        step2[5] = ADD_EPI16(step1[5], step1[10]);
        step2[6] = ADD_EPI16(step1[6], step1[ 9]);
        step2[7] = ADD_EPI16(step1[7], step1[ 8]);
        step2[8] = SUB_EPI16(step1[7], step1[ 8]);
        step2[9] = SUB_EPI16(step1[6], step1[ 9]);
        step2[10] = SUB_EPI16(step1[5], step1[10]);
        step2[11] = SUB_EPI16(step1[4], step1[11]);
        step2[12] = SUB_EPI16(step1[3], step1[12]);
        step2[13] = SUB_EPI16(step1[2], step1[13]);
        step2[14] = SUB_EPI16(step1[1], step1[14]);
        step2[15] = SUB_EPI16(step1[0], step1[15]);
#if DCT_HIGH_BIT_DEPTH
        overflow = check_epi16_overflow_x16(
            &step2[0], &step2[1], &step2[2], &step2[3],
            &step2[4], &step2[5], &step2[6], &step2[7],
            &step2[8], &step2[9], &step2[10], &step2[11],
            &step2[12], &step2[13], &step2[14], &step2[15]);
        if (overflow) {
          if (pass == 0)
            HIGH_FDCT32x32_2D_C(input, output_org, stride);
          else
            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
          return;
        }
#endif  // DCT_HIGH_BIT_DEPTH
      }
      {
        const __m128i s2_20_0 = _mm_unpacklo_epi16(step1[27], step1[20]);
        const __m128i s2_20_1 = _mm_unpackhi_epi16(step1[27], step1[20]);
        const __m128i s2_21_0 = _mm_unpacklo_epi16(step1[26], step1[21]);
        const __m128i s2_21_1 = _mm_unpackhi_epi16(step1[26], step1[21]);
        const __m128i s2_22_0 = _mm_unpacklo_epi16(step1[25], step1[22]);
        const __m128i s2_22_1 = _mm_unpackhi_epi16(step1[25], step1[22]);
        const __m128i s2_23_0 = _mm_unpacklo_epi16(step1[24], step1[23]);
        const __m128i s2_23_1 = _mm_unpackhi_epi16(step1[24], step1[23]);
        const __m128i s2_20_2 = _mm_madd_epi16(s2_20_0, k__cospi_p16_m16);
        const __m128i s2_20_3 = _mm_madd_epi16(s2_20_1, k__cospi_p16_m16);
        const __m128i s2_21_2 = _mm_madd_epi16(s2_21_0, k__cospi_p16_m16);
        const __m128i s2_21_3 = _mm_madd_epi16(s2_21_1, k__cospi_p16_m16);
        const __m128i s2_22_2 = _mm_madd_epi16(s2_22_0, k__cospi_p16_m16);
        const __m128i s2_22_3 = _mm_madd_epi16(s2_22_1, k__cospi_p16_m16);
        const __m128i s2_23_2 = _mm_madd_epi16(s2_23_0, k__cospi_p16_m16);
        const __m128i s2_23_3 = _mm_madd_epi16(s2_23_1, k__cospi_p16_m16);
        const __m128i s2_24_2 = _mm_madd_epi16(s2_23_0, k__cospi_p16_p16);
        const __m128i s2_24_3 = _mm_madd_epi16(s2_23_1, k__cospi_p16_p16);
        const __m128i s2_25_2 = _mm_madd_epi16(s2_22_0, k__cospi_p16_p16);
        const __m128i s2_25_3 = _mm_madd_epi16(s2_22_1, k__cospi_p16_p16);
        const __m128i s2_26_2 = _mm_madd_epi16(s2_21_0, k__cospi_p16_p16);
        const __m128i s2_26_3 = _mm_madd_epi16(s2_21_1, k__cospi_p16_p16);
        const __m128i s2_27_2 = _mm_madd_epi16(s2_20_0, k__cospi_p16_p16);
        const __m128i s2_27_3 = _mm_madd_epi16(s2_20_1, k__cospi_p16_p16);
        // dct_const_round_shift
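        // i.e. (x + (1 << (DCT_CONST_BITS - 1))) >> DCT_CONST_BITS, rounding
        // the fixed-point products to nearest.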
        const __m128i s2_20_4 = _mm_add_epi32(s2_20_2, k__DCT_CONST_ROUNDING);
        const __m128i s2_20_5 = _mm_add_epi32(s2_20_3, k__DCT_CONST_ROUNDING);
        const __m128i s2_21_4 = _mm_add_epi32(s2_21_2, k__DCT_CONST_ROUNDING);
        const __m128i s2_21_5 = _mm_add_epi32(s2_21_3, k__DCT_CONST_ROUNDING);
        const __m128i s2_22_4 = _mm_add_epi32(s2_22_2, k__DCT_CONST_ROUNDING);
        const __m128i s2_22_5 = _mm_add_epi32(s2_22_3, k__DCT_CONST_ROUNDING);
        const __m128i s2_23_4 = _mm_add_epi32(s2_23_2, k__DCT_CONST_ROUNDING);
        const __m128i s2_23_5 = _mm_add_epi32(s2_23_3, k__DCT_CONST_ROUNDING);
        const __m128i s2_24_4 = _mm_add_epi32(s2_24_2, k__DCT_CONST_ROUNDING);
        const __m128i s2_24_5 = _mm_add_epi32(s2_24_3, k__DCT_CONST_ROUNDING);
        const __m128i s2_25_4 = _mm_add_epi32(s2_25_2, k__DCT_CONST_ROUNDING);
        const __m128i s2_25_5 = _mm_add_epi32(s2_25_3, k__DCT_CONST_ROUNDING);
        const __m128i s2_26_4 = _mm_add_epi32(s2_26_2, k__DCT_CONST_ROUNDING);
        const __m128i s2_26_5 = _mm_add_epi32(s2_26_3, k__DCT_CONST_ROUNDING);
        const __m128i s2_27_4 = _mm_add_epi32(s2_27_2, k__DCT_CONST_ROUNDING);
        const __m128i s2_27_5 = _mm_add_epi32(s2_27_3, k__DCT_CONST_ROUNDING);
        const __m128i s2_20_6 = _mm_srai_epi32(s2_20_4, DCT_CONST_BITS);
        const __m128i s2_20_7 = _mm_srai_epi32(s2_20_5, DCT_CONST_BITS);
        const __m128i s2_21_6 = _mm_srai_epi32(s2_21_4, DCT_CONST_BITS);
        const __m128i s2_21_7 = _mm_srai_epi32(s2_21_5, DCT_CONST_BITS);
        const __m128i s2_22_6 = _mm_srai_epi32(s2_22_4, DCT_CONST_BITS);
        const __m128i s2_22_7 = _mm_srai_epi32(s2_22_5, DCT_CONST_BITS);
        const __m128i s2_23_6 = _mm_srai_epi32(s2_23_4, DCT_CONST_BITS);
        const __m128i s2_23_7 = _mm_srai_epi32(s2_23_5, DCT_CONST_BITS);
        const __m128i s2_24_6 = _mm_srai_epi32(s2_24_4, DCT_CONST_BITS);
        const __m128i s2_24_7 = _mm_srai_epi32(s2_24_5, DCT_CONST_BITS);
        const __m128i s2_25_6 = _mm_srai_epi32(s2_25_4, DCT_CONST_BITS);
        const __m128i s2_25_7 = _mm_srai_epi32(s2_25_5, DCT_CONST_BITS);
        const __m128i s2_26_6 = _mm_srai_epi32(s2_26_4, DCT_CONST_BITS);
        const __m128i s2_26_7 = _mm_srai_epi32(s2_26_5, DCT_CONST_BITS);
        const __m128i s2_27_6 = _mm_srai_epi32(s2_27_4, DCT_CONST_BITS);
        const __m128i s2_27_7 = _mm_srai_epi32(s2_27_5, DCT_CONST_BITS);
        // Combine
        step2[20] = _mm_packs_epi32(s2_20_6, s2_20_7);
        step2[21] = _mm_packs_epi32(s2_21_6, s2_21_7);
        step2[22] = _mm_packs_epi32(s2_22_6, s2_22_7);
        step2[23] = _mm_packs_epi32(s2_23_6, s2_23_7);
        step2[24] = _mm_packs_epi32(s2_24_6, s2_24_7);
        step2[25] = _mm_packs_epi32(s2_25_6, s2_25_7);
        step2[26] = _mm_packs_epi32(s2_26_6, s2_26_7);
        step2[27] = _mm_packs_epi32(s2_27_6, s2_27_7);
#if DCT_HIGH_BIT_DEPTH
        overflow = check_epi16_overflow_x8(&step2[20], &step2[21], &step2[22],
                                           &step2[23], &step2[24], &step2[25],
                                           &step2[26], &step2[27]);
        if (overflow) {
          if (pass == 0)
            HIGH_FDCT32x32_2D_C(input, output_org, stride);
          else
            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
          return;
        }
#endif  // DCT_HIGH_BIT_DEPTH
      }

#if !FDCT32x32_HIGH_PRECISION
      // Halve the magnitude so that the intermediate values stay within the
      // range of 16 bits.
      if (1 == pass) {
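        // _mm_cmplt_epi16(x, kZero) is -1 (all ones) in negative lanes, so
        // subtracting that mask adds 1 to negative values; together with the
        // +kOne and the >> 2 below this computes (x + 1 + (x < 0)) >> 2, the
        // same rounding as the C row transform above.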
        __m128i s3_00_0 = _mm_cmplt_epi16(step2[ 0], kZero);
        __m128i s3_01_0 = _mm_cmplt_epi16(step2[ 1], kZero);
        __m128i s3_02_0 = _mm_cmplt_epi16(step2[ 2], kZero);
        __m128i s3_03_0 = _mm_cmplt_epi16(step2[ 3], kZero);
        __m128i s3_04_0 = _mm_cmplt_epi16(step2[ 4], kZero);
        __m128i s3_05_0 = _mm_cmplt_epi16(step2[ 5], kZero);
        __m128i s3_06_0 = _mm_cmplt_epi16(step2[ 6], kZero);
        __m128i s3_07_0 = _mm_cmplt_epi16(step2[ 7], kZero);
        __m128i s2_08_0 = _mm_cmplt_epi16(step2[ 8], kZero);
        __m128i s2_09_0 = _mm_cmplt_epi16(step2[ 9], kZero);
        __m128i s3_10_0 = _mm_cmplt_epi16(step2[10], kZero);
        __m128i s3_11_0 = _mm_cmplt_epi16(step2[11], kZero);
        __m128i s3_12_0 = _mm_cmplt_epi16(step2[12], kZero);
        __m128i s3_13_0 = _mm_cmplt_epi16(step2[13], kZero);
        __m128i s2_14_0 = _mm_cmplt_epi16(step2[14], kZero);
        __m128i s2_15_0 = _mm_cmplt_epi16(step2[15], kZero);
        __m128i s3_16_0 = _mm_cmplt_epi16(step1[16], kZero);
        __m128i s3_17_0 = _mm_cmplt_epi16(step1[17], kZero);
        __m128i s3_18_0 = _mm_cmplt_epi16(step1[18], kZero);
        __m128i s3_19_0 = _mm_cmplt_epi16(step1[19], kZero);
        __m128i s3_20_0 = _mm_cmplt_epi16(step2[20], kZero);
        __m128i s3_21_0 = _mm_cmplt_epi16(step2[21], kZero);
        __m128i s3_22_0 = _mm_cmplt_epi16(step2[22], kZero);
        __m128i s3_23_0 = _mm_cmplt_epi16(step2[23], kZero);
        __m128i s3_24_0 = _mm_cmplt_epi16(step2[24], kZero);
        __m128i s3_25_0 = _mm_cmplt_epi16(step2[25], kZero);
        __m128i s3_26_0 = _mm_cmplt_epi16(step2[26], kZero);
        __m128i s3_27_0 = _mm_cmplt_epi16(step2[27], kZero);
        __m128i s3_28_0 = _mm_cmplt_epi16(step1[28], kZero);
        __m128i s3_29_0 = _mm_cmplt_epi16(step1[29], kZero);
        __m128i s3_30_0 = _mm_cmplt_epi16(step1[30], kZero);
        __m128i s3_31_0 = _mm_cmplt_epi16(step1[31], kZero);

        step2[0] = SUB_EPI16(step2[ 0], s3_00_0);
        step2[1] = SUB_EPI16(step2[ 1], s3_01_0);
        step2[2] = SUB_EPI16(step2[ 2], s3_02_0);
        step2[3] = SUB_EPI16(step2[ 3], s3_03_0);
        step2[4] = SUB_EPI16(step2[ 4], s3_04_0);
        step2[5] = SUB_EPI16(step2[ 5], s3_05_0);
        step2[6] = SUB_EPI16(step2[ 6], s3_06_0);
        step2[7] = SUB_EPI16(step2[ 7], s3_07_0);
        step2[8] = SUB_EPI16(step2[ 8], s2_08_0);
        step2[9] = SUB_EPI16(step2[ 9], s2_09_0);
        step2[10] = SUB_EPI16(step2[10], s3_10_0);
        step2[11] = SUB_EPI16(step2[11], s3_11_0);
        step2[12] = SUB_EPI16(step2[12], s3_12_0);
        step2[13] = SUB_EPI16(step2[13], s3_13_0);
        step2[14] = SUB_EPI16(step2[14], s2_14_0);
        step2[15] = SUB_EPI16(step2[15], s2_15_0);
        step1[16] = SUB_EPI16(step1[16], s3_16_0);
        step1[17] = SUB_EPI16(step1[17], s3_17_0);
        step1[18] = SUB_EPI16(step1[18], s3_18_0);
        step1[19] = SUB_EPI16(step1[19], s3_19_0);
        step2[20] = SUB_EPI16(step2[20], s3_20_0);
        step2[21] = SUB_EPI16(step2[21], s3_21_0);
        step2[22] = SUB_EPI16(step2[22], s3_22_0);
        step2[23] = SUB_EPI16(step2[23], s3_23_0);
        step2[24] = SUB_EPI16(step2[24], s3_24_0);
        step2[25] = SUB_EPI16(step2[25], s3_25_0);
        step2[26] = SUB_EPI16(step2[26], s3_26_0);
        step2[27] = SUB_EPI16(step2[27], s3_27_0);
        step1[28] = SUB_EPI16(step1[28], s3_28_0);
        step1[29] = SUB_EPI16(step1[29], s3_29_0);
        step1[30] = SUB_EPI16(step1[30], s3_30_0);
        step1[31] = SUB_EPI16(step1[31], s3_31_0);
#if DCT_HIGH_BIT_DEPTH
        overflow = check_epi16_overflow_x32(
            &step2[0], &step2[1], &step2[2], &step2[3],
            &step2[4], &step2[5], &step2[6], &step2[7],
            &step2[8], &step2[9], &step2[10], &step2[11],
            &step2[12], &step2[13], &step2[14], &step2[15],
            &step1[16], &step1[17], &step1[18], &step1[19],
            &step2[20], &step2[21], &step2[22], &step2[23],
            &step2[24], &step2[25], &step2[26], &step2[27],
            &step1[28], &step1[29], &step1[30], &step1[31]);
        if (overflow) {
          HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
          return;
        }
#endif  // DCT_HIGH_BIT_DEPTH
        step2[0] = _mm_add_epi16(step2[ 0], kOne);
        step2[1] = _mm_add_epi16(step2[ 1], kOne);
        step2[2] = _mm_add_epi16(step2[ 2], kOne);
        step2[3] = _mm_add_epi16(step2[ 3], kOne);
        step2[4] = _mm_add_epi16(step2[ 4], kOne);
        step2[5] = _mm_add_epi16(step2[ 5], kOne);
        step2[6] = _mm_add_epi16(step2[ 6], kOne);
        step2[7] = _mm_add_epi16(step2[ 7], kOne);
        step2[8] = _mm_add_epi16(step2[ 8], kOne);
        step2[9] = _mm_add_epi16(step2[ 9], kOne);
        step2[10] = _mm_add_epi16(step2[10], kOne);
        step2[11] = _mm_add_epi16(step2[11], kOne);
        step2[12] = _mm_add_epi16(step2[12], kOne);
        step2[13] = _mm_add_epi16(step2[13], kOne);
        step2[14] = _mm_add_epi16(step2[14], kOne);
        step2[15] = _mm_add_epi16(step2[15], kOne);
        step1[16] = _mm_add_epi16(step1[16], kOne);
        step1[17] = _mm_add_epi16(step1[17], kOne);
        step1[18] = _mm_add_epi16(step1[18], kOne);
        step1[19] = _mm_add_epi16(step1[19], kOne);
        step2[20] = _mm_add_epi16(step2[20], kOne);
        step2[21] = _mm_add_epi16(step2[21], kOne);
        step2[22] = _mm_add_epi16(step2[22], kOne);
        step2[23] = _mm_add_epi16(step2[23], kOne);
        step2[24] = _mm_add_epi16(step2[24], kOne);
        step2[25] = _mm_add_epi16(step2[25], kOne);
        step2[26] = _mm_add_epi16(step2[26], kOne);
        step2[27] = _mm_add_epi16(step2[27], kOne);
        step1[28] = _mm_add_epi16(step1[28], kOne);
        step1[29] = _mm_add_epi16(step1[29], kOne);
        step1[30] = _mm_add_epi16(step1[30], kOne);
        step1[31] = _mm_add_epi16(step1[31], kOne);

        step2[0] = _mm_srai_epi16(step2[ 0], 2);
        step2[1] = _mm_srai_epi16(step2[ 1], 2);
        step2[2] = _mm_srai_epi16(step2[ 2], 2);
        step2[3] = _mm_srai_epi16(step2[ 3], 2);
        step2[4] = _mm_srai_epi16(step2[ 4], 2);
        step2[5] = _mm_srai_epi16(step2[ 5], 2);
        step2[6] = _mm_srai_epi16(step2[ 6], 2);
        step2[7] = _mm_srai_epi16(step2[ 7], 2);
        step2[8] = _mm_srai_epi16(step2[ 8], 2);
        step2[9] = _mm_srai_epi16(step2[ 9], 2);
        step2[10] = _mm_srai_epi16(step2[10], 2);
        step2[11] = _mm_srai_epi16(step2[11], 2);
        step2[12] = _mm_srai_epi16(step2[12], 2);
        step2[13] = _mm_srai_epi16(step2[13], 2);
        step2[14] = _mm_srai_epi16(step2[14], 2);
        step2[15] = _mm_srai_epi16(step2[15], 2);
        step1[16] = _mm_srai_epi16(step1[16], 2);
        step1[17] = _mm_srai_epi16(step1[17], 2);
        step1[18] = _mm_srai_epi16(step1[18], 2);
        step1[19] = _mm_srai_epi16(step1[19], 2);
        step2[20] = _mm_srai_epi16(step2[20], 2);
        step2[21] = _mm_srai_epi16(step2[21], 2);
        step2[22] = _mm_srai_epi16(step2[22], 2);
        step2[23] = _mm_srai_epi16(step2[23], 2);
        step2[24] = _mm_srai_epi16(step2[24], 2);
        step2[25] = _mm_srai_epi16(step2[25], 2);
        step2[26] = _mm_srai_epi16(step2[26], 2);
        step2[27] = _mm_srai_epi16(step2[27], 2);
        step1[28] = _mm_srai_epi16(step1[28], 2);
        step1[29] = _mm_srai_epi16(step1[29], 2);
        step1[30] = _mm_srai_epi16(step1[30], 2);
        step1[31] = _mm_srai_epi16(step1[31], 2);
      }
#endif  // !FDCT32x32_HIGH_PRECISION

#if FDCT32x32_HIGH_PRECISION
      if (pass == 0) {
#endif
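        // In the FDCT32x32_HIGH_PRECISION build, the 16-bit path below only
        // serves pass 0; pass 1 is carried out on 32-bit intermediates later
        // in this function.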
632 // Stage 3 | |
633 { | |
634 step3[0] = ADD_EPI16(step2[(8 - 1)], step2[0]); | |
635 step3[1] = ADD_EPI16(step2[(8 - 2)], step2[1]); | |
636 step3[2] = ADD_EPI16(step2[(8 - 3)], step2[2]); | |
637 step3[3] = ADD_EPI16(step2[(8 - 4)], step2[3]); | |
638 step3[4] = SUB_EPI16(step2[(8 - 5)], step2[4]); | |
639 step3[5] = SUB_EPI16(step2[(8 - 6)], step2[5]); | |
640 step3[6] = SUB_EPI16(step2[(8 - 7)], step2[6]); | |
641 step3[7] = SUB_EPI16(step2[(8 - 8)], step2[7]); | |
642 #if DCT_HIGH_BIT_DEPTH | |
643 overflow = check_epi16_overflow_x8(&step3[0], &step3[1], &step3[2], | |
644 &step3[3], &step3[4], &step3[5], | |
645 &step3[6], &step3[7]); | |
646 if (overflow) { | |
647 if (pass == 0) | |
648 HIGH_FDCT32x32_2D_C(input, output_org, stride); | |
649 else | |
650 HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org); | |
651 return; | |
652 } | |
653 #endif // DCT_HIGH_BIT_DEPTH | |
654 } | |
655 { | |
656 const __m128i s3_10_0 = _mm_unpacklo_epi16(step2[13], step2[10]); | |
657 const __m128i s3_10_1 = _mm_unpackhi_epi16(step2[13], step2[10]); | |
658 const __m128i s3_11_0 = _mm_unpacklo_epi16(step2[12], step2[11]); | |
659 const __m128i s3_11_1 = _mm_unpackhi_epi16(step2[12], step2[11]); | |
660 const __m128i s3_10_2 = _mm_madd_epi16(s3_10_0, k__cospi_p16_m16); | |
661 const __m128i s3_10_3 = _mm_madd_epi16(s3_10_1, k__cospi_p16_m16); | |
662 const __m128i s3_11_2 = _mm_madd_epi16(s3_11_0, k__cospi_p16_m16); | |
663 const __m128i s3_11_3 = _mm_madd_epi16(s3_11_1, k__cospi_p16_m16); | |
664 const __m128i s3_12_2 = _mm_madd_epi16(s3_11_0, k__cospi_p16_p16); | |
665 const __m128i s3_12_3 = _mm_madd_epi16(s3_11_1, k__cospi_p16_p16); | |
666 const __m128i s3_13_2 = _mm_madd_epi16(s3_10_0, k__cospi_p16_p16); | |
667 const __m128i s3_13_3 = _mm_madd_epi16(s3_10_1, k__cospi_p16_p16); | |
668 // dct_const_round_shift | |
669 const __m128i s3_10_4 = _mm_add_epi32(s3_10_2, k__DCT_CONST_ROUNDING); | |
670 const __m128i s3_10_5 = _mm_add_epi32(s3_10_3, k__DCT_CONST_ROUNDING); | |
671 const __m128i s3_11_4 = _mm_add_epi32(s3_11_2, k__DCT_CONST_ROUNDING); | |
672 const __m128i s3_11_5 = _mm_add_epi32(s3_11_3, k__DCT_CONST_ROUNDING); | |
673 const __m128i s3_12_4 = _mm_add_epi32(s3_12_2, k__DCT_CONST_ROUNDING); | |
674 const __m128i s3_12_5 = _mm_add_epi32(s3_12_3, k__DCT_CONST_ROUNDING); | |
675 const __m128i s3_13_4 = _mm_add_epi32(s3_13_2, k__DCT_CONST_ROUNDING); | |
676 const __m128i s3_13_5 = _mm_add_epi32(s3_13_3, k__DCT_CONST_ROUNDING); | |
677 const __m128i s3_10_6 = _mm_srai_epi32(s3_10_4, DCT_CONST_BITS); | |
678 const __m128i s3_10_7 = _mm_srai_epi32(s3_10_5, DCT_CONST_BITS); | |
679 const __m128i s3_11_6 = _mm_srai_epi32(s3_11_4, DCT_CONST_BITS); | |
680 const __m128i s3_11_7 = _mm_srai_epi32(s3_11_5, DCT_CONST_BITS); | |
681 const __m128i s3_12_6 = _mm_srai_epi32(s3_12_4, DCT_CONST_BITS); | |
682 const __m128i s3_12_7 = _mm_srai_epi32(s3_12_5, DCT_CONST_BITS); | |
683 const __m128i s3_13_6 = _mm_srai_epi32(s3_13_4, DCT_CONST_BITS); | |
684 const __m128i s3_13_7 = _mm_srai_epi32(s3_13_5, DCT_CONST_BITS); | |
685 // Combine | |
686 step3[10] = _mm_packs_epi32(s3_10_6, s3_10_7); | |
687 step3[11] = _mm_packs_epi32(s3_11_6, s3_11_7); | |
688 step3[12] = _mm_packs_epi32(s3_12_6, s3_12_7); | |
689 step3[13] = _mm_packs_epi32(s3_13_6, s3_13_7); | |
690 #if DCT_HIGH_BIT_DEPTH | |
691 overflow = check_epi16_overflow_x4(&step3[10], &step3[11], | |
692 &step3[12], &step3[13]); | |
693 if (overflow) { | |
694 if (pass == 0) | |
695 HIGH_FDCT32x32_2D_C(input, output_org, stride); | |
696 else | |
697 HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org); | |
698 return; | |
699 } | |
700 #endif // DCT_HIGH_BIT_DEPTH | |
701 } | |
702 { | |
703 step3[16] = ADD_EPI16(step2[23], step1[16]); | |
704 step3[17] = ADD_EPI16(step2[22], step1[17]); | |
705 step3[18] = ADD_EPI16(step2[21], step1[18]); | |
706 step3[19] = ADD_EPI16(step2[20], step1[19]); | |
707 step3[20] = SUB_EPI16(step1[19], step2[20]); | |
708 step3[21] = SUB_EPI16(step1[18], step2[21]); | |
709 step3[22] = SUB_EPI16(step1[17], step2[22]); | |
710 step3[23] = SUB_EPI16(step1[16], step2[23]); | |
711 step3[24] = SUB_EPI16(step1[31], step2[24]); | |
712 step3[25] = SUB_EPI16(step1[30], step2[25]); | |
713 step3[26] = SUB_EPI16(step1[29], step2[26]); | |
714 step3[27] = SUB_EPI16(step1[28], step2[27]); | |
715 step3[28] = ADD_EPI16(step2[27], step1[28]); | |
716 step3[29] = ADD_EPI16(step2[26], step1[29]); | |
717 step3[30] = ADD_EPI16(step2[25], step1[30]); | |
718 step3[31] = ADD_EPI16(step2[24], step1[31]); | |
719 #if DCT_HIGH_BIT_DEPTH | |
720 overflow = check_epi16_overflow_x16( | |
721 &step3[16], &step3[17], &step3[18], &step3[19], | |
722 &step3[20], &step3[21], &step3[22], &step3[23], | |
723 &step3[24], &step3[25], &step3[26], &step3[27], | |
724 &step3[28], &step3[29], &step3[30], &step3[31]); | |
725 if (overflow) { | |
726 if (pass == 0) | |
727 HIGH_FDCT32x32_2D_C(input, output_org, stride); | |
728 else | |
729 HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org); | |
730 return; | |
731 } | |
732 #endif // DCT_HIGH_BIT_DEPTH | |
733 } | |
734 | |
735 // Stage 4 | |
736 { | |
737 step1[0] = ADD_EPI16(step3[ 3], step3[ 0]); | |
738 step1[1] = ADD_EPI16(step3[ 2], step3[ 1]); | |
739 step1[2] = SUB_EPI16(step3[ 1], step3[ 2]); | |
740 step1[3] = SUB_EPI16(step3[ 0], step3[ 3]); | |
741 step1[8] = ADD_EPI16(step3[11], step2[ 8]); | |
742 step1[9] = ADD_EPI16(step3[10], step2[ 9]); | |
743 step1[10] = SUB_EPI16(step2[ 9], step3[10]); | |
744 step1[11] = SUB_EPI16(step2[ 8], step3[11]); | |
745 step1[12] = SUB_EPI16(step2[15], step3[12]); | |
746 step1[13] = SUB_EPI16(step2[14], step3[13]); | |
747 step1[14] = ADD_EPI16(step3[13], step2[14]); | |
748 step1[15] = ADD_EPI16(step3[12], step2[15]); | |
749 #if DCT_HIGH_BIT_DEPTH | |
750 overflow = check_epi16_overflow_x16( | |
751 &step1[0], &step1[1], &step1[2], &step1[3], | |
752 &step1[4], &step1[5], &step1[6], &step1[7], | |
753 &step1[8], &step1[9], &step1[10], &step1[11], | |
754 &step1[12], &step1[13], &step1[14], &step1[15]); | |
755 if (overflow) { | |
756 if (pass == 0) | |
757 HIGH_FDCT32x32_2D_C(input, output_org, stride); | |
758 else | |
759 HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org); | |
760 return; | |
761 } | |
762 #endif // DCT_HIGH_BIT_DEPTH | |
763 } | |
764 { | |
765 const __m128i s1_05_0 = _mm_unpacklo_epi16(step3[6], step3[5]); | |
766 const __m128i s1_05_1 = _mm_unpackhi_epi16(step3[6], step3[5]); | |
767 const __m128i s1_05_2 = _mm_madd_epi16(s1_05_0, k__cospi_p16_m16); | |
768 const __m128i s1_05_3 = _mm_madd_epi16(s1_05_1, k__cospi_p16_m16); | |
769 const __m128i s1_06_2 = _mm_madd_epi16(s1_05_0, k__cospi_p16_p16); | |
770 const __m128i s1_06_3 = _mm_madd_epi16(s1_05_1, k__cospi_p16_p16); | |
771 // dct_const_round_shift | |
772 const __m128i s1_05_4 = _mm_add_epi32(s1_05_2, k__DCT_CONST_ROUNDING); | |
773 const __m128i s1_05_5 = _mm_add_epi32(s1_05_3, k__DCT_CONST_ROUNDING); | |
774 const __m128i s1_06_4 = _mm_add_epi32(s1_06_2, k__DCT_CONST_ROUNDING); | |
775 const __m128i s1_06_5 = _mm_add_epi32(s1_06_3, k__DCT_CONST_ROUNDING); | |
776 const __m128i s1_05_6 = _mm_srai_epi32(s1_05_4, DCT_CONST_BITS); | |
777 const __m128i s1_05_7 = _mm_srai_epi32(s1_05_5, DCT_CONST_BITS); | |
778 const __m128i s1_06_6 = _mm_srai_epi32(s1_06_4, DCT_CONST_BITS); | |
779 const __m128i s1_06_7 = _mm_srai_epi32(s1_06_5, DCT_CONST_BITS); | |
780 // Combine | |
781 step1[5] = _mm_packs_epi32(s1_05_6, s1_05_7); | |
782 step1[6] = _mm_packs_epi32(s1_06_6, s1_06_7); | |
783 #if DCT_HIGH_BIT_DEPTH | |
784 overflow = check_epi16_overflow_x2(&step1[5], &step1[6]); | |
785 if (overflow) { | |
786 if (pass == 0) | |
787 HIGH_FDCT32x32_2D_C(input, output_org, stride); | |
788 else | |
789 HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org); | |
790 return; | |
791 } | |
792 #endif // DCT_HIGH_BIT_DEPTH | |
793 } | |
794 { | |
795 const __m128i s1_18_0 = _mm_unpacklo_epi16(step3[18], step3[29]); | |
796 const __m128i s1_18_1 = _mm_unpackhi_epi16(step3[18], step3[29]); | |
797 const __m128i s1_19_0 = _mm_unpacklo_epi16(step3[19], step3[28]); | |
798 const __m128i s1_19_1 = _mm_unpackhi_epi16(step3[19], step3[28]); | |
799 const __m128i s1_20_0 = _mm_unpacklo_epi16(step3[20], step3[27]); | |
800 const __m128i s1_20_1 = _mm_unpackhi_epi16(step3[20], step3[27]); | |
801 const __m128i s1_21_0 = _mm_unpacklo_epi16(step3[21], step3[26]); | |
802 const __m128i s1_21_1 = _mm_unpackhi_epi16(step3[21], step3[26]); | |
803 const __m128i s1_18_2 = _mm_madd_epi16(s1_18_0, k__cospi_m08_p24); | |
804 const __m128i s1_18_3 = _mm_madd_epi16(s1_18_1, k__cospi_m08_p24); | |
805 const __m128i s1_19_2 = _mm_madd_epi16(s1_19_0, k__cospi_m08_p24); | |
806 const __m128i s1_19_3 = _mm_madd_epi16(s1_19_1, k__cospi_m08_p24); | |
807 const __m128i s1_20_2 = _mm_madd_epi16(s1_20_0, k__cospi_m24_m08); | |
808 const __m128i s1_20_3 = _mm_madd_epi16(s1_20_1, k__cospi_m24_m08); | |
809 const __m128i s1_21_2 = _mm_madd_epi16(s1_21_0, k__cospi_m24_m08); | |
810 const __m128i s1_21_3 = _mm_madd_epi16(s1_21_1, k__cospi_m24_m08); | |
811 const __m128i s1_26_2 = _mm_madd_epi16(s1_21_0, k__cospi_m08_p24); | |
812 const __m128i s1_26_3 = _mm_madd_epi16(s1_21_1, k__cospi_m08_p24); | |
813 const __m128i s1_27_2 = _mm_madd_epi16(s1_20_0, k__cospi_m08_p24); | |
814 const __m128i s1_27_3 = _mm_madd_epi16(s1_20_1, k__cospi_m08_p24); | |
815 const __m128i s1_28_2 = _mm_madd_epi16(s1_19_0, k__cospi_p24_p08); | |
816 const __m128i s1_28_3 = _mm_madd_epi16(s1_19_1, k__cospi_p24_p08); | |
817 const __m128i s1_29_2 = _mm_madd_epi16(s1_18_0, k__cospi_p24_p08); | |
818 const __m128i s1_29_3 = _mm_madd_epi16(s1_18_1, k__cospi_p24_p08); | |
819 // dct_const_round_shift | |
820 const __m128i s1_18_4 = _mm_add_epi32(s1_18_2, k__DCT_CONST_ROUNDING); | |
821 const __m128i s1_18_5 = _mm_add_epi32(s1_18_3, k__DCT_CONST_ROUNDING); | |
822 const __m128i s1_19_4 = _mm_add_epi32(s1_19_2, k__DCT_CONST_ROUNDING); | |
823 const __m128i s1_19_5 = _mm_add_epi32(s1_19_3, k__DCT_CONST_ROUNDING); | |
824 const __m128i s1_20_4 = _mm_add_epi32(s1_20_2, k__DCT_CONST_ROUNDING); | |
825 const __m128i s1_20_5 = _mm_add_epi32(s1_20_3, k__DCT_CONST_ROUNDING); | |
826 const __m128i s1_21_4 = _mm_add_epi32(s1_21_2, k__DCT_CONST_ROUNDING); | |
827 const __m128i s1_21_5 = _mm_add_epi32(s1_21_3, k__DCT_CONST_ROUNDING); | |
828 const __m128i s1_26_4 = _mm_add_epi32(s1_26_2, k__DCT_CONST_ROUNDING); | |
829 const __m128i s1_26_5 = _mm_add_epi32(s1_26_3, k__DCT_CONST_ROUNDING); | |
830 const __m128i s1_27_4 = _mm_add_epi32(s1_27_2, k__DCT_CONST_ROUNDING); | |
831 const __m128i s1_27_5 = _mm_add_epi32(s1_27_3, k__DCT_CONST_ROUNDING); | |
832 const __m128i s1_28_4 = _mm_add_epi32(s1_28_2, k__DCT_CONST_ROUNDING); | |
833 const __m128i s1_28_5 = _mm_add_epi32(s1_28_3, k__DCT_CONST_ROUNDING); | |
834 const __m128i s1_29_4 = _mm_add_epi32(s1_29_2, k__DCT_CONST_ROUNDING); | |
835 const __m128i s1_29_5 = _mm_add_epi32(s1_29_3, k__DCT_CONST_ROUNDING); | |
836 const __m128i s1_18_6 = _mm_srai_epi32(s1_18_4, DCT_CONST_BITS); | |
837 const __m128i s1_18_7 = _mm_srai_epi32(s1_18_5, DCT_CONST_BITS); | |
838 const __m128i s1_19_6 = _mm_srai_epi32(s1_19_4, DCT_CONST_BITS); | |
839 const __m128i s1_19_7 = _mm_srai_epi32(s1_19_5, DCT_CONST_BITS); | |
840 const __m128i s1_20_6 = _mm_srai_epi32(s1_20_4, DCT_CONST_BITS); | |
841 const __m128i s1_20_7 = _mm_srai_epi32(s1_20_5, DCT_CONST_BITS); | |
842 const __m128i s1_21_6 = _mm_srai_epi32(s1_21_4, DCT_CONST_BITS); | |
843 const __m128i s1_21_7 = _mm_srai_epi32(s1_21_5, DCT_CONST_BITS); | |
844 const __m128i s1_26_6 = _mm_srai_epi32(s1_26_4, DCT_CONST_BITS); | |
845 const __m128i s1_26_7 = _mm_srai_epi32(s1_26_5, DCT_CONST_BITS); | |
846 const __m128i s1_27_6 = _mm_srai_epi32(s1_27_4, DCT_CONST_BITS); | |
847 const __m128i s1_27_7 = _mm_srai_epi32(s1_27_5, DCT_CONST_BITS); | |
848 const __m128i s1_28_6 = _mm_srai_epi32(s1_28_4, DCT_CONST_BITS); | |
849 const __m128i s1_28_7 = _mm_srai_epi32(s1_28_5, DCT_CONST_BITS); | |
850 const __m128i s1_29_6 = _mm_srai_epi32(s1_29_4, DCT_CONST_BITS); | |
851 const __m128i s1_29_7 = _mm_srai_epi32(s1_29_5, DCT_CONST_BITS); | |
852 // Combine | |
853 step1[18] = _mm_packs_epi32(s1_18_6, s1_18_7); | |
854 step1[19] = _mm_packs_epi32(s1_19_6, s1_19_7); | |
855 step1[20] = _mm_packs_epi32(s1_20_6, s1_20_7); | |
856 step1[21] = _mm_packs_epi32(s1_21_6, s1_21_7); | |
857 step1[26] = _mm_packs_epi32(s1_26_6, s1_26_7); | |
858 step1[27] = _mm_packs_epi32(s1_27_6, s1_27_7); | |
859 step1[28] = _mm_packs_epi32(s1_28_6, s1_28_7); | |
860 step1[29] = _mm_packs_epi32(s1_29_6, s1_29_7); | |
861 #if DCT_HIGH_BIT_DEPTH | |
862 overflow = check_epi16_overflow_x8(&step1[18], &step1[19], &step1[20], | |
863 &step1[21], &step1[26], &step1[27], | |
864 &step1[28], &step1[29]); | |
865 if (overflow) { | |
866 if (pass == 0) | |
867 HIGH_FDCT32x32_2D_C(input, output_org, stride); | |
868 else | |
869 HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org); | |
870 return; | |
871 } | |
872 #endif // DCT_HIGH_BIT_DEPTH | |
873 } | |
874 // Stage 5 | |
875 { | |
876 step2[4] = ADD_EPI16(step1[5], step3[4]); | |
877 step2[5] = SUB_EPI16(step3[4], step1[5]); | |
878 step2[6] = SUB_EPI16(step3[7], step1[6]); | |
879 step2[7] = ADD_EPI16(step1[6], step3[7]); | |
880 #if DCT_HIGH_BIT_DEPTH | |
881 overflow = check_epi16_overflow_x4(&step2[4], &step2[5], | |
882 &step2[6], &step2[7]); | |
883 if (overflow) { | |
884 if (pass == 0) | |
885 HIGH_FDCT32x32_2D_C(input, output_org, stride); | |
886 else | |
887 HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org); | |
888 return; | |
889 } | |
890 #endif // DCT_HIGH_BIT_DEPTH | |
891 } | |
892 { | |
893 const __m128i out_00_0 = _mm_unpacklo_epi16(step1[0], step1[1]); | |
894 const __m128i out_00_1 = _mm_unpackhi_epi16(step1[0], step1[1]); | |
895 const __m128i out_08_0 = _mm_unpacklo_epi16(step1[2], step1[3]); | |
896 const __m128i out_08_1 = _mm_unpackhi_epi16(step1[2], step1[3]); | |
897 const __m128i out_00_2 = _mm_madd_epi16(out_00_0, k__cospi_p16_p16); | |
898 const __m128i out_00_3 = _mm_madd_epi16(out_00_1, k__cospi_p16_p16); | |
899 const __m128i out_16_2 = _mm_madd_epi16(out_00_0, k__cospi_p16_m16); | |
900 const __m128i out_16_3 = _mm_madd_epi16(out_00_1, k__cospi_p16_m16); | |
901 const __m128i out_08_2 = _mm_madd_epi16(out_08_0, k__cospi_p24_p08); | |
902 const __m128i out_08_3 = _mm_madd_epi16(out_08_1, k__cospi_p24_p08); | |
903 const __m128i out_24_2 = _mm_madd_epi16(out_08_0, k__cospi_m08_p24); | |
904 const __m128i out_24_3 = _mm_madd_epi16(out_08_1, k__cospi_m08_p24); | |
905 // dct_const_round_shift | |
906 const __m128i out_00_4 = _mm_add_epi32(out_00_2, k__DCT_CONST_ROUNDING); | |
907 const __m128i out_00_5 = _mm_add_epi32(out_00_3, k__DCT_CONST_ROUNDING); | |
908 const __m128i out_16_4 = _mm_add_epi32(out_16_2, k__DCT_CONST_ROUNDING); | |
909 const __m128i out_16_5 = _mm_add_epi32(out_16_3, k__DCT_CONST_ROUNDING); | |
910 const __m128i out_08_4 = _mm_add_epi32(out_08_2, k__DCT_CONST_ROUNDING); | |
911 const __m128i out_08_5 = _mm_add_epi32(out_08_3, k__DCT_CONST_ROUNDING); | |
912 const __m128i out_24_4 = _mm_add_epi32(out_24_2, k__DCT_CONST_ROUNDING); | |
913 const __m128i out_24_5 = _mm_add_epi32(out_24_3, k__DCT_CONST_ROUNDING); | |
914 const __m128i out_00_6 = _mm_srai_epi32(out_00_4, DCT_CONST_BITS); | |
915 const __m128i out_00_7 = _mm_srai_epi32(out_00_5, DCT_CONST_BITS); | |
916 const __m128i out_16_6 = _mm_srai_epi32(out_16_4, DCT_CONST_BITS); | |
917 const __m128i out_16_7 = _mm_srai_epi32(out_16_5, DCT_CONST_BITS); | |
918 const __m128i out_08_6 = _mm_srai_epi32(out_08_4, DCT_CONST_BITS); | |
919 const __m128i out_08_7 = _mm_srai_epi32(out_08_5, DCT_CONST_BITS); | |
920 const __m128i out_24_6 = _mm_srai_epi32(out_24_4, DCT_CONST_BITS); | |
921 const __m128i out_24_7 = _mm_srai_epi32(out_24_5, DCT_CONST_BITS); | |
922 // Combine | |
923 out[ 0] = _mm_packs_epi32(out_00_6, out_00_7); | |
924 out[16] = _mm_packs_epi32(out_16_6, out_16_7); | |
925 out[ 8] = _mm_packs_epi32(out_08_6, out_08_7); | |
926 out[24] = _mm_packs_epi32(out_24_6, out_24_7); | |
927 #if DCT_HIGH_BIT_DEPTH | |
928 overflow = check_epi16_overflow_x4(&out[0], &out[16], | |
929 &out[8], &out[24]); | |
930 if (overflow) { | |
931 if (pass == 0) | |
932 HIGH_FDCT32x32_2D_C(input, output_org, stride); | |
933 else | |
934 HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org); | |
935 return; | |
936 } | |
937 #endif // DCT_HIGH_BIT_DEPTH | |
938 } | |
939 { | |
940 const __m128i s2_09_0 = _mm_unpacklo_epi16(step1[ 9], step1[14]); | |
941 const __m128i s2_09_1 = _mm_unpackhi_epi16(step1[ 9], step1[14]); | |
942 const __m128i s2_10_0 = _mm_unpacklo_epi16(step1[10], step1[13]); | |
943 const __m128i s2_10_1 = _mm_unpackhi_epi16(step1[10], step1[13]); | |
944 const __m128i s2_09_2 = _mm_madd_epi16(s2_09_0, k__cospi_m08_p24); | |
945 const __m128i s2_09_3 = _mm_madd_epi16(s2_09_1, k__cospi_m08_p24); | |
946 const __m128i s2_10_2 = _mm_madd_epi16(s2_10_0, k__cospi_m24_m08); | |
947 const __m128i s2_10_3 = _mm_madd_epi16(s2_10_1, k__cospi_m24_m08); | |
948 const __m128i s2_13_2 = _mm_madd_epi16(s2_10_0, k__cospi_m08_p24); | |
949 const __m128i s2_13_3 = _mm_madd_epi16(s2_10_1, k__cospi_m08_p24); | |
950 const __m128i s2_14_2 = _mm_madd_epi16(s2_09_0, k__cospi_p24_p08); | |
951 const __m128i s2_14_3 = _mm_madd_epi16(s2_09_1, k__cospi_p24_p08); | |
952 // dct_const_round_shift | |
953 const __m128i s2_09_4 = _mm_add_epi32(s2_09_2, k__DCT_CONST_ROUNDING); | |
954 const __m128i s2_09_5 = _mm_add_epi32(s2_09_3, k__DCT_CONST_ROUNDING); | |
955 const __m128i s2_10_4 = _mm_add_epi32(s2_10_2, k__DCT_CONST_ROUNDING); | |
956 const __m128i s2_10_5 = _mm_add_epi32(s2_10_3, k__DCT_CONST_ROUNDING); | |
957 const __m128i s2_13_4 = _mm_add_epi32(s2_13_2, k__DCT_CONST_ROUNDING); | |
958 const __m128i s2_13_5 = _mm_add_epi32(s2_13_3, k__DCT_CONST_ROUNDING); | |
959 const __m128i s2_14_4 = _mm_add_epi32(s2_14_2, k__DCT_CONST_ROUNDING); | |
960 const __m128i s2_14_5 = _mm_add_epi32(s2_14_3, k__DCT_CONST_ROUNDING); | |
961 const __m128i s2_09_6 = _mm_srai_epi32(s2_09_4, DCT_CONST_BITS); | |
962 const __m128i s2_09_7 = _mm_srai_epi32(s2_09_5, DCT_CONST_BITS); | |
963 const __m128i s2_10_6 = _mm_srai_epi32(s2_10_4, DCT_CONST_BITS); | |
964 const __m128i s2_10_7 = _mm_srai_epi32(s2_10_5, DCT_CONST_BITS); | |
965 const __m128i s2_13_6 = _mm_srai_epi32(s2_13_4, DCT_CONST_BITS); | |
966 const __m128i s2_13_7 = _mm_srai_epi32(s2_13_5, DCT_CONST_BITS); | |
967 const __m128i s2_14_6 = _mm_srai_epi32(s2_14_4, DCT_CONST_BITS); | |
968 const __m128i s2_14_7 = _mm_srai_epi32(s2_14_5, DCT_CONST_BITS); | |
969 // Combine | |
970 step2[ 9] = _mm_packs_epi32(s2_09_6, s2_09_7); | |
971 step2[10] = _mm_packs_epi32(s2_10_6, s2_10_7); | |
972 step2[13] = _mm_packs_epi32(s2_13_6, s2_13_7); | |
973 step2[14] = _mm_packs_epi32(s2_14_6, s2_14_7); | |
974 #if DCT_HIGH_BIT_DEPTH | |
975 overflow = check_epi16_overflow_x4(&step2[9], &step2[10], | |
976 &step2[13], &step2[14]); | |
977 if (overflow) { | |
978 if (pass == 0) | |
979 HIGH_FDCT32x32_2D_C(input, output_org, stride); | |
980 else | |
981 HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org); | |
982 return; | |
983 } | |
984 #endif // DCT_HIGH_BIT_DEPTH | |
985 } | |
986 { | |
987 step2[16] = ADD_EPI16(step1[19], step3[16]); | |
988 step2[17] = ADD_EPI16(step1[18], step3[17]); | |
989 step2[18] = SUB_EPI16(step3[17], step1[18]); | |
990 step2[19] = SUB_EPI16(step3[16], step1[19]); | |
991 step2[20] = SUB_EPI16(step3[23], step1[20]); | |
992 step2[21] = SUB_EPI16(step3[22], step1[21]); | |
993 step2[22] = ADD_EPI16(step1[21], step3[22]); | |
994 step2[23] = ADD_EPI16(step1[20], step3[23]); | |
995 step2[24] = ADD_EPI16(step1[27], step3[24]); | |
996 step2[25] = ADD_EPI16(step1[26], step3[25]); | |
997 step2[26] = SUB_EPI16(step3[25], step1[26]); | |
998 step2[27] = SUB_EPI16(step3[24], step1[27]); | |
999 step2[28] = SUB_EPI16(step3[31], step1[28]); | |
1000 step2[29] = SUB_EPI16(step3[30], step1[29]); | |
1001 step2[30] = ADD_EPI16(step1[29], step3[30]); | |
1002 step2[31] = ADD_EPI16(step1[28], step3[31]); | |
1003 #if DCT_HIGH_BIT_DEPTH | |
1004 overflow = check_epi16_overflow_x16( | |
1005 &step2[16], &step2[17], &step2[18], &step2[19], | |
1006 &step2[20], &step2[21], &step2[22], &step2[23], | |
1007 &step2[24], &step2[25], &step2[26], &step2[27], | |
1008 &step2[28], &step2[29], &step2[30], &step2[31]); | |
1009 if (overflow) { | |
1010 if (pass == 0) | |
1011 HIGH_FDCT32x32_2D_C(input, output_org, stride); | |
1012 else | |
1013 HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org); | |
1014 return; | |
1015 } | |
1016 #endif // DCT_HIGH_BIT_DEPTH | |
1017 } | |
1018 // Stage 6 | |
1019 { | |
1020 const __m128i out_04_0 = _mm_unpacklo_epi16(step2[4], step2[7]); | |
1021 const __m128i out_04_1 = _mm_unpackhi_epi16(step2[4], step2[7]); | |
1022 const __m128i out_20_0 = _mm_unpacklo_epi16(step2[5], step2[6]); | |
1023 const __m128i out_20_1 = _mm_unpackhi_epi16(step2[5], step2[6]); | |
1024 const __m128i out_12_0 = _mm_unpacklo_epi16(step2[5], step2[6]); | |
1025 const __m128i out_12_1 = _mm_unpackhi_epi16(step2[5], step2[6]); | |
1026 const __m128i out_28_0 = _mm_unpacklo_epi16(step2[4], step2[7]); | |
1027 const __m128i out_28_1 = _mm_unpackhi_epi16(step2[4], step2[7]); | |
1028 const __m128i out_04_2 = _mm_madd_epi16(out_04_0, k__cospi_p28_p04); | |
1029 const __m128i out_04_3 = _mm_madd_epi16(out_04_1, k__cospi_p28_p04); | |
1030 const __m128i out_20_2 = _mm_madd_epi16(out_20_0, k__cospi_p12_p20); | |
1031 const __m128i out_20_3 = _mm_madd_epi16(out_20_1, k__cospi_p12_p20); | |
1032 const __m128i out_12_2 = _mm_madd_epi16(out_12_0, k__cospi_m20_p12); | |
1033 const __m128i out_12_3 = _mm_madd_epi16(out_12_1, k__cospi_m20_p12); | |
1034 const __m128i out_28_2 = _mm_madd_epi16(out_28_0, k__cospi_m04_p28); | |
1035 const __m128i out_28_3 = _mm_madd_epi16(out_28_1, k__cospi_m04_p28); | |
1036 // dct_const_round_shift | |
1037 const __m128i out_04_4 = _mm_add_epi32(out_04_2, k__DCT_CONST_ROUNDING); | |
1038 const __m128i out_04_5 = _mm_add_epi32(out_04_3, k__DCT_CONST_ROUNDING); | |
1039 const __m128i out_20_4 = _mm_add_epi32(out_20_2, k__DCT_CONST_ROUNDING); | |
1040 const __m128i out_20_5 = _mm_add_epi32(out_20_3, k__DCT_CONST_ROUNDING); | |
1041 const __m128i out_12_4 = _mm_add_epi32(out_12_2, k__DCT_CONST_ROUNDING); | |
1042 const __m128i out_12_5 = _mm_add_epi32(out_12_3, k__DCT_CONST_ROUNDING); | |
1043 const __m128i out_28_4 = _mm_add_epi32(out_28_2, k__DCT_CONST_ROUNDING); | |
1044 const __m128i out_28_5 = _mm_add_epi32(out_28_3, k__DCT_CONST_ROUNDING); | |
1045 const __m128i out_04_6 = _mm_srai_epi32(out_04_4, DCT_CONST_BITS); | |
1046 const __m128i out_04_7 = _mm_srai_epi32(out_04_5, DCT_CONST_BITS); | |
1047 const __m128i out_20_6 = _mm_srai_epi32(out_20_4, DCT_CONST_BITS); | |
1048 const __m128i out_20_7 = _mm_srai_epi32(out_20_5, DCT_CONST_BITS); | |
1049 const __m128i out_12_6 = _mm_srai_epi32(out_12_4, DCT_CONST_BITS); | |
1050 const __m128i out_12_7 = _mm_srai_epi32(out_12_5, DCT_CONST_BITS); | |
1051 const __m128i out_28_6 = _mm_srai_epi32(out_28_4, DCT_CONST_BITS); | |
1052 const __m128i out_28_7 = _mm_srai_epi32(out_28_5, DCT_CONST_BITS); | |
1053 // Combine | |
1054 out[4] = _mm_packs_epi32(out_04_6, out_04_7); | |
1055 out[20] = _mm_packs_epi32(out_20_6, out_20_7); | |
1056 out[12] = _mm_packs_epi32(out_12_6, out_12_7); | |
1057 out[28] = _mm_packs_epi32(out_28_6, out_28_7); | |
1058 #if DCT_HIGH_BIT_DEPTH | |
1059 overflow = check_epi16_overflow_x4(&out[4], &out[20], | |
1060 &out[12], &out[28]); | |
1061 if (overflow) { | |
1062 if (pass == 0) | |
1063 HIGH_FDCT32x32_2D_C(input, output_org, stride); | |
1064 else | |
1065 HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org); | |
1066 return; | |
1067 } | |
1068 #endif // DCT_HIGH_BIT_DEPTH | |
1069 } | |
    {
      step3[8] = ADD_EPI16(step2[ 9], step1[ 8]);
      step3[9] = SUB_EPI16(step1[ 8], step2[ 9]);
      step3[10] = SUB_EPI16(step1[11], step2[10]);
      step3[11] = ADD_EPI16(step2[10], step1[11]);
      step3[12] = ADD_EPI16(step2[13], step1[12]);
      step3[13] = SUB_EPI16(step1[12], step2[13]);
      step3[14] = SUB_EPI16(step1[15], step2[14]);
      step3[15] = ADD_EPI16(step2[14], step1[15]);
#if DCT_HIGH_BIT_DEPTH
      overflow = check_epi16_overflow_x8(&step3[8], &step3[9], &step3[10],
                                         &step3[11], &step3[12], &step3[13],
                                         &step3[14], &step3[15]);
      if (overflow) {
        if (pass == 0)
          HIGH_FDCT32x32_2D_C(input, output_org, stride);
        else
          HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
        return;
      }
#endif  // DCT_HIGH_BIT_DEPTH
    }
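    // Rotate the step2[17..30] pairs. Each unpacked input is reused for two
    // outputs with a different cospi pair (e.g. s3_22_0 feeds both step3[22]
    // and step3[25]), which avoids a second round of unpacks.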
    {
      const __m128i s3_17_0 = _mm_unpacklo_epi16(step2[17], step2[30]);
      const __m128i s3_17_1 = _mm_unpackhi_epi16(step2[17], step2[30]);
      const __m128i s3_18_0 = _mm_unpacklo_epi16(step2[18], step2[29]);
      const __m128i s3_18_1 = _mm_unpackhi_epi16(step2[18], step2[29]);
      const __m128i s3_21_0 = _mm_unpacklo_epi16(step2[21], step2[26]);
      const __m128i s3_21_1 = _mm_unpackhi_epi16(step2[21], step2[26]);
      const __m128i s3_22_0 = _mm_unpacklo_epi16(step2[22], step2[25]);
      const __m128i s3_22_1 = _mm_unpackhi_epi16(step2[22], step2[25]);
      const __m128i s3_17_2 = _mm_madd_epi16(s3_17_0, k__cospi_m04_p28);
      const __m128i s3_17_3 = _mm_madd_epi16(s3_17_1, k__cospi_m04_p28);
      const __m128i s3_18_2 = _mm_madd_epi16(s3_18_0, k__cospi_m28_m04);
      const __m128i s3_18_3 = _mm_madd_epi16(s3_18_1, k__cospi_m28_m04);
      const __m128i s3_21_2 = _mm_madd_epi16(s3_21_0, k__cospi_m20_p12);
      const __m128i s3_21_3 = _mm_madd_epi16(s3_21_1, k__cospi_m20_p12);
      const __m128i s3_22_2 = _mm_madd_epi16(s3_22_0, k__cospi_m12_m20);
      const __m128i s3_22_3 = _mm_madd_epi16(s3_22_1, k__cospi_m12_m20);
      const __m128i s3_25_2 = _mm_madd_epi16(s3_22_0, k__cospi_m20_p12);
      const __m128i s3_25_3 = _mm_madd_epi16(s3_22_1, k__cospi_m20_p12);
      const __m128i s3_26_2 = _mm_madd_epi16(s3_21_0, k__cospi_p12_p20);
      const __m128i s3_26_3 = _mm_madd_epi16(s3_21_1, k__cospi_p12_p20);
      const __m128i s3_29_2 = _mm_madd_epi16(s3_18_0, k__cospi_m04_p28);
      const __m128i s3_29_3 = _mm_madd_epi16(s3_18_1, k__cospi_m04_p28);
      const __m128i s3_30_2 = _mm_madd_epi16(s3_17_0, k__cospi_p28_p04);
      const __m128i s3_30_3 = _mm_madd_epi16(s3_17_1, k__cospi_p28_p04);
      // dct_const_round_shift
      const __m128i s3_17_4 = _mm_add_epi32(s3_17_2, k__DCT_CONST_ROUNDING);
      const __m128i s3_17_5 = _mm_add_epi32(s3_17_3, k__DCT_CONST_ROUNDING);
      const __m128i s3_18_4 = _mm_add_epi32(s3_18_2, k__DCT_CONST_ROUNDING);
      const __m128i s3_18_5 = _mm_add_epi32(s3_18_3, k__DCT_CONST_ROUNDING);
      const __m128i s3_21_4 = _mm_add_epi32(s3_21_2, k__DCT_CONST_ROUNDING);
      const __m128i s3_21_5 = _mm_add_epi32(s3_21_3, k__DCT_CONST_ROUNDING);
      const __m128i s3_22_4 = _mm_add_epi32(s3_22_2, k__DCT_CONST_ROUNDING);
      const __m128i s3_22_5 = _mm_add_epi32(s3_22_3, k__DCT_CONST_ROUNDING);
      const __m128i s3_17_6 = _mm_srai_epi32(s3_17_4, DCT_CONST_BITS);
      const __m128i s3_17_7 = _mm_srai_epi32(s3_17_5, DCT_CONST_BITS);
      const __m128i s3_18_6 = _mm_srai_epi32(s3_18_4, DCT_CONST_BITS);
      const __m128i s3_18_7 = _mm_srai_epi32(s3_18_5, DCT_CONST_BITS);
      const __m128i s3_21_6 = _mm_srai_epi32(s3_21_4, DCT_CONST_BITS);
      const __m128i s3_21_7 = _mm_srai_epi32(s3_21_5, DCT_CONST_BITS);
      const __m128i s3_22_6 = _mm_srai_epi32(s3_22_4, DCT_CONST_BITS);
      const __m128i s3_22_7 = _mm_srai_epi32(s3_22_5, DCT_CONST_BITS);
      const __m128i s3_25_4 = _mm_add_epi32(s3_25_2, k__DCT_CONST_ROUNDING);
      const __m128i s3_25_5 = _mm_add_epi32(s3_25_3, k__DCT_CONST_ROUNDING);
      const __m128i s3_26_4 = _mm_add_epi32(s3_26_2, k__DCT_CONST_ROUNDING);
      const __m128i s3_26_5 = _mm_add_epi32(s3_26_3, k__DCT_CONST_ROUNDING);
      const __m128i s3_29_4 = _mm_add_epi32(s3_29_2, k__DCT_CONST_ROUNDING);
      const __m128i s3_29_5 = _mm_add_epi32(s3_29_3, k__DCT_CONST_ROUNDING);
      const __m128i s3_30_4 = _mm_add_epi32(s3_30_2, k__DCT_CONST_ROUNDING);
      const __m128i s3_30_5 = _mm_add_epi32(s3_30_3, k__DCT_CONST_ROUNDING);
      const __m128i s3_25_6 = _mm_srai_epi32(s3_25_4, DCT_CONST_BITS);
      const __m128i s3_25_7 = _mm_srai_epi32(s3_25_5, DCT_CONST_BITS);
      const __m128i s3_26_6 = _mm_srai_epi32(s3_26_4, DCT_CONST_BITS);
      const __m128i s3_26_7 = _mm_srai_epi32(s3_26_5, DCT_CONST_BITS);
      const __m128i s3_29_6 = _mm_srai_epi32(s3_29_4, DCT_CONST_BITS);
      const __m128i s3_29_7 = _mm_srai_epi32(s3_29_5, DCT_CONST_BITS);
      const __m128i s3_30_6 = _mm_srai_epi32(s3_30_4, DCT_CONST_BITS);
      const __m128i s3_30_7 = _mm_srai_epi32(s3_30_5, DCT_CONST_BITS);
      // Combine
      step3[17] = _mm_packs_epi32(s3_17_6, s3_17_7);
      step3[18] = _mm_packs_epi32(s3_18_6, s3_18_7);
      step3[21] = _mm_packs_epi32(s3_21_6, s3_21_7);
      step3[22] = _mm_packs_epi32(s3_22_6, s3_22_7);
      // Combine
      step3[25] = _mm_packs_epi32(s3_25_6, s3_25_7);
      step3[26] = _mm_packs_epi32(s3_26_6, s3_26_7);
      step3[29] = _mm_packs_epi32(s3_29_6, s3_29_7);
      step3[30] = _mm_packs_epi32(s3_30_6, s3_30_7);
#if DCT_HIGH_BIT_DEPTH
      overflow = check_epi16_overflow_x8(&step3[17], &step3[18], &step3[21],
                                         &step3[22], &step3[25], &step3[26],
                                         &step3[29], &step3[30]);
      if (overflow) {
        if (pass == 0)
          HIGH_FDCT32x32_2D_C(input, output_org, stride);
        else
          HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
        return;
      }
#endif  // DCT_HIGH_BIT_DEPTH
    }
    // Stage 7
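    // Stage 7 emits out[2,18,10,26,6,22,14,30] from the step3[8..15] pairs
    // and then builds step1[16..31] for the final stage.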
    {
      const __m128i out_02_0 = _mm_unpacklo_epi16(step3[ 8], step3[15]);
      const __m128i out_02_1 = _mm_unpackhi_epi16(step3[ 8], step3[15]);
      const __m128i out_18_0 = _mm_unpacklo_epi16(step3[ 9], step3[14]);
      const __m128i out_18_1 = _mm_unpackhi_epi16(step3[ 9], step3[14]);
      const __m128i out_10_0 = _mm_unpacklo_epi16(step3[10], step3[13]);
      const __m128i out_10_1 = _mm_unpackhi_epi16(step3[10], step3[13]);
      const __m128i out_26_0 = _mm_unpacklo_epi16(step3[11], step3[12]);
      const __m128i out_26_1 = _mm_unpackhi_epi16(step3[11], step3[12]);
      const __m128i out_02_2 = _mm_madd_epi16(out_02_0, k__cospi_p30_p02);
      const __m128i out_02_3 = _mm_madd_epi16(out_02_1, k__cospi_p30_p02);
      const __m128i out_18_2 = _mm_madd_epi16(out_18_0, k__cospi_p14_p18);
      const __m128i out_18_3 = _mm_madd_epi16(out_18_1, k__cospi_p14_p18);
      const __m128i out_10_2 = _mm_madd_epi16(out_10_0, k__cospi_p22_p10);
      const __m128i out_10_3 = _mm_madd_epi16(out_10_1, k__cospi_p22_p10);
      const __m128i out_26_2 = _mm_madd_epi16(out_26_0, k__cospi_p06_p26);
      const __m128i out_26_3 = _mm_madd_epi16(out_26_1, k__cospi_p06_p26);
      const __m128i out_06_2 = _mm_madd_epi16(out_26_0, k__cospi_m26_p06);
      const __m128i out_06_3 = _mm_madd_epi16(out_26_1, k__cospi_m26_p06);
      const __m128i out_22_2 = _mm_madd_epi16(out_10_0, k__cospi_m10_p22);
      const __m128i out_22_3 = _mm_madd_epi16(out_10_1, k__cospi_m10_p22);
      const __m128i out_14_2 = _mm_madd_epi16(out_18_0, k__cospi_m18_p14);
      const __m128i out_14_3 = _mm_madd_epi16(out_18_1, k__cospi_m18_p14);
      const __m128i out_30_2 = _mm_madd_epi16(out_02_0, k__cospi_m02_p30);
      const __m128i out_30_3 = _mm_madd_epi16(out_02_1, k__cospi_m02_p30);
      // dct_const_round_shift
      const __m128i out_02_4 = _mm_add_epi32(out_02_2, k__DCT_CONST_ROUNDING);
      const __m128i out_02_5 = _mm_add_epi32(out_02_3, k__DCT_CONST_ROUNDING);
      const __m128i out_18_4 = _mm_add_epi32(out_18_2, k__DCT_CONST_ROUNDING);
      const __m128i out_18_5 = _mm_add_epi32(out_18_3, k__DCT_CONST_ROUNDING);
      const __m128i out_10_4 = _mm_add_epi32(out_10_2, k__DCT_CONST_ROUNDING);
      const __m128i out_10_5 = _mm_add_epi32(out_10_3, k__DCT_CONST_ROUNDING);
      const __m128i out_26_4 = _mm_add_epi32(out_26_2, k__DCT_CONST_ROUNDING);
      const __m128i out_26_5 = _mm_add_epi32(out_26_3, k__DCT_CONST_ROUNDING);
      const __m128i out_06_4 = _mm_add_epi32(out_06_2, k__DCT_CONST_ROUNDING);
      const __m128i out_06_5 = _mm_add_epi32(out_06_3, k__DCT_CONST_ROUNDING);
      const __m128i out_22_4 = _mm_add_epi32(out_22_2, k__DCT_CONST_ROUNDING);
      const __m128i out_22_5 = _mm_add_epi32(out_22_3, k__DCT_CONST_ROUNDING);
      const __m128i out_14_4 = _mm_add_epi32(out_14_2, k__DCT_CONST_ROUNDING);
      const __m128i out_14_5 = _mm_add_epi32(out_14_3, k__DCT_CONST_ROUNDING);
      const __m128i out_30_4 = _mm_add_epi32(out_30_2, k__DCT_CONST_ROUNDING);
      const __m128i out_30_5 = _mm_add_epi32(out_30_3, k__DCT_CONST_ROUNDING);
      const __m128i out_02_6 = _mm_srai_epi32(out_02_4, DCT_CONST_BITS);
      const __m128i out_02_7 = _mm_srai_epi32(out_02_5, DCT_CONST_BITS);
      const __m128i out_18_6 = _mm_srai_epi32(out_18_4, DCT_CONST_BITS);
      const __m128i out_18_7 = _mm_srai_epi32(out_18_5, DCT_CONST_BITS);
      const __m128i out_10_6 = _mm_srai_epi32(out_10_4, DCT_CONST_BITS);
      const __m128i out_10_7 = _mm_srai_epi32(out_10_5, DCT_CONST_BITS);
      const __m128i out_26_6 = _mm_srai_epi32(out_26_4, DCT_CONST_BITS);
      const __m128i out_26_7 = _mm_srai_epi32(out_26_5, DCT_CONST_BITS);
      const __m128i out_06_6 = _mm_srai_epi32(out_06_4, DCT_CONST_BITS);
      const __m128i out_06_7 = _mm_srai_epi32(out_06_5, DCT_CONST_BITS);
      const __m128i out_22_6 = _mm_srai_epi32(out_22_4, DCT_CONST_BITS);
      const __m128i out_22_7 = _mm_srai_epi32(out_22_5, DCT_CONST_BITS);
      const __m128i out_14_6 = _mm_srai_epi32(out_14_4, DCT_CONST_BITS);
      const __m128i out_14_7 = _mm_srai_epi32(out_14_5, DCT_CONST_BITS);
      const __m128i out_30_6 = _mm_srai_epi32(out_30_4, DCT_CONST_BITS);
      const __m128i out_30_7 = _mm_srai_epi32(out_30_5, DCT_CONST_BITS);
      // Combine
      out[ 2] = _mm_packs_epi32(out_02_6, out_02_7);
      out[18] = _mm_packs_epi32(out_18_6, out_18_7);
      out[10] = _mm_packs_epi32(out_10_6, out_10_7);
      out[26] = _mm_packs_epi32(out_26_6, out_26_7);
      out[ 6] = _mm_packs_epi32(out_06_6, out_06_7);
      out[22] = _mm_packs_epi32(out_22_6, out_22_7);
      out[14] = _mm_packs_epi32(out_14_6, out_14_7);
      out[30] = _mm_packs_epi32(out_30_6, out_30_7);
#if DCT_HIGH_BIT_DEPTH
      overflow = check_epi16_overflow_x8(&out[2], &out[18], &out[10],
                                         &out[26], &out[6], &out[22],
                                         &out[14], &out[30]);
      if (overflow) {
        if (pass == 0)
          HIGH_FDCT32x32_2D_C(input, output_org, stride);
        else
          HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
        return;
      }
#endif  // DCT_HIGH_BIT_DEPTH
    }
    {
      step1[16] = ADD_EPI16(step3[17], step2[16]);
      step1[17] = SUB_EPI16(step2[16], step3[17]);
      step1[18] = SUB_EPI16(step2[19], step3[18]);
      step1[19] = ADD_EPI16(step3[18], step2[19]);
      step1[20] = ADD_EPI16(step3[21], step2[20]);
      step1[21] = SUB_EPI16(step2[20], step3[21]);
      step1[22] = SUB_EPI16(step2[23], step3[22]);
      step1[23] = ADD_EPI16(step3[22], step2[23]);
      step1[24] = ADD_EPI16(step3[25], step2[24]);
      step1[25] = SUB_EPI16(step2[24], step3[25]);
      step1[26] = SUB_EPI16(step2[27], step3[26]);
      step1[27] = ADD_EPI16(step3[26], step2[27]);
      step1[28] = ADD_EPI16(step3[29], step2[28]);
      step1[29] = SUB_EPI16(step2[28], step3[29]);
      step1[30] = SUB_EPI16(step2[31], step3[30]);
      step1[31] = ADD_EPI16(step3[30], step2[31]);
#if DCT_HIGH_BIT_DEPTH
      overflow = check_epi16_overflow_x16(
          &step1[16], &step1[17], &step1[18], &step1[19],
          &step1[20], &step1[21], &step1[22], &step1[23],
          &step1[24], &step1[25], &step1[26], &step1[27],
          &step1[28], &step1[29], &step1[30], &step1[31]);
      if (overflow) {
        if (pass == 0)
          HIGH_FDCT32x32_2D_C(input, output_org, stride);
        else
          HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
        return;
      }
#endif  // DCT_HIGH_BIT_DEPTH
    }
    // Final stage --- output indices are bit-reversed.
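    // Each step1[16 + n] pair lands at out[bitrev5(16 + n)]; e.g. 18 is
    // 10010b, which reverses to 01001b = 9, so the step1[18]/step1[29]
    // rotation writes out[9].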
    {
      const __m128i out_01_0 = _mm_unpacklo_epi16(step1[16], step1[31]);
      const __m128i out_01_1 = _mm_unpackhi_epi16(step1[16], step1[31]);
      const __m128i out_17_0 = _mm_unpacklo_epi16(step1[17], step1[30]);
      const __m128i out_17_1 = _mm_unpackhi_epi16(step1[17], step1[30]);
      const __m128i out_09_0 = _mm_unpacklo_epi16(step1[18], step1[29]);
      const __m128i out_09_1 = _mm_unpackhi_epi16(step1[18], step1[29]);
      const __m128i out_25_0 = _mm_unpacklo_epi16(step1[19], step1[28]);
      const __m128i out_25_1 = _mm_unpackhi_epi16(step1[19], step1[28]);
      const __m128i out_01_2 = _mm_madd_epi16(out_01_0, k__cospi_p31_p01);
      const __m128i out_01_3 = _mm_madd_epi16(out_01_1, k__cospi_p31_p01);
      const __m128i out_17_2 = _mm_madd_epi16(out_17_0, k__cospi_p15_p17);
      const __m128i out_17_3 = _mm_madd_epi16(out_17_1, k__cospi_p15_p17);
      const __m128i out_09_2 = _mm_madd_epi16(out_09_0, k__cospi_p23_p09);
      const __m128i out_09_3 = _mm_madd_epi16(out_09_1, k__cospi_p23_p09);
      const __m128i out_25_2 = _mm_madd_epi16(out_25_0, k__cospi_p07_p25);
      const __m128i out_25_3 = _mm_madd_epi16(out_25_1, k__cospi_p07_p25);
      const __m128i out_07_2 = _mm_madd_epi16(out_25_0, k__cospi_m25_p07);
      const __m128i out_07_3 = _mm_madd_epi16(out_25_1, k__cospi_m25_p07);
      const __m128i out_23_2 = _mm_madd_epi16(out_09_0, k__cospi_m09_p23);
      const __m128i out_23_3 = _mm_madd_epi16(out_09_1, k__cospi_m09_p23);
      const __m128i out_15_2 = _mm_madd_epi16(out_17_0, k__cospi_m17_p15);
      const __m128i out_15_3 = _mm_madd_epi16(out_17_1, k__cospi_m17_p15);
      const __m128i out_31_2 = _mm_madd_epi16(out_01_0, k__cospi_m01_p31);
      const __m128i out_31_3 = _mm_madd_epi16(out_01_1, k__cospi_m01_p31);
      // dct_const_round_shift
      const __m128i out_01_4 = _mm_add_epi32(out_01_2, k__DCT_CONST_ROUNDING);
      const __m128i out_01_5 = _mm_add_epi32(out_01_3, k__DCT_CONST_ROUNDING);
      const __m128i out_17_4 = _mm_add_epi32(out_17_2, k__DCT_CONST_ROUNDING);
      const __m128i out_17_5 = _mm_add_epi32(out_17_3, k__DCT_CONST_ROUNDING);
      const __m128i out_09_4 = _mm_add_epi32(out_09_2, k__DCT_CONST_ROUNDING);
      const __m128i out_09_5 = _mm_add_epi32(out_09_3, k__DCT_CONST_ROUNDING);
      const __m128i out_25_4 = _mm_add_epi32(out_25_2, k__DCT_CONST_ROUNDING);
      const __m128i out_25_5 = _mm_add_epi32(out_25_3, k__DCT_CONST_ROUNDING);
      const __m128i out_07_4 = _mm_add_epi32(out_07_2, k__DCT_CONST_ROUNDING);
      const __m128i out_07_5 = _mm_add_epi32(out_07_3, k__DCT_CONST_ROUNDING);
      const __m128i out_23_4 = _mm_add_epi32(out_23_2, k__DCT_CONST_ROUNDING);
      const __m128i out_23_5 = _mm_add_epi32(out_23_3, k__DCT_CONST_ROUNDING);
      const __m128i out_15_4 = _mm_add_epi32(out_15_2, k__DCT_CONST_ROUNDING);
      const __m128i out_15_5 = _mm_add_epi32(out_15_3, k__DCT_CONST_ROUNDING);
      const __m128i out_31_4 = _mm_add_epi32(out_31_2, k__DCT_CONST_ROUNDING);
      const __m128i out_31_5 = _mm_add_epi32(out_31_3, k__DCT_CONST_ROUNDING);
      const __m128i out_01_6 = _mm_srai_epi32(out_01_4, DCT_CONST_BITS);
      const __m128i out_01_7 = _mm_srai_epi32(out_01_5, DCT_CONST_BITS);
      const __m128i out_17_6 = _mm_srai_epi32(out_17_4, DCT_CONST_BITS);
      const __m128i out_17_7 = _mm_srai_epi32(out_17_5, DCT_CONST_BITS);
      const __m128i out_09_6 = _mm_srai_epi32(out_09_4, DCT_CONST_BITS);
      const __m128i out_09_7 = _mm_srai_epi32(out_09_5, DCT_CONST_BITS);
      const __m128i out_25_6 = _mm_srai_epi32(out_25_4, DCT_CONST_BITS);
      const __m128i out_25_7 = _mm_srai_epi32(out_25_5, DCT_CONST_BITS);
      const __m128i out_07_6 = _mm_srai_epi32(out_07_4, DCT_CONST_BITS);
      const __m128i out_07_7 = _mm_srai_epi32(out_07_5, DCT_CONST_BITS);
      const __m128i out_23_6 = _mm_srai_epi32(out_23_4, DCT_CONST_BITS);
      const __m128i out_23_7 = _mm_srai_epi32(out_23_5, DCT_CONST_BITS);
      const __m128i out_15_6 = _mm_srai_epi32(out_15_4, DCT_CONST_BITS);
      const __m128i out_15_7 = _mm_srai_epi32(out_15_5, DCT_CONST_BITS);
      const __m128i out_31_6 = _mm_srai_epi32(out_31_4, DCT_CONST_BITS);
      const __m128i out_31_7 = _mm_srai_epi32(out_31_5, DCT_CONST_BITS);
      // Combine
      out[ 1] = _mm_packs_epi32(out_01_6, out_01_7);
      out[17] = _mm_packs_epi32(out_17_6, out_17_7);
      out[ 9] = _mm_packs_epi32(out_09_6, out_09_7);
      out[25] = _mm_packs_epi32(out_25_6, out_25_7);
      out[ 7] = _mm_packs_epi32(out_07_6, out_07_7);
      out[23] = _mm_packs_epi32(out_23_6, out_23_7);
      out[15] = _mm_packs_epi32(out_15_6, out_15_7);
      out[31] = _mm_packs_epi32(out_31_6, out_31_7);
#if DCT_HIGH_BIT_DEPTH
      overflow = check_epi16_overflow_x8(&out[1], &out[17], &out[9],
                                         &out[25], &out[7], &out[23],
                                         &out[15], &out[31]);
      if (overflow) {
        if (pass == 0)
          HIGH_FDCT32x32_2D_C(input, output_org, stride);
        else
          HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
        return;
      }
#endif  // DCT_HIGH_BIT_DEPTH
    }
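    // The remaining odd coefficients, out[5,21,13,29,3,19,11,27], come from
    // the step1[20..27] pairs via the same rotate/round/pack pattern.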
    {
      const __m128i out_05_0 = _mm_unpacklo_epi16(step1[20], step1[27]);
      const __m128i out_05_1 = _mm_unpackhi_epi16(step1[20], step1[27]);
      const __m128i out_21_0 = _mm_unpacklo_epi16(step1[21], step1[26]);
      const __m128i out_21_1 = _mm_unpackhi_epi16(step1[21], step1[26]);
      const __m128i out_13_0 = _mm_unpacklo_epi16(step1[22], step1[25]);
      const __m128i out_13_1 = _mm_unpackhi_epi16(step1[22], step1[25]);
      const __m128i out_29_0 = _mm_unpacklo_epi16(step1[23], step1[24]);
      const __m128i out_29_1 = _mm_unpackhi_epi16(step1[23], step1[24]);
      const __m128i out_05_2 = _mm_madd_epi16(out_05_0, k__cospi_p27_p05);
      const __m128i out_05_3 = _mm_madd_epi16(out_05_1, k__cospi_p27_p05);
      const __m128i out_21_2 = _mm_madd_epi16(out_21_0, k__cospi_p11_p21);
      const __m128i out_21_3 = _mm_madd_epi16(out_21_1, k__cospi_p11_p21);
      const __m128i out_13_2 = _mm_madd_epi16(out_13_0, k__cospi_p19_p13);
      const __m128i out_13_3 = _mm_madd_epi16(out_13_1, k__cospi_p19_p13);
      const __m128i out_29_2 = _mm_madd_epi16(out_29_0, k__cospi_p03_p29);
      const __m128i out_29_3 = _mm_madd_epi16(out_29_1, k__cospi_p03_p29);
      const __m128i out_03_2 = _mm_madd_epi16(out_29_0, k__cospi_m29_p03);
      const __m128i out_03_3 = _mm_madd_epi16(out_29_1, k__cospi_m29_p03);
      const __m128i out_19_2 = _mm_madd_epi16(out_13_0, k__cospi_m13_p19);
      const __m128i out_19_3 = _mm_madd_epi16(out_13_1, k__cospi_m13_p19);
      const __m128i out_11_2 = _mm_madd_epi16(out_21_0, k__cospi_m21_p11);
      const __m128i out_11_3 = _mm_madd_epi16(out_21_1, k__cospi_m21_p11);
      const __m128i out_27_2 = _mm_madd_epi16(out_05_0, k__cospi_m05_p27);
      const __m128i out_27_3 = _mm_madd_epi16(out_05_1, k__cospi_m05_p27);
      // dct_const_round_shift
      const __m128i out_05_4 = _mm_add_epi32(out_05_2, k__DCT_CONST_ROUNDING);
      const __m128i out_05_5 = _mm_add_epi32(out_05_3, k__DCT_CONST_ROUNDING);
      const __m128i out_21_4 = _mm_add_epi32(out_21_2, k__DCT_CONST_ROUNDING);
      const __m128i out_21_5 = _mm_add_epi32(out_21_3, k__DCT_CONST_ROUNDING);
      const __m128i out_13_4 = _mm_add_epi32(out_13_2, k__DCT_CONST_ROUNDING);
      const __m128i out_13_5 = _mm_add_epi32(out_13_3, k__DCT_CONST_ROUNDING);
      const __m128i out_29_4 = _mm_add_epi32(out_29_2, k__DCT_CONST_ROUNDING);
      const __m128i out_29_5 = _mm_add_epi32(out_29_3, k__DCT_CONST_ROUNDING);
      const __m128i out_03_4 = _mm_add_epi32(out_03_2, k__DCT_CONST_ROUNDING);
      const __m128i out_03_5 = _mm_add_epi32(out_03_3, k__DCT_CONST_ROUNDING);
      const __m128i out_19_4 = _mm_add_epi32(out_19_2, k__DCT_CONST_ROUNDING);
      const __m128i out_19_5 = _mm_add_epi32(out_19_3, k__DCT_CONST_ROUNDING);
      const __m128i out_11_4 = _mm_add_epi32(out_11_2, k__DCT_CONST_ROUNDING);
      const __m128i out_11_5 = _mm_add_epi32(out_11_3, k__DCT_CONST_ROUNDING);
      const __m128i out_27_4 = _mm_add_epi32(out_27_2, k__DCT_CONST_ROUNDING);
      const __m128i out_27_5 = _mm_add_epi32(out_27_3, k__DCT_CONST_ROUNDING);
      const __m128i out_05_6 = _mm_srai_epi32(out_05_4, DCT_CONST_BITS);
      const __m128i out_05_7 = _mm_srai_epi32(out_05_5, DCT_CONST_BITS);
      const __m128i out_21_6 = _mm_srai_epi32(out_21_4, DCT_CONST_BITS);
      const __m128i out_21_7 = _mm_srai_epi32(out_21_5, DCT_CONST_BITS);
      const __m128i out_13_6 = _mm_srai_epi32(out_13_4, DCT_CONST_BITS);
      const __m128i out_13_7 = _mm_srai_epi32(out_13_5, DCT_CONST_BITS);
      const __m128i out_29_6 = _mm_srai_epi32(out_29_4, DCT_CONST_BITS);
      const __m128i out_29_7 = _mm_srai_epi32(out_29_5, DCT_CONST_BITS);
      const __m128i out_03_6 = _mm_srai_epi32(out_03_4, DCT_CONST_BITS);
      const __m128i out_03_7 = _mm_srai_epi32(out_03_5, DCT_CONST_BITS);
      const __m128i out_19_6 = _mm_srai_epi32(out_19_4, DCT_CONST_BITS);
      const __m128i out_19_7 = _mm_srai_epi32(out_19_5, DCT_CONST_BITS);
      const __m128i out_11_6 = _mm_srai_epi32(out_11_4, DCT_CONST_BITS);
      const __m128i out_11_7 = _mm_srai_epi32(out_11_5, DCT_CONST_BITS);
      const __m128i out_27_6 = _mm_srai_epi32(out_27_4, DCT_CONST_BITS);
      const __m128i out_27_7 = _mm_srai_epi32(out_27_5, DCT_CONST_BITS);
      // Combine
      out[ 5] = _mm_packs_epi32(out_05_6, out_05_7);
      out[21] = _mm_packs_epi32(out_21_6, out_21_7);
      out[13] = _mm_packs_epi32(out_13_6, out_13_7);
      out[29] = _mm_packs_epi32(out_29_6, out_29_7);
      out[ 3] = _mm_packs_epi32(out_03_6, out_03_7);
      out[19] = _mm_packs_epi32(out_19_6, out_19_7);
      out[11] = _mm_packs_epi32(out_11_6, out_11_7);
      out[27] = _mm_packs_epi32(out_27_6, out_27_7);
#if DCT_HIGH_BIT_DEPTH
      overflow = check_epi16_overflow_x8(&out[5], &out[21], &out[13],
                                         &out[29], &out[3], &out[19],
                                         &out[11], &out[27]);
      if (overflow) {
        if (pass == 0)
          HIGH_FDCT32x32_2D_C(input, output_org, stride);
        else
          HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
        return;
      }
#endif  // DCT_HIGH_BIT_DEPTH
    }
#if FDCT32x32_HIGH_PRECISION
    } else {
      __m128i lstep1[64], lstep2[64], lstep3[64];
      __m128i u[32], v[32], sign[16];
      const __m128i K32One = _mm_set_epi32(1, 1, 1, 1);
      // start using 32-bit operations
      // stage 3
      {
        // expand to 32 bits prior to the addition operations
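        // The unpack interleaves each 16-bit lane with zero; the following
        // _mm_madd_epi16 against kOne multiplies by 1 and sums each pair,
        // which sign-extends the value into a full 32-bit lane.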
        lstep2[ 0] = _mm_unpacklo_epi16(step2[ 0], kZero);
        lstep2[ 1] = _mm_unpackhi_epi16(step2[ 0], kZero);
        lstep2[ 2] = _mm_unpacklo_epi16(step2[ 1], kZero);
        lstep2[ 3] = _mm_unpackhi_epi16(step2[ 1], kZero);
        lstep2[ 4] = _mm_unpacklo_epi16(step2[ 2], kZero);
        lstep2[ 5] = _mm_unpackhi_epi16(step2[ 2], kZero);
        lstep2[ 6] = _mm_unpacklo_epi16(step2[ 3], kZero);
        lstep2[ 7] = _mm_unpackhi_epi16(step2[ 3], kZero);
        lstep2[ 8] = _mm_unpacklo_epi16(step2[ 4], kZero);
        lstep2[ 9] = _mm_unpackhi_epi16(step2[ 4], kZero);
        lstep2[10] = _mm_unpacklo_epi16(step2[ 5], kZero);
        lstep2[11] = _mm_unpackhi_epi16(step2[ 5], kZero);
        lstep2[12] = _mm_unpacklo_epi16(step2[ 6], kZero);
        lstep2[13] = _mm_unpackhi_epi16(step2[ 6], kZero);
        lstep2[14] = _mm_unpacklo_epi16(step2[ 7], kZero);
        lstep2[15] = _mm_unpackhi_epi16(step2[ 7], kZero);
        lstep2[ 0] = _mm_madd_epi16(lstep2[ 0], kOne);
        lstep2[ 1] = _mm_madd_epi16(lstep2[ 1], kOne);
        lstep2[ 2] = _mm_madd_epi16(lstep2[ 2], kOne);
        lstep2[ 3] = _mm_madd_epi16(lstep2[ 3], kOne);
        lstep2[ 4] = _mm_madd_epi16(lstep2[ 4], kOne);
        lstep2[ 5] = _mm_madd_epi16(lstep2[ 5], kOne);
        lstep2[ 6] = _mm_madd_epi16(lstep2[ 6], kOne);
        lstep2[ 7] = _mm_madd_epi16(lstep2[ 7], kOne);
        lstep2[ 8] = _mm_madd_epi16(lstep2[ 8], kOne);
        lstep2[ 9] = _mm_madd_epi16(lstep2[ 9], kOne);
        lstep2[10] = _mm_madd_epi16(lstep2[10], kOne);
        lstep2[11] = _mm_madd_epi16(lstep2[11], kOne);
        lstep2[12] = _mm_madd_epi16(lstep2[12], kOne);
        lstep2[13] = _mm_madd_epi16(lstep2[13], kOne);
        lstep2[14] = _mm_madd_epi16(lstep2[14], kOne);
        lstep2[15] = _mm_madd_epi16(lstep2[15], kOne);

        lstep3[ 0] = _mm_add_epi32(lstep2[14], lstep2[ 0]);
        lstep3[ 1] = _mm_add_epi32(lstep2[15], lstep2[ 1]);
        lstep3[ 2] = _mm_add_epi32(lstep2[12], lstep2[ 2]);
        lstep3[ 3] = _mm_add_epi32(lstep2[13], lstep2[ 3]);
        lstep3[ 4] = _mm_add_epi32(lstep2[10], lstep2[ 4]);
        lstep3[ 5] = _mm_add_epi32(lstep2[11], lstep2[ 5]);
        lstep3[ 6] = _mm_add_epi32(lstep2[ 8], lstep2[ 6]);
        lstep3[ 7] = _mm_add_epi32(lstep2[ 9], lstep2[ 7]);
        lstep3[ 8] = _mm_sub_epi32(lstep2[ 6], lstep2[ 8]);
        lstep3[ 9] = _mm_sub_epi32(lstep2[ 7], lstep2[ 9]);
        lstep3[10] = _mm_sub_epi32(lstep2[ 4], lstep2[10]);
        lstep3[11] = _mm_sub_epi32(lstep2[ 5], lstep2[11]);
        lstep3[12] = _mm_sub_epi32(lstep2[ 2], lstep2[12]);
        lstep3[13] = _mm_sub_epi32(lstep2[ 3], lstep2[13]);
        lstep3[14] = _mm_sub_epi32(lstep2[ 0], lstep2[14]);
        lstep3[15] = _mm_sub_epi32(lstep2[ 1], lstep2[15]);
      }
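      // step2[10..13] are still 16-bit here, so the +/-cospi_16 rotation can
      // use 16-bit madds directly; only the shifted results are stored as
      // 32-bit lstep3[20..27].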
      {
        const __m128i s3_10_0 = _mm_unpacklo_epi16(step2[13], step2[10]);
        const __m128i s3_10_1 = _mm_unpackhi_epi16(step2[13], step2[10]);
        const __m128i s3_11_0 = _mm_unpacklo_epi16(step2[12], step2[11]);
        const __m128i s3_11_1 = _mm_unpackhi_epi16(step2[12], step2[11]);
        const __m128i s3_10_2 = _mm_madd_epi16(s3_10_0, k__cospi_p16_m16);
        const __m128i s3_10_3 = _mm_madd_epi16(s3_10_1, k__cospi_p16_m16);
        const __m128i s3_11_2 = _mm_madd_epi16(s3_11_0, k__cospi_p16_m16);
        const __m128i s3_11_3 = _mm_madd_epi16(s3_11_1, k__cospi_p16_m16);
        const __m128i s3_12_2 = _mm_madd_epi16(s3_11_0, k__cospi_p16_p16);
        const __m128i s3_12_3 = _mm_madd_epi16(s3_11_1, k__cospi_p16_p16);
        const __m128i s3_13_2 = _mm_madd_epi16(s3_10_0, k__cospi_p16_p16);
        const __m128i s3_13_3 = _mm_madd_epi16(s3_10_1, k__cospi_p16_p16);
        // dct_const_round_shift
        const __m128i s3_10_4 = _mm_add_epi32(s3_10_2, k__DCT_CONST_ROUNDING);
        const __m128i s3_10_5 = _mm_add_epi32(s3_10_3, k__DCT_CONST_ROUNDING);
        const __m128i s3_11_4 = _mm_add_epi32(s3_11_2, k__DCT_CONST_ROUNDING);
        const __m128i s3_11_5 = _mm_add_epi32(s3_11_3, k__DCT_CONST_ROUNDING);
        const __m128i s3_12_4 = _mm_add_epi32(s3_12_2, k__DCT_CONST_ROUNDING);
        const __m128i s3_12_5 = _mm_add_epi32(s3_12_3, k__DCT_CONST_ROUNDING);
        const __m128i s3_13_4 = _mm_add_epi32(s3_13_2, k__DCT_CONST_ROUNDING);
        const __m128i s3_13_5 = _mm_add_epi32(s3_13_3, k__DCT_CONST_ROUNDING);
        lstep3[20] = _mm_srai_epi32(s3_10_4, DCT_CONST_BITS);
        lstep3[21] = _mm_srai_epi32(s3_10_5, DCT_CONST_BITS);
        lstep3[22] = _mm_srai_epi32(s3_11_4, DCT_CONST_BITS);
        lstep3[23] = _mm_srai_epi32(s3_11_5, DCT_CONST_BITS);
        lstep3[24] = _mm_srai_epi32(s3_12_4, DCT_CONST_BITS);
        lstep3[25] = _mm_srai_epi32(s3_12_5, DCT_CONST_BITS);
        lstep3[26] = _mm_srai_epi32(s3_13_4, DCT_CONST_BITS);
        lstep3[27] = _mm_srai_epi32(s3_13_5, DCT_CONST_BITS);
      }
      {
        lstep2[40] = _mm_unpacklo_epi16(step2[20], kZero);
        lstep2[41] = _mm_unpackhi_epi16(step2[20], kZero);
        lstep2[42] = _mm_unpacklo_epi16(step2[21], kZero);
        lstep2[43] = _mm_unpackhi_epi16(step2[21], kZero);
        lstep2[44] = _mm_unpacklo_epi16(step2[22], kZero);
        lstep2[45] = _mm_unpackhi_epi16(step2[22], kZero);
        lstep2[46] = _mm_unpacklo_epi16(step2[23], kZero);
        lstep2[47] = _mm_unpackhi_epi16(step2[23], kZero);
        lstep2[48] = _mm_unpacklo_epi16(step2[24], kZero);
        lstep2[49] = _mm_unpackhi_epi16(step2[24], kZero);
        lstep2[50] = _mm_unpacklo_epi16(step2[25], kZero);
        lstep2[51] = _mm_unpackhi_epi16(step2[25], kZero);
        lstep2[52] = _mm_unpacklo_epi16(step2[26], kZero);
        lstep2[53] = _mm_unpackhi_epi16(step2[26], kZero);
        lstep2[54] = _mm_unpacklo_epi16(step2[27], kZero);
        lstep2[55] = _mm_unpackhi_epi16(step2[27], kZero);
        lstep2[40] = _mm_madd_epi16(lstep2[40], kOne);
        lstep2[41] = _mm_madd_epi16(lstep2[41], kOne);
        lstep2[42] = _mm_madd_epi16(lstep2[42], kOne);
        lstep2[43] = _mm_madd_epi16(lstep2[43], kOne);
        lstep2[44] = _mm_madd_epi16(lstep2[44], kOne);
        lstep2[45] = _mm_madd_epi16(lstep2[45], kOne);
        lstep2[46] = _mm_madd_epi16(lstep2[46], kOne);
        lstep2[47] = _mm_madd_epi16(lstep2[47], kOne);
        lstep2[48] = _mm_madd_epi16(lstep2[48], kOne);
        lstep2[49] = _mm_madd_epi16(lstep2[49], kOne);
        lstep2[50] = _mm_madd_epi16(lstep2[50], kOne);
        lstep2[51] = _mm_madd_epi16(lstep2[51], kOne);
        lstep2[52] = _mm_madd_epi16(lstep2[52], kOne);
        lstep2[53] = _mm_madd_epi16(lstep2[53], kOne);
        lstep2[54] = _mm_madd_epi16(lstep2[54], kOne);
        lstep2[55] = _mm_madd_epi16(lstep2[55], kOne);

        lstep1[32] = _mm_unpacklo_epi16(step1[16], kZero);
        lstep1[33] = _mm_unpackhi_epi16(step1[16], kZero);
        lstep1[34] = _mm_unpacklo_epi16(step1[17], kZero);
        lstep1[35] = _mm_unpackhi_epi16(step1[17], kZero);
        lstep1[36] = _mm_unpacklo_epi16(step1[18], kZero);
        lstep1[37] = _mm_unpackhi_epi16(step1[18], kZero);
        lstep1[38] = _mm_unpacklo_epi16(step1[19], kZero);
        lstep1[39] = _mm_unpackhi_epi16(step1[19], kZero);
        lstep1[56] = _mm_unpacklo_epi16(step1[28], kZero);
        lstep1[57] = _mm_unpackhi_epi16(step1[28], kZero);
        lstep1[58] = _mm_unpacklo_epi16(step1[29], kZero);
        lstep1[59] = _mm_unpackhi_epi16(step1[29], kZero);
        lstep1[60] = _mm_unpacklo_epi16(step1[30], kZero);
        lstep1[61] = _mm_unpackhi_epi16(step1[30], kZero);
        lstep1[62] = _mm_unpacklo_epi16(step1[31], kZero);
        lstep1[63] = _mm_unpackhi_epi16(step1[31], kZero);
        lstep1[32] = _mm_madd_epi16(lstep1[32], kOne);
        lstep1[33] = _mm_madd_epi16(lstep1[33], kOne);
        lstep1[34] = _mm_madd_epi16(lstep1[34], kOne);
        lstep1[35] = _mm_madd_epi16(lstep1[35], kOne);
        lstep1[36] = _mm_madd_epi16(lstep1[36], kOne);
        lstep1[37] = _mm_madd_epi16(lstep1[37], kOne);
        lstep1[38] = _mm_madd_epi16(lstep1[38], kOne);
        lstep1[39] = _mm_madd_epi16(lstep1[39], kOne);
        lstep1[56] = _mm_madd_epi16(lstep1[56], kOne);
        lstep1[57] = _mm_madd_epi16(lstep1[57], kOne);
        lstep1[58] = _mm_madd_epi16(lstep1[58], kOne);
        lstep1[59] = _mm_madd_epi16(lstep1[59], kOne);
        lstep1[60] = _mm_madd_epi16(lstep1[60], kOne);
        lstep1[61] = _mm_madd_epi16(lstep1[61], kOne);
        lstep1[62] = _mm_madd_epi16(lstep1[62], kOne);
        lstep1[63] = _mm_madd_epi16(lstep1[63], kOne);

        lstep3[32] = _mm_add_epi32(lstep2[46], lstep1[32]);
        lstep3[33] = _mm_add_epi32(lstep2[47], lstep1[33]);
        lstep3[34] = _mm_add_epi32(lstep2[44], lstep1[34]);
        lstep3[35] = _mm_add_epi32(lstep2[45], lstep1[35]);
        lstep3[36] = _mm_add_epi32(lstep2[42], lstep1[36]);
        lstep3[37] = _mm_add_epi32(lstep2[43], lstep1[37]);
        lstep3[38] = _mm_add_epi32(lstep2[40], lstep1[38]);
        lstep3[39] = _mm_add_epi32(lstep2[41], lstep1[39]);
        lstep3[40] = _mm_sub_epi32(lstep1[38], lstep2[40]);
        lstep3[41] = _mm_sub_epi32(lstep1[39], lstep2[41]);
        lstep3[42] = _mm_sub_epi32(lstep1[36], lstep2[42]);
        lstep3[43] = _mm_sub_epi32(lstep1[37], lstep2[43]);
        lstep3[44] = _mm_sub_epi32(lstep1[34], lstep2[44]);
        lstep3[45] = _mm_sub_epi32(lstep1[35], lstep2[45]);
        lstep3[46] = _mm_sub_epi32(lstep1[32], lstep2[46]);
        lstep3[47] = _mm_sub_epi32(lstep1[33], lstep2[47]);
        lstep3[48] = _mm_sub_epi32(lstep1[62], lstep2[48]);
        lstep3[49] = _mm_sub_epi32(lstep1[63], lstep2[49]);
        lstep3[50] = _mm_sub_epi32(lstep1[60], lstep2[50]);
        lstep3[51] = _mm_sub_epi32(lstep1[61], lstep2[51]);
        lstep3[52] = _mm_sub_epi32(lstep1[58], lstep2[52]);
        lstep3[53] = _mm_sub_epi32(lstep1[59], lstep2[53]);
        lstep3[54] = _mm_sub_epi32(lstep1[56], lstep2[54]);
        lstep3[55] = _mm_sub_epi32(lstep1[57], lstep2[55]);
        lstep3[56] = _mm_add_epi32(lstep2[54], lstep1[56]);
        lstep3[57] = _mm_add_epi32(lstep2[55], lstep1[57]);
        lstep3[58] = _mm_add_epi32(lstep2[52], lstep1[58]);
        lstep3[59] = _mm_add_epi32(lstep2[53], lstep1[59]);
        lstep3[60] = _mm_add_epi32(lstep2[50], lstep1[60]);
        lstep3[61] = _mm_add_epi32(lstep2[51], lstep1[61]);
        lstep3[62] = _mm_add_epi32(lstep2[48], lstep1[62]);
        lstep3[63] = _mm_add_epi32(lstep2[49], lstep1[63]);
      }

      // stage 4
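      // Stage 4 widens the remaining 16-bit inputs (step2[8,9,14,15]) and
      // then carries the butterflies entirely in 32-bit precision.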
      {
        // expand to 32 bits prior to the addition operations
        lstep2[16] = _mm_unpacklo_epi16(step2[ 8], kZero);
        lstep2[17] = _mm_unpackhi_epi16(step2[ 8], kZero);
        lstep2[18] = _mm_unpacklo_epi16(step2[ 9], kZero);
        lstep2[19] = _mm_unpackhi_epi16(step2[ 9], kZero);
        lstep2[28] = _mm_unpacklo_epi16(step2[14], kZero);
        lstep2[29] = _mm_unpackhi_epi16(step2[14], kZero);
        lstep2[30] = _mm_unpacklo_epi16(step2[15], kZero);
        lstep2[31] = _mm_unpackhi_epi16(step2[15], kZero);
        lstep2[16] = _mm_madd_epi16(lstep2[16], kOne);
        lstep2[17] = _mm_madd_epi16(lstep2[17], kOne);
        lstep2[18] = _mm_madd_epi16(lstep2[18], kOne);
        lstep2[19] = _mm_madd_epi16(lstep2[19], kOne);
        lstep2[28] = _mm_madd_epi16(lstep2[28], kOne);
        lstep2[29] = _mm_madd_epi16(lstep2[29], kOne);
        lstep2[30] = _mm_madd_epi16(lstep2[30], kOne);
        lstep2[31] = _mm_madd_epi16(lstep2[31], kOne);

        lstep1[ 0] = _mm_add_epi32(lstep3[ 6], lstep3[ 0]);
        lstep1[ 1] = _mm_add_epi32(lstep3[ 7], lstep3[ 1]);
        lstep1[ 2] = _mm_add_epi32(lstep3[ 4], lstep3[ 2]);
        lstep1[ 3] = _mm_add_epi32(lstep3[ 5], lstep3[ 3]);
        lstep1[ 4] = _mm_sub_epi32(lstep3[ 2], lstep3[ 4]);
        lstep1[ 5] = _mm_sub_epi32(lstep3[ 3], lstep3[ 5]);
        lstep1[ 6] = _mm_sub_epi32(lstep3[ 0], lstep3[ 6]);
        lstep1[ 7] = _mm_sub_epi32(lstep3[ 1], lstep3[ 7]);
        lstep1[16] = _mm_add_epi32(lstep3[22], lstep2[16]);
        lstep1[17] = _mm_add_epi32(lstep3[23], lstep2[17]);
        lstep1[18] = _mm_add_epi32(lstep3[20], lstep2[18]);
        lstep1[19] = _mm_add_epi32(lstep3[21], lstep2[19]);
        lstep1[20] = _mm_sub_epi32(lstep2[18], lstep3[20]);
        lstep1[21] = _mm_sub_epi32(lstep2[19], lstep3[21]);
        lstep1[22] = _mm_sub_epi32(lstep2[16], lstep3[22]);
        lstep1[23] = _mm_sub_epi32(lstep2[17], lstep3[23]);
        lstep1[24] = _mm_sub_epi32(lstep2[30], lstep3[24]);
        lstep1[25] = _mm_sub_epi32(lstep2[31], lstep3[25]);
        lstep1[26] = _mm_sub_epi32(lstep2[28], lstep3[26]);
        lstep1[27] = _mm_sub_epi32(lstep2[29], lstep3[27]);
        lstep1[28] = _mm_add_epi32(lstep3[26], lstep2[28]);
        lstep1[29] = _mm_add_epi32(lstep3[27], lstep2[29]);
        lstep1[30] = _mm_add_epi32(lstep3[24], lstep2[30]);
        lstep1[31] = _mm_add_epi32(lstep3[25], lstep2[31]);
      }
      {
        // Rotate lstep3[10..13] by +/-cospi_16 to produce lstep1[10..13].
        const __m128i k32_p16_p16 = pair_set_epi32(cospi_16_64, cospi_16_64);
        const __m128i k32_p16_m16 = pair_set_epi32(cospi_16_64, -cospi_16_64);

        u[0] = _mm_unpacklo_epi32(lstep3[12], lstep3[10]);
        u[1] = _mm_unpackhi_epi32(lstep3[12], lstep3[10]);
        u[2] = _mm_unpacklo_epi32(lstep3[13], lstep3[11]);
        u[3] = _mm_unpackhi_epi32(lstep3[13], lstep3[11]);

        // TODO(jingning): manually inline k_madd_epi32_ to further hide
        // instruction latency.
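        // k_madd_epi32() is the 32-bit analogue of _mm_madd_epi16: it
        // multiplies 32-bit pairs and accumulates into 64-bit sums, and
        // k_packs_epi64() narrows those sums back to 32-bit lanes (both
        // helpers are defined in vp9_dct_sse2.h).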
        v[0] = k_madd_epi32(u[0], k32_p16_m16);
        v[1] = k_madd_epi32(u[1], k32_p16_m16);
        v[2] = k_madd_epi32(u[2], k32_p16_m16);
        v[3] = k_madd_epi32(u[3], k32_p16_m16);
        v[4] = k_madd_epi32(u[0], k32_p16_p16);
        v[5] = k_madd_epi32(u[1], k32_p16_p16);
        v[6] = k_madd_epi32(u[2], k32_p16_p16);
        v[7] = k_madd_epi32(u[3], k32_p16_p16);
#if DCT_HIGH_BIT_DEPTH
        overflow = k_check_epi32_overflow_8(&v[0], &v[1], &v[2], &v[3],
                                            &v[4], &v[5], &v[6], &v[7],
                                            &kZero);
        if (overflow) {
          HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
          return;
        }
#endif  // DCT_HIGH_BIT_DEPTH
        u[0] = k_packs_epi64(v[0], v[1]);
        u[1] = k_packs_epi64(v[2], v[3]);
        u[2] = k_packs_epi64(v[4], v[5]);
        u[3] = k_packs_epi64(v[6], v[7]);

        v[0] = _mm_add_epi32(u[0], k__DCT_CONST_ROUNDING);
        v[1] = _mm_add_epi32(u[1], k__DCT_CONST_ROUNDING);
        v[2] = _mm_add_epi32(u[2], k__DCT_CONST_ROUNDING);
        v[3] = _mm_add_epi32(u[3], k__DCT_CONST_ROUNDING);

        lstep1[10] = _mm_srai_epi32(v[0], DCT_CONST_BITS);
        lstep1[11] = _mm_srai_epi32(v[1], DCT_CONST_BITS);
        lstep1[12] = _mm_srai_epi32(v[2], DCT_CONST_BITS);
        lstep1[13] = _mm_srai_epi32(v[3], DCT_CONST_BITS);
      }
      {
        const __m128i k32_m08_p24 = pair_set_epi32(-cospi_8_64, cospi_24_64);
        const __m128i k32_m24_m08 = pair_set_epi32(-cospi_24_64, -cospi_8_64);
        const __m128i k32_p24_p08 = pair_set_epi32(cospi_24_64, cospi_8_64);

        u[ 0] = _mm_unpacklo_epi32(lstep3[36], lstep3[58]);
        u[ 1] = _mm_unpackhi_epi32(lstep3[36], lstep3[58]);
        u[ 2] = _mm_unpacklo_epi32(lstep3[37], lstep3[59]);
        u[ 3] = _mm_unpackhi_epi32(lstep3[37], lstep3[59]);
        u[ 4] = _mm_unpacklo_epi32(lstep3[38], lstep3[56]);
        u[ 5] = _mm_unpackhi_epi32(lstep3[38], lstep3[56]);
        u[ 6] = _mm_unpacklo_epi32(lstep3[39], lstep3[57]);
        u[ 7] = _mm_unpackhi_epi32(lstep3[39], lstep3[57]);
        u[ 8] = _mm_unpacklo_epi32(lstep3[40], lstep3[54]);
        u[ 9] = _mm_unpackhi_epi32(lstep3[40], lstep3[54]);
        u[10] = _mm_unpacklo_epi32(lstep3[41], lstep3[55]);
        u[11] = _mm_unpackhi_epi32(lstep3[41], lstep3[55]);
        u[12] = _mm_unpacklo_epi32(lstep3[42], lstep3[52]);
        u[13] = _mm_unpackhi_epi32(lstep3[42], lstep3[52]);
        u[14] = _mm_unpacklo_epi32(lstep3[43], lstep3[53]);
        u[15] = _mm_unpackhi_epi32(lstep3[43], lstep3[53]);

        v[ 0] = k_madd_epi32(u[ 0], k32_m08_p24);
        v[ 1] = k_madd_epi32(u[ 1], k32_m08_p24);
        v[ 2] = k_madd_epi32(u[ 2], k32_m08_p24);
        v[ 3] = k_madd_epi32(u[ 3], k32_m08_p24);
        v[ 4] = k_madd_epi32(u[ 4], k32_m08_p24);
        v[ 5] = k_madd_epi32(u[ 5], k32_m08_p24);
        v[ 6] = k_madd_epi32(u[ 6], k32_m08_p24);
        v[ 7] = k_madd_epi32(u[ 7], k32_m08_p24);
        v[ 8] = k_madd_epi32(u[ 8], k32_m24_m08);
        v[ 9] = k_madd_epi32(u[ 9], k32_m24_m08);
        v[10] = k_madd_epi32(u[10], k32_m24_m08);
        v[11] = k_madd_epi32(u[11], k32_m24_m08);
        v[12] = k_madd_epi32(u[12], k32_m24_m08);
        v[13] = k_madd_epi32(u[13], k32_m24_m08);
        v[14] = k_madd_epi32(u[14], k32_m24_m08);
        v[15] = k_madd_epi32(u[15], k32_m24_m08);
        v[16] = k_madd_epi32(u[12], k32_m08_p24);
        v[17] = k_madd_epi32(u[13], k32_m08_p24);
        v[18] = k_madd_epi32(u[14], k32_m08_p24);
        v[19] = k_madd_epi32(u[15], k32_m08_p24);
        v[20] = k_madd_epi32(u[ 8], k32_m08_p24);
        v[21] = k_madd_epi32(u[ 9], k32_m08_p24);
        v[22] = k_madd_epi32(u[10], k32_m08_p24);
        v[23] = k_madd_epi32(u[11], k32_m08_p24);
        v[24] = k_madd_epi32(u[ 4], k32_p24_p08);
        v[25] = k_madd_epi32(u[ 5], k32_p24_p08);
        v[26] = k_madd_epi32(u[ 6], k32_p24_p08);
        v[27] = k_madd_epi32(u[ 7], k32_p24_p08);
        v[28] = k_madd_epi32(u[ 0], k32_p24_p08);
        v[29] = k_madd_epi32(u[ 1], k32_p24_p08);
        v[30] = k_madd_epi32(u[ 2], k32_p24_p08);
        v[31] = k_madd_epi32(u[ 3], k32_p24_p08);

#if DCT_HIGH_BIT_DEPTH
        overflow = k_check_epi32_overflow_32(
            &v[0], &v[1], &v[2], &v[3], &v[4], &v[5], &v[6], &v[7],
            &v[8], &v[9], &v[10], &v[11], &v[12], &v[13], &v[14], &v[15],
            &v[16], &v[17], &v[18], &v[19], &v[20], &v[21], &v[22], &v[23],
            &v[24], &v[25], &v[26], &v[27], &v[28], &v[29], &v[30], &v[31],
            &kZero);
        if (overflow) {
          HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
          return;
        }
#endif  // DCT_HIGH_BIT_DEPTH
        u[ 0] = k_packs_epi64(v[ 0], v[ 1]);
        u[ 1] = k_packs_epi64(v[ 2], v[ 3]);
        u[ 2] = k_packs_epi64(v[ 4], v[ 5]);
        u[ 3] = k_packs_epi64(v[ 6], v[ 7]);
        u[ 4] = k_packs_epi64(v[ 8], v[ 9]);
        u[ 5] = k_packs_epi64(v[10], v[11]);
        u[ 6] = k_packs_epi64(v[12], v[13]);
        u[ 7] = k_packs_epi64(v[14], v[15]);
        u[ 8] = k_packs_epi64(v[16], v[17]);
        u[ 9] = k_packs_epi64(v[18], v[19]);
        u[10] = k_packs_epi64(v[20], v[21]);
        u[11] = k_packs_epi64(v[22], v[23]);
        u[12] = k_packs_epi64(v[24], v[25]);
        u[13] = k_packs_epi64(v[26], v[27]);
        u[14] = k_packs_epi64(v[28], v[29]);
        u[15] = k_packs_epi64(v[30], v[31]);

        v[ 0] = _mm_add_epi32(u[ 0], k__DCT_CONST_ROUNDING);
        v[ 1] = _mm_add_epi32(u[ 1], k__DCT_CONST_ROUNDING);
        v[ 2] = _mm_add_epi32(u[ 2], k__DCT_CONST_ROUNDING);
        v[ 3] = _mm_add_epi32(u[ 3], k__DCT_CONST_ROUNDING);
        v[ 4] = _mm_add_epi32(u[ 4], k__DCT_CONST_ROUNDING);
        v[ 5] = _mm_add_epi32(u[ 5], k__DCT_CONST_ROUNDING);
        v[ 6] = _mm_add_epi32(u[ 6], k__DCT_CONST_ROUNDING);
        v[ 7] = _mm_add_epi32(u[ 7], k__DCT_CONST_ROUNDING);
        v[ 8] = _mm_add_epi32(u[ 8], k__DCT_CONST_ROUNDING);
        v[ 9] = _mm_add_epi32(u[ 9], k__DCT_CONST_ROUNDING);
        v[10] = _mm_add_epi32(u[10], k__DCT_CONST_ROUNDING);
        v[11] = _mm_add_epi32(u[11], k__DCT_CONST_ROUNDING);
        v[12] = _mm_add_epi32(u[12], k__DCT_CONST_ROUNDING);
        v[13] = _mm_add_epi32(u[13], k__DCT_CONST_ROUNDING);
        v[14] = _mm_add_epi32(u[14], k__DCT_CONST_ROUNDING);
        v[15] = _mm_add_epi32(u[15], k__DCT_CONST_ROUNDING);

        lstep1[36] = _mm_srai_epi32(v[ 0], DCT_CONST_BITS);
        lstep1[37] = _mm_srai_epi32(v[ 1], DCT_CONST_BITS);
        lstep1[38] = _mm_srai_epi32(v[ 2], DCT_CONST_BITS);
        lstep1[39] = _mm_srai_epi32(v[ 3], DCT_CONST_BITS);
        lstep1[40] = _mm_srai_epi32(v[ 4], DCT_CONST_BITS);
        lstep1[41] = _mm_srai_epi32(v[ 5], DCT_CONST_BITS);
        lstep1[42] = _mm_srai_epi32(v[ 6], DCT_CONST_BITS);
        lstep1[43] = _mm_srai_epi32(v[ 7], DCT_CONST_BITS);
        lstep1[52] = _mm_srai_epi32(v[ 8], DCT_CONST_BITS);
        lstep1[53] = _mm_srai_epi32(v[ 9], DCT_CONST_BITS);
        lstep1[54] = _mm_srai_epi32(v[10], DCT_CONST_BITS);
        lstep1[55] = _mm_srai_epi32(v[11], DCT_CONST_BITS);
        lstep1[56] = _mm_srai_epi32(v[12], DCT_CONST_BITS);
        lstep1[57] = _mm_srai_epi32(v[13], DCT_CONST_BITS);
        lstep1[58] = _mm_srai_epi32(v[14], DCT_CONST_BITS);
        lstep1[59] = _mm_srai_epi32(v[15], DCT_CONST_BITS);
      }
      // stage 5
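      // Stage 5 combines lstep1[10..13] with lstep3[8,9,14,15] to form
      // lstep2[8..15], then rotates lstep1[0..7] to produce out[0], out[16],
      // out[8] and out[24].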
      {
        lstep2[ 8] = _mm_add_epi32(lstep1[10], lstep3[ 8]);
        lstep2[ 9] = _mm_add_epi32(lstep1[11], lstep3[ 9]);
        lstep2[10] = _mm_sub_epi32(lstep3[ 8], lstep1[10]);
        lstep2[11] = _mm_sub_epi32(lstep3[ 9], lstep1[11]);
        lstep2[12] = _mm_sub_epi32(lstep3[14], lstep1[12]);
        lstep2[13] = _mm_sub_epi32(lstep3[15], lstep1[13]);
        lstep2[14] = _mm_add_epi32(lstep1[12], lstep3[14]);
        lstep2[15] = _mm_add_epi32(lstep1[13], lstep3[15]);
      }
      {
        const __m128i k32_p16_p16 = pair_set_epi32(cospi_16_64, cospi_16_64);
        const __m128i k32_p16_m16 = pair_set_epi32(cospi_16_64, -cospi_16_64);
        const __m128i k32_p24_p08 = pair_set_epi32(cospi_24_64, cospi_8_64);
        const __m128i k32_m08_p24 = pair_set_epi32(-cospi_8_64, cospi_24_64);

        u[0] = _mm_unpacklo_epi32(lstep1[0], lstep1[2]);
        u[1] = _mm_unpackhi_epi32(lstep1[0], lstep1[2]);
        u[2] = _mm_unpacklo_epi32(lstep1[1], lstep1[3]);
        u[3] = _mm_unpackhi_epi32(lstep1[1], lstep1[3]);
        u[4] = _mm_unpacklo_epi32(lstep1[4], lstep1[6]);
        u[5] = _mm_unpackhi_epi32(lstep1[4], lstep1[6]);
        u[6] = _mm_unpacklo_epi32(lstep1[5], lstep1[7]);
        u[7] = _mm_unpackhi_epi32(lstep1[5], lstep1[7]);

        // TODO(jingning): manually inline k_madd_epi32_ to further hide
        // instruction latency.
        v[ 0] = k_madd_epi32(u[0], k32_p16_p16);
        v[ 1] = k_madd_epi32(u[1], k32_p16_p16);
        v[ 2] = k_madd_epi32(u[2], k32_p16_p16);
        v[ 3] = k_madd_epi32(u[3], k32_p16_p16);
        v[ 4] = k_madd_epi32(u[0], k32_p16_m16);
        v[ 5] = k_madd_epi32(u[1], k32_p16_m16);
        v[ 6] = k_madd_epi32(u[2], k32_p16_m16);
        v[ 7] = k_madd_epi32(u[3], k32_p16_m16);
        v[ 8] = k_madd_epi32(u[4], k32_p24_p08);
        v[ 9] = k_madd_epi32(u[5], k32_p24_p08);
        v[10] = k_madd_epi32(u[6], k32_p24_p08);
        v[11] = k_madd_epi32(u[7], k32_p24_p08);
        v[12] = k_madd_epi32(u[4], k32_m08_p24);
        v[13] = k_madd_epi32(u[5], k32_m08_p24);
        v[14] = k_madd_epi32(u[6], k32_m08_p24);
        v[15] = k_madd_epi32(u[7], k32_m08_p24);

#if DCT_HIGH_BIT_DEPTH
        overflow = k_check_epi32_overflow_16(
            &v[0], &v[1], &v[2], &v[3], &v[4], &v[5], &v[6], &v[7],
            &v[8], &v[9], &v[10], &v[11], &v[12], &v[13], &v[14], &v[15],
            &kZero);
        if (overflow) {
          HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
          return;
        }
#endif  // DCT_HIGH_BIT_DEPTH
        u[0] = k_packs_epi64(v[0], v[1]);
        u[1] = k_packs_epi64(v[2], v[3]);
        u[2] = k_packs_epi64(v[4], v[5]);
        u[3] = k_packs_epi64(v[6], v[7]);
        u[4] = k_packs_epi64(v[8], v[9]);
        u[5] = k_packs_epi64(v[10], v[11]);
        u[6] = k_packs_epi64(v[12], v[13]);
        u[7] = k_packs_epi64(v[14], v[15]);

        v[0] = _mm_add_epi32(u[0], k__DCT_CONST_ROUNDING);
        v[1] = _mm_add_epi32(u[1], k__DCT_CONST_ROUNDING);
        v[2] = _mm_add_epi32(u[2], k__DCT_CONST_ROUNDING);
        v[3] = _mm_add_epi32(u[3], k__DCT_CONST_ROUNDING);
        v[4] = _mm_add_epi32(u[4], k__DCT_CONST_ROUNDING);
        v[5] = _mm_add_epi32(u[5], k__DCT_CONST_ROUNDING);
        v[6] = _mm_add_epi32(u[6], k__DCT_CONST_ROUNDING);
        v[7] = _mm_add_epi32(u[7], k__DCT_CONST_ROUNDING);

        u[0] = _mm_srai_epi32(v[0], DCT_CONST_BITS);
        u[1] = _mm_srai_epi32(v[1], DCT_CONST_BITS);
        u[2] = _mm_srai_epi32(v[2], DCT_CONST_BITS);
        u[3] = _mm_srai_epi32(v[3], DCT_CONST_BITS);
        u[4] = _mm_srai_epi32(v[4], DCT_CONST_BITS);
        u[5] = _mm_srai_epi32(v[5], DCT_CONST_BITS);
        u[6] = _mm_srai_epi32(v[6], DCT_CONST_BITS);
        u[7] = _mm_srai_epi32(v[7], DCT_CONST_BITS);

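        // Round to match the C row transform: out = (x + 1 + (x < 0)) >> 2.
        // The cmplt mask below is -1 for negative lanes, so subtracting it
        // adds the extra 1.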
        sign[0] = _mm_cmplt_epi32(u[0], kZero);
        sign[1] = _mm_cmplt_epi32(u[1], kZero);
        sign[2] = _mm_cmplt_epi32(u[2], kZero);
        sign[3] = _mm_cmplt_epi32(u[3], kZero);
        sign[4] = _mm_cmplt_epi32(u[4], kZero);
        sign[5] = _mm_cmplt_epi32(u[5], kZero);
        sign[6] = _mm_cmplt_epi32(u[6], kZero);
        sign[7] = _mm_cmplt_epi32(u[7], kZero);

        u[0] = _mm_sub_epi32(u[0], sign[0]);
        u[1] = _mm_sub_epi32(u[1], sign[1]);
        u[2] = _mm_sub_epi32(u[2], sign[2]);
        u[3] = _mm_sub_epi32(u[3], sign[3]);
        u[4] = _mm_sub_epi32(u[4], sign[4]);
        u[5] = _mm_sub_epi32(u[5], sign[5]);
        u[6] = _mm_sub_epi32(u[6], sign[6]);
        u[7] = _mm_sub_epi32(u[7], sign[7]);

        u[0] = _mm_add_epi32(u[0], K32One);
        u[1] = _mm_add_epi32(u[1], K32One);
        u[2] = _mm_add_epi32(u[2], K32One);
        u[3] = _mm_add_epi32(u[3], K32One);
        u[4] = _mm_add_epi32(u[4], K32One);
        u[5] = _mm_add_epi32(u[5], K32One);
        u[6] = _mm_add_epi32(u[6], K32One);
        u[7] = _mm_add_epi32(u[7], K32One);

        u[0] = _mm_srai_epi32(u[0], 2);
        u[1] = _mm_srai_epi32(u[1], 2);
        u[2] = _mm_srai_epi32(u[2], 2);
        u[3] = _mm_srai_epi32(u[3], 2);
        u[4] = _mm_srai_epi32(u[4], 2);
        u[5] = _mm_srai_epi32(u[5], 2);
        u[6] = _mm_srai_epi32(u[6], 2);
        u[7] = _mm_srai_epi32(u[7], 2);

        // Combine
        out[ 0] = _mm_packs_epi32(u[0], u[1]);
        out[16] = _mm_packs_epi32(u[2], u[3]);
        out[ 8] = _mm_packs_epi32(u[4], u[5]);
        out[24] = _mm_packs_epi32(u[6], u[7]);
#if DCT_HIGH_BIT_DEPTH
        overflow = check_epi16_overflow_x4(&out[0], &out[16],
                                           &out[8], &out[24]);
        if (overflow) {
          HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
          return;
        }
#endif  // DCT_HIGH_BIT_DEPTH
      }
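      // Stage 5, continued: rotate the lstep1[18..21]/lstep1[26..29] pairs
      // by the cospi_8/cospi_24 constants to form lstep2[18..21] and
      // lstep2[26..29].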
      {
        const __m128i k32_m08_p24 = pair_set_epi32(-cospi_8_64, cospi_24_64);
        const __m128i k32_m24_m08 = pair_set_epi32(-cospi_24_64, -cospi_8_64);
        const __m128i k32_p24_p08 = pair_set_epi32(cospi_24_64, cospi_8_64);

        u[0] = _mm_unpacklo_epi32(lstep1[18], lstep1[28]);
        u[1] = _mm_unpackhi_epi32(lstep1[18], lstep1[28]);
        u[2] = _mm_unpacklo_epi32(lstep1[19], lstep1[29]);
        u[3] = _mm_unpackhi_epi32(lstep1[19], lstep1[29]);
        u[4] = _mm_unpacklo_epi32(lstep1[20], lstep1[26]);
        u[5] = _mm_unpackhi_epi32(lstep1[20], lstep1[26]);
        u[6] = _mm_unpacklo_epi32(lstep1[21], lstep1[27]);
        u[7] = _mm_unpackhi_epi32(lstep1[21], lstep1[27]);

        v[0] = k_madd_epi32(u[0], k32_m08_p24);
        v[1] = k_madd_epi32(u[1], k32_m08_p24);
        v[2] = k_madd_epi32(u[2], k32_m08_p24);
        v[3] = k_madd_epi32(u[3], k32_m08_p24);
        v[4] = k_madd_epi32(u[4], k32_m24_m08);
        v[5] = k_madd_epi32(u[5], k32_m24_m08);
        v[6] = k_madd_epi32(u[6], k32_m24_m08);
        v[7] = k_madd_epi32(u[7], k32_m24_m08);
        v[ 8] = k_madd_epi32(u[4], k32_m08_p24);
        v[ 9] = k_madd_epi32(u[5], k32_m08_p24);
        v[10] = k_madd_epi32(u[6], k32_m08_p24);
        v[11] = k_madd_epi32(u[7], k32_m08_p24);
        v[12] = k_madd_epi32(u[0], k32_p24_p08);
        v[13] = k_madd_epi32(u[1], k32_p24_p08);
        v[14] = k_madd_epi32(u[2], k32_p24_p08);
        v[15] = k_madd_epi32(u[3], k32_p24_p08);

#if DCT_HIGH_BIT_DEPTH
        overflow = k_check_epi32_overflow_16(
            &v[0], &v[1], &v[2], &v[3], &v[4], &v[5], &v[6], &v[7],
            &v[8], &v[9], &v[10], &v[11], &v[12], &v[13], &v[14], &v[15],
            &kZero);
        if (overflow) {
          HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
          return;
        }
#endif  // DCT_HIGH_BIT_DEPTH
        u[0] = k_packs_epi64(v[0], v[1]);
        u[1] = k_packs_epi64(v[2], v[3]);
        u[2] = k_packs_epi64(v[4], v[5]);
        u[3] = k_packs_epi64(v[6], v[7]);
        u[4] = k_packs_epi64(v[8], v[9]);
        u[5] = k_packs_epi64(v[10], v[11]);
        u[6] = k_packs_epi64(v[12], v[13]);
        u[7] = k_packs_epi64(v[14], v[15]);

        u[0] = _mm_add_epi32(u[0], k__DCT_CONST_ROUNDING);
        u[1] = _mm_add_epi32(u[1], k__DCT_CONST_ROUNDING);
        u[2] = _mm_add_epi32(u[2], k__DCT_CONST_ROUNDING);
        u[3] = _mm_add_epi32(u[3], k__DCT_CONST_ROUNDING);
        u[4] = _mm_add_epi32(u[4], k__DCT_CONST_ROUNDING);
        u[5] = _mm_add_epi32(u[5], k__DCT_CONST_ROUNDING);
        u[6] = _mm_add_epi32(u[6], k__DCT_CONST_ROUNDING);
        u[7] = _mm_add_epi32(u[7], k__DCT_CONST_ROUNDING);

        lstep2[18] = _mm_srai_epi32(u[0], DCT_CONST_BITS);
        lstep2[19] = _mm_srai_epi32(u[1], DCT_CONST_BITS);
        lstep2[20] = _mm_srai_epi32(u[2], DCT_CONST_BITS);
        lstep2[21] = _mm_srai_epi32(u[3], DCT_CONST_BITS);
        lstep2[26] = _mm_srai_epi32(u[4], DCT_CONST_BITS);
        lstep2[27] = _mm_srai_epi32(u[5], DCT_CONST_BITS);
        lstep2[28] = _mm_srai_epi32(u[6], DCT_CONST_BITS);
        lstep2[29] = _mm_srai_epi32(u[7], DCT_CONST_BITS);
      }
      {
        lstep2[32] = _mm_add_epi32(lstep1[38], lstep3[32]);
        lstep2[33] = _mm_add_epi32(lstep1[39], lstep3[33]);
        lstep2[34] = _mm_add_epi32(lstep1[36], lstep3[34]);
        lstep2[35] = _mm_add_epi32(lstep1[37], lstep3[35]);
        lstep2[36] = _mm_sub_epi32(lstep3[34], lstep1[36]);
        lstep2[37] = _mm_sub_epi32(lstep3[35], lstep1[37]);
        lstep2[38] = _mm_sub_epi32(lstep3[32], lstep1[38]);
        lstep2[39] = _mm_sub_epi32(lstep3[33], lstep1[39]);
        lstep2[40] = _mm_sub_epi32(lstep3[46], lstep1[40]);
        lstep2[41] = _mm_sub_epi32(lstep3[47], lstep1[41]);
        lstep2[42] = _mm_sub_epi32(lstep3[44], lstep1[42]);
        lstep2[43] = _mm_sub_epi32(lstep3[45], lstep1[43]);
        lstep2[44] = _mm_add_epi32(lstep1[42], lstep3[44]);
        lstep2[45] = _mm_add_epi32(lstep1[43], lstep3[45]);
        lstep2[46] = _mm_add_epi32(lstep1[40], lstep3[46]);
        lstep2[47] = _mm_add_epi32(lstep1[41], lstep3[47]);
        lstep2[48] = _mm_add_epi32(lstep1[54], lstep3[48]);
        lstep2[49] = _mm_add_epi32(lstep1[55], lstep3[49]);
        lstep2[50] = _mm_add_epi32(lstep1[52], lstep3[50]);
        lstep2[51] = _mm_add_epi32(lstep1[53], lstep3[51]);
        lstep2[52] = _mm_sub_epi32(lstep3[50], lstep1[52]);
        lstep2[53] = _mm_sub_epi32(lstep3[51], lstep1[53]);
        lstep2[54] = _mm_sub_epi32(lstep3[48], lstep1[54]);
        lstep2[55] = _mm_sub_epi32(lstep3[49], lstep1[55]);
        lstep2[56] = _mm_sub_epi32(lstep3[62], lstep1[56]);
        lstep2[57] = _mm_sub_epi32(lstep3[63], lstep1[57]);
        lstep2[58] = _mm_sub_epi32(lstep3[60], lstep1[58]);
        lstep2[59] = _mm_sub_epi32(lstep3[61], lstep1[59]);
        lstep2[60] = _mm_add_epi32(lstep1[58], lstep3[60]);
        lstep2[61] = _mm_add_epi32(lstep1[59], lstep3[61]);
        lstep2[62] = _mm_add_epi32(lstep1[56], lstep3[62]);
        lstep2[63] = _mm_add_epi32(lstep1[57], lstep3[63]);
      }
      // stage 6
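      // Stage 6: rotate lstep2[8..15] by the (28,4) and (12,20) cospi pairs;
      // after rounding these become the out[4]/out[20]/out[12]/out[28]
      // terms, mirroring the 16-bit path above.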
        {
          const __m128i k32_p28_p04 = pair_set_epi32(cospi_28_64, cospi_4_64);
          const __m128i k32_p12_p20 = pair_set_epi32(cospi_12_64, cospi_20_64);
          const __m128i k32_m20_p12 = pair_set_epi32(-cospi_20_64, cospi_12_64);
          const __m128i k32_m04_p28 = pair_set_epi32(-cospi_4_64, cospi_28_64);

          u[0] = _mm_unpacklo_epi32(lstep2[ 8], lstep2[14]);
          u[1] = _mm_unpackhi_epi32(lstep2[ 8], lstep2[14]);
          u[2] = _mm_unpacklo_epi32(lstep2[ 9], lstep2[15]);
          u[3] = _mm_unpackhi_epi32(lstep2[ 9], lstep2[15]);
          u[4] = _mm_unpacklo_epi32(lstep2[10], lstep2[12]);
          u[5] = _mm_unpackhi_epi32(lstep2[10], lstep2[12]);
          u[6] = _mm_unpacklo_epi32(lstep2[11], lstep2[13]);
          u[7] = _mm_unpackhi_epi32(lstep2[11], lstep2[13]);
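          // u[8]..u[15] repeat the interleavings above so that each pair can
          // also be multiplied by the second cosine constant of its rotation.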
          u[8] = _mm_unpacklo_epi32(lstep2[10], lstep2[12]);
          u[9] = _mm_unpackhi_epi32(lstep2[10], lstep2[12]);
          u[10] = _mm_unpacklo_epi32(lstep2[11], lstep2[13]);
          u[11] = _mm_unpackhi_epi32(lstep2[11], lstep2[13]);
          u[12] = _mm_unpacklo_epi32(lstep2[ 8], lstep2[14]);
          u[13] = _mm_unpackhi_epi32(lstep2[ 8], lstep2[14]);
          u[14] = _mm_unpacklo_epi32(lstep2[ 9], lstep2[15]);
          u[15] = _mm_unpackhi_epi32(lstep2[ 9], lstep2[15]);

          v[0] = k_madd_epi32(u[0], k32_p28_p04);
          v[1] = k_madd_epi32(u[1], k32_p28_p04);
          v[2] = k_madd_epi32(u[2], k32_p28_p04);
          v[3] = k_madd_epi32(u[3], k32_p28_p04);
          v[4] = k_madd_epi32(u[4], k32_p12_p20);
          v[5] = k_madd_epi32(u[5], k32_p12_p20);
          v[6] = k_madd_epi32(u[6], k32_p12_p20);
          v[7] = k_madd_epi32(u[7], k32_p12_p20);
          v[ 8] = k_madd_epi32(u[ 8], k32_m20_p12);
          v[ 9] = k_madd_epi32(u[ 9], k32_m20_p12);
          v[10] = k_madd_epi32(u[10], k32_m20_p12);
          v[11] = k_madd_epi32(u[11], k32_m20_p12);
          v[12] = k_madd_epi32(u[12], k32_m04_p28);
          v[13] = k_madd_epi32(u[13], k32_m04_p28);
          v[14] = k_madd_epi32(u[14], k32_m04_p28);
          v[15] = k_madd_epi32(u[15], k32_m04_p28);

#if DCT_HIGH_BIT_DEPTH
          overflow = k_check_epi32_overflow_16(
              &v[0], &v[1], &v[2], &v[3], &v[4], &v[5], &v[6], &v[7],
              &v[8], &v[9], &v[10], &v[11], &v[12], &v[13], &v[14], &v[15],
              &kZero);
          if (overflow) {
            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
            return;
          }
#endif  // DCT_HIGH_BIT_DEPTH
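          // Narrow the 64-bit products back to 32-bit lanes before rounding.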
          u[0] = k_packs_epi64(v[0], v[1]);
          u[1] = k_packs_epi64(v[2], v[3]);
          u[2] = k_packs_epi64(v[4], v[5]);
          u[3] = k_packs_epi64(v[6], v[7]);
          u[4] = k_packs_epi64(v[8], v[9]);
          u[5] = k_packs_epi64(v[10], v[11]);
          u[6] = k_packs_epi64(v[12], v[13]);
          u[7] = k_packs_epi64(v[14], v[15]);

          v[0] = _mm_add_epi32(u[0], k__DCT_CONST_ROUNDING);
          v[1] = _mm_add_epi32(u[1], k__DCT_CONST_ROUNDING);
          v[2] = _mm_add_epi32(u[2], k__DCT_CONST_ROUNDING);
          v[3] = _mm_add_epi32(u[3], k__DCT_CONST_ROUNDING);
          v[4] = _mm_add_epi32(u[4], k__DCT_CONST_ROUNDING);
          v[5] = _mm_add_epi32(u[5], k__DCT_CONST_ROUNDING);
          v[6] = _mm_add_epi32(u[6], k__DCT_CONST_ROUNDING);
          v[7] = _mm_add_epi32(u[7], k__DCT_CONST_ROUNDING);

          u[0] = _mm_srai_epi32(v[0], DCT_CONST_BITS);
          u[1] = _mm_srai_epi32(v[1], DCT_CONST_BITS);
          u[2] = _mm_srai_epi32(v[2], DCT_CONST_BITS);
          u[3] = _mm_srai_epi32(v[3], DCT_CONST_BITS);
          u[4] = _mm_srai_epi32(v[4], DCT_CONST_BITS);
          u[5] = _mm_srai_epi32(v[5], DCT_CONST_BITS);
          u[6] = _mm_srai_epi32(v[6], DCT_CONST_BITS);
          u[7] = _mm_srai_epi32(v[7], DCT_CONST_BITS);

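          // Round half away from zero and divide by 4, i.e. the vector form
          // of u = (u + 1 + (u < 0)) >> 2: the compare masks are -1 for
          // negative lanes, so subtracting them adds 1 to negative values.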
          sign[0] = _mm_cmplt_epi32(u[0], kZero);
          sign[1] = _mm_cmplt_epi32(u[1], kZero);
          sign[2] = _mm_cmplt_epi32(u[2], kZero);
          sign[3] = _mm_cmplt_epi32(u[3], kZero);
          sign[4] = _mm_cmplt_epi32(u[4], kZero);
          sign[5] = _mm_cmplt_epi32(u[5], kZero);
          sign[6] = _mm_cmplt_epi32(u[6], kZero);
          sign[7] = _mm_cmplt_epi32(u[7], kZero);

          u[0] = _mm_sub_epi32(u[0], sign[0]);
          u[1] = _mm_sub_epi32(u[1], sign[1]);
          u[2] = _mm_sub_epi32(u[2], sign[2]);
          u[3] = _mm_sub_epi32(u[3], sign[3]);
          u[4] = _mm_sub_epi32(u[4], sign[4]);
          u[5] = _mm_sub_epi32(u[5], sign[5]);
          u[6] = _mm_sub_epi32(u[6], sign[6]);
          u[7] = _mm_sub_epi32(u[7], sign[7]);

          u[0] = _mm_add_epi32(u[0], K32One);
          u[1] = _mm_add_epi32(u[1], K32One);
          u[2] = _mm_add_epi32(u[2], K32One);
          u[3] = _mm_add_epi32(u[3], K32One);
          u[4] = _mm_add_epi32(u[4], K32One);
          u[5] = _mm_add_epi32(u[5], K32One);
          u[6] = _mm_add_epi32(u[6], K32One);
          u[7] = _mm_add_epi32(u[7], K32One);

          u[0] = _mm_srai_epi32(u[0], 2);
          u[1] = _mm_srai_epi32(u[1], 2);
          u[2] = _mm_srai_epi32(u[2], 2);
          u[3] = _mm_srai_epi32(u[3], 2);
          u[4] = _mm_srai_epi32(u[4], 2);
          u[5] = _mm_srai_epi32(u[5], 2);
          u[6] = _mm_srai_epi32(u[6], 2);
          u[7] = _mm_srai_epi32(u[7], 2);

          out[ 4] = _mm_packs_epi32(u[0], u[1]);
          out[20] = _mm_packs_epi32(u[2], u[3]);
          out[12] = _mm_packs_epi32(u[4], u[5]);
          out[28] = _mm_packs_epi32(u[6], u[7]);
#if DCT_HIGH_BIT_DEPTH
          overflow = check_epi16_overflow_x4(&out[4], &out[20],
                                             &out[12], &out[28]);
          if (overflow) {
            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
            return;
          }
#endif  // DCT_HIGH_BIT_DEPTH
        }
        {
          lstep3[16] = _mm_add_epi32(lstep2[18], lstep1[16]);
          lstep3[17] = _mm_add_epi32(lstep2[19], lstep1[17]);
          lstep3[18] = _mm_sub_epi32(lstep1[16], lstep2[18]);
          lstep3[19] = _mm_sub_epi32(lstep1[17], lstep2[19]);
          lstep3[20] = _mm_sub_epi32(lstep1[22], lstep2[20]);
          lstep3[21] = _mm_sub_epi32(lstep1[23], lstep2[21]);
          lstep3[22] = _mm_add_epi32(lstep2[20], lstep1[22]);
          lstep3[23] = _mm_add_epi32(lstep2[21], lstep1[23]);
          lstep3[24] = _mm_add_epi32(lstep2[26], lstep1[24]);
          lstep3[25] = _mm_add_epi32(lstep2[27], lstep1[25]);
          lstep3[26] = _mm_sub_epi32(lstep1[24], lstep2[26]);
          lstep3[27] = _mm_sub_epi32(lstep1[25], lstep2[27]);
          lstep3[28] = _mm_sub_epi32(lstep1[30], lstep2[28]);
          lstep3[29] = _mm_sub_epi32(lstep1[31], lstep2[29]);
          lstep3[30] = _mm_add_epi32(lstep2[28], lstep1[30]);
          lstep3[31] = _mm_add_epi32(lstep2[29], lstep1[31]);
        }
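        // Rotations for the odd half: the results land in lstep3[34..37],
        // lstep3[42..45], lstep3[50..53] and lstep3[58..61].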
        {
          const __m128i k32_m04_p28 = pair_set_epi32(-cospi_4_64, cospi_28_64);
          const __m128i k32_m28_m04 = pair_set_epi32(-cospi_28_64, -cospi_4_64);
          const __m128i k32_m20_p12 = pair_set_epi32(-cospi_20_64, cospi_12_64);
          const __m128i k32_m12_m20 = pair_set_epi32(-cospi_12_64,
                                                     -cospi_20_64);
          const __m128i k32_p12_p20 = pair_set_epi32(cospi_12_64, cospi_20_64);
          const __m128i k32_p28_p04 = pair_set_epi32(cospi_28_64, cospi_4_64);

          u[ 0] = _mm_unpacklo_epi32(lstep2[34], lstep2[60]);
          u[ 1] = _mm_unpackhi_epi32(lstep2[34], lstep2[60]);
          u[ 2] = _mm_unpacklo_epi32(lstep2[35], lstep2[61]);
          u[ 3] = _mm_unpackhi_epi32(lstep2[35], lstep2[61]);
          u[ 4] = _mm_unpacklo_epi32(lstep2[36], lstep2[58]);
          u[ 5] = _mm_unpackhi_epi32(lstep2[36], lstep2[58]);
          u[ 6] = _mm_unpacklo_epi32(lstep2[37], lstep2[59]);
          u[ 7] = _mm_unpackhi_epi32(lstep2[37], lstep2[59]);
          u[ 8] = _mm_unpacklo_epi32(lstep2[42], lstep2[52]);
          u[ 9] = _mm_unpackhi_epi32(lstep2[42], lstep2[52]);
          u[10] = _mm_unpacklo_epi32(lstep2[43], lstep2[53]);
          u[11] = _mm_unpackhi_epi32(lstep2[43], lstep2[53]);
          u[12] = _mm_unpacklo_epi32(lstep2[44], lstep2[50]);
          u[13] = _mm_unpackhi_epi32(lstep2[44], lstep2[50]);
          u[14] = _mm_unpacklo_epi32(lstep2[45], lstep2[51]);
          u[15] = _mm_unpackhi_epi32(lstep2[45], lstep2[51]);

          v[ 0] = k_madd_epi32(u[ 0], k32_m04_p28);
          v[ 1] = k_madd_epi32(u[ 1], k32_m04_p28);
          v[ 2] = k_madd_epi32(u[ 2], k32_m04_p28);
          v[ 3] = k_madd_epi32(u[ 3], k32_m04_p28);
          v[ 4] = k_madd_epi32(u[ 4], k32_m28_m04);
          v[ 5] = k_madd_epi32(u[ 5], k32_m28_m04);
          v[ 6] = k_madd_epi32(u[ 6], k32_m28_m04);
          v[ 7] = k_madd_epi32(u[ 7], k32_m28_m04);
          v[ 8] = k_madd_epi32(u[ 8], k32_m20_p12);
          v[ 9] = k_madd_epi32(u[ 9], k32_m20_p12);
          v[10] = k_madd_epi32(u[10], k32_m20_p12);
          v[11] = k_madd_epi32(u[11], k32_m20_p12);
          v[12] = k_madd_epi32(u[12], k32_m12_m20);
          v[13] = k_madd_epi32(u[13], k32_m12_m20);
          v[14] = k_madd_epi32(u[14], k32_m12_m20);
          v[15] = k_madd_epi32(u[15], k32_m12_m20);
          v[16] = k_madd_epi32(u[12], k32_m20_p12);
          v[17] = k_madd_epi32(u[13], k32_m20_p12);
          v[18] = k_madd_epi32(u[14], k32_m20_p12);
          v[19] = k_madd_epi32(u[15], k32_m20_p12);
          v[20] = k_madd_epi32(u[ 8], k32_p12_p20);
          v[21] = k_madd_epi32(u[ 9], k32_p12_p20);
          v[22] = k_madd_epi32(u[10], k32_p12_p20);
          v[23] = k_madd_epi32(u[11], k32_p12_p20);
          v[24] = k_madd_epi32(u[ 4], k32_m04_p28);
          v[25] = k_madd_epi32(u[ 5], k32_m04_p28);
          v[26] = k_madd_epi32(u[ 6], k32_m04_p28);
          v[27] = k_madd_epi32(u[ 7], k32_m04_p28);
          v[28] = k_madd_epi32(u[ 0], k32_p28_p04);
          v[29] = k_madd_epi32(u[ 1], k32_p28_p04);
          v[30] = k_madd_epi32(u[ 2], k32_p28_p04);
          v[31] = k_madd_epi32(u[ 3], k32_p28_p04);

#if DCT_HIGH_BIT_DEPTH
          overflow = k_check_epi32_overflow_32(
              &v[0], &v[1], &v[2], &v[3], &v[4], &v[5], &v[6], &v[7],
              &v[8], &v[9], &v[10], &v[11], &v[12], &v[13], &v[14], &v[15],
              &v[16], &v[17], &v[18], &v[19], &v[20], &v[21], &v[22], &v[23],
              &v[24], &v[25], &v[26], &v[27], &v[28], &v[29], &v[30], &v[31],
              &kZero);
          if (overflow) {
            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
            return;
          }
#endif  // DCT_HIGH_BIT_DEPTH
          u[ 0] = k_packs_epi64(v[ 0], v[ 1]);
          u[ 1] = k_packs_epi64(v[ 2], v[ 3]);
          u[ 2] = k_packs_epi64(v[ 4], v[ 5]);
          u[ 3] = k_packs_epi64(v[ 6], v[ 7]);
          u[ 4] = k_packs_epi64(v[ 8], v[ 9]);
          u[ 5] = k_packs_epi64(v[10], v[11]);
          u[ 6] = k_packs_epi64(v[12], v[13]);
          u[ 7] = k_packs_epi64(v[14], v[15]);
          u[ 8] = k_packs_epi64(v[16], v[17]);
          u[ 9] = k_packs_epi64(v[18], v[19]);
          u[10] = k_packs_epi64(v[20], v[21]);
          u[11] = k_packs_epi64(v[22], v[23]);
          u[12] = k_packs_epi64(v[24], v[25]);
          u[13] = k_packs_epi64(v[26], v[27]);
          u[14] = k_packs_epi64(v[28], v[29]);
          u[15] = k_packs_epi64(v[30], v[31]);

          v[ 0] = _mm_add_epi32(u[ 0], k__DCT_CONST_ROUNDING);
          v[ 1] = _mm_add_epi32(u[ 1], k__DCT_CONST_ROUNDING);
          v[ 2] = _mm_add_epi32(u[ 2], k__DCT_CONST_ROUNDING);
          v[ 3] = _mm_add_epi32(u[ 3], k__DCT_CONST_ROUNDING);
          v[ 4] = _mm_add_epi32(u[ 4], k__DCT_CONST_ROUNDING);
          v[ 5] = _mm_add_epi32(u[ 5], k__DCT_CONST_ROUNDING);
          v[ 6] = _mm_add_epi32(u[ 6], k__DCT_CONST_ROUNDING);
          v[ 7] = _mm_add_epi32(u[ 7], k__DCT_CONST_ROUNDING);
          v[ 8] = _mm_add_epi32(u[ 8], k__DCT_CONST_ROUNDING);
          v[ 9] = _mm_add_epi32(u[ 9], k__DCT_CONST_ROUNDING);
          v[10] = _mm_add_epi32(u[10], k__DCT_CONST_ROUNDING);
          v[11] = _mm_add_epi32(u[11], k__DCT_CONST_ROUNDING);
          v[12] = _mm_add_epi32(u[12], k__DCT_CONST_ROUNDING);
          v[13] = _mm_add_epi32(u[13], k__DCT_CONST_ROUNDING);
          v[14] = _mm_add_epi32(u[14], k__DCT_CONST_ROUNDING);
          v[15] = _mm_add_epi32(u[15], k__DCT_CONST_ROUNDING);

          lstep3[34] = _mm_srai_epi32(v[ 0], DCT_CONST_BITS);
          lstep3[35] = _mm_srai_epi32(v[ 1], DCT_CONST_BITS);
          lstep3[36] = _mm_srai_epi32(v[ 2], DCT_CONST_BITS);
          lstep3[37] = _mm_srai_epi32(v[ 3], DCT_CONST_BITS);
          lstep3[42] = _mm_srai_epi32(v[ 4], DCT_CONST_BITS);
          lstep3[43] = _mm_srai_epi32(v[ 5], DCT_CONST_BITS);
          lstep3[44] = _mm_srai_epi32(v[ 6], DCT_CONST_BITS);
          lstep3[45] = _mm_srai_epi32(v[ 7], DCT_CONST_BITS);
          lstep3[50] = _mm_srai_epi32(v[ 8], DCT_CONST_BITS);
          lstep3[51] = _mm_srai_epi32(v[ 9], DCT_CONST_BITS);
          lstep3[52] = _mm_srai_epi32(v[10], DCT_CONST_BITS);
          lstep3[53] = _mm_srai_epi32(v[11], DCT_CONST_BITS);
          lstep3[58] = _mm_srai_epi32(v[12], DCT_CONST_BITS);
          lstep3[59] = _mm_srai_epi32(v[13], DCT_CONST_BITS);
          lstep3[60] = _mm_srai_epi32(v[14], DCT_CONST_BITS);
          lstep3[61] = _mm_srai_epi32(v[15], DCT_CONST_BITS);
        }
        // stage 7
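        // Finishes output rows 2, 6, 10, 14, 18, 22, 26 and 30: rotate by
        // the cospi_2..cospi_30 pairs, round, then pack the 32-bit lanes
        // back down to 16 bits.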
        {
          const __m128i k32_p30_p02 = pair_set_epi32(cospi_30_64, cospi_2_64);
          const __m128i k32_p14_p18 = pair_set_epi32(cospi_14_64, cospi_18_64);
          const __m128i k32_p22_p10 = pair_set_epi32(cospi_22_64, cospi_10_64);
          const __m128i k32_p06_p26 = pair_set_epi32(cospi_6_64, cospi_26_64);
          const __m128i k32_m26_p06 = pair_set_epi32(-cospi_26_64, cospi_6_64);
          const __m128i k32_m10_p22 = pair_set_epi32(-cospi_10_64, cospi_22_64);
          const __m128i k32_m18_p14 = pair_set_epi32(-cospi_18_64, cospi_14_64);
          const __m128i k32_m02_p30 = pair_set_epi32(-cospi_2_64, cospi_30_64);

          u[ 0] = _mm_unpacklo_epi32(lstep3[16], lstep3[30]);
          u[ 1] = _mm_unpackhi_epi32(lstep3[16], lstep3[30]);
          u[ 2] = _mm_unpacklo_epi32(lstep3[17], lstep3[31]);
          u[ 3] = _mm_unpackhi_epi32(lstep3[17], lstep3[31]);
          u[ 4] = _mm_unpacklo_epi32(lstep3[18], lstep3[28]);
          u[ 5] = _mm_unpackhi_epi32(lstep3[18], lstep3[28]);
          u[ 6] = _mm_unpacklo_epi32(lstep3[19], lstep3[29]);
          u[ 7] = _mm_unpackhi_epi32(lstep3[19], lstep3[29]);
          u[ 8] = _mm_unpacklo_epi32(lstep3[20], lstep3[26]);
          u[ 9] = _mm_unpackhi_epi32(lstep3[20], lstep3[26]);
          u[10] = _mm_unpacklo_epi32(lstep3[21], lstep3[27]);
          u[11] = _mm_unpackhi_epi32(lstep3[21], lstep3[27]);
          u[12] = _mm_unpacklo_epi32(lstep3[22], lstep3[24]);
          u[13] = _mm_unpackhi_epi32(lstep3[22], lstep3[24]);
          u[14] = _mm_unpacklo_epi32(lstep3[23], lstep3[25]);
          u[15] = _mm_unpackhi_epi32(lstep3[23], lstep3[25]);

          v[ 0] = k_madd_epi32(u[ 0], k32_p30_p02);
          v[ 1] = k_madd_epi32(u[ 1], k32_p30_p02);
          v[ 2] = k_madd_epi32(u[ 2], k32_p30_p02);
          v[ 3] = k_madd_epi32(u[ 3], k32_p30_p02);
          v[ 4] = k_madd_epi32(u[ 4], k32_p14_p18);
          v[ 5] = k_madd_epi32(u[ 5], k32_p14_p18);
          v[ 6] = k_madd_epi32(u[ 6], k32_p14_p18);
          v[ 7] = k_madd_epi32(u[ 7], k32_p14_p18);
          v[ 8] = k_madd_epi32(u[ 8], k32_p22_p10);
          v[ 9] = k_madd_epi32(u[ 9], k32_p22_p10);
          v[10] = k_madd_epi32(u[10], k32_p22_p10);
          v[11] = k_madd_epi32(u[11], k32_p22_p10);
          v[12] = k_madd_epi32(u[12], k32_p06_p26);
          v[13] = k_madd_epi32(u[13], k32_p06_p26);
          v[14] = k_madd_epi32(u[14], k32_p06_p26);
          v[15] = k_madd_epi32(u[15], k32_p06_p26);
          v[16] = k_madd_epi32(u[12], k32_m26_p06);
          v[17] = k_madd_epi32(u[13], k32_m26_p06);
          v[18] = k_madd_epi32(u[14], k32_m26_p06);
          v[19] = k_madd_epi32(u[15], k32_m26_p06);
          v[20] = k_madd_epi32(u[ 8], k32_m10_p22);
          v[21] = k_madd_epi32(u[ 9], k32_m10_p22);
          v[22] = k_madd_epi32(u[10], k32_m10_p22);
          v[23] = k_madd_epi32(u[11], k32_m10_p22);
          v[24] = k_madd_epi32(u[ 4], k32_m18_p14);
          v[25] = k_madd_epi32(u[ 5], k32_m18_p14);
          v[26] = k_madd_epi32(u[ 6], k32_m18_p14);
          v[27] = k_madd_epi32(u[ 7], k32_m18_p14);
          v[28] = k_madd_epi32(u[ 0], k32_m02_p30);
          v[29] = k_madd_epi32(u[ 1], k32_m02_p30);
          v[30] = k_madd_epi32(u[ 2], k32_m02_p30);
          v[31] = k_madd_epi32(u[ 3], k32_m02_p30);

#if DCT_HIGH_BIT_DEPTH
          overflow = k_check_epi32_overflow_32(
              &v[0], &v[1], &v[2], &v[3], &v[4], &v[5], &v[6], &v[7],
              &v[8], &v[9], &v[10], &v[11], &v[12], &v[13], &v[14], &v[15],
              &v[16], &v[17], &v[18], &v[19], &v[20], &v[21], &v[22], &v[23],
              &v[24], &v[25], &v[26], &v[27], &v[28], &v[29], &v[30], &v[31],
              &kZero);
          if (overflow) {
            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
            return;
          }
#endif  // DCT_HIGH_BIT_DEPTH
          u[ 0] = k_packs_epi64(v[ 0], v[ 1]);
          u[ 1] = k_packs_epi64(v[ 2], v[ 3]);
          u[ 2] = k_packs_epi64(v[ 4], v[ 5]);
          u[ 3] = k_packs_epi64(v[ 6], v[ 7]);
          u[ 4] = k_packs_epi64(v[ 8], v[ 9]);
          u[ 5] = k_packs_epi64(v[10], v[11]);
          u[ 6] = k_packs_epi64(v[12], v[13]);
          u[ 7] = k_packs_epi64(v[14], v[15]);
          u[ 8] = k_packs_epi64(v[16], v[17]);
          u[ 9] = k_packs_epi64(v[18], v[19]);
          u[10] = k_packs_epi64(v[20], v[21]);
          u[11] = k_packs_epi64(v[22], v[23]);
          u[12] = k_packs_epi64(v[24], v[25]);
          u[13] = k_packs_epi64(v[26], v[27]);
          u[14] = k_packs_epi64(v[28], v[29]);
          u[15] = k_packs_epi64(v[30], v[31]);

          v[ 0] = _mm_add_epi32(u[ 0], k__DCT_CONST_ROUNDING);
          v[ 1] = _mm_add_epi32(u[ 1], k__DCT_CONST_ROUNDING);
          v[ 2] = _mm_add_epi32(u[ 2], k__DCT_CONST_ROUNDING);
          v[ 3] = _mm_add_epi32(u[ 3], k__DCT_CONST_ROUNDING);
          v[ 4] = _mm_add_epi32(u[ 4], k__DCT_CONST_ROUNDING);
          v[ 5] = _mm_add_epi32(u[ 5], k__DCT_CONST_ROUNDING);
          v[ 6] = _mm_add_epi32(u[ 6], k__DCT_CONST_ROUNDING);
          v[ 7] = _mm_add_epi32(u[ 7], k__DCT_CONST_ROUNDING);
          v[ 8] = _mm_add_epi32(u[ 8], k__DCT_CONST_ROUNDING);
          v[ 9] = _mm_add_epi32(u[ 9], k__DCT_CONST_ROUNDING);
          v[10] = _mm_add_epi32(u[10], k__DCT_CONST_ROUNDING);
          v[11] = _mm_add_epi32(u[11], k__DCT_CONST_ROUNDING);
          v[12] = _mm_add_epi32(u[12], k__DCT_CONST_ROUNDING);
          v[13] = _mm_add_epi32(u[13], k__DCT_CONST_ROUNDING);
          v[14] = _mm_add_epi32(u[14], k__DCT_CONST_ROUNDING);
          v[15] = _mm_add_epi32(u[15], k__DCT_CONST_ROUNDING);

          u[ 0] = _mm_srai_epi32(v[ 0], DCT_CONST_BITS);
          u[ 1] = _mm_srai_epi32(v[ 1], DCT_CONST_BITS);
          u[ 2] = _mm_srai_epi32(v[ 2], DCT_CONST_BITS);
          u[ 3] = _mm_srai_epi32(v[ 3], DCT_CONST_BITS);
          u[ 4] = _mm_srai_epi32(v[ 4], DCT_CONST_BITS);
          u[ 5] = _mm_srai_epi32(v[ 5], DCT_CONST_BITS);
          u[ 6] = _mm_srai_epi32(v[ 6], DCT_CONST_BITS);
          u[ 7] = _mm_srai_epi32(v[ 7], DCT_CONST_BITS);
          u[ 8] = _mm_srai_epi32(v[ 8], DCT_CONST_BITS);
          u[ 9] = _mm_srai_epi32(v[ 9], DCT_CONST_BITS);
          u[10] = _mm_srai_epi32(v[10], DCT_CONST_BITS);
          u[11] = _mm_srai_epi32(v[11], DCT_CONST_BITS);
          u[12] = _mm_srai_epi32(v[12], DCT_CONST_BITS);
          u[13] = _mm_srai_epi32(v[13], DCT_CONST_BITS);
          u[14] = _mm_srai_epi32(v[14], DCT_CONST_BITS);
          u[15] = _mm_srai_epi32(v[15], DCT_CONST_BITS);

          v[ 0] = _mm_cmplt_epi32(u[ 0], kZero);
          v[ 1] = _mm_cmplt_epi32(u[ 1], kZero);
          v[ 2] = _mm_cmplt_epi32(u[ 2], kZero);
          v[ 3] = _mm_cmplt_epi32(u[ 3], kZero);
          v[ 4] = _mm_cmplt_epi32(u[ 4], kZero);
          v[ 5] = _mm_cmplt_epi32(u[ 5], kZero);
          v[ 6] = _mm_cmplt_epi32(u[ 6], kZero);
          v[ 7] = _mm_cmplt_epi32(u[ 7], kZero);
          v[ 8] = _mm_cmplt_epi32(u[ 8], kZero);
          v[ 9] = _mm_cmplt_epi32(u[ 9], kZero);
          v[10] = _mm_cmplt_epi32(u[10], kZero);
          v[11] = _mm_cmplt_epi32(u[11], kZero);
          v[12] = _mm_cmplt_epi32(u[12], kZero);
          v[13] = _mm_cmplt_epi32(u[13], kZero);
          v[14] = _mm_cmplt_epi32(u[14], kZero);
          v[15] = _mm_cmplt_epi32(u[15], kZero);

          u[ 0] = _mm_sub_epi32(u[ 0], v[ 0]);
          u[ 1] = _mm_sub_epi32(u[ 1], v[ 1]);
          u[ 2] = _mm_sub_epi32(u[ 2], v[ 2]);
          u[ 3] = _mm_sub_epi32(u[ 3], v[ 3]);
          u[ 4] = _mm_sub_epi32(u[ 4], v[ 4]);
          u[ 5] = _mm_sub_epi32(u[ 5], v[ 5]);
          u[ 6] = _mm_sub_epi32(u[ 6], v[ 6]);
          u[ 7] = _mm_sub_epi32(u[ 7], v[ 7]);
          u[ 8] = _mm_sub_epi32(u[ 8], v[ 8]);
          u[ 9] = _mm_sub_epi32(u[ 9], v[ 9]);
          u[10] = _mm_sub_epi32(u[10], v[10]);
          u[11] = _mm_sub_epi32(u[11], v[11]);
          u[12] = _mm_sub_epi32(u[12], v[12]);
          u[13] = _mm_sub_epi32(u[13], v[13]);
          u[14] = _mm_sub_epi32(u[14], v[14]);
          u[15] = _mm_sub_epi32(u[15], v[15]);

          v[ 0] = _mm_add_epi32(u[ 0], K32One);
          v[ 1] = _mm_add_epi32(u[ 1], K32One);
          v[ 2] = _mm_add_epi32(u[ 2], K32One);
          v[ 3] = _mm_add_epi32(u[ 3], K32One);
          v[ 4] = _mm_add_epi32(u[ 4], K32One);
          v[ 5] = _mm_add_epi32(u[ 5], K32One);
          v[ 6] = _mm_add_epi32(u[ 6], K32One);
          v[ 7] = _mm_add_epi32(u[ 7], K32One);
          v[ 8] = _mm_add_epi32(u[ 8], K32One);
          v[ 9] = _mm_add_epi32(u[ 9], K32One);
          v[10] = _mm_add_epi32(u[10], K32One);
          v[11] = _mm_add_epi32(u[11], K32One);
          v[12] = _mm_add_epi32(u[12], K32One);
          v[13] = _mm_add_epi32(u[13], K32One);
          v[14] = _mm_add_epi32(u[14], K32One);
          v[15] = _mm_add_epi32(u[15], K32One);

          u[ 0] = _mm_srai_epi32(v[ 0], 2);
          u[ 1] = _mm_srai_epi32(v[ 1], 2);
          u[ 2] = _mm_srai_epi32(v[ 2], 2);
          u[ 3] = _mm_srai_epi32(v[ 3], 2);
          u[ 4] = _mm_srai_epi32(v[ 4], 2);
          u[ 5] = _mm_srai_epi32(v[ 5], 2);
          u[ 6] = _mm_srai_epi32(v[ 6], 2);
          u[ 7] = _mm_srai_epi32(v[ 7], 2);
          u[ 8] = _mm_srai_epi32(v[ 8], 2);
          u[ 9] = _mm_srai_epi32(v[ 9], 2);
          u[10] = _mm_srai_epi32(v[10], 2);
          u[11] = _mm_srai_epi32(v[11], 2);
          u[12] = _mm_srai_epi32(v[12], 2);
          u[13] = _mm_srai_epi32(v[13], 2);
          u[14] = _mm_srai_epi32(v[14], 2);
          u[15] = _mm_srai_epi32(v[15], 2);

          out[ 2] = _mm_packs_epi32(u[0], u[1]);
          out[18] = _mm_packs_epi32(u[2], u[3]);
          out[10] = _mm_packs_epi32(u[4], u[5]);
          out[26] = _mm_packs_epi32(u[6], u[7]);
          out[ 6] = _mm_packs_epi32(u[8], u[9]);
          out[22] = _mm_packs_epi32(u[10], u[11]);
          out[14] = _mm_packs_epi32(u[12], u[13]);
          out[30] = _mm_packs_epi32(u[14], u[15]);
#if DCT_HIGH_BIT_DEPTH
          overflow = check_epi16_overflow_x8(&out[2], &out[18], &out[10],
                                             &out[26], &out[6], &out[22],
                                             &out[14], &out[30]);
          if (overflow) {
            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
            return;
          }
#endif  // DCT_HIGH_BIT_DEPTH
        }
        {
          lstep1[32] = _mm_add_epi32(lstep3[34], lstep2[32]);
          lstep1[33] = _mm_add_epi32(lstep3[35], lstep2[33]);
          lstep1[34] = _mm_sub_epi32(lstep2[32], lstep3[34]);
          lstep1[35] = _mm_sub_epi32(lstep2[33], lstep3[35]);
          lstep1[36] = _mm_sub_epi32(lstep2[38], lstep3[36]);
          lstep1[37] = _mm_sub_epi32(lstep2[39], lstep3[37]);
          lstep1[38] = _mm_add_epi32(lstep3[36], lstep2[38]);
          lstep1[39] = _mm_add_epi32(lstep3[37], lstep2[39]);
          lstep1[40] = _mm_add_epi32(lstep3[42], lstep2[40]);
          lstep1[41] = _mm_add_epi32(lstep3[43], lstep2[41]);
          lstep1[42] = _mm_sub_epi32(lstep2[40], lstep3[42]);
          lstep1[43] = _mm_sub_epi32(lstep2[41], lstep3[43]);
          lstep1[44] = _mm_sub_epi32(lstep2[46], lstep3[44]);
          lstep1[45] = _mm_sub_epi32(lstep2[47], lstep3[45]);
          lstep1[46] = _mm_add_epi32(lstep3[44], lstep2[46]);
          lstep1[47] = _mm_add_epi32(lstep3[45], lstep2[47]);
          lstep1[48] = _mm_add_epi32(lstep3[50], lstep2[48]);
          lstep1[49] = _mm_add_epi32(lstep3[51], lstep2[49]);
          lstep1[50] = _mm_sub_epi32(lstep2[48], lstep3[50]);
          lstep1[51] = _mm_sub_epi32(lstep2[49], lstep3[51]);
          lstep1[52] = _mm_sub_epi32(lstep2[54], lstep3[52]);
          lstep1[53] = _mm_sub_epi32(lstep2[55], lstep3[53]);
          lstep1[54] = _mm_add_epi32(lstep3[52], lstep2[54]);
          lstep1[55] = _mm_add_epi32(lstep3[53], lstep2[55]);
          lstep1[56] = _mm_add_epi32(lstep3[58], lstep2[56]);
          lstep1[57] = _mm_add_epi32(lstep3[59], lstep2[57]);
          lstep1[58] = _mm_sub_epi32(lstep2[56], lstep3[58]);
          lstep1[59] = _mm_sub_epi32(lstep2[57], lstep3[59]);
          lstep1[60] = _mm_sub_epi32(lstep2[62], lstep3[60]);
          lstep1[61] = _mm_sub_epi32(lstep2[63], lstep3[61]);
          lstep1[62] = _mm_add_epi32(lstep3[60], lstep2[62]);
          lstep1[63] = _mm_add_epi32(lstep3[61], lstep2[63]);
        }
        // stage 8
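        // Final multiply stage: the two blocks below produce the remaining
        // odd-indexed output rows (1, 3, 5, ..., 31) from lstep1[32..63].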
        {
          const __m128i k32_p31_p01 = pair_set_epi32(cospi_31_64, cospi_1_64);
          const __m128i k32_p15_p17 = pair_set_epi32(cospi_15_64, cospi_17_64);
          const __m128i k32_p23_p09 = pair_set_epi32(cospi_23_64, cospi_9_64);
          const __m128i k32_p07_p25 = pair_set_epi32(cospi_7_64, cospi_25_64);
          const __m128i k32_m25_p07 = pair_set_epi32(-cospi_25_64, cospi_7_64);
          const __m128i k32_m09_p23 = pair_set_epi32(-cospi_9_64, cospi_23_64);
          const __m128i k32_m17_p15 = pair_set_epi32(-cospi_17_64, cospi_15_64);
          const __m128i k32_m01_p31 = pair_set_epi32(-cospi_1_64, cospi_31_64);

          u[ 0] = _mm_unpacklo_epi32(lstep1[32], lstep1[62]);
          u[ 1] = _mm_unpackhi_epi32(lstep1[32], lstep1[62]);
          u[ 2] = _mm_unpacklo_epi32(lstep1[33], lstep1[63]);
          u[ 3] = _mm_unpackhi_epi32(lstep1[33], lstep1[63]);
          u[ 4] = _mm_unpacklo_epi32(lstep1[34], lstep1[60]);
          u[ 5] = _mm_unpackhi_epi32(lstep1[34], lstep1[60]);
          u[ 6] = _mm_unpacklo_epi32(lstep1[35], lstep1[61]);
          u[ 7] = _mm_unpackhi_epi32(lstep1[35], lstep1[61]);
          u[ 8] = _mm_unpacklo_epi32(lstep1[36], lstep1[58]);
          u[ 9] = _mm_unpackhi_epi32(lstep1[36], lstep1[58]);
          u[10] = _mm_unpacklo_epi32(lstep1[37], lstep1[59]);
          u[11] = _mm_unpackhi_epi32(lstep1[37], lstep1[59]);
          u[12] = _mm_unpacklo_epi32(lstep1[38], lstep1[56]);
          u[13] = _mm_unpackhi_epi32(lstep1[38], lstep1[56]);
          u[14] = _mm_unpacklo_epi32(lstep1[39], lstep1[57]);
          u[15] = _mm_unpackhi_epi32(lstep1[39], lstep1[57]);

          v[ 0] = k_madd_epi32(u[ 0], k32_p31_p01);
          v[ 1] = k_madd_epi32(u[ 1], k32_p31_p01);
          v[ 2] = k_madd_epi32(u[ 2], k32_p31_p01);
          v[ 3] = k_madd_epi32(u[ 3], k32_p31_p01);
          v[ 4] = k_madd_epi32(u[ 4], k32_p15_p17);
          v[ 5] = k_madd_epi32(u[ 5], k32_p15_p17);
          v[ 6] = k_madd_epi32(u[ 6], k32_p15_p17);
          v[ 7] = k_madd_epi32(u[ 7], k32_p15_p17);
          v[ 8] = k_madd_epi32(u[ 8], k32_p23_p09);
          v[ 9] = k_madd_epi32(u[ 9], k32_p23_p09);
          v[10] = k_madd_epi32(u[10], k32_p23_p09);
          v[11] = k_madd_epi32(u[11], k32_p23_p09);
          v[12] = k_madd_epi32(u[12], k32_p07_p25);
          v[13] = k_madd_epi32(u[13], k32_p07_p25);
          v[14] = k_madd_epi32(u[14], k32_p07_p25);
          v[15] = k_madd_epi32(u[15], k32_p07_p25);
          v[16] = k_madd_epi32(u[12], k32_m25_p07);
          v[17] = k_madd_epi32(u[13], k32_m25_p07);
          v[18] = k_madd_epi32(u[14], k32_m25_p07);
          v[19] = k_madd_epi32(u[15], k32_m25_p07);
          v[20] = k_madd_epi32(u[ 8], k32_m09_p23);
          v[21] = k_madd_epi32(u[ 9], k32_m09_p23);
          v[22] = k_madd_epi32(u[10], k32_m09_p23);
          v[23] = k_madd_epi32(u[11], k32_m09_p23);
          v[24] = k_madd_epi32(u[ 4], k32_m17_p15);
          v[25] = k_madd_epi32(u[ 5], k32_m17_p15);
          v[26] = k_madd_epi32(u[ 6], k32_m17_p15);
          v[27] = k_madd_epi32(u[ 7], k32_m17_p15);
          v[28] = k_madd_epi32(u[ 0], k32_m01_p31);
          v[29] = k_madd_epi32(u[ 1], k32_m01_p31);
          v[30] = k_madd_epi32(u[ 2], k32_m01_p31);
          v[31] = k_madd_epi32(u[ 3], k32_m01_p31);

#if DCT_HIGH_BIT_DEPTH
          overflow = k_check_epi32_overflow_32(
              &v[0], &v[1], &v[2], &v[3], &v[4], &v[5], &v[6], &v[7],
              &v[8], &v[9], &v[10], &v[11], &v[12], &v[13], &v[14], &v[15],
              &v[16], &v[17], &v[18], &v[19], &v[20], &v[21], &v[22], &v[23],
              &v[24], &v[25], &v[26], &v[27], &v[28], &v[29], &v[30], &v[31],
              &kZero);
          if (overflow) {
            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
            return;
          }
#endif  // DCT_HIGH_BIT_DEPTH
          u[ 0] = k_packs_epi64(v[ 0], v[ 1]);
          u[ 1] = k_packs_epi64(v[ 2], v[ 3]);
          u[ 2] = k_packs_epi64(v[ 4], v[ 5]);
          u[ 3] = k_packs_epi64(v[ 6], v[ 7]);
          u[ 4] = k_packs_epi64(v[ 8], v[ 9]);
          u[ 5] = k_packs_epi64(v[10], v[11]);
          u[ 6] = k_packs_epi64(v[12], v[13]);
          u[ 7] = k_packs_epi64(v[14], v[15]);
          u[ 8] = k_packs_epi64(v[16], v[17]);
          u[ 9] = k_packs_epi64(v[18], v[19]);
          u[10] = k_packs_epi64(v[20], v[21]);
          u[11] = k_packs_epi64(v[22], v[23]);
          u[12] = k_packs_epi64(v[24], v[25]);
          u[13] = k_packs_epi64(v[26], v[27]);
          u[14] = k_packs_epi64(v[28], v[29]);
          u[15] = k_packs_epi64(v[30], v[31]);

          v[ 0] = _mm_add_epi32(u[ 0], k__DCT_CONST_ROUNDING);
          v[ 1] = _mm_add_epi32(u[ 1], k__DCT_CONST_ROUNDING);
          v[ 2] = _mm_add_epi32(u[ 2], k__DCT_CONST_ROUNDING);
          v[ 3] = _mm_add_epi32(u[ 3], k__DCT_CONST_ROUNDING);
          v[ 4] = _mm_add_epi32(u[ 4], k__DCT_CONST_ROUNDING);
          v[ 5] = _mm_add_epi32(u[ 5], k__DCT_CONST_ROUNDING);
          v[ 6] = _mm_add_epi32(u[ 6], k__DCT_CONST_ROUNDING);
          v[ 7] = _mm_add_epi32(u[ 7], k__DCT_CONST_ROUNDING);
          v[ 8] = _mm_add_epi32(u[ 8], k__DCT_CONST_ROUNDING);
          v[ 9] = _mm_add_epi32(u[ 9], k__DCT_CONST_ROUNDING);
          v[10] = _mm_add_epi32(u[10], k__DCT_CONST_ROUNDING);
          v[11] = _mm_add_epi32(u[11], k__DCT_CONST_ROUNDING);
          v[12] = _mm_add_epi32(u[12], k__DCT_CONST_ROUNDING);
          v[13] = _mm_add_epi32(u[13], k__DCT_CONST_ROUNDING);
          v[14] = _mm_add_epi32(u[14], k__DCT_CONST_ROUNDING);
          v[15] = _mm_add_epi32(u[15], k__DCT_CONST_ROUNDING);

          u[ 0] = _mm_srai_epi32(v[ 0], DCT_CONST_BITS);
          u[ 1] = _mm_srai_epi32(v[ 1], DCT_CONST_BITS);
          u[ 2] = _mm_srai_epi32(v[ 2], DCT_CONST_BITS);
          u[ 3] = _mm_srai_epi32(v[ 3], DCT_CONST_BITS);
          u[ 4] = _mm_srai_epi32(v[ 4], DCT_CONST_BITS);
          u[ 5] = _mm_srai_epi32(v[ 5], DCT_CONST_BITS);
          u[ 6] = _mm_srai_epi32(v[ 6], DCT_CONST_BITS);
          u[ 7] = _mm_srai_epi32(v[ 7], DCT_CONST_BITS);
          u[ 8] = _mm_srai_epi32(v[ 8], DCT_CONST_BITS);
          u[ 9] = _mm_srai_epi32(v[ 9], DCT_CONST_BITS);
          u[10] = _mm_srai_epi32(v[10], DCT_CONST_BITS);
          u[11] = _mm_srai_epi32(v[11], DCT_CONST_BITS);
          u[12] = _mm_srai_epi32(v[12], DCT_CONST_BITS);
          u[13] = _mm_srai_epi32(v[13], DCT_CONST_BITS);
          u[14] = _mm_srai_epi32(v[14], DCT_CONST_BITS);
          u[15] = _mm_srai_epi32(v[15], DCT_CONST_BITS);

          v[ 0] = _mm_cmplt_epi32(u[ 0], kZero);
          v[ 1] = _mm_cmplt_epi32(u[ 1], kZero);
          v[ 2] = _mm_cmplt_epi32(u[ 2], kZero);
          v[ 3] = _mm_cmplt_epi32(u[ 3], kZero);
          v[ 4] = _mm_cmplt_epi32(u[ 4], kZero);
          v[ 5] = _mm_cmplt_epi32(u[ 5], kZero);
          v[ 6] = _mm_cmplt_epi32(u[ 6], kZero);
          v[ 7] = _mm_cmplt_epi32(u[ 7], kZero);
          v[ 8] = _mm_cmplt_epi32(u[ 8], kZero);
          v[ 9] = _mm_cmplt_epi32(u[ 9], kZero);
          v[10] = _mm_cmplt_epi32(u[10], kZero);
          v[11] = _mm_cmplt_epi32(u[11], kZero);
          v[12] = _mm_cmplt_epi32(u[12], kZero);
          v[13] = _mm_cmplt_epi32(u[13], kZero);
          v[14] = _mm_cmplt_epi32(u[14], kZero);
          v[15] = _mm_cmplt_epi32(u[15], kZero);

          u[ 0] = _mm_sub_epi32(u[ 0], v[ 0]);
          u[ 1] = _mm_sub_epi32(u[ 1], v[ 1]);
          u[ 2] = _mm_sub_epi32(u[ 2], v[ 2]);
          u[ 3] = _mm_sub_epi32(u[ 3], v[ 3]);
          u[ 4] = _mm_sub_epi32(u[ 4], v[ 4]);
          u[ 5] = _mm_sub_epi32(u[ 5], v[ 5]);
          u[ 6] = _mm_sub_epi32(u[ 6], v[ 6]);
          u[ 7] = _mm_sub_epi32(u[ 7], v[ 7]);
          u[ 8] = _mm_sub_epi32(u[ 8], v[ 8]);
          u[ 9] = _mm_sub_epi32(u[ 9], v[ 9]);
          u[10] = _mm_sub_epi32(u[10], v[10]);
          u[11] = _mm_sub_epi32(u[11], v[11]);
          u[12] = _mm_sub_epi32(u[12], v[12]);
          u[13] = _mm_sub_epi32(u[13], v[13]);
          u[14] = _mm_sub_epi32(u[14], v[14]);
          u[15] = _mm_sub_epi32(u[15], v[15]);

          v[0] = _mm_add_epi32(u[0], K32One);
          v[1] = _mm_add_epi32(u[1], K32One);
          v[2] = _mm_add_epi32(u[2], K32One);
          v[3] = _mm_add_epi32(u[3], K32One);
          v[4] = _mm_add_epi32(u[4], K32One);
          v[5] = _mm_add_epi32(u[5], K32One);
          v[6] = _mm_add_epi32(u[6], K32One);
          v[7] = _mm_add_epi32(u[7], K32One);
          v[8] = _mm_add_epi32(u[8], K32One);
          v[9] = _mm_add_epi32(u[9], K32One);
          v[10] = _mm_add_epi32(u[10], K32One);
          v[11] = _mm_add_epi32(u[11], K32One);
          v[12] = _mm_add_epi32(u[12], K32One);
          v[13] = _mm_add_epi32(u[13], K32One);
          v[14] = _mm_add_epi32(u[14], K32One);
          v[15] = _mm_add_epi32(u[15], K32One);

          u[0] = _mm_srai_epi32(v[0], 2);
          u[1] = _mm_srai_epi32(v[1], 2);
          u[2] = _mm_srai_epi32(v[2], 2);
          u[3] = _mm_srai_epi32(v[3], 2);
          u[4] = _mm_srai_epi32(v[4], 2);
          u[5] = _mm_srai_epi32(v[5], 2);
          u[6] = _mm_srai_epi32(v[6], 2);
          u[7] = _mm_srai_epi32(v[7], 2);
          u[8] = _mm_srai_epi32(v[8], 2);
          u[9] = _mm_srai_epi32(v[9], 2);
          u[10] = _mm_srai_epi32(v[10], 2);
          u[11] = _mm_srai_epi32(v[11], 2);
          u[12] = _mm_srai_epi32(v[12], 2);
          u[13] = _mm_srai_epi32(v[13], 2);
          u[14] = _mm_srai_epi32(v[14], 2);
          u[15] = _mm_srai_epi32(v[15], 2);

          out[ 1] = _mm_packs_epi32(u[0], u[1]);
          out[17] = _mm_packs_epi32(u[2], u[3]);
          out[ 9] = _mm_packs_epi32(u[4], u[5]);
          out[25] = _mm_packs_epi32(u[6], u[7]);
          out[ 7] = _mm_packs_epi32(u[8], u[9]);
          out[23] = _mm_packs_epi32(u[10], u[11]);
          out[15] = _mm_packs_epi32(u[12], u[13]);
          out[31] = _mm_packs_epi32(u[14], u[15]);
#if DCT_HIGH_BIT_DEPTH
          overflow = check_epi16_overflow_x8(&out[1], &out[17], &out[9],
                                             &out[25], &out[7], &out[23],
                                             &out[15], &out[31]);
          if (overflow) {
            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
            return;
          }
#endif  // DCT_HIGH_BIT_DEPTH
        }
        {
          const __m128i k32_p27_p05 = pair_set_epi32(cospi_27_64, cospi_5_64);
          const __m128i k32_p11_p21 = pair_set_epi32(cospi_11_64, cospi_21_64);
          const __m128i k32_p19_p13 = pair_set_epi32(cospi_19_64, cospi_13_64);
          const __m128i k32_p03_p29 = pair_set_epi32(cospi_3_64, cospi_29_64);
          const __m128i k32_m29_p03 = pair_set_epi32(-cospi_29_64, cospi_3_64);
          const __m128i k32_m13_p19 = pair_set_epi32(-cospi_13_64, cospi_19_64);
          const __m128i k32_m21_p11 = pair_set_epi32(-cospi_21_64, cospi_11_64);
          const __m128i k32_m05_p27 = pair_set_epi32(-cospi_5_64, cospi_27_64);

          u[ 0] = _mm_unpacklo_epi32(lstep1[40], lstep1[54]);
          u[ 1] = _mm_unpackhi_epi32(lstep1[40], lstep1[54]);
          u[ 2] = _mm_unpacklo_epi32(lstep1[41], lstep1[55]);
          u[ 3] = _mm_unpackhi_epi32(lstep1[41], lstep1[55]);
          u[ 4] = _mm_unpacklo_epi32(lstep1[42], lstep1[52]);
          u[ 5] = _mm_unpackhi_epi32(lstep1[42], lstep1[52]);
          u[ 6] = _mm_unpacklo_epi32(lstep1[43], lstep1[53]);
          u[ 7] = _mm_unpackhi_epi32(lstep1[43], lstep1[53]);
          u[ 8] = _mm_unpacklo_epi32(lstep1[44], lstep1[50]);
          u[ 9] = _mm_unpackhi_epi32(lstep1[44], lstep1[50]);
          u[10] = _mm_unpacklo_epi32(lstep1[45], lstep1[51]);
          u[11] = _mm_unpackhi_epi32(lstep1[45], lstep1[51]);
          u[12] = _mm_unpacklo_epi32(lstep1[46], lstep1[48]);
          u[13] = _mm_unpackhi_epi32(lstep1[46], lstep1[48]);
          u[14] = _mm_unpacklo_epi32(lstep1[47], lstep1[49]);
          u[15] = _mm_unpackhi_epi32(lstep1[47], lstep1[49]);

          v[ 0] = k_madd_epi32(u[ 0], k32_p27_p05);
          v[ 1] = k_madd_epi32(u[ 1], k32_p27_p05);
          v[ 2] = k_madd_epi32(u[ 2], k32_p27_p05);
          v[ 3] = k_madd_epi32(u[ 3], k32_p27_p05);
          v[ 4] = k_madd_epi32(u[ 4], k32_p11_p21);
          v[ 5] = k_madd_epi32(u[ 5], k32_p11_p21);
          v[ 6] = k_madd_epi32(u[ 6], k32_p11_p21);
          v[ 7] = k_madd_epi32(u[ 7], k32_p11_p21);
          v[ 8] = k_madd_epi32(u[ 8], k32_p19_p13);
          v[ 9] = k_madd_epi32(u[ 9], k32_p19_p13);
          v[10] = k_madd_epi32(u[10], k32_p19_p13);
          v[11] = k_madd_epi32(u[11], k32_p19_p13);
          v[12] = k_madd_epi32(u[12], k32_p03_p29);
          v[13] = k_madd_epi32(u[13], k32_p03_p29);
          v[14] = k_madd_epi32(u[14], k32_p03_p29);
          v[15] = k_madd_epi32(u[15], k32_p03_p29);
          v[16] = k_madd_epi32(u[12], k32_m29_p03);
          v[17] = k_madd_epi32(u[13], k32_m29_p03);
          v[18] = k_madd_epi32(u[14], k32_m29_p03);
          v[19] = k_madd_epi32(u[15], k32_m29_p03);
          v[20] = k_madd_epi32(u[ 8], k32_m13_p19);
          v[21] = k_madd_epi32(u[ 9], k32_m13_p19);
          v[22] = k_madd_epi32(u[10], k32_m13_p19);
          v[23] = k_madd_epi32(u[11], k32_m13_p19);
          v[24] = k_madd_epi32(u[ 4], k32_m21_p11);
          v[25] = k_madd_epi32(u[ 5], k32_m21_p11);
          v[26] = k_madd_epi32(u[ 6], k32_m21_p11);
          v[27] = k_madd_epi32(u[ 7], k32_m21_p11);
          v[28] = k_madd_epi32(u[ 0], k32_m05_p27);
          v[29] = k_madd_epi32(u[ 1], k32_m05_p27);
          v[30] = k_madd_epi32(u[ 2], k32_m05_p27);
          v[31] = k_madd_epi32(u[ 3], k32_m05_p27);

#if DCT_HIGH_BIT_DEPTH
          overflow = k_check_epi32_overflow_32(
              &v[0], &v[1], &v[2], &v[3], &v[4], &v[5], &v[6], &v[7],
              &v[8], &v[9], &v[10], &v[11], &v[12], &v[13], &v[14], &v[15],
              &v[16], &v[17], &v[18], &v[19], &v[20], &v[21], &v[22], &v[23],
              &v[24], &v[25], &v[26], &v[27], &v[28], &v[29], &v[30], &v[31],
              &kZero);
          if (overflow) {
            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
            return;
          }
#endif  // DCT_HIGH_BIT_DEPTH
          u[ 0] = k_packs_epi64(v[ 0], v[ 1]);
          u[ 1] = k_packs_epi64(v[ 2], v[ 3]);
          u[ 2] = k_packs_epi64(v[ 4], v[ 5]);
          u[ 3] = k_packs_epi64(v[ 6], v[ 7]);
          u[ 4] = k_packs_epi64(v[ 8], v[ 9]);
          u[ 5] = k_packs_epi64(v[10], v[11]);
          u[ 6] = k_packs_epi64(v[12], v[13]);
          u[ 7] = k_packs_epi64(v[14], v[15]);
          u[ 8] = k_packs_epi64(v[16], v[17]);
          u[ 9] = k_packs_epi64(v[18], v[19]);
          u[10] = k_packs_epi64(v[20], v[21]);
          u[11] = k_packs_epi64(v[22], v[23]);
          u[12] = k_packs_epi64(v[24], v[25]);
          u[13] = k_packs_epi64(v[26], v[27]);
          u[14] = k_packs_epi64(v[28], v[29]);
          u[15] = k_packs_epi64(v[30], v[31]);

          v[ 0] = _mm_add_epi32(u[ 0], k__DCT_CONST_ROUNDING);
          v[ 1] = _mm_add_epi32(u[ 1], k__DCT_CONST_ROUNDING);
          v[ 2] = _mm_add_epi32(u[ 2], k__DCT_CONST_ROUNDING);
          v[ 3] = _mm_add_epi32(u[ 3], k__DCT_CONST_ROUNDING);
          v[ 4] = _mm_add_epi32(u[ 4], k__DCT_CONST_ROUNDING);
          v[ 5] = _mm_add_epi32(u[ 5], k__DCT_CONST_ROUNDING);
          v[ 6] = _mm_add_epi32(u[ 6], k__DCT_CONST_ROUNDING);
          v[ 7] = _mm_add_epi32(u[ 7], k__DCT_CONST_ROUNDING);
          v[ 8] = _mm_add_epi32(u[ 8], k__DCT_CONST_ROUNDING);
          v[ 9] = _mm_add_epi32(u[ 9], k__DCT_CONST_ROUNDING);
          v[10] = _mm_add_epi32(u[10], k__DCT_CONST_ROUNDING);
          v[11] = _mm_add_epi32(u[11], k__DCT_CONST_ROUNDING);
          v[12] = _mm_add_epi32(u[12], k__DCT_CONST_ROUNDING);
          v[13] = _mm_add_epi32(u[13], k__DCT_CONST_ROUNDING);
          v[14] = _mm_add_epi32(u[14], k__DCT_CONST_ROUNDING);
          v[15] = _mm_add_epi32(u[15], k__DCT_CONST_ROUNDING);

          u[ 0] = _mm_srai_epi32(v[ 0], DCT_CONST_BITS);
          u[ 1] = _mm_srai_epi32(v[ 1], DCT_CONST_BITS);
          u[ 2] = _mm_srai_epi32(v[ 2], DCT_CONST_BITS);
          u[ 3] = _mm_srai_epi32(v[ 3], DCT_CONST_BITS);
          u[ 4] = _mm_srai_epi32(v[ 4], DCT_CONST_BITS);
          u[ 5] = _mm_srai_epi32(v[ 5], DCT_CONST_BITS);
          u[ 6] = _mm_srai_epi32(v[ 6], DCT_CONST_BITS);
          u[ 7] = _mm_srai_epi32(v[ 7], DCT_CONST_BITS);
          u[ 8] = _mm_srai_epi32(v[ 8], DCT_CONST_BITS);
          u[ 9] = _mm_srai_epi32(v[ 9], DCT_CONST_BITS);
          u[10] = _mm_srai_epi32(v[10], DCT_CONST_BITS);
          u[11] = _mm_srai_epi32(v[11], DCT_CONST_BITS);
          u[12] = _mm_srai_epi32(v[12], DCT_CONST_BITS);
          u[13] = _mm_srai_epi32(v[13], DCT_CONST_BITS);
          u[14] = _mm_srai_epi32(v[14], DCT_CONST_BITS);
          u[15] = _mm_srai_epi32(v[15], DCT_CONST_BITS);

          v[ 0] = _mm_cmplt_epi32(u[ 0], kZero);
          v[ 1] = _mm_cmplt_epi32(u[ 1], kZero);
          v[ 2] = _mm_cmplt_epi32(u[ 2], kZero);
          v[ 3] = _mm_cmplt_epi32(u[ 3], kZero);
          v[ 4] = _mm_cmplt_epi32(u[ 4], kZero);
          v[ 5] = _mm_cmplt_epi32(u[ 5], kZero);
          v[ 6] = _mm_cmplt_epi32(u[ 6], kZero);
          v[ 7] = _mm_cmplt_epi32(u[ 7], kZero);
          v[ 8] = _mm_cmplt_epi32(u[ 8], kZero);
          v[ 9] = _mm_cmplt_epi32(u[ 9], kZero);
          v[10] = _mm_cmplt_epi32(u[10], kZero);
          v[11] = _mm_cmplt_epi32(u[11], kZero);
          v[12] = _mm_cmplt_epi32(u[12], kZero);
          v[13] = _mm_cmplt_epi32(u[13], kZero);
          v[14] = _mm_cmplt_epi32(u[14], kZero);
          v[15] = _mm_cmplt_epi32(u[15], kZero);

          u[ 0] = _mm_sub_epi32(u[ 0], v[ 0]);
          u[ 1] = _mm_sub_epi32(u[ 1], v[ 1]);
          u[ 2] = _mm_sub_epi32(u[ 2], v[ 2]);
          u[ 3] = _mm_sub_epi32(u[ 3], v[ 3]);
          u[ 4] = _mm_sub_epi32(u[ 4], v[ 4]);
          u[ 5] = _mm_sub_epi32(u[ 5], v[ 5]);
          u[ 6] = _mm_sub_epi32(u[ 6], v[ 6]);
          u[ 7] = _mm_sub_epi32(u[ 7], v[ 7]);
          u[ 8] = _mm_sub_epi32(u[ 8], v[ 8]);
          u[ 9] = _mm_sub_epi32(u[ 9], v[ 9]);
          u[10] = _mm_sub_epi32(u[10], v[10]);
          u[11] = _mm_sub_epi32(u[11], v[11]);
          u[12] = _mm_sub_epi32(u[12], v[12]);
          u[13] = _mm_sub_epi32(u[13], v[13]);
          u[14] = _mm_sub_epi32(u[14], v[14]);
          u[15] = _mm_sub_epi32(u[15], v[15]);

          v[0] = _mm_add_epi32(u[0], K32One);
          v[1] = _mm_add_epi32(u[1], K32One);
          v[2] = _mm_add_epi32(u[2], K32One);
          v[3] = _mm_add_epi32(u[3], K32One);
          v[4] = _mm_add_epi32(u[4], K32One);
          v[5] = _mm_add_epi32(u[5], K32One);
          v[6] = _mm_add_epi32(u[6], K32One);
          v[7] = _mm_add_epi32(u[7], K32One);
          v[8] = _mm_add_epi32(u[8], K32One);
          v[9] = _mm_add_epi32(u[9], K32One);
          v[10] = _mm_add_epi32(u[10], K32One);
          v[11] = _mm_add_epi32(u[11], K32One);
          v[12] = _mm_add_epi32(u[12], K32One);
          v[13] = _mm_add_epi32(u[13], K32One);
          v[14] = _mm_add_epi32(u[14], K32One);
          v[15] = _mm_add_epi32(u[15], K32One);

          u[0] = _mm_srai_epi32(v[0], 2);
          u[1] = _mm_srai_epi32(v[1], 2);
          u[2] = _mm_srai_epi32(v[2], 2);
          u[3] = _mm_srai_epi32(v[3], 2);
          u[4] = _mm_srai_epi32(v[4], 2);
          u[5] = _mm_srai_epi32(v[5], 2);
          u[6] = _mm_srai_epi32(v[6], 2);
          u[7] = _mm_srai_epi32(v[7], 2);
          u[8] = _mm_srai_epi32(v[8], 2);
          u[9] = _mm_srai_epi32(v[9], 2);
          u[10] = _mm_srai_epi32(v[10], 2);
          u[11] = _mm_srai_epi32(v[11], 2);
          u[12] = _mm_srai_epi32(v[12], 2);
          u[13] = _mm_srai_epi32(v[13], 2);
          u[14] = _mm_srai_epi32(v[14], 2);
          u[15] = _mm_srai_epi32(v[15], 2);

          out[ 5] = _mm_packs_epi32(u[0], u[1]);
          out[21] = _mm_packs_epi32(u[2], u[3]);
          out[13] = _mm_packs_epi32(u[4], u[5]);
          out[29] = _mm_packs_epi32(u[6], u[7]);
          out[ 3] = _mm_packs_epi32(u[8], u[9]);
          out[19] = _mm_packs_epi32(u[10], u[11]);
          out[11] = _mm_packs_epi32(u[12], u[13]);
          out[27] = _mm_packs_epi32(u[14], u[15]);
#if DCT_HIGH_BIT_DEPTH
          overflow = check_epi16_overflow_x8(&out[5], &out[21], &out[13],
                                             &out[29], &out[3], &out[19],
                                             &out[11], &out[27]);
          if (overflow) {
            HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org);
            return;
          }
#endif  // DCT_HIGH_BIT_DEPTH
        }
      }
#endif  // FDCT32x32_HIGH_PRECISION
      // Transpose the results, do it as four 8x8 transposes.
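      // Each 8x8 block is transposed with three rounds of interleaves:
      // 16-bit, then 32-bit, then 64-bit, as diagrammed below.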
      {
        int transpose_block;
        int16_t *output0 = &intermediate[column_start * 32];
        tran_low_t *output1 = &output_org[column_start * 32];
        for (transpose_block = 0; transpose_block < 4; ++transpose_block) {
          __m128i *this_out = &out[8 * transpose_block];
          // 00 01 02 03 04 05 06 07
          // 10 11 12 13 14 15 16 17
          // 20 21 22 23 24 25 26 27
          // 30 31 32 33 34 35 36 37
          // 40 41 42 43 44 45 46 47
          // 50 51 52 53 54 55 56 57
          // 60 61 62 63 64 65 66 67
          // 70 71 72 73 74 75 76 77
          const __m128i tr0_0 = _mm_unpacklo_epi16(this_out[0], this_out[1]);
          const __m128i tr0_1 = _mm_unpacklo_epi16(this_out[2], this_out[3]);
          const __m128i tr0_2 = _mm_unpackhi_epi16(this_out[0], this_out[1]);
          const __m128i tr0_3 = _mm_unpackhi_epi16(this_out[2], this_out[3]);
          const __m128i tr0_4 = _mm_unpacklo_epi16(this_out[4], this_out[5]);
          const __m128i tr0_5 = _mm_unpacklo_epi16(this_out[6], this_out[7]);
          const __m128i tr0_6 = _mm_unpackhi_epi16(this_out[4], this_out[5]);
          const __m128i tr0_7 = _mm_unpackhi_epi16(this_out[6], this_out[7]);
          // 00 10 01 11 02 12 03 13
          // 20 30 21 31 22 32 23 33
          // 04 14 05 15 06 16 07 17
          // 24 34 25 35 26 36 27 37
          // 40 50 41 51 42 52 43 53
          // 60 70 61 71 62 72 63 73
          // 44 54 45 55 46 56 47 57
          // 64 74 65 75 66 76 67 77
          const __m128i tr1_0 = _mm_unpacklo_epi32(tr0_0, tr0_1);
          const __m128i tr1_1 = _mm_unpacklo_epi32(tr0_2, tr0_3);
          const __m128i tr1_2 = _mm_unpackhi_epi32(tr0_0, tr0_1);
          const __m128i tr1_3 = _mm_unpackhi_epi32(tr0_2, tr0_3);
          const __m128i tr1_4 = _mm_unpacklo_epi32(tr0_4, tr0_5);
          const __m128i tr1_5 = _mm_unpacklo_epi32(tr0_6, tr0_7);
          const __m128i tr1_6 = _mm_unpackhi_epi32(tr0_4, tr0_5);
          const __m128i tr1_7 = _mm_unpackhi_epi32(tr0_6, tr0_7);
          // 00 10 20 30 01 11 21 31
          // 40 50 60 70 41 51 61 71
          // 02 12 22 32 03 13 23 33
          // 42 52 62 72 43 53 63 73
          // 04 14 24 34 05 15 25 35
          // 44 54 64 74 45 55 65 75
          // 06 16 26 36 07 17 27 37
          // 46 56 66 76 47 57 67 77
          __m128i tr2_0 = _mm_unpacklo_epi64(tr1_0, tr1_4);
          __m128i tr2_1 = _mm_unpackhi_epi64(tr1_0, tr1_4);
          __m128i tr2_2 = _mm_unpacklo_epi64(tr1_2, tr1_6);
          __m128i tr2_3 = _mm_unpackhi_epi64(tr1_2, tr1_6);
          __m128i tr2_4 = _mm_unpacklo_epi64(tr1_1, tr1_5);
          __m128i tr2_5 = _mm_unpackhi_epi64(tr1_1, tr1_5);
          __m128i tr2_6 = _mm_unpacklo_epi64(tr1_3, tr1_7);
          __m128i tr2_7 = _mm_unpackhi_epi64(tr1_3, tr1_7);
          // 00 10 20 30 40 50 60 70
          // 01 11 21 31 41 51 61 71
          // 02 12 22 32 42 52 62 72
          // 03 13 23 33 43 53 63 73
          // 04 14 24 34 44 54 64 74
          // 05 15 25 35 45 55 65 75
          // 06 16 26 36 46 56 66 76
          // 07 17 27 37 47 57 67 77
          if (0 == pass) {
            // output[j] = (output[j] + 1 + (output[j] > 0)) >> 2;
            // TODO(cd): see quality impact of only doing
            //           output[j] = (output[j] + 1) >> 2;
            //           which would remove the code between here ...
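            // (The _mm_cmpgt_epi16() masks are -1 for positive lanes, so the
            // subtract below adds 1 to positive values, i.e. x += (x > 0).)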
            __m128i tr2_0_0 = _mm_cmpgt_epi16(tr2_0, kZero);
            __m128i tr2_1_0 = _mm_cmpgt_epi16(tr2_1, kZero);
            __m128i tr2_2_0 = _mm_cmpgt_epi16(tr2_2, kZero);
            __m128i tr2_3_0 = _mm_cmpgt_epi16(tr2_3, kZero);
            __m128i tr2_4_0 = _mm_cmpgt_epi16(tr2_4, kZero);
            __m128i tr2_5_0 = _mm_cmpgt_epi16(tr2_5, kZero);
            __m128i tr2_6_0 = _mm_cmpgt_epi16(tr2_6, kZero);
            __m128i tr2_7_0 = _mm_cmpgt_epi16(tr2_7, kZero);
            tr2_0 = _mm_sub_epi16(tr2_0, tr2_0_0);
            tr2_1 = _mm_sub_epi16(tr2_1, tr2_1_0);
            tr2_2 = _mm_sub_epi16(tr2_2, tr2_2_0);
            tr2_3 = _mm_sub_epi16(tr2_3, tr2_3_0);
            tr2_4 = _mm_sub_epi16(tr2_4, tr2_4_0);
            tr2_5 = _mm_sub_epi16(tr2_5, tr2_5_0);
            tr2_6 = _mm_sub_epi16(tr2_6, tr2_6_0);
            tr2_7 = _mm_sub_epi16(tr2_7, tr2_7_0);
            // ... and here.
            // PS: also change code in vp9/encoder/vp9_dct.c
            tr2_0 = _mm_add_epi16(tr2_0, kOne);
            tr2_1 = _mm_add_epi16(tr2_1, kOne);
            tr2_2 = _mm_add_epi16(tr2_2, kOne);
            tr2_3 = _mm_add_epi16(tr2_3, kOne);
            tr2_4 = _mm_add_epi16(tr2_4, kOne);
            tr2_5 = _mm_add_epi16(tr2_5, kOne);
            tr2_6 = _mm_add_epi16(tr2_6, kOne);
            tr2_7 = _mm_add_epi16(tr2_7, kOne);
            tr2_0 = _mm_srai_epi16(tr2_0, 2);
            tr2_1 = _mm_srai_epi16(tr2_1, 2);
            tr2_2 = _mm_srai_epi16(tr2_2, 2);
            tr2_3 = _mm_srai_epi16(tr2_3, 2);
            tr2_4 = _mm_srai_epi16(tr2_4, 2);
            tr2_5 = _mm_srai_epi16(tr2_5, 2);
            tr2_6 = _mm_srai_epi16(tr2_6, 2);
            tr2_7 = _mm_srai_epi16(tr2_7, 2);
          }
          // Note: even though all these stores are aligned, using the aligned
          //       intrinsic makes the code slightly slower.
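          // Pass 0 writes the still-transposed rows to the int16_t
          // intermediate buffer; pass 1 writes the final coefficients through
          // storeu_output(), which handles the tran_low_t output type.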
          if (pass == 0) {
            _mm_storeu_si128((__m128i *)(output0 + 0 * 32), tr2_0);
            _mm_storeu_si128((__m128i *)(output0 + 1 * 32), tr2_1);
            _mm_storeu_si128((__m128i *)(output0 + 2 * 32), tr2_2);
            _mm_storeu_si128((__m128i *)(output0 + 3 * 32), tr2_3);
            _mm_storeu_si128((__m128i *)(output0 + 4 * 32), tr2_4);
            _mm_storeu_si128((__m128i *)(output0 + 5 * 32), tr2_5);
            _mm_storeu_si128((__m128i *)(output0 + 6 * 32), tr2_6);
            _mm_storeu_si128((__m128i *)(output0 + 7 * 32), tr2_7);
            // Process next 8x8
            output0 += 8;
          } else {
            storeu_output(&tr2_0, (output1 + 0 * 32));
            storeu_output(&tr2_1, (output1 + 1 * 32));
            storeu_output(&tr2_2, (output1 + 2 * 32));
            storeu_output(&tr2_3, (output1 + 3 * 32));
            storeu_output(&tr2_4, (output1 + 4 * 32));
            storeu_output(&tr2_5, (output1 + 5 * 32));
            storeu_output(&tr2_6, (output1 + 6 * 32));
            storeu_output(&tr2_7, (output1 + 7 * 32));
            // Process next 8x8
            output1 += 8;
          }
        }
      }
    }
  }
}  // NOLINT

#undef ADD_EPI16
#undef SUB_EPI16
#undef HIGH_FDCT32x32_2D_C
#undef HIGH_FDCT32x32_2D_ROWS_C