OLD | NEW |
(Empty) | |
| 1 /* |
| 2 * Copyright (c) 2012 The WebM project authors. All Rights Reserved. |
| 3 * |
| 4 * Use of this source code is governed by a BSD-style license |
| 5 * that can be found in the LICENSE file in the root of the source |
| 6 * tree. An additional intellectual property rights grant can be found |
| 7 * in the file PATENTS. All contributing project authors may |
| 8 * be found in the AUTHORS file in the root of the source tree. |
| 9 */ |
| 10 |
| 11 #include <emmintrin.h> // SSE2 |
| 12 #include "vp9/common/vp9_idct.h" // for cospi constants |
| 13 #include "vpx_ports/mem.h" |
| 14 |
| 15 #if FDCT32x32_HIGH_PRECISION |
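| // k_madd_epi32: per 64-bit lane j, computes a[2j] * b[2j] + |
| // a[2j+1] * b[2j+1] using unsigned 32x32->64 multiplies; the 64-bit |
| // shifts bring the odd 32-bit elements into even position. |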
| 16 static INLINE __m128i k_madd_epi32(__m128i a, __m128i b) { |
| 17 __m128i buf0, buf1; |
| 18 buf0 = _mm_mul_epu32(a, b); |
| 19 a = _mm_srli_epi64(a, 32); |
| 20 b = _mm_srli_epi64(b, 32); |
| 21 buf1 = _mm_mul_epu32(a, b); |
| 22 return _mm_add_epi64(buf0, buf1); |
| 23 } |
| 24 |
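| // k_packs_epi64: packs the low 32 bits of each 64-bit lane of a and b |
| // into a single vector of four 32-bit values. |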
| 25 static INLINE __m128i k_packs_epi64(__m128i a, __m128i b) { |
| 26 __m128i buf0 = _mm_shuffle_epi32(a, _MM_SHUFFLE(0, 0, 2, 0)); |
| 27 __m128i buf1 = _mm_shuffle_epi32(b, _MM_SHUFFLE(0, 0, 2, 0)); |
| 28 return _mm_unpacklo_epi64(buf0, buf1); |
| 29 } |
| 30 |
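| // SSE2 has no 16->32 sign extension (_mm_cvtepi16_epi32 is SSE4.1), so |
| // the two helpers below zero-extend and then OR in the widened sign |
| // bits; mask16 is expected to isolate the per-lane sign bit (0x8000). |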
| 31 static INLINE __m128i k_cvtlo_epi16(__m128i a, __m128i mask16, __m128i kZero) { |
| 32 // convert the lower 4 signed 16-bit integers into 4 signed 32-bit integers |
| 33 __m128i sign_bit = _mm_and_si128(a, mask16); |
| 34 __m128i b = _mm_unpacklo_epi16(a, kZero); |
| 35 sign_bit = _mm_cmplt_epi16(sign_bit, kZero); |
| 36 sign_bit = _mm_unpacklo_epi16(kZero, sign_bit); |
| 37 return _mm_or_si128(sign_bit, b); |
| 38 } |
| 39 |
| 40 static INLINE __m128i k_cvthi_epi16(__m128i a, __m128i mask16, __m128i kZero) { |
| 41 // convert the upper 4 signed 16-bit integers into 4 signed 32-bit integers |
| 42 __m128i sign_bit = _mm_and_si128(a, mask16); |
| 43 __m128i b = _mm_unpackhi_epi16(a, kZero); |
| 44 sign_bit = _mm_cmplt_epi16(sign_bit, kZero); |
| 45 sign_bit = _mm_unpackhi_epi16(kZero, sign_bit); |
| 46 return _mm_or_si128(sign_bit, b); |
| 47 } |
| 48 #endif |
| 49 |
| 50 void FDCT32x32_2D(int16_t *input, |
| 51 int16_t *output_org, int pitch) { |
| 52 // Calculate pre-multiplied strides (str1 = one row, in int16_t units) |
| 53 const int str1 = pitch >> 1; |
| 54 const int str2 = pitch; |
| 55 const int str3 = pitch + str1; |
| 56 // We need an intermediate buffer between passes. |
| 57 DECLARE_ALIGNED(16, int16_t, intermediate[32 * 32]); |
| 58 // Constants |
| 59 // In one case all eight 16-bit lanes hold the same constant. In all |
| 60 // others we need a pair of constants repeated four times, built by |
| 61 // forming the 32-bit constant corresponding to that pair. |
| 62 const __m128i k__cospi_p16_p16 = _mm_set1_epi16(+cospi_16_64); |
| 63 const __m128i k__cospi_p16_m16 = pair_set_epi16(+cospi_16_64, -cospi_16_64); |
| 64 const __m128i k__cospi_m08_p24 = pair_set_epi16(-cospi_8_64, cospi_24_64); |
| 65 const __m128i k__cospi_m24_m08 = pair_set_epi16(-cospi_24_64, -cospi_8_64); |
| 66 const __m128i k__cospi_p24_p08 = pair_set_epi16(+cospi_24_64, cospi_8_64); |
| 67 const __m128i k__cospi_p12_p20 = pair_set_epi16(+cospi_12_64, cospi_20_64); |
| 68 const __m128i k__cospi_m20_p12 = pair_set_epi16(-cospi_20_64, cospi_12_64); |
| 69 const __m128i k__cospi_m04_p28 = pair_set_epi16(-cospi_4_64, cospi_28_64); |
| 70 const __m128i k__cospi_p28_p04 = pair_set_epi16(+cospi_28_64, cospi_4_64); |
| 71 const __m128i k__cospi_m28_m04 = pair_set_epi16(-cospi_28_64, -cospi_4_64); |
| 72 const __m128i k__cospi_m12_m20 = pair_set_epi16(-cospi_12_64, -cospi_20_64); |
| 73 const __m128i k__cospi_p30_p02 = pair_set_epi16(+cospi_30_64, cospi_2_64); |
| 74 const __m128i k__cospi_p14_p18 = pair_set_epi16(+cospi_14_64, cospi_18_64); |
| 75 const __m128i k__cospi_p22_p10 = pair_set_epi16(+cospi_22_64, cospi_10_64); |
| 76 const __m128i k__cospi_p06_p26 = pair_set_epi16(+cospi_6_64, cospi_26_64); |
| 77 const __m128i k__cospi_m26_p06 = pair_set_epi16(-cospi_26_64, cospi_6_64); |
| 78 const __m128i k__cospi_m10_p22 = pair_set_epi16(-cospi_10_64, cospi_22_64); |
| 79 const __m128i k__cospi_m18_p14 = pair_set_epi16(-cospi_18_64, cospi_14_64); |
| 80 const __m128i k__cospi_m02_p30 = pair_set_epi16(-cospi_2_64, cospi_30_64); |
| 81 const __m128i k__cospi_p31_p01 = pair_set_epi16(+cospi_31_64, cospi_1_64); |
| 82 const __m128i k__cospi_p15_p17 = pair_set_epi16(+cospi_15_64, cospi_17_64); |
| 83 const __m128i k__cospi_p23_p09 = pair_set_epi16(+cospi_23_64, cospi_9_64); |
| 84 const __m128i k__cospi_p07_p25 = pair_set_epi16(+cospi_7_64, cospi_25_64); |
| 85 const __m128i k__cospi_m25_p07 = pair_set_epi16(-cospi_25_64, cospi_7_64); |
| 86 const __m128i k__cospi_m09_p23 = pair_set_epi16(-cospi_9_64, cospi_23_64); |
| 87 const __m128i k__cospi_m17_p15 = pair_set_epi16(-cospi_17_64, cospi_15_64); |
| 88 const __m128i k__cospi_m01_p31 = pair_set_epi16(-cospi_1_64, cospi_31_64); |
| 89 const __m128i k__cospi_p27_p05 = pair_set_epi16(+cospi_27_64, cospi_5_64); |
| 90 const __m128i k__cospi_p11_p21 = pair_set_epi16(+cospi_11_64, cospi_21_64); |
| 91 const __m128i k__cospi_p19_p13 = pair_set_epi16(+cospi_19_64, cospi_13_64); |
| 92 const __m128i k__cospi_p03_p29 = pair_set_epi16(+cospi_3_64, cospi_29_64); |
| 93 const __m128i k__cospi_m29_p03 = pair_set_epi16(-cospi_29_64, cospi_3_64); |
| 94 const __m128i k__cospi_m13_p19 = pair_set_epi16(-cospi_13_64, cospi_19_64); |
| 95 const __m128i k__cospi_m21_p11 = pair_set_epi16(-cospi_21_64, cospi_11_64); |
| 96 const __m128i k__cospi_m05_p27 = pair_set_epi16(-cospi_5_64, cospi_27_64); |
| 97 const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING); |
| 98 const __m128i kZero = _mm_set1_epi16(0); |
| 99 const __m128i kOne = _mm_set1_epi16(1); |
| 100 // Do the two transform/transpose passes |
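| // Pass 0 reads the pitch-strided input and scales everything up by 4; |
| // pass 1 reads the 32x32 intermediate buffer written by pass 0. |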
| 101 int pass; |
| 102 for (pass = 0; pass < 2; ++pass) { |
| 103 // We process eight columns (transposed rows in second pass) at a time. |
| 104 int column_start; |
| 105 for (column_start = 0; column_start < 32; column_start += 8) { |
| 106 __m128i step1[32]; |
| 107 __m128i step2[32]; |
| 108 __m128i step3[32]; |
| 109 __m128i out[32]; |
| 110 // Stage 1 |
| 111 // Note: even though all the loads below are aligned, using the aligned |
| 112 // intrinsic makes the code slightly slower. |
| 113 if (0 == pass) { |
| 114 int16_t *in = &input[column_start]; |
| 115 // step1[i] = (in[ 0 * stride] + in[(32 - 1) * stride]) << 2; |
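| // and step1[31 - i] = (in[i * stride] - in[(31 - i) * stride]) << 2 |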
| 116 // Note: the next four blocks could be in a loop. That would help the |
| 117 // instruction cache but is actually slower. |
| 118 { |
| 119 int16_t *ina = in + 0 * str1; |
| 120 int16_t *inb = in + 31 * str1; |
| 121 __m128i *step1a = &step1[ 0]; |
| 122 __m128i *step1b = &step1[31]; |
| 123 const __m128i ina0 = _mm_loadu_si128((const __m128i *)(ina)); |
| 124 const __m128i ina1 = _mm_loadu_si128((const __m128i *)(ina + str1)); |
| 125 const __m128i ina2 = _mm_loadu_si128((const __m128i *)(ina + str2)); |
| 126 const __m128i ina3 = _mm_loadu_si128((const __m128i *)(ina + str3)); |
| 127 const __m128i inb3 = _mm_loadu_si128((const __m128i *)(inb - str3)); |
| 128 const __m128i inb2 = _mm_loadu_si128((const __m128i *)(inb - str2)); |
| 129 const __m128i inb1 = _mm_loadu_si128((const __m128i *)(inb - str1)); |
| 130 const __m128i inb0 = _mm_loadu_si128((const __m128i *)(inb)); |
| 131 step1a[ 0] = _mm_add_epi16(ina0, inb0); |
| 132 step1a[ 1] = _mm_add_epi16(ina1, inb1); |
| 133 step1a[ 2] = _mm_add_epi16(ina2, inb2); |
| 134 step1a[ 3] = _mm_add_epi16(ina3, inb3); |
| 135 step1b[-3] = _mm_sub_epi16(ina3, inb3); |
| 136 step1b[-2] = _mm_sub_epi16(ina2, inb2); |
| 137 step1b[-1] = _mm_sub_epi16(ina1, inb1); |
| 138 step1b[-0] = _mm_sub_epi16(ina0, inb0); |
| 139 step1a[ 0] = _mm_slli_epi16(step1a[ 0], 2); |
| 140 step1a[ 1] = _mm_slli_epi16(step1a[ 1], 2); |
| 141 step1a[ 2] = _mm_slli_epi16(step1a[ 2], 2); |
| 142 step1a[ 3] = _mm_slli_epi16(step1a[ 3], 2); |
| 143 step1b[-3] = _mm_slli_epi16(step1b[-3], 2); |
| 144 step1b[-2] = _mm_slli_epi16(step1b[-2], 2); |
| 145 step1b[-1] = _mm_slli_epi16(step1b[-1], 2); |
| 146 step1b[-0] = _mm_slli_epi16(step1b[-0], 2); |
| 147 } |
| 148 { |
| 149 int16_t *ina = in + 4 * str1; |
| 150 int16_t *inb = in + 27 * str1; |
| 151 __m128i *step1a = &step1[ 4]; |
| 152 __m128i *step1b = &step1[27]; |
| 153 const __m128i ina0 = _mm_loadu_si128((const __m128i *)(ina)); |
| 154 const __m128i ina1 = _mm_loadu_si128((const __m128i *)(ina + str1)); |
| 155 const __m128i ina2 = _mm_loadu_si128((const __m128i *)(ina + str2)); |
| 156 const __m128i ina3 = _mm_loadu_si128((const __m128i *)(ina + str3)); |
| 157 const __m128i inb3 = _mm_loadu_si128((const __m128i *)(inb - str3)); |
| 158 const __m128i inb2 = _mm_loadu_si128((const __m128i *)(inb - str2)); |
| 159 const __m128i inb1 = _mm_loadu_si128((const __m128i *)(inb - str1)); |
| 160 const __m128i inb0 = _mm_loadu_si128((const __m128i *)(inb)); |
| 161 step1a[ 0] = _mm_add_epi16(ina0, inb0); |
| 162 step1a[ 1] = _mm_add_epi16(ina1, inb1); |
| 163 step1a[ 2] = _mm_add_epi16(ina2, inb2); |
| 164 step1a[ 3] = _mm_add_epi16(ina3, inb3); |
| 165 step1b[-3] = _mm_sub_epi16(ina3, inb3); |
| 166 step1b[-2] = _mm_sub_epi16(ina2, inb2); |
| 167 step1b[-1] = _mm_sub_epi16(ina1, inb1); |
| 168 step1b[-0] = _mm_sub_epi16(ina0, inb0); |
| 169 step1a[ 0] = _mm_slli_epi16(step1a[ 0], 2); |
| 170 step1a[ 1] = _mm_slli_epi16(step1a[ 1], 2); |
| 171 step1a[ 2] = _mm_slli_epi16(step1a[ 2], 2); |
| 172 step1a[ 3] = _mm_slli_epi16(step1a[ 3], 2); |
| 173 step1b[-3] = _mm_slli_epi16(step1b[-3], 2); |
| 174 step1b[-2] = _mm_slli_epi16(step1b[-2], 2); |
| 175 step1b[-1] = _mm_slli_epi16(step1b[-1], 2); |
| 176 step1b[-0] = _mm_slli_epi16(step1b[-0], 2); |
| 177 } |
| 178 { |
| 179 int16_t *ina = in + 8 * str1; |
| 180 int16_t *inb = in + 23 * str1; |
| 181 __m128i *step1a = &step1[ 8]; |
| 182 __m128i *step1b = &step1[23]; |
| 183 const __m128i ina0 = _mm_loadu_si128((const __m128i *)(ina)); |
| 184 const __m128i ina1 = _mm_loadu_si128((const __m128i *)(ina + str1)); |
| 185 const __m128i ina2 = _mm_loadu_si128((const __m128i *)(ina + str2)); |
| 186 const __m128i ina3 = _mm_loadu_si128((const __m128i *)(ina + str3)); |
| 187 const __m128i inb3 = _mm_loadu_si128((const __m128i *)(inb - str3)); |
| 188 const __m128i inb2 = _mm_loadu_si128((const __m128i *)(inb - str2)); |
| 189 const __m128i inb1 = _mm_loadu_si128((const __m128i *)(inb - str1)); |
| 190 const __m128i inb0 = _mm_loadu_si128((const __m128i *)(inb)); |
| 191 step1a[ 0] = _mm_add_epi16(ina0, inb0); |
| 192 step1a[ 1] = _mm_add_epi16(ina1, inb1); |
| 193 step1a[ 2] = _mm_add_epi16(ina2, inb2); |
| 194 step1a[ 3] = _mm_add_epi16(ina3, inb3); |
| 195 step1b[-3] = _mm_sub_epi16(ina3, inb3); |
| 196 step1b[-2] = _mm_sub_epi16(ina2, inb2); |
| 197 step1b[-1] = _mm_sub_epi16(ina1, inb1); |
| 198 step1b[-0] = _mm_sub_epi16(ina0, inb0); |
| 199 step1a[ 0] = _mm_slli_epi16(step1a[ 0], 2); |
| 200 step1a[ 1] = _mm_slli_epi16(step1a[ 1], 2); |
| 201 step1a[ 2] = _mm_slli_epi16(step1a[ 2], 2); |
| 202 step1a[ 3] = _mm_slli_epi16(step1a[ 3], 2); |
| 203 step1b[-3] = _mm_slli_epi16(step1b[-3], 2); |
| 204 step1b[-2] = _mm_slli_epi16(step1b[-2], 2); |
| 205 step1b[-1] = _mm_slli_epi16(step1b[-1], 2); |
| 206 step1b[-0] = _mm_slli_epi16(step1b[-0], 2); |
| 207 } |
| 208 { |
| 209 int16_t *ina = in + 12 * str1; |
| 210 int16_t *inb = in + 19 * str1; |
| 211 __m128i *step1a = &step1[12]; |
| 212 __m128i *step1b = &step1[19]; |
| 213 const __m128i ina0 = _mm_loadu_si128((const __m128i *)(ina)); |
| 214 const __m128i ina1 = _mm_loadu_si128((const __m128i *)(ina + str1)); |
| 215 const __m128i ina2 = _mm_loadu_si128((const __m128i *)(ina + str2)); |
| 216 const __m128i ina3 = _mm_loadu_si128((const __m128i *)(ina + str3)); |
| 217 const __m128i inb3 = _mm_loadu_si128((const __m128i *)(inb - str3)); |
| 218 const __m128i inb2 = _mm_loadu_si128((const __m128i *)(inb - str2)); |
| 219 const __m128i inb1 = _mm_loadu_si128((const __m128i *)(inb - str1)); |
| 220 const __m128i inb0 = _mm_loadu_si128((const __m128i *)(inb)); |
| 221 step1a[ 0] = _mm_add_epi16(ina0, inb0); |
| 222 step1a[ 1] = _mm_add_epi16(ina1, inb1); |
| 223 step1a[ 2] = _mm_add_epi16(ina2, inb2); |
| 224 step1a[ 3] = _mm_add_epi16(ina3, inb3); |
| 225 step1b[-3] = _mm_sub_epi16(ina3, inb3); |
| 226 step1b[-2] = _mm_sub_epi16(ina2, inb2); |
| 227 step1b[-1] = _mm_sub_epi16(ina1, inb1); |
| 228 step1b[-0] = _mm_sub_epi16(ina0, inb0); |
| 229 step1a[ 0] = _mm_slli_epi16(step1a[ 0], 2); |
| 230 step1a[ 1] = _mm_slli_epi16(step1a[ 1], 2); |
| 231 step1a[ 2] = _mm_slli_epi16(step1a[ 2], 2); |
| 232 step1a[ 3] = _mm_slli_epi16(step1a[ 3], 2); |
| 233 step1b[-3] = _mm_slli_epi16(step1b[-3], 2); |
| 234 step1b[-2] = _mm_slli_epi16(step1b[-2], 2); |
| 235 step1b[-1] = _mm_slli_epi16(step1b[-1], 2); |
| 236 step1b[-0] = _mm_slli_epi16(step1b[-0], 2); |
| 237 } |
| 238 } else { |
| 239 int16_t *in = &intermediate[column_start]; |
| 240 // step1[i] = in[ 0 * 32] + in[(32 - 1) * 32]; |
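| // and step1[31 - i] = in[i * 32] - in[(31 - i) * 32] (no scaling here) |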
| 241 // Note: using the same pointer-offset approach as above is |
| 242 // counter-productive here, as all offsets can be calculated at |
| 243 // compile time. |
| 244 // Note: the next four blocks could be in a loop. That would help the |
| 245 // instruction cache but is actually slower. |
| 246 { |
| 247 __m128i in00 = _mm_loadu_si128((const __m128i *)(in + 0 * 32)); |
| 248 __m128i in01 = _mm_loadu_si128((const __m128i *)(in + 1 * 32)); |
| 249 __m128i in02 = _mm_loadu_si128((const __m128i *)(in + 2 * 32)); |
| 250 __m128i in03 = _mm_loadu_si128((const __m128i *)(in + 3 * 32)); |
| 251 __m128i in28 = _mm_loadu_si128((const __m128i *)(in + 28 * 32)); |
| 252 __m128i in29 = _mm_loadu_si128((const __m128i *)(in + 29 * 32)); |
| 253 __m128i in30 = _mm_loadu_si128((const __m128i *)(in + 30 * 32)); |
| 254 __m128i in31 = _mm_loadu_si128((const __m128i *)(in + 31 * 32)); |
| 255 step1[ 0] = _mm_add_epi16(in00, in31); |
| 256 step1[ 1] = _mm_add_epi16(in01, in30); |
| 257 step1[ 2] = _mm_add_epi16(in02, in29); |
| 258 step1[ 3] = _mm_add_epi16(in03, in28); |
| 259 step1[28] = _mm_sub_epi16(in03, in28); |
| 260 step1[29] = _mm_sub_epi16(in02, in29); |
| 261 step1[30] = _mm_sub_epi16(in01, in30); |
| 262 step1[31] = _mm_sub_epi16(in00, in31); |
| 263 } |
| 264 { |
| 265 __m128i in04 = _mm_loadu_si128((const __m128i *)(in + 4 * 32)); |
| 266 __m128i in05 = _mm_loadu_si128((const __m128i *)(in + 5 * 32)); |
| 267 __m128i in06 = _mm_loadu_si128((const __m128i *)(in + 6 * 32)); |
| 268 __m128i in07 = _mm_loadu_si128((const __m128i *)(in + 7 * 32)); |
| 269 __m128i in24 = _mm_loadu_si128((const __m128i *)(in + 24 * 32)); |
| 270 __m128i in25 = _mm_loadu_si128((const __m128i *)(in + 25 * 32)); |
| 271 __m128i in26 = _mm_loadu_si128((const __m128i *)(in + 26 * 32)); |
| 272 __m128i in27 = _mm_loadu_si128((const __m128i *)(in + 27 * 32)); |
| 273 step1[ 4] = _mm_add_epi16(in04, in27); |
| 274 step1[ 5] = _mm_add_epi16(in05, in26); |
| 275 step1[ 6] = _mm_add_epi16(in06, in25); |
| 276 step1[ 7] = _mm_add_epi16(in07, in24); |
| 277 step1[24] = _mm_sub_epi16(in07, in24); |
| 278 step1[25] = _mm_sub_epi16(in06, in25); |
| 279 step1[26] = _mm_sub_epi16(in05, in26); |
| 280 step1[27] = _mm_sub_epi16(in04, in27); |
| 281 } |
| 282 { |
| 283 __m128i in08 = _mm_loadu_si128((const __m128i *)(in + 8 * 32)); |
| 284 __m128i in09 = _mm_loadu_si128((const __m128i *)(in + 9 * 32)); |
| 285 __m128i in10 = _mm_loadu_si128((const __m128i *)(in + 10 * 32)); |
| 286 __m128i in11 = _mm_loadu_si128((const __m128i *)(in + 11 * 32)); |
| 287 __m128i in20 = _mm_loadu_si128((const __m128i *)(in + 20 * 32)); |
| 288 __m128i in21 = _mm_loadu_si128((const __m128i *)(in + 21 * 32)); |
| 289 __m128i in22 = _mm_loadu_si128((const __m128i *)(in + 22 * 32)); |
| 290 __m128i in23 = _mm_loadu_si128((const __m128i *)(in + 23 * 32)); |
| 291 step1[ 8] = _mm_add_epi16(in08, in23); |
| 292 step1[ 9] = _mm_add_epi16(in09, in22); |
| 293 step1[10] = _mm_add_epi16(in10, in21); |
| 294 step1[11] = _mm_add_epi16(in11, in20); |
| 295 step1[20] = _mm_sub_epi16(in11, in20); |
| 296 step1[21] = _mm_sub_epi16(in10, in21); |
| 297 step1[22] = _mm_sub_epi16(in09, in22); |
| 298 step1[23] = _mm_sub_epi16(in08, in23); |
| 299 } |
| 300 { |
| 301 __m128i in12 = _mm_loadu_si128((const __m128i *)(in + 12 * 32)); |
| 302 __m128i in13 = _mm_loadu_si128((const __m128i *)(in + 13 * 32)); |
| 303 __m128i in14 = _mm_loadu_si128((const __m128i *)(in + 14 * 32)); |
| 304 __m128i in15 = _mm_loadu_si128((const __m128i *)(in + 15 * 32)); |
| 305 __m128i in16 = _mm_loadu_si128((const __m128i *)(in + 16 * 32)); |
| 306 __m128i in17 = _mm_loadu_si128((const __m128i *)(in + 17 * 32)); |
| 307 __m128i in18 = _mm_loadu_si128((const __m128i *)(in + 18 * 32)); |
| 308 __m128i in19 = _mm_loadu_si128((const __m128i *)(in + 19 * 32)); |
| 309 step1[12] = _mm_add_epi16(in12, in19); |
| 310 step1[13] = _mm_add_epi16(in13, in18); |
| 311 step1[14] = _mm_add_epi16(in14, in17); |
| 312 step1[15] = _mm_add_epi16(in15, in16); |
| 313 step1[16] = _mm_sub_epi16(in15, in16); |
| 314 step1[17] = _mm_sub_epi16(in14, in17); |
| 315 step1[18] = _mm_sub_epi16(in13, in18); |
| 316 step1[19] = _mm_sub_epi16(in12, in19); |
| 317 } |
| 318 } |
| 319 // Stage 2 |
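| // Rows 0-15: plain butterflies. Rows 20-27 are rotated by cospi_16_64 |
| // (round(16384 / sqrt(2))); rows 16-19 and 28-31 pass through to stage 3. |
| // Per element, for k = 0..3: |
| //   step2[20 + k] = dct_const_round_shift( |
| //       (step1[27 - k] - step1[20 + k]) * cospi_16_64); |
| //   step2[27 - k] = dct_const_round_shift( |
| //       (step1[27 - k] + step1[20 + k]) * cospi_16_64); |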
| 320 { |
| 321 step2[ 0] = _mm_add_epi16(step1[0], step1[15]); |
| 322 step2[ 1] = _mm_add_epi16(step1[1], step1[14]); |
| 323 step2[ 2] = _mm_add_epi16(step1[2], step1[13]); |
| 324 step2[ 3] = _mm_add_epi16(step1[3], step1[12]); |
| 325 step2[ 4] = _mm_add_epi16(step1[4], step1[11]); |
| 326 step2[ 5] = _mm_add_epi16(step1[5], step1[10]); |
| 327 step2[ 6] = _mm_add_epi16(step1[6], step1[ 9]); |
| 328 step2[ 7] = _mm_add_epi16(step1[7], step1[ 8]); |
| 329 step2[ 8] = _mm_sub_epi16(step1[7], step1[ 8]); |
| 330 step2[ 9] = _mm_sub_epi16(step1[6], step1[ 9]); |
| 331 step2[10] = _mm_sub_epi16(step1[5], step1[10]); |
| 332 step2[11] = _mm_sub_epi16(step1[4], step1[11]); |
| 333 step2[12] = _mm_sub_epi16(step1[3], step1[12]); |
| 334 step2[13] = _mm_sub_epi16(step1[2], step1[13]); |
| 335 step2[14] = _mm_sub_epi16(step1[1], step1[14]); |
| 336 step2[15] = _mm_sub_epi16(step1[0], step1[15]); |
| 337 } |
| 338 { |
| 339 const __m128i s2_20_0 = _mm_unpacklo_epi16(step1[27], step1[20]); |
| 340 const __m128i s2_20_1 = _mm_unpackhi_epi16(step1[27], step1[20]); |
| 341 const __m128i s2_21_0 = _mm_unpacklo_epi16(step1[26], step1[21]); |
| 342 const __m128i s2_21_1 = _mm_unpackhi_epi16(step1[26], step1[21]); |
| 343 const __m128i s2_22_0 = _mm_unpacklo_epi16(step1[25], step1[22]); |
| 344 const __m128i s2_22_1 = _mm_unpackhi_epi16(step1[25], step1[22]); |
| 345 const __m128i s2_23_0 = _mm_unpacklo_epi16(step1[24], step1[23]); |
| 346 const __m128i s2_23_1 = _mm_unpackhi_epi16(step1[24], step1[23]); |
| 347 const __m128i s2_20_2 = _mm_madd_epi16(s2_20_0, k__cospi_p16_m16); |
| 348 const __m128i s2_20_3 = _mm_madd_epi16(s2_20_1, k__cospi_p16_m16); |
| 349 const __m128i s2_21_2 = _mm_madd_epi16(s2_21_0, k__cospi_p16_m16); |
| 350 const __m128i s2_21_3 = _mm_madd_epi16(s2_21_1, k__cospi_p16_m16); |
| 351 const __m128i s2_22_2 = _mm_madd_epi16(s2_22_0, k__cospi_p16_m16); |
| 352 const __m128i s2_22_3 = _mm_madd_epi16(s2_22_1, k__cospi_p16_m16); |
| 353 const __m128i s2_23_2 = _mm_madd_epi16(s2_23_0, k__cospi_p16_m16); |
| 354 const __m128i s2_23_3 = _mm_madd_epi16(s2_23_1, k__cospi_p16_m16); |
| 355 const __m128i s2_24_2 = _mm_madd_epi16(s2_23_0, k__cospi_p16_p16); |
| 356 const __m128i s2_24_3 = _mm_madd_epi16(s2_23_1, k__cospi_p16_p16); |
| 357 const __m128i s2_25_2 = _mm_madd_epi16(s2_22_0, k__cospi_p16_p16); |
| 358 const __m128i s2_25_3 = _mm_madd_epi16(s2_22_1, k__cospi_p16_p16); |
| 359 const __m128i s2_26_2 = _mm_madd_epi16(s2_21_0, k__cospi_p16_p16); |
| 360 const __m128i s2_26_3 = _mm_madd_epi16(s2_21_1, k__cospi_p16_p16); |
| 361 const __m128i s2_27_2 = _mm_madd_epi16(s2_20_0, k__cospi_p16_p16); |
| 362 const __m128i s2_27_3 = _mm_madd_epi16(s2_20_1, k__cospi_p16_p16); |
| 363 // dct_const_round_shift |
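| // i.e. (x + DCT_CONST_ROUNDING) >> DCT_CONST_BITS, a round-to-nearest |
| // division by 2^DCT_CONST_BITS (constants from vp9_idct.h). |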
| 364 const __m128i s2_20_4 = _mm_add_epi32(s2_20_2, k__DCT_CONST_ROUNDING); |
| 365 const __m128i s2_20_5 = _mm_add_epi32(s2_20_3, k__DCT_CONST_ROUNDING); |
| 366 const __m128i s2_21_4 = _mm_add_epi32(s2_21_2, k__DCT_CONST_ROUNDING); |
| 367 const __m128i s2_21_5 = _mm_add_epi32(s2_21_3, k__DCT_CONST_ROUNDING); |
| 368 const __m128i s2_22_4 = _mm_add_epi32(s2_22_2, k__DCT_CONST_ROUNDING); |
| 369 const __m128i s2_22_5 = _mm_add_epi32(s2_22_3, k__DCT_CONST_ROUNDING); |
| 370 const __m128i s2_23_4 = _mm_add_epi32(s2_23_2, k__DCT_CONST_ROUNDING); |
| 371 const __m128i s2_23_5 = _mm_add_epi32(s2_23_3, k__DCT_CONST_ROUNDING); |
| 372 const __m128i s2_24_4 = _mm_add_epi32(s2_24_2, k__DCT_CONST_ROUNDING); |
| 373 const __m128i s2_24_5 = _mm_add_epi32(s2_24_3, k__DCT_CONST_ROUNDING); |
| 374 const __m128i s2_25_4 = _mm_add_epi32(s2_25_2, k__DCT_CONST_ROUNDING); |
| 375 const __m128i s2_25_5 = _mm_add_epi32(s2_25_3, k__DCT_CONST_ROUNDING); |
| 376 const __m128i s2_26_4 = _mm_add_epi32(s2_26_2, k__DCT_CONST_ROUNDING); |
| 377 const __m128i s2_26_5 = _mm_add_epi32(s2_26_3, k__DCT_CONST_ROUNDING); |
| 378 const __m128i s2_27_4 = _mm_add_epi32(s2_27_2, k__DCT_CONST_ROUNDING); |
| 379 const __m128i s2_27_5 = _mm_add_epi32(s2_27_3, k__DCT_CONST_ROUNDING); |
| 380 const __m128i s2_20_6 = _mm_srai_epi32(s2_20_4, DCT_CONST_BITS); |
| 381 const __m128i s2_20_7 = _mm_srai_epi32(s2_20_5, DCT_CONST_BITS); |
| 382 const __m128i s2_21_6 = _mm_srai_epi32(s2_21_4, DCT_CONST_BITS); |
| 383 const __m128i s2_21_7 = _mm_srai_epi32(s2_21_5, DCT_CONST_BITS); |
| 384 const __m128i s2_22_6 = _mm_srai_epi32(s2_22_4, DCT_CONST_BITS); |
| 385 const __m128i s2_22_7 = _mm_srai_epi32(s2_22_5, DCT_CONST_BITS); |
| 386 const __m128i s2_23_6 = _mm_srai_epi32(s2_23_4, DCT_CONST_BITS); |
| 387 const __m128i s2_23_7 = _mm_srai_epi32(s2_23_5, DCT_CONST_BITS); |
| 388 const __m128i s2_24_6 = _mm_srai_epi32(s2_24_4, DCT_CONST_BITS); |
| 389 const __m128i s2_24_7 = _mm_srai_epi32(s2_24_5, DCT_CONST_BITS); |
| 390 const __m128i s2_25_6 = _mm_srai_epi32(s2_25_4, DCT_CONST_BITS); |
| 391 const __m128i s2_25_7 = _mm_srai_epi32(s2_25_5, DCT_CONST_BITS); |
| 392 const __m128i s2_26_6 = _mm_srai_epi32(s2_26_4, DCT_CONST_BITS); |
| 393 const __m128i s2_26_7 = _mm_srai_epi32(s2_26_5, DCT_CONST_BITS); |
| 394 const __m128i s2_27_6 = _mm_srai_epi32(s2_27_4, DCT_CONST_BITS); |
| 395 const __m128i s2_27_7 = _mm_srai_epi32(s2_27_5, DCT_CONST_BITS); |
| 396 // Combine |
| 397 step2[20] = _mm_packs_epi32(s2_20_6, s2_20_7); |
| 398 step2[21] = _mm_packs_epi32(s2_21_6, s2_21_7); |
| 399 step2[22] = _mm_packs_epi32(s2_22_6, s2_22_7); |
| 400 step2[23] = _mm_packs_epi32(s2_23_6, s2_23_7); |
| 401 step2[24] = _mm_packs_epi32(s2_24_6, s2_24_7); |
| 402 step2[25] = _mm_packs_epi32(s2_25_6, s2_25_7); |
| 403 step2[26] = _mm_packs_epi32(s2_26_6, s2_26_7); |
| 404 step2[27] = _mm_packs_epi32(s2_27_6, s2_27_7); |
| 405 } |
| 406 // Stage 3 |
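| // Butterflies on rows 0-7 (against the mirrored step2[7 - i]), a |
| // cospi_16_64 rotation on rows 10-13, and butterflies on rows 16-31 |
| // mixing the stage-1 pass-through values with the rotated step2[20..27]. |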
| 407 { |
| 408 step3[0] = _mm_add_epi16(step2[(8 - 1)], step2[0]); |
| 409 step3[1] = _mm_add_epi16(step2[(8 - 2)], step2[1]); |
| 410 step3[2] = _mm_add_epi16(step2[(8 - 3)], step2[2]); |
| 411 step3[3] = _mm_add_epi16(step2[(8 - 4)], step2[3]); |
| 412 step3[4] = _mm_sub_epi16(step2[(8 - 5)], step2[4]); |
| 413 step3[5] = _mm_sub_epi16(step2[(8 - 6)], step2[5]); |
| 414 step3[6] = _mm_sub_epi16(step2[(8 - 7)], step2[6]); |
| 415 step3[7] = _mm_sub_epi16(step2[(8 - 8)], step2[7]); |
| 416 } |
| 417 { |
| 418 const __m128i s3_10_0 = _mm_unpacklo_epi16(step2[13], step2[10]); |
| 419 const __m128i s3_10_1 = _mm_unpackhi_epi16(step2[13], step2[10]); |
| 420 const __m128i s3_11_0 = _mm_unpacklo_epi16(step2[12], step2[11]); |
| 421 const __m128i s3_11_1 = _mm_unpackhi_epi16(step2[12], step2[11]); |
| 422 const __m128i s3_10_2 = _mm_madd_epi16(s3_10_0, k__cospi_p16_m16); |
| 423 const __m128i s3_10_3 = _mm_madd_epi16(s3_10_1, k__cospi_p16_m16); |
| 424 const __m128i s3_11_2 = _mm_madd_epi16(s3_11_0, k__cospi_p16_m16); |
| 425 const __m128i s3_11_3 = _mm_madd_epi16(s3_11_1, k__cospi_p16_m16); |
| 426 const __m128i s3_12_2 = _mm_madd_epi16(s3_11_0, k__cospi_p16_p16); |
| 427 const __m128i s3_12_3 = _mm_madd_epi16(s3_11_1, k__cospi_p16_p16); |
| 428 const __m128i s3_13_2 = _mm_madd_epi16(s3_10_0, k__cospi_p16_p16); |
| 429 const __m128i s3_13_3 = _mm_madd_epi16(s3_10_1, k__cospi_p16_p16); |
| 430 // dct_const_round_shift |
| 431 const __m128i s3_10_4 = _mm_add_epi32(s3_10_2, k__DCT_CONST_ROUNDING); |
| 432 const __m128i s3_10_5 = _mm_add_epi32(s3_10_3, k__DCT_CONST_ROUNDING); |
| 433 const __m128i s3_11_4 = _mm_add_epi32(s3_11_2, k__DCT_CONST_ROUNDING); |
| 434 const __m128i s3_11_5 = _mm_add_epi32(s3_11_3, k__DCT_CONST_ROUNDING); |
| 435 const __m128i s3_12_4 = _mm_add_epi32(s3_12_2, k__DCT_CONST_ROUNDING); |
| 436 const __m128i s3_12_5 = _mm_add_epi32(s3_12_3, k__DCT_CONST_ROUNDING); |
| 437 const __m128i s3_13_4 = _mm_add_epi32(s3_13_2, k__DCT_CONST_ROUNDING); |
| 438 const __m128i s3_13_5 = _mm_add_epi32(s3_13_3, k__DCT_CONST_ROUNDING); |
| 439 const __m128i s3_10_6 = _mm_srai_epi32(s3_10_4, DCT_CONST_BITS); |
| 440 const __m128i s3_10_7 = _mm_srai_epi32(s3_10_5, DCT_CONST_BITS); |
| 441 const __m128i s3_11_6 = _mm_srai_epi32(s3_11_4, DCT_CONST_BITS); |
| 442 const __m128i s3_11_7 = _mm_srai_epi32(s3_11_5, DCT_CONST_BITS); |
| 443 const __m128i s3_12_6 = _mm_srai_epi32(s3_12_4, DCT_CONST_BITS); |
| 444 const __m128i s3_12_7 = _mm_srai_epi32(s3_12_5, DCT_CONST_BITS); |
| 445 const __m128i s3_13_6 = _mm_srai_epi32(s3_13_4, DCT_CONST_BITS); |
| 446 const __m128i s3_13_7 = _mm_srai_epi32(s3_13_5, DCT_CONST_BITS); |
| 447 // Combine |
| 448 step3[10] = _mm_packs_epi32(s3_10_6, s3_10_7); |
| 449 step3[11] = _mm_packs_epi32(s3_11_6, s3_11_7); |
| 450 step3[12] = _mm_packs_epi32(s3_12_6, s3_12_7); |
| 451 step3[13] = _mm_packs_epi32(s3_13_6, s3_13_7); |
| 452 } |
| 453 { |
| 454 step3[16] = _mm_add_epi16(step2[23], step1[16]); |
| 455 step3[17] = _mm_add_epi16(step2[22], step1[17]); |
| 456 step3[18] = _mm_add_epi16(step2[21], step1[18]); |
| 457 step3[19] = _mm_add_epi16(step2[20], step1[19]); |
| 458 step3[20] = _mm_sub_epi16(step1[19], step2[20]); |
| 459 step3[21] = _mm_sub_epi16(step1[18], step2[21]); |
| 460 step3[22] = _mm_sub_epi16(step1[17], step2[22]); |
| 461 step3[23] = _mm_sub_epi16(step1[16], step2[23]); |
| 462 step3[24] = _mm_sub_epi16(step1[31], step2[24]); |
| 463 step3[25] = _mm_sub_epi16(step1[30], step2[25]); |
| 464 step3[26] = _mm_sub_epi16(step1[29], step2[26]); |
| 465 step3[27] = _mm_sub_epi16(step1[28], step2[27]); |
| 466 step3[28] = _mm_add_epi16(step2[27], step1[28]); |
| 467 step3[29] = _mm_add_epi16(step2[26], step1[29]); |
| 468 step3[30] = _mm_add_epi16(step2[25], step1[30]); |
| 469 step3[31] = _mm_add_epi16(step2[24], step1[31]); |
| 470 } |
| 471 |
| 472 #if !FDCT32x32_HIGH_PRECISION |
| 473 // Damp the magnitude (round and shift right by 2) so that the |
| 474 // intermediate values stay within the range of 16 bits. |
| 475 if (1 == pass) { |
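| // Branchless rounding: the compare mask is all ones (-1) where a value |
| // is negative, so subtracting it adds 1 there; with the +1 and the |
| // arithmetic shift this computes (x + (x < 0) + 1) >> 2 per element. |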
| 476 __m128i s3_00_0 = _mm_cmplt_epi16(step3[ 0], kZero); |
| 477 __m128i s3_01_0 = _mm_cmplt_epi16(step3[ 1], kZero); |
| 478 __m128i s3_02_0 = _mm_cmplt_epi16(step3[ 2], kZero); |
| 479 __m128i s3_03_0 = _mm_cmplt_epi16(step3[ 3], kZero); |
| 480 __m128i s3_04_0 = _mm_cmplt_epi16(step3[ 4], kZero); |
| 481 __m128i s3_05_0 = _mm_cmplt_epi16(step3[ 5], kZero); |
| 482 __m128i s3_06_0 = _mm_cmplt_epi16(step3[ 6], kZero); |
| 483 __m128i s3_07_0 = _mm_cmplt_epi16(step3[ 7], kZero); |
| 484 __m128i s2_08_0 = _mm_cmplt_epi16(step2[ 8], kZero); |
| 485 __m128i s2_09_0 = _mm_cmplt_epi16(step2[ 9], kZero); |
| 486 __m128i s3_10_0 = _mm_cmplt_epi16(step3[10], kZero); |
| 487 __m128i s3_11_0 = _mm_cmplt_epi16(step3[11], kZero); |
| 488 __m128i s3_12_0 = _mm_cmplt_epi16(step3[12], kZero); |
| 489 __m128i s3_13_0 = _mm_cmplt_epi16(step3[13], kZero); |
| 490 __m128i s2_14_0 = _mm_cmplt_epi16(step2[14], kZero); |
| 491 __m128i s2_15_0 = _mm_cmplt_epi16(step2[15], kZero); |
| 492 __m128i s3_16_0 = _mm_cmplt_epi16(step3[16], kZero); |
| 493 __m128i s3_17_0 = _mm_cmplt_epi16(step3[17], kZero); |
| 494 __m128i s3_18_0 = _mm_cmplt_epi16(step3[18], kZero); |
| 495 __m128i s3_19_0 = _mm_cmplt_epi16(step3[19], kZero); |
| 496 __m128i s3_20_0 = _mm_cmplt_epi16(step3[20], kZero); |
| 497 __m128i s3_21_0 = _mm_cmplt_epi16(step3[21], kZero); |
| 498 __m128i s3_22_0 = _mm_cmplt_epi16(step3[22], kZero); |
| 499 __m128i s3_23_0 = _mm_cmplt_epi16(step3[23], kZero); |
| 500 __m128i s3_24_0 = _mm_cmplt_epi16(step3[24], kZero); |
| 501 __m128i s3_25_0 = _mm_cmplt_epi16(step3[25], kZero); |
| 502 __m128i s3_26_0 = _mm_cmplt_epi16(step3[26], kZero); |
| 503 __m128i s3_27_0 = _mm_cmplt_epi16(step3[27], kZero); |
| 504 __m128i s3_28_0 = _mm_cmplt_epi16(step3[28], kZero); |
| 505 __m128i s3_29_0 = _mm_cmplt_epi16(step3[29], kZero); |
| 506 __m128i s3_30_0 = _mm_cmplt_epi16(step3[30], kZero); |
| 507 __m128i s3_31_0 = _mm_cmplt_epi16(step3[31], kZero); |
| 508 step3[ 0] = _mm_sub_epi16(step3[ 0], s3_00_0); |
| 509 step3[ 1] = _mm_sub_epi16(step3[ 1], s3_01_0); |
| 510 step3[ 2] = _mm_sub_epi16(step3[ 2], s3_02_0); |
| 511 step3[ 3] = _mm_sub_epi16(step3[ 3], s3_03_0); |
| 512 step3[ 4] = _mm_sub_epi16(step3[ 4], s3_04_0); |
| 513 step3[ 5] = _mm_sub_epi16(step3[ 5], s3_05_0); |
| 514 step3[ 6] = _mm_sub_epi16(step3[ 6], s3_06_0); |
| 515 step3[ 7] = _mm_sub_epi16(step3[ 7], s3_07_0); |
| 516 step2[ 8] = _mm_sub_epi16(step2[ 8], s2_08_0); |
| 517 step2[ 9] = _mm_sub_epi16(step2[ 9], s2_09_0); |
| 518 step3[10] = _mm_sub_epi16(step3[10], s3_10_0); |
| 519 step3[11] = _mm_sub_epi16(step3[11], s3_11_0); |
| 520 step3[12] = _mm_sub_epi16(step3[12], s3_12_0); |
| 521 step3[13] = _mm_sub_epi16(step3[13], s3_13_0); |
| 522 step2[14] = _mm_sub_epi16(step2[14], s2_14_0); |
| 523 step2[15] = _mm_sub_epi16(step2[15], s2_15_0); |
| 524 step3[16] = _mm_sub_epi16(step3[16], s3_16_0); |
| 525 step3[17] = _mm_sub_epi16(step3[17], s3_17_0); |
| 526 step3[18] = _mm_sub_epi16(step3[18], s3_18_0); |
| 527 step3[19] = _mm_sub_epi16(step3[19], s3_19_0); |
| 528 step3[20] = _mm_sub_epi16(step3[20], s3_20_0); |
| 529 step3[21] = _mm_sub_epi16(step3[21], s3_21_0); |
| 530 step3[22] = _mm_sub_epi16(step3[22], s3_22_0); |
| 531 step3[23] = _mm_sub_epi16(step3[23], s3_23_0); |
| 532 step3[24] = _mm_sub_epi16(step3[24], s3_24_0); |
| 533 step3[25] = _mm_sub_epi16(step3[25], s3_25_0); |
| 534 step3[26] = _mm_sub_epi16(step3[26], s3_26_0); |
| 535 step3[27] = _mm_sub_epi16(step3[27], s3_27_0); |
| 536 step3[28] = _mm_sub_epi16(step3[28], s3_28_0); |
| 537 step3[29] = _mm_sub_epi16(step3[29], s3_29_0); |
| 538 step3[30] = _mm_sub_epi16(step3[30], s3_30_0); |
| 539 step3[31] = _mm_sub_epi16(step3[31], s3_31_0); |
| 540 step3[ 0] = _mm_add_epi16(step3[ 0], kOne); |
| 541 step3[ 1] = _mm_add_epi16(step3[ 1], kOne); |
| 542 step3[ 2] = _mm_add_epi16(step3[ 2], kOne); |
| 543 step3[ 3] = _mm_add_epi16(step3[ 3], kOne); |
| 544 step3[ 4] = _mm_add_epi16(step3[ 4], kOne); |
| 545 step3[ 5] = _mm_add_epi16(step3[ 5], kOne); |
| 546 step3[ 6] = _mm_add_epi16(step3[ 6], kOne); |
| 547 step3[ 7] = _mm_add_epi16(step3[ 7], kOne); |
| 548 step2[ 8] = _mm_add_epi16(step2[ 8], kOne); |
| 549 step2[ 9] = _mm_add_epi16(step2[ 9], kOne); |
| 550 step3[10] = _mm_add_epi16(step3[10], kOne); |
| 551 step3[11] = _mm_add_epi16(step3[11], kOne); |
| 552 step3[12] = _mm_add_epi16(step3[12], kOne); |
| 553 step3[13] = _mm_add_epi16(step3[13], kOne); |
| 554 step2[14] = _mm_add_epi16(step2[14], kOne); |
| 555 step2[15] = _mm_add_epi16(step2[15], kOne); |
| 556 step3[16] = _mm_add_epi16(step3[16], kOne); |
| 557 step3[17] = _mm_add_epi16(step3[17], kOne); |
| 558 step3[18] = _mm_add_epi16(step3[18], kOne); |
| 559 step3[19] = _mm_add_epi16(step3[19], kOne); |
| 560 step3[20] = _mm_add_epi16(step3[20], kOne); |
| 561 step3[21] = _mm_add_epi16(step3[21], kOne); |
| 562 step3[22] = _mm_add_epi16(step3[22], kOne); |
| 563 step3[23] = _mm_add_epi16(step3[23], kOne); |
| 564 step3[24] = _mm_add_epi16(step3[24], kOne); |
| 565 step3[25] = _mm_add_epi16(step3[25], kOne); |
| 566 step3[26] = _mm_add_epi16(step3[26], kOne); |
| 567 step3[27] = _mm_add_epi16(step3[27], kOne); |
| 568 step3[28] = _mm_add_epi16(step3[28], kOne); |
| 569 step3[29] = _mm_add_epi16(step3[29], kOne); |
| 570 step3[30] = _mm_add_epi16(step3[30], kOne); |
| 571 step3[31] = _mm_add_epi16(step3[31], kOne); |
| 572 step3[ 0] = _mm_srai_epi16(step3[ 0], 2); |
| 573 step3[ 1] = _mm_srai_epi16(step3[ 1], 2); |
| 574 step3[ 2] = _mm_srai_epi16(step3[ 2], 2); |
| 575 step3[ 3] = _mm_srai_epi16(step3[ 3], 2); |
| 576 step3[ 4] = _mm_srai_epi16(step3[ 4], 2); |
| 577 step3[ 5] = _mm_srai_epi16(step3[ 5], 2); |
| 578 step3[ 6] = _mm_srai_epi16(step3[ 6], 2); |
| 579 step3[ 7] = _mm_srai_epi16(step3[ 7], 2); |
| 580 step2[ 8] = _mm_srai_epi16(step2[ 8], 2); |
| 581 step2[ 9] = _mm_srai_epi16(step2[ 9], 2); |
| 582 step3[10] = _mm_srai_epi16(step3[10], 2); |
| 583 step3[11] = _mm_srai_epi16(step3[11], 2); |
| 584 step3[12] = _mm_srai_epi16(step3[12], 2); |
| 585 step3[13] = _mm_srai_epi16(step3[13], 2); |
| 586 step2[14] = _mm_srai_epi16(step2[14], 2); |
| 587 step2[15] = _mm_srai_epi16(step2[15], 2); |
| 588 step3[16] = _mm_srai_epi16(step3[16], 2); |
| 589 step3[17] = _mm_srai_epi16(step3[17], 2); |
| 590 step3[18] = _mm_srai_epi16(step3[18], 2); |
| 591 step3[19] = _mm_srai_epi16(step3[19], 2); |
| 592 step3[20] = _mm_srai_epi16(step3[20], 2); |
| 593 step3[21] = _mm_srai_epi16(step3[21], 2); |
| 594 step3[22] = _mm_srai_epi16(step3[22], 2); |
| 595 step3[23] = _mm_srai_epi16(step3[23], 2); |
| 596 step3[24] = _mm_srai_epi16(step3[24], 2); |
| 597 step3[25] = _mm_srai_epi16(step3[25], 2); |
| 598 step3[26] = _mm_srai_epi16(step3[26], 2); |
| 599 step3[27] = _mm_srai_epi16(step3[27], 2); |
| 600 step3[28] = _mm_srai_epi16(step3[28], 2); |
| 601 step3[29] = _mm_srai_epi16(step3[29], 2); |
| 602 step3[30] = _mm_srai_epi16(step3[30], 2); |
| 603 step3[31] = _mm_srai_epi16(step3[31], 2); |
| 604 } |
| 605 #endif |
| 606 |
| 607 #if FDCT32x32_HIGH_PRECISION |
| 608 if (pass == 0) { |
| 609 #endif |
| 610 // Stage 4 |
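| // Butterflies on rows 0-3 and 8-15, a cospi_16_64 rotation on rows 5-6, |
| // and (cospi_8_64, cospi_24_64) rotations on rows 18-21 and 26-29. |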
| 611 { |
| 612 step1[ 0] = _mm_add_epi16(step3[ 3], step3[ 0]); |
| 613 step1[ 1] = _mm_add_epi16(step3[ 2], step3[ 1]); |
| 614 step1[ 2] = _mm_sub_epi16(step3[ 1], step3[ 2]); |
| 615 step1[ 3] = _mm_sub_epi16(step3[ 0], step3[ 3]); |
| 616 step1[ 8] = _mm_add_epi16(step3[11], step2[ 8]); |
| 617 step1[ 9] = _mm_add_epi16(step3[10], step2[ 9]); |
| 618 step1[10] = _mm_sub_epi16(step2[ 9], step3[10]); |
| 619 step1[11] = _mm_sub_epi16(step2[ 8], step3[11]); |
| 620 step1[12] = _mm_sub_epi16(step2[15], step3[12]); |
| 621 step1[13] = _mm_sub_epi16(step2[14], step3[13]); |
| 622 step1[14] = _mm_add_epi16(step3[13], step2[14]); |
| 623 step1[15] = _mm_add_epi16(step3[12], step2[15]); |
| 624 } |
| 625 { |
| 626 const __m128i s1_05_0 = _mm_unpacklo_epi16(step3[6], step3[5]); |
| 627 const __m128i s1_05_1 = _mm_unpackhi_epi16(step3[6], step3[5]); |
| 628 const __m128i s1_05_2 = _mm_madd_epi16(s1_05_0, k__cospi_p16_m16); |
| 629 const __m128i s1_05_3 = _mm_madd_epi16(s1_05_1, k__cospi_p16_m16); |
| 630 const __m128i s1_06_2 = _mm_madd_epi16(s1_05_0, k__cospi_p16_p16); |
| 631 const __m128i s1_06_3 = _mm_madd_epi16(s1_05_1, k__cospi_p16_p16); |
| 632 // dct_const_round_shift |
| 633 const __m128i s1_05_4 = _mm_add_epi32(s1_05_2, k__DCT_CONST_ROUNDING); |
| 634 const __m128i s1_05_5 = _mm_add_epi32(s1_05_3, k__DCT_CONST_ROUNDING); |
| 635 const __m128i s1_06_4 = _mm_add_epi32(s1_06_2, k__DCT_CONST_ROUNDING); |
| 636 const __m128i s1_06_5 = _mm_add_epi32(s1_06_3, k__DCT_CONST_ROUNDING); |
| 637 const __m128i s1_05_6 = _mm_srai_epi32(s1_05_4, DCT_CONST_BITS); |
| 638 const __m128i s1_05_7 = _mm_srai_epi32(s1_05_5, DCT_CONST_BITS); |
| 639 const __m128i s1_06_6 = _mm_srai_epi32(s1_06_4, DCT_CONST_BITS); |
| 640 const __m128i s1_06_7 = _mm_srai_epi32(s1_06_5, DCT_CONST_BITS); |
| 641 // Combine |
| 642 step1[5] = _mm_packs_epi32(s1_05_6, s1_05_7); |
| 643 step1[6] = _mm_packs_epi32(s1_06_6, s1_06_7); |
| 644 } |
| 645 { |
| 646 const __m128i s1_18_0 = _mm_unpacklo_epi16(step3[18], step3[29]); |
| 647 const __m128i s1_18_1 = _mm_unpackhi_epi16(step3[18], step3[29]); |
| 648 const __m128i s1_19_0 = _mm_unpacklo_epi16(step3[19], step3[28]); |
| 649 const __m128i s1_19_1 = _mm_unpackhi_epi16(step3[19], step3[28]); |
| 650 const __m128i s1_20_0 = _mm_unpacklo_epi16(step3[20], step3[27]); |
| 651 const __m128i s1_20_1 = _mm_unpackhi_epi16(step3[20], step3[27]); |
| 652 const __m128i s1_21_0 = _mm_unpacklo_epi16(step3[21], step3[26]); |
| 653 const __m128i s1_21_1 = _mm_unpackhi_epi16(step3[21], step3[26]); |
| 654 const __m128i s1_18_2 = _mm_madd_epi16(s1_18_0, k__cospi_m08_p24); |
| 655 const __m128i s1_18_3 = _mm_madd_epi16(s1_18_1, k__cospi_m08_p24); |
| 656 const __m128i s1_19_2 = _mm_madd_epi16(s1_19_0, k__cospi_m08_p24); |
| 657 const __m128i s1_19_3 = _mm_madd_epi16(s1_19_1, k__cospi_m08_p24); |
| 658 const __m128i s1_20_2 = _mm_madd_epi16(s1_20_0, k__cospi_m24_m08); |
| 659 const __m128i s1_20_3 = _mm_madd_epi16(s1_20_1, k__cospi_m24_m08); |
| 660 const __m128i s1_21_2 = _mm_madd_epi16(s1_21_0, k__cospi_m24_m08); |
| 661 const __m128i s1_21_3 = _mm_madd_epi16(s1_21_1, k__cospi_m24_m08); |
| 662 const __m128i s1_26_2 = _mm_madd_epi16(s1_21_0, k__cospi_m08_p24); |
| 663 const __m128i s1_26_3 = _mm_madd_epi16(s1_21_1, k__cospi_m08_p24); |
| 664 const __m128i s1_27_2 = _mm_madd_epi16(s1_20_0, k__cospi_m08_p24); |
| 665 const __m128i s1_27_3 = _mm_madd_epi16(s1_20_1, k__cospi_m08_p24); |
| 666 const __m128i s1_28_2 = _mm_madd_epi16(s1_19_0, k__cospi_p24_p08); |
| 667 const __m128i s1_28_3 = _mm_madd_epi16(s1_19_1, k__cospi_p24_p08); |
| 668 const __m128i s1_29_2 = _mm_madd_epi16(s1_18_0, k__cospi_p24_p08); |
| 669 const __m128i s1_29_3 = _mm_madd_epi16(s1_18_1, k__cospi_p24_p08); |
| 670 // dct_const_round_shift |
| 671 const __m128i s1_18_4 = _mm_add_epi32(s1_18_2, k__DCT_CONST_ROUNDING); |
| 672 const __m128i s1_18_5 = _mm_add_epi32(s1_18_3, k__DCT_CONST_ROUNDING); |
| 673 const __m128i s1_19_4 = _mm_add_epi32(s1_19_2, k__DCT_CONST_ROUNDING); |
| 674 const __m128i s1_19_5 = _mm_add_epi32(s1_19_3, k__DCT_CONST_ROUNDING); |
| 675 const __m128i s1_20_4 = _mm_add_epi32(s1_20_2, k__DCT_CONST_ROUNDING); |
| 676 const __m128i s1_20_5 = _mm_add_epi32(s1_20_3, k__DCT_CONST_ROUNDING); |
| 677 const __m128i s1_21_4 = _mm_add_epi32(s1_21_2, k__DCT_CONST_ROUNDING); |
| 678 const __m128i s1_21_5 = _mm_add_epi32(s1_21_3, k__DCT_CONST_ROUNDING); |
| 679 const __m128i s1_26_4 = _mm_add_epi32(s1_26_2, k__DCT_CONST_ROUNDING); |
| 680 const __m128i s1_26_5 = _mm_add_epi32(s1_26_3, k__DCT_CONST_ROUNDING); |
| 681 const __m128i s1_27_4 = _mm_add_epi32(s1_27_2, k__DCT_CONST_ROUNDING); |
| 682 const __m128i s1_27_5 = _mm_add_epi32(s1_27_3, k__DCT_CONST_ROUNDING); |
| 683 const __m128i s1_28_4 = _mm_add_epi32(s1_28_2, k__DCT_CONST_ROUNDING); |
| 684 const __m128i s1_28_5 = _mm_add_epi32(s1_28_3, k__DCT_CONST_ROUNDING); |
| 685 const __m128i s1_29_4 = _mm_add_epi32(s1_29_2, k__DCT_CONST_ROUNDING); |
| 686 const __m128i s1_29_5 = _mm_add_epi32(s1_29_3, k__DCT_CONST_ROUNDING); |
| 687 const __m128i s1_18_6 = _mm_srai_epi32(s1_18_4, DCT_CONST_BITS); |
| 688 const __m128i s1_18_7 = _mm_srai_epi32(s1_18_5, DCT_CONST_BITS); |
| 689 const __m128i s1_19_6 = _mm_srai_epi32(s1_19_4, DCT_CONST_BITS); |
| 690 const __m128i s1_19_7 = _mm_srai_epi32(s1_19_5, DCT_CONST_BITS); |
| 691 const __m128i s1_20_6 = _mm_srai_epi32(s1_20_4, DCT_CONST_BITS); |
| 692 const __m128i s1_20_7 = _mm_srai_epi32(s1_20_5, DCT_CONST_BITS); |
| 693 const __m128i s1_21_6 = _mm_srai_epi32(s1_21_4, DCT_CONST_BITS); |
| 694 const __m128i s1_21_7 = _mm_srai_epi32(s1_21_5, DCT_CONST_BITS); |
| 695 const __m128i s1_26_6 = _mm_srai_epi32(s1_26_4, DCT_CONST_BITS); |
| 696 const __m128i s1_26_7 = _mm_srai_epi32(s1_26_5, DCT_CONST_BITS); |
| 697 const __m128i s1_27_6 = _mm_srai_epi32(s1_27_4, DCT_CONST_BITS); |
| 698 const __m128i s1_27_7 = _mm_srai_epi32(s1_27_5, DCT_CONST_BITS); |
| 699 const __m128i s1_28_6 = _mm_srai_epi32(s1_28_4, DCT_CONST_BITS); |
| 700 const __m128i s1_28_7 = _mm_srai_epi32(s1_28_5, DCT_CONST_BITS); |
| 701 const __m128i s1_29_6 = _mm_srai_epi32(s1_29_4, DCT_CONST_BITS); |
| 702 const __m128i s1_29_7 = _mm_srai_epi32(s1_29_5, DCT_CONST_BITS); |
| 703 // Combine |
| 704 step1[18] = _mm_packs_epi32(s1_18_6, s1_18_7); |
| 705 step1[19] = _mm_packs_epi32(s1_19_6, s1_19_7); |
| 706 step1[20] = _mm_packs_epi32(s1_20_6, s1_20_7); |
| 707 step1[21] = _mm_packs_epi32(s1_21_6, s1_21_7); |
| 708 step1[26] = _mm_packs_epi32(s1_26_6, s1_26_7); |
| 709 step1[27] = _mm_packs_epi32(s1_27_6, s1_27_7); |
| 710 step1[28] = _mm_packs_epi32(s1_28_6, s1_28_7); |
| 711 step1[29] = _mm_packs_epi32(s1_29_6, s1_29_7); |
| 712 } |
| 713 // Stage 5 |
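| // Butterflies on rows 4-7 and 16-31, (cospi_8_64, cospi_24_64) rotations |
| // on rows 9-10 and 13-14, and the first four outputs (0, 8, 16, 24) as |
| // the 4-point DCT of step1[0..3]. |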
| 714 { |
| 715 step2[4] = _mm_add_epi16(step1[5], step3[4]); |
| 716 step2[5] = _mm_sub_epi16(step3[4], step1[5]); |
| 717 step2[6] = _mm_sub_epi16(step3[7], step1[6]); |
| 718 step2[7] = _mm_add_epi16(step1[6], step3[7]); |
| 719 } |
| 720 { |
| 721 const __m128i out_00_0 = _mm_unpacklo_epi16(step1[0], step1[1]); |
| 722 const __m128i out_00_1 = _mm_unpackhi_epi16(step1[0], step1[1]); |
| 723 const __m128i out_08_0 = _mm_unpacklo_epi16(step1[2], step1[3]); |
| 724 const __m128i out_08_1 = _mm_unpackhi_epi16(step1[2], step1[3]); |
| 725 const __m128i out_00_2 = _mm_madd_epi16(out_00_0, k__cospi_p16_p16); |
| 726 const __m128i out_00_3 = _mm_madd_epi16(out_00_1, k__cospi_p16_p16); |
| 727 const __m128i out_16_2 = _mm_madd_epi16(out_00_0, k__cospi_p16_m16); |
| 728 const __m128i out_16_3 = _mm_madd_epi16(out_00_1, k__cospi_p16_m16); |
| 729 const __m128i out_08_2 = _mm_madd_epi16(out_08_0, k__cospi_p24_p08); |
| 730 const __m128i out_08_3 = _mm_madd_epi16(out_08_1, k__cospi_p24_p08); |
| 731 const __m128i out_24_2 = _mm_madd_epi16(out_08_0, k__cospi_m08_p24); |
| 732 const __m128i out_24_3 = _mm_madd_epi16(out_08_1, k__cospi_m08_p24); |
| 733 // dct_const_round_shift |
| 734 const __m128i out_00_4 = _mm_add_epi32(out_00_2, k__DCT_CONST_ROUNDING); |
| 735 const __m128i out_00_5 = _mm_add_epi32(out_00_3, k__DCT_CONST_ROUNDING); |
| 736 const __m128i out_16_4 = _mm_add_epi32(out_16_2, k__DCT_CONST_ROUNDING); |
| 737 const __m128i out_16_5 = _mm_add_epi32(out_16_3, k__DCT_CONST_ROUNDING); |
| 738 const __m128i out_08_4 = _mm_add_epi32(out_08_2, k__DCT_CONST_ROUNDING); |
| 739 const __m128i out_08_5 = _mm_add_epi32(out_08_3, k__DCT_CONST_ROUNDING); |
| 740 const __m128i out_24_4 = _mm_add_epi32(out_24_2, k__DCT_CONST_ROUNDING); |
| 741 const __m128i out_24_5 = _mm_add_epi32(out_24_3, k__DCT_CONST_ROUNDING); |
| 742 const __m128i out_00_6 = _mm_srai_epi32(out_00_4, DCT_CONST_BITS); |
| 743 const __m128i out_00_7 = _mm_srai_epi32(out_00_5, DCT_CONST_BITS); |
| 744 const __m128i out_16_6 = _mm_srai_epi32(out_16_4, DCT_CONST_BITS); |
| 745 const __m128i out_16_7 = _mm_srai_epi32(out_16_5, DCT_CONST_BITS); |
| 746 const __m128i out_08_6 = _mm_srai_epi32(out_08_4, DCT_CONST_BITS); |
| 747 const __m128i out_08_7 = _mm_srai_epi32(out_08_5, DCT_CONST_BITS); |
| 748 const __m128i out_24_6 = _mm_srai_epi32(out_24_4, DCT_CONST_BITS); |
| 749 const __m128i out_24_7 = _mm_srai_epi32(out_24_5, DCT_CONST_BITS); |
| 750 // Combine |
| 751 out[ 0] = _mm_packs_epi32(out_00_6, out_00_7); |
| 752 out[16] = _mm_packs_epi32(out_16_6, out_16_7); |
| 753 out[ 8] = _mm_packs_epi32(out_08_6, out_08_7); |
| 754 out[24] = _mm_packs_epi32(out_24_6, out_24_7); |
| 755 } |
| 756 { |
| 757 const __m128i s2_09_0 = _mm_unpacklo_epi16(step1[ 9], step1[14]); |
| 758 const __m128i s2_09_1 = _mm_unpackhi_epi16(step1[ 9], step1[14]); |
| 759 const __m128i s2_10_0 = _mm_unpacklo_epi16(step1[10], step1[13]); |
| 760 const __m128i s2_10_1 = _mm_unpackhi_epi16(step1[10], step1[13]); |
| 761 const __m128i s2_09_2 = _mm_madd_epi16(s2_09_0, k__cospi_m08_p24); |
| 762 const __m128i s2_09_3 = _mm_madd_epi16(s2_09_1, k__cospi_m08_p24); |
| 763 const __m128i s2_10_2 = _mm_madd_epi16(s2_10_0, k__cospi_m24_m08); |
| 764 const __m128i s2_10_3 = _mm_madd_epi16(s2_10_1, k__cospi_m24_m08); |
| 765 const __m128i s2_13_2 = _mm_madd_epi16(s2_10_0, k__cospi_m08_p24); |
| 766 const __m128i s2_13_3 = _mm_madd_epi16(s2_10_1, k__cospi_m08_p24); |
| 767 const __m128i s2_14_2 = _mm_madd_epi16(s2_09_0, k__cospi_p24_p08); |
| 768 const __m128i s2_14_3 = _mm_madd_epi16(s2_09_1, k__cospi_p24_p08); |
| 769 // dct_const_round_shift |
| 770 const __m128i s2_09_4 = _mm_add_epi32(s2_09_2, k__DCT_CONST_ROUNDING); |
| 771 const __m128i s2_09_5 = _mm_add_epi32(s2_09_3, k__DCT_CONST_ROUNDING); |
| 772 const __m128i s2_10_4 = _mm_add_epi32(s2_10_2, k__DCT_CONST_ROUNDING); |
| 773 const __m128i s2_10_5 = _mm_add_epi32(s2_10_3, k__DCT_CONST_ROUNDING); |
| 774 const __m128i s2_13_4 = _mm_add_epi32(s2_13_2, k__DCT_CONST_ROUNDING); |
| 775 const __m128i s2_13_5 = _mm_add_epi32(s2_13_3, k__DCT_CONST_ROUNDING); |
| 776 const __m128i s2_14_4 = _mm_add_epi32(s2_14_2, k__DCT_CONST_ROUNDING); |
| 777 const __m128i s2_14_5 = _mm_add_epi32(s2_14_3, k__DCT_CONST_ROUNDING); |
| 778 const __m128i s2_09_6 = _mm_srai_epi32(s2_09_4, DCT_CONST_BITS); |
| 779 const __m128i s2_09_7 = _mm_srai_epi32(s2_09_5, DCT_CONST_BITS); |
| 780 const __m128i s2_10_6 = _mm_srai_epi32(s2_10_4, DCT_CONST_BITS); |
| 781 const __m128i s2_10_7 = _mm_srai_epi32(s2_10_5, DCT_CONST_BITS); |
| 782 const __m128i s2_13_6 = _mm_srai_epi32(s2_13_4, DCT_CONST_BITS); |
| 783 const __m128i s2_13_7 = _mm_srai_epi32(s2_13_5, DCT_CONST_BITS); |
| 784 const __m128i s2_14_6 = _mm_srai_epi32(s2_14_4, DCT_CONST_BITS); |
| 785 const __m128i s2_14_7 = _mm_srai_epi32(s2_14_5, DCT_CONST_BITS); |
| 786 // Combine |
| 787 step2[ 9] = _mm_packs_epi32(s2_09_6, s2_09_7); |
| 788 step2[10] = _mm_packs_epi32(s2_10_6, s2_10_7); |
| 789 step2[13] = _mm_packs_epi32(s2_13_6, s2_13_7); |
| 790 step2[14] = _mm_packs_epi32(s2_14_6, s2_14_7); |
| 791 } |
| 792 { |
| 793 step2[16] = _mm_add_epi16(step1[19], step3[16]); |
| 794 step2[17] = _mm_add_epi16(step1[18], step3[17]); |
| 795 step2[18] = _mm_sub_epi16(step3[17], step1[18]); |
| 796 step2[19] = _mm_sub_epi16(step3[16], step1[19]); |
| 797 step2[20] = _mm_sub_epi16(step3[23], step1[20]); |
| 798 step2[21] = _mm_sub_epi16(step3[22], step1[21]); |
| 799 step2[22] = _mm_add_epi16(step1[21], step3[22]); |
| 800 step2[23] = _mm_add_epi16(step1[20], step3[23]); |
| 801 step2[24] = _mm_add_epi16(step1[27], step3[24]); |
| 802 step2[25] = _mm_add_epi16(step1[26], step3[25]); |
| 803 step2[26] = _mm_sub_epi16(step3[25], step1[26]); |
| 804 step2[27] = _mm_sub_epi16(step3[24], step1[27]); |
| 805 step2[28] = _mm_sub_epi16(step3[31], step1[28]); |
| 806 step2[29] = _mm_sub_epi16(step3[30], step1[29]); |
| 807 step2[30] = _mm_add_epi16(step1[29], step3[30]); |
| 808 step2[31] = _mm_add_epi16(step1[28], step3[31]); |
| 809 } |
| 810 // Stage 6 |
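| // Outputs 4, 12, 20, 28 come from rows 4-7; rows 8-15 get butterflies |
| // and rows 17-18, 21-22, 25-26, 29-30 get further rotations. |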
| 811 { |
| 812 const __m128i out_04_0 = _mm_unpacklo_epi16(step2[4], step2[7]); |
| 813 const __m128i out_04_1 = _mm_unpackhi_epi16(step2[4], step2[7]); |
| 814 const __m128i out_20_0 = _mm_unpacklo_epi16(step2[5], step2[6]); |
| 815 const __m128i out_20_1 = _mm_unpackhi_epi16(step2[5], step2[6]); |
| 816 const __m128i out_12_0 = _mm_unpacklo_epi16(step2[5], step2[6]); |
| 817 const __m128i out_12_1 = _mm_unpackhi_epi16(step2[5], step2[6]); |
| 818 const __m128i out_28_0 = _mm_unpacklo_epi16(step2[4], step2[7]); |
| 819 const __m128i out_28_1 = _mm_unpackhi_epi16(step2[4], step2[7]); |
| 820 const __m128i out_04_2 = _mm_madd_epi16(out_04_0, k__cospi_p28_p04); |
| 821 const __m128i out_04_3 = _mm_madd_epi16(out_04_1, k__cospi_p28_p04); |
| 822 const __m128i out_20_2 = _mm_madd_epi16(out_20_0, k__cospi_p12_p20); |
| 823 const __m128i out_20_3 = _mm_madd_epi16(out_20_1, k__cospi_p12_p20); |
| 824 const __m128i out_12_2 = _mm_madd_epi16(out_12_0, k__cospi_m20_p12); |
| 825 const __m128i out_12_3 = _mm_madd_epi16(out_12_1, k__cospi_m20_p12); |
| 826 const __m128i out_28_2 = _mm_madd_epi16(out_28_0, k__cospi_m04_p28); |
| 827 const __m128i out_28_3 = _mm_madd_epi16(out_28_1, k__cospi_m04_p28); |
| 828 // dct_const_round_shift |
| 829 const __m128i out_04_4 = _mm_add_epi32(out_04_2, k__DCT_CONST_ROUNDING); |
| 830 const __m128i out_04_5 = _mm_add_epi32(out_04_3, k__DCT_CONST_ROUNDING); |
| 831 const __m128i out_20_4 = _mm_add_epi32(out_20_2, k__DCT_CONST_ROUNDING); |
| 832 const __m128i out_20_5 = _mm_add_epi32(out_20_3, k__DCT_CONST_ROUNDING); |
| 833 const __m128i out_12_4 = _mm_add_epi32(out_12_2, k__DCT_CONST_ROUNDING); |
| 834 const __m128i out_12_5 = _mm_add_epi32(out_12_3, k__DCT_CONST_ROUNDING); |
| 835 const __m128i out_28_4 = _mm_add_epi32(out_28_2, k__DCT_CONST_ROUNDING); |
| 836 const __m128i out_28_5 = _mm_add_epi32(out_28_3, k__DCT_CONST_ROUNDING); |
| 837 const __m128i out_04_6 = _mm_srai_epi32(out_04_4, DCT_CONST_BITS); |
| 838 const __m128i out_04_7 = _mm_srai_epi32(out_04_5, DCT_CONST_BITS); |
| 839 const __m128i out_20_6 = _mm_srai_epi32(out_20_4, DCT_CONST_BITS); |
| 840 const __m128i out_20_7 = _mm_srai_epi32(out_20_5, DCT_CONST_BITS); |
| 841 const __m128i out_12_6 = _mm_srai_epi32(out_12_4, DCT_CONST_BITS); |
| 842 const __m128i out_12_7 = _mm_srai_epi32(out_12_5, DCT_CONST_BITS); |
| 843 const __m128i out_28_6 = _mm_srai_epi32(out_28_4, DCT_CONST_BITS); |
| 844 const __m128i out_28_7 = _mm_srai_epi32(out_28_5, DCT_CONST_BITS); |
| 845 // Combine |
| 846 out[ 4] = _mm_packs_epi32(out_04_6, out_04_7); |
| 847 out[20] = _mm_packs_epi32(out_20_6, out_20_7); |
| 848 out[12] = _mm_packs_epi32(out_12_6, out_12_7); |
| 849 out[28] = _mm_packs_epi32(out_28_6, out_28_7); |
| 850 } |
| 851 { |
| 852 step3[ 8] = _mm_add_epi16(step2[ 9], step1[ 8]); |
| 853 step3[ 9] = _mm_sub_epi16(step1[ 8], step2[ 9]); |
| 854 step3[10] = _mm_sub_epi16(step1[11], step2[10]); |
| 855 step3[11] = _mm_add_epi16(step2[10], step1[11]); |
| 856 step3[12] = _mm_add_epi16(step2[13], step1[12]); |
| 857 step3[13] = _mm_sub_epi16(step1[12], step2[13]); |
| 858 step3[14] = _mm_sub_epi16(step1[15], step2[14]); |
| 859 step3[15] = _mm_add_epi16(step2[14], step1[15]); |
| 860 } |
| 861 { |
| 862 const __m128i s3_17_0 = _mm_unpacklo_epi16(step2[17], step2[30]); |
| 863 const __m128i s3_17_1 = _mm_unpackhi_epi16(step2[17], step2[30]); |
| 864 const __m128i s3_18_0 = _mm_unpacklo_epi16(step2[18], step2[29]); |
| 865 const __m128i s3_18_1 = _mm_unpackhi_epi16(step2[18], step2[29]); |
| 866 const __m128i s3_21_0 = _mm_unpacklo_epi16(step2[21], step2[26]); |
| 867 const __m128i s3_21_1 = _mm_unpackhi_epi16(step2[21], step2[26]); |
| 868 const __m128i s3_22_0 = _mm_unpacklo_epi16(step2[22], step2[25]); |
| 869 const __m128i s3_22_1 = _mm_unpackhi_epi16(step2[22], step2[25]); |
| 870 const __m128i s3_17_2 = _mm_madd_epi16(s3_17_0, k__cospi_m04_p28); |
| 871 const __m128i s3_17_3 = _mm_madd_epi16(s3_17_1, k__cospi_m04_p28); |
| 872 const __m128i s3_18_2 = _mm_madd_epi16(s3_18_0, k__cospi_m28_m04); |
| 873 const __m128i s3_18_3 = _mm_madd_epi16(s3_18_1, k__cospi_m28_m04); |
| 874 const __m128i s3_21_2 = _mm_madd_epi16(s3_21_0, k__cospi_m20_p12); |
| 875 const __m128i s3_21_3 = _mm_madd_epi16(s3_21_1, k__cospi_m20_p12); |
| 876 const __m128i s3_22_2 = _mm_madd_epi16(s3_22_0, k__cospi_m12_m20); |
| 877 const __m128i s3_22_3 = _mm_madd_epi16(s3_22_1, k__cospi_m12_m20); |
| 878 const __m128i s3_25_2 = _mm_madd_epi16(s3_22_0, k__cospi_m20_p12); |
| 879 const __m128i s3_25_3 = _mm_madd_epi16(s3_22_1, k__cospi_m20_p12); |
| 880 const __m128i s3_26_2 = _mm_madd_epi16(s3_21_0, k__cospi_p12_p20); |
| 881 const __m128i s3_26_3 = _mm_madd_epi16(s3_21_1, k__cospi_p12_p20); |
| 882 const __m128i s3_29_2 = _mm_madd_epi16(s3_18_0, k__cospi_m04_p28); |
| 883 const __m128i s3_29_3 = _mm_madd_epi16(s3_18_1, k__cospi_m04_p28); |
| 884 const __m128i s3_30_2 = _mm_madd_epi16(s3_17_0, k__cospi_p28_p04); |
| 885 const __m128i s3_30_3 = _mm_madd_epi16(s3_17_1, k__cospi_p28_p04); |
| 886 // dct_const_round_shift |
| 887 const __m128i s3_17_4 = _mm_add_epi32(s3_17_2, k__DCT_CONST_ROUNDING); |
| 888 const __m128i s3_17_5 = _mm_add_epi32(s3_17_3, k__DCT_CONST_ROUNDING); |
| 889 const __m128i s3_18_4 = _mm_add_epi32(s3_18_2, k__DCT_CONST_ROUNDING); |
| 890 const __m128i s3_18_5 = _mm_add_epi32(s3_18_3, k__DCT_CONST_ROUNDING); |
| 891 const __m128i s3_21_4 = _mm_add_epi32(s3_21_2, k__DCT_CONST_ROUNDING); |
| 892 const __m128i s3_21_5 = _mm_add_epi32(s3_21_3, k__DCT_CONST_ROUNDING); |
| 893 const __m128i s3_22_4 = _mm_add_epi32(s3_22_2, k__DCT_CONST_ROUNDING); |
| 894 const __m128i s3_22_5 = _mm_add_epi32(s3_22_3, k__DCT_CONST_ROUNDING); |
| 895 const __m128i s3_17_6 = _mm_srai_epi32(s3_17_4, DCT_CONST_BITS); |
| 896 const __m128i s3_17_7 = _mm_srai_epi32(s3_17_5, DCT_CONST_BITS); |
| 897 const __m128i s3_18_6 = _mm_srai_epi32(s3_18_4, DCT_CONST_BITS); |
| 898 const __m128i s3_18_7 = _mm_srai_epi32(s3_18_5, DCT_CONST_BITS); |
| 899 const __m128i s3_21_6 = _mm_srai_epi32(s3_21_4, DCT_CONST_BITS); |
| 900 const __m128i s3_21_7 = _mm_srai_epi32(s3_21_5, DCT_CONST_BITS); |
| 901 const __m128i s3_22_6 = _mm_srai_epi32(s3_22_4, DCT_CONST_BITS); |
| 902 const __m128i s3_22_7 = _mm_srai_epi32(s3_22_5, DCT_CONST_BITS); |
| 903 const __m128i s3_25_4 = _mm_add_epi32(s3_25_2, k__DCT_CONST_ROUNDING); |
| 904 const __m128i s3_25_5 = _mm_add_epi32(s3_25_3, k__DCT_CONST_ROUNDING); |
| 905 const __m128i s3_26_4 = _mm_add_epi32(s3_26_2, k__DCT_CONST_ROUNDING); |
| 906 const __m128i s3_26_5 = _mm_add_epi32(s3_26_3, k__DCT_CONST_ROUNDING); |
| 907 const __m128i s3_29_4 = _mm_add_epi32(s3_29_2, k__DCT_CONST_ROUNDING); |
| 908 const __m128i s3_29_5 = _mm_add_epi32(s3_29_3, k__DCT_CONST_ROUNDING); |
| 909 const __m128i s3_30_4 = _mm_add_epi32(s3_30_2, k__DCT_CONST_ROUNDING); |
| 910 const __m128i s3_30_5 = _mm_add_epi32(s3_30_3, k__DCT_CONST_ROUNDING); |
| 911 const __m128i s3_25_6 = _mm_srai_epi32(s3_25_4, DCT_CONST_BITS); |
| 912 const __m128i s3_25_7 = _mm_srai_epi32(s3_25_5, DCT_CONST_BITS); |
| 913 const __m128i s3_26_6 = _mm_srai_epi32(s3_26_4, DCT_CONST_BITS); |
| 914 const __m128i s3_26_7 = _mm_srai_epi32(s3_26_5, DCT_CONST_BITS); |
| 915 const __m128i s3_29_6 = _mm_srai_epi32(s3_29_4, DCT_CONST_BITS); |
| 916 const __m128i s3_29_7 = _mm_srai_epi32(s3_29_5, DCT_CONST_BITS); |
| 917 const __m128i s3_30_6 = _mm_srai_epi32(s3_30_4, DCT_CONST_BITS); |
| 918 const __m128i s3_30_7 = _mm_srai_epi32(s3_30_5, DCT_CONST_BITS); |
| 919 // Combine |
| 920 step3[17] = _mm_packs_epi32(s3_17_6, s3_17_7); |
| 921 step3[18] = _mm_packs_epi32(s3_18_6, s3_18_7); |
| 922 step3[21] = _mm_packs_epi32(s3_21_6, s3_21_7); |
| 923 step3[22] = _mm_packs_epi32(s3_22_6, s3_22_7); |
| 925 step3[25] = _mm_packs_epi32(s3_25_6, s3_25_7); |
| 926 step3[26] = _mm_packs_epi32(s3_26_6, s3_26_7); |
| 927 step3[29] = _mm_packs_epi32(s3_29_6, s3_29_7); |
| 928 step3[30] = _mm_packs_epi32(s3_30_6, s3_30_7); |
| 929 } |
| 930 // Stage 7 |
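| // Outputs 2, 6, 10, 14, 18, 22, 26, 30 come from rows 8-15; rows 16-31 |
| // get their final butterflies ahead of the stage that produces the |
| // odd-indexed outputs. |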
| 931 { |
| 932 const __m128i out_02_0 = _mm_unpacklo_epi16(step3[ 8], step3[15]); |
| 933 const __m128i out_02_1 = _mm_unpackhi_epi16(step3[ 8], step3[15]); |
| 934 const __m128i out_18_0 = _mm_unpacklo_epi16(step3[ 9], step3[14]); |
| 935 const __m128i out_18_1 = _mm_unpackhi_epi16(step3[ 9], step3[14]); |
| 936 const __m128i out_10_0 = _mm_unpacklo_epi16(step3[10], step3[13]); |
| 937 const __m128i out_10_1 = _mm_unpackhi_epi16(step3[10], step3[13]); |
| 938 const __m128i out_26_0 = _mm_unpacklo_epi16(step3[11], step3[12]); |
| 939 const __m128i out_26_1 = _mm_unpackhi_epi16(step3[11], step3[12]); |
| 940 const __m128i out_02_2 = _mm_madd_epi16(out_02_0, k__cospi_p30_p02); |
| 941 const __m128i out_02_3 = _mm_madd_epi16(out_02_1, k__cospi_p30_p02); |
| 942 const __m128i out_18_2 = _mm_madd_epi16(out_18_0, k__cospi_p14_p18); |
| 943 const __m128i out_18_3 = _mm_madd_epi16(out_18_1, k__cospi_p14_p18); |
| 944 const __m128i out_10_2 = _mm_madd_epi16(out_10_0, k__cospi_p22_p10); |
| 945 const __m128i out_10_3 = _mm_madd_epi16(out_10_1, k__cospi_p22_p10); |
| 946 const __m128i out_26_2 = _mm_madd_epi16(out_26_0, k__cospi_p06_p26); |
| 947 const __m128i out_26_3 = _mm_madd_epi16(out_26_1, k__cospi_p06_p26); |
| 948 const __m128i out_06_2 = _mm_madd_epi16(out_26_0, k__cospi_m26_p06); |
| 949 const __m128i out_06_3 = _mm_madd_epi16(out_26_1, k__cospi_m26_p06); |
| 950 const __m128i out_22_2 = _mm_madd_epi16(out_10_0, k__cospi_m10_p22); |
| 951 const __m128i out_22_3 = _mm_madd_epi16(out_10_1, k__cospi_m10_p22); |
| 952 const __m128i out_14_2 = _mm_madd_epi16(out_18_0, k__cospi_m18_p14); |
| 953 const __m128i out_14_3 = _mm_madd_epi16(out_18_1, k__cospi_m18_p14); |
| 954 const __m128i out_30_2 = _mm_madd_epi16(out_02_0, k__cospi_m02_p30); |
| 955 const __m128i out_30_3 = _mm_madd_epi16(out_02_1, k__cospi_m02_p30); |
| 956 // dct_const_round_shift |
| 957 const __m128i out_02_4 = _mm_add_epi32(out_02_2, k__DCT_CONST_ROUNDING); |
| 958 const __m128i out_02_5 = _mm_add_epi32(out_02_3, k__DCT_CONST_ROUNDING); |
| 959 const __m128i out_18_4 = _mm_add_epi32(out_18_2, k__DCT_CONST_ROUNDING); |
| 960 const __m128i out_18_5 = _mm_add_epi32(out_18_3, k__DCT_CONST_ROUNDING); |
| 961 const __m128i out_10_4 = _mm_add_epi32(out_10_2, k__DCT_CONST_ROUNDING); |
| 962 const __m128i out_10_5 = _mm_add_epi32(out_10_3, k__DCT_CONST_ROUNDING); |
| 963 const __m128i out_26_4 = _mm_add_epi32(out_26_2, k__DCT_CONST_ROUNDING); |
| 964 const __m128i out_26_5 = _mm_add_epi32(out_26_3, k__DCT_CONST_ROUNDING); |
| 965 const __m128i out_06_4 = _mm_add_epi32(out_06_2, k__DCT_CONST_ROUNDING); |
| 966 const __m128i out_06_5 = _mm_add_epi32(out_06_3, k__DCT_CONST_ROUNDING); |
| 967 const __m128i out_22_4 = _mm_add_epi32(out_22_2, k__DCT_CONST_ROUNDING); |
| 968 const __m128i out_22_5 = _mm_add_epi32(out_22_3, k__DCT_CONST_ROUNDING); |
| 969 const __m128i out_14_4 = _mm_add_epi32(out_14_2, k__DCT_CONST_ROUNDING); |
| 970 const __m128i out_14_5 = _mm_add_epi32(out_14_3, k__DCT_CONST_ROUNDING); |
| 971 const __m128i out_30_4 = _mm_add_epi32(out_30_2, k__DCT_CONST_ROUNDING); |
| 972 const __m128i out_30_5 = _mm_add_epi32(out_30_3, k__DCT_CONST_ROUNDING); |
| 973 const __m128i out_02_6 = _mm_srai_epi32(out_02_4, DCT_CONST_BITS); |
| 974 const __m128i out_02_7 = _mm_srai_epi32(out_02_5, DCT_CONST_BITS); |
| 975 const __m128i out_18_6 = _mm_srai_epi32(out_18_4, DCT_CONST_BITS); |
| 976 const __m128i out_18_7 = _mm_srai_epi32(out_18_5, DCT_CONST_BITS); |
| 977 const __m128i out_10_6 = _mm_srai_epi32(out_10_4, DCT_CONST_BITS); |
| 978 const __m128i out_10_7 = _mm_srai_epi32(out_10_5, DCT_CONST_BITS); |
| 979 const __m128i out_26_6 = _mm_srai_epi32(out_26_4, DCT_CONST_BITS); |
| 980 const __m128i out_26_7 = _mm_srai_epi32(out_26_5, DCT_CONST_BITS); |
| 981 const __m128i out_06_6 = _mm_srai_epi32(out_06_4, DCT_CONST_BITS); |
| 982 const __m128i out_06_7 = _mm_srai_epi32(out_06_5, DCT_CONST_BITS); |
| 983 const __m128i out_22_6 = _mm_srai_epi32(out_22_4, DCT_CONST_BITS); |
| 984 const __m128i out_22_7 = _mm_srai_epi32(out_22_5, DCT_CONST_BITS); |
| 985 const __m128i out_14_6 = _mm_srai_epi32(out_14_4, DCT_CONST_BITS); |
| 986 const __m128i out_14_7 = _mm_srai_epi32(out_14_5, DCT_CONST_BITS); |
| 987 const __m128i out_30_6 = _mm_srai_epi32(out_30_4, DCT_CONST_BITS); |
| 988 const __m128i out_30_7 = _mm_srai_epi32(out_30_5, DCT_CONST_BITS); |
| 989 // Combine |
| 990 out[ 2] = _mm_packs_epi32(out_02_6, out_02_7); |
| 991 out[18] = _mm_packs_epi32(out_18_6, out_18_7); |
| 992 out[10] = _mm_packs_epi32(out_10_6, out_10_7); |
| 993 out[26] = _mm_packs_epi32(out_26_6, out_26_7); |
| 994 out[ 6] = _mm_packs_epi32(out_06_6, out_06_7); |
| 995 out[22] = _mm_packs_epi32(out_22_6, out_22_7); |
| 996 out[14] = _mm_packs_epi32(out_14_6, out_14_7); |
| 997 out[30] = _mm_packs_epi32(out_30_6, out_30_7); |
| 998 } |
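| // Stage 7 butterflies for the odd half (rows 16-31); the results feed |
| // the final stage below. |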
| 999 { |
| 1000 step1[16] = _mm_add_epi16(step3[17], step2[16]); |
| 1001 step1[17] = _mm_sub_epi16(step2[16], step3[17]); |
| 1002 step1[18] = _mm_sub_epi16(step2[19], step3[18]); |
| 1003 step1[19] = _mm_add_epi16(step3[18], step2[19]); |
| 1004 step1[20] = _mm_add_epi16(step3[21], step2[20]); |
| 1005 step1[21] = _mm_sub_epi16(step2[20], step3[21]); |
| 1006 step1[22] = _mm_sub_epi16(step2[23], step3[22]); |
| 1007 step1[23] = _mm_add_epi16(step3[22], step2[23]); |
| 1008 step1[24] = _mm_add_epi16(step3[25], step2[24]); |
| 1009 step1[25] = _mm_sub_epi16(step2[24], step3[25]); |
| 1010 step1[26] = _mm_sub_epi16(step2[27], step3[26]); |
| 1011 step1[27] = _mm_add_epi16(step3[26], step2[27]); |
| 1012 step1[28] = _mm_add_epi16(step3[29], step2[28]); |
| 1013 step1[29] = _mm_sub_epi16(step2[28], step3[29]); |
| 1014 step1[30] = _mm_sub_epi16(step2[31], step3[30]); |
| 1015 step1[31] = _mm_add_epi16(step3[30], step2[31]); |
| 1016 } |
| 1017 // Final stage: output indices are bit-reversed. |
| 1018 { |
| 1019 const __m128i out_01_0 = _mm_unpacklo_epi16(step1[16], step1[31]); |
| 1020 const __m128i out_01_1 = _mm_unpackhi_epi16(step1[16], step1[31]); |
| 1021 const __m128i out_17_0 = _mm_unpacklo_epi16(step1[17], step1[30]); |
| 1022 const __m128i out_17_1 = _mm_unpackhi_epi16(step1[17], step1[30]); |
| 1023 const __m128i out_09_0 = _mm_unpacklo_epi16(step1[18], step1[29]); |
| 1024 const __m128i out_09_1 = _mm_unpackhi_epi16(step1[18], step1[29]); |
| 1025 const __m128i out_25_0 = _mm_unpacklo_epi16(step1[19], step1[28]); |
| 1026 const __m128i out_25_1 = _mm_unpackhi_epi16(step1[19], step1[28]); |
| 1027 const __m128i out_01_2 = _mm_madd_epi16(out_01_0, k__cospi_p31_p01); |
| 1028 const __m128i out_01_3 = _mm_madd_epi16(out_01_1, k__cospi_p31_p01); |
| 1029 const __m128i out_17_2 = _mm_madd_epi16(out_17_0, k__cospi_p15_p17); |
| 1030 const __m128i out_17_3 = _mm_madd_epi16(out_17_1, k__cospi_p15_p17); |
| 1031 const __m128i out_09_2 = _mm_madd_epi16(out_09_0, k__cospi_p23_p09); |
| 1032 const __m128i out_09_3 = _mm_madd_epi16(out_09_1, k__cospi_p23_p09); |
| 1033 const __m128i out_25_2 = _mm_madd_epi16(out_25_0, k__cospi_p07_p25); |
| 1034 const __m128i out_25_3 = _mm_madd_epi16(out_25_1, k__cospi_p07_p25); |
| 1035 const __m128i out_07_2 = _mm_madd_epi16(out_25_0, k__cospi_m25_p07); |
| 1036 const __m128i out_07_3 = _mm_madd_epi16(out_25_1, k__cospi_m25_p07); |
| 1037 const __m128i out_23_2 = _mm_madd_epi16(out_09_0, k__cospi_m09_p23); |
| 1038 const __m128i out_23_3 = _mm_madd_epi16(out_09_1, k__cospi_m09_p23); |
| 1039 const __m128i out_15_2 = _mm_madd_epi16(out_17_0, k__cospi_m17_p15); |
| 1040 const __m128i out_15_3 = _mm_madd_epi16(out_17_1, k__cospi_m17_p15); |
| 1041 const __m128i out_31_2 = _mm_madd_epi16(out_01_0, k__cospi_m01_p31); |
| 1042 const __m128i out_31_3 = _mm_madd_epi16(out_01_1, k__cospi_m01_p31); |
| 1043 // dct_const_round_shift |
| 1044 const __m128i out_01_4 = _mm_add_epi32(out_01_2, k__DCT_CONST_ROUNDING); |
| 1045 const __m128i out_01_5 = _mm_add_epi32(out_01_3, k__DCT_CONST_ROUNDING); |
| 1046 const __m128i out_17_4 = _mm_add_epi32(out_17_2, k__DCT_CONST_ROUNDING); |
| 1047 const __m128i out_17_5 = _mm_add_epi32(out_17_3, k__DCT_CONST_ROUNDING); |
| 1048 const __m128i out_09_4 = _mm_add_epi32(out_09_2, k__DCT_CONST_ROUNDING); |
| 1049 const __m128i out_09_5 = _mm_add_epi32(out_09_3, k__DCT_CONST_ROUNDING); |
| 1050 const __m128i out_25_4 = _mm_add_epi32(out_25_2, k__DCT_CONST_ROUNDING); |
| 1051 const __m128i out_25_5 = _mm_add_epi32(out_25_3, k__DCT_CONST_ROUNDING); |
| 1052 const __m128i out_07_4 = _mm_add_epi32(out_07_2, k__DCT_CONST_ROUNDING); |
| 1053 const __m128i out_07_5 = _mm_add_epi32(out_07_3, k__DCT_CONST_ROUNDING); |
| 1054 const __m128i out_23_4 = _mm_add_epi32(out_23_2, k__DCT_CONST_ROUNDING); |
| 1055 const __m128i out_23_5 = _mm_add_epi32(out_23_3, k__DCT_CONST_ROUNDING); |
| 1056 const __m128i out_15_4 = _mm_add_epi32(out_15_2, k__DCT_CONST_ROUNDING); |
| 1057 const __m128i out_15_5 = _mm_add_epi32(out_15_3, k__DCT_CONST_ROUNDING); |
| 1058 const __m128i out_31_4 = _mm_add_epi32(out_31_2, k__DCT_CONST_ROUNDING); |
| 1059 const __m128i out_31_5 = _mm_add_epi32(out_31_3, k__DCT_CONST_ROUNDING); |
| 1060 const __m128i out_01_6 = _mm_srai_epi32(out_01_4, DCT_CONST_BITS); |
| 1061 const __m128i out_01_7 = _mm_srai_epi32(out_01_5, DCT_CONST_BITS); |
| 1062 const __m128i out_17_6 = _mm_srai_epi32(out_17_4, DCT_CONST_BITS); |
| 1063 const __m128i out_17_7 = _mm_srai_epi32(out_17_5, DCT_CONST_BITS); |
| 1064 const __m128i out_09_6 = _mm_srai_epi32(out_09_4, DCT_CONST_BITS); |
| 1065 const __m128i out_09_7 = _mm_srai_epi32(out_09_5, DCT_CONST_BITS); |
| 1066 const __m128i out_25_6 = _mm_srai_epi32(out_25_4, DCT_CONST_BITS); |
| 1067 const __m128i out_25_7 = _mm_srai_epi32(out_25_5, DCT_CONST_BITS); |
| 1068 const __m128i out_07_6 = _mm_srai_epi32(out_07_4, DCT_CONST_BITS); |
| 1069 const __m128i out_07_7 = _mm_srai_epi32(out_07_5, DCT_CONST_BITS); |
| 1070 const __m128i out_23_6 = _mm_srai_epi32(out_23_4, DCT_CONST_BITS); |
| 1071 const __m128i out_23_7 = _mm_srai_epi32(out_23_5, DCT_CONST_BITS); |
| 1072 const __m128i out_15_6 = _mm_srai_epi32(out_15_4, DCT_CONST_BITS); |
| 1073 const __m128i out_15_7 = _mm_srai_epi32(out_15_5, DCT_CONST_BITS); |
| 1074 const __m128i out_31_6 = _mm_srai_epi32(out_31_4, DCT_CONST_BITS); |
| 1075 const __m128i out_31_7 = _mm_srai_epi32(out_31_5, DCT_CONST_BITS); |
| 1076 // Combine |
| 1077 out[ 1] = _mm_packs_epi32(out_01_6, out_01_7); |
| 1078 out[17] = _mm_packs_epi32(out_17_6, out_17_7); |
| 1079 out[ 9] = _mm_packs_epi32(out_09_6, out_09_7); |
| 1080 out[25] = _mm_packs_epi32(out_25_6, out_25_7); |
| 1081 out[ 7] = _mm_packs_epi32(out_07_6, out_07_7); |
| 1082 out[23] = _mm_packs_epi32(out_23_6, out_23_7); |
| 1083 out[15] = _mm_packs_epi32(out_15_6, out_15_7); |
| 1084 out[31] = _mm_packs_epi32(out_31_6, out_31_7); |
| 1085 } |
| 1086 { |
| 1087 const __m128i out_05_0 = _mm_unpacklo_epi16(step1[20], step1[27]); |
| 1088 const __m128i out_05_1 = _mm_unpackhi_epi16(step1[20], step1[27]); |
| 1089 const __m128i out_21_0 = _mm_unpacklo_epi16(step1[21], step1[26]); |
| 1090 const __m128i out_21_1 = _mm_unpackhi_epi16(step1[21], step1[26]); |
| 1091 const __m128i out_13_0 = _mm_unpacklo_epi16(step1[22], step1[25]); |
| 1092 const __m128i out_13_1 = _mm_unpackhi_epi16(step1[22], step1[25]); |
| 1093 const __m128i out_29_0 = _mm_unpacklo_epi16(step1[23], step1[24]); |
| 1094 const __m128i out_29_1 = _mm_unpackhi_epi16(step1[23], step1[24]); |
| 1095 const __m128i out_05_2 = _mm_madd_epi16(out_05_0, k__cospi_p27_p05); |
| 1096 const __m128i out_05_3 = _mm_madd_epi16(out_05_1, k__cospi_p27_p05); |
| 1097 const __m128i out_21_2 = _mm_madd_epi16(out_21_0, k__cospi_p11_p21); |
| 1098 const __m128i out_21_3 = _mm_madd_epi16(out_21_1, k__cospi_p11_p21); |
| 1099 const __m128i out_13_2 = _mm_madd_epi16(out_13_0, k__cospi_p19_p13); |
| 1100 const __m128i out_13_3 = _mm_madd_epi16(out_13_1, k__cospi_p19_p13); |
| 1101 const __m128i out_29_2 = _mm_madd_epi16(out_29_0, k__cospi_p03_p29); |
| 1102 const __m128i out_29_3 = _mm_madd_epi16(out_29_1, k__cospi_p03_p29); |
| 1103 const __m128i out_03_2 = _mm_madd_epi16(out_29_0, k__cospi_m29_p03); |
| 1104 const __m128i out_03_3 = _mm_madd_epi16(out_29_1, k__cospi_m29_p03); |
| 1105 const __m128i out_19_2 = _mm_madd_epi16(out_13_0, k__cospi_m13_p19); |
| 1106 const __m128i out_19_3 = _mm_madd_epi16(out_13_1, k__cospi_m13_p19); |
| 1107 const __m128i out_11_2 = _mm_madd_epi16(out_21_0, k__cospi_m21_p11); |
| 1108 const __m128i out_11_3 = _mm_madd_epi16(out_21_1, k__cospi_m21_p11); |
| 1109 const __m128i out_27_2 = _mm_madd_epi16(out_05_0, k__cospi_m05_p27); |
| 1110 const __m128i out_27_3 = _mm_madd_epi16(out_05_1, k__cospi_m05_p27); |
| 1111 // dct_const_round_shift |
| 1112 const __m128i out_05_4 = _mm_add_epi32(out_05_2, k__DCT_CONST_ROUNDING); |
| 1113 const __m128i out_05_5 = _mm_add_epi32(out_05_3, k__DCT_CONST_ROUNDING); |
| 1114 const __m128i out_21_4 = _mm_add_epi32(out_21_2, k__DCT_CONST_ROUNDING); |
| 1115 const __m128i out_21_5 = _mm_add_epi32(out_21_3, k__DCT_CONST_ROUNDING); |
| 1116 const __m128i out_13_4 = _mm_add_epi32(out_13_2, k__DCT_CONST_ROUNDING); |
| 1117 const __m128i out_13_5 = _mm_add_epi32(out_13_3, k__DCT_CONST_ROUNDING); |
| 1118 const __m128i out_29_4 = _mm_add_epi32(out_29_2, k__DCT_CONST_ROUNDING); |
| 1119 const __m128i out_29_5 = _mm_add_epi32(out_29_3, k__DCT_CONST_ROUNDING); |
| 1120 const __m128i out_03_4 = _mm_add_epi32(out_03_2, k__DCT_CONST_ROUNDING); |
| 1121 const __m128i out_03_5 = _mm_add_epi32(out_03_3, k__DCT_CONST_ROUNDING); |
| 1122 const __m128i out_19_4 = _mm_add_epi32(out_19_2, k__DCT_CONST_ROUNDING); |
| 1123 const __m128i out_19_5 = _mm_add_epi32(out_19_3, k__DCT_CONST_ROUNDING); |
| 1124 const __m128i out_11_4 = _mm_add_epi32(out_11_2, k__DCT_CONST_ROUNDING); |
| 1125 const __m128i out_11_5 = _mm_add_epi32(out_11_3, k__DCT_CONST_ROUNDING); |
| 1126 const __m128i out_27_4 = _mm_add_epi32(out_27_2, k__DCT_CONST_ROUNDING); |
| 1127 const __m128i out_27_5 = _mm_add_epi32(out_27_3, k__DCT_CONST_ROUNDING); |
| 1128 const __m128i out_05_6 = _mm_srai_epi32(out_05_4, DCT_CONST_BITS); |
| 1129 const __m128i out_05_7 = _mm_srai_epi32(out_05_5, DCT_CONST_BITS); |
| 1130 const __m128i out_21_6 = _mm_srai_epi32(out_21_4, DCT_CONST_BITS); |
| 1131 const __m128i out_21_7 = _mm_srai_epi32(out_21_5, DCT_CONST_BITS); |
| 1132 const __m128i out_13_6 = _mm_srai_epi32(out_13_4, DCT_CONST_BITS); |
| 1133 const __m128i out_13_7 = _mm_srai_epi32(out_13_5, DCT_CONST_BITS); |
| 1134 const __m128i out_29_6 = _mm_srai_epi32(out_29_4, DCT_CONST_BITS); |
| 1135 const __m128i out_29_7 = _mm_srai_epi32(out_29_5, DCT_CONST_BITS); |
| 1136 const __m128i out_03_6 = _mm_srai_epi32(out_03_4, DCT_CONST_BITS); |
| 1137 const __m128i out_03_7 = _mm_srai_epi32(out_03_5, DCT_CONST_BITS); |
| 1138 const __m128i out_19_6 = _mm_srai_epi32(out_19_4, DCT_CONST_BITS); |
| 1139 const __m128i out_19_7 = _mm_srai_epi32(out_19_5, DCT_CONST_BITS); |
| 1140 const __m128i out_11_6 = _mm_srai_epi32(out_11_4, DCT_CONST_BITS); |
| 1141 const __m128i out_11_7 = _mm_srai_epi32(out_11_5, DCT_CONST_BITS); |
| 1142 const __m128i out_27_6 = _mm_srai_epi32(out_27_4, DCT_CONST_BITS); |
| 1143 const __m128i out_27_7 = _mm_srai_epi32(out_27_5, DCT_CONST_BITS); |
| 1144 // Combine |
| 1145 out[ 5] = _mm_packs_epi32(out_05_6, out_05_7); |
| 1146 out[21] = _mm_packs_epi32(out_21_6, out_21_7); |
| 1147 out[13] = _mm_packs_epi32(out_13_6, out_13_7); |
| 1148 out[29] = _mm_packs_epi32(out_29_6, out_29_7); |
| 1149 out[ 3] = _mm_packs_epi32(out_03_6, out_03_7); |
| 1150 out[19] = _mm_packs_epi32(out_19_6, out_19_7); |
| 1151 out[11] = _mm_packs_epi32(out_11_6, out_11_7); |
| 1152 out[27] = _mm_packs_epi32(out_27_6, out_27_7); |
| 1153 } |
| 1154 #if FDCT32x32_HIGH_PRECISION |
| 1155 } else { |
| 1156 __m128i lstep1[64], lstep2[64], lstep3[64]; |
| 1157 __m128i u[32], v[32], sign[16]; |
| 1158 const __m128i mask16 = _mm_set1_epi32(0x80008000); |
| 1159 const __m128i K32One = _mm_set1_epi32(1); |
| 1160 // start using 32-bit operations |
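| // From here on, each 16-bit register is split into a pair of 32-bit |
| // registers (lstepN[2*i] holds the low half of stepN[i], lstepN[2*i+1] |
| // the high half) so the remaining stages cannot overflow the 16-bit |
| // intermediates. |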
| 1161 // stage 4 |
| 1162 { |
| 1163 // Expand to 32-bit precision prior to the addition operations. |
| 1164 lstep3[ 0] = k_cvtlo_epi16(step3[ 0], mask16, kZero); |
| 1165 lstep3[ 1] = k_cvthi_epi16(step3[ 0], mask16, kZero); |
| 1166 lstep3[ 2] = k_cvtlo_epi16(step3[ 1], mask16, kZero); |
| 1167 lstep3[ 3] = k_cvthi_epi16(step3[ 1], mask16, kZero); |
| 1168 lstep3[ 4] = k_cvtlo_epi16(step3[ 2], mask16, kZero); |
| 1169 lstep3[ 5] = k_cvthi_epi16(step3[ 2], mask16, kZero); |
| 1170 lstep3[ 6] = k_cvtlo_epi16(step3[ 3], mask16, kZero); |
| 1171 lstep3[ 7] = k_cvthi_epi16(step3[ 3], mask16, kZero); |
| 1172 lstep3[20] = k_cvtlo_epi16(step3[10], mask16, kZero); |
| 1173 lstep3[21] = k_cvthi_epi16(step3[10], mask16, kZero); |
| 1174 lstep3[22] = k_cvtlo_epi16(step3[11], mask16, kZero); |
| 1175 lstep3[23] = k_cvthi_epi16(step3[11], mask16, kZero); |
| 1176 lstep3[24] = k_cvtlo_epi16(step3[12], mask16, kZero); |
| 1177 lstep3[25] = k_cvthi_epi16(step3[12], mask16, kZero); |
| 1178 lstep3[26] = k_cvtlo_epi16(step3[13], mask16, kZero); |
| 1179 lstep3[27] = k_cvthi_epi16(step3[13], mask16, kZero); |
| 1180 lstep2[16] = k_cvtlo_epi16(step2[ 8], mask16, kZero); |
| 1181 lstep2[17] = k_cvthi_epi16(step2[ 8], mask16, kZero); |
| 1182 lstep2[18] = k_cvtlo_epi16(step2[ 9], mask16, kZero); |
| 1183 lstep2[19] = k_cvthi_epi16(step2[ 9], mask16, kZero); |
| 1184 lstep2[28] = k_cvtlo_epi16(step2[14], mask16, kZero); |
| 1185 lstep2[29] = k_cvthi_epi16(step2[14], mask16, kZero); |
| 1186 lstep2[30] = k_cvtlo_epi16(step2[15], mask16, kZero); |
| 1187 lstep2[31] = k_cvthi_epi16(step2[15], mask16, kZero); |
| 1188 |
| 1189 lstep1[ 0] = _mm_add_epi32(lstep3[ 6], lstep3[ 0]); |
| 1190 lstep1[ 1] = _mm_add_epi32(lstep3[ 7], lstep3[ 1]); |
| 1191 lstep1[ 2] = _mm_add_epi32(lstep3[ 4], lstep3[ 2]); |
| 1192 lstep1[ 3] = _mm_add_epi32(lstep3[ 5], lstep3[ 3]); |
| 1193 lstep1[ 4] = _mm_sub_epi32(lstep3[ 2], lstep3[ 4]); |
| 1194 lstep1[ 5] = _mm_sub_epi32(lstep3[ 3], lstep3[ 5]); |
| 1195 lstep1[ 6] = _mm_sub_epi32(lstep3[ 0], lstep3[ 6]); |
| 1196 lstep1[ 7] = _mm_sub_epi32(lstep3[ 1], lstep3[ 7]); |
| 1197 lstep1[16] = _mm_add_epi32(lstep3[22], lstep2[16]); |
| 1198 lstep1[17] = _mm_add_epi32(lstep3[23], lstep2[17]); |
| 1199 lstep1[18] = _mm_add_epi32(lstep3[20], lstep2[18]); |
| 1200 lstep1[19] = _mm_add_epi32(lstep3[21], lstep2[19]); |
| 1201 lstep1[20] = _mm_sub_epi32(lstep2[18], lstep3[20]); |
| 1202 lstep1[21] = _mm_sub_epi32(lstep2[19], lstep3[21]); |
| 1203 lstep1[22] = _mm_sub_epi32(lstep2[16], lstep3[22]); |
| 1204 lstep1[23] = _mm_sub_epi32(lstep2[17], lstep3[23]); |
| 1205 lstep1[24] = _mm_sub_epi32(lstep2[30], lstep3[24]); |
| 1206 lstep1[25] = _mm_sub_epi32(lstep2[31], lstep3[25]); |
| 1207 lstep1[26] = _mm_sub_epi32(lstep2[28], lstep3[26]); |
| 1208 lstep1[27] = _mm_sub_epi32(lstep2[29], lstep3[27]); |
| 1209 lstep1[28] = _mm_add_epi32(lstep3[26], lstep2[28]); |
| 1210 lstep1[29] = _mm_add_epi32(lstep3[27], lstep2[29]); |
| 1211 lstep1[30] = _mm_add_epi32(lstep3[24], lstep2[30]); |
| 1212 lstep1[31] = _mm_add_epi32(lstep3[25], lstep2[31]); |
| 1213 } |
| 1214 { |
| 1215 const __m128i s1_05_0 = _mm_unpacklo_epi16(step3[6], step3[5]); |
| 1216 const __m128i s1_05_1 = _mm_unpackhi_epi16(step3[6], step3[5]); |
| 1217 const __m128i s1_05_2 = _mm_madd_epi16(s1_05_0, k__cospi_p16_m16); |
| 1218 const __m128i s1_05_3 = _mm_madd_epi16(s1_05_1, k__cospi_p16_m16); |
| 1219 const __m128i s1_06_2 = _mm_madd_epi16(s1_05_0, k__cospi_p16_p16); |
| 1220 const __m128i s1_06_3 = _mm_madd_epi16(s1_05_1, k__cospi_p16_p16); |
| 1221 // dct_const_round_shift |
| 1222 const __m128i s1_05_4 = _mm_add_epi32(s1_05_2, k__DCT_CONST_ROUNDING); |
| 1223 const __m128i s1_05_5 = _mm_add_epi32(s1_05_3, k__DCT_CONST_ROUNDING); |
| 1224 const __m128i s1_06_4 = _mm_add_epi32(s1_06_2, k__DCT_CONST_ROUNDING); |
| 1225 const __m128i s1_06_5 = _mm_add_epi32(s1_06_3, k__DCT_CONST_ROUNDING); |
| 1226 lstep1[10] = _mm_srai_epi32(s1_05_4, DCT_CONST_BITS); |
| 1227 lstep1[11] = _mm_srai_epi32(s1_05_5, DCT_CONST_BITS); |
| 1228 lstep1[12] = _mm_srai_epi32(s1_06_4, DCT_CONST_BITS); |
| 1229 lstep1[13] = _mm_srai_epi32(s1_06_5, DCT_CONST_BITS); |
| 1230 } |
| 1231 { |
| 1232 const __m128i s1_18_0 = _mm_unpacklo_epi16(step3[18], step3[29]); |
| 1233 const __m128i s1_18_1 = _mm_unpackhi_epi16(step3[18], step3[29]); |
| 1234 const __m128i s1_19_0 = _mm_unpacklo_epi16(step3[19], step3[28]); |
| 1235 const __m128i s1_19_1 = _mm_unpackhi_epi16(step3[19], step3[28]); |
| 1236 const __m128i s1_20_0 = _mm_unpacklo_epi16(step3[20], step3[27]); |
| 1237 const __m128i s1_20_1 = _mm_unpackhi_epi16(step3[20], step3[27]); |
| 1238 const __m128i s1_21_0 = _mm_unpacklo_epi16(step3[21], step3[26]); |
| 1239 const __m128i s1_21_1 = _mm_unpackhi_epi16(step3[21], step3[26]); |
| 1240 const __m128i s1_18_2 = _mm_madd_epi16(s1_18_0, k__cospi_m08_p24); |
| 1241 const __m128i s1_18_3 = _mm_madd_epi16(s1_18_1, k__cospi_m08_p24); |
| 1242 const __m128i s1_19_2 = _mm_madd_epi16(s1_19_0, k__cospi_m08_p24); |
| 1243 const __m128i s1_19_3 = _mm_madd_epi16(s1_19_1, k__cospi_m08_p24); |
| 1244 const __m128i s1_20_2 = _mm_madd_epi16(s1_20_0, k__cospi_m24_m08); |
| 1245 const __m128i s1_20_3 = _mm_madd_epi16(s1_20_1, k__cospi_m24_m08); |
| 1246 const __m128i s1_21_2 = _mm_madd_epi16(s1_21_0, k__cospi_m24_m08); |
| 1247 const __m128i s1_21_3 = _mm_madd_epi16(s1_21_1, k__cospi_m24_m08); |
| 1248 const __m128i s1_26_2 = _mm_madd_epi16(s1_21_0, k__cospi_m08_p24); |
| 1249 const __m128i s1_26_3 = _mm_madd_epi16(s1_21_1, k__cospi_m08_p24); |
| 1250 const __m128i s1_27_2 = _mm_madd_epi16(s1_20_0, k__cospi_m08_p24); |
| 1251 const __m128i s1_27_3 = _mm_madd_epi16(s1_20_1, k__cospi_m08_p24); |
| 1252 const __m128i s1_28_2 = _mm_madd_epi16(s1_19_0, k__cospi_p24_p08); |
| 1253 const __m128i s1_28_3 = _mm_madd_epi16(s1_19_1, k__cospi_p24_p08); |
| 1254 const __m128i s1_29_2 = _mm_madd_epi16(s1_18_0, k__cospi_p24_p08); |
| 1255 const __m128i s1_29_3 = _mm_madd_epi16(s1_18_1, k__cospi_p24_p08); |
| 1256 // dct_const_round_shift |
| 1257 const __m128i s1_18_4 = _mm_add_epi32(s1_18_2, k__DCT_CONST_ROUNDING); |
| 1258 const __m128i s1_18_5 = _mm_add_epi32(s1_18_3, k__DCT_CONST_ROUNDING); |
| 1259 const __m128i s1_19_4 = _mm_add_epi32(s1_19_2, k__DCT_CONST_ROUNDING); |
| 1260 const __m128i s1_19_5 = _mm_add_epi32(s1_19_3, k__DCT_CONST_ROUNDING); |
| 1261 const __m128i s1_20_4 = _mm_add_epi32(s1_20_2, k__DCT_CONST_ROUNDING); |
| 1262 const __m128i s1_20_5 = _mm_add_epi32(s1_20_3, k__DCT_CONST_ROUNDING); |
| 1263 const __m128i s1_21_4 = _mm_add_epi32(s1_21_2, k__DCT_CONST_ROUNDING); |
| 1264 const __m128i s1_21_5 = _mm_add_epi32(s1_21_3, k__DCT_CONST_ROUNDING); |
| 1265 const __m128i s1_26_4 = _mm_add_epi32(s1_26_2, k__DCT_CONST_ROUNDING); |
| 1266 const __m128i s1_26_5 = _mm_add_epi32(s1_26_3, k__DCT_CONST_ROUNDING); |
| 1267 const __m128i s1_27_4 = _mm_add_epi32(s1_27_2, k__DCT_CONST_ROUNDING); |
| 1268 const __m128i s1_27_5 = _mm_add_epi32(s1_27_3, k__DCT_CONST_ROUNDING); |
| 1269 const __m128i s1_28_4 = _mm_add_epi32(s1_28_2, k__DCT_CONST_ROUNDING); |
| 1270 const __m128i s1_28_5 = _mm_add_epi32(s1_28_3, k__DCT_CONST_ROUNDING); |
| 1271 const __m128i s1_29_4 = _mm_add_epi32(s1_29_2, k__DCT_CONST_ROUNDING); |
| 1272 const __m128i s1_29_5 = _mm_add_epi32(s1_29_3, k__DCT_CONST_ROUNDING); |
| 1273 lstep1[36] = _mm_srai_epi32(s1_18_4, DCT_CONST_BITS); |
| 1274 lstep1[37] = _mm_srai_epi32(s1_18_5, DCT_CONST_BITS); |
| 1275 lstep1[38] = _mm_srai_epi32(s1_19_4, DCT_CONST_BITS); |
| 1276 lstep1[39] = _mm_srai_epi32(s1_19_5, DCT_CONST_BITS); |
| 1277 lstep1[40] = _mm_srai_epi32(s1_20_4, DCT_CONST_BITS); |
| 1278 lstep1[41] = _mm_srai_epi32(s1_20_5, DCT_CONST_BITS); |
| 1279 lstep1[42] = _mm_srai_epi32(s1_21_4, DCT_CONST_BITS); |
| 1280 lstep1[43] = _mm_srai_epi32(s1_21_5, DCT_CONST_BITS); |
| 1281 lstep1[52] = _mm_srai_epi32(s1_26_4, DCT_CONST_BITS); |
| 1282 lstep1[53] = _mm_srai_epi32(s1_26_5, DCT_CONST_BITS); |
| 1283 lstep1[54] = _mm_srai_epi32(s1_27_4, DCT_CONST_BITS); |
| 1284 lstep1[55] = _mm_srai_epi32(s1_27_5, DCT_CONST_BITS); |
| 1285 lstep1[56] = _mm_srai_epi32(s1_28_4, DCT_CONST_BITS); |
| 1286 lstep1[57] = _mm_srai_epi32(s1_28_5, DCT_CONST_BITS); |
| 1287 lstep1[58] = _mm_srai_epi32(s1_29_4, DCT_CONST_BITS); |
| 1288 lstep1[59] = _mm_srai_epi32(s1_29_5, DCT_CONST_BITS); |
| 1289 } |
| 1290 // stage 5 |
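| // Stage 5 in 32 bits: butterfly recombinations followed by rotations by |
| // the (cospi_16, cospi_16) and (cospi_24, cospi_8) pairs; out[0], |
| // out[16], out[8] and out[24] are finalized here. |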
| 1291 { |
| 1292 lstep3[ 8] = k_cvtlo_epi16(step3[4], mask16, kZero); |
| 1293 lstep3[ 9] = k_cvthi_epi16(step3[4], mask16, kZero); |
| 1294 lstep3[14] = k_cvtlo_epi16(step3[7], mask16, kZero); |
| 1295 lstep3[15] = k_cvthi_epi16(step3[7], mask16, kZero); |
| 1296 |
| 1297 lstep2[ 8] = _mm_add_epi32(lstep1[10], lstep3[ 8]); |
| 1298 lstep2[ 9] = _mm_add_epi32(lstep1[11], lstep3[ 9]); |
| 1299 lstep2[10] = _mm_sub_epi32(lstep3[ 8], lstep1[10]); |
| 1300 lstep2[11] = _mm_sub_epi32(lstep3[ 9], lstep1[11]); |
| 1301 lstep2[12] = _mm_sub_epi32(lstep3[14], lstep1[12]); |
| 1302 lstep2[13] = _mm_sub_epi32(lstep3[15], lstep1[13]); |
| 1303 lstep2[14] = _mm_add_epi32(lstep1[12], lstep3[14]); |
| 1304 lstep2[15] = _mm_add_epi32(lstep1[13], lstep3[15]); |
| 1305 } |
| 1306 { |
| 1307 const __m128i k32_p16_p16 = pair_set_epi32(cospi_16_64, cospi_16_64); |
| 1308 const __m128i k32_p16_m16 = pair_set_epi32(cospi_16_64, -cospi_16_64); |
| 1309 const __m128i k32_p24_p08 = pair_set_epi32(cospi_24_64, cospi_8_64); |
| 1310 const __m128i k32_m08_p24 = pair_set_epi32(-cospi_8_64, cospi_24_64); |
| 1311 |
| 1312 u[0] = _mm_unpacklo_epi32(lstep1[0], lstep1[2]); |
| 1313 u[1] = _mm_unpackhi_epi32(lstep1[0], lstep1[2]); |
| 1314 u[2] = _mm_unpacklo_epi32(lstep1[1], lstep1[3]); |
| 1315 u[3] = _mm_unpackhi_epi32(lstep1[1], lstep1[3]); |
| 1316 u[4] = _mm_unpacklo_epi32(lstep1[4], lstep1[6]); |
| 1317 u[5] = _mm_unpackhi_epi32(lstep1[4], lstep1[6]); |
| 1318 u[6] = _mm_unpacklo_epi32(lstep1[5], lstep1[7]); |
| 1319 u[7] = _mm_unpackhi_epi32(lstep1[5], lstep1[7]); |
| 1320 |
| 1321 // TODO(jingning): manually inline k_madd_epi32 to further hide |
| 1322 // instruction latency. |
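| // Note that k_madd_epi32 uses unsigned multiplies, which is safe here: |
| // the low 32 bits of each product and sum are identical to the signed |
| // result, and k_packs_epi64 keeps only the low dword of each 64-bit |
| // lane, so the signed dot product is recovered as long as it fits in |
| // 32 bits. |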
| 1323 v[ 0] = k_madd_epi32(u[0], k32_p16_p16); |
| 1324 v[ 1] = k_madd_epi32(u[1], k32_p16_p16); |
| 1325 v[ 2] = k_madd_epi32(u[2], k32_p16_p16); |
| 1326 v[ 3] = k_madd_epi32(u[3], k32_p16_p16); |
| 1327 v[ 4] = k_madd_epi32(u[0], k32_p16_m16); |
| 1328 v[ 5] = k_madd_epi32(u[1], k32_p16_m16); |
| 1329 v[ 6] = k_madd_epi32(u[2], k32_p16_m16); |
| 1330 v[ 7] = k_madd_epi32(u[3], k32_p16_m16); |
| 1331 v[ 8] = k_madd_epi32(u[4], k32_p24_p08); |
| 1332 v[ 9] = k_madd_epi32(u[5], k32_p24_p08); |
| 1333 v[10] = k_madd_epi32(u[6], k32_p24_p08); |
| 1334 v[11] = k_madd_epi32(u[7], k32_p24_p08); |
| 1335 v[12] = k_madd_epi32(u[4], k32_m08_p24); |
| 1336 v[13] = k_madd_epi32(u[5], k32_m08_p24); |
| 1337 v[14] = k_madd_epi32(u[6], k32_m08_p24); |
| 1338 v[15] = k_madd_epi32(u[7], k32_m08_p24); |
| 1339 |
| 1340 u[0] = k_packs_epi64(v[0], v[1]); |
| 1341 u[1] = k_packs_epi64(v[2], v[3]); |
| 1342 u[2] = k_packs_epi64(v[4], v[5]); |
| 1343 u[3] = k_packs_epi64(v[6], v[7]); |
| 1344 u[4] = k_packs_epi64(v[8], v[9]); |
| 1345 u[5] = k_packs_epi64(v[10], v[11]); |
| 1346 u[6] = k_packs_epi64(v[12], v[13]); |
| 1347 u[7] = k_packs_epi64(v[14], v[15]); |
| 1348 |
| 1349 v[0] = _mm_add_epi32(u[0], k__DCT_CONST_ROUNDING); |
| 1350 v[1] = _mm_add_epi32(u[1], k__DCT_CONST_ROUNDING); |
| 1351 v[2] = _mm_add_epi32(u[2], k__DCT_CONST_ROUNDING); |
| 1352 v[3] = _mm_add_epi32(u[3], k__DCT_CONST_ROUNDING); |
| 1353 v[4] = _mm_add_epi32(u[4], k__DCT_CONST_ROUNDING); |
| 1354 v[5] = _mm_add_epi32(u[5], k__DCT_CONST_ROUNDING); |
| 1355 v[6] = _mm_add_epi32(u[6], k__DCT_CONST_ROUNDING); |
| 1356 v[7] = _mm_add_epi32(u[7], k__DCT_CONST_ROUNDING); |
| 1357 |
| 1358 u[0] = _mm_srai_epi32(v[0], DCT_CONST_BITS); |
| 1359 u[1] = _mm_srai_epi32(v[1], DCT_CONST_BITS); |
| 1360 u[2] = _mm_srai_epi32(v[2], DCT_CONST_BITS); |
| 1361 u[3] = _mm_srai_epi32(v[3], DCT_CONST_BITS); |
| 1362 u[4] = _mm_srai_epi32(v[4], DCT_CONST_BITS); |
| 1363 u[5] = _mm_srai_epi32(v[5], DCT_CONST_BITS); |
| 1364 u[6] = _mm_srai_epi32(v[6], DCT_CONST_BITS); |
| 1365 u[7] = _mm_srai_epi32(v[7], DCT_CONST_BITS); |
| 1366 |
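| // The compare/sub/add/shift sequence below computes |
| // (x + 1 + (x < 0)) >> 2, i.e. x/4 rounded to nearest with ties toward |
| // zero: the compare yields -1 for negative lanes, so subtracting it |
| // adds 1. |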
| 1367 sign[0] = _mm_cmplt_epi32(u[0], kZero); |
| 1368 sign[1] = _mm_cmplt_epi32(u[1], kZero); |
| 1369 sign[2] = _mm_cmplt_epi32(u[2], kZero); |
| 1370 sign[3] = _mm_cmplt_epi32(u[3], kZero); |
| 1371 sign[4] = _mm_cmplt_epi32(u[4], kZero); |
| 1372 sign[5] = _mm_cmplt_epi32(u[5], kZero); |
| 1373 sign[6] = _mm_cmplt_epi32(u[6], kZero); |
| 1374 sign[7] = _mm_cmplt_epi32(u[7], kZero); |
| 1375 |
| 1376 u[0] = _mm_sub_epi32(u[0], sign[0]); |
| 1377 u[1] = _mm_sub_epi32(u[1], sign[1]); |
| 1378 u[2] = _mm_sub_epi32(u[2], sign[2]); |
| 1379 u[3] = _mm_sub_epi32(u[3], sign[3]); |
| 1380 u[4] = _mm_sub_epi32(u[4], sign[4]); |
| 1381 u[5] = _mm_sub_epi32(u[5], sign[5]); |
| 1382 u[6] = _mm_sub_epi32(u[6], sign[6]); |
| 1383 u[7] = _mm_sub_epi32(u[7], sign[7]); |
| 1384 |
| 1385 u[0] = _mm_add_epi32(u[0], K32One); |
| 1386 u[1] = _mm_add_epi32(u[1], K32One); |
| 1387 u[2] = _mm_add_epi32(u[2], K32One); |
| 1388 u[3] = _mm_add_epi32(u[3], K32One); |
| 1389 u[4] = _mm_add_epi32(u[4], K32One); |
| 1390 u[5] = _mm_add_epi32(u[5], K32One); |
| 1391 u[6] = _mm_add_epi32(u[6], K32One); |
| 1392 u[7] = _mm_add_epi32(u[7], K32One); |
| 1393 |
| 1394 u[0] = _mm_srai_epi32(u[0], 2); |
| 1395 u[1] = _mm_srai_epi32(u[1], 2); |
| 1396 u[2] = _mm_srai_epi32(u[2], 2); |
| 1397 u[3] = _mm_srai_epi32(u[3], 2); |
| 1398 u[4] = _mm_srai_epi32(u[4], 2); |
| 1399 u[5] = _mm_srai_epi32(u[5], 2); |
| 1400 u[6] = _mm_srai_epi32(u[6], 2); |
| 1401 u[7] = _mm_srai_epi32(u[7], 2); |
| 1402 |
| 1403 // Combine |
| 1404 out[ 0] = _mm_packs_epi32(u[0], u[1]); |
| 1405 out[16] = _mm_packs_epi32(u[2], u[3]); |
| 1406 out[ 8] = _mm_packs_epi32(u[4], u[5]); |
| 1407 out[24] = _mm_packs_epi32(u[6], u[7]); |
| 1408 } |
| 1409 { |
| 1410 const __m128i k32_m08_p24 = pair_set_epi32(-cospi_8_64, cospi_24_64); |
| 1411 const __m128i k32_m24_m08 = pair_set_epi32(-cospi_24_64, -cospi_8_64); |
| 1412 const __m128i k32_p24_p08 = pair_set_epi32(cospi_24_64, cospi_8_64); |
| 1413 |
| 1414 u[0] = _mm_unpacklo_epi32(lstep1[18], lstep1[28]); |
| 1415 u[1] = _mm_unpackhi_epi32(lstep1[18], lstep1[28]); |
| 1416 u[2] = _mm_unpacklo_epi32(lstep1[19], lstep1[29]); |
| 1417 u[3] = _mm_unpackhi_epi32(lstep1[19], lstep1[29]); |
| 1418 u[4] = _mm_unpacklo_epi32(lstep1[20], lstep1[26]); |
| 1419 u[5] = _mm_unpackhi_epi32(lstep1[20], lstep1[26]); |
| 1420 u[6] = _mm_unpacklo_epi32(lstep1[21], lstep1[27]); |
| 1421 u[7] = _mm_unpackhi_epi32(lstep1[21], lstep1[27]); |
| 1422 |
| 1423 v[0] = k_madd_epi32(u[0], k32_m08_p24); |
| 1424 v[1] = k_madd_epi32(u[1], k32_m08_p24); |
| 1425 v[2] = k_madd_epi32(u[2], k32_m08_p24); |
| 1426 v[3] = k_madd_epi32(u[3], k32_m08_p24); |
| 1427 v[4] = k_madd_epi32(u[4], k32_m24_m08); |
| 1428 v[5] = k_madd_epi32(u[5], k32_m24_m08); |
| 1429 v[6] = k_madd_epi32(u[6], k32_m24_m08); |
| 1430 v[7] = k_madd_epi32(u[7], k32_m24_m08); |
| 1431 v[ 8] = k_madd_epi32(u[4], k32_m08_p24); |
| 1432 v[ 9] = k_madd_epi32(u[5], k32_m08_p24); |
| 1433 v[10] = k_madd_epi32(u[6], k32_m08_p24); |
| 1434 v[11] = k_madd_epi32(u[7], k32_m08_p24); |
| 1435 v[12] = k_madd_epi32(u[0], k32_p24_p08); |
| 1436 v[13] = k_madd_epi32(u[1], k32_p24_p08); |
| 1437 v[14] = k_madd_epi32(u[2], k32_p24_p08); |
| 1438 v[15] = k_madd_epi32(u[3], k32_p24_p08); |
| 1439 |
| 1440 u[0] = k_packs_epi64(v[0], v[1]); |
| 1441 u[1] = k_packs_epi64(v[2], v[3]); |
| 1442 u[2] = k_packs_epi64(v[4], v[5]); |
| 1443 u[3] = k_packs_epi64(v[6], v[7]); |
| 1444 u[4] = k_packs_epi64(v[8], v[9]); |
| 1445 u[5] = k_packs_epi64(v[10], v[11]); |
| 1446 u[6] = k_packs_epi64(v[12], v[13]); |
| 1447 u[7] = k_packs_epi64(v[14], v[15]); |
| 1448 |
| 1449 u[0] = _mm_add_epi32(u[0], k__DCT_CONST_ROUNDING); |
| 1450 u[1] = _mm_add_epi32(u[1], k__DCT_CONST_ROUNDING); |
| 1451 u[2] = _mm_add_epi32(u[2], k__DCT_CONST_ROUNDING); |
| 1452 u[3] = _mm_add_epi32(u[3], k__DCT_CONST_ROUNDING); |
| 1453 u[4] = _mm_add_epi32(u[4], k__DCT_CONST_ROUNDING); |
| 1454 u[5] = _mm_add_epi32(u[5], k__DCT_CONST_ROUNDING); |
| 1455 u[6] = _mm_add_epi32(u[6], k__DCT_CONST_ROUNDING); |
| 1456 u[7] = _mm_add_epi32(u[7], k__DCT_CONST_ROUNDING); |
| 1457 |
| 1458 lstep2[18] = _mm_srai_epi32(u[0], DCT_CONST_BITS); |
| 1459 lstep2[19] = _mm_srai_epi32(u[1], DCT_CONST_BITS); |
| 1460 lstep2[20] = _mm_srai_epi32(u[2], DCT_CONST_BITS); |
| 1461 lstep2[21] = _mm_srai_epi32(u[3], DCT_CONST_BITS); |
| 1462 lstep2[26] = _mm_srai_epi32(u[4], DCT_CONST_BITS); |
| 1463 lstep2[27] = _mm_srai_epi32(u[5], DCT_CONST_BITS); |
| 1464 lstep2[28] = _mm_srai_epi32(u[6], DCT_CONST_BITS); |
| 1465 lstep2[29] = _mm_srai_epi32(u[7], DCT_CONST_BITS); |
| 1466 } |
| 1467 { |
| 1468 lstep3[32] = k_cvtlo_epi16(step3[16], mask16, kZero); |
| 1469 lstep3[33] = k_cvthi_epi16(step3[16], mask16, kZero); |
| 1470 lstep3[34] = k_cvtlo_epi16(step3[17], mask16, kZero); |
| 1471 lstep3[35] = k_cvthi_epi16(step3[17], mask16, kZero); |
| 1472 lstep3[44] = k_cvtlo_epi16(step3[22], mask16, kZero); |
| 1473 lstep3[45] = k_cvthi_epi16(step3[22], mask16, kZero); |
| 1474 lstep3[46] = k_cvtlo_epi16(step3[23], mask16, kZero); |
| 1475 lstep3[47] = k_cvthi_epi16(step3[23], mask16, kZero); |
| 1476 lstep3[48] = k_cvtlo_epi16(step3[24], mask16, kZero); |
| 1477 lstep3[49] = k_cvthi_epi16(step3[24], mask16, kZero); |
| 1478 lstep3[50] = k_cvtlo_epi16(step3[25], mask16, kZero); |
| 1479 lstep3[51] = k_cvthi_epi16(step3[25], mask16, kZero); |
| 1480 lstep3[60] = k_cvtlo_epi16(step3[30], mask16, kZero); |
| 1481 lstep3[61] = k_cvthi_epi16(step3[30], mask16, kZero); |
| 1482 lstep3[62] = k_cvtlo_epi16(step3[31], mask16, kZero); |
| 1483 lstep3[63] = k_cvthi_epi16(step3[31], mask16, kZero); |
| 1484 |
| 1485 lstep2[32] = _mm_add_epi32(lstep1[38], lstep3[32]); |
| 1486 lstep2[33] = _mm_add_epi32(lstep1[39], lstep3[33]); |
| 1487 lstep2[34] = _mm_add_epi32(lstep1[36], lstep3[34]); |
| 1488 lstep2[35] = _mm_add_epi32(lstep1[37], lstep3[35]); |
| 1489 lstep2[36] = _mm_sub_epi32(lstep3[34], lstep1[36]); |
| 1490 lstep2[37] = _mm_sub_epi32(lstep3[35], lstep1[37]); |
| 1491 lstep2[38] = _mm_sub_epi32(lstep3[32], lstep1[38]); |
| 1492 lstep2[39] = _mm_sub_epi32(lstep3[33], lstep1[39]); |
| 1493 lstep2[40] = _mm_sub_epi32(lstep3[46], lstep1[40]); |
| 1494 lstep2[41] = _mm_sub_epi32(lstep3[47], lstep1[41]); |
| 1495 lstep2[42] = _mm_sub_epi32(lstep3[44], lstep1[42]); |
| 1496 lstep2[43] = _mm_sub_epi32(lstep3[45], lstep1[43]); |
| 1497 lstep2[44] = _mm_add_epi32(lstep1[42], lstep3[44]); |
| 1498 lstep2[45] = _mm_add_epi32(lstep1[43], lstep3[45]); |
| 1499 lstep2[46] = _mm_add_epi32(lstep1[40], lstep3[46]); |
| 1500 lstep2[47] = _mm_add_epi32(lstep1[41], lstep3[47]); |
| 1501 lstep2[48] = _mm_add_epi32(lstep1[54], lstep3[48]); |
| 1502 lstep2[49] = _mm_add_epi32(lstep1[55], lstep3[49]); |
| 1503 lstep2[50] = _mm_add_epi32(lstep1[52], lstep3[50]); |
| 1504 lstep2[51] = _mm_add_epi32(lstep1[53], lstep3[51]); |
| 1505 lstep2[52] = _mm_sub_epi32(lstep3[50], lstep1[52]); |
| 1506 lstep2[53] = _mm_sub_epi32(lstep3[51], lstep1[53]); |
| 1507 lstep2[54] = _mm_sub_epi32(lstep3[48], lstep1[54]); |
| 1508 lstep2[55] = _mm_sub_epi32(lstep3[49], lstep1[55]); |
| 1509 lstep2[56] = _mm_sub_epi32(lstep3[62], lstep1[56]); |
| 1510 lstep2[57] = _mm_sub_epi32(lstep3[63], lstep1[57]); |
| 1511 lstep2[58] = _mm_sub_epi32(lstep3[60], lstep1[58]); |
| 1512 lstep2[59] = _mm_sub_epi32(lstep3[61], lstep1[59]); |
| 1513 lstep2[60] = _mm_add_epi32(lstep1[58], lstep3[60]); |
| 1514 lstep2[61] = _mm_add_epi32(lstep1[59], lstep3[61]); |
| 1515 lstep2[62] = _mm_add_epi32(lstep1[56], lstep3[62]); |
| 1516 lstep2[63] = _mm_add_epi32(lstep1[57], lstep3[63]); |
| 1517 } |
| 1518 // stage 6 |
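| // Stage 6: the block below rotates lstep2[8..15] to produce out[4], |
| // out[20], out[12] and out[28]. |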
| 1519 { |
| 1520 const __m128i k32_p28_p04 = pair_set_epi32(cospi_28_64, cospi_4_64); |
| 1521 const __m128i k32_p12_p20 = pair_set_epi32(cospi_12_64, cospi_20_64); |
| 1522 const __m128i k32_m20_p12 = pair_set_epi32(-cospi_20_64, cospi_12_64); |
| 1523 const __m128i k32_m04_p28 = pair_set_epi32(-cospi_4_64, cospi_28_64); |
| 1524 |
| 1525 u[0] = _mm_unpacklo_epi32(lstep2[ 8], lstep2[14]); |
| 1526 u[1] = _mm_unpackhi_epi32(lstep2[ 8], lstep2[14]); |
| 1527 u[2] = _mm_unpacklo_epi32(lstep2[ 9], lstep2[15]); |
| 1528 u[3] = _mm_unpackhi_epi32(lstep2[ 9], lstep2[15]); |
| 1529 u[4] = _mm_unpacklo_epi32(lstep2[10], lstep2[12]); |
| 1530 u[5] = _mm_unpackhi_epi32(lstep2[10], lstep2[12]); |
| 1531 u[6] = _mm_unpacklo_epi32(lstep2[11], lstep2[13]); |
| 1532 u[7] = _mm_unpackhi_epi32(lstep2[11], lstep2[13]); |
| 1533 u[8] = _mm_unpacklo_epi32(lstep2[10], lstep2[12]); |
| 1534 u[9] = _mm_unpackhi_epi32(lstep2[10], lstep2[12]); |
| 1535 u[10] = _mm_unpacklo_epi32(lstep2[11], lstep2[13]); |
| 1536 u[11] = _mm_unpackhi_epi32(lstep2[11], lstep2[13]); |
| 1537 u[12] = _mm_unpacklo_epi32(lstep2[ 8], lstep2[14]); |
| 1538 u[13] = _mm_unpackhi_epi32(lstep2[ 8], lstep2[14]); |
| 1539 u[14] = _mm_unpacklo_epi32(lstep2[ 9], lstep2[15]); |
| 1540 u[15] = _mm_unpackhi_epi32(lstep2[ 9], lstep2[15]); |
| 1541 |
| 1542 v[0] = k_madd_epi32(u[0], k32_p28_p04); |
| 1543 v[1] = k_madd_epi32(u[1], k32_p28_p04); |
| 1544 v[2] = k_madd_epi32(u[2], k32_p28_p04); |
| 1545 v[3] = k_madd_epi32(u[3], k32_p28_p04); |
| 1546 v[4] = k_madd_epi32(u[4], k32_p12_p20); |
| 1547 v[5] = k_madd_epi32(u[5], k32_p12_p20); |
| 1548 v[6] = k_madd_epi32(u[6], k32_p12_p20); |
| 1549 v[7] = k_madd_epi32(u[7], k32_p12_p20); |
| 1550 v[ 8] = k_madd_epi32(u[ 8], k32_m20_p12); |
| 1551 v[ 9] = k_madd_epi32(u[ 9], k32_m20_p12); |
| 1552 v[10] = k_madd_epi32(u[10], k32_m20_p12); |
| 1553 v[11] = k_madd_epi32(u[11], k32_m20_p12); |
| 1554 v[12] = k_madd_epi32(u[12], k32_m04_p28); |
| 1555 v[13] = k_madd_epi32(u[13], k32_m04_p28); |
| 1556 v[14] = k_madd_epi32(u[14], k32_m04_p28); |
| 1557 v[15] = k_madd_epi32(u[15], k32_m04_p28); |
| 1558 |
| 1559 u[0] = k_packs_epi64(v[0], v[1]); |
| 1560 u[1] = k_packs_epi64(v[2], v[3]); |
| 1561 u[2] = k_packs_epi64(v[4], v[5]); |
| 1562 u[3] = k_packs_epi64(v[6], v[7]); |
| 1563 u[4] = k_packs_epi64(v[8], v[9]); |
| 1564 u[5] = k_packs_epi64(v[10], v[11]); |
| 1565 u[6] = k_packs_epi64(v[12], v[13]); |
| 1566 u[7] = k_packs_epi64(v[14], v[15]); |
| 1567 |
| 1568 v[0] = _mm_add_epi32(u[0], k__DCT_CONST_ROUNDING); |
| 1569 v[1] = _mm_add_epi32(u[1], k__DCT_CONST_ROUNDING); |
| 1570 v[2] = _mm_add_epi32(u[2], k__DCT_CONST_ROUNDING); |
| 1571 v[3] = _mm_add_epi32(u[3], k__DCT_CONST_ROUNDING); |
| 1572 v[4] = _mm_add_epi32(u[4], k__DCT_CONST_ROUNDING); |
| 1573 v[5] = _mm_add_epi32(u[5], k__DCT_CONST_ROUNDING); |
| 1574 v[6] = _mm_add_epi32(u[6], k__DCT_CONST_ROUNDING); |
| 1575 v[7] = _mm_add_epi32(u[7], k__DCT_CONST_ROUNDING); |
| 1576 |
| 1577 u[0] = _mm_srai_epi32(v[0], DCT_CONST_BITS); |
| 1578 u[1] = _mm_srai_epi32(v[1], DCT_CONST_BITS); |
| 1579 u[2] = _mm_srai_epi32(v[2], DCT_CONST_BITS); |
| 1580 u[3] = _mm_srai_epi32(v[3], DCT_CONST_BITS); |
| 1581 u[4] = _mm_srai_epi32(v[4], DCT_CONST_BITS); |
| 1582 u[5] = _mm_srai_epi32(v[5], DCT_CONST_BITS); |
| 1583 u[6] = _mm_srai_epi32(v[6], DCT_CONST_BITS); |
| 1584 u[7] = _mm_srai_epi32(v[7], DCT_CONST_BITS); |
| 1585 |
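| // Same (x + 1 + (x < 0)) >> 2 rounding as in stage 5. |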
| 1586 sign[0] = _mm_cmplt_epi32(u[0], kZero); |
| 1587 sign[1] = _mm_cmplt_epi32(u[1], kZero); |
| 1588 sign[2] = _mm_cmplt_epi32(u[2], kZero); |
| 1589 sign[3] = _mm_cmplt_epi32(u[3], kZero); |
| 1590 sign[4] = _mm_cmplt_epi32(u[4], kZero); |
| 1591 sign[5] = _mm_cmplt_epi32(u[5], kZero); |
| 1592 sign[6] = _mm_cmplt_epi32(u[6], kZero); |
| 1593 sign[7] = _mm_cmplt_epi32(u[7], kZero); |
| 1594 |
| 1595 u[0] = _mm_sub_epi32(u[0], sign[0]); |
| 1596 u[1] = _mm_sub_epi32(u[1], sign[1]); |
| 1597 u[2] = _mm_sub_epi32(u[2], sign[2]); |
| 1598 u[3] = _mm_sub_epi32(u[3], sign[3]); |
| 1599 u[4] = _mm_sub_epi32(u[4], sign[4]); |
| 1600 u[5] = _mm_sub_epi32(u[5], sign[5]); |
| 1601 u[6] = _mm_sub_epi32(u[6], sign[6]); |
| 1602 u[7] = _mm_sub_epi32(u[7], sign[7]); |
| 1603 |
| 1604 u[0] = _mm_add_epi32(u[0], K32One); |
| 1605 u[1] = _mm_add_epi32(u[1], K32One); |
| 1606 u[2] = _mm_add_epi32(u[2], K32One); |
| 1607 u[3] = _mm_add_epi32(u[3], K32One); |
| 1608 u[4] = _mm_add_epi32(u[4], K32One); |
| 1609 u[5] = _mm_add_epi32(u[5], K32One); |
| 1610 u[6] = _mm_add_epi32(u[6], K32One); |
| 1611 u[7] = _mm_add_epi32(u[7], K32One); |
| 1612 |
| 1613 u[0] = _mm_srai_epi32(u[0], 2); |
| 1614 u[1] = _mm_srai_epi32(u[1], 2); |
| 1615 u[2] = _mm_srai_epi32(u[2], 2); |
| 1616 u[3] = _mm_srai_epi32(u[3], 2); |
| 1617 u[4] = _mm_srai_epi32(u[4], 2); |
| 1618 u[5] = _mm_srai_epi32(u[5], 2); |
| 1619 u[6] = _mm_srai_epi32(u[6], 2); |
| 1620 u[7] = _mm_srai_epi32(u[7], 2); |
| 1621 |
| 1622 out[ 4] = _mm_packs_epi32(u[0], u[1]); |
| 1623 out[20] = _mm_packs_epi32(u[2], u[3]); |
| 1624 out[12] = _mm_packs_epi32(u[4], u[5]); |
| 1625 out[28] = _mm_packs_epi32(u[6], u[7]); |
| 1626 } |
| 1627 { |
| 1628 lstep3[16] = _mm_add_epi32(lstep2[18], lstep1[16]); |
| 1629 lstep3[17] = _mm_add_epi32(lstep2[19], lstep1[17]); |
| 1630 lstep3[18] = _mm_sub_epi32(lstep1[16], lstep2[18]); |
| 1631 lstep3[19] = _mm_sub_epi32(lstep1[17], lstep2[19]); |
| 1632 lstep3[20] = _mm_sub_epi32(lstep1[22], lstep2[20]); |
| 1633 lstep3[21] = _mm_sub_epi32(lstep1[23], lstep2[21]); |
| 1634 lstep3[22] = _mm_add_epi32(lstep2[20], lstep1[22]); |
| 1635 lstep3[23] = _mm_add_epi32(lstep2[21], lstep1[23]); |
| 1636 lstep3[24] = _mm_add_epi32(lstep2[26], lstep1[24]); |
| 1637 lstep3[25] = _mm_add_epi32(lstep2[27], lstep1[25]); |
| 1638 lstep3[26] = _mm_sub_epi32(lstep1[24], lstep2[26]); |
| 1639 lstep3[27] = _mm_sub_epi32(lstep1[25], lstep2[27]); |
| 1640 lstep3[28] = _mm_sub_epi32(lstep1[30], lstep2[28]); |
| 1641 lstep3[29] = _mm_sub_epi32(lstep1[31], lstep2[29]); |
| 1642 lstep3[30] = _mm_add_epi32(lstep2[28], lstep1[30]); |
| 1643 lstep3[31] = _mm_add_epi32(lstep2[29], lstep1[31]); |
| 1644 } |
| 1645 { |
| 1646 const __m128i k32_m04_p28 = pair_set_epi32(-cospi_4_64, cospi_28_64); |
| 1647 const __m128i k32_m28_m04 = pair_set_epi32(-cospi_28_64, -cospi_4_64); |
| 1648 const __m128i k32_m20_p12 = pair_set_epi32(-cospi_20_64, cospi_12_64); |
| 1649 const __m128i k32_m12_m20 = pair_set_epi32(-cospi_12_64, -cospi_20_64); |
| 1651 const __m128i k32_p12_p20 = pair_set_epi32(cospi_12_64, cospi_20_64); |
| 1652 const __m128i k32_p28_p04 = pair_set_epi32(cospi_28_64, cospi_4_64); |
| 1653 |
| 1654 u[ 0] = _mm_unpacklo_epi32(lstep2[34], lstep2[60]); |
| 1655 u[ 1] = _mm_unpackhi_epi32(lstep2[34], lstep2[60]); |
| 1656 u[ 2] = _mm_unpacklo_epi32(lstep2[35], lstep2[61]); |
| 1657 u[ 3] = _mm_unpackhi_epi32(lstep2[35], lstep2[61]); |
| 1658 u[ 4] = _mm_unpacklo_epi32(lstep2[36], lstep2[58]); |
| 1659 u[ 5] = _mm_unpackhi_epi32(lstep2[36], lstep2[58]); |
| 1660 u[ 6] = _mm_unpacklo_epi32(lstep2[37], lstep2[59]); |
| 1661 u[ 7] = _mm_unpackhi_epi32(lstep2[37], lstep2[59]); |
| 1662 u[ 8] = _mm_unpacklo_epi32(lstep2[42], lstep2[52]); |
| 1663 u[ 9] = _mm_unpackhi_epi32(lstep2[42], lstep2[52]); |
| 1664 u[10] = _mm_unpacklo_epi32(lstep2[43], lstep2[53]); |
| 1665 u[11] = _mm_unpackhi_epi32(lstep2[43], lstep2[53]); |
| 1666 u[12] = _mm_unpacklo_epi32(lstep2[44], lstep2[50]); |
| 1667 u[13] = _mm_unpackhi_epi32(lstep2[44], lstep2[50]); |
| 1668 u[14] = _mm_unpacklo_epi32(lstep2[45], lstep2[51]); |
| 1669 u[15] = _mm_unpackhi_epi32(lstep2[45], lstep2[51]); |
| 1670 |
| 1671 v[ 0] = k_madd_epi32(u[ 0], k32_m04_p28); |
| 1672 v[ 1] = k_madd_epi32(u[ 1], k32_m04_p28); |
| 1673 v[ 2] = k_madd_epi32(u[ 2], k32_m04_p28); |
| 1674 v[ 3] = k_madd_epi32(u[ 3], k32_m04_p28); |
| 1675 v[ 4] = k_madd_epi32(u[ 4], k32_m28_m04); |
| 1676 v[ 5] = k_madd_epi32(u[ 5], k32_m28_m04); |
| 1677 v[ 6] = k_madd_epi32(u[ 6], k32_m28_m04); |
| 1678 v[ 7] = k_madd_epi32(u[ 7], k32_m28_m04); |
| 1679 v[ 8] = k_madd_epi32(u[ 8], k32_m20_p12); |
| 1680 v[ 9] = k_madd_epi32(u[ 9], k32_m20_p12); |
| 1681 v[10] = k_madd_epi32(u[10], k32_m20_p12); |
| 1682 v[11] = k_madd_epi32(u[11], k32_m20_p12); |
| 1683 v[12] = k_madd_epi32(u[12], k32_m12_m20); |
| 1684 v[13] = k_madd_epi32(u[13], k32_m12_m20); |
| 1685 v[14] = k_madd_epi32(u[14], k32_m12_m20); |
| 1686 v[15] = k_madd_epi32(u[15], k32_m12_m20); |
| 1687 v[16] = k_madd_epi32(u[12], k32_m20_p12); |
| 1688 v[17] = k_madd_epi32(u[13], k32_m20_p12); |
| 1689 v[18] = k_madd_epi32(u[14], k32_m20_p12); |
| 1690 v[19] = k_madd_epi32(u[15], k32_m20_p12); |
| 1691 v[20] = k_madd_epi32(u[ 8], k32_p12_p20); |
| 1692 v[21] = k_madd_epi32(u[ 9], k32_p12_p20); |
| 1693 v[22] = k_madd_epi32(u[10], k32_p12_p20); |
| 1694 v[23] = k_madd_epi32(u[11], k32_p12_p20); |
| 1695 v[24] = k_madd_epi32(u[ 4], k32_m04_p28); |
| 1696 v[25] = k_madd_epi32(u[ 5], k32_m04_p28); |
| 1697 v[26] = k_madd_epi32(u[ 6], k32_m04_p28); |
| 1698 v[27] = k_madd_epi32(u[ 7], k32_m04_p28); |
| 1699 v[28] = k_madd_epi32(u[ 0], k32_p28_p04); |
| 1700 v[29] = k_madd_epi32(u[ 1], k32_p28_p04); |
| 1701 v[30] = k_madd_epi32(u[ 2], k32_p28_p04); |
| 1702 v[31] = k_madd_epi32(u[ 3], k32_p28_p04); |
| 1703 |
| 1704 u[ 0] = k_packs_epi64(v[ 0], v[ 1]); |
| 1705 u[ 1] = k_packs_epi64(v[ 2], v[ 3]); |
| 1706 u[ 2] = k_packs_epi64(v[ 4], v[ 5]); |
| 1707 u[ 3] = k_packs_epi64(v[ 6], v[ 7]); |
| 1708 u[ 4] = k_packs_epi64(v[ 8], v[ 9]); |
| 1709 u[ 5] = k_packs_epi64(v[10], v[11]); |
| 1710 u[ 6] = k_packs_epi64(v[12], v[13]); |
| 1711 u[ 7] = k_packs_epi64(v[14], v[15]); |
| 1712 u[ 8] = k_packs_epi64(v[16], v[17]); |
| 1713 u[ 9] = k_packs_epi64(v[18], v[19]); |
| 1714 u[10] = k_packs_epi64(v[20], v[21]); |
| 1715 u[11] = k_packs_epi64(v[22], v[23]); |
| 1716 u[12] = k_packs_epi64(v[24], v[25]); |
| 1717 u[13] = k_packs_epi64(v[26], v[27]); |
| 1718 u[14] = k_packs_epi64(v[28], v[29]); |
| 1719 u[15] = k_packs_epi64(v[30], v[31]); |
| 1720 |
| 1721 v[ 0] = _mm_add_epi32(u[ 0], k__DCT_CONST_ROUNDING); |
| 1722 v[ 1] = _mm_add_epi32(u[ 1], k__DCT_CONST_ROUNDING); |
| 1723 v[ 2] = _mm_add_epi32(u[ 2], k__DCT_CONST_ROUNDING); |
| 1724 v[ 3] = _mm_add_epi32(u[ 3], k__DCT_CONST_ROUNDING); |
| 1725 v[ 4] = _mm_add_epi32(u[ 4], k__DCT_CONST_ROUNDING); |
| 1726 v[ 5] = _mm_add_epi32(u[ 5], k__DCT_CONST_ROUNDING); |
| 1727 v[ 6] = _mm_add_epi32(u[ 6], k__DCT_CONST_ROUNDING); |
| 1728 v[ 7] = _mm_add_epi32(u[ 7], k__DCT_CONST_ROUNDING); |
| 1729 v[ 8] = _mm_add_epi32(u[ 8], k__DCT_CONST_ROUNDING); |
| 1730 v[ 9] = _mm_add_epi32(u[ 9], k__DCT_CONST_ROUNDING); |
| 1731 v[10] = _mm_add_epi32(u[10], k__DCT_CONST_ROUNDING); |
| 1732 v[11] = _mm_add_epi32(u[11], k__DCT_CONST_ROUNDING); |
| 1733 v[12] = _mm_add_epi32(u[12], k__DCT_CONST_ROUNDING); |
| 1734 v[13] = _mm_add_epi32(u[13], k__DCT_CONST_ROUNDING); |
| 1735 v[14] = _mm_add_epi32(u[14], k__DCT_CONST_ROUNDING); |
| 1736 v[15] = _mm_add_epi32(u[15], k__DCT_CONST_ROUNDING); |
| 1737 |
| 1738 lstep3[34] = _mm_srai_epi32(v[ 0], DCT_CONST_BITS); |
| 1739 lstep3[35] = _mm_srai_epi32(v[ 1], DCT_CONST_BITS); |
| 1740 lstep3[36] = _mm_srai_epi32(v[ 2], DCT_CONST_BITS); |
| 1741 lstep3[37] = _mm_srai_epi32(v[ 3], DCT_CONST_BITS); |
| 1742 lstep3[42] = _mm_srai_epi32(v[ 4], DCT_CONST_BITS); |
| 1743 lstep3[43] = _mm_srai_epi32(v[ 5], DCT_CONST_BITS); |
| 1744 lstep3[44] = _mm_srai_epi32(v[ 6], DCT_CONST_BITS); |
| 1745 lstep3[45] = _mm_srai_epi32(v[ 7], DCT_CONST_BITS); |
| 1746 lstep3[50] = _mm_srai_epi32(v[ 8], DCT_CONST_BITS); |
| 1747 lstep3[51] = _mm_srai_epi32(v[ 9], DCT_CONST_BITS); |
| 1748 lstep3[52] = _mm_srai_epi32(v[10], DCT_CONST_BITS); |
| 1749 lstep3[53] = _mm_srai_epi32(v[11], DCT_CONST_BITS); |
| 1750 lstep3[58] = _mm_srai_epi32(v[12], DCT_CONST_BITS); |
| 1751 lstep3[59] = _mm_srai_epi32(v[13], DCT_CONST_BITS); |
| 1752 lstep3[60] = _mm_srai_epi32(v[14], DCT_CONST_BITS); |
| 1753 lstep3[61] = _mm_srai_epi32(v[15], DCT_CONST_BITS); |
| 1754 } |
| 1755 // stage 7 |
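| // Stage 7 redone in 32 bits: the same rotations as the 16-bit stage 7 |
| // above, producing out[2], out[18], out[10], out[26], out[6], out[22], |
| // out[14] and out[30]. |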
| 1756 { |
| 1757 const __m128i k32_p30_p02 = pair_set_epi32(cospi_30_64, cospi_2_64); |
| 1758 const __m128i k32_p14_p18 = pair_set_epi32(cospi_14_64, cospi_18_64); |
| 1759 const __m128i k32_p22_p10 = pair_set_epi32(cospi_22_64, cospi_10_64); |
| 1760 const __m128i k32_p06_p26 = pair_set_epi32(cospi_6_64, cospi_26_64); |
| 1761 const __m128i k32_m26_p06 = pair_set_epi32(-cospi_26_64, cospi_6_64); |
| 1762 const __m128i k32_m10_p22 = pair_set_epi32(-cospi_10_64, cospi_22_64); |
| 1763 const __m128i k32_m18_p14 = pair_set_epi32(-cospi_18_64, cospi_14_64); |
| 1764 const __m128i k32_m02_p30 = pair_set_epi32(-cospi_2_64, cospi_30_64); |
| 1765 |
| 1766 u[ 0] = _mm_unpacklo_epi32(lstep3[16], lstep3[30]); |
| 1767 u[ 1] = _mm_unpackhi_epi32(lstep3[16], lstep3[30]); |
| 1768 u[ 2] = _mm_unpacklo_epi32(lstep3[17], lstep3[31]); |
| 1769 u[ 3] = _mm_unpackhi_epi32(lstep3[17], lstep3[31]); |
| 1770 u[ 4] = _mm_unpacklo_epi32(lstep3[18], lstep3[28]); |
| 1771 u[ 5] = _mm_unpackhi_epi32(lstep3[18], lstep3[28]); |
| 1772 u[ 6] = _mm_unpacklo_epi32(lstep3[19], lstep3[29]); |
| 1773 u[ 7] = _mm_unpackhi_epi32(lstep3[19], lstep3[29]); |
| 1774 u[ 8] = _mm_unpacklo_epi32(lstep3[20], lstep3[26]); |
| 1775 u[ 9] = _mm_unpackhi_epi32(lstep3[20], lstep3[26]); |
| 1776 u[10] = _mm_unpacklo_epi32(lstep3[21], lstep3[27]); |
| 1777 u[11] = _mm_unpackhi_epi32(lstep3[21], lstep3[27]); |
| 1778 u[12] = _mm_unpacklo_epi32(lstep3[22], lstep3[24]); |
| 1779 u[13] = _mm_unpackhi_epi32(lstep3[22], lstep3[24]); |
| 1780 u[14] = _mm_unpacklo_epi32(lstep3[23], lstep3[25]); |
| 1781 u[15] = _mm_unpackhi_epi32(lstep3[23], lstep3[25]); |
| 1782 |
| 1783 v[ 0] = k_madd_epi32(u[ 0], k32_p30_p02); |
| 1784 v[ 1] = k_madd_epi32(u[ 1], k32_p30_p02); |
| 1785 v[ 2] = k_madd_epi32(u[ 2], k32_p30_p02); |
| 1786 v[ 3] = k_madd_epi32(u[ 3], k32_p30_p02); |
| 1787 v[ 4] = k_madd_epi32(u[ 4], k32_p14_p18); |
| 1788 v[ 5] = k_madd_epi32(u[ 5], k32_p14_p18); |
| 1789 v[ 6] = k_madd_epi32(u[ 6], k32_p14_p18); |
| 1790 v[ 7] = k_madd_epi32(u[ 7], k32_p14_p18); |
| 1791 v[ 8] = k_madd_epi32(u[ 8], k32_p22_p10); |
| 1792 v[ 9] = k_madd_epi32(u[ 9], k32_p22_p10); |
| 1793 v[10] = k_madd_epi32(u[10], k32_p22_p10); |
| 1794 v[11] = k_madd_epi32(u[11], k32_p22_p10); |
| 1795 v[12] = k_madd_epi32(u[12], k32_p06_p26); |
| 1796 v[13] = k_madd_epi32(u[13], k32_p06_p26); |
| 1797 v[14] = k_madd_epi32(u[14], k32_p06_p26); |
| 1798 v[15] = k_madd_epi32(u[15], k32_p06_p26); |
| 1799 v[16] = k_madd_epi32(u[12], k32_m26_p06); |
| 1800 v[17] = k_madd_epi32(u[13], k32_m26_p06); |
| 1801 v[18] = k_madd_epi32(u[14], k32_m26_p06); |
| 1802 v[19] = k_madd_epi32(u[15], k32_m26_p06); |
| 1803 v[20] = k_madd_epi32(u[ 8], k32_m10_p22); |
| 1804 v[21] = k_madd_epi32(u[ 9], k32_m10_p22); |
| 1805 v[22] = k_madd_epi32(u[10], k32_m10_p22); |
| 1806 v[23] = k_madd_epi32(u[11], k32_m10_p22); |
| 1807 v[24] = k_madd_epi32(u[ 4], k32_m18_p14); |
| 1808 v[25] = k_madd_epi32(u[ 5], k32_m18_p14); |
| 1809 v[26] = k_madd_epi32(u[ 6], k32_m18_p14); |
| 1810 v[27] = k_madd_epi32(u[ 7], k32_m18_p14); |
| 1811 v[28] = k_madd_epi32(u[ 0], k32_m02_p30); |
| 1812 v[29] = k_madd_epi32(u[ 1], k32_m02_p30); |
| 1813 v[30] = k_madd_epi32(u[ 2], k32_m02_p30); |
| 1814 v[31] = k_madd_epi32(u[ 3], k32_m02_p30); |
| 1815 |
| 1816 u[ 0] = k_packs_epi64(v[ 0], v[ 1]); |
| 1817 u[ 1] = k_packs_epi64(v[ 2], v[ 3]); |
| 1818 u[ 2] = k_packs_epi64(v[ 4], v[ 5]); |
| 1819 u[ 3] = k_packs_epi64(v[ 6], v[ 7]); |
| 1820 u[ 4] = k_packs_epi64(v[ 8], v[ 9]); |
| 1821 u[ 5] = k_packs_epi64(v[10], v[11]); |
| 1822 u[ 6] = k_packs_epi64(v[12], v[13]); |
| 1823 u[ 7] = k_packs_epi64(v[14], v[15]); |
| 1824 u[ 8] = k_packs_epi64(v[16], v[17]); |
| 1825 u[ 9] = k_packs_epi64(v[18], v[19]); |
| 1826 u[10] = k_packs_epi64(v[20], v[21]); |
| 1827 u[11] = k_packs_epi64(v[22], v[23]); |
| 1828 u[12] = k_packs_epi64(v[24], v[25]); |
| 1829 u[13] = k_packs_epi64(v[26], v[27]); |
| 1830 u[14] = k_packs_epi64(v[28], v[29]); |
| 1831 u[15] = k_packs_epi64(v[30], v[31]); |
| 1832 |
| 1833 v[ 0] = _mm_add_epi32(u[ 0], k__DCT_CONST_ROUNDING); |
| 1834 v[ 1] = _mm_add_epi32(u[ 1], k__DCT_CONST_ROUNDING); |
| 1835 v[ 2] = _mm_add_epi32(u[ 2], k__DCT_CONST_ROUNDING); |
| 1836 v[ 3] = _mm_add_epi32(u[ 3], k__DCT_CONST_ROUNDING); |
| 1837 v[ 4] = _mm_add_epi32(u[ 4], k__DCT_CONST_ROUNDING); |
| 1838 v[ 5] = _mm_add_epi32(u[ 5], k__DCT_CONST_ROUNDING); |
| 1839 v[ 6] = _mm_add_epi32(u[ 6], k__DCT_CONST_ROUNDING); |
| 1840 v[ 7] = _mm_add_epi32(u[ 7], k__DCT_CONST_ROUNDING); |
| 1841 v[ 8] = _mm_add_epi32(u[ 8], k__DCT_CONST_ROUNDING); |
| 1842 v[ 9] = _mm_add_epi32(u[ 9], k__DCT_CONST_ROUNDING); |
| 1843 v[10] = _mm_add_epi32(u[10], k__DCT_CONST_ROUNDING); |
| 1844 v[11] = _mm_add_epi32(u[11], k__DCT_CONST_ROUNDING); |
| 1845 v[12] = _mm_add_epi32(u[12], k__DCT_CONST_ROUNDING); |
| 1846 v[13] = _mm_add_epi32(u[13], k__DCT_CONST_ROUNDING); |
| 1847 v[14] = _mm_add_epi32(u[14], k__DCT_CONST_ROUNDING); |
| 1848 v[15] = _mm_add_epi32(u[15], k__DCT_CONST_ROUNDING); |
| 1849 |
| 1850 u[ 0] = _mm_srai_epi32(v[ 0], DCT_CONST_BITS); |
| 1851 u[ 1] = _mm_srai_epi32(v[ 1], DCT_CONST_BITS); |
| 1852 u[ 2] = _mm_srai_epi32(v[ 2], DCT_CONST_BITS); |
| 1853 u[ 3] = _mm_srai_epi32(v[ 3], DCT_CONST_BITS); |
| 1854 u[ 4] = _mm_srai_epi32(v[ 4], DCT_CONST_BITS); |
| 1855 u[ 5] = _mm_srai_epi32(v[ 5], DCT_CONST_BITS); |
| 1856 u[ 6] = _mm_srai_epi32(v[ 6], DCT_CONST_BITS); |
| 1857 u[ 7] = _mm_srai_epi32(v[ 7], DCT_CONST_BITS); |
| 1858 u[ 8] = _mm_srai_epi32(v[ 8], DCT_CONST_BITS); |
| 1859 u[ 9] = _mm_srai_epi32(v[ 9], DCT_CONST_BITS); |
| 1860 u[10] = _mm_srai_epi32(v[10], DCT_CONST_BITS); |
| 1861 u[11] = _mm_srai_epi32(v[11], DCT_CONST_BITS); |
| 1862 u[12] = _mm_srai_epi32(v[12], DCT_CONST_BITS); |
| 1863 u[13] = _mm_srai_epi32(v[13], DCT_CONST_BITS); |
| 1864 u[14] = _mm_srai_epi32(v[14], DCT_CONST_BITS); |
| 1865 u[15] = _mm_srai_epi32(v[15], DCT_CONST_BITS); |
| 1866 |
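| // Round with (x + 1 + (x < 0)) >> 2, as above. |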
| 1867 v[ 0] = _mm_cmplt_epi32(u[ 0], kZero); |
| 1868 v[ 1] = _mm_cmplt_epi32(u[ 1], kZero); |
| 1869 v[ 2] = _mm_cmplt_epi32(u[ 2], kZero); |
| 1870 v[ 3] = _mm_cmplt_epi32(u[ 3], kZero); |
| 1871 v[ 4] = _mm_cmplt_epi32(u[ 4], kZero); |
| 1872 v[ 5] = _mm_cmplt_epi32(u[ 5], kZero); |
| 1873 v[ 6] = _mm_cmplt_epi32(u[ 6], kZero); |
| 1874 v[ 7] = _mm_cmplt_epi32(u[ 7], kZero); |
| 1875 v[ 8] = _mm_cmplt_epi32(u[ 8], kZero); |
| 1876 v[ 9] = _mm_cmplt_epi32(u[ 9], kZero); |
| 1877 v[10] = _mm_cmplt_epi32(u[10], kZero); |
| 1878 v[11] = _mm_cmplt_epi32(u[11], kZero); |
| 1879 v[12] = _mm_cmplt_epi32(u[12], kZero); |
| 1880 v[13] = _mm_cmplt_epi32(u[13], kZero); |
| 1881 v[14] = _mm_cmplt_epi32(u[14], kZero); |
| 1882 v[15] = _mm_cmplt_epi32(u[15], kZero); |
| 1883 |
| 1884 u[ 0] = _mm_sub_epi32(u[ 0], v[ 0]); |
| 1885 u[ 1] = _mm_sub_epi32(u[ 1], v[ 1]); |
| 1886 u[ 2] = _mm_sub_epi32(u[ 2], v[ 2]); |
| 1887 u[ 3] = _mm_sub_epi32(u[ 3], v[ 3]); |
| 1888 u[ 4] = _mm_sub_epi32(u[ 4], v[ 4]); |
| 1889 u[ 5] = _mm_sub_epi32(u[ 5], v[ 5]); |
| 1890 u[ 6] = _mm_sub_epi32(u[ 6], v[ 6]); |
| 1891 u[ 7] = _mm_sub_epi32(u[ 7], v[ 7]); |
| 1892 u[ 8] = _mm_sub_epi32(u[ 8], v[ 8]); |
| 1893 u[ 9] = _mm_sub_epi32(u[ 9], v[ 9]); |
| 1894 u[10] = _mm_sub_epi32(u[10], v[10]); |
| 1895 u[11] = _mm_sub_epi32(u[11], v[11]); |
| 1896 u[12] = _mm_sub_epi32(u[12], v[12]); |
| 1897 u[13] = _mm_sub_epi32(u[13], v[13]); |
| 1898 u[14] = _mm_sub_epi32(u[14], v[14]); |
| 1899 u[15] = _mm_sub_epi32(u[15], v[15]); |
| 1900 |
| 1901 v[ 0] = _mm_add_epi32(u[ 0], K32One); |
| 1902 v[ 1] = _mm_add_epi32(u[ 1], K32One); |
| 1903 v[ 2] = _mm_add_epi32(u[ 2], K32One); |
| 1904 v[ 3] = _mm_add_epi32(u[ 3], K32One); |
| 1905 v[ 4] = _mm_add_epi32(u[ 4], K32One); |
| 1906 v[ 5] = _mm_add_epi32(u[ 5], K32One); |
| 1907 v[ 6] = _mm_add_epi32(u[ 6], K32One); |
| 1908 v[ 7] = _mm_add_epi32(u[ 7], K32One); |
| 1909 v[ 8] = _mm_add_epi32(u[ 8], K32One); |
| 1910 v[ 9] = _mm_add_epi32(u[ 9], K32One); |
| 1911 v[10] = _mm_add_epi32(u[10], K32One); |
| 1912 v[11] = _mm_add_epi32(u[11], K32One); |
| 1913 v[12] = _mm_add_epi32(u[12], K32One); |
| 1914 v[13] = _mm_add_epi32(u[13], K32One); |
| 1915 v[14] = _mm_add_epi32(u[14], K32One); |
| 1916 v[15] = _mm_add_epi32(u[15], K32One); |
| 1917 |
| 1918 u[ 0] = _mm_srai_epi32(v[ 0], 2); |
| 1919 u[ 1] = _mm_srai_epi32(v[ 1], 2); |
| 1920 u[ 2] = _mm_srai_epi32(v[ 2], 2); |
| 1921 u[ 3] = _mm_srai_epi32(v[ 3], 2); |
| 1922 u[ 4] = _mm_srai_epi32(v[ 4], 2); |
| 1923 u[ 5] = _mm_srai_epi32(v[ 5], 2); |
| 1924 u[ 6] = _mm_srai_epi32(v[ 6], 2); |
| 1925 u[ 7] = _mm_srai_epi32(v[ 7], 2); |
| 1926 u[ 8] = _mm_srai_epi32(v[ 8], 2); |
| 1927 u[ 9] = _mm_srai_epi32(v[ 9], 2); |
| 1928 u[10] = _mm_srai_epi32(v[10], 2); |
| 1929 u[11] = _mm_srai_epi32(v[11], 2); |
| 1930 u[12] = _mm_srai_epi32(v[12], 2); |
| 1931 u[13] = _mm_srai_epi32(v[13], 2); |
| 1932 u[14] = _mm_srai_epi32(v[14], 2); |
| 1933 u[15] = _mm_srai_epi32(v[15], 2); |
| 1934 |
| 1935 out[ 2] = _mm_packs_epi32(u[0], u[1]); |
| 1936 out[18] = _mm_packs_epi32(u[2], u[3]); |
| 1937 out[10] = _mm_packs_epi32(u[4], u[5]); |
| 1938 out[26] = _mm_packs_epi32(u[6], u[7]); |
| 1939 out[ 6] = _mm_packs_epi32(u[8], u[9]); |
| 1940 out[22] = _mm_packs_epi32(u[10], u[11]); |
| 1941 out[14] = _mm_packs_epi32(u[12], u[13]); |
| 1942 out[30] = _mm_packs_epi32(u[14], u[15]); |
| 1943 } |
| 1944 { |
| 1945 lstep1[32] = _mm_add_epi32(lstep3[34], lstep2[32]); |
| 1946 lstep1[33] = _mm_add_epi32(lstep3[35], lstep2[33]); |
| 1947 lstep1[34] = _mm_sub_epi32(lstep2[32], lstep3[34]); |
| 1948 lstep1[35] = _mm_sub_epi32(lstep2[33], lstep3[35]); |
| 1949 lstep1[36] = _mm_sub_epi32(lstep2[38], lstep3[36]); |
| 1950 lstep1[37] = _mm_sub_epi32(lstep2[39], lstep3[37]); |
| 1951 lstep1[38] = _mm_add_epi32(lstep3[36], lstep2[38]); |
| 1952 lstep1[39] = _mm_add_epi32(lstep3[37], lstep2[39]); |
| 1953 lstep1[40] = _mm_add_epi32(lstep3[42], lstep2[40]); |
| 1954 lstep1[41] = _mm_add_epi32(lstep3[43], lstep2[41]); |
| 1955 lstep1[42] = _mm_sub_epi32(lstep2[40], lstep3[42]); |
| 1956 lstep1[43] = _mm_sub_epi32(lstep2[41], lstep3[43]); |
| 1957 lstep1[44] = _mm_sub_epi32(lstep2[46], lstep3[44]); |
| 1958 lstep1[45] = _mm_sub_epi32(lstep2[47], lstep3[45]); |
| 1959 lstep1[46] = _mm_add_epi32(lstep3[44], lstep2[46]); |
| 1960 lstep1[47] = _mm_add_epi32(lstep3[45], lstep2[47]); |
| 1961 lstep1[48] = _mm_add_epi32(lstep3[50], lstep2[48]); |
| 1962 lstep1[49] = _mm_add_epi32(lstep3[51], lstep2[49]); |
| 1963 lstep1[50] = _mm_sub_epi32(lstep2[48], lstep3[50]); |
| 1964 lstep1[51] = _mm_sub_epi32(lstep2[49], lstep3[51]); |
| 1965 lstep1[52] = _mm_sub_epi32(lstep2[54], lstep3[52]); |
| 1966 lstep1[53] = _mm_sub_epi32(lstep2[55], lstep3[53]); |
| 1967 lstep1[54] = _mm_add_epi32(lstep3[52], lstep2[54]); |
| 1968 lstep1[55] = _mm_add_epi32(lstep3[53], lstep2[55]); |
| 1969 lstep1[56] = _mm_add_epi32(lstep3[58], lstep2[56]); |
| 1970 lstep1[57] = _mm_add_epi32(lstep3[59], lstep2[57]); |
| 1971 lstep1[58] = _mm_sub_epi32(lstep2[56], lstep3[58]); |
| 1972 lstep1[59] = _mm_sub_epi32(lstep2[57], lstep3[59]); |
| 1973 lstep1[60] = _mm_sub_epi32(lstep2[62], lstep3[60]); |
| 1974 lstep1[61] = _mm_sub_epi32(lstep2[63], lstep3[61]); |
| 1975 lstep1[62] = _mm_add_epi32(lstep3[60], lstep2[62]); |
| 1976 lstep1[63] = _mm_add_epi32(lstep3[61], lstep2[63]); |
| 1977 } |
| 1978 // stage 8 |
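| // Stage 8: final 32-bit rotations, mirroring the 16-bit final stage |
| // above and producing the odd-index outputs. |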
| 1979 { |
| 1980 const __m128i k32_p31_p01 = pair_set_epi32(cospi_31_64, cospi_1_64); |
| 1981 const __m128i k32_p15_p17 = pair_set_epi32(cospi_15_64, cospi_17_64); |
| 1982 const __m128i k32_p23_p09 = pair_set_epi32(cospi_23_64, cospi_9_64); |
| 1983 const __m128i k32_p07_p25 = pair_set_epi32(cospi_7_64, cospi_25_64); |
| 1984 const __m128i k32_m25_p07 = pair_set_epi32(-cospi_25_64, cospi_7_64); |
| 1985 const __m128i k32_m09_p23 = pair_set_epi32(-cospi_9_64, cospi_23_64); |
| 1986 const __m128i k32_m17_p15 = pair_set_epi32(-cospi_17_64, cospi_15_64); |
| 1987 const __m128i k32_m01_p31 = pair_set_epi32(-cospi_1_64, cospi_31_64); |
| 1988 |
| 1989 u[ 0] = _mm_unpacklo_epi32(lstep1[32], lstep1[62]); |
| 1990 u[ 1] = _mm_unpackhi_epi32(lstep1[32], lstep1[62]); |
| 1991 u[ 2] = _mm_unpacklo_epi32(lstep1[33], lstep1[63]); |
| 1992 u[ 3] = _mm_unpackhi_epi32(lstep1[33], lstep1[63]); |
| 1993 u[ 4] = _mm_unpacklo_epi32(lstep1[34], lstep1[60]); |
| 1994 u[ 5] = _mm_unpackhi_epi32(lstep1[34], lstep1[60]); |
| 1995 u[ 6] = _mm_unpacklo_epi32(lstep1[35], lstep1[61]); |
| 1996 u[ 7] = _mm_unpackhi_epi32(lstep1[35], lstep1[61]); |
| 1997 u[ 8] = _mm_unpacklo_epi32(lstep1[36], lstep1[58]); |
| 1998 u[ 9] = _mm_unpackhi_epi32(lstep1[36], lstep1[58]); |
| 1999 u[10] = _mm_unpacklo_epi32(lstep1[37], lstep1[59]); |
| 2000 u[11] = _mm_unpackhi_epi32(lstep1[37], lstep1[59]); |
| 2001 u[12] = _mm_unpacklo_epi32(lstep1[38], lstep1[56]); |
| 2002 u[13] = _mm_unpackhi_epi32(lstep1[38], lstep1[56]); |
| 2003 u[14] = _mm_unpacklo_epi32(lstep1[39], lstep1[57]); |
| 2004 u[15] = _mm_unpackhi_epi32(lstep1[39], lstep1[57]); |
| 2005 |
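|      // Each u[i] interleaves a lane of the lower-index operand with the
|      // matching lane of its partner, so k_madd_epi32(u, k32_x_y) forms the
|      // 64-bit sums a * x + b * y per lane pair; k_packs_epi64 then narrows
|      // the sums back to 32 bits.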
| 2006 v[ 0] = k_madd_epi32(u[ 0], k32_p31_p01); |
| 2007 v[ 1] = k_madd_epi32(u[ 1], k32_p31_p01); |
| 2008 v[ 2] = k_madd_epi32(u[ 2], k32_p31_p01); |
| 2009 v[ 3] = k_madd_epi32(u[ 3], k32_p31_p01); |
| 2010 v[ 4] = k_madd_epi32(u[ 4], k32_p15_p17); |
| 2011 v[ 5] = k_madd_epi32(u[ 5], k32_p15_p17); |
| 2012 v[ 6] = k_madd_epi32(u[ 6], k32_p15_p17); |
| 2013 v[ 7] = k_madd_epi32(u[ 7], k32_p15_p17); |
| 2014 v[ 8] = k_madd_epi32(u[ 8], k32_p23_p09); |
| 2015 v[ 9] = k_madd_epi32(u[ 9], k32_p23_p09); |
| 2016 v[10] = k_madd_epi32(u[10], k32_p23_p09); |
| 2017 v[11] = k_madd_epi32(u[11], k32_p23_p09); |
| 2018 v[12] = k_madd_epi32(u[12], k32_p07_p25); |
| 2019 v[13] = k_madd_epi32(u[13], k32_p07_p25); |
| 2020 v[14] = k_madd_epi32(u[14], k32_p07_p25); |
| 2021 v[15] = k_madd_epi32(u[15], k32_p07_p25); |
| 2022 v[16] = k_madd_epi32(u[12], k32_m25_p07); |
| 2023 v[17] = k_madd_epi32(u[13], k32_m25_p07); |
| 2024 v[18] = k_madd_epi32(u[14], k32_m25_p07); |
| 2025 v[19] = k_madd_epi32(u[15], k32_m25_p07); |
| 2026 v[20] = k_madd_epi32(u[ 8], k32_m09_p23); |
| 2027 v[21] = k_madd_epi32(u[ 9], k32_m09_p23); |
| 2028 v[22] = k_madd_epi32(u[10], k32_m09_p23); |
| 2029 v[23] = k_madd_epi32(u[11], k32_m09_p23); |
| 2030 v[24] = k_madd_epi32(u[ 4], k32_m17_p15); |
| 2031 v[25] = k_madd_epi32(u[ 5], k32_m17_p15); |
| 2032 v[26] = k_madd_epi32(u[ 6], k32_m17_p15); |
| 2033 v[27] = k_madd_epi32(u[ 7], k32_m17_p15); |
| 2034 v[28] = k_madd_epi32(u[ 0], k32_m01_p31); |
| 2035 v[29] = k_madd_epi32(u[ 1], k32_m01_p31); |
| 2036 v[30] = k_madd_epi32(u[ 2], k32_m01_p31); |
| 2037 v[31] = k_madd_epi32(u[ 3], k32_m01_p31); |
| 2038 |
| 2039 u[ 0] = k_packs_epi64(v[ 0], v[ 1]); |
| 2040 u[ 1] = k_packs_epi64(v[ 2], v[ 3]); |
| 2041 u[ 2] = k_packs_epi64(v[ 4], v[ 5]); |
| 2042 u[ 3] = k_packs_epi64(v[ 6], v[ 7]); |
| 2043 u[ 4] = k_packs_epi64(v[ 8], v[ 9]); |
| 2044 u[ 5] = k_packs_epi64(v[10], v[11]); |
| 2045 u[ 6] = k_packs_epi64(v[12], v[13]); |
| 2046 u[ 7] = k_packs_epi64(v[14], v[15]); |
| 2047 u[ 8] = k_packs_epi64(v[16], v[17]); |
| 2048 u[ 9] = k_packs_epi64(v[18], v[19]); |
| 2049 u[10] = k_packs_epi64(v[20], v[21]); |
| 2050 u[11] = k_packs_epi64(v[22], v[23]); |
| 2051 u[12] = k_packs_epi64(v[24], v[25]); |
| 2052 u[13] = k_packs_epi64(v[26], v[27]); |
| 2053 u[14] = k_packs_epi64(v[28], v[29]); |
| 2054 u[15] = k_packs_epi64(v[30], v[31]); |
| 2055 |
| 2056 v[ 0] = _mm_add_epi32(u[ 0], k__DCT_CONST_ROUNDING); |
| 2057 v[ 1] = _mm_add_epi32(u[ 1], k__DCT_CONST_ROUNDING); |
| 2058 v[ 2] = _mm_add_epi32(u[ 2], k__DCT_CONST_ROUNDING); |
| 2059 v[ 3] = _mm_add_epi32(u[ 3], k__DCT_CONST_ROUNDING); |
| 2060 v[ 4] = _mm_add_epi32(u[ 4], k__DCT_CONST_ROUNDING); |
| 2061 v[ 5] = _mm_add_epi32(u[ 5], k__DCT_CONST_ROUNDING); |
| 2062 v[ 6] = _mm_add_epi32(u[ 6], k__DCT_CONST_ROUNDING); |
| 2063 v[ 7] = _mm_add_epi32(u[ 7], k__DCT_CONST_ROUNDING); |
| 2064 v[ 8] = _mm_add_epi32(u[ 8], k__DCT_CONST_ROUNDING); |
| 2065 v[ 9] = _mm_add_epi32(u[ 9], k__DCT_CONST_ROUNDING); |
| 2066 v[10] = _mm_add_epi32(u[10], k__DCT_CONST_ROUNDING); |
| 2067 v[11] = _mm_add_epi32(u[11], k__DCT_CONST_ROUNDING); |
| 2068 v[12] = _mm_add_epi32(u[12], k__DCT_CONST_ROUNDING); |
| 2069 v[13] = _mm_add_epi32(u[13], k__DCT_CONST_ROUNDING); |
| 2070 v[14] = _mm_add_epi32(u[14], k__DCT_CONST_ROUNDING); |
| 2071 v[15] = _mm_add_epi32(u[15], k__DCT_CONST_ROUNDING); |
| 2072 |
| 2073 u[ 0] = _mm_srai_epi32(v[ 0], DCT_CONST_BITS); |
| 2074 u[ 1] = _mm_srai_epi32(v[ 1], DCT_CONST_BITS); |
| 2075 u[ 2] = _mm_srai_epi32(v[ 2], DCT_CONST_BITS); |
| 2076 u[ 3] = _mm_srai_epi32(v[ 3], DCT_CONST_BITS); |
| 2077 u[ 4] = _mm_srai_epi32(v[ 4], DCT_CONST_BITS); |
| 2078 u[ 5] = _mm_srai_epi32(v[ 5], DCT_CONST_BITS); |
| 2079 u[ 6] = _mm_srai_epi32(v[ 6], DCT_CONST_BITS); |
| 2080 u[ 7] = _mm_srai_epi32(v[ 7], DCT_CONST_BITS); |
| 2081 u[ 8] = _mm_srai_epi32(v[ 8], DCT_CONST_BITS); |
| 2082 u[ 9] = _mm_srai_epi32(v[ 9], DCT_CONST_BITS); |
| 2083 u[10] = _mm_srai_epi32(v[10], DCT_CONST_BITS); |
| 2084 u[11] = _mm_srai_epi32(v[11], DCT_CONST_BITS); |
| 2085 u[12] = _mm_srai_epi32(v[12], DCT_CONST_BITS); |
| 2086 u[13] = _mm_srai_epi32(v[13], DCT_CONST_BITS); |
| 2087 u[14] = _mm_srai_epi32(v[14], DCT_CONST_BITS); |
| 2088 u[15] = _mm_srai_epi32(v[15], DCT_CONST_BITS); |
| 2089 |
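|      // The next three blocks divide by 4 with rounding that is symmetric
|      // about zero: the compare mask is all ones (-1) for negative lanes,
|      // so the scalar equivalent is
|      //   u = (u + (u < 0 ? 1 : 0) + 1) >> 2;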
| 2090 v[ 0] = _mm_cmplt_epi32(u[ 0], kZero); |
| 2091 v[ 1] = _mm_cmplt_epi32(u[ 1], kZero); |
| 2092 v[ 2] = _mm_cmplt_epi32(u[ 2], kZero); |
| 2093 v[ 3] = _mm_cmplt_epi32(u[ 3], kZero); |
| 2094 v[ 4] = _mm_cmplt_epi32(u[ 4], kZero); |
| 2095 v[ 5] = _mm_cmplt_epi32(u[ 5], kZero); |
| 2096 v[ 6] = _mm_cmplt_epi32(u[ 6], kZero); |
| 2097 v[ 7] = _mm_cmplt_epi32(u[ 7], kZero); |
| 2098 v[ 8] = _mm_cmplt_epi32(u[ 8], kZero); |
| 2099 v[ 9] = _mm_cmplt_epi32(u[ 9], kZero); |
| 2100 v[10] = _mm_cmplt_epi32(u[10], kZero); |
| 2101 v[11] = _mm_cmplt_epi32(u[11], kZero); |
| 2102 v[12] = _mm_cmplt_epi32(u[12], kZero); |
| 2103 v[13] = _mm_cmplt_epi32(u[13], kZero); |
| 2104 v[14] = _mm_cmplt_epi32(u[14], kZero); |
| 2105 v[15] = _mm_cmplt_epi32(u[15], kZero); |
| 2106 |
| 2107 u[ 0] = _mm_sub_epi32(u[ 0], v[ 0]); |
| 2108 u[ 1] = _mm_sub_epi32(u[ 1], v[ 1]); |
| 2109 u[ 2] = _mm_sub_epi32(u[ 2], v[ 2]); |
| 2110 u[ 3] = _mm_sub_epi32(u[ 3], v[ 3]); |
| 2111 u[ 4] = _mm_sub_epi32(u[ 4], v[ 4]); |
| 2112 u[ 5] = _mm_sub_epi32(u[ 5], v[ 5]); |
| 2113 u[ 6] = _mm_sub_epi32(u[ 6], v[ 6]); |
| 2114 u[ 7] = _mm_sub_epi32(u[ 7], v[ 7]); |
| 2115 u[ 8] = _mm_sub_epi32(u[ 8], v[ 8]); |
| 2116 u[ 9] = _mm_sub_epi32(u[ 9], v[ 9]); |
| 2117 u[10] = _mm_sub_epi32(u[10], v[10]); |
| 2118 u[11] = _mm_sub_epi32(u[11], v[11]); |
| 2119 u[12] = _mm_sub_epi32(u[12], v[12]); |
| 2120 u[13] = _mm_sub_epi32(u[13], v[13]); |
| 2121 u[14] = _mm_sub_epi32(u[14], v[14]); |
| 2122 u[15] = _mm_sub_epi32(u[15], v[15]); |
| 2123 |
| 2124 v[0] = _mm_add_epi32(u[0], K32One); |
| 2125 v[1] = _mm_add_epi32(u[1], K32One); |
| 2126 v[2] = _mm_add_epi32(u[2], K32One); |
| 2127 v[3] = _mm_add_epi32(u[3], K32One); |
| 2128 v[4] = _mm_add_epi32(u[4], K32One); |
| 2129 v[5] = _mm_add_epi32(u[5], K32One); |
| 2130 v[6] = _mm_add_epi32(u[6], K32One); |
| 2131 v[7] = _mm_add_epi32(u[7], K32One); |
| 2132 v[8] = _mm_add_epi32(u[8], K32One); |
| 2133 v[9] = _mm_add_epi32(u[9], K32One); |
| 2134 v[10] = _mm_add_epi32(u[10], K32One); |
| 2135 v[11] = _mm_add_epi32(u[11], K32One); |
| 2136 v[12] = _mm_add_epi32(u[12], K32One); |
| 2137 v[13] = _mm_add_epi32(u[13], K32One); |
| 2138 v[14] = _mm_add_epi32(u[14], K32One); |
| 2139 v[15] = _mm_add_epi32(u[15], K32One); |
| 2140 |
| 2141 u[0] = _mm_srai_epi32(v[0], 2); |
| 2142 u[1] = _mm_srai_epi32(v[1], 2); |
| 2143 u[2] = _mm_srai_epi32(v[2], 2); |
| 2144 u[3] = _mm_srai_epi32(v[3], 2); |
| 2145 u[4] = _mm_srai_epi32(v[4], 2); |
| 2146 u[5] = _mm_srai_epi32(v[5], 2); |
| 2147 u[6] = _mm_srai_epi32(v[6], 2); |
| 2148 u[7] = _mm_srai_epi32(v[7], 2); |
| 2149 u[8] = _mm_srai_epi32(v[8], 2); |
| 2150 u[9] = _mm_srai_epi32(v[9], 2); |
| 2151 u[10] = _mm_srai_epi32(v[10], 2); |
| 2152 u[11] = _mm_srai_epi32(v[11], 2); |
| 2153 u[12] = _mm_srai_epi32(v[12], 2); |
| 2154 u[13] = _mm_srai_epi32(v[13], 2); |
| 2155 u[14] = _mm_srai_epi32(v[14], 2); |
| 2156 u[15] = _mm_srai_epi32(v[15], 2); |
| 2157 |
| 2158 out[ 1] = _mm_packs_epi32(u[0], u[1]); |
| 2159 out[17] = _mm_packs_epi32(u[2], u[3]); |
| 2160 out[ 9] = _mm_packs_epi32(u[4], u[5]); |
| 2161 out[25] = _mm_packs_epi32(u[6], u[7]); |
| 2162 out[ 7] = _mm_packs_epi32(u[8], u[9]); |
| 2163 out[23] = _mm_packs_epi32(u[10], u[11]); |
| 2164 out[15] = _mm_packs_epi32(u[12], u[13]); |
| 2165 out[31] = _mm_packs_epi32(u[14], u[15]); |
| 2166 } |
| 2167 { |
| 2168 const __m128i k32_p27_p05 = pair_set_epi32(cospi_27_64, cospi_5_64); |
| 2169 const __m128i k32_p11_p21 = pair_set_epi32(cospi_11_64, cospi_21_64); |
| 2170 const __m128i k32_p19_p13 = pair_set_epi32(cospi_19_64, cospi_13_64); |
| 2171 const __m128i k32_p03_p29 = pair_set_epi32(cospi_3_64, cospi_29_64); |
| 2172 const __m128i k32_m29_p03 = pair_set_epi32(-cospi_29_64, cospi_3_64); |
| 2173 const __m128i k32_m13_p19 = pair_set_epi32(-cospi_13_64, cospi_19_64); |
| 2174 const __m128i k32_m21_p11 = pair_set_epi32(-cospi_21_64, cospi_11_64); |
| 2175 const __m128i k32_m05_p27 = pair_set_epi32(-cospi_5_64, cospi_27_64); |
| 2176 |
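|      // Same interleave / k_madd / rounding pipeline as the block above,
|      // now applied to lstep1[40..55] with the remaining odd cospi pairs.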
| 2177 u[ 0] = _mm_unpacklo_epi32(lstep1[40], lstep1[54]); |
| 2178 u[ 1] = _mm_unpackhi_epi32(lstep1[40], lstep1[54]); |
| 2179 u[ 2] = _mm_unpacklo_epi32(lstep1[41], lstep1[55]); |
| 2180 u[ 3] = _mm_unpackhi_epi32(lstep1[41], lstep1[55]); |
| 2181 u[ 4] = _mm_unpacklo_epi32(lstep1[42], lstep1[52]); |
| 2182 u[ 5] = _mm_unpackhi_epi32(lstep1[42], lstep1[52]); |
| 2183 u[ 6] = _mm_unpacklo_epi32(lstep1[43], lstep1[53]); |
| 2184 u[ 7] = _mm_unpackhi_epi32(lstep1[43], lstep1[53]); |
| 2185 u[ 8] = _mm_unpacklo_epi32(lstep1[44], lstep1[50]); |
| 2186 u[ 9] = _mm_unpackhi_epi32(lstep1[44], lstep1[50]); |
| 2187 u[10] = _mm_unpacklo_epi32(lstep1[45], lstep1[51]); |
| 2188 u[11] = _mm_unpackhi_epi32(lstep1[45], lstep1[51]); |
| 2189 u[12] = _mm_unpacklo_epi32(lstep1[46], lstep1[48]); |
| 2190 u[13] = _mm_unpackhi_epi32(lstep1[46], lstep1[48]); |
| 2191 u[14] = _mm_unpacklo_epi32(lstep1[47], lstep1[49]); |
| 2192 u[15] = _mm_unpackhi_epi32(lstep1[47], lstep1[49]); |
| 2193 |
| 2194 v[ 0] = k_madd_epi32(u[ 0], k32_p27_p05); |
| 2195 v[ 1] = k_madd_epi32(u[ 1], k32_p27_p05); |
| 2196 v[ 2] = k_madd_epi32(u[ 2], k32_p27_p05); |
| 2197 v[ 3] = k_madd_epi32(u[ 3], k32_p27_p05); |
| 2198 v[ 4] = k_madd_epi32(u[ 4], k32_p11_p21); |
| 2199 v[ 5] = k_madd_epi32(u[ 5], k32_p11_p21); |
| 2200 v[ 6] = k_madd_epi32(u[ 6], k32_p11_p21); |
| 2201 v[ 7] = k_madd_epi32(u[ 7], k32_p11_p21); |
| 2202 v[ 8] = k_madd_epi32(u[ 8], k32_p19_p13); |
| 2203 v[ 9] = k_madd_epi32(u[ 9], k32_p19_p13); |
| 2204 v[10] = k_madd_epi32(u[10], k32_p19_p13); |
| 2205 v[11] = k_madd_epi32(u[11], k32_p19_p13); |
| 2206 v[12] = k_madd_epi32(u[12], k32_p03_p29); |
| 2207 v[13] = k_madd_epi32(u[13], k32_p03_p29); |
| 2208 v[14] = k_madd_epi32(u[14], k32_p03_p29); |
| 2209 v[15] = k_madd_epi32(u[15], k32_p03_p29); |
| 2210 v[16] = k_madd_epi32(u[12], k32_m29_p03); |
| 2211 v[17] = k_madd_epi32(u[13], k32_m29_p03); |
| 2212 v[18] = k_madd_epi32(u[14], k32_m29_p03); |
| 2213 v[19] = k_madd_epi32(u[15], k32_m29_p03); |
| 2214 v[20] = k_madd_epi32(u[ 8], k32_m13_p19); |
| 2215 v[21] = k_madd_epi32(u[ 9], k32_m13_p19); |
| 2216 v[22] = k_madd_epi32(u[10], k32_m13_p19); |
| 2217 v[23] = k_madd_epi32(u[11], k32_m13_p19); |
| 2218 v[24] = k_madd_epi32(u[ 4], k32_m21_p11); |
| 2219 v[25] = k_madd_epi32(u[ 5], k32_m21_p11); |
| 2220 v[26] = k_madd_epi32(u[ 6], k32_m21_p11); |
| 2221 v[27] = k_madd_epi32(u[ 7], k32_m21_p11); |
| 2222 v[28] = k_madd_epi32(u[ 0], k32_m05_p27); |
| 2223 v[29] = k_madd_epi32(u[ 1], k32_m05_p27); |
| 2224 v[30] = k_madd_epi32(u[ 2], k32_m05_p27); |
| 2225 v[31] = k_madd_epi32(u[ 3], k32_m05_p27); |
| 2226 |
| 2227 u[ 0] = k_packs_epi64(v[ 0], v[ 1]); |
| 2228 u[ 1] = k_packs_epi64(v[ 2], v[ 3]); |
| 2229 u[ 2] = k_packs_epi64(v[ 4], v[ 5]); |
| 2230 u[ 3] = k_packs_epi64(v[ 6], v[ 7]); |
| 2231 u[ 4] = k_packs_epi64(v[ 8], v[ 9]); |
| 2232 u[ 5] = k_packs_epi64(v[10], v[11]); |
| 2233 u[ 6] = k_packs_epi64(v[12], v[13]); |
| 2234 u[ 7] = k_packs_epi64(v[14], v[15]); |
| 2235 u[ 8] = k_packs_epi64(v[16], v[17]); |
| 2236 u[ 9] = k_packs_epi64(v[18], v[19]); |
| 2237 u[10] = k_packs_epi64(v[20], v[21]); |
| 2238 u[11] = k_packs_epi64(v[22], v[23]); |
| 2239 u[12] = k_packs_epi64(v[24], v[25]); |
| 2240 u[13] = k_packs_epi64(v[26], v[27]); |
| 2241 u[14] = k_packs_epi64(v[28], v[29]); |
| 2242 u[15] = k_packs_epi64(v[30], v[31]); |
| 2243 |
| 2244 v[ 0] = _mm_add_epi32(u[ 0], k__DCT_CONST_ROUNDING); |
| 2245 v[ 1] = _mm_add_epi32(u[ 1], k__DCT_CONST_ROUNDING); |
| 2246 v[ 2] = _mm_add_epi32(u[ 2], k__DCT_CONST_ROUNDING); |
| 2247 v[ 3] = _mm_add_epi32(u[ 3], k__DCT_CONST_ROUNDING); |
| 2248 v[ 4] = _mm_add_epi32(u[ 4], k__DCT_CONST_ROUNDING); |
| 2249 v[ 5] = _mm_add_epi32(u[ 5], k__DCT_CONST_ROUNDING); |
| 2250 v[ 6] = _mm_add_epi32(u[ 6], k__DCT_CONST_ROUNDING); |
| 2251 v[ 7] = _mm_add_epi32(u[ 7], k__DCT_CONST_ROUNDING); |
| 2252 v[ 8] = _mm_add_epi32(u[ 8], k__DCT_CONST_ROUNDING); |
| 2253 v[ 9] = _mm_add_epi32(u[ 9], k__DCT_CONST_ROUNDING); |
| 2254 v[10] = _mm_add_epi32(u[10], k__DCT_CONST_ROUNDING); |
| 2255 v[11] = _mm_add_epi32(u[11], k__DCT_CONST_ROUNDING); |
| 2256 v[12] = _mm_add_epi32(u[12], k__DCT_CONST_ROUNDING); |
| 2257 v[13] = _mm_add_epi32(u[13], k__DCT_CONST_ROUNDING); |
| 2258 v[14] = _mm_add_epi32(u[14], k__DCT_CONST_ROUNDING); |
| 2259 v[15] = _mm_add_epi32(u[15], k__DCT_CONST_ROUNDING); |
| 2260 |
| 2261 u[ 0] = _mm_srai_epi32(v[ 0], DCT_CONST_BITS); |
| 2262 u[ 1] = _mm_srai_epi32(v[ 1], DCT_CONST_BITS); |
| 2263 u[ 2] = _mm_srai_epi32(v[ 2], DCT_CONST_BITS); |
| 2264 u[ 3] = _mm_srai_epi32(v[ 3], DCT_CONST_BITS); |
| 2265 u[ 4] = _mm_srai_epi32(v[ 4], DCT_CONST_BITS); |
| 2266 u[ 5] = _mm_srai_epi32(v[ 5], DCT_CONST_BITS); |
| 2267 u[ 6] = _mm_srai_epi32(v[ 6], DCT_CONST_BITS); |
| 2268 u[ 7] = _mm_srai_epi32(v[ 7], DCT_CONST_BITS); |
| 2269 u[ 8] = _mm_srai_epi32(v[ 8], DCT_CONST_BITS); |
| 2270 u[ 9] = _mm_srai_epi32(v[ 9], DCT_CONST_BITS); |
| 2271 u[10] = _mm_srai_epi32(v[10], DCT_CONST_BITS); |
| 2272 u[11] = _mm_srai_epi32(v[11], DCT_CONST_BITS); |
| 2273 u[12] = _mm_srai_epi32(v[12], DCT_CONST_BITS); |
| 2274 u[13] = _mm_srai_epi32(v[13], DCT_CONST_BITS); |
| 2275 u[14] = _mm_srai_epi32(v[14], DCT_CONST_BITS); |
| 2276 u[15] = _mm_srai_epi32(v[15], DCT_CONST_BITS); |
| 2277 |
| 2278 v[ 0] = _mm_cmplt_epi32(u[ 0], kZero); |
| 2279 v[ 1] = _mm_cmplt_epi32(u[ 1], kZero); |
| 2280 v[ 2] = _mm_cmplt_epi32(u[ 2], kZero); |
| 2281 v[ 3] = _mm_cmplt_epi32(u[ 3], kZero); |
| 2282 v[ 4] = _mm_cmplt_epi32(u[ 4], kZero); |
| 2283 v[ 5] = _mm_cmplt_epi32(u[ 5], kZero); |
| 2284 v[ 6] = _mm_cmplt_epi32(u[ 6], kZero); |
| 2285 v[ 7] = _mm_cmplt_epi32(u[ 7], kZero); |
| 2286 v[ 8] = _mm_cmplt_epi32(u[ 8], kZero); |
| 2287 v[ 9] = _mm_cmplt_epi32(u[ 9], kZero); |
| 2288 v[10] = _mm_cmplt_epi32(u[10], kZero); |
| 2289 v[11] = _mm_cmplt_epi32(u[11], kZero); |
| 2290 v[12] = _mm_cmplt_epi32(u[12], kZero); |
| 2291 v[13] = _mm_cmplt_epi32(u[13], kZero); |
| 2292 v[14] = _mm_cmplt_epi32(u[14], kZero); |
| 2293 v[15] = _mm_cmplt_epi32(u[15], kZero); |
| 2294 |
| 2295 u[ 0] = _mm_sub_epi32(u[ 0], v[ 0]); |
| 2296 u[ 1] = _mm_sub_epi32(u[ 1], v[ 1]); |
| 2297 u[ 2] = _mm_sub_epi32(u[ 2], v[ 2]); |
| 2298 u[ 3] = _mm_sub_epi32(u[ 3], v[ 3]); |
| 2299 u[ 4] = _mm_sub_epi32(u[ 4], v[ 4]); |
| 2300 u[ 5] = _mm_sub_epi32(u[ 5], v[ 5]); |
| 2301 u[ 6] = _mm_sub_epi32(u[ 6], v[ 6]); |
| 2302 u[ 7] = _mm_sub_epi32(u[ 7], v[ 7]); |
| 2303 u[ 8] = _mm_sub_epi32(u[ 8], v[ 8]); |
| 2304 u[ 9] = _mm_sub_epi32(u[ 9], v[ 9]); |
| 2305 u[10] = _mm_sub_epi32(u[10], v[10]); |
| 2306 u[11] = _mm_sub_epi32(u[11], v[11]); |
| 2307 u[12] = _mm_sub_epi32(u[12], v[12]); |
| 2308 u[13] = _mm_sub_epi32(u[13], v[13]); |
| 2309 u[14] = _mm_sub_epi32(u[14], v[14]); |
| 2310 u[15] = _mm_sub_epi32(u[15], v[15]); |
| 2311 |
| 2312 v[0] = _mm_add_epi32(u[0], K32One); |
| 2313 v[1] = _mm_add_epi32(u[1], K32One); |
| 2314 v[2] = _mm_add_epi32(u[2], K32One); |
| 2315 v[3] = _mm_add_epi32(u[3], K32One); |
| 2316 v[4] = _mm_add_epi32(u[4], K32One); |
| 2317 v[5] = _mm_add_epi32(u[5], K32One); |
| 2318 v[6] = _mm_add_epi32(u[6], K32One); |
| 2319 v[7] = _mm_add_epi32(u[7], K32One); |
| 2320 v[8] = _mm_add_epi32(u[8], K32One); |
| 2321 v[9] = _mm_add_epi32(u[9], K32One); |
| 2322 v[10] = _mm_add_epi32(u[10], K32One); |
| 2323 v[11] = _mm_add_epi32(u[11], K32One); |
| 2324 v[12] = _mm_add_epi32(u[12], K32One); |
| 2325 v[13] = _mm_add_epi32(u[13], K32One); |
| 2326 v[14] = _mm_add_epi32(u[14], K32One); |
| 2327 v[15] = _mm_add_epi32(u[15], K32One); |
| 2328 |
| 2329 u[0] = _mm_srai_epi32(v[0], 2); |
| 2330 u[1] = _mm_srai_epi32(v[1], 2); |
| 2331 u[2] = _mm_srai_epi32(v[2], 2); |
| 2332 u[3] = _mm_srai_epi32(v[3], 2); |
| 2333 u[4] = _mm_srai_epi32(v[4], 2); |
| 2334 u[5] = _mm_srai_epi32(v[5], 2); |
| 2335 u[6] = _mm_srai_epi32(v[6], 2); |
| 2336 u[7] = _mm_srai_epi32(v[7], 2); |
| 2337 u[8] = _mm_srai_epi32(v[8], 2); |
| 2338 u[9] = _mm_srai_epi32(v[9], 2); |
| 2339 u[10] = _mm_srai_epi32(v[10], 2); |
| 2340 u[11] = _mm_srai_epi32(v[11], 2); |
| 2341 u[12] = _mm_srai_epi32(v[12], 2); |
| 2342 u[13] = _mm_srai_epi32(v[13], 2); |
| 2343 u[14] = _mm_srai_epi32(v[14], 2); |
| 2344 u[15] = _mm_srai_epi32(v[15], 2); |
| 2345 |
| 2346 out[ 5] = _mm_packs_epi32(u[0], u[1]); |
| 2347 out[21] = _mm_packs_epi32(u[2], u[3]); |
| 2348 out[13] = _mm_packs_epi32(u[4], u[5]); |
| 2349 out[29] = _mm_packs_epi32(u[6], u[7]); |
| 2350 out[ 3] = _mm_packs_epi32(u[8], u[9]); |
| 2351 out[19] = _mm_packs_epi32(u[10], u[11]); |
| 2352 out[11] = _mm_packs_epi32(u[12], u[13]); |
| 2353 out[27] = _mm_packs_epi32(u[14], u[15]); |
| 2354 } |
| 2355 } |
| 2356 #endif |
| 2357   // Transpose the results; this is done as four 8x8 transposes.
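|    // Each 8x8 transpose takes three rounds of interleaving: 16-bit unpacks
|    // build 2x2 tiles, 32-bit unpacks build 4x4 tiles, and 64-bit unpacks
|    // complete the 8x8, giving out[i][j] = in[j][i] for each tile.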
| 2358 { |
| 2359 int transpose_block; |
| 2360 int16_t *output; |
| 2361 if (0 == pass) { |
| 2362 output = &intermediate[column_start * 32]; |
| 2363 } else { |
| 2364 output = &output_org[column_start * 32]; |
| 2365 } |
| 2366 for (transpose_block = 0; transpose_block < 4; ++transpose_block) { |
| 2367 __m128i *this_out = &out[8 * transpose_block]; |
| 2368 // 00 01 02 03 04 05 06 07 |
| 2369 // 10 11 12 13 14 15 16 17 |
| 2370 // 20 21 22 23 24 25 26 27 |
| 2371 // 30 31 32 33 34 35 36 37 |
| 2372 // 40 41 42 43 44 45 46 47 |
| 2373 // 50 51 52 53 54 55 56 57 |
| 2374 // 60 61 62 63 64 65 66 67 |
| 2375 // 70 71 72 73 74 75 76 77 |
| 2376 const __m128i tr0_0 = _mm_unpacklo_epi16(this_out[0], this_out[1]); |
| 2377 const __m128i tr0_1 = _mm_unpacklo_epi16(this_out[2], this_out[3]); |
| 2378 const __m128i tr0_2 = _mm_unpackhi_epi16(this_out[0], this_out[1]); |
| 2379 const __m128i tr0_3 = _mm_unpackhi_epi16(this_out[2], this_out[3]); |
| 2380 const __m128i tr0_4 = _mm_unpacklo_epi16(this_out[4], this_out[5]); |
| 2381 const __m128i tr0_5 = _mm_unpacklo_epi16(this_out[6], this_out[7]); |
| 2382 const __m128i tr0_6 = _mm_unpackhi_epi16(this_out[4], this_out[5]); |
| 2383 const __m128i tr0_7 = _mm_unpackhi_epi16(this_out[6], this_out[7]); |
| 2384 // 00 10 01 11 02 12 03 13 |
| 2385 // 20 30 21 31 22 32 23 33 |
| 2386 // 04 14 05 15 06 16 07 17 |
| 2387 // 24 34 25 35 26 36 27 37 |
| 2388 // 40 50 41 51 42 52 43 53 |
| 2389 // 60 70 61 71 62 72 63 73 |
| 2390       // 44 54 45 55 46 56 47 57
| 2391 // 64 74 65 75 66 76 67 77 |
| 2392 const __m128i tr1_0 = _mm_unpacklo_epi32(tr0_0, tr0_1); |
| 2393 const __m128i tr1_1 = _mm_unpacklo_epi32(tr0_2, tr0_3); |
| 2394 const __m128i tr1_2 = _mm_unpackhi_epi32(tr0_0, tr0_1); |
| 2395 const __m128i tr1_3 = _mm_unpackhi_epi32(tr0_2, tr0_3); |
| 2396 const __m128i tr1_4 = _mm_unpacklo_epi32(tr0_4, tr0_5); |
| 2397 const __m128i tr1_5 = _mm_unpacklo_epi32(tr0_6, tr0_7); |
| 2398 const __m128i tr1_6 = _mm_unpackhi_epi32(tr0_4, tr0_5); |
| 2399 const __m128i tr1_7 = _mm_unpackhi_epi32(tr0_6, tr0_7); |
| 2400 // 00 10 20 30 01 11 21 31 |
| 2401 // 40 50 60 70 41 51 61 71 |
| 2402 // 02 12 22 32 03 13 23 33 |
| 2403 // 42 52 62 72 43 53 63 73 |
| 2404       // 04 14 24 34 05 15 25 35
| 2405       // 44 54 64 74 45 55 65 75
| 2406 // 06 16 26 36 07 17 27 37 |
| 2407 // 46 56 66 76 47 57 67 77 |
| 2408 __m128i tr2_0 = _mm_unpacklo_epi64(tr1_0, tr1_4); |
| 2409 __m128i tr2_1 = _mm_unpackhi_epi64(tr1_0, tr1_4); |
| 2410 __m128i tr2_2 = _mm_unpacklo_epi64(tr1_2, tr1_6); |
| 2411 __m128i tr2_3 = _mm_unpackhi_epi64(tr1_2, tr1_6); |
| 2412 __m128i tr2_4 = _mm_unpacklo_epi64(tr1_1, tr1_5); |
| 2413 __m128i tr2_5 = _mm_unpackhi_epi64(tr1_1, tr1_5); |
| 2414 __m128i tr2_6 = _mm_unpacklo_epi64(tr1_3, tr1_7); |
| 2415 __m128i tr2_7 = _mm_unpackhi_epi64(tr1_3, tr1_7); |
| 2416 // 00 10 20 30 40 50 60 70 |
| 2417 // 01 11 21 31 41 51 61 71 |
| 2418 // 02 12 22 32 42 52 62 72 |
| 2419 // 03 13 23 33 43 53 63 73 |
| 2420 // 04 14 24 34 44 54 64 74 |
| 2421 // 05 15 25 35 45 55 65 75 |
| 2422 // 06 16 26 36 46 56 66 76 |
| 2423 // 07 17 27 37 47 57 67 77 |
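|        // tr2_0..tr2_7 now hold the eight transposed rows shown above.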
| 2424 if (0 == pass) { |
| 2425 // output[j] = (output[j] + 1 + (output[j] > 0)) >> 2; |
| 2426 // TODO(cd): see quality impact of only doing |
| 2427 // output[j] = (output[j] + 1) >> 2; |
| 2428 // which would remove the code between here ... |
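|          // (The cmpgt/sub pairs below add one to strictly positive lanes:
|          // the compare mask is all ones, i.e. -1, wherever tr2_N > 0.)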
| 2429 __m128i tr2_0_0 = _mm_cmpgt_epi16(tr2_0, kZero); |
| 2430 __m128i tr2_1_0 = _mm_cmpgt_epi16(tr2_1, kZero); |
| 2431 __m128i tr2_2_0 = _mm_cmpgt_epi16(tr2_2, kZero); |
| 2432 __m128i tr2_3_0 = _mm_cmpgt_epi16(tr2_3, kZero); |
| 2433 __m128i tr2_4_0 = _mm_cmpgt_epi16(tr2_4, kZero); |
| 2434 __m128i tr2_5_0 = _mm_cmpgt_epi16(tr2_5, kZero); |
| 2435 __m128i tr2_6_0 = _mm_cmpgt_epi16(tr2_6, kZero); |
| 2436 __m128i tr2_7_0 = _mm_cmpgt_epi16(tr2_7, kZero); |
| 2437 tr2_0 = _mm_sub_epi16(tr2_0, tr2_0_0); |
| 2438 tr2_1 = _mm_sub_epi16(tr2_1, tr2_1_0); |
| 2439 tr2_2 = _mm_sub_epi16(tr2_2, tr2_2_0); |
| 2440 tr2_3 = _mm_sub_epi16(tr2_3, tr2_3_0); |
| 2441 tr2_4 = _mm_sub_epi16(tr2_4, tr2_4_0); |
| 2442 tr2_5 = _mm_sub_epi16(tr2_5, tr2_5_0); |
| 2443 tr2_6 = _mm_sub_epi16(tr2_6, tr2_6_0); |
| 2444 tr2_7 = _mm_sub_epi16(tr2_7, tr2_7_0); |
| 2445 // ... and here. |
| 2446 // PS: also change code in vp9/encoder/vp9_dct.c |
| 2447 tr2_0 = _mm_add_epi16(tr2_0, kOne); |
| 2448 tr2_1 = _mm_add_epi16(tr2_1, kOne); |
| 2449 tr2_2 = _mm_add_epi16(tr2_2, kOne); |
| 2450 tr2_3 = _mm_add_epi16(tr2_3, kOne); |
| 2451 tr2_4 = _mm_add_epi16(tr2_4, kOne); |
| 2452 tr2_5 = _mm_add_epi16(tr2_5, kOne); |
| 2453 tr2_6 = _mm_add_epi16(tr2_6, kOne); |
| 2454 tr2_7 = _mm_add_epi16(tr2_7, kOne); |
| 2455 tr2_0 = _mm_srai_epi16(tr2_0, 2); |
| 2456 tr2_1 = _mm_srai_epi16(tr2_1, 2); |
| 2457 tr2_2 = _mm_srai_epi16(tr2_2, 2); |
| 2458 tr2_3 = _mm_srai_epi16(tr2_3, 2); |
| 2459 tr2_4 = _mm_srai_epi16(tr2_4, 2); |
| 2460 tr2_5 = _mm_srai_epi16(tr2_5, 2); |
| 2461 tr2_6 = _mm_srai_epi16(tr2_6, 2); |
| 2462 tr2_7 = _mm_srai_epi16(tr2_7, 2); |
| 2463 } |
| 2464 // Note: even though all these stores are aligned, using the aligned |
| 2465       // intrinsic makes the code slightly slower.
| 2466 _mm_storeu_si128((__m128i *)(output + 0 * 32), tr2_0); |
| 2467 _mm_storeu_si128((__m128i *)(output + 1 * 32), tr2_1); |
| 2468 _mm_storeu_si128((__m128i *)(output + 2 * 32), tr2_2); |
| 2469 _mm_storeu_si128((__m128i *)(output + 3 * 32), tr2_3); |
| 2470 _mm_storeu_si128((__m128i *)(output + 4 * 32), tr2_4); |
| 2471 _mm_storeu_si128((__m128i *)(output + 5 * 32), tr2_5); |
| 2472 _mm_storeu_si128((__m128i *)(output + 6 * 32), tr2_6); |
| 2473 _mm_storeu_si128((__m128i *)(output + 7 * 32), tr2_7); |
| 2474 // Process next 8x8 |
| 2475 output += 8; |
| 2476 } |
| 2477 } |
| 2478 } |
| 2479 } |
| 2480 } |
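| 
| /* Usage sketch (hypothetical caller, not part of this file): the function
|  * reads a 32x32 block of int16_t samples and writes 32x32 coefficients.
|  * Assuming `pitch` is the input stride in bytes (the element row stride is
|  * derived as pitch >> 1), a tightly packed block would be processed as:
|  *
|  *   DECLARE_ALIGNED(16, int16_t, in[32 * 32]);
|  *   DECLARE_ALIGNED(16, int16_t, coeff[32 * 32]);
|  *   // ... fill `in` with prediction residuals ...
|  *   FDCT32x32_2D(in, coeff, 32 * sizeof(int16_t));
|  */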