OLD | NEW |
1 /* | 1 /* |
2 * Copyright (c) 2012 The WebM project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebM project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
11 #include <emmintrin.h> // SSE2 | 11 #include <emmintrin.h> // SSE2 |
12 #include "vp9/common/vp9_idct.h" // for cospi constants | 12 #include "vp9/common/vp9_idct.h" // for cospi constants |
13 #include "vpx_ports/mem.h" | 13 #include "vpx_ports/mem.h" |
14 | 14 |
15 void vp9_fdct4x4_sse2(const int16_t *input, int16_t *output, int stride) { | 15 void vp9_fdct4x4_sse2(const int16_t *input, int16_t *output, int stride) { |
16 // The 2D transform is done with two passes which are actually pretty | 16 // This 2D transform implements 4 vertical 1D transforms followed |
17 // similar. In the first one, we transform the columns and transpose | 17 // by 4 horizontal 1D transforms. The multiplies and adds are as given |
18 // the results. In the second one, we transform the rows. To achieve that, | 18 // by Chen, Smith and Fralick ('77). The commands for moving the data |
19 // as the first pass results are transposed, we transpose the columns (that | 19 // around have been minimized by hand. |
20 // is the transposed rows) and transpose the results (so that it goes back | 20 // For the purposes of the comments, the 16 inputs are referred to as i0 |
21 // in normal/row positions). | 21 // through iF (in raster order), intermediate variables are a0, b0, c0 |
22 int pass; | 22 // through f, and correspond to the in-place computations mapped to input |
| 23 // locations. The outputs, o0 through oF are labeled according to the |
| 24 // output locations. |
| 25 |
23 // Constants | 26 // Constants |
24 // When we use them, in one case, they are all the same. In all others | 27 // These are the coefficients used for the multiplies. |
25 // it's a pair of them that we need to repeat four times. This is done | 28 // In the comments, pN means cos(N pi /64) and mN is -cos(N pi /64), |
26 // by constructing the 32 bit constant corresponding to that pair. | 29 // where cospi_N_64 = cos(N pi /64) |
27 const __m128i k__cospi_p16_p16 = _mm_set1_epi16(cospi_16_64); | 30 const __m128i k__cospi_A = _mm_setr_epi16(cospi_16_64, cospi_16_64, |
28 const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64); | 31 cospi_16_64, cospi_16_64, |
29 const __m128i k__cospi_p08_p24 = pair_set_epi16(cospi_8_64, cospi_24_64); | 32 cospi_16_64, -cospi_16_64, |
30 const __m128i k__cospi_p24_m08 = pair_set_epi16(cospi_24_64, -cospi_8_64); | 33 cospi_16_64, -cospi_16_64); |
| 34 const __m128i k__cospi_B = _mm_setr_epi16(cospi_16_64, -cospi_16_64, |
| 35 cospi_16_64, -cospi_16_64, |
| 36 cospi_16_64, cospi_16_64, |
| 37 cospi_16_64, cospi_16_64); |
| 38 const __m128i k__cospi_C = _mm_setr_epi16(cospi_8_64, cospi_24_64, |
| 39 cospi_8_64, cospi_24_64, |
| 40 cospi_24_64, -cospi_8_64, |
| 41 cospi_24_64, -cospi_8_64); |
| 42 const __m128i k__cospi_D = _mm_setr_epi16(cospi_24_64, -cospi_8_64, |
| 43 cospi_24_64, -cospi_8_64, |
| 44 cospi_8_64, cospi_24_64, |
| 45 cospi_8_64, cospi_24_64); |
| 46 const __m128i k__cospi_E = _mm_setr_epi16(cospi_16_64, cospi_16_64, |
| 47 cospi_16_64, cospi_16_64, |
| 48 cospi_16_64, cospi_16_64, |
| 49 cospi_16_64, cospi_16_64); |
| 50 const __m128i k__cospi_F = _mm_setr_epi16(cospi_16_64, -cospi_16_64, |
| 51 cospi_16_64, -cospi_16_64, |
| 52 cospi_16_64, -cospi_16_64, |
| 53 cospi_16_64, -cospi_16_64); |
| 54 const __m128i k__cospi_G = _mm_setr_epi16(cospi_8_64, cospi_24_64, |
| 55 cospi_8_64, cospi_24_64, |
| 56 -cospi_8_64, -cospi_24_64, |
| 57 -cospi_8_64, -cospi_24_64); |
| 58 const __m128i k__cospi_H = _mm_setr_epi16(cospi_24_64, -cospi_8_64, |
| 59 cospi_24_64, -cospi_8_64, |
| 60 -cospi_24_64, cospi_8_64, |
| 61 -cospi_24_64, cospi_8_64); |
| 62 |
31 const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING); | 63 const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING); |
| 64 // This second rounding constant saves doing some extra adds at the end |
| 65 const __m128i k__DCT_CONST_ROUNDING2 = _mm_set1_epi32(DCT_CONST_ROUNDING |
| 66 +(DCT_CONST_ROUNDING << 1)); |
| 67 const int DCT_CONST_BITS2 = DCT_CONST_BITS+2; |
32 const __m128i k__nonzero_bias_a = _mm_setr_epi16(0, 1, 1, 1, 1, 1, 1, 1); | 68 const __m128i k__nonzero_bias_a = _mm_setr_epi16(0, 1, 1, 1, 1, 1, 1, 1); |
33 const __m128i k__nonzero_bias_b = _mm_setr_epi16(1, 0, 0, 0, 0, 0, 0, 0); | 69 const __m128i k__nonzero_bias_b = _mm_setr_epi16(1, 0, 0, 0, 0, 0, 0, 0); |
34 const __m128i kOne = _mm_set1_epi16(1); | |
35 __m128i in0, in1; | 70 __m128i in0, in1; |
| 71 |
36 // Load inputs. | 72 // Load inputs. |
37 { | 73 { |
38 in0 = _mm_loadl_epi64((const __m128i *)(input + 0 * stride)); | 74 in0 = _mm_loadl_epi64((const __m128i *)(input + 0 * stride)); |
| 75 in1 = _mm_loadl_epi64((const __m128i *)(input + 1 * stride)); |
| 76 in1 = _mm_unpacklo_epi64(in1, _mm_loadl_epi64((const __m128i *) |
| 77 (input + 2 * stride))); |
39 in0 = _mm_unpacklo_epi64(in0, _mm_loadl_epi64((const __m128i *) | 78 in0 = _mm_unpacklo_epi64(in0, _mm_loadl_epi64((const __m128i *) |
40 (input + 1 * stride))); | 79 (input + 3 * stride))); |
41 in1 = _mm_loadl_epi64((const __m128i *)(input + 2 * stride)); | 80 // in0 = [i0 i1 i2 i3 iC iD iE iF] |
42 in1 = _mm_unpacklo_epi64(_mm_loadl_epi64((const __m128i *) | 81 // in1 = [i4 i5 i6 i7 i8 i9 iA iB] |
43 (input + 3 * stride)), in1); | |
44 | 82 |
45 // x = x << 4 | 83 |
| 84 // multiply by 16 to give some extra precision |
46 in0 = _mm_slli_epi16(in0, 4); | 85 in0 = _mm_slli_epi16(in0, 4); |
47 in1 = _mm_slli_epi16(in1, 4); | 86 in1 = _mm_slli_epi16(in1, 4); |
48 // if (i == 0 && input[0]) input[0] += 1; | 87 // if (i == 0 && input[0]) input[0] += 1; |
| 88 // add 1 to the upper left pixel if it is non-zero, which helps reduce |
| 89 // the round-trip error |
49 { | 90 { |
50 // The mask will only contain whether the first value is zero, all | 91 // The mask will only contain whether the first value is zero, all |
51 // other comparison will fail as something shifted by 4 (above << 4) | 92 // other comparison will fail as something shifted by 4 (above << 4) |
52 // can never be equal to one. To increment in the non-zero case, we | 93 // can never be equal to one. To increment in the non-zero case, we |
53 // add the mask and one for the first element: | 94 // add the mask and one for the first element: |
54 // - if zero, mask = -1, v = v - 1 + 1 = v | 95 // - if zero, mask = -1, v = v - 1 + 1 = v |
55 // - if non-zero, mask = 0, v = v + 0 + 1 = v + 1 | 96 // - if non-zero, mask = 0, v = v + 0 + 1 = v + 1 |
56 __m128i mask = _mm_cmpeq_epi16(in0, k__nonzero_bias_a); | 97 __m128i mask = _mm_cmpeq_epi16(in0, k__nonzero_bias_a); |
57 in0 = _mm_add_epi16(in0, mask); | 98 in0 = _mm_add_epi16(in0, mask); |
58 in0 = _mm_add_epi16(in0, k__nonzero_bias_b); | 99 in0 = _mm_add_epi16(in0, k__nonzero_bias_b); |
59 } | 100 } |
60 } | 101 } |
61 // Do the two transform/transpose passes | 102 // There are 4 total stages, alternating between an add/subtract stage |
62 for (pass = 0; pass < 2; ++pass) { | 103 // followed by a multiply-and-add stage. |
63 // Transform 1/2: Add/subtract | 104 { |
64 const __m128i r0 = _mm_add_epi16(in0, in1); | 105 // Stage 1: Add/subtract |
65 const __m128i r1 = _mm_sub_epi16(in0, in1); | 106 |
66 const __m128i r2 = _mm_unpacklo_epi64(r0, r1); | 107 // in0 = [i0 i1 i2 i3 iC iD iE iF] |
67 const __m128i r3 = _mm_unpackhi_epi64(r0, r1); | 108 // in1 = [i4 i5 i6 i7 i8 i9 iA iB] |
68 // Transform 1/2: Interleave to do the multiply by constants which gets us | 109 const __m128i r0 = _mm_unpacklo_epi16(in0, in1); |
69 // into 32 bits. | 110 const __m128i r1 = _mm_unpackhi_epi16(in0, in1); |
70 const __m128i t0 = _mm_unpacklo_epi16(r2, r3); | 111 // r0 = [i0 i4 i1 i5 i2 i6 i3 i7] |
71 const __m128i t2 = _mm_unpackhi_epi16(r2, r3); | 112 // r1 = [iC i8 iD i9 iE iA iF iB] |
72 const __m128i u0 = _mm_madd_epi16(t0, k__cospi_p16_p16); | 113 const __m128i r2 = _mm_shuffle_epi32(r0, 0xB4); |
73 const __m128i u2 = _mm_madd_epi16(t0, k__cospi_p16_m16); | 114 const __m128i r3 = _mm_shuffle_epi32(r1, 0xB4); |
74 const __m128i u4 = _mm_madd_epi16(t2, k__cospi_p08_p24); | 115 // r2 = [i0 i4 i1 i5 i3 i7 i2 i6] |
75 const __m128i u6 = _mm_madd_epi16(t2, k__cospi_p24_m08); | 116 // r3 = [iC i8 iD i9 iF iB iE iA] |
| 117 |
| 118 const __m128i t0 = _mm_add_epi16(r2, r3); |
| 119 const __m128i t1 = _mm_sub_epi16(r2, r3); |
| 120 // t0 = [a0 a4 a1 a5 a3 a7 a2 a6] |
| 121 // t1 = [aC a8 aD a9 aF aB aE aA] |
| 122 |
| 123 // Stage 2: multiply by constants (which gets us into 32 bits). |
| 124 // The constants needed here are: |
| 125 // k__cospi_A = [p16 p16 p16 p16 p16 m16 p16 m16] |
| 126 // k__cospi_B = [p16 m16 p16 m16 p16 p16 p16 p16] |
| 127 // k__cospi_C = [p08 p24 p08 p24 p24 m08 p24 m08] |
| 128 // k__cospi_D = [p24 m08 p24 m08 p08 p24 p08 p24] |
| 129 const __m128i u0 = _mm_madd_epi16(t0, k__cospi_A); |
| 130 const __m128i u2 = _mm_madd_epi16(t0, k__cospi_B); |
| 131 const __m128i u1 = _mm_madd_epi16(t1, k__cospi_C); |
| 132 const __m128i u3 = _mm_madd_epi16(t1, k__cospi_D); |
| 133 // Then add and right-shift to get back to 16-bit range |
76 const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING); | 134 const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING); |
| 135 const __m128i v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING); |
77 const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING); | 136 const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING); |
78 const __m128i v4 = _mm_add_epi32(u4, k__DCT_CONST_ROUNDING); | 137 const __m128i v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING); |
79 const __m128i v6 = _mm_add_epi32(u6, k__DCT_CONST_ROUNDING); | |
80 const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS); | 138 const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS); |
| 139 const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS); |
81 const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS); | 140 const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS); |
82 const __m128i w4 = _mm_srai_epi32(v4, DCT_CONST_BITS); | 141 const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS); |
83 const __m128i w6 = _mm_srai_epi32(v6, DCT_CONST_BITS); | 142 // w0 = [b0 b1 b7 b6] |
84 // Combine and transpose | 143 // w1 = [b8 b9 bF bE] |
85 const __m128i res0 = _mm_packs_epi32(w0, w2); | 144 // w2 = [b4 b5 b3 b2] |
86 const __m128i res1 = _mm_packs_epi32(w4, w6); | 145 // w3 = [bC bD bB bA] |
87 // 00 01 02 03 20 21 22 23 | 146 const __m128i x0 = _mm_packs_epi32(w0, w1); |
88 // 10 11 12 13 30 31 32 33 | 147 const __m128i x1 = _mm_packs_epi32(w2, w3); |
89 const __m128i tr0_0 = _mm_unpacklo_epi16(res0, res1); | 148 // x0 = [b0 b1 b7 b6 b8 b9 bF bE] |
90 const __m128i tr0_1 = _mm_unpackhi_epi16(res0, res1); | 149 // x1 = [b4 b5 b3 b2 bC bD bB bA] |
91 // 00 10 01 11 02 12 03 13 | 150 in0 = _mm_shuffle_epi32(x0, 0xD8); |
92 // 20 30 21 31 22 32 23 33 | 151 in1 = _mm_shuffle_epi32(x1, 0x8D); |
93 in0 = _mm_unpacklo_epi32(tr0_0, tr0_1); | 152 // in0 = [b0 b1 b8 b9 b7 b6 bF bE] |
94 in1 = _mm_unpackhi_epi32(tr0_0, tr0_1); | 153 // in1 = [b3 b2 bB bA b4 b5 bC bD] |
95 in1 = _mm_shuffle_epi32(in1, 0x4E); | |
96 // 00 10 20 30 01 11 21 31 in0 contains 0 followed by 1 | |
97 // 02 12 22 32 03 13 23 33 in1 contains 2 followed by 3 | |
98 } | 154 } |
99 in1 = _mm_shuffle_epi32(in1, 0x4E); | |
100 // Post-condition output and store it (v + 1) >> 2, taking advantage | |
101 // of the fact 1/3 are stored just after 0/2. | |
102 { | 155 { |
103 __m128i out01 = _mm_add_epi16(in0, kOne); | 156 // vertical DCTs finished. Now we do the horizontal DCTs. |
104 __m128i out23 = _mm_add_epi16(in1, kOne); | 157 // Stage 3: Add/subtract |
105 out01 = _mm_srai_epi16(out01, 2); | 158 |
106 out23 = _mm_srai_epi16(out23, 2); | 159 const __m128i t0 = _mm_add_epi16(in0, in1); |
107 _mm_storeu_si128((__m128i *)(output + 0 * 4), out01); | 160 const __m128i t1 = _mm_sub_epi16(in0, in1); |
108 _mm_storeu_si128((__m128i *)(output + 2 * 4), out23); | 161 // t0 = [c0 c1 c8 c9 c4 c5 cC cD] |
| 162 // t1 = [c3 c2 cB cA -c7 -c6 -cF -cE] |
| 163 |
| 164 // Stage 4: multiply by constants (which gets us into 32 bits). |
| 165 // The constants needed here are: |
| 166 // k__cospi_E = [p16 p16 p16 p16 p16 p16 p16 p16] |
| 167 // k__cospi_F = [p16 m16 p16 m16 p16 m16 p16 m16] |
| 168 // k__cospi_G = [p08 p24 p08 p24 m08 m24 m08 m24] |
| 169 // k__cospi_H = [p24 m08 p24 m08 m24 p08 m24 p08] |
| 170 const __m128i u0 = _mm_madd_epi16(t0, k__cospi_E); |
| 171 const __m128i u1 = _mm_madd_epi16(t0, k__cospi_F); |
| 172 const __m128i u2 = _mm_madd_epi16(t1, k__cospi_G); |
| 173 const __m128i u3 = _mm_madd_epi16(t1, k__cospi_H); |
| 174 // Then add and right-shift to get back to 16-bit range |
| 175 // but this combines the final right-shift as well to save operations |
| 176 // This unusual rounding operation is to maintain bit-accurate |
| 177 // compatibility with the c version of this function which has two |
| 178 // rounding steps in a row. |
| 179 const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING2); |
| 180 const __m128i v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING2); |
| 181 const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING2); |
| 182 const __m128i v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING2); |
| 183 const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS2); |
| 184 const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS2); |
| 185 const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS2); |
| 186 const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS2); |
| 187 // w0 = [o0 o4 o8 oC] |
| 188 // w1 = [o2 o6 oA oE] |
| 189 // w2 = [o1 o5 o9 oD] |
| 190 // w3 = [o3 o7 oB oF] |
| 191 // remember the o's are numbered according to the correct output location |
| 192 const __m128i x0 = _mm_packs_epi32(w0, w1); |
| 193 const __m128i x1 = _mm_packs_epi32(w2, w3); |
| 194 // x0 = [o0 o4 o8 oC o2 o6 oA oE] |
| 195 // x1 = [o1 o5 o9 oD o3 o7 oB oF] |
| 196 const __m128i y0 = _mm_unpacklo_epi16(x0, x1); |
| 197 const __m128i y1 = _mm_unpackhi_epi16(x0, x1); |
| 198 // y0 = [o0 o1 o4 o5 o8 o9 oC oD] |
| 199 // y1 = [o2 o3 o6 o7 oA oB oE oF] |
| 200 in0 = _mm_unpacklo_epi32(y0, y1); |
| 201 // in0 = [o0 o1 o2 o3 o4 o5 o6 o7] |
| 202 in1 = _mm_unpackhi_epi32(y0, y1); |
| 203 // in1 = [o8 o9 oA oB oC oD oE oF] |
| 204 } |
| 205 // Post-condition (v + 1) >> 2 is now incorporated into previous |
| 206 // add and right-shift commands. Only 2 store instructions needed |
| 207 // because we are using the fact that 1/3 are stored just after 0/2. |
| 208 { |
| 209 _mm_storeu_si128((__m128i *)(output + 0 * 4), in0); |
| 210 _mm_storeu_si128((__m128i *)(output + 2 * 4), in1); |
109 } | 211 } |
110 } | 212 } |
111 | 213 |
| 214 |
112 static INLINE void load_buffer_4x4(const int16_t *input, __m128i *in, | 215 static INLINE void load_buffer_4x4(const int16_t *input, __m128i *in, |
113 int stride) { | 216 int stride) { |
114 const __m128i k__nonzero_bias_a = _mm_setr_epi16(0, 1, 1, 1, 1, 1, 1, 1); | 217 const __m128i k__nonzero_bias_a = _mm_setr_epi16(0, 1, 1, 1, 1, 1, 1, 1); |
115 const __m128i k__nonzero_bias_b = _mm_setr_epi16(1, 0, 0, 0, 0, 0, 0, 0); | 218 const __m128i k__nonzero_bias_b = _mm_setr_epi16(1, 0, 0, 0, 0, 0, 0, 0); |
116 __m128i mask; | 219 __m128i mask; |
117 | 220 |
118 in[0] = _mm_loadl_epi64((const __m128i *)(input + 0 * stride)); | 221 in[0] = _mm_loadl_epi64((const __m128i *)(input + 0 * stride)); |
119 in[1] = _mm_loadl_epi64((const __m128i *)(input + 1 * stride)); | 222 in[1] = _mm_loadl_epi64((const __m128i *)(input + 1 * stride)); |
120 in[2] = _mm_loadl_epi64((const __m128i *)(input + 2 * stride)); | 223 in[2] = _mm_loadl_epi64((const __m128i *)(input + 2 * stride)); |
121 in[3] = _mm_loadl_epi64((const __m128i *)(input + 3 * stride)); | 224 in[3] = _mm_loadl_epi64((const __m128i *)(input + 3 * stride)); |
(...skipping 2459 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2581 #define FDCT32x32_HIGH_PRECISION 0 | 2684 #define FDCT32x32_HIGH_PRECISION 0 |
2582 #include "vp9/encoder/x86/vp9_dct32x32_sse2.c" | 2685 #include "vp9/encoder/x86/vp9_dct32x32_sse2.c" |
2583 #undef FDCT32x32_2D | 2686 #undef FDCT32x32_2D |
2584 #undef FDCT32x32_HIGH_PRECISION | 2687 #undef FDCT32x32_HIGH_PRECISION |
2585 | 2688 |
2586 #define FDCT32x32_2D vp9_fdct32x32_sse2 | 2689 #define FDCT32x32_2D vp9_fdct32x32_sse2 |
2587 #define FDCT32x32_HIGH_PRECISION 1 | 2690 #define FDCT32x32_HIGH_PRECISION 1 |
2588 #include "vp9/encoder/x86/vp9_dct32x32_sse2.c" // NOLINT | 2691 #include "vp9/encoder/x86/vp9_dct32x32_sse2.c" // NOLINT |
2589 #undef FDCT32x32_2D | 2692 #undef FDCT32x32_2D |
2590 #undef FDCT32x32_HIGH_PRECISION | 2693 #undef FDCT32x32_HIGH_PRECISION |
OLD | NEW |