OLD | NEW |
(Empty) | |
| 1 // Copyright 2015 Google Inc. All Rights Reserved. |
| 2 // |
| 3 // Use of this source code is governed by a BSD-style license |
| 4 // that can be found in the COPYING file in the root of the source |
| 5 // tree. An additional intellectual property rights grant can be found |
| 6 // in the file PATENTS. All contributing project authors may |
| 7 // be found in the AUTHORS file in the root of the source tree. |
| 8 // ----------------------------------------------------------------------------- |
| 9 // |
| 10 // SSE4 version of some encoding functions. |
| 11 // |
| 12 // Author: Skal (pascal.massimino@gmail.com) |
| 13 |
| 14 #include "./dsp.h" |
| 15 |
| 16 #if defined(WEBP_USE_SSE41) |
| 17 #include <smmintrin.h> |
| 18 #include <stdlib.h> // for abs() |
| 19 |
| 20 #include "../enc/vp8enci.h" |
| 21 |
| 22 //------------------------------------------------------------------------------ |
| 23 // Compute susceptibility based on DCT-coeff histograms. |
| 24 |
// Accumulates into 'histo' the histogram of quantized DCT coefficient
// magnitudes for the 4x4 sub-blocks [start_block, end_block). Each block's
// residual (ref - pred) is forward-transformed, then each coefficient is
// binned as min(abs(coeff) >> 3, MAX_COEFF_THRESH).
static void CollectHistogram(const uint8_t* ref, const uint8_t* pred,
                             int start_block, int end_block,
                             VP8Histogram* const histo) {
  const __m128i max_coeff_thresh = _mm_set1_epi16(MAX_COEFF_THRESH);
  int j;
  int distribution[MAX_COEFF_THRESH + 1] = { 0 };
  for (j = start_block; j < end_block; ++j) {
    int16_t out[16];
    int k;

    // Forward DCT of the residual for this sub-block; VP8DspScan[j] maps the
    // block index to its byte offset inside the macroblock.
    VP8FTransform(ref + VP8DspScan[j], pred + VP8DspScan[j], out);

    // Convert coefficients to bin (within out[]).
    {
      // Load.
      const __m128i out0 = _mm_loadu_si128((__m128i*)&out[0]);
      const __m128i out1 = _mm_loadu_si128((__m128i*)&out[8]);
      // v = abs(out) >> 3
      const __m128i abs0 = _mm_abs_epi16(out0);
      const __m128i abs1 = _mm_abs_epi16(out1);
      const __m128i v0 = _mm_srai_epi16(abs0, 3);
      const __m128i v1 = _mm_srai_epi16(abs1, 3);
      // bin = min(v, MAX_COEFF_THRESH)
      const __m128i bin0 = _mm_min_epi16(v0, max_coeff_thresh);
      const __m128i bin1 = _mm_min_epi16(v1, max_coeff_thresh);
      // Store.
      _mm_storeu_si128((__m128i*)&out[0], bin0);
      _mm_storeu_si128((__m128i*)&out[8], bin1);
    }

    // Accumulate the 16 bins into the running distribution.
    for (k = 0; k < 16; ++k) {
      ++distribution[out[k]];
    }
  }
  VP8SetHistogramData(distribution, histo);
}
| 62 |
| 63 //------------------------------------------------------------------------------ |
| 64 // Texture distortion |
| 65 // |
| 66 // We try to match the spectral content (weighted) between source and |
| 67 // reconstructed samples. |
| 68 |
| 69 // Hadamard transform |
| 70 // Returns the difference between the weighted sum of the absolute value of |
| 71 // transformed coefficients. |
// Applies a 4x4 Walsh-Hadamard transform to both input blocks 'inA' and
// 'inB' (rows spaced BPS bytes apart) and returns
//   sum(w[i] * |T(inA)[i]|) - sum(w[i] * |T(inB)[i]|).
// The two transforms are computed in parallel: inA occupies the low half of
// each 128-bit register and inB the high half throughout.
static int TTransform(const uint8_t* inA, const uint8_t* inB,
                      const uint16_t* const w) {
  __m128i tmp_0, tmp_1, tmp_2, tmp_3;

  // Load, combine and transpose inputs.
  {
    const __m128i inA_0 = _mm_loadl_epi64((const __m128i*)&inA[BPS * 0]);
    const __m128i inA_1 = _mm_loadl_epi64((const __m128i*)&inA[BPS * 1]);
    const __m128i inA_2 = _mm_loadl_epi64((const __m128i*)&inA[BPS * 2]);
    const __m128i inA_3 = _mm_loadl_epi64((const __m128i*)&inA[BPS * 3]);
    const __m128i inB_0 = _mm_loadl_epi64((const __m128i*)&inB[BPS * 0]);
    const __m128i inB_1 = _mm_loadl_epi64((const __m128i*)&inB[BPS * 1]);
    const __m128i inB_2 = _mm_loadl_epi64((const __m128i*)&inB[BPS * 2]);
    const __m128i inB_3 = _mm_loadl_epi64((const __m128i*)&inB[BPS * 3]);

    // Combine inA and inB (we'll do two transforms in parallel).
    const __m128i inAB_0 = _mm_unpacklo_epi8(inA_0, inB_0);
    const __m128i inAB_1 = _mm_unpacklo_epi8(inA_1, inB_1);
    const __m128i inAB_2 = _mm_unpacklo_epi8(inA_2, inB_2);
    const __m128i inAB_3 = _mm_unpacklo_epi8(inA_3, inB_3);
    // a00 b00 a01 b01 a02 b02 a03 b03 0 0 0 0 0 0 0 0
    // a10 b10 a11 b11 a12 b12 a13 b13 0 0 0 0 0 0 0 0
    // a20 b20 a21 b21 a22 b22 a23 b23 0 0 0 0 0 0 0 0
    // a30 b30 a31 b31 a32 b32 a33 b33 0 0 0 0 0 0 0 0

    // Transpose the two 4x4, discarding the filling zeroes.
    const __m128i transpose0_0 = _mm_unpacklo_epi8(inAB_0, inAB_2);
    const __m128i transpose0_1 = _mm_unpacklo_epi8(inAB_1, inAB_3);
    // a00 a20 b00 b20 a01 a21 b01 b21 a02 a22 b02 b22 a03 a23 b03 b23
    // a10 a30 b10 b30 a11 a31 b11 b31 a12 a32 b12 b32 a13 a33 b13 b33
    const __m128i transpose1_0 = _mm_unpacklo_epi8(transpose0_0, transpose0_1);
    const __m128i transpose1_1 = _mm_unpackhi_epi8(transpose0_0, transpose0_1);
    // a00 a10 a20 a30 b00 b10 b20 b30 a01 a11 a21 a31 b01 b11 b21 b31
    // a02 a12 a22 a32 b02 b12 b22 b32 a03 a13 a23 a33 b03 b13 b23 b33

    // Convert to 16b (zero-extend; _mm_cvtepu8_epi16 widens the low 8 bytes).
    tmp_0 = _mm_cvtepu8_epi16(transpose1_0);
    tmp_1 = _mm_cvtepu8_epi16(_mm_srli_si128(transpose1_0, 8));
    tmp_2 = _mm_cvtepu8_epi16(transpose1_1);
    tmp_3 = _mm_cvtepu8_epi16(_mm_srli_si128(transpose1_1, 8));
    // a00 a10 a20 a30 b00 b10 b20 b30
    // a01 a11 a21 a31 b01 b11 b21 b31
    // a02 a12 a22 a32 b02 b12 b22 b32
    // a03 a13 a23 a33 b03 b13 b23 b33
  }

  // Horizontal pass and subsequent transpose.
  {
    // Calculate a and b (two 4x4 at once).
    const __m128i a0 = _mm_add_epi16(tmp_0, tmp_2);
    const __m128i a1 = _mm_add_epi16(tmp_1, tmp_3);
    const __m128i a2 = _mm_sub_epi16(tmp_1, tmp_3);
    const __m128i a3 = _mm_sub_epi16(tmp_0, tmp_2);
    const __m128i b0 = _mm_add_epi16(a0, a1);
    const __m128i b1 = _mm_add_epi16(a3, a2);
    const __m128i b2 = _mm_sub_epi16(a3, a2);
    const __m128i b3 = _mm_sub_epi16(a0, a1);
    // a00 a01 a02 a03 b00 b01 b02 b03
    // a10 a11 a12 a13 b10 b11 b12 b13
    // a20 a21 a22 a23 b20 b21 b22 b23
    // a30 a31 a32 a33 b30 b31 b32 b33

    // Transpose the two 4x4.
    const __m128i transpose0_0 = _mm_unpacklo_epi16(b0, b1);
    const __m128i transpose0_1 = _mm_unpacklo_epi16(b2, b3);
    const __m128i transpose0_2 = _mm_unpackhi_epi16(b0, b1);
    const __m128i transpose0_3 = _mm_unpackhi_epi16(b2, b3);
    // a00 a10 a01 a11 a02 a12 a03 a13
    // a20 a30 a21 a31 a22 a32 a23 a33
    // b00 b10 b01 b11 b02 b12 b03 b13
    // b20 b30 b21 b31 b22 b32 b23 b33
    const __m128i transpose1_0 = _mm_unpacklo_epi32(transpose0_0, transpose0_1);
    const __m128i transpose1_1 = _mm_unpacklo_epi32(transpose0_2, transpose0_3);
    const __m128i transpose1_2 = _mm_unpackhi_epi32(transpose0_0, transpose0_1);
    const __m128i transpose1_3 = _mm_unpackhi_epi32(transpose0_2, transpose0_3);
    // a00 a10 a20 a30 a01 a11 a21 a31
    // b00 b10 b20 b30 b01 b11 b21 b31
    // a02 a12 a22 a32 a03 a13 a23 a33
    // b02 b12 b22 b32 b03 b13 b23 b33
    tmp_0 = _mm_unpacklo_epi64(transpose1_0, transpose1_1);
    tmp_1 = _mm_unpackhi_epi64(transpose1_0, transpose1_1);
    tmp_2 = _mm_unpacklo_epi64(transpose1_2, transpose1_3);
    tmp_3 = _mm_unpackhi_epi64(transpose1_2, transpose1_3);
    // a00 a10 a20 a30 b00 b10 b20 b30
    // a01 a11 a21 a31 b01 b11 b21 b31
    // a02 a12 a22 a32 b02 b12 b22 b32
    // a03 a13 a23 a33 b03 b13 b23 b33
  }

  // Vertical pass and difference of weighted sums.
  {
    // Load all inputs.
    const __m128i w_0 = _mm_loadu_si128((const __m128i*)&w[0]);
    const __m128i w_8 = _mm_loadu_si128((const __m128i*)&w[8]);

    // Calculate a and b (two 4x4 at once).
    const __m128i a0 = _mm_add_epi16(tmp_0, tmp_2);
    const __m128i a1 = _mm_add_epi16(tmp_1, tmp_3);
    const __m128i a2 = _mm_sub_epi16(tmp_1, tmp_3);
    const __m128i a3 = _mm_sub_epi16(tmp_0, tmp_2);
    const __m128i b0 = _mm_add_epi16(a0, a1);
    const __m128i b1 = _mm_add_epi16(a3, a2);
    const __m128i b2 = _mm_sub_epi16(a3, a2);
    const __m128i b3 = _mm_sub_epi16(a0, a1);

    // Separate the transforms of inA and inB.
    __m128i A_b0 = _mm_unpacklo_epi64(b0, b1);
    __m128i A_b2 = _mm_unpacklo_epi64(b2, b3);
    __m128i B_b0 = _mm_unpackhi_epi64(b0, b1);
    __m128i B_b2 = _mm_unpackhi_epi64(b2, b3);

    A_b0 = _mm_abs_epi16(A_b0);
    A_b2 = _mm_abs_epi16(A_b2);
    B_b0 = _mm_abs_epi16(B_b0);
    B_b2 = _mm_abs_epi16(B_b2);

    // weighted sums
    A_b0 = _mm_madd_epi16(A_b0, w_0);
    A_b2 = _mm_madd_epi16(A_b2, w_8);
    B_b0 = _mm_madd_epi16(B_b0, w_0);
    B_b2 = _mm_madd_epi16(B_b2, w_8);
    A_b0 = _mm_add_epi32(A_b0, A_b2);
    B_b0 = _mm_add_epi32(B_b0, B_b2);

    // difference of weighted sums
    A_b2 = _mm_sub_epi32(A_b0, B_b0);
    // cascading summation of the differences
    B_b0 = _mm_hadd_epi32(A_b2, A_b2);
    B_b2 = _mm_hadd_epi32(B_b0, B_b0);
    return _mm_cvtsi128_si32(B_b2);
  }
}
| 204 |
| 205 static int Disto4x4(const uint8_t* const a, const uint8_t* const b, |
| 206 const uint16_t* const w) { |
| 207 const int diff_sum = TTransform(a, b, w); |
| 208 return abs(diff_sum) >> 5; |
| 209 } |
| 210 |
| 211 static int Disto16x16(const uint8_t* const a, const uint8_t* const b, |
| 212 const uint16_t* const w) { |
| 213 int D = 0; |
| 214 int x, y; |
| 215 for (y = 0; y < 16 * BPS; y += 4 * BPS) { |
| 216 for (x = 0; x < 16; x += 4) { |
| 217 D += Disto4x4(a + x + y, b + x + y, w); |
| 218 } |
| 219 } |
| 220 return D; |
| 221 } |
| 222 |
| 223 //------------------------------------------------------------------------------ |
| 224 // Quantization |
| 225 // |
| 226 |
| 227 // Generates a pshufb constant for shuffling 16b words. |
// Generates a pshufb constant for shuffling 16b words.
// A negative index produces a zeroed output word (pshufb zeroing behavior).
#define PSHUFB_CST(A,B,C,D,E,F,G,H)  \
  _mm_set_epi8(2 * (H) + 1, 2 * (H) + 0, 2 * (G) + 1, 2 * (G) + 0, \
               2 * (F) + 1, 2 * (F) + 0, 2 * (E) + 1, 2 * (E) + 0, \
               2 * (D) + 1, 2 * (D) + 0, 2 * (C) + 1, 2 * (C) + 0, \
               2 * (B) + 1, 2 * (B) + 0, 2 * (A) + 1, 2 * (A) + 0)

// Quantizes the 16 coefficients of 'in' with matrix 'mtx', writing the
// zigzag-ordered quantized levels to 'out' and the dequantized
// (reconstructed) coefficients back into 'in'. 'sharpen' is an optional
// per-coefficient bias added to |in| before quantization (may be NULL).
// Returns non-zero iff at least one quantized level is non-zero.
static WEBP_INLINE int DoQuantizeBlock(int16_t in[16], int16_t out[16],
                                       const uint16_t* const sharpen,
                                       const VP8Matrix* const mtx) {
  const __m128i max_coeff_2047 = _mm_set1_epi16(MAX_LEVEL);
  const __m128i zero = _mm_setzero_si128();
  __m128i out0, out8;
  __m128i packed_out;

  // Load all inputs.
  __m128i in0 = _mm_loadu_si128((__m128i*)&in[0]);
  __m128i in8 = _mm_loadu_si128((__m128i*)&in[8]);
  const __m128i iq0 = _mm_loadu_si128((const __m128i*)&mtx->iq_[0]);
  const __m128i iq8 = _mm_loadu_si128((const __m128i*)&mtx->iq_[8]);
  const __m128i q0 = _mm_loadu_si128((const __m128i*)&mtx->q_[0]);
  const __m128i q8 = _mm_loadu_si128((const __m128i*)&mtx->q_[8]);

  // coeff = abs(in)
  __m128i coeff0 = _mm_abs_epi16(in0);
  __m128i coeff8 = _mm_abs_epi16(in8);

  // coeff = abs(in) + sharpen
  if (sharpen != NULL) {
    const __m128i sharpen0 = _mm_loadu_si128((const __m128i*)&sharpen[0]);
    const __m128i sharpen8 = _mm_loadu_si128((const __m128i*)&sharpen[8]);
    coeff0 = _mm_add_epi16(coeff0, sharpen0);
    coeff8 = _mm_add_epi16(coeff8, sharpen8);
  }

  // out = (coeff * iQ + B) >> QFIX
  {
    // doing calculations with 32b precision (QFIX=17)
    // out = (coeff * iQ); mulhi/mullo pairs give the full 32-bit product.
    const __m128i coeff_iQ0H = _mm_mulhi_epu16(coeff0, iq0);
    const __m128i coeff_iQ0L = _mm_mullo_epi16(coeff0, iq0);
    const __m128i coeff_iQ8H = _mm_mulhi_epu16(coeff8, iq8);
    const __m128i coeff_iQ8L = _mm_mullo_epi16(coeff8, iq8);
    __m128i out_00 = _mm_unpacklo_epi16(coeff_iQ0L, coeff_iQ0H);
    __m128i out_04 = _mm_unpackhi_epi16(coeff_iQ0L, coeff_iQ0H);
    __m128i out_08 = _mm_unpacklo_epi16(coeff_iQ8L, coeff_iQ8H);
    __m128i out_12 = _mm_unpackhi_epi16(coeff_iQ8L, coeff_iQ8H);
    // out = (coeff * iQ + B)
    const __m128i bias_00 = _mm_loadu_si128((const __m128i*)&mtx->bias_[0]);
    const __m128i bias_04 = _mm_loadu_si128((const __m128i*)&mtx->bias_[4]);
    const __m128i bias_08 = _mm_loadu_si128((const __m128i*)&mtx->bias_[8]);
    const __m128i bias_12 = _mm_loadu_si128((const __m128i*)&mtx->bias_[12]);
    out_00 = _mm_add_epi32(out_00, bias_00);
    out_04 = _mm_add_epi32(out_04, bias_04);
    out_08 = _mm_add_epi32(out_08, bias_08);
    out_12 = _mm_add_epi32(out_12, bias_12);
    // out = QUANTDIV(coeff, iQ, B, QFIX)
    out_00 = _mm_srai_epi32(out_00, QFIX);
    out_04 = _mm_srai_epi32(out_04, QFIX);
    out_08 = _mm_srai_epi32(out_08, QFIX);
    out_12 = _mm_srai_epi32(out_12, QFIX);

    // pack result as 16b
    out0 = _mm_packs_epi32(out_00, out_04);
    out8 = _mm_packs_epi32(out_08, out_12);

    // if (coeff > 2047) coeff = 2047
    out0 = _mm_min_epi16(out0, max_coeff_2047);
    out8 = _mm_min_epi16(out8, max_coeff_2047);
  }

  // put sign back (sign of the original input coefficient)
  out0 = _mm_sign_epi16(out0, in0);
  out8 = _mm_sign_epi16(out8, in8);

  // in = out * Q  (dequantized reconstruction, stored back into 'in')
  in0 = _mm_mullo_epi16(out0, q0);
  in8 = _mm_mullo_epi16(out8, q8);

  _mm_storeu_si128((__m128i*)&in[0], in0);
  _mm_storeu_si128((__m128i*)&in[8], in8);

  // zigzag the output before storing it. The re-ordering is:
  //    0 1 2 3 4 5 6 7 | 8  9 10 11 12 13 14 15
  // -> 0 1 4[8]5 2 3 6 | 9 12 13 10 [7]11 14 15
  // There's only two misplaced entries ([8] and [7]) that are crossing the
  // reg's boundaries.
  // We use pshufb instead of pshuflo/pshufhi.
  {
    // -1 indices leave zeroed holes that the OR below fills with the
    // cross-register entries extracted by kCst_7 / kCst_8.
    const __m128i kCst_lo = PSHUFB_CST(0, 1, 4, -1, 5, 2, 3, 6);
    const __m128i kCst_7 = PSHUFB_CST(-1, -1, -1, -1, 7, -1, -1, -1);
    const __m128i tmp_lo = _mm_shuffle_epi8(out0, kCst_lo);
    const __m128i tmp_7 = _mm_shuffle_epi8(out0, kCst_7);  // extract #7
    const __m128i kCst_hi = PSHUFB_CST(1, 4, 5, 2, -1, 3, 6, 7);
    const __m128i kCst_8 = PSHUFB_CST(-1, -1, -1, 0, -1, -1, -1, -1);
    const __m128i tmp_hi = _mm_shuffle_epi8(out8, kCst_hi);
    const __m128i tmp_8 = _mm_shuffle_epi8(out8, kCst_8);  // extract #8
    const __m128i out_z0 = _mm_or_si128(tmp_lo, tmp_8);
    const __m128i out_z8 = _mm_or_si128(tmp_hi, tmp_7);
    _mm_storeu_si128((__m128i*)&out[0], out_z0);
    _mm_storeu_si128((__m128i*)&out[8], out_z8);
    packed_out = _mm_packs_epi16(out_z0, out_z8);
  }

  // detect if all 'out' values are zeroes or not
  return (_mm_movemask_epi8(_mm_cmpeq_epi8(packed_out, zero)) != 0xffff);
}

#undef PSHUFB_CST
| 336 |
| 337 static int QuantizeBlock(int16_t in[16], int16_t out[16], |
| 338 const VP8Matrix* const mtx) { |
| 339 return DoQuantizeBlock(in, out, &mtx->sharpen_[0], mtx); |
| 340 } |
| 341 |
| 342 static int QuantizeBlockWHT(int16_t in[16], int16_t out[16], |
| 343 const VP8Matrix* const mtx) { |
| 344 return DoQuantizeBlock(in, out, NULL, mtx); |
| 345 } |
| 346 |
| 347 static int Quantize2Blocks(int16_t in[32], int16_t out[32], |
| 348 const VP8Matrix* const mtx) { |
| 349 int nz; |
| 350 const uint16_t* const sharpen = &mtx->sharpen_[0]; |
| 351 nz = DoQuantizeBlock(in + 0 * 16, out + 0 * 16, sharpen, mtx) << 0; |
| 352 nz |= DoQuantizeBlock(in + 1 * 16, out + 1 * 16, sharpen, mtx) << 1; |
| 353 return nz; |
| 354 } |
| 355 |
| 356 //------------------------------------------------------------------------------ |
| 357 // Entry point |
| 358 |
extern void VP8EncDspInitSSE41(void);
// Installs the SSE4.1 implementations into the global dsp function pointers.
WEBP_TSAN_IGNORE_FUNCTION void VP8EncDspInitSSE41(void) {
  // Histogram collection.
  VP8CollectHistogram = CollectHistogram;
  // Quantization.
  VP8EncQuantizeBlock = QuantizeBlock;
  VP8EncQuantize2Blocks = Quantize2Blocks;
  VP8EncQuantizeBlockWHT = QuantizeBlockWHT;
  // Texture distortion.
  VP8TDisto4x4 = Disto4x4;
  VP8TDisto16x16 = Disto16x16;
}
| 368 |
| 369 #else // !WEBP_USE_SSE41 |
| 370 |
| 371 WEBP_DSP_INIT_STUB(VP8EncDspInitSSE41) |
| 372 |
| 373 #endif // WEBP_USE_SSE41 |
OLD | NEW |