/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <emmintrin.h>  // SSE2

#include "./vpx_config.h"
#include "./vpx_dsp_rtcd.h"

#include "vpx_ports/mem.h"

typedef void (*getNxMvar_fn_t) (const unsigned char *src, int src_stride,
                                const unsigned char *ref, int ref_stride,
                                unsigned int *sse, int *sum);

unsigned int vpx_get_mb_ss_sse2(const int16_t *src) {
  __m128i vsum = _mm_setzero_si128();
  int i;

  for (i = 0; i < 32; ++i) {
    const __m128i v = _mm_loadu_si128((const __m128i *)src);
    vsum = _mm_add_epi32(vsum, _mm_madd_epi16(v, v));
    src += 8;
  }

  vsum = _mm_add_epi32(vsum, _mm_srli_si128(vsum, 8));
  vsum = _mm_add_epi32(vsum, _mm_srli_si128(vsum, 4));
  return _mm_cvtsi128_si32(vsum);
}
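
/* A rough scalar equivalent of the loop above, as an illustrative sketch
 * only (not part of the build):
 *
 *   uint32_t ss = 0;
 *   for (int i = 0; i < 256; ++i) ss += (uint32_t)(src[i] * src[i]);
 *   return ss;
 *
 * _mm_madd_epi16(v, v) squares eight 16-bit lanes and adds adjacent pairs
 * into four 32-bit lanes, so 32 iterations cover all 256 values of a
 * 16x16 block; the two _mm_srli_si128/_mm_add_epi32 steps then fold the
 * four partial sums into lane 0.
 */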

#define READ64(p, stride, i) \
  _mm_unpacklo_epi8(_mm_cvtsi32_si128(*(const uint32_t *)(p + i * stride)), \
                    _mm_cvtsi32_si128(*(const uint32_t *)(p + (i + 1) * stride)))
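
/* READ64 gathers two 4-byte rows (rows i and i + 1) into the low half of
 * one register: _mm_cvtsi32_si128 places each 32-bit row in lane 0, and
 * _mm_unpacklo_epi8 interleaves their bytes. Because src and ref are
 * interleaved in exactly the same order, the lanes still pair up
 * correctly when the two vectors are subtracted below.
 */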

static void get4x4var_sse2(const uint8_t *src, int src_stride,
                           const uint8_t *ref, int ref_stride,
                           unsigned int *sse, int *sum) {
  const __m128i zero = _mm_setzero_si128();
  const __m128i src0 = _mm_unpacklo_epi8(READ64(src, src_stride, 0), zero);
  const __m128i src1 = _mm_unpacklo_epi8(READ64(src, src_stride, 2), zero);
  const __m128i ref0 = _mm_unpacklo_epi8(READ64(ref, ref_stride, 0), zero);
  const __m128i ref1 = _mm_unpacklo_epi8(READ64(ref, ref_stride, 2), zero);
  const __m128i diff0 = _mm_sub_epi16(src0, ref0);
  const __m128i diff1 = _mm_sub_epi16(src1, ref1);

  // sum
  __m128i vsum = _mm_add_epi16(diff0, diff1);
  vsum = _mm_add_epi16(vsum, _mm_srli_si128(vsum, 8));
  vsum = _mm_add_epi16(vsum, _mm_srli_si128(vsum, 4));
  vsum = _mm_add_epi16(vsum, _mm_srli_si128(vsum, 2));
  *sum = (int16_t)_mm_extract_epi16(vsum, 0);

  // sse
  vsum = _mm_add_epi32(_mm_madd_epi16(diff0, diff0),
                       _mm_madd_epi16(diff1, diff1));
  vsum = _mm_add_epi32(vsum, _mm_srli_si128(vsum, 8));
  vsum = _mm_add_epi32(vsum, _mm_srli_si128(vsum, 4));
  *sse = _mm_cvtsi128_si32(vsum);
}
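
/* Note on lane widths in the 4x4 kernel: each of the 16 differences lies
 * in [-255, 255], so the 16-bit reduction is safe (|sum| <= 16 * 255 =
 * 4080, well inside int16 range) and the total can be pulled from lane 0
 * with a single _mm_extract_epi16. The squares are accumulated at 32 bits
 * via _mm_madd_epi16, which cannot overflow here (16 * 255^2 < 2^31).
 */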

void vpx_get8x8var_sse2(const uint8_t *src, int src_stride,
                        const uint8_t *ref, int ref_stride,
                        unsigned int *sse, int *sum) {
  const __m128i zero = _mm_setzero_si128();
  __m128i vsum = _mm_setzero_si128();
  __m128i vsse = _mm_setzero_si128();
  int i;

  for (i = 0; i < 8; i += 2) {
    const __m128i src0 = _mm_unpacklo_epi8(_mm_loadl_epi64(
        (const __m128i *)(src + i * src_stride)), zero);
    const __m128i ref0 = _mm_unpacklo_epi8(_mm_loadl_epi64(
        (const __m128i *)(ref + i * ref_stride)), zero);
    const __m128i diff0 = _mm_sub_epi16(src0, ref0);

    const __m128i src1 = _mm_unpacklo_epi8(_mm_loadl_epi64(
        (const __m128i *)(src + (i + 1) * src_stride)), zero);
    const __m128i ref1 = _mm_unpacklo_epi8(_mm_loadl_epi64(
        (const __m128i *)(ref + (i + 1) * ref_stride)), zero);
    const __m128i diff1 = _mm_sub_epi16(src1, ref1);

    vsum = _mm_add_epi16(vsum, diff0);
    vsum = _mm_add_epi16(vsum, diff1);
    vsse = _mm_add_epi32(vsse, _mm_madd_epi16(diff0, diff0));
    vsse = _mm_add_epi32(vsse, _mm_madd_epi16(diff1, diff1));
  }

  // sum
  vsum = _mm_add_epi16(vsum, _mm_srli_si128(vsum, 8));
  vsum = _mm_add_epi16(vsum, _mm_srli_si128(vsum, 4));
  vsum = _mm_add_epi16(vsum, _mm_srli_si128(vsum, 2));
  *sum = (int16_t)_mm_extract_epi16(vsum, 0);

  // sse
  vsse = _mm_add_epi32(vsse, _mm_srli_si128(vsse, 8));
  vsse = _mm_add_epi32(vsse, _mm_srli_si128(vsse, 4));
  *sse = _mm_cvtsi128_si32(vsse);
}
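
/* Same reduction pattern as the 4x4 kernel. With 64 differences the total
 * sum is bounded by 64 * 255 = 16320, which still fits in a single signed
 * 16-bit lane, so one _mm_extract_epi16 suffices here as well.
 */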

void vpx_get16x16var_sse2(const uint8_t *src, int src_stride,
                          const uint8_t *ref, int ref_stride,
                          unsigned int *sse, int *sum) {
  const __m128i zero = _mm_setzero_si128();
  __m128i vsum = _mm_setzero_si128();
  __m128i vsse = _mm_setzero_si128();
  int i;

  for (i = 0; i < 16; ++i) {
    const __m128i s = _mm_loadu_si128((const __m128i *)src);
    const __m128i r = _mm_loadu_si128((const __m128i *)ref);

    const __m128i src0 = _mm_unpacklo_epi8(s, zero);
    const __m128i ref0 = _mm_unpacklo_epi8(r, zero);
    const __m128i diff0 = _mm_sub_epi16(src0, ref0);

    const __m128i src1 = _mm_unpackhi_epi8(s, zero);
    const __m128i ref1 = _mm_unpackhi_epi8(r, zero);
    const __m128i diff1 = _mm_sub_epi16(src1, ref1);

    vsum = _mm_add_epi16(vsum, diff0);
    vsum = _mm_add_epi16(vsum, diff1);
    vsse = _mm_add_epi32(vsse, _mm_madd_epi16(diff0, diff0));
    vsse = _mm_add_epi32(vsse, _mm_madd_epi16(diff1, diff1));

    src += src_stride;
    ref += ref_stride;
  }

  // sum
  vsum = _mm_add_epi16(vsum, _mm_srli_si128(vsum, 8));
  vsum = _mm_add_epi16(vsum, _mm_srli_si128(vsum, 4));
  *sum = (int16_t)_mm_extract_epi16(vsum, 0) +
         (int16_t)_mm_extract_epi16(vsum, 1);

  // sse
  vsse = _mm_add_epi32(vsse, _mm_srli_si128(vsse, 8));
  vsse = _mm_add_epi32(vsse, _mm_srli_si128(vsse, 4));
  *sse = _mm_cvtsi128_si32(vsse);
}
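
/* The 16x16 sum ends differently from the smaller kernels: 256
 * differences can reach 256 * 255 = 65280, which would overflow a single
 * int16 lane, so the last fold is done in scalar code instead. Lanes 0
 * and 1 each hold the sum of 128 differences (at most 32640, still within
 * int16) and are extracted separately, then added as plain ints.
 */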

static void variance_sse2(const unsigned char *src, int src_stride,
                          const unsigned char *ref, int ref_stride,
                          int w, int h, unsigned int *sse, int *sum,
                          getNxMvar_fn_t var_fn, int block_size) {
  int i, j;

  *sse = 0;
  *sum = 0;

  for (i = 0; i < h; i += block_size) {
    for (j = 0; j < w; j += block_size) {
      unsigned int sse0;
      int sum0;
      var_fn(src + src_stride * i + j, src_stride,
             ref + ref_stride * i + j, ref_stride, &sse0, &sum0);
      *sse += sse0;
      *sum += sum0;
    }
  }
}
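
/* variance_sse2 tiles an arbitrary w x h block with one of the NxN
 * kernels above and accumulates their partial sums. The wrappers below
 * then apply the textbook shortcut
 *
 *   variance = SSE - (sum * sum) / (w * h)
 *
 * where the division is a right shift because w * h is a power of two
 * (e.g. ">> 4" for 16 pixels, ">> 6" for 64).
 */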

unsigned int vpx_variance4x4_sse2(const unsigned char *src, int src_stride,
                                  const unsigned char *ref, int ref_stride,
                                  unsigned int *sse) {
  int sum;
  get4x4var_sse2(src, src_stride, ref, ref_stride, sse, &sum);
  return *sse - (((unsigned int)sum * sum) >> 4);
}

unsigned int vpx_variance8x4_sse2(const uint8_t *src, int src_stride,
                                  const uint8_t *ref, int ref_stride,
                                  unsigned int *sse) {
  int sum;
  variance_sse2(src, src_stride, ref, ref_stride, 8, 4,
                sse, &sum, get4x4var_sse2, 4);
  return *sse - (((unsigned int)sum * sum) >> 5);
}

unsigned int vpx_variance4x8_sse2(const uint8_t *src, int src_stride,
                                  const uint8_t *ref, int ref_stride,
                                  unsigned int *sse) {
  int sum;
  variance_sse2(src, src_stride, ref, ref_stride, 4, 8,
                sse, &sum, get4x4var_sse2, 4);
  return *sse - (((unsigned int)sum * sum) >> 5);
}

unsigned int vpx_variance8x8_sse2(const unsigned char *src, int src_stride,
                                  const unsigned char *ref, int ref_stride,
                                  unsigned int *sse) {
  int sum;
  vpx_get8x8var_sse2(src, src_stride, ref, ref_stride, sse, &sum);
  return *sse - (((unsigned int)sum * sum) >> 6);
}

unsigned int vpx_variance16x8_sse2(const unsigned char *src, int src_stride,
                                   const unsigned char *ref, int ref_stride,
                                   unsigned int *sse) {
  int sum;
  variance_sse2(src, src_stride, ref, ref_stride, 16, 8,
                sse, &sum, vpx_get8x8var_sse2, 8);
  return *sse - (((unsigned int)sum * sum) >> 7);
}

unsigned int vpx_variance8x16_sse2(const unsigned char *src, int src_stride,
                                   const unsigned char *ref, int ref_stride,
                                   unsigned int *sse) {
  int sum;
  variance_sse2(src, src_stride, ref, ref_stride, 8, 16,
                sse, &sum, vpx_get8x8var_sse2, 8);
  return *sse - (((unsigned int)sum * sum) >> 7);
}

unsigned int vpx_variance16x16_sse2(const unsigned char *src, int src_stride,
                                    const unsigned char *ref, int ref_stride,
                                    unsigned int *sse) {
  int sum;
  vpx_get16x16var_sse2(src, src_stride, ref, ref_stride, sse, &sum);
  return *sse - (((unsigned int)sum * sum) >> 8);
}

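/* For blocks of 32x32 pixels and up, sum can reach 64 * 64 * 255 =
 * 1044480, so sum * sum no longer fits in 32 bits; the wrappers below
 * widen to int64_t before squaring, unlike the smaller sizes above.
 */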
unsigned int vpx_variance32x32_sse2(const uint8_t *src, int src_stride,
                                    const uint8_t *ref, int ref_stride,
                                    unsigned int *sse) {
  int sum;
  variance_sse2(src, src_stride, ref, ref_stride, 32, 32,
                sse, &sum, vpx_get16x16var_sse2, 16);
  return *sse - (((int64_t)sum * sum) >> 10);
}

unsigned int vpx_variance32x16_sse2(const uint8_t *src, int src_stride,
                                    const uint8_t *ref, int ref_stride,
                                    unsigned int *sse) {
  int sum;
  variance_sse2(src, src_stride, ref, ref_stride, 32, 16,
                sse, &sum, vpx_get16x16var_sse2, 16);
  return *sse - (((int64_t)sum * sum) >> 9);
}

unsigned int vpx_variance16x32_sse2(const uint8_t *src, int src_stride,
                                    const uint8_t *ref, int ref_stride,
                                    unsigned int *sse) {
  int sum;
  variance_sse2(src, src_stride, ref, ref_stride, 16, 32,
                sse, &sum, vpx_get16x16var_sse2, 16);
  return *sse - (((int64_t)sum * sum) >> 9);
}

unsigned int vpx_variance64x64_sse2(const uint8_t *src, int src_stride,
                                    const uint8_t *ref, int ref_stride,
                                    unsigned int *sse) {
  int sum;
  variance_sse2(src, src_stride, ref, ref_stride, 64, 64,
                sse, &sum, vpx_get16x16var_sse2, 16);
  return *sse - (((int64_t)sum * sum) >> 12);
}

unsigned int vpx_variance64x32_sse2(const uint8_t *src, int src_stride,
                                    const uint8_t *ref, int ref_stride,
                                    unsigned int *sse) {
  int sum;
  variance_sse2(src, src_stride, ref, ref_stride, 64, 32,
                sse, &sum, vpx_get16x16var_sse2, 16);
  return *sse - (((int64_t)sum * sum) >> 11);
}

unsigned int vpx_variance32x64_sse2(const uint8_t *src, int src_stride,
                                    const uint8_t *ref, int ref_stride,
                                    unsigned int *sse) {
  int sum;
  variance_sse2(src, src_stride, ref, ref_stride, 32, 64,
                sse, &sum, vpx_get16x16var_sse2, 16);
  return *sse - (((int64_t)sum * sum) >> 11);
}

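/* MSE is the SSE term alone: the wrappers below reuse the variance
 * kernels for their *sse side effect and skip the mean correction.
 */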
unsigned int vpx_mse8x8_sse2(const uint8_t *src, int src_stride,
                             const uint8_t *ref, int ref_stride,
                             unsigned int *sse) {
  vpx_variance8x8_sse2(src, src_stride, ref, ref_stride, sse);
  return *sse;
}

unsigned int vpx_mse8x16_sse2(const uint8_t *src, int src_stride,
                              const uint8_t *ref, int ref_stride,
                              unsigned int *sse) {
  vpx_variance8x16_sse2(src, src_stride, ref, ref_stride, sse);
  return *sse;
}

unsigned int vpx_mse16x8_sse2(const uint8_t *src, int src_stride,
                              const uint8_t *ref, int ref_stride,
                              unsigned int *sse) {
  vpx_variance16x8_sse2(src, src_stride, ref, ref_stride, sse);
  return *sse;
}

unsigned int vpx_mse16x16_sse2(const uint8_t *src, int src_stride,
                               const uint8_t *ref, int ref_stride,
                               unsigned int *sse) {
  vpx_variance16x16_sse2(src, src_stride, ref, ref_stride, sse);
  return *sse;
}