Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(403)

Side by Side Diff: source/libvpx/vp9/encoder/x86/vp9_variance_sse2.c

Issue 554673004: libvpx: Pull from upstream (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/deps/third_party/libvpx/
Patch Set: Created 6 years, 3 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « source/libvpx/vp9/encoder/x86/vp9_variance_mmx.c ('k') | source/libvpx/vp9/vp9_common.mk » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 /* 1 /*
2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved. 2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
3 * 3 *
4 * Use of this source code is governed by a BSD-style license 4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source 5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found 6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may 7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree. 8 * be found in the AUTHORS file in the root of the source tree.
9 */ 9 */
10 10
11 #include <emmintrin.h> // SSE2
12
11 #include "./vpx_config.h" 13 #include "./vpx_config.h"
12 14
13 #include "vp9/encoder/vp9_variance.h" 15 #include "vp9/encoder/vp9_variance.h"
14 #include "vpx_ports/mem.h" 16 #include "vpx_ports/mem.h"
15 17
16 typedef unsigned int (*variance_fn_t) (const unsigned char *src, int src_stride, 18 typedef unsigned int (*variance_fn_t) (const unsigned char *src, int src_stride,
17 const unsigned char *ref, int ref_stride, 19 const unsigned char *ref, int ref_stride,
18 unsigned int *sse, int *sum); 20 unsigned int *sse, int *sum);
19 21
20 unsigned int vp9_get4x4var_mmx(const unsigned char *src, int src_stride, 22 unsigned int vp9_get_mb_ss_sse2(const int16_t *src) {
21 const unsigned char *ref, int ref_stride, 23 __m128i vsum = _mm_setzero_si128();
22 unsigned int *sse, int *sum); 24 int i;
23 25
26 for (i = 0; i < 32; ++i) {
27 const __m128i v = _mm_loadu_si128((const __m128i *)src);
28 vsum = _mm_add_epi32(vsum, _mm_madd_epi16(v, v));
29 src += 8;
30 }
24 31
25 unsigned int vp9_get8x8var_sse2(const unsigned char *src, int src_stride, 32 vsum = _mm_add_epi32(vsum, _mm_srli_si128(vsum, 8));
26 const unsigned char *ref, int ref_stride, 33 vsum = _mm_add_epi32(vsum, _mm_srli_si128(vsum, 4));
27 unsigned int *sse, int *sum); 34 return _mm_cvtsi128_si32(vsum);
35 }
28 36
29 unsigned int vp9_get16x16var_sse2(const unsigned char *src, int src_stride, 37 #define READ64(p, stride, i) \
30 const unsigned char *ref, int ref_stride, 38 _mm_unpacklo_epi8(_mm_cvtsi32_si128(*(const uint32_t *)(p + i * stride)), \
31 unsigned int *sse, int *sum); 39 _mm_cvtsi32_si128(*(const uint32_t *)(p + (i + 1) * stride)))
40
// Computes the sum of differences (*sum) and sum of squared differences
// (*sse) between a 4x4 source block and a 4x4 reference block.
// Always returns 0 so it can be used through the variance_fn_t table.
unsigned int vp9_get4x4var_sse2(const uint8_t *src, int src_stride,
                                const uint8_t *ref, int ref_stride,
                                unsigned int *sse, int *sum) {
  const __m128i zero = _mm_setzero_si128();
  __m128i sum16 = zero;  // 16-bit per-lane difference accumulator
  __m128i sse32 = zero;  // 32-bit per-lane squared-difference accumulator
  int row;

  // One 4-pixel row per iteration.  The 32-bit loads mirror the unaligned
  // load idiom of the original READ64 helper macro, here inlined.
  for (row = 0; row < 4; ++row) {
    const __m128i s = _mm_unpacklo_epi8(
        _mm_cvtsi32_si128(*(const uint32_t *)(src + row * src_stride)), zero);
    const __m128i r = _mm_unpacklo_epi8(
        _mm_cvtsi32_si128(*(const uint32_t *)(ref + row * ref_stride)), zero);
    const __m128i d = _mm_sub_epi16(s, r);
    sum16 = _mm_add_epi16(sum16, d);
    sse32 = _mm_add_epi32(sse32, _mm_madd_epi16(d, d));
  }

  // Horizontal reduction of the 16-bit partial sums; 16 diffs of at most
  // +/-255 cannot overflow 16 bits.
  sum16 = _mm_add_epi16(sum16, _mm_srli_si128(sum16, 8));
  sum16 = _mm_add_epi16(sum16, _mm_srli_si128(sum16, 4));
  sum16 = _mm_add_epi16(sum16, _mm_srli_si128(sum16, 2));
  *sum = (int16_t)_mm_extract_epi16(sum16, 0);

  // Horizontal reduction of the 32-bit squared sums.
  sse32 = _mm_add_epi32(sse32, _mm_srli_si128(sse32, 8));
  sse32 = _mm_add_epi32(sse32, _mm_srli_si128(sse32, 4));
  *sse = _mm_cvtsi128_si32(sse32);

  return 0;
}
68
// Computes the sum of differences (*sum) and sum of squared differences
// (*sse) between an 8x8 source block and an 8x8 reference block.
// Always returns 0 so it can be used through the variance_fn_t table.
unsigned int vp9_get8x8var_sse2(const uint8_t *src, int src_stride,
                                const uint8_t *ref, int ref_stride,
                                unsigned int *sse, int *sum) {
  const __m128i zero = _mm_setzero_si128();
  __m128i sum16 = zero;  // 16-bit per-lane difference accumulator
  __m128i sse32 = zero;  // 32-bit per-lane squared-difference accumulator
  int row;

  // One 8-pixel row per iteration: widen both rows to 16 bits, difference,
  // then accumulate the diff and its madd square.
  for (row = 0; row < 8; ++row) {
    const __m128i s = _mm_unpacklo_epi8(
        _mm_loadl_epi64((const __m128i *)(src + row * src_stride)), zero);
    const __m128i r = _mm_unpacklo_epi8(
        _mm_loadl_epi64((const __m128i *)(ref + row * ref_stride)), zero);
    const __m128i d = _mm_sub_epi16(s, r);
    sum16 = _mm_add_epi16(sum16, d);
    sse32 = _mm_add_epi32(sse32, _mm_madd_epi16(d, d));
  }

  // Horizontal reduction of the 16-bit partial sums; 64 diffs of at most
  // +/-255 stay within int16 range (|sum| <= 16320).
  sum16 = _mm_add_epi16(sum16, _mm_srli_si128(sum16, 8));
  sum16 = _mm_add_epi16(sum16, _mm_srli_si128(sum16, 4));
  sum16 = _mm_add_epi16(sum16, _mm_srli_si128(sum16, 2));
  *sum = (int16_t)_mm_extract_epi16(sum16, 0);

  // Horizontal reduction of the 32-bit squared sums.
  sse32 = _mm_add_epi32(sse32, _mm_srli_si128(sse32, 8));
  sse32 = _mm_add_epi32(sse32, _mm_srli_si128(sse32, 4));
  *sse = _mm_cvtsi128_si32(sse32);

  return 0;
}
109
// Computes the sum of differences (*sum) and sum of squared differences
// (*sse) between a 16x16 source block and a 16x16 reference block.
// Always returns 0 so it can be used through the variance_fn_t table.
unsigned int vp9_get16x16var_sse2(const uint8_t *src, int src_stride,
                                  const uint8_t *ref, int ref_stride,
                                  unsigned int *sse, int *sum) {
  const __m128i zero = _mm_setzero_si128();
  __m128i sum16 = zero;  // 16-bit per-lane difference accumulator
  __m128i sse32 = zero;  // 32-bit per-lane squared-difference accumulator
  int row;

  for (row = 0; row < 16; ++row) {
    const __m128i s8 =
        _mm_loadu_si128((const __m128i *)(src + row * src_stride));
    const __m128i r8 =
        _mm_loadu_si128((const __m128i *)(ref + row * ref_stride));
    // Widen each 16-pixel row to two 8-lane 16-bit vectors and difference.
    const __m128i dlo = _mm_sub_epi16(_mm_unpacklo_epi8(s8, zero),
                                      _mm_unpacklo_epi8(r8, zero));
    const __m128i dhi = _mm_sub_epi16(_mm_unpackhi_epi8(s8, zero),
                                      _mm_unpackhi_epi8(r8, zero));
    sum16 = _mm_add_epi16(sum16, dlo);
    sum16 = _mm_add_epi16(sum16, dhi);
    sse32 = _mm_add_epi32(sse32, _mm_madd_epi16(dlo, dlo));
    sse32 = _mm_add_epi32(sse32, _mm_madd_epi16(dhi, dhi));
  }

  // Reduce the sum only as far as two 16-bit lanes and finish in scalar
  // code: one more vector add could wrap (|lane| may reach 2 * 32640 while
  // each two-lane partial stays within int16 range).
  sum16 = _mm_add_epi16(sum16, _mm_srli_si128(sum16, 8));
  sum16 = _mm_add_epi16(sum16, _mm_srli_si128(sum16, 4));
  *sum = (int16_t)_mm_extract_epi16(sum16, 0) +
         (int16_t)_mm_extract_epi16(sum16, 1);

  // Horizontal reduction of the 32-bit squared sums.
  sse32 = _mm_add_epi32(sse32, _mm_srli_si128(sse32, 8));
  sse32 = _mm_add_epi32(sse32, _mm_srli_si128(sse32, 4));
  *sse = _mm_cvtsi128_si32(sse32);

  return 0;
}
152
32 153
// Generic variance driver: tiles the w x h region with block_size x
// block_size sub-blocks, calls var_fn (one of the vp9_getNxNvar_* kernels)
// on each tile, and accumulates the per-tile SSE and sum into *sse / *sum.
// w and h must be multiples of block_size — the loops assume exact tiling.
 33 static void variance_sse2(const unsigned char *src, int src_stride, 154 static void variance_sse2(const unsigned char *src, int src_stride,
 34 const unsigned char *ref, int ref_stride, 155 const unsigned char *ref, int ref_stride,
 35 int w, int h, unsigned int *sse, int *sum, 156 int w, int h, unsigned int *sse, int *sum,
 36 variance_fn_t var_fn, int block_size) { 157 variance_fn_t var_fn, int block_size) {
 37 int i, j; 158 int i, j;
 38 159
 39 *sse = 0; 160 *sse = 0;
 40 *sum = 0; 161 *sum = 0;
 41 162
// Walk tiles in raster order; (i, j) is the tile's top-left pixel.
 42 for (i = 0; i < h; i += block_size) { 163 for (i = 0; i < h; i += block_size) {
 43 for (j = 0; j < w; j += block_size) { 164 for (j = 0; j < w; j += block_size) {
 44 unsigned int sse0; 165 unsigned int sse0;
 45 int sum0; 166 int sum0;
 46 var_fn(src + src_stride * i + j, src_stride, 167 var_fn(src + src_stride * i + j, src_stride,
 47 ref + ref_stride * i + j, ref_stride, &sse0, &sum0); 168 ref + ref_stride * i + j, ref_stride, &sse0, &sum0);
 48 *sse += sse0; 169 *sse += sse0;
 49 *sum += sum0; 170 *sum += sum0;
 50 } 171 }
 51 } 172 }
 52 } 173 }
53 174
54 unsigned int vp9_variance4x4_sse2(const unsigned char *src, int src_stride, 175 unsigned int vp9_variance4x4_sse2(const unsigned char *src, int src_stride,
55 const unsigned char *ref, int ref_stride, 176 const unsigned char *ref, int ref_stride,
56 unsigned int *sse) { 177 unsigned int *sse) {
57 int sum; 178 int sum;
58 variance_sse2(src, src_stride, ref, ref_stride, 4, 4, 179 vp9_get4x4var_sse2(src, src_stride, ref, ref_stride, sse, &sum);
59 sse, &sum, vp9_get4x4var_mmx, 4);
60 return *sse - (((unsigned int)sum * sum) >> 4); 180 return *sse - (((unsigned int)sum * sum) >> 4);
61 } 181 }
62 182
63 unsigned int vp9_variance8x4_sse2(const uint8_t *src, int src_stride, 183 unsigned int vp9_variance8x4_sse2(const uint8_t *src, int src_stride,
64 const uint8_t *ref, int ref_stride, 184 const uint8_t *ref, int ref_stride,
65 unsigned int *sse) { 185 unsigned int *sse) {
66 int sum; 186 int sum;
67 variance_sse2(src, src_stride, ref, ref_stride, 8, 4, 187 variance_sse2(src, src_stride, ref, ref_stride, 8, 4,
68 sse, &sum, vp9_get4x4var_mmx, 4); 188 sse, &sum, vp9_get4x4var_sse2, 4);
69 return *sse - (((unsigned int)sum * sum) >> 5); 189 return *sse - (((unsigned int)sum * sum) >> 5);
70 } 190 }
71 191
72 unsigned int vp9_variance4x8_sse2(const uint8_t *src, int src_stride, 192 unsigned int vp9_variance4x8_sse2(const uint8_t *src, int src_stride,
73 const uint8_t *ref, int ref_stride, 193 const uint8_t *ref, int ref_stride,
74 unsigned int *sse) { 194 unsigned int *sse) {
75 int sum; 195 int sum;
76 variance_sse2(src, src_stride, ref, ref_stride, 4, 8, 196 variance_sse2(src, src_stride, ref, ref_stride, 4, 8,
77 sse, &sum, vp9_get4x4var_mmx, 4); 197 sse, &sum, vp9_get4x4var_sse2, 4);
78 return *sse - (((unsigned int)sum * sum) >> 5); 198 return *sse - (((unsigned int)sum * sum) >> 5);
79 } 199 }
80 200
81 unsigned int vp9_variance8x8_sse2(const unsigned char *src, int src_stride, 201 unsigned int vp9_variance8x8_sse2(const unsigned char *src, int src_stride,
82 const unsigned char *ref, int ref_stride, 202 const unsigned char *ref, int ref_stride,
83 unsigned int *sse) { 203 unsigned int *sse) {
84 int sum; 204 int sum;
85 variance_sse2(src, src_stride, ref, ref_stride, 8, 8, 205 vp9_get8x8var_sse2(src, src_stride, ref, ref_stride, sse, &sum);
86 sse, &sum, vp9_get8x8var_sse2, 8);
87 return *sse - (((unsigned int)sum * sum) >> 6); 206 return *sse - (((unsigned int)sum * sum) >> 6);
88 } 207 }
89 208
90 unsigned int vp9_variance16x8_sse2(const unsigned char *src, int src_stride, 209 unsigned int vp9_variance16x8_sse2(const unsigned char *src, int src_stride,
91 const unsigned char *ref, int ref_stride, 210 const unsigned char *ref, int ref_stride,
92 unsigned int *sse) { 211 unsigned int *sse) {
93 int sum; 212 int sum;
94 variance_sse2(src, src_stride, ref, ref_stride, 16, 8, 213 variance_sse2(src, src_stride, ref, ref_stride, 16, 8,
95 sse, &sum, vp9_get8x8var_sse2, 8); 214 sse, &sum, vp9_get8x8var_sse2, 8);
96 return *sse - (((unsigned int)sum * sum) >> 7); 215 return *sse - (((unsigned int)sum * sum) >> 7);
97 } 216 }
98 217
99 unsigned int vp9_variance8x16_sse2(const unsigned char *src, int src_stride, 218 unsigned int vp9_variance8x16_sse2(const unsigned char *src, int src_stride,
100 const unsigned char *ref, int ref_stride, 219 const unsigned char *ref, int ref_stride,
101 unsigned int *sse) { 220 unsigned int *sse) {
102 int sum; 221 int sum;
103 variance_sse2(src, src_stride, ref, ref_stride, 8, 16, 222 variance_sse2(src, src_stride, ref, ref_stride, 8, 16,
104 sse, &sum, vp9_get8x8var_sse2, 8); 223 sse, &sum, vp9_get8x8var_sse2, 8);
105 return *sse - (((unsigned int)sum * sum) >> 7); 224 return *sse - (((unsigned int)sum * sum) >> 7);
106 } 225 }
107 226
108 unsigned int vp9_variance16x16_sse2(const unsigned char *src, int src_stride, 227 unsigned int vp9_variance16x16_sse2(const unsigned char *src, int src_stride,
109 const unsigned char *ref, int ref_stride, 228 const unsigned char *ref, int ref_stride,
110 unsigned int *sse) { 229 unsigned int *sse) {
111 int sum; 230 int sum;
112 variance_sse2(src, src_stride, ref, ref_stride, 16, 16, 231 vp9_get16x16var_sse2(src, src_stride, ref, ref_stride, sse, &sum);
113 sse, &sum, vp9_get16x16var_sse2, 16);
114 return *sse - (((unsigned int)sum * sum) >> 8); 232 return *sse - (((unsigned int)sum * sum) >> 8);
115 } 233 }
116 234
117 unsigned int vp9_mse16x16_sse2(const unsigned char *src, int src_stride,
118 const unsigned char *ref, int ref_stride,
119 unsigned int *sse) {
120 int sum;
121 vp9_get16x16var_sse2(src, src_stride, ref, ref_stride, sse, &sum);
122 return *sse;
123 }
124
125 unsigned int vp9_variance32x32_sse2(const uint8_t *src, int src_stride, 235 unsigned int vp9_variance32x32_sse2(const uint8_t *src, int src_stride,
126 const uint8_t *ref, int ref_stride, 236 const uint8_t *ref, int ref_stride,
127 unsigned int *sse) { 237 unsigned int *sse) {
128 int sum; 238 int sum;
129 variance_sse2(src, src_stride, ref, ref_stride, 32, 32, 239 variance_sse2(src, src_stride, ref, ref_stride, 32, 32,
130 sse, &sum, vp9_get16x16var_sse2, 16); 240 sse, &sum, vp9_get16x16var_sse2, 16);
131 return *sse - (((int64_t)sum * sum) >> 10); 241 return *sse - (((int64_t)sum * sum) >> 10);
132 } 242 }
133 243
134 unsigned int vp9_variance32x16_sse2(const uint8_t *src, int src_stride, 244 unsigned int vp9_variance32x16_sse2(const uint8_t *src, int src_stride,
(...skipping 34 matching lines...) Expand 10 before | Expand all | Expand 10 after
169 279
170 unsigned int vp9_variance32x64_sse2(const uint8_t *src, int src_stride, 280 unsigned int vp9_variance32x64_sse2(const uint8_t *src, int src_stride,
171 const uint8_t *ref, int ref_stride, 281 const uint8_t *ref, int ref_stride,
172 unsigned int *sse) { 282 unsigned int *sse) {
173 int sum; 283 int sum;
174 variance_sse2(src, src_stride, ref, ref_stride, 32, 64, 284 variance_sse2(src, src_stride, ref, ref_stride, 32, 64,
175 sse, &sum, vp9_get16x16var_sse2, 16); 285 sse, &sum, vp9_get16x16var_sse2, 16);
176 return *sse - (((int64_t)sum * sum) >> 11); 286 return *sse - (((int64_t)sum * sum) >> 11);
177 } 287 }
178 288
// 8x8 mean squared error: the raw sum of squared differences, i.e. the
// variance kernel's SSE output without the mean correction.
unsigned int vp9_mse8x8_sse2(const uint8_t *src, int src_stride,
                             const uint8_t *ref, int ref_stride,
                             unsigned int *sse) {
  // The variance call fills *sse; its mean-corrected return value is
  // deliberately discarded.
  (void)vp9_variance8x8_sse2(src, src_stride, ref, ref_stride, sse);
  return *sse;
}
295
// 8x16 mean squared error: the raw sum of squared differences, i.e. the
// variance kernel's SSE output without the mean correction.
unsigned int vp9_mse8x16_sse2(const uint8_t *src, int src_stride,
                              const uint8_t *ref, int ref_stride,
                              unsigned int *sse) {
  // The variance call fills *sse; its mean-corrected return value is
  // deliberately discarded.
  (void)vp9_variance8x16_sse2(src, src_stride, ref, ref_stride, sse);
  return *sse;
}
302
// 16x8 mean squared error: the raw sum of squared differences, i.e. the
// variance kernel's SSE output without the mean correction.
unsigned int vp9_mse16x8_sse2(const uint8_t *src, int src_stride,
                              const uint8_t *ref, int ref_stride,
                              unsigned int *sse) {
  // The variance call fills *sse; its mean-corrected return value is
  // deliberately discarded.
  (void)vp9_variance16x8_sse2(src, src_stride, ref, ref_stride, sse);
  return *sse;
}
309
// 16x16 mean squared error: the raw sum of squared differences, i.e. the
// variance kernel's SSE output without the mean correction.
unsigned int vp9_mse16x16_sse2(const uint8_t *src, int src_stride,
                               const uint8_t *ref, int ref_stride,
                               unsigned int *sse) {
  // The variance call fills *sse; its mean-corrected return value is
  // deliberately discarded.
  (void)vp9_variance16x16_sse2(src, src_stride, ref, ref_stride, sse);
  return *sse;
}
316
179 #define DECL(w, opt) \ 317 #define DECL(w, opt) \
180 int vp9_sub_pixel_variance##w##xh_##opt(const uint8_t *src, \ 318 int vp9_sub_pixel_variance##w##xh_##opt(const uint8_t *src, \
181 ptrdiff_t src_stride, \ 319 ptrdiff_t src_stride, \
182 int x_offset, int y_offset, \ 320 int x_offset, int y_offset, \
183 const uint8_t *dst, \ 321 const uint8_t *dst, \
184 ptrdiff_t dst_stride, \ 322 ptrdiff_t dst_stride, \
185 int height, unsigned int *sse) 323 int height, unsigned int *sse)
186 #define DECLS(opt1, opt2) \ 324 #define DECLS(opt1, opt2) \
187 DECL(4, opt2); \ 325 DECL(4, opt2); \
188 DECL(8, opt1); \ 326 DECL(8, opt1); \
(...skipping 136 matching lines...) Expand 10 before | Expand all | Expand 10 after
325 FN(8, 8, 8, 3, 3, opt1, (unsigned int)); \ 463 FN(8, 8, 8, 3, 3, opt1, (unsigned int)); \
326 FN(8, 4, 8, 3, 2, opt1, (unsigned int)); \ 464 FN(8, 4, 8, 3, 2, opt1, (unsigned int)); \
327 FN(4, 8, 4, 2, 3, opt2, (unsigned int)); \ 465 FN(4, 8, 4, 2, 3, opt2, (unsigned int)); \
328 FN(4, 4, 4, 2, 2, opt2, (unsigned int)) 466 FN(4, 4, 4, 2, 2, opt2, (unsigned int))
329 467
330 FNS(sse2, sse); 468 FNS(sse2, sse);
331 FNS(ssse3, ssse3); 469 FNS(ssse3, ssse3);
332 470
333 #undef FNS 471 #undef FNS
334 #undef FN 472 #undef FN
OLDNEW
« no previous file with comments | « source/libvpx/vp9/encoder/x86/vp9_variance_mmx.c ('k') | source/libvpx/vp9/vp9_common.mk » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698