Chromium Code Reviews

Side by Side Diff: source/libvpx/vp9/common/x86/vp9_high_loopfilter_intrin_sse2.c

Issue 668403002: libvpx: Pull from upstream (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/deps/third_party/libvpx/
Patch Set: Created 6 years, 2 months ago
1 /*
2 * Copyright (c) 2014 The WebM project authors. All Rights Reserved.
3 *
4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree.
9 */
10
11 #include <emmintrin.h> // SSE2
12
13 #include "./vp9_rtcd.h"
14 #include "vp9/common/vp9_loopfilter.h"
15 #include "vpx_ports/emmintrin_compat.h"
16
17 static INLINE __m128i signed_char_clamp_bd_sse2(__m128i value, int bd) {
18 __m128i ubounded;
19 __m128i lbounded;
20 __m128i retval;
21
22 const __m128i zero = _mm_set1_epi16(0);
23 const __m128i one = _mm_set1_epi16(1);
24 const __m128i t80 = _mm_slli_epi16(_mm_set1_epi16(0x80), bd - 8);
25 const __m128i max = _mm_subs_epi16(
26 _mm_subs_epi16(_mm_slli_epi16(one, bd), one), t80);
27 const __m128i min = _mm_subs_epi16(zero, t80);
28 ubounded = _mm_cmpgt_epi16(value, max);
29 lbounded = _mm_cmplt_epi16(value, min);
30 retval = _mm_andnot_si128(_mm_or_si128(ubounded, lbounded), value);
31 ubounded = _mm_and_si128(ubounded, max);
32 lbounded = _mm_and_si128(lbounded, min);
33 retval = _mm_or_si128(retval, ubounded);
34 retval = _mm_or_si128(retval, lbounded);
35 return retval;
36 }
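
For readability, here is a scalar sketch of what the lane-wise clamp above computes. This is my reading of the intrinsics, not code from the patch, and the helper name is hypothetical.

// Hypothetical scalar equivalent of signed_char_clamp_bd_sse2, per 16-bit lane.
// Once the t80 bias has been removed, the working range for a bd-bit pixel is
// [-t80, (1 << bd) - 1 - t80]; values outside it are clamped to the bound.
static int16_t signed_char_clamp_bd_scalar(int16_t value, int bd) {
  const int16_t t80 = (int16_t)(0x80 << (bd - 8));
  const int16_t max = (int16_t)(((1 << bd) - 1) - t80);
  const int16_t min = (int16_t)(-t80);
  if (value > max) return max;
  if (value < min) return min;
  return value;
}
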
37
38 // TODO(debargha, peter): Break up large functions into smaller ones
39 // in this file.
40 static void highbd_mb_lpf_horizontal_edge_w_sse2_8(uint16_t *s,
41 int p,
42 const uint8_t *_blimit,
43 const uint8_t *_limit,
44 const uint8_t *_thresh,
45 int bd) {
46 const __m128i zero = _mm_set1_epi16(0);
47 const __m128i one = _mm_set1_epi16(1);
48 const __m128i blimit = _mm_slli_epi16(
49 _mm_unpacklo_epi8(
50 _mm_load_si128((const __m128i *)_blimit), zero), bd - 8);
51 const __m128i limit = _mm_slli_epi16(
52 _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_limit), zero), bd - 8);
53 const __m128i thresh = _mm_slli_epi16(
54 _mm_unpacklo_epi8(
55 _mm_load_si128((const __m128i *)_thresh), zero), bd - 8);
56 __m128i q7, p7, q6, p6, q5, p5, q4, p4, q3, p3, q2, p2, q1, p1, q0, p0;
57 __m128i mask, hev, flat, flat2, abs_p1p0, abs_q1q0;
58 __m128i ps1, qs1, ps0, qs0;
59 __m128i abs_p0q0, abs_p1q1, ffff, work;
60 __m128i filt, work_a, filter1, filter2;
61 __m128i flat2_q6, flat2_p6, flat2_q5, flat2_p5, flat2_q4, flat2_p4;
62 __m128i flat2_q3, flat2_p3, flat2_q2, flat2_p2, flat2_q1, flat2_p1;
63 __m128i flat2_q0, flat2_p0;
64 __m128i flat_q2, flat_p2, flat_q1, flat_p1, flat_q0, flat_p0;
65 __m128i pixelFilter_p, pixelFilter_q;
66 __m128i pixetFilter_p2p1p0, pixetFilter_q2q1q0;
67 __m128i sum_p7, sum_q7, sum_p3, sum_q3;
68 __m128i t4, t3, t80, t1;
69 __m128i eight, four;
70
71 q4 = _mm_load_si128((__m128i *)(s + 4 * p));
72 p4 = _mm_load_si128((__m128i *)(s - 5 * p));
73 q3 = _mm_load_si128((__m128i *)(s + 3 * p));
74 p3 = _mm_load_si128((__m128i *)(s - 4 * p));
75 q2 = _mm_load_si128((__m128i *)(s + 2 * p));
76 p2 = _mm_load_si128((__m128i *)(s - 3 * p));
77 q1 = _mm_load_si128((__m128i *)(s + 1 * p));
78 p1 = _mm_load_si128((__m128i *)(s - 2 * p));
79 q0 = _mm_load_si128((__m128i *)(s + 0 * p));
80 p0 = _mm_load_si128((__m128i *)(s - 1 * p));
81
82 // highbd_filter_mask
83 abs_p1p0 = _mm_or_si128(_mm_subs_epu16(p1, p0), _mm_subs_epu16(p0, p1));
84 abs_q1q0 = _mm_or_si128(_mm_subs_epu16(q1, q0), _mm_subs_epu16(q0, q1));
85
86 ffff = _mm_cmpeq_epi16(abs_p1p0, abs_p1p0);
87
88 abs_p0q0 = _mm_or_si128(_mm_subs_epu16(p0, q0), _mm_subs_epu16(q0, p0));
89 abs_p1q1 = _mm_or_si128(_mm_subs_epu16(p1, q1), _mm_subs_epu16(q1, p1));
90
91 // highbd_hev_mask (in C code this is actually called from highbd_filter4)
92 flat = _mm_max_epi16(abs_p1p0, abs_q1q0);
93 hev = _mm_subs_epu16(flat, thresh);
94 hev = _mm_xor_si128(_mm_cmpeq_epi16(hev, zero), ffff);
95
96 abs_p0q0 = _mm_adds_epu16(abs_p0q0, abs_p0q0); // abs(p0 - q0) * 2
97 abs_p1q1 = _mm_srli_epi16(abs_p1q1, 1); // abs(p1 - q1) / 2
98 mask = _mm_subs_epu16(_mm_adds_epu16(abs_p0q0, abs_p1q1), blimit);
99 mask = _mm_xor_si128(_mm_cmpeq_epi16(mask, zero), ffff);
100 mask = _mm_and_si128(mask, _mm_adds_epu16(limit, one));
101 work = _mm_max_epi16(_mm_or_si128(_mm_subs_epu16(p1, p0),
102 _mm_subs_epu16(p0, p1)),
103 _mm_or_si128(_mm_subs_epu16(q1, q0),
104 _mm_subs_epu16(q0, q1)));
105 mask = _mm_max_epi16(work, mask);
106 work = _mm_max_epi16(_mm_or_si128(_mm_subs_epu16(p2, p1),
107 _mm_subs_epu16(p1, p2)),
108 _mm_or_si128(_mm_subs_epu16(q2, q1),
109 _mm_subs_epu16(q1, q2)));
110 mask = _mm_max_epi16(work, mask);
111 work = _mm_max_epi16(_mm_or_si128(_mm_subs_epu16(p3, p2),
112 _mm_subs_epu16(p2, p3)),
113 _mm_or_si128(_mm_subs_epu16(q3, q2),
114 _mm_subs_epu16(q2, q3)));
115 mask = _mm_max_epi16(work, mask);
116
117 mask = _mm_subs_epu16(mask, limit);
118 mask = _mm_cmpeq_epi16(mask, zero); // return ~mask
119
120 // lp filter
121 // highbd_filter4
122 t4 = _mm_set1_epi16(4);
123 t3 = _mm_set1_epi16(3);
124 t80 = _mm_slli_epi16(_mm_set1_epi16(0x80), bd - 8);
125 t1 = _mm_set1_epi16(0x1);
126
127 ps1 = _mm_subs_epi16(p1, t80);
128 qs1 = _mm_subs_epi16(q1, t80);
129 ps0 = _mm_subs_epi16(p0, t80);
130 qs0 = _mm_subs_epi16(q0, t80);
131
132 filt = _mm_and_si128(
133 signed_char_clamp_bd_sse2(_mm_subs_epi16(ps1, qs1), bd), hev);
134 work_a = _mm_subs_epi16(qs0, ps0);
135 filt = _mm_adds_epi16(filt, work_a);
136 filt = _mm_adds_epi16(filt, work_a);
137 filt = signed_char_clamp_bd_sse2(_mm_adds_epi16(filt, work_a), bd);
138 filt = _mm_and_si128(filt, mask);
139
140 filter1 = signed_char_clamp_bd_sse2(_mm_adds_epi16(filt, t4), bd);
141 filter2 = signed_char_clamp_bd_sse2(_mm_adds_epi16(filt, t3), bd);
142
143 // Filter1 >> 3, Filter2 >> 3
144 filter1 = _mm_srai_epi16(filter1, 0x3);
145 filter2 = _mm_srai_epi16(filter2, 0x3);
146
147 qs0 = _mm_adds_epi16(
148 signed_char_clamp_bd_sse2(_mm_subs_epi16(qs0, filter1), bd),
149 t80);
150 ps0 = _mm_adds_epi16(
151 signed_char_clamp_bd_sse2(_mm_adds_epi16(ps0, filter2), bd),
152 t80);
153 filt = _mm_adds_epi16(filter1, t1);
154 filt = _mm_srai_epi16(filt, 1);
155 filt = _mm_andnot_si128(hev, filt);
156
157 qs1 = _mm_adds_epi16(
158 signed_char_clamp_bd_sse2(_mm_subs_epi16(qs1, filt), bd),
159 t80);
160 ps1 = _mm_adds_epi16(
161 signed_char_clamp_bd_sse2(_mm_adds_epi16(ps1, filt), bd),
162 t80);
163 // end highbd_filter4
164 // loopfilter done
165
166 // highbd_flat_mask4
167 flat = _mm_max_epi16(_mm_or_si128(_mm_subs_epu16(p2, p0),
168 _mm_subs_epu16(p0, p2)),
169 _mm_or_si128(_mm_subs_epu16(p3, p0),
170 _mm_subs_epu16(p0, p3)));
171 work = _mm_max_epi16(_mm_or_si128(_mm_subs_epu16(q2, q0),
172 _mm_subs_epu16(q0, q2)),
173 _mm_or_si128(_mm_subs_epu16(q3, q0),
174 _mm_subs_epu16(q0, q3)));
175 flat = _mm_max_epi16(work, flat);
176 work = _mm_max_epi16(abs_p1p0, abs_q1q0);
177 flat = _mm_max_epi16(work, flat);
178 flat = _mm_subs_epu16(flat, _mm_slli_epi16(one, bd - 8));
179 flat = _mm_cmpeq_epi16(flat, zero);
180 // end flat_mask4
181
182 // flat & mask = flat && mask (as used in filter8)
183 // (because, in both vars, each 16-bit lane is either all 1s or all 0s)
184 flat = _mm_and_si128(flat, mask);
185
186 p5 = _mm_load_si128((__m128i *)(s - 6 * p));
187 q5 = _mm_load_si128((__m128i *)(s + 5 * p));
188 p6 = _mm_load_si128((__m128i *)(s - 7 * p));
189 q6 = _mm_load_si128((__m128i *)(s + 6 * p));
190 p7 = _mm_load_si128((__m128i *)(s - 8 * p));
191 q7 = _mm_load_si128((__m128i *)(s + 7 * p));
192
193 // highbd_flat_mask5 (arguments passed in are p0, q0, p4-p7, q4-q7
194 // but referred to as p0-p4 & q0-q4 in fn)
195 flat2 = _mm_max_epi16(_mm_or_si128(_mm_subs_epu16(p4, p0),
196 _mm_subs_epu16(p0, p4)),
197 _mm_or_si128(_mm_subs_epu16(q4, q0),
198 _mm_subs_epu16(q0, q4)));
199
200 work = _mm_max_epi16(_mm_or_si128(_mm_subs_epu16(p5, p0),
201 _mm_subs_epu16(p0, p5)),
202 _mm_or_si128(_mm_subs_epu16(q5, q0),
203 _mm_subs_epu16(q0, q5)));
204 flat2 = _mm_max_epi16(work, flat2);
205
206 work = _mm_max_epi16(_mm_or_si128(_mm_subs_epu16(p6, p0),
207 _mm_subs_epu16(p0, p6)),
208 _mm_or_si128(_mm_subs_epu16(q6, q0),
209 _mm_subs_epu16(q0, q6)));
210 flat2 = _mm_max_epi16(work, flat2);
211
212 work = _mm_max_epi16(_mm_or_si128(_mm_subs_epu16(p7, p0),
213 _mm_subs_epu16(p0, p7)),
214 _mm_or_si128(_mm_subs_epu16(q7, q0),
215 _mm_subs_epu16(q0, q7)));
216 flat2 = _mm_max_epi16(work, flat2);
217
218 flat2 = _mm_subs_epu16(flat2, _mm_slli_epi16(one, bd - 8));
219 flat2 = _mm_cmpeq_epi16(flat2, zero);
220 flat2 = _mm_and_si128(flat2, flat); // flat2 & flat & mask
221 // end highbd_flat_mask5
222
223 // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
224 // flat and wide flat calculations
225 eight = _mm_set1_epi16(8);
226 four = _mm_set1_epi16(4);
227
228 pixelFilter_p = _mm_add_epi16(_mm_add_epi16(p6, p5),
229 _mm_add_epi16(p4, p3));
230 pixelFilter_q = _mm_add_epi16(_mm_add_epi16(q6, q5),
231 _mm_add_epi16(q4, q3));
232
233 pixetFilter_p2p1p0 = _mm_add_epi16(p0, _mm_add_epi16(p2, p1));
234 pixelFilter_p = _mm_add_epi16(pixelFilter_p, pixetFilter_p2p1p0);
235
236 pixetFilter_q2q1q0 = _mm_add_epi16(q0, _mm_add_epi16(q2, q1));
237 pixelFilter_q = _mm_add_epi16(pixelFilter_q, pixetFilter_q2q1q0);
238 pixelFilter_p = _mm_add_epi16(eight, _mm_add_epi16(pixelFilter_p,
239 pixelFilter_q));
240 pixetFilter_p2p1p0 = _mm_add_epi16(four,
241 _mm_add_epi16(pixetFilter_p2p1p0,
242 pixetFilter_q2q1q0));
243 flat2_p0 = _mm_srli_epi16(_mm_add_epi16(pixelFilter_p,
244 _mm_add_epi16(p7, p0)), 4);
245 flat2_q0 = _mm_srli_epi16(_mm_add_epi16(pixelFilter_p,
246 _mm_add_epi16(q7, q0)), 4);
247 flat_p0 = _mm_srli_epi16(_mm_add_epi16(pixetFilter_p2p1p0,
248 _mm_add_epi16(p3, p0)), 3);
249 flat_q0 = _mm_srli_epi16(_mm_add_epi16(pixetFilter_p2p1p0,
250 _mm_add_epi16(q3, q0)), 3);
251
252 sum_p7 = _mm_add_epi16(p7, p7);
253 sum_q7 = _mm_add_epi16(q7, q7);
254 sum_p3 = _mm_add_epi16(p3, p3);
255 sum_q3 = _mm_add_epi16(q3, q3);
256
257 pixelFilter_q = _mm_sub_epi16(pixelFilter_p, p6);
258 pixelFilter_p = _mm_sub_epi16(pixelFilter_p, q6);
259 flat2_p1 = _mm_srli_epi16(
260 _mm_add_epi16(pixelFilter_p, _mm_add_epi16(sum_p7, p1)), 4);
261 flat2_q1 = _mm_srli_epi16(
262 _mm_add_epi16(pixelFilter_q, _mm_add_epi16(sum_q7, q1)), 4);
263
264 pixetFilter_q2q1q0 = _mm_sub_epi16(pixetFilter_p2p1p0, p2);
265 pixetFilter_p2p1p0 = _mm_sub_epi16(pixetFilter_p2p1p0, q2);
266 flat_p1 = _mm_srli_epi16(_mm_add_epi16(pixetFilter_p2p1p0,
267 _mm_add_epi16(sum_p3, p1)), 3);
268 flat_q1 = _mm_srli_epi16(_mm_add_epi16(pixetFilter_q2q1q0,
269 _mm_add_epi16(sum_q3, q1)), 3);
270
271 sum_p7 = _mm_add_epi16(sum_p7, p7);
272 sum_q7 = _mm_add_epi16(sum_q7, q7);
273 sum_p3 = _mm_add_epi16(sum_p3, p3);
274 sum_q3 = _mm_add_epi16(sum_q3, q3);
275
276 pixelFilter_p = _mm_sub_epi16(pixelFilter_p, q5);
277 pixelFilter_q = _mm_sub_epi16(pixelFilter_q, p5);
278 flat2_p2 = _mm_srli_epi16(_mm_add_epi16(pixelFilter_p,
279 _mm_add_epi16(sum_p7, p2)), 4);
280 flat2_q2 = _mm_srli_epi16(_mm_add_epi16(pixelFilter_q,
281 _mm_add_epi16(sum_q7, q2)), 4);
282
283 pixetFilter_p2p1p0 = _mm_sub_epi16(pixetFilter_p2p1p0, q1);
284 pixetFilter_q2q1q0 = _mm_sub_epi16(pixetFilter_q2q1q0, p1);
285 flat_p2 = _mm_srli_epi16(_mm_add_epi16(pixetFilter_p2p1p0,
286 _mm_add_epi16(sum_p3, p2)), 3);
287 flat_q2 = _mm_srli_epi16(_mm_add_epi16(pixetFilter_q2q1q0,
288 _mm_add_epi16(sum_q3, q2)), 3);
289
290 sum_p7 = _mm_add_epi16(sum_p7, p7);
291 sum_q7 = _mm_add_epi16(sum_q7, q7);
292 pixelFilter_p = _mm_sub_epi16(pixelFilter_p, q4);
293 pixelFilter_q = _mm_sub_epi16(pixelFilter_q, p4);
294 flat2_p3 = _mm_srli_epi16(_mm_add_epi16(pixelFilter_p,
295 _mm_add_epi16(sum_p7, p3)), 4);
296 flat2_q3 = _mm_srli_epi16(_mm_add_epi16(pixelFilter_q,
297 _mm_add_epi16(sum_q7, q3)), 4);
298
299 sum_p7 = _mm_add_epi16(sum_p7, p7);
300 sum_q7 = _mm_add_epi16(sum_q7, q7);
301 pixelFilter_p = _mm_sub_epi16(pixelFilter_p, q3);
302 pixelFilter_q = _mm_sub_epi16(pixelFilter_q, p3);
303 flat2_p4 = _mm_srli_epi16(_mm_add_epi16(pixelFilter_p,
304 _mm_add_epi16(sum_p7, p4)), 4);
305 flat2_q4 = _mm_srli_epi16(_mm_add_epi16(pixelFilter_q,
306 _mm_add_epi16(sum_q7, q4)), 4);
307
308 sum_p7 = _mm_add_epi16(sum_p7, p7);
309 sum_q7 = _mm_add_epi16(sum_q7, q7);
310 pixelFilter_p = _mm_sub_epi16(pixelFilter_p, q2);
311 pixelFilter_q = _mm_sub_epi16(pixelFilter_q, p2);
312 flat2_p5 = _mm_srli_epi16(_mm_add_epi16(pixelFilter_p,
313 _mm_add_epi16(sum_p7, p5)), 4);
314 flat2_q5 = _mm_srli_epi16(_mm_add_epi16(pixelFilter_q,
315 _mm_add_epi16(sum_q7, q5)), 4);
316
317 sum_p7 = _mm_add_epi16(sum_p7, p7);
318 sum_q7 = _mm_add_epi16(sum_q7, q7);
319 pixelFilter_p = _mm_sub_epi16(pixelFilter_p, q1);
320 pixelFilter_q = _mm_sub_epi16(pixelFilter_q, p1);
321 flat2_p6 = _mm_srli_epi16(_mm_add_epi16(pixelFilter_p,
322 _mm_add_epi16(sum_p7, p6)), 4);
323 flat2_q6 = _mm_srli_epi16(_mm_add_epi16(pixelFilter_q,
324 _mm_add_epi16(sum_q7, q6)), 4);
325
326 // wide flat
327 // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
328
329 // highbd_filter8
330 p2 = _mm_andnot_si128(flat, p2);
331 // p2 remains unchanged if !(flat && mask)
332 flat_p2 = _mm_and_si128(flat, flat_p2);
333 // when (flat && mask)
334 p2 = _mm_or_si128(p2, flat_p2); // full list of p2 values
335 q2 = _mm_andnot_si128(flat, q2);
336 flat_q2 = _mm_and_si128(flat, flat_q2);
337 q2 = _mm_or_si128(q2, flat_q2); // full list of q2 values
338
339 ps1 = _mm_andnot_si128(flat, ps1);
340 // p1 takes the value assigned to it in filter4 if !(flat && mask)
341 flat_p1 = _mm_and_si128(flat, flat_p1);
342 // when (flat && mask)
343 p1 = _mm_or_si128(ps1, flat_p1); // full list of p1 values
344 qs1 = _mm_andnot_si128(flat, qs1);
345 flat_q1 = _mm_and_si128(flat, flat_q1);
346 q1 = _mm_or_si128(qs1, flat_q1); // full list of q1 values
347
348 ps0 = _mm_andnot_si128(flat, ps0);
349 // p0 takes the value assigned to it in filter4 if !(flat && mask)
350 flat_p0 = _mm_and_si128(flat, flat_p0);
351 // when (flat && mask)
352 p0 = _mm_or_si128(ps0, flat_p0); // full list of p0 values
353 qs0 = _mm_andnot_si128(flat, qs0);
354 flat_q0 = _mm_and_si128(flat, flat_q0);
355 q0 = _mm_or_si128(qs0, flat_q0); // full list of q0 values
356 // end highbd_filter8
357
358 // highbd_filter16
359 p6 = _mm_andnot_si128(flat2, p6);
360 // p6 remains unchanged if !(flat2 && flat && mask)
361 flat2_p6 = _mm_and_si128(flat2, flat2_p6);
362 // get values for when (flat2 && flat && mask)
363 p6 = _mm_or_si128(p6, flat2_p6); // full list of p6 values
364 q6 = _mm_andnot_si128(flat2, q6);
365 // q6 remains unchanged if !(flat2 && flat && mask)
366 flat2_q6 = _mm_and_si128(flat2, flat2_q6);
367 // get values for when (flat2 && flat && mask)
368 q6 = _mm_or_si128(q6, flat2_q6); // full list of q6 values
369 _mm_store_si128((__m128i *)(s - 7 * p), p6);
370 _mm_store_si128((__m128i *)(s + 6 * p), q6);
371
372 p5 = _mm_andnot_si128(flat2, p5);
373 // p5 remains unchanged if !(flat2 && flat && mask)
374 flat2_p5 = _mm_and_si128(flat2, flat2_p5);
375 // get values for when (flat2 && flat && mask)
376 p5 = _mm_or_si128(p5, flat2_p5);
377 // full list of p5 values
378 q5 = _mm_andnot_si128(flat2, q5);
379 // q5 remains unchanged if !(flat2 && flat && mask)
380 flat2_q5 = _mm_and_si128(flat2, flat2_q5);
381 // get values for when (flat2 && flat && mask)
382 q5 = _mm_or_si128(q5, flat2_q5);
383 // full list of q5 values
384 _mm_store_si128((__m128i *)(s - 6 * p), p5);
385 _mm_store_si128((__m128i *)(s + 5 * p), q5);
386
387 p4 = _mm_andnot_si128(flat2, p4);
388 // p4 remains unchanged if !(flat2 && flat && mask)
389 flat2_p4 = _mm_and_si128(flat2, flat2_p4);
390 // get values for when (flat2 && flat && mask)
391 p4 = _mm_or_si128(p4, flat2_p4); // full list of p4 values
392 q4 = _mm_andnot_si128(flat2, q4);
393 // q4 remains unchanged if !(flat2 && flat && mask)
394 flat2_q4 = _mm_and_si128(flat2, flat2_q4);
395 // get values for when (flat2 && flat && mask)
396 q4 = _mm_or_si128(q4, flat2_q4); // full list of q4 values
397 _mm_store_si128((__m128i *)(s - 5 * p), p4);
398 _mm_store_si128((__m128i *)(s + 4 * p), q4);
399
400 p3 = _mm_andnot_si128(flat2, p3);
401 // p3 takes value from highbd_filter8 if !(flat2 && flat && mask)
402 flat2_p3 = _mm_and_si128(flat2, flat2_p3);
403 // get values for when (flat2 && flat && mask)
404 p3 = _mm_or_si128(p3, flat2_p3); // full list of p3 values
405 q3 = _mm_andnot_si128(flat2, q3);
406 // q3 takes value from highbd_filter8 if !(flat2 && flat && mask)
407 flat2_q3 = _mm_and_si128(flat2, flat2_q3);
408 // get values for when (flat2 && flat && mask)
409 q3 = _mm_or_si128(q3, flat2_q3); // full list of q3 values
410 _mm_store_si128((__m128i *)(s - 4 * p), p3);
411 _mm_store_si128((__m128i *)(s + 3 * p), q3);
412
413 p2 = _mm_andnot_si128(flat2, p2);
414 // p2 takes value from highbd_filter8 if !(flat2 && flat && mask)
415 flat2_p2 = _mm_and_si128(flat2, flat2_p2);
416 // get values for when (flat2 && flat && mask)
417 p2 = _mm_or_si128(p2, flat2_p2);
418 // full list of p2 values
419 q2 = _mm_andnot_si128(flat2, q2);
420 // q2 takes value from highbd_filter8 if !(flat2 && flat && mask)
421 flat2_q2 = _mm_and_si128(flat2, flat2_q2);
422 // get values for when (flat2 && flat && mask)
423 q2 = _mm_or_si128(q2, flat2_q2); // full list of q2 values
424 _mm_store_si128((__m128i *)(s - 3 * p), p2);
425 _mm_store_si128((__m128i *)(s + 2 * p), q2);
426
427 p1 = _mm_andnot_si128(flat2, p1);
428 // p1 takes value from highbd_filter8 if !(flat2 && flat && mask)
429 flat2_p1 = _mm_and_si128(flat2, flat2_p1);
430 // get values for when (flat2 && flat && mask)
431 p1 = _mm_or_si128(p1, flat2_p1); // full list of p1 values
432 q1 = _mm_andnot_si128(flat2, q1);
433 // q1 takes value from highbd_filter8 if !(flat2 && flat && mask)
434 flat2_q1 = _mm_and_si128(flat2, flat2_q1);
435 // get values for when (flat2 && flat && mask)
436 q1 = _mm_or_si128(q1, flat2_q1); // full list of q1 values
437 _mm_store_si128((__m128i *)(s - 2 * p), p1);
438 _mm_store_si128((__m128i *)(s + 1 * p), q1);
439
440 p0 = _mm_andnot_si128(flat2, p0);
441 // p0 takes value from highbd_filter8 if !(flat2 && flat && mask)
442 flat2_p0 = _mm_and_si128(flat2, flat2_p0);
443 // get values for when (flat2 && flat && mask)
444 p0 = _mm_or_si128(p0, flat2_p0); // full list of p0 values
445 q0 = _mm_andnot_si128(flat2, q0);
446 // q0 takes value from highbd_filter8 if !(flat2 && flat && mask)
447 flat2_q0 = _mm_and_si128(flat2, flat2_q0);
448 // get values for when (flat2 && flat && mask)
449 q0 = _mm_or_si128(q0, flat2_q0); // full list of q0 values
450 _mm_store_si128((__m128i *)(s - 1 * p), p0);
451 _mm_store_si128((__m128i *)(s - 0 * p), q0);
452 }
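
The filter8/filter16 output sections above repeat one idiom per pixel row: andnot keeps the unfiltered value where the mask lane is clear, and keeps the filtered value where it is set, and or merges the two. A minimal sketch of that select, assuming the mask lanes are already all-ones or all-zeros (the helper name is hypothetical, not from this patch):

#include <emmintrin.h>

// Hypothetical helper: per-lane (mask ? a : b) for 16-bit lanes where mask is
// either 0xFFFF or 0x0000, as produced by _mm_cmpeq_epi16 in the code above.
static __m128i select_epi16(__m128i mask, __m128i a, __m128i b) {
  return _mm_or_si128(_mm_and_si128(mask, a), _mm_andnot_si128(mask, b));
}

For example, the p2/flat_p2 triple at the start of the filter8 block is equivalent to p2 = select_epi16(flat, flat_p2, p2).
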
453
454 static void highbd_mb_lpf_horizontal_edge_w_sse2_16(uint16_t *s,
455 int p,
456 const uint8_t *_blimit,
457 const uint8_t *_limit,
458 const uint8_t *_thresh,
459 int bd) {
460 highbd_mb_lpf_horizontal_edge_w_sse2_8(s, p, _blimit, _limit, _thresh, bd);
461 highbd_mb_lpf_horizontal_edge_w_sse2_8(s + 8, p, _blimit, _limit, _thresh,
462 bd);
463 }
464
465 // TODO(yunqingwang): remove count and call these 2 functions (8 or 16) directly.
466 void vp9_highbd_lpf_horizontal_16_sse2(uint16_t *s, int p,
467 const uint8_t *_blimit,
468 const uint8_t *_limit,
469 const uint8_t *_thresh,
470 int count, int bd) {
471 if (count == 1)
472 highbd_mb_lpf_horizontal_edge_w_sse2_8(s, p, _blimit, _limit, _thresh, bd);
473 else
474 highbd_mb_lpf_horizontal_edge_w_sse2_16(s, p, _blimit, _limit, _thresh, bd);
475 }
476
477 void vp9_highbd_lpf_horizontal_8_sse2(uint16_t *s, int p,
478 const uint8_t *_blimit,
479 const uint8_t *_limit,
480 const uint8_t *_thresh,
481 int count, int bd) {
482 DECLARE_ALIGNED_ARRAY(16, uint16_t, flat_op2, 16);
483 DECLARE_ALIGNED_ARRAY(16, uint16_t, flat_op1, 16);
484 DECLARE_ALIGNED_ARRAY(16, uint16_t, flat_op0, 16);
485 DECLARE_ALIGNED_ARRAY(16, uint16_t, flat_oq2, 16);
486 DECLARE_ALIGNED_ARRAY(16, uint16_t, flat_oq1, 16);
487 DECLARE_ALIGNED_ARRAY(16, uint16_t, flat_oq0, 16);
488 const __m128i zero = _mm_set1_epi16(0);
489 const __m128i blimit = _mm_slli_epi16(
490 _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_blimit), zero),
491 bd - 8);
492 const __m128i limit = _mm_slli_epi16(
493 _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_limit), zero),
494 bd - 8);
495 const __m128i thresh = _mm_slli_epi16(
496 _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_thresh), zero),
497 bd - 8);
498 __m128i mask, hev, flat;
499 __m128i p3 = _mm_load_si128((__m128i *)(s - 4 * p));
500 __m128i q3 = _mm_load_si128((__m128i *)(s + 3 * p));
501 __m128i p2 = _mm_load_si128((__m128i *)(s - 3 * p));
502 __m128i q2 = _mm_load_si128((__m128i *)(s + 2 * p));
503 __m128i p1 = _mm_load_si128((__m128i *)(s - 2 * p));
504 __m128i q1 = _mm_load_si128((__m128i *)(s + 1 * p));
505 __m128i p0 = _mm_load_si128((__m128i *)(s - 1 * p));
506 __m128i q0 = _mm_load_si128((__m128i *)(s + 0 * p));
507 const __m128i one = _mm_set1_epi16(1);
508 const __m128i ffff = _mm_cmpeq_epi16(one, one);
509 __m128i abs_p1q1, abs_p0q0, abs_q1q0, abs_p1p0, work;
510 const __m128i four = _mm_set1_epi16(4);
511 __m128i workp_a, workp_b, workp_shft;
512
513 const __m128i t4 = _mm_set1_epi16(4);
514 const __m128i t3 = _mm_set1_epi16(3);
515 const __m128i t80 = _mm_slli_epi16(_mm_set1_epi16(0x80), bd - 8);
516 const __m128i t1 = _mm_set1_epi16(0x1);
517 const __m128i ps1 = _mm_subs_epi16(p1, t80);
518 const __m128i ps0 = _mm_subs_epi16(p0, t80);
519 const __m128i qs0 = _mm_subs_epi16(q0, t80);
520 const __m128i qs1 = _mm_subs_epi16(q1, t80);
521 __m128i filt;
522 __m128i work_a;
523 __m128i filter1, filter2;
524
525 (void)count;
526
527 // filter_mask and hev_mask
528 abs_p1p0 = _mm_or_si128(_mm_subs_epu16(p1, p0),
529 _mm_subs_epu16(p0, p1));
530 abs_q1q0 = _mm_or_si128(_mm_subs_epu16(q1, q0),
531 _mm_subs_epu16(q0, q1));
532
533 abs_p0q0 = _mm_or_si128(_mm_subs_epu16(p0, q0),
534 _mm_subs_epu16(q0, p0));
535 abs_p1q1 = _mm_or_si128(_mm_subs_epu16(p1, q1),
536 _mm_subs_epu16(q1, p1));
537 flat = _mm_max_epi16(abs_p1p0, abs_q1q0);
538 hev = _mm_subs_epu16(flat, thresh);
539 hev = _mm_xor_si128(_mm_cmpeq_epi16(hev, zero), ffff);
540
541 abs_p0q0 = _mm_adds_epu16(abs_p0q0, abs_p0q0);
542 abs_p1q1 = _mm_srli_epi16(abs_p1q1, 1);
543 mask = _mm_subs_epu16(_mm_adds_epu16(abs_p0q0, abs_p1q1), blimit);
544 mask = _mm_xor_si128(_mm_cmpeq_epi16(mask, zero), ffff);
545 // mask |= (abs(p0 - q0) * 2 + abs(p1 - q1) / 2 > blimit) * -1;
546 // So taking maximums continues to work:
547 mask = _mm_and_si128(mask, _mm_adds_epu16(limit, one));
548 mask = _mm_max_epi16(abs_p1p0, mask);
549 // mask |= (abs(p1 - p0) > limit) * -1;
550 mask = _mm_max_epi16(abs_q1q0, mask);
551 // mask |= (abs(q1 - q0) > limit) * -1;
552
553 work = _mm_max_epi16(_mm_or_si128(_mm_subs_epu16(p2, p1),
554 _mm_subs_epu16(p1, p2)),
555 _mm_or_si128(_mm_subs_epu16(q2, q1),
556 _mm_subs_epu16(q1, q2)));
557 mask = _mm_max_epi16(work, mask);
558 work = _mm_max_epi16(_mm_or_si128(_mm_subs_epu16(p3, p2),
559 _mm_subs_epu16(p2, p3)),
560 _mm_or_si128(_mm_subs_epu16(q3, q2),
561 _mm_subs_epu16(q2, q3)));
562 mask = _mm_max_epi16(work, mask);
563 mask = _mm_subs_epu16(mask, limit);
564 mask = _mm_cmpeq_epi16(mask, zero);
565
566 // flat_mask4
567 flat = _mm_max_epi16(_mm_or_si128(_mm_subs_epu16(p2, p0),
568 _mm_subs_epu16(p0, p2)),
569 _mm_or_si128(_mm_subs_epu16(q2, q0),
570 _mm_subs_epu16(q0, q2)));
571 work = _mm_max_epi16(_mm_or_si128(_mm_subs_epu16(p3, p0),
572 _mm_subs_epu16(p0, p3)),
573 _mm_or_si128(_mm_subs_epu16(q3, q0),
574 _mm_subs_epu16(q0, q3)));
575 flat = _mm_max_epi16(work, flat);
576 flat = _mm_max_epi16(abs_p1p0, flat);
577 flat = _mm_max_epi16(abs_q1q0, flat);
578 flat = _mm_subs_epu16(flat, _mm_slli_epi16(one, bd - 8));
579 flat = _mm_cmpeq_epi16(flat, zero);
580 flat = _mm_and_si128(flat, mask); // flat & mask
581
582 // four is added before the shift for the rounding part of ROUND_POWER_OF_TWO
583
584 workp_a = _mm_add_epi16(_mm_add_epi16(p3, p3), _mm_add_epi16(p2, p1));
585 workp_a = _mm_add_epi16(_mm_add_epi16(workp_a, four), p0);
586 workp_b = _mm_add_epi16(_mm_add_epi16(q0, p2), p3);
587 workp_shft = _mm_srli_epi16(_mm_add_epi16(workp_a, workp_b), 3);
588 _mm_store_si128((__m128i *)&flat_op2[0], workp_shft);
589
590 workp_b = _mm_add_epi16(_mm_add_epi16(q0, q1), p1);
591 workp_shft = _mm_srli_epi16(_mm_add_epi16(workp_a, workp_b), 3);
592 _mm_store_si128((__m128i *)&flat_op1[0], workp_shft);
593
594 workp_a = _mm_add_epi16(_mm_sub_epi16(workp_a, p3), q2);
595 workp_b = _mm_add_epi16(_mm_sub_epi16(workp_b, p1), p0);
596 workp_shft = _mm_srli_epi16(_mm_add_epi16(workp_a, workp_b), 3);
597 _mm_store_si128((__m128i *)&flat_op0[0], workp_shft);
598
599 workp_a = _mm_add_epi16(_mm_sub_epi16(workp_a, p3), q3);
600 workp_b = _mm_add_epi16(_mm_sub_epi16(workp_b, p0), q0);
601 workp_shft = _mm_srli_epi16(_mm_add_epi16(workp_a, workp_b), 3);
602 _mm_store_si128((__m128i *)&flat_oq0[0], workp_shft);
603
604 workp_a = _mm_add_epi16(_mm_sub_epi16(workp_a, p2), q3);
605 workp_b = _mm_add_epi16(_mm_sub_epi16(workp_b, q0), q1);
606 workp_shft = _mm_srli_epi16(_mm_add_epi16(workp_a, workp_b), 3);
607 _mm_store_si128((__m128i *)&flat_oq1[0], workp_shft);
608
609 workp_a = _mm_add_epi16(_mm_sub_epi16(workp_a, p1), q3);
610 workp_b = _mm_add_epi16(_mm_sub_epi16(workp_b, q1), q2);
611 workp_shft = _mm_srli_epi16(_mm_add_epi16(workp_a, workp_b), 3);
612 _mm_store_si128((__m128i *)&flat_oq2[0], workp_shft);
613
614 // lp filter
615 filt = signed_char_clamp_bd_sse2(_mm_subs_epi16(ps1, qs1), bd);
616 filt = _mm_and_si128(filt, hev);
617 work_a = _mm_subs_epi16(qs0, ps0);
618 filt = _mm_adds_epi16(filt, work_a);
619 filt = _mm_adds_epi16(filt, work_a);
620 filt = _mm_adds_epi16(filt, work_a);
621 // (vp9_filter + 3 * (qs0 - ps0)) & mask
622 filt = signed_char_clamp_bd_sse2(filt, bd);
623 filt = _mm_and_si128(filt, mask);
624
625 filter1 = _mm_adds_epi16(filt, t4);
626 filter2 = _mm_adds_epi16(filt, t3);
627
628 // Filter1 >> 3
629 filter1 = signed_char_clamp_bd_sse2(filter1, bd);
630 filter1 = _mm_srai_epi16(filter1, 3);
631
632 // Filter2 >> 3
633 filter2 = signed_char_clamp_bd_sse2(filter2, bd);
634 filter2 = _mm_srai_epi16(filter2, 3);
635
636 // filt >> 1
637 filt = _mm_adds_epi16(filter1, t1);
638 filt = _mm_srai_epi16(filt, 1);
639 // filter = ROUND_POWER_OF_TWO(filter1, 1) & ~hev;
640 filt = _mm_andnot_si128(hev, filt);
641
642 work_a = signed_char_clamp_bd_sse2(_mm_subs_epi16(qs0, filter1), bd);
643 work_a = _mm_adds_epi16(work_a, t80);
644 q0 = _mm_load_si128((__m128i *)flat_oq0);
645 work_a = _mm_andnot_si128(flat, work_a);
646 q0 = _mm_and_si128(flat, q0);
647 q0 = _mm_or_si128(work_a, q0);
648
649 work_a = signed_char_clamp_bd_sse2(_mm_subs_epi16(qs1, filt), bd);
650 work_a = _mm_adds_epi16(work_a, t80);
651 q1 = _mm_load_si128((__m128i *)flat_oq1);
652 work_a = _mm_andnot_si128(flat, work_a);
653 q1 = _mm_and_si128(flat, q1);
654 q1 = _mm_or_si128(work_a, q1);
655
656 work_a = _mm_loadu_si128((__m128i *)(s + 2 * p));
657 q2 = _mm_load_si128((__m128i *)flat_oq2);
658 work_a = _mm_andnot_si128(flat, work_a);
659 q2 = _mm_and_si128(flat, q2);
660 q2 = _mm_or_si128(work_a, q2);
661
662 work_a = signed_char_clamp_bd_sse2(_mm_adds_epi16(ps0, filter2), bd);
663 work_a = _mm_adds_epi16(work_a, t80);
664 p0 = _mm_load_si128((__m128i *)flat_op0);
665 work_a = _mm_andnot_si128(flat, work_a);
666 p0 = _mm_and_si128(flat, p0);
667 p0 = _mm_or_si128(work_a, p0);
668
669 work_a = signed_char_clamp_bd_sse2(_mm_adds_epi16(ps1, filt), bd);
670 work_a = _mm_adds_epi16(work_a, t80);
671 p1 = _mm_load_si128((__m128i *)flat_op1);
672 work_a = _mm_andnot_si128(flat, work_a);
673 p1 = _mm_and_si128(flat, p1);
674 p1 = _mm_or_si128(work_a, p1);
675
676 work_a = _mm_loadu_si128((__m128i *)(s - 3 * p));
677 p2 = _mm_load_si128((__m128i *)flat_op2);
678 work_a = _mm_andnot_si128(flat, work_a);
679 p2 = _mm_and_si128(flat, p2);
680 p2 = _mm_or_si128(work_a, p2);
681
682 _mm_store_si128((__m128i *)(s - 3 * p), p2);
683 _mm_store_si128((__m128i *)(s - 2 * p), p1);
684 _mm_store_si128((__m128i *)(s - 1 * p), p0);
685 _mm_store_si128((__m128i *)(s + 0 * p), q0);
686 _mm_store_si128((__m128i *)(s + 1 * p), q1);
687 _mm_store_si128((__m128i *)(s + 2 * p), q2);
688 }
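
The workp_a/workp_b running sums above build the filter8 outputs with the usual rounding shift. Assuming the standard libvpx rounding macro (defined in the common headers, not in this file), the first two stored rows reduce per lane to the expressions below; the remaining rows follow the same sliding-window pattern. This is a hedged reading of the code, not text from the patch.

// Assumed rounding macro, as defined in libvpx common headers (not in this file):
#define ROUND_POWER_OF_TWO(value, n) (((value) + (1 << ((n) - 1))) >> (n))

// Per-lane values stored to flat_op2 and flat_op1 above, per my reading:
//   flat_op2[i] = ROUND_POWER_OF_TWO(3 * p3 + 2 * p2 + p1 + p0 + q0, 3);
//   flat_op1[i] = ROUND_POWER_OF_TWO(2 * p3 + p2 + 2 * p1 + p0 + q0 + q1, 3);
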
689
690 void vp9_highbd_lpf_horizontal_8_dual_sse2(uint16_t *s, int p,
691 const uint8_t *_blimit0,
692 const uint8_t *_limit0,
693 const uint8_t *_thresh0,
694 const uint8_t *_blimit1,
695 const uint8_t *_limit1,
696 const uint8_t *_thresh1,
697 int bd) {
698 vp9_highbd_lpf_horizontal_8_sse2(s, p, _blimit0, _limit0, _thresh0, 1, bd);
699 vp9_highbd_lpf_horizontal_8_sse2(s + 8, p, _blimit1, _limit1, _thresh1,
700 1, bd);
701 }
702
703 void vp9_highbd_lpf_horizontal_4_sse2(uint16_t *s, int p,
704 const uint8_t *_blimit,
705 const uint8_t *_limit,
706 const uint8_t *_thresh,
707 int count, int bd) {
708 const __m128i zero = _mm_set1_epi16(0);
709 const __m128i blimit = _mm_slli_epi16(
710 _mm_unpacklo_epi8(
711 _mm_load_si128((const __m128i *)_blimit), zero), bd - 8);
712 const __m128i limit = _mm_slli_epi16(
713 _mm_unpacklo_epi8(
714 _mm_load_si128((const __m128i *)_limit), zero), bd - 8);
715 const __m128i thresh = _mm_slli_epi16(
716 _mm_unpacklo_epi8(
717 _mm_load_si128((const __m128i *)_thresh), zero), bd - 8);
718 __m128i mask, hev, flat;
719 __m128i p3 = _mm_loadu_si128((__m128i *)(s - 4 * p));
720 __m128i p2 = _mm_loadu_si128((__m128i *)(s - 3 * p));
721 __m128i p1 = _mm_loadu_si128((__m128i *)(s - 2 * p));
722 __m128i p0 = _mm_loadu_si128((__m128i *)(s - 1 * p));
723 __m128i q0 = _mm_loadu_si128((__m128i *)(s - 0 * p));
724 __m128i q1 = _mm_loadu_si128((__m128i *)(s + 1 * p));
725 __m128i q2 = _mm_loadu_si128((__m128i *)(s + 2 * p));
726 __m128i q3 = _mm_loadu_si128((__m128i *)(s + 3 * p));
727 const __m128i abs_p1p0 = _mm_or_si128(_mm_subs_epu16(p1, p0),
728 _mm_subs_epu16(p0, p1));
729 const __m128i abs_q1q0 = _mm_or_si128(_mm_subs_epu16(q1, q0),
730 _mm_subs_epu16(q0, q1));
731 const __m128i ffff = _mm_cmpeq_epi16(abs_p1p0, abs_p1p0);
732 const __m128i one = _mm_set1_epi16(1);
733 __m128i abs_p0q0 = _mm_or_si128(_mm_subs_epu16(p0, q0),
734 _mm_subs_epu16(q0, p0));
735 __m128i abs_p1q1 = _mm_or_si128(_mm_subs_epu16(p1, q1),
736 _mm_subs_epu16(q1, p1));
737 __m128i work;
738 const __m128i t4 = _mm_set1_epi16(4);
739 const __m128i t3 = _mm_set1_epi16(3);
740 const __m128i t80 = _mm_slli_epi16(_mm_set1_epi16(0x80), bd - 8);
741 const __m128i tff80 = _mm_slli_epi16(_mm_set1_epi16(0xff80), bd - 8);
742 const __m128i tffe0 = _mm_slli_epi16(_mm_set1_epi16(0xffe0), bd - 8);
743 const __m128i t1f = _mm_srli_epi16(_mm_set1_epi16(0x1fff), 16 - bd);
744 // equivalent to shifting 0x1f left by bitdepth - 8
745 // and setting new bits to 1
746 const __m128i t1 = _mm_set1_epi16(0x1);
747 const __m128i t7f = _mm_srli_epi16(_mm_set1_epi16(0x7fff), 16 - bd);
748 // equivalent to shifting 0x7f left by bitdepth - 8
749 // and setting new bits to 1
750 const __m128i ps1 = _mm_subs_epi16(_mm_loadu_si128((__m128i *)(s - 2 * p)),
751 t80);
752 const __m128i ps0 = _mm_subs_epi16(_mm_loadu_si128((__m128i *)(s - 1 * p)),
753 t80);
754 const __m128i qs0 = _mm_subs_epi16(_mm_loadu_si128((__m128i *)(s + 0 * p)),
755 t80);
756 const __m128i qs1 = _mm_subs_epi16(_mm_loadu_si128((__m128i *)(s + 1 * p)),
757 t80);
758 __m128i filt;
759 __m128i work_a;
760 __m128i filter1, filter2;
761
762 (void)count;
763
764 // filter_mask and hev_mask
765 flat = _mm_max_epi16(abs_p1p0, abs_q1q0);
766 hev = _mm_subs_epu16(flat, thresh);
767 hev = _mm_xor_si128(_mm_cmpeq_epi16(hev, zero), ffff);
768
769 abs_p0q0 = _mm_adds_epu16(abs_p0q0, abs_p0q0);
770 abs_p1q1 = _mm_srli_epi16(abs_p1q1, 1);
771 mask = _mm_subs_epu16(_mm_adds_epu16(abs_p0q0, abs_p1q1), blimit);
772 mask = _mm_xor_si128(_mm_cmpeq_epi16(mask, zero), ffff);
773 // mask |= (abs(p0 - q0) * 2 + abs(p1 - q1) / 2 > blimit) * -1;
774 // So taking maximums continues to work:
775 mask = _mm_and_si128(mask, _mm_adds_epu16(limit, one));
776 mask = _mm_max_epi16(flat, mask);
777 // mask |= (abs(p1 - p0) > limit) * -1;
778 // mask |= (abs(q1 - q0) > limit) * -1;
779 work = _mm_max_epi16(_mm_or_si128(_mm_subs_epu16(p2, p1),
780 _mm_subs_epu16(p1, p2)),
781 _mm_or_si128(_mm_subs_epu16(p3, p2),
782 _mm_subs_epu16(p2, p3)));
783 mask = _mm_max_epi16(work, mask);
784 work = _mm_max_epi16(_mm_or_si128(_mm_subs_epu16(q2, q1),
785 _mm_subs_epu16(q1, q2)),
786 _mm_or_si128(_mm_subs_epu16(q3, q2),
787 _mm_subs_epu16(q2, q3)));
788 mask = _mm_max_epi16(work, mask);
789 mask = _mm_subs_epu16(mask, limit);
790 mask = _mm_cmpeq_epi16(mask, zero);
791
792 // filter4
793 filt = signed_char_clamp_bd_sse2(_mm_subs_epi16(ps1, qs1), bd);
794 filt = _mm_and_si128(filt, hev);
795 work_a = _mm_subs_epi16(qs0, ps0);
796 filt = _mm_adds_epi16(filt, work_a);
797 filt = _mm_adds_epi16(filt, work_a);
798 filt = signed_char_clamp_bd_sse2(_mm_adds_epi16(filt, work_a), bd);
799 // (vp9_filter + 3 * (qs0 - ps0)) & mask
800 filt = _mm_and_si128(filt, mask);
801
802 filter1 = signed_char_clamp_bd_sse2(_mm_adds_epi16(filt, t4), bd);
803 filter2 = signed_char_clamp_bd_sse2(_mm_adds_epi16(filt, t3), bd);
804
805 // Filter1 >> 3
806 work_a = _mm_cmpgt_epi16(zero, filter1); // get the values that are <0
807 filter1 = _mm_srli_epi16(filter1, 3);
808 work_a = _mm_and_si128(work_a, tffe0); // sign bits for the values < 0
809 filter1 = _mm_and_si128(filter1, t1f); // clamp the range
810 filter1 = _mm_or_si128(filter1, work_a); // reinsert the sign bits
811
812 // Filter2 >> 3
813 work_a = _mm_cmpgt_epi16(zero, filter2);
814 filter2 = _mm_srli_epi16(filter2, 3);
815 work_a = _mm_and_si128(work_a, tffe0);
816 filter2 = _mm_and_si128(filter2, t1f);
817 filter2 = _mm_or_si128(filter2, work_a);
818
819 // filt >> 1
820 filt = _mm_adds_epi16(filter1, t1);
821 work_a = _mm_cmpgt_epi16(zero, filt);
822 filt = _mm_srli_epi16(filt, 1);
823 work_a = _mm_and_si128(work_a, tff80);
824 filt = _mm_and_si128(filt, t7f);
825 filt = _mm_or_si128(filt, work_a);
826
827 filt = _mm_andnot_si128(hev, filt);
828
829 q0 = _mm_adds_epi16(
830 signed_char_clamp_bd_sse2(_mm_subs_epi16(qs0, filter1), bd), t80);
831 q1 = _mm_adds_epi16(
832 signed_char_clamp_bd_sse2(_mm_subs_epi16(qs1, filt), bd), t80);
833 p0 = _mm_adds_epi16(
834 signed_char_clamp_bd_sse2(_mm_adds_epi16(ps0, filter2), bd), t80);
835 p1 = _mm_adds_epi16(
836 signed_char_clamp_bd_sse2(_mm_adds_epi16(ps1, filt), bd), t80);
837
838 _mm_storeu_si128((__m128i *)(s - 2 * p), p1);
839 _mm_storeu_si128((__m128i *)(s - 1 * p), p0);
840 _mm_storeu_si128((__m128i *)(s + 0 * p), q0);
841 _mm_storeu_si128((__m128i *)(s + 1 * p), q1);
842 }
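
Unlike the 16-wide path, this function does not use _mm_srai_epi16 for "Filter1 >> 3"; it emulates the arithmetic shift with a logical shift plus sign-bit patching via the t1f/tffe0 constants. A self-contained sketch of that idiom, assuming the input lanes already lie in the bd-bit signed range (the helper name is hypothetical):

#include <emmintrin.h>

// Hypothetical sketch: arithmetic >> 3 for 16-bit lanes whose values fit the
// signed range of a bd-bit pixel, built the same way as the code above.
static __m128i srai3_bd_sketch(__m128i x, int bd) {
  const __m128i lo_mask = _mm_srli_epi16(_mm_set1_epi16(0x1fff), 16 - bd);        // t1f
  const __m128i hi_bits = _mm_slli_epi16(_mm_set1_epi16((short)0xffe0), bd - 8);  // tffe0
  const __m128i neg = _mm_cmpgt_epi16(_mm_setzero_si128(), x);  // lanes that are < 0
  __m128i r = _mm_srli_epi16(x, 3);                     // logical shift right
  r = _mm_and_si128(r, lo_mask);                        // keep the valid magnitude bits
  return _mm_or_si128(r, _mm_and_si128(neg, hi_bits));  // re-extend the sign bits
}
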
843
844 void vp9_highbd_lpf_horizontal_4_dual_sse2(uint16_t *s, int p,
845 const uint8_t *_blimit0,
846 const uint8_t *_limit0,
847 const uint8_t *_thresh0,
848 const uint8_t *_blimit1,
849 const uint8_t *_limit1,
850 const uint8_t *_thresh1,
851 int bd) {
852 vp9_highbd_lpf_horizontal_4_sse2(s, p, _blimit0, _limit0, _thresh0, 1, bd);
853 vp9_highbd_lpf_horizontal_4_sse2(s + 8, p, _blimit1, _limit1, _thresh1, 1,
854 bd);
855 }
856
857 static INLINE void highbd_transpose(uint16_t *src[], int in_p,
858 uint16_t *dst[], int out_p,
859 int num_8x8_to_transpose) {
860 int idx8x8 = 0;
861 __m128i p0, p1, p2, p3, p4, p5, p6, p7, x0, x1, x2, x3, x4, x5, x6, x7;
862 do {
863 uint16_t *in = src[idx8x8];
864 uint16_t *out = dst[idx8x8];
865
866 p0 = _mm_loadu_si128((__m128i *)(in + 0*in_p)); // 00 01 02 03 04 05 06 07
867 p1 = _mm_loadu_si128((__m128i *)(in + 1*in_p)); // 10 11 12 13 14 15 16 17
868 p2 = _mm_loadu_si128((__m128i *)(in + 2*in_p)); // 20 21 22 23 24 25 26 27
869 p3 = _mm_loadu_si128((__m128i *)(in + 3*in_p)); // 30 31 32 33 34 35 36 37
870 p4 = _mm_loadu_si128((__m128i *)(in + 4*in_p)); // 40 41 42 43 44 45 46 47
871 p5 = _mm_loadu_si128((__m128i *)(in + 5*in_p)); // 50 51 52 53 54 55 56 57
872 p6 = _mm_loadu_si128((__m128i *)(in + 6*in_p)); // 60 61 62 63 64 65 66 67
873 p7 = _mm_loadu_si128((__m128i *)(in + 7*in_p)); // 70 71 72 73 74 75 76 77
874 // 00 10 01 11 02 12 03 13
875 x0 = _mm_unpacklo_epi16(p0, p1);
876 // 20 30 21 31 22 32 23 33
877 x1 = _mm_unpacklo_epi16(p2, p3);
878 // 40 50 41 51 42 52 43 53
879 x2 = _mm_unpacklo_epi16(p4, p5);
880 // 60 70 61 71 62 72 63 73
881 x3 = _mm_unpacklo_epi16(p6, p7);
882 // 00 10 20 30 01 11 21 31
883 x4 = _mm_unpacklo_epi32(x0, x1);
884 // 40 50 60 70 41 51 61 71
885 x5 = _mm_unpacklo_epi32(x2, x3);
886 // 00 10 20 30 40 50 60 70
887 x6 = _mm_unpacklo_epi64(x4, x5);
888 // 01 11 21 31 41 51 61 71
889 x7 = _mm_unpackhi_epi64(x4, x5);
890
891 _mm_storeu_si128((__m128i *)(out + 0*out_p), x6);
892 // 00 10 20 30 40 50 60 70
893 _mm_storeu_si128((__m128i *)(out + 1*out_p), x7);
894 // 01 11 21 31 41 51 61 71
895
896 // 02 12 22 32 03 13 23 33
897 x4 = _mm_unpackhi_epi32(x0, x1);
898 // 42 52 62 72 43 53 63 73
899 x5 = _mm_unpackhi_epi32(x2, x3);
900 // 02 12 22 32 42 52 62 72
901 x6 = _mm_unpacklo_epi64(x4, x5);
902 // 03 13 23 33 43 53 63 73
903 x7 = _mm_unpackhi_epi64(x4, x5);
904
905 _mm_storeu_si128((__m128i *)(out + 2*out_p), x6);
906 // 02 12 22 32 42 52 62 72
907 _mm_storeu_si128((__m128i *)(out + 3*out_p), x7);
908 // 03 13 23 33 43 53 63 73
909
910 // 04 14 05 15 06 16 07 17
911 x0 = _mm_unpackhi_epi16(p0, p1);
912 // 24 34 25 35 26 36 27 37
913 x1 = _mm_unpackhi_epi16(p2, p3);
914 // 44 54 45 55 46 56 47 57
915 x2 = _mm_unpackhi_epi16(p4, p5);
916 // 64 74 65 75 66 76 67 77
917 x3 = _mm_unpackhi_epi16(p6, p7);
918 // 04 14 24 34 05 15 25 35
919 x4 = _mm_unpacklo_epi32(x0, x1);
920 // 44 54 64 74 45 55 65 75
921 x5 = _mm_unpacklo_epi32(x2, x3);
922 // 04 14 24 34 44 54 64 74
923 x6 = _mm_unpacklo_epi64(x4, x5);
924 // 05 15 25 35 45 55 65 75
925 x7 = _mm_unpackhi_epi64(x4, x5);
926
927 _mm_storeu_si128((__m128i *)(out + 4*out_p), x6);
928 // 04 14 24 34 44 54 64 74
929 _mm_storeu_si128((__m128i *)(out + 5*out_p), x7);
930 // 05 15 25 35 45 55 65 75
931
932 // 06 16 26 36 07 17 27 37
933 x4 = _mm_unpackhi_epi32(x0, x1);
934 // 46 56 66 76 47 57 67 77
935 x5 = _mm_unpackhi_epi32(x2, x3);
936 // 06 16 26 36 46 56 66 76
937 x6 = _mm_unpacklo_epi64(x4, x5);
938 // 07 17 27 37 47 57 67 77
939 x7 = _mm_unpackhi_epi64(x4, x5);
940
941 _mm_storeu_si128((__m128i *)(out + 6*out_p), x6);
942 // 06 16 26 36 46 56 66 76
943 _mm_storeu_si128((__m128i *)(out + 7*out_p), x7);
944 // 07 17 27 37 47 57 67 77
945 } while (++idx8x8 < num_8x8_to_transpose);
946 }
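
A usage sketch, mirroring how the vertical filters below call this helper: transpose one 8x8 block that straddles a vertical edge into a row-major scratch buffer, filter it horizontally, then transpose back. The names s, p, and scratch are placeholders, not part of the function above.

// Hypothetical usage (s points into the frame, p is the stride in pixels):
DECLARE_ALIGNED_ARRAY(16, uint16_t, scratch, 8 * 8);
uint16_t *src[1];
uint16_t *dst[1];
src[0] = s - 4;                        // start 4 columns left of the vertical edge
dst[0] = scratch;
highbd_transpose(src, p, dst, 8, 1);   // columns become rows in scratch
// ... run a horizontal loop filter on scratch, then swap src/dst and
// transpose back with an output pitch of p, as the vertical filters below do.
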
947
948 static INLINE void highbd_transpose8x16(uint16_t *in0, uint16_t *in1,
949 int in_p, uint16_t *out, int out_p) {
950 uint16_t *src0[1];
951 uint16_t *src1[1];
952 uint16_t *dest0[1];
953 uint16_t *dest1[1];
954 src0[0] = in0;
955 src1[0] = in1;
956 dest0[0] = out;
957 dest1[0] = out + 8;
958 highbd_transpose(src0, in_p, dest0, out_p, 1);
959 highbd_transpose(src1, in_p, dest1, out_p, 1);
960 }
961
962 void vp9_highbd_lpf_vertical_4_sse2(uint16_t *s, int p,
963 const uint8_t *blimit,
964 const uint8_t *limit,
965 const uint8_t *thresh,
966 int count, int bd) {
967 DECLARE_ALIGNED_ARRAY(16, uint16_t, t_dst, 8 * 8);
968 uint16_t *src[1];
969 uint16_t *dst[1];
970 (void)count;
971
972 // Transpose 8x8
973 src[0] = s - 4;
974 dst[0] = t_dst;
975
976 highbd_transpose(src, p, dst, 8, 1);
977
978 // Loop filtering
979 vp9_highbd_lpf_horizontal_4_sse2(t_dst + 4 * 8, 8, blimit, limit, thresh, 1,
980 bd);
981
982 src[0] = t_dst;
983 dst[0] = s - 4;
984
985 // Transpose back
986 highbd_transpose(src, 8, dst, p, 1);
987 }
988
989 void vp9_highbd_lpf_vertical_4_dual_sse2(uint16_t *s, int p,
990 const uint8_t *blimit0,
991 const uint8_t *limit0,
992 const uint8_t *thresh0,
993 const uint8_t *blimit1,
994 const uint8_t *limit1,
995 const uint8_t *thresh1,
996 int bd) {
997 DECLARE_ALIGNED_ARRAY(16, uint16_t, t_dst, 16 * 8);
998 uint16_t *src[2];
999 uint16_t *dst[2];
1000
1001 // Transpose 8x16
1002 highbd_transpose8x16(s - 4, s - 4 + p * 8, p, t_dst, 16);
1003
1004 // Loop filtering
1005 vp9_highbd_lpf_horizontal_4_dual_sse2(t_dst + 4 * 16, 16, blimit0, limit0,
1006 thresh0, blimit1, limit1, thresh1, bd);
1007 src[0] = t_dst;
1008 src[1] = t_dst + 8;
1009 dst[0] = s - 4;
1010 dst[1] = s - 4 + p * 8;
1011
1012 // Transpose back
1013 highbd_transpose(src, 16, dst, p, 2);
1014 }
1015
1016 void vp9_highbd_lpf_vertical_8_sse2(uint16_t *s, int p,
1017 const uint8_t *blimit,
1018 const uint8_t *limit,
1019 const uint8_t *thresh,
1020 int count, int bd) {
1021 DECLARE_ALIGNED_ARRAY(16, uint16_t, t_dst, 8 * 8);
1022 uint16_t *src[1];
1023 uint16_t *dst[1];
1024 (void)count;
1025
1026 // Transpose 8x8
1027 src[0] = s - 4;
1028 dst[0] = t_dst;
1029
1030 highbd_transpose(src, p, dst, 8, 1);
1031
1032 // Loop filtering
1033 vp9_highbd_lpf_horizontal_8_sse2(t_dst + 4 * 8, 8, blimit, limit, thresh, 1,
1034 bd);
1035
1036 src[0] = t_dst;
1037 dst[0] = s - 4;
1038
1039 // Transpose back
1040 highbd_transpose(src, 8, dst, p, 1);
1041 }
1042
1043 void vp9_highbd_lpf_vertical_8_dual_sse2(uint16_t *s, int p,
1044 const uint8_t *blimit0,
1045 const uint8_t *limit0,
1046 const uint8_t *thresh0,
1047 const uint8_t *blimit1,
1048 const uint8_t *limit1,
1049 const uint8_t *thresh1,
1050 int bd) {
1051 DECLARE_ALIGNED_ARRAY(16, uint16_t, t_dst, 16 * 8);
1052 uint16_t *src[2];
1053 uint16_t *dst[2];
1054
1055 // Transpose 8x16
1056 highbd_transpose8x16(s - 4, s - 4 + p * 8, p, t_dst, 16);
1057
1058 // Loop filtering
1059 vp9_highbd_lpf_horizontal_8_dual_sse2(t_dst + 4 * 16, 16, blimit0, limit0,
1060 thresh0, blimit1, limit1, thresh1, bd);
1061 src[0] = t_dst;
1062 src[1] = t_dst + 8;
1063
1064 dst[0] = s - 4;
1065 dst[1] = s - 4 + p * 8;
1066
1067 // Transpose back
1068 highbd_transpose(src, 16, dst, p, 2);
1069 }
1070
1071 void vp9_highbd_lpf_vertical_16_sse2(uint16_t *s, int p,
1072 const uint8_t *blimit,
1073 const uint8_t *limit,
1074 const uint8_t *thresh,
1075 int bd) {
1076 DECLARE_ALIGNED_ARRAY(16, uint16_t, t_dst, 8 * 16);
1077 uint16_t *src[2];
1078 uint16_t *dst[2];
1079
1080 src[0] = s - 8;
1081 src[1] = s;
1082 dst[0] = t_dst;
1083 dst[1] = t_dst + 8 * 8;
1084
1085 // Transpose 16x8
1086 highbd_transpose(src, p, dst, 8, 2);
1087
1088 // Loop filtering
1089 highbd_mb_lpf_horizontal_edge_w_sse2_8(t_dst + 8 * 8, 8, blimit, limit,
1090 thresh, bd);
1091 src[0] = t_dst;
1092 src[1] = t_dst + 8 * 8;
1093 dst[0] = s - 8;
1094 dst[1] = s;
1095
1096 // Transpose back
1097 highbd_transpose(src, 8, dst, p, 2);
1098 }
1099
1100 void vp9_highbd_lpf_vertical_16_dual_sse2(uint16_t *s,
1101 int p,
1102 const uint8_t *blimit,
1103 const uint8_t *limit,
1104 const uint8_t *thresh,
1105 int bd) {
1106 DECLARE_ALIGNED_ARRAY(16, uint16_t, t_dst, 256);
1107
1108 // Transpose 16x16
1109 highbd_transpose8x16(s - 8, s - 8 + 8 * p, p, t_dst, 16);
1110 highbd_transpose8x16(s, s + 8 * p, p, t_dst + 8 * 16, 16);
1111
1112 // Loop filtering
1113 highbd_mb_lpf_horizontal_edge_w_sse2_16(t_dst + 8 * 16, 16, blimit, limit,
1114 thresh, bd);
1115
1116 // Transpose back
1117 highbd_transpose8x16(t_dst, t_dst + 8 * 16, 16, s - 8, p);
1118 highbd_transpose8x16(t_dst + 8, t_dst + 8 + 8 * 16, 16, s - 8 + 8 * p, p);
1119 }
