OLD | NEW |
1 /* | 1 /* |
2 * Copyright (c) 2014 The WebM project authors. All Rights Reserved. | 2 * Copyright (c) 2014 The WebM project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
11 #include <emmintrin.h> | 11 #include <emmintrin.h> |
12 | 12 |
13 #include "./vpx_config.h" | 13 #include "./vpx_config.h" |
14 #include "./vp9_rtcd.h" | 14 #include "./vp9_rtcd.h" |
15 | 15 |
16 #include "vpx_ports/emmintrin_compat.h" | 16 #include "vpx_ports/emmintrin_compat.h" |
17 #include "vpx/vpx_integer.h" | 17 #include "vpx/vpx_integer.h" |
18 #include "vp9/common/vp9_reconinter.h" | 18 #include "vp9/common/vp9_reconinter.h" |
19 #include "vp9/encoder/vp9_context_tree.h" | 19 #include "vp9/encoder/vp9_context_tree.h" |
20 #include "vp9/encoder/vp9_denoiser.h" | 20 #include "vp9/encoder/vp9_denoiser.h" |
21 #include "vpx_mem/vpx_mem.h" | 21 #include "vpx_mem/vpx_mem.h" |
22 | 22 |
23 // Compute the sum of all pixel differences of this MB. | 23 // Compute the sum of all pixel differences of this MB. |
24 static INLINE int sum_diff_16x1(__m128i acc_diff) { | 24 static INLINE int sum_diff_16x1(__m128i acc_diff) { |
25 const __m128i k_1 = _mm_set1_epi16(1); | 25 const __m128i k_1 = _mm_set1_epi16(1); |
26 const __m128i acc_diff_lo = _mm_srai_epi16( | 26 const __m128i acc_diff_lo = |
27 _mm_unpacklo_epi8(acc_diff, acc_diff), 8); | 27 _mm_srai_epi16(_mm_unpacklo_epi8(acc_diff, acc_diff), 8); |
28 const __m128i acc_diff_hi = _mm_srai_epi16( | 28 const __m128i acc_diff_hi = |
29 _mm_unpackhi_epi8(acc_diff, acc_diff), 8); | 29 _mm_srai_epi16(_mm_unpackhi_epi8(acc_diff, acc_diff), 8); |
30 const __m128i acc_diff_16 = _mm_add_epi16(acc_diff_lo, acc_diff_hi); | 30 const __m128i acc_diff_16 = _mm_add_epi16(acc_diff_lo, acc_diff_hi); |
31 const __m128i hg_fe_dc_ba = _mm_madd_epi16(acc_diff_16, k_1); | 31 const __m128i hg_fe_dc_ba = _mm_madd_epi16(acc_diff_16, k_1); |
32 const __m128i hgfe_dcba = _mm_add_epi32(hg_fe_dc_ba, | 32 const __m128i hgfe_dcba = |
33 _mm_srli_si128(hg_fe_dc_ba, 8)); | 33 _mm_add_epi32(hg_fe_dc_ba, _mm_srli_si128(hg_fe_dc_ba, 8)); |
34 const __m128i hgfedcba = _mm_add_epi32(hgfe_dcba, | 34 const __m128i hgfedcba = |
35 _mm_srli_si128(hgfe_dcba, 4)); | 35 _mm_add_epi32(hgfe_dcba, _mm_srli_si128(hgfe_dcba, 4)); |
36 int sum_diff = _mm_cvtsi128_si32(hgfedcba); | 36 return _mm_cvtsi128_si32(hgfedcba); |
37 return sum_diff; | |
38 } | 37 } |
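
A scalar sketch of the same reduction, for reference. The unpack-with-self
followed by an arithmetic right shift by 8 is the SSE2 idiom for
sign-extending packed signed bytes to 16 bits; _mm_madd_epi16 against a
vector of ones then folds the 16-bit lanes into 32-bit partial sums before
the final shift-and-add steps. (Illustrative code, assuming the sixteen
lanes of acc_diff are signed 8-bit accumulators, as the comment below
states.)

    /* Illustrative scalar equivalent of sum_diff_16x1(). */
    static int sum_diff_16x1_ref(const int8_t acc_diff[16]) {
      int sum = 0, i;
      for (i = 0; i < 16; ++i)
        sum += acc_diff[i];  /* sign-extend each byte and accumulate */
      return sum;
    }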
39 | 38 |
40 // Denoise a 16x1 vector. | 39 // Denoise a 16x1 vector. |
41 static INLINE __m128i vp9_denoiser_16x1_sse2(const uint8_t *sig, | 40 static INLINE __m128i vp9_denoiser_16x1_sse2(const uint8_t *sig, |
42 const uint8_t *mc_running_avg_y, | 41 const uint8_t *mc_running_avg_y, |
43 uint8_t *running_avg_y, | 42 uint8_t *running_avg_y, |
44 const __m128i *k_0, | 43 const __m128i *k_0, |
45 const __m128i *k_4, | 44 const __m128i *k_4, |
46 const __m128i *k_8, | 45 const __m128i *k_8, |
47 const __m128i *k_16, | 46 const __m128i *k_16, |
48 const __m128i *l3, | 47 const __m128i *l3, |
49 const __m128i *l32, | 48 const __m128i *l32, |
50 const __m128i *l21, | 49 const __m128i *l21, |
51 __m128i acc_diff) { | 50 __m128i acc_diff) { |
52 // Calculate differences | 51 // Calculate differences |
53 const __m128i v_sig = _mm_loadu_si128((const __m128i *)(&sig[0])); | 52 const __m128i v_sig = _mm_loadu_si128((const __m128i *)(&sig[0])); |
54 const __m128i v_mc_running_avg_y = _mm_loadu_si128( | 53 const __m128i v_mc_running_avg_y = |
55 (const __m128i *)(&mc_running_avg_y[0])); | 54 _mm_loadu_si128((const __m128i *)(&mc_running_avg_y[0])); |
56 __m128i v_running_avg_y; | 55 __m128i v_running_avg_y; |
57 const __m128i pdiff = _mm_subs_epu8(v_mc_running_avg_y, v_sig); | 56 const __m128i pdiff = _mm_subs_epu8(v_mc_running_avg_y, v_sig); |
58 const __m128i ndiff = _mm_subs_epu8(v_sig, v_mc_running_avg_y); | 57 const __m128i ndiff = _mm_subs_epu8(v_sig, v_mc_running_avg_y); |
59 // Obtain the sign. FF if diff is negative. | 58 // Obtain the sign. FF if diff is negative. |
60 const __m128i diff_sign = _mm_cmpeq_epi8(pdiff, *k_0); | 59 const __m128i diff_sign = _mm_cmpeq_epi8(pdiff, *k_0); |
61 // Clamp absolute difference to 16 to be used to get mask. Doing this | 60 // Clamp absolute difference to 16 to be used to get mask. Doing this |
62 // allows us to use _mm_cmpgt_epi8, which operates on signed byte. | 61 // allows us to use _mm_cmpgt_epi8, which operates on signed byte. |
63 const __m128i clamped_absdiff = _mm_min_epu8( | 62 const __m128i clamped_absdiff = |
64 _mm_or_si128(pdiff, ndiff), *k_16); | 63 _mm_min_epu8(_mm_or_si128(pdiff, ndiff), *k_16); |
65 // Get masks for l2 l1 and l0 adjustments. | 64 // Get masks for l2 l1 and l0 adjustments. |
66 const __m128i mask2 = _mm_cmpgt_epi8(*k_16, clamped_absdiff); | 65 const __m128i mask2 = _mm_cmpgt_epi8(*k_16, clamped_absdiff); |
67 const __m128i mask1 = _mm_cmpgt_epi8(*k_8, clamped_absdiff); | 66 const __m128i mask1 = _mm_cmpgt_epi8(*k_8, clamped_absdiff); |
68 const __m128i mask0 = _mm_cmpgt_epi8(*k_4, clamped_absdiff); | 67 const __m128i mask0 = _mm_cmpgt_epi8(*k_4, clamped_absdiff); |
69 // Get adjustments for l2, l1, and l0. | 68 // Get adjustments for l2, l1, and l0. |
70 __m128i adj2 = _mm_and_si128(mask2, *l32); | 69 __m128i adj2 = _mm_and_si128(mask2, *l32); |
71 const __m128i adj1 = _mm_and_si128(mask1, *l21); | 70 const __m128i adj1 = _mm_and_si128(mask1, *l21); |
72 const __m128i adj0 = _mm_and_si128(mask0, clamped_absdiff); | 71 const __m128i adj0 = _mm_and_si128(mask0, clamped_absdiff); |
73 __m128i adj, padj, nadj; | 72 __m128i adj, padj, nadj; |
74 | 73 |
(...skipping 13 matching lines...) |

88 _mm_storeu_si128((__m128i *)running_avg_y, v_running_avg_y); | 87 _mm_storeu_si128((__m128i *)running_avg_y, v_running_avg_y); |
89 | 88 |
90 // Adjustments <=7, and each element in acc_diff can fit in signed | 89 // Adjustments <=7, and each element in acc_diff can fit in signed |
91 // char. | 90 // char. |
92 acc_diff = _mm_adds_epi8(acc_diff, padj); | 91 acc_diff = _mm_adds_epi8(acc_diff, padj); |
93 acc_diff = _mm_subs_epi8(acc_diff, nadj); | 92 acc_diff = _mm_subs_epi8(acc_diff, nadj); |
94 return acc_diff; | 93 return acc_diff; |
95 } | 94 } |
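
The masked adjustments implement a piecewise filter strength keyed to the
absolute pixel difference. The 13 elided lines are assumed to combine
adj2/adj1/adj0 in the usual vp8/vp9 denoiser fashion (level 3 minus the
masked step-downs); under that assumption, the per-pixel behavior is the
following scalar sketch (illustrative only, not part of the patch):

    /* Scalar sketch of one pixel of the strong filter; assumes the elided
     * lines yield adj = l3 - l32 - l21 at level 1, adj = l3 - l32 at
     * level 2, and adj = l3 at level 3. */
    static uint8_t denoise_pixel_ref(uint8_t sig, uint8_t mc_avg,
                                     int shift_inc, int l3) {
      const int diff = mc_avg - sig;
      const int a = diff < 0 ? -diff : diff;
      const int absdiff = a < 16 ? a : 16;
      int adj, out;
      if (absdiff < 4 + shift_inc)
        adj = absdiff;               /* level 0: take the full difference */
      else if (absdiff < 8)
        adj = l3 - 2 - 1;            /* level 1: l3 - l32 - l21 */
      else if (absdiff < 16)
        adj = l3 - 2;                /* level 2: l3 - l32 */
      else
        adj = l3;                    /* level 3 */
      out = diff > 0 ? sig + adj : sig - adj;
      return (uint8_t)(out < 0 ? 0 : (out > 255 ? 255 : out));  /* saturate */
    }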
96 | 95 |
97 // Denoise a 16x1 vector with a weaker filter. | 96 // Denoise a 16x1 vector with a weaker filter. |
98 static INLINE __m128i vp9_denoiser_adj_16x1_sse2(const uint8_t *sig, | 97 static INLINE __m128i vp9_denoiser_adj_16x1_sse2( |
99 const uint8_t *mc_running_avg_y, | 98 const uint8_t *sig, const uint8_t *mc_running_avg_y, |
100 uint8_t *running_avg_y, | 99 uint8_t *running_avg_y, const __m128i k_0, |
101 const __m128i k_0, | 100 const __m128i k_delta, __m128i acc_diff) { |
102 const __m128i k_delta, | |
103 __m128i acc_diff) { | |
104 __m128i v_running_avg_y = _mm_loadu_si128((__m128i *)(&running_avg_y[0])); | 101 __m128i v_running_avg_y = _mm_loadu_si128((__m128i *)(&running_avg_y[0])); |
105 // Calculate differences. | 102 // Calculate differences. |
106 const __m128i v_sig = _mm_loadu_si128((const __m128i *)(&sig[0])); | 103 const __m128i v_sig = _mm_loadu_si128((const __m128i *)(&sig[0])); |
107 const __m128i v_mc_running_avg_y = | 104 const __m128i v_mc_running_avg_y = |
108 _mm_loadu_si128((const __m128i *)(&mc_running_avg_y[0])); | 105 _mm_loadu_si128((const __m128i *)(&mc_running_avg_y[0])); |
109 const __m128i pdiff = _mm_subs_epu8(v_mc_running_avg_y, v_sig); | 106 const __m128i pdiff = _mm_subs_epu8(v_mc_running_avg_y, v_sig); |
110 const __m128i ndiff = _mm_subs_epu8(v_sig, v_mc_running_avg_y); | 107 const __m128i ndiff = _mm_subs_epu8(v_sig, v_mc_running_avg_y); |
111 // Obtain the sign. FF if diff is negative. | 108 // Obtain the sign. FF if diff is negative. |
112 const __m128i diff_sign = _mm_cmpeq_epi8(pdiff, k_0); | 109 const __m128i diff_sign = _mm_cmpeq_epi8(pdiff, k_0); |
113 // Clamp absolute difference to delta to get the adjustment. | 110 // Clamp absolute difference to delta to get the adjustment. |
114 const __m128i adj = | 111 const __m128i adj = |
115 _mm_min_epu8(_mm_or_si128(pdiff, ndiff), k_delta); | 112 _mm_min_epu8(_mm_or_si128(pdiff, ndiff), k_delta); |
116 // Restore the sign and get positive and negative adjustments. | 113 // Restore the sign and get positive and negative adjustments. |
117 __m128i padj, nadj; | 114 __m128i padj, nadj; |
118 padj = _mm_andnot_si128(diff_sign, adj); | 115 padj = _mm_andnot_si128(diff_sign, adj); |
119 nadj = _mm_and_si128(diff_sign, adj); | 116 nadj = _mm_and_si128(diff_sign, adj); |
120 // Calculate filtered value. | 117 // Calculate filtered value. |
121 v_running_avg_y = _mm_subs_epu8(v_running_avg_y, padj); | 118 v_running_avg_y = _mm_subs_epu8(v_running_avg_y, padj); |
122 v_running_avg_y = _mm_adds_epu8(v_running_avg_y, nadj); | 119 v_running_avg_y = _mm_adds_epu8(v_running_avg_y, nadj); |
123 _mm_storeu_si128((__m128i *)running_avg_y, v_running_avg_y); | 120 _mm_storeu_si128((__m128i *)running_avg_y, v_running_avg_y); |
124 | 121 |
125 // Accumulate the adjustments. | 122 // Accumulate the adjustments. |
126 acc_diff = _mm_subs_epi8(acc_diff, padj); | 123 acc_diff = _mm_subs_epi8(acc_diff, padj); |
127 acc_diff = _mm_adds_epi8(acc_diff, nadj); | 124 acc_diff = _mm_adds_epi8(acc_diff, nadj); |
128 return acc_diff; | 125 return acc_diff; |
129 } | 126 } |
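
In scalar terms, the weaker filter nudges the already-filtered running
average back toward the source by at most delta per pixel, which shrinks
the accumulated sum_diff. A sketch under the same lane layout (illustrative
only):

    /* Illustrative scalar equivalent of one pixel of the weak filter. */
    static uint8_t weak_adjust_ref(uint8_t sig, uint8_t mc_avg,
                                   uint8_t avg, int delta) {
      const int diff = mc_avg - sig;
      const int a = diff < 0 ? -diff : diff;
      const int adj = a < delta ? a : delta;
      if (diff > 0)                 /* strong pass added, so pull back down */
        return (uint8_t)(avg > adj ? avg - adj : 0);
      else                          /* strong pass subtracted, so push up */
        return (uint8_t)(avg + adj > 255 ? 255 : avg + adj);
    }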
130 | 127 |
131 static int vp9_denoiser_4xM_sse2(const uint8_t *sig, int sig_stride, | 128 // Denoiser for 4xM and 8xM blocks. |
132 const uint8_t *mc_running_avg_y, | 129 static int vp9_denoiser_NxM_sse2_small( |
133 int mc_avg_y_stride, | 130 const uint8_t *sig, int sig_stride, const uint8_t *mc_running_avg_y, |
134 uint8_t *running_avg_y, int avg_y_stride, | 131 int mc_avg_y_stride, uint8_t *running_avg_y, int avg_y_stride, |
135 int increase_denoising, | 132 int increase_denoising, BLOCK_SIZE bs, int motion_magnitude, int width) { |
136 BLOCK_SIZE bs, | 133 int sum_diff_thresh, r, sum_diff = 0; |
137 int motion_magnitude) { | 134 const int shift_inc = (increase_denoising && |
138 int sum_diff_thresh; | 135 motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD) ? |
139 int r; | 136 1 : 0; |
140 int shift_inc = (increase_denoising && | 137 uint8_t sig_buffer[8][16], mc_running_buffer[8][16], running_buffer[8][16]; |
141 motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD) ? 1 : 0; | |
142 unsigned char sig_buffer[2][16], mc_running_buffer[2][16], | |
143 running_buffer[2][16]; | |
144 __m128i acc_diff = _mm_setzero_si128(); | 138 __m128i acc_diff = _mm_setzero_si128(); |
145 const __m128i k_0 = _mm_setzero_si128(); | 139 const __m128i k_0 = _mm_setzero_si128(); |
146 const __m128i k_4 = _mm_set1_epi8(4 + shift_inc); | 140 const __m128i k_4 = _mm_set1_epi8(4 + shift_inc); |
147 const __m128i k_8 = _mm_set1_epi8(8); | 141 const __m128i k_8 = _mm_set1_epi8(8); |
148 const __m128i k_16 = _mm_set1_epi8(16); | 142 const __m128i k_16 = _mm_set1_epi8(16); |
149 // Modify each level's adjustment according to motion_magnitude. | 143 // Modify each level's adjustment according to motion_magnitude. |
150 const __m128i l3 = _mm_set1_epi8( | 144 const __m128i l3 = _mm_set1_epi8( |
151 (motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD) ? | 145 (motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD) ? 7 + shift_inc : 6); |
152 7 + shift_inc : 6); | |
153 // Difference between level 3 and level 2 is 2. | 146 // Difference between level 3 and level 2 is 2. |
154 const __m128i l32 = _mm_set1_epi8(2); | 147 const __m128i l32 = _mm_set1_epi8(2); |
155 // Difference between level 2 and level 1 is 1. | 148 // Difference between level 2 and level 1 is 1. |
156 const __m128i l21 = _mm_set1_epi8(1); | 149 const __m128i l21 = _mm_set1_epi8(1); |
157 int sum_diff = 0; | 150 const uint8_t shift = (width == 4) ? 2 : 1; |
158 | 151 |
159 for (r = 0; r < ((4 << b_height_log2_lookup[bs]) >> 2); ++r) { | 152 for (r = 0; r < ((4 << b_height_log2_lookup[bs]) >> shift); ++r) { |
160 vpx_memcpy(sig_buffer[r], sig, 4); | 153 vpx_memcpy(sig_buffer[r], sig, width); |
161 vpx_memcpy(sig_buffer[r] + 4, sig + sig_stride, 4); | 154 vpx_memcpy(sig_buffer[r] + width, sig + sig_stride, width); |
162 vpx_memcpy(sig_buffer[r] + 8, sig + sig_stride * 2, 4); | 155 vpx_memcpy(mc_running_buffer[r], mc_running_avg_y, width); |
163 vpx_memcpy(sig_buffer[r] + 12, sig + sig_stride * 3, 4); | 156 vpx_memcpy(mc_running_buffer[r] + width, |
164 vpx_memcpy(mc_running_buffer[r], mc_running_avg_y, 4); | 157 mc_running_avg_y + mc_avg_y_stride, width); |
165 vpx_memcpy(mc_running_buffer[r] + 4, mc_running_avg_y + | 158 vpx_memcpy(running_buffer[r], running_avg_y, width); |
166 mc_avg_y_stride, 4); | 159 vpx_memcpy(running_buffer[r] + width, |
167 vpx_memcpy(mc_running_buffer[r] + 8, mc_running_avg_y + | 160 running_avg_y + avg_y_stride, width); |
168 mc_avg_y_stride * 2, 4); | 161 if (width == 4) { |
169 vpx_memcpy(mc_running_buffer[r] + 12, mc_running_avg_y + | 162 vpx_memcpy(sig_buffer[r] + width * 2, sig + sig_stride * 2, width); |
170 mc_avg_y_stride * 3, 4); | 163 vpx_memcpy(sig_buffer[r] + width * 3, sig + sig_stride * 3, width); |
171 vpx_memcpy(running_buffer[r], running_avg_y, 4); | 164 vpx_memcpy(mc_running_buffer[r] + width * 2, |
172 vpx_memcpy(running_buffer[r] + 4, running_avg_y + | 165 mc_running_avg_y + mc_avg_y_stride * 2, width); |
173 avg_y_stride, 4); | 166 vpx_memcpy(mc_running_buffer[r] + width * 3, |
174 vpx_memcpy(running_buffer[r] + 8, running_avg_y + | 167 mc_running_avg_y + mc_avg_y_stride * 3, width); |
175 avg_y_stride * 2, 4); | 168 vpx_memcpy(running_buffer[r] + width * 2, |
176 vpx_memcpy(running_buffer[r] + 12, running_avg_y + | 169 running_avg_y + avg_y_stride * 2, width); |
177 avg_y_stride * 3, 4); | 170 vpx_memcpy(running_buffer[r] + width * 3, |
| 171 running_avg_y + avg_y_stride * 3, width); |
| 172 } |
178 acc_diff = vp9_denoiser_16x1_sse2(sig_buffer[r], | 173 acc_diff = vp9_denoiser_16x1_sse2(sig_buffer[r], |
179 mc_running_buffer[r], | 174 mc_running_buffer[r], |
180 running_buffer[r], | 175 running_buffer[r], |
181 &k_0, &k_4, &k_8, &k_16, | 176 &k_0, &k_4, &k_8, &k_16, |
182 &l3, &l32, &l21, acc_diff); | 177 &l3, &l32, &l21, acc_diff); |
183 vpx_memcpy(running_avg_y, running_buffer[r], 4); | 178 vpx_memcpy(running_avg_y, running_buffer[r], width); |
184 vpx_memcpy(running_avg_y + avg_y_stride, running_buffer[r] + 4, 4); | 179 vpx_memcpy(running_avg_y + avg_y_stride, running_buffer[r] + width, width); |
185 vpx_memcpy(running_avg_y + avg_y_stride * 2, | 180 if (width == 4) { |
186 running_buffer[r] + 8, 4); | 181 vpx_memcpy(running_avg_y + avg_y_stride * 2, |
187 vpx_memcpy(running_avg_y + avg_y_stride * 3, | 182 running_buffer[r] + width * 2, width); |
188 running_buffer[r] + 12, 4); | 183 vpx_memcpy(running_avg_y + avg_y_stride * 3, |
| 184 running_buffer[r] + width * 3, width); |
| 185 } |
189 // Update pointers for next iteration. | 186 // Update pointers for next iteration. |
190 sig += (sig_stride << 2); | 187 sig += (sig_stride << shift); |
191 mc_running_avg_y += (mc_avg_y_stride << 2); | 188 mc_running_avg_y += (mc_avg_y_stride << shift); |
192 running_avg_y += (avg_y_stride << 2); | 189 running_avg_y += (avg_y_stride << shift); |
193 } | 190 } |
194 | 191 |
195 { | 192 { |
196 sum_diff = sum_diff_16x1(acc_diff); | 193 sum_diff = sum_diff_16x1(acc_diff); |
197 sum_diff_thresh = total_adj_strong_thresh(bs, increase_denoising); | 194 sum_diff_thresh = total_adj_strong_thresh(bs, increase_denoising); |
198 if (abs(sum_diff) > sum_diff_thresh) { | 195 if (abs(sum_diff) > sum_diff_thresh) { |
199 // Before returning to copy the block (i.e., apply no denoising), | 196 // Before returning to copy the block (i.e., apply no denoising), |
200 // checK if we can still apply some (weaker) temporal filtering to | 197 // check if we can still apply some (weaker) temporal filtering to |
201 // this block, that would otherwise not be denoised at all. Simplest | 198 // this block, that would otherwise not be denoised at all. Simplest |
202 // is to apply an additional adjustment to running_avg_y to bring it | 199 // is to apply an additional adjustment to running_avg_y to bring it |
203 // closer to sig. The adjustment is capped by a maximum delta, and | 200 // closer to sig. The adjustment is capped by a maximum delta, and |
204 // chosen such that in most cases the resulting sum_diff will be | 201 // chosen such that in most cases the resulting sum_diff will be |
205 // within the accceptable range given by sum_diff_thresh. | 202 // within the acceptable range given by sum_diff_thresh. |
206 | 203 |
207 // The delta is set by the excess of absolute pixel diff over the | 204 // The delta is set by the excess of absolute pixel diff over the |
208 // threshold. | 205 // threshold. |
209 int delta = ((abs(sum_diff) - sum_diff_thresh) | 206 const int delta = ((abs(sum_diff) - sum_diff_thresh) >> |
210 >> num_pels_log2_lookup[bs]) + 1; | 207 num_pels_log2_lookup[bs]) + 1; |
211 // Only apply the adjustment for max delta up to 3. | 208 // Only apply the adjustment for max delta up to 3. |
212 if (delta < 4) { | 209 if (delta < 4) { |
213 const __m128i k_delta = _mm_set1_epi8(delta); | 210 const __m128i k_delta = _mm_set1_epi8(delta); |
214 running_avg_y -= avg_y_stride * (4 << b_height_log2_lookup[bs]); | 211 running_avg_y -= avg_y_stride * (4 << b_height_log2_lookup[bs]); |
215 sum_diff = 0; | 212 for (r = 0; r < ((4 << b_height_log2_lookup[bs]) >> shift); ++r) { |
216 for (r = 0; r < ((4 << b_height_log2_lookup[bs]) >> 2); ++r) { | |
217 acc_diff = vp9_denoiser_adj_16x1_sse2( | 213 acc_diff = vp9_denoiser_adj_16x1_sse2( |
218 sig_buffer[r], mc_running_buffer[r], | 214 sig_buffer[r], mc_running_buffer[r], running_buffer[r], |
219 running_buffer[r], k_0, k_delta, | 215 k_0, k_delta, acc_diff); |
220 acc_diff); | 216 vpx_memcpy(running_avg_y, running_buffer[r], width); |
221 vpx_memcpy(running_avg_y, running_buffer[r], 4); | 217 vpx_memcpy(running_avg_y + avg_y_stride, |
222 vpx_memcpy(running_avg_y + avg_y_stride, running_buffer[r] + 4, 4); | 218 running_buffer[r] + width, width); |
223 vpx_memcpy(running_avg_y + avg_y_stride * 2, | 219 if (width == 4) { |
224 running_buffer[r] + 8, 4); | 220 vpx_memcpy(running_avg_y + avg_y_stride * 2, |
225 vpx_memcpy(running_avg_y + avg_y_stride * 3, | 221 running_buffer[r] + width * 2, width); |
226 running_buffer[r] + 12, 4); | 222 vpx_memcpy(running_avg_y + avg_y_stride * 3, |
| 223 running_buffer[r] + width * 3, width); |
| 224 } |
227 // Update pointers for next iteration. | 225 // Update pointers for next iteration. |
228 running_avg_y += (avg_y_stride << 2); | 226 running_avg_y += (avg_y_stride << shift); |
229 } | 227 } |
230 sum_diff = sum_diff_16x1(acc_diff); | 228 sum_diff = sum_diff_16x1(acc_diff); |
231 if (abs(sum_diff) > sum_diff_thresh) { | 229 if (abs(sum_diff) > sum_diff_thresh) { |
232 return COPY_BLOCK; | 230 return COPY_BLOCK; |
233 } | 231 } |
234 } else { | 232 } else { |
235 return COPY_BLOCK; | 233 return COPY_BLOCK; |
236 } | 234 } |
237 } | 235 } |
238 } | 236 } |
239 return FILTER_BLOCK; | 237 return FILTER_BLOCK; |
240 } | 238 } |
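
A worked example of the fallback arithmetic, using BLOCK_8X8 (64 pixels,
so num_pels_log2_lookup[bs] is 6):

    /* excess = abs(sum_diff) - sum_diff_thresh;
     * delta  = (excess >> 6) + 1;
     * excess   0..63  -> delta 1   weak filter, at most 1 per pixel
     * excess  64..127 -> delta 2
     * excess 128..191 -> delta 3
     * excess >= 192   -> delta 4   too large; COPY_BLOCK
     */

So the weak pass is attempted only while the excess stays below three gray
levels per pixel on average; beyond that the block is copied unfiltered.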
241 | 239 |
242 static int vp9_denoiser_8xM_sse2(const uint8_t *sig, int sig_stride, | 240 // Denoiser for 16xM, 32xM, and 64xM blocks. |
243 const uint8_t *mc_running_avg_y, | 241 static int vp9_denoiser_NxM_sse2_big(const uint8_t *sig, int sig_stride, |
244 int mc_avg_y_stride, | 242 const uint8_t *mc_running_avg_y, |
245 uint8_t *running_avg_y, int avg_y_stride, | 243 int mc_avg_y_stride, |
246 int increase_denoising, | 244 uint8_t *running_avg_y, |
247 BLOCK_SIZE bs, | 245 int avg_y_stride, |
248 int motion_magnitude) { | 246 int increase_denoising, BLOCK_SIZE bs, |
249 int sum_diff_thresh; | 247 int motion_magnitude) { |
250 int r; | 248 int sum_diff_thresh, r, c, sum_diff = 0; |
251 int shift_inc = (increase_denoising && | 249 const int shift_inc = (increase_denoising && |
252 motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD) ? 1 : 0; | 250 motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD) ? |
253 unsigned char sig_buffer[8][16], mc_running_buffer[8][16], | 251 1 : 0; |
254 running_buffer[8][16]; | |
255 __m128i acc_diff = _mm_setzero_si128(); | |
256 const __m128i k_0 = _mm_setzero_si128(); | |
257 const __m128i k_4 = _mm_set1_epi8(4 + shift_inc); | |
258 const __m128i k_8 = _mm_set1_epi8(8); | |
259 const __m128i k_16 = _mm_set1_epi8(16); | |
260 // Modify each level's adjustment according to motion_magnitude. | |
261 const __m128i l3 = _mm_set1_epi8( | |
262 (motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD) ? | |
263 7 + shift_inc : 6); | |
264 // Difference between level 3 and level 2 is 2. | |
265 const __m128i l32 = _mm_set1_epi8(2); | |
266 // Difference between level 2 and level 1 is 1. | |
267 const __m128i l21 = _mm_set1_epi8(1); | |
268 int sum_diff = 0; | |
269 | |
270 for (r = 0; r < ((4 << b_height_log2_lookup[bs]) >> 1); ++r) { | |
271 vpx_memcpy(sig_buffer[r], sig, 8); | |
272 vpx_memcpy(sig_buffer[r] + 8, sig + sig_stride, 8); | |
273 vpx_memcpy(mc_running_buffer[r], mc_running_avg_y, 8); | |
274 vpx_memcpy(mc_running_buffer[r] + 8, mc_running_avg_y + | |
275 mc_avg_y_stride, 8); | |
276 vpx_memcpy(running_buffer[r], running_avg_y, 8); | |
277 vpx_memcpy(running_buffer[r] + 8, running_avg_y + | |
278 avg_y_stride, 8); | |
279 acc_diff = vp9_denoiser_16x1_sse2(sig_buffer[r], | |
280 mc_running_buffer[r], | |
281 running_buffer[r], | |
282 &k_0, &k_4, &k_8, &k_16, | |
283 &l3, &l32, &l21, acc_diff); | |
284 vpx_memcpy(running_avg_y, running_buffer[r], 8); | |
285 vpx_memcpy(running_avg_y + avg_y_stride, running_buffer[r] + 8, 8); | |
286 // Update pointers for next iteration. | |
287 sig += (sig_stride << 1); | |
288 mc_running_avg_y += (mc_avg_y_stride << 1); | |
289 running_avg_y += (avg_y_stride << 1); | |
290 } | |
291 | |
292 { | |
293 sum_diff = sum_diff_16x1(acc_diff); | |
294 sum_diff_thresh = total_adj_strong_thresh(bs, increase_denoising); | |
295 if (abs(sum_diff) > sum_diff_thresh) { | |
296 // Before returning to copy the block (i.e., apply no denoising), | |
297 // checK if we can still apply some (weaker) temporal filtering to | |
298 // this block, that would otherwise not be denoised at all. Simplest | |
299 // is to apply an additional adjustment to running_avg_y to bring it | |
300 // closer to sig. The adjustment is capped by a maximum delta, and | |
301 // chosen such that in most cases the resulting sum_diff will be | |
302 // within the accceptable range given by sum_diff_thresh. | |
303 | |
304 // The delta is set by the excess of absolute pixel diff over the | |
305 // threshold. | |
306 int delta = ((abs(sum_diff) - sum_diff_thresh) | |
307 >> num_pels_log2_lookup[bs]) + 1; | |
308 // Only apply the adjustment for max delta up to 3. | |
309 if (delta < 4) { | |
310 const __m128i k_delta = _mm_set1_epi8(delta); | |
311 running_avg_y -= avg_y_stride * (4 << b_height_log2_lookup[bs]); | |
312 for (r = 0; r < ((4 << b_height_log2_lookup[bs]) >> 1); ++r) { | |
313 acc_diff = vp9_denoiser_adj_16x1_sse2( | |
314 sig_buffer[r], mc_running_buffer[r], | |
315 running_buffer[r], k_0, k_delta, | |
316 acc_diff); | |
317 vpx_memcpy(running_avg_y, running_buffer[r], 8); | |
318 vpx_memcpy(running_avg_y + avg_y_stride, running_buffer[r] + 8, 8); | |
319 // Update pointers for next iteration. | |
320 running_avg_y += (avg_y_stride << 1); | |
321 } | |
322 sum_diff = sum_diff_16x1(acc_diff); | |
323 if (abs(sum_diff) > sum_diff_thresh) { | |
324 return COPY_BLOCK; | |
325 } | |
326 } else { | |
327 return COPY_BLOCK; | |
328 } | |
329 } | |
330 } | |
331 return FILTER_BLOCK; | |
332 } | |
333 | |
334 static int vp9_denoiser_64_32_16xM_sse2(const uint8_t *sig, int sig_stride, | |
335 const uint8_t *mc_running_avg_y, | |
336 int mc_avg_y_stride, | |
337 uint8_t *running_avg_y, | |
338 int avg_y_stride, | |
339 int increase_denoising, BLOCK_SIZE bs, | |
340 int motion_magnitude) { | |
341 int sum_diff_thresh; | |
342 int r, c; | |
343 int shift_inc = (increase_denoising && | |
344 motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD) ? 1 : 0; | |
345 __m128i acc_diff[4][4]; | 252 __m128i acc_diff[4][4]; |
346 const __m128i k_0 = _mm_setzero_si128(); | 253 const __m128i k_0 = _mm_setzero_si128(); |
347 const __m128i k_4 = _mm_set1_epi8(4 + shift_inc); | 254 const __m128i k_4 = _mm_set1_epi8(4 + shift_inc); |
348 const __m128i k_8 = _mm_set1_epi8(8); | 255 const __m128i k_8 = _mm_set1_epi8(8); |
349 const __m128i k_16 = _mm_set1_epi8(16); | 256 const __m128i k_16 = _mm_set1_epi8(16); |
350 // Modify each level's adjustment according to motion_magnitude. | 257 // Modify each level's adjustment according to motion_magnitude. |
351 const __m128i l3 = _mm_set1_epi8( | 258 const __m128i l3 = _mm_set1_epi8( |
352 (motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD) ? | 259 (motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD) ? 7 + shift_inc : 6); |
353 7 + shift_inc : 6); | |
354 // Difference between level 3 and level 2 is 2. | 260 // Difference between level 3 and level 2 is 2. |
355 const __m128i l32 = _mm_set1_epi8(2); | 261 const __m128i l32 = _mm_set1_epi8(2); |
356 // Difference between level 2 and level 1 is 1. | 262 // Difference between level 2 and level 1 is 1. |
357 const __m128i l21 = _mm_set1_epi8(1); | 263 const __m128i l21 = _mm_set1_epi8(1); |
358 int sum_diff = 0; | |
359 | 264 |
360 for (c = 0; c < 4; ++c) { | 265 for (c = 0; c < 4; ++c) { |
361 for (r = 0; r < 4; ++r) { | 266 for (r = 0; r < 4; ++r) { |
362 acc_diff[c][r] = _mm_setzero_si128(); | 267 acc_diff[c][r] = _mm_setzero_si128(); |
363 } | 268 } |
364 } | 269 } |
365 | 270 |
366 for (r = 0; r < (4 << b_height_log2_lookup[bs]); r++) { | 271 for (r = 0; r < (4 << b_height_log2_lookup[bs]); ++r) { |
367 for (c = 0; c < (4 << b_width_log2_lookup[bs]); c += 16) { | 272 for (c = 0; c < (4 << b_width_log2_lookup[bs]); c += 16) { |
368 acc_diff[c>>4][r>>4] = vp9_denoiser_16x1_sse2( | 273 acc_diff[c>>4][r>>4] = vp9_denoiser_16x1_sse2( |
369 sig, mc_running_avg_y, | 274 sig, mc_running_avg_y, running_avg_y, &k_0, &k_4, |
370 running_avg_y, | 275 &k_8, &k_16, &l3, &l32, &l21, acc_diff[c>>4][r>>4]); |
371 &k_0, &k_4, &k_8, &k_16, | |
372 &l3, &l32, &l21, acc_diff[c>>4][r>>4]); | |
373 // Update pointers for next iteration. | 276 // Update pointers for next iteration. |
374 sig += 16; | 277 sig += 16; |
375 mc_running_avg_y += 16; | 278 mc_running_avg_y += 16; |
376 running_avg_y += 16; | 279 running_avg_y += 16; |
377 } | 280 } |
378 | 281 |
379 if ((r + 1) % 16 == 0 || (bs == BLOCK_16X8 && r == 7)) { | 282 if ((r + 1) % 16 == 0 || (bs == BLOCK_16X8 && r == 7)) { |
380 for (c = 0; c < (4 << b_width_log2_lookup[bs]); c += 16) { | 283 for (c = 0; c < (4 << b_width_log2_lookup[bs]); c += 16) { |
381 sum_diff += sum_diff_16x1(acc_diff[c>>4][r>>4]); | 284 sum_diff += sum_diff_16x1(acc_diff[c>>4][r>>4]); |
382 } | 285 } |
383 } | 286 } |
384 | 287 |
385 // Update pointers for next iteration. | 288 // Update pointers for next iteration. |
386 sig = sig - 16 * ((4 << b_width_log2_lookup[bs]) >> 4) + sig_stride; | 289 sig = sig - 16 * ((4 << b_width_log2_lookup[bs]) >> 4) + sig_stride; |
387 mc_running_avg_y = mc_running_avg_y - | 290 mc_running_avg_y = mc_running_avg_y - |
388 16 * ((4 << b_width_log2_lookup[bs]) >> 4) + | 291 16 * ((4 << b_width_log2_lookup[bs]) >> 4) + |
389 mc_avg_y_stride; | 292 mc_avg_y_stride; |
390 running_avg_y = running_avg_y - | 293 running_avg_y = running_avg_y - |
391 16 * ((4 << b_width_log2_lookup[bs]) >> 4) + | 294 16 * ((4 << b_width_log2_lookup[bs]) >> 4) + |
392 avg_y_stride; | 295 avg_y_stride; |
393 } | 296 } |
394 | 297 |
395 { | 298 { |
396 sum_diff_thresh = total_adj_strong_thresh(bs, increase_denoising); | 299 sum_diff_thresh = total_adj_strong_thresh(bs, increase_denoising); |
397 if (abs(sum_diff) > sum_diff_thresh) { | 300 if (abs(sum_diff) > sum_diff_thresh) { |
398 int delta = ((abs(sum_diff) - sum_diff_thresh) | 301 const int delta = ((abs(sum_diff) - sum_diff_thresh) >> |
399 >> num_pels_log2_lookup[bs]) + 1; | 302 num_pels_log2_lookup[bs]) + 1; |
400 | 303 |
401 // Only apply the adjustment for max delta up to 3. | 304 // Only apply the adjustment for max delta up to 3. |
402 if (delta < 4) { | 305 if (delta < 4) { |
403 const __m128i k_delta = _mm_set1_epi8(delta); | 306 const __m128i k_delta = _mm_set1_epi8(delta); |
404 sig -= sig_stride * (4 << b_height_log2_lookup[bs]); | 307 sig -= sig_stride * (4 << b_height_log2_lookup[bs]); |
405 mc_running_avg_y -= mc_avg_y_stride * (4 << b_height_log2_lookup[bs]); | 308 mc_running_avg_y -= mc_avg_y_stride * (4 << b_height_log2_lookup[bs]); |
406 running_avg_y -= avg_y_stride * (4 << b_height_log2_lookup[bs]); | 309 running_avg_y -= avg_y_stride * (4 << b_height_log2_lookup[bs]); |
407 sum_diff = 0; | 310 sum_diff = 0; |
408 for (r = 0; r < (4 << b_height_log2_lookup[bs]); ++r) { | 311 for (r = 0; r < (4 << b_height_log2_lookup[bs]); ++r) { |
409 for (c = 0; c < (4 << b_width_log2_lookup[bs]); c += 16) { | 312 for (c = 0; c < (4 << b_width_log2_lookup[bs]); c += 16) { |
410 acc_diff[c>>4][r>>4] = vp9_denoiser_adj_16x1_sse2( | 313 acc_diff[c>>4][r>>4] = vp9_denoiser_adj_16x1_sse2( |
411 sig, mc_running_avg_y, | 314 sig, mc_running_avg_y, running_avg_y, k_0, |
412 running_avg_y, k_0, | 315 k_delta, acc_diff[c>>4][r>>4]); |
413 k_delta, acc_diff[c>>4][r>>4]); | |
414 // Update pointers for next iteration. | 316 // Update pointers for next iteration. |
415 sig += 16; | 317 sig += 16; |
416 mc_running_avg_y += 16; | 318 mc_running_avg_y += 16; |
417 running_avg_y += 16; | 319 running_avg_y += 16; |
418 } | 320 } |
419 | 321 |
420 if ((r + 1) % 16 == 0 || (bs == BLOCK_16X8 && r == 7)) { | 322 if ((r + 1) % 16 == 0 || (bs == BLOCK_16X8 && r == 7)) { |
421 for (c = 0; c < (4 << b_width_log2_lookup[bs]); c += 16) { | 323 for (c = 0; c < (4 << b_width_log2_lookup[bs]); c += 16) { |
422 sum_diff += sum_diff_16x1(acc_diff[c>>4][r>>4]); | 324 sum_diff += sum_diff_16x1(acc_diff[c>>4][r>>4]); |
423 } | 325 } |
(...skipping 18 matching lines...) |
442 } | 344 } |
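
A note on the accumulator grid used above, as an illustrative comment:

    /* acc_diff[c >> 4][r >> 4] keeps one signed-byte SIMD accumulator per
     * 16x16 tile (a 64x64 block fills the whole 4x4 grid). Lanes are
     * folded into the 32-bit sum_diff every 16 rows (and after row 7 for
     * BLOCK_16X8) because the per-pixel adjustment can reach
     * 7 + shift_inc, and letting more rows pile up would saturate the
     * int8 lanes. */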
443 | 345 |
444 int vp9_denoiser_filter_sse2(const uint8_t *sig, int sig_stride, | 346 int vp9_denoiser_filter_sse2(const uint8_t *sig, int sig_stride, |
445 const uint8_t *mc_avg, | 347 const uint8_t *mc_avg, |
446 int mc_avg_stride, | 348 int mc_avg_stride, |
447 uint8_t *avg, int avg_stride, | 349 uint8_t *avg, int avg_stride, |
448 int increase_denoising, | 350 int increase_denoising, |
449 BLOCK_SIZE bs, | 351 BLOCK_SIZE bs, |
450 int motion_magnitude) { | 352 int motion_magnitude) { |
451 if (bs == BLOCK_4X4 || bs == BLOCK_4X8) { | 353 if (bs == BLOCK_4X4 || bs == BLOCK_4X8) { |
452 return vp9_denoiser_4xM_sse2(sig, sig_stride, | 354 return vp9_denoiser_NxM_sse2_small(sig, sig_stride, |
453 mc_avg, mc_avg_stride, | 355 mc_avg, mc_avg_stride, |
454 avg, avg_stride, | 356 avg, avg_stride, |
455 increase_denoising, | 357 increase_denoising, |
456 bs, motion_magnitude); | 358 bs, motion_magnitude, 4); |
457 } else if (bs == BLOCK_8X4 || bs == BLOCK_8X8 || bs == BLOCK_8X16) { | 359 } else if (bs == BLOCK_8X4 || bs == BLOCK_8X8 || bs == BLOCK_8X16) { |
458 return vp9_denoiser_8xM_sse2(sig, sig_stride, | 360 return vp9_denoiser_NxM_sse2_small(sig, sig_stride, |
459 mc_avg, mc_avg_stride, | 361 mc_avg, mc_avg_stride, |
460 avg, avg_stride, | 362 avg, avg_stride, |
461 increase_denoising, | 363 increase_denoising, |
462 bs, motion_magnitude); | 364 bs, motion_magnitude, 8); |
463 } else if (bs == BLOCK_16X8 || bs == BLOCK_16X16 || bs == BLOCK_16X32 || | 365 } else if (bs == BLOCK_16X8 || bs == BLOCK_16X16 || bs == BLOCK_16X32 || |
464 bs == BLOCK_32X16 || bs == BLOCK_32X32 || bs == BLOCK_32X64 | 366 bs == BLOCK_32X16 || bs == BLOCK_32X32 || bs == BLOCK_32X64 |
465 bs == BLOCK_64X32 || bs == BLOCK_64X64) { | 367 bs == BLOCK_64X32 || bs == BLOCK_64X64) { |
466 return vp9_denoiser_64_32_16xM_sse2(sig, sig_stride, | 368 return vp9_denoiser_NxM_sse2_big(sig, sig_stride, |
467 mc_avg, mc_avg_stride, | 369 mc_avg, mc_avg_stride, |
468 avg, avg_stride, | 370 avg, avg_stride, |
469 increase_denoising, | 371 increase_denoising, |
470 bs, motion_magnitude); | 372 bs, motion_magnitude); |
471 } else { | 373 } else { |
472 return COPY_BLOCK; | 374 return COPY_BLOCK; |
473 } | 375 } |
474 } | 376 } |
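
A hypothetical call site (identifiers illustrative, not from the patch):
the caller passes the noisy source, the motion-compensated running average,
and the running-average plane to update, then copies the source itself when
COPY_BLOCK comes back.

    /* Hypothetical usage sketch; src, mc_avg, avg and the strides are
     * assumed to be set up by the caller. */
    static void denoise_block_example(const uint8_t *src, int src_stride,
                                      const uint8_t *mc_avg,
                                      int mc_avg_stride,
                                      uint8_t *avg, int avg_stride) {
      const int decision = vp9_denoiser_filter_sse2(
          src, src_stride, mc_avg, mc_avg_stride, avg, avg_stride,
          0 /* increase_denoising */, BLOCK_16X16, 0 /* motion_magnitude */);
      if (decision == COPY_BLOCK) {
        /* Leave the block undenoised: copy src into avg instead. */
      }
    }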