/*
 *  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <emmintrin.h>

#include "./vpx_config.h"
#include "./vp9_rtcd.h"

#include "vpx_ports/emmintrin_compat.h"
#include "vpx/vpx_integer.h"
#include "vp9/common/vp9_reconinter.h"
#include "vp9/encoder/vp9_context_tree.h"
#include "vp9/encoder/vp9_denoiser.h"
#include "vpx_mem/vpx_mem.h"

// Compute the sum of all pixel differences of this MB.
static INLINE int sum_diff_16x1(__m128i acc_diff) {
  const __m128i k_1 = _mm_set1_epi16(1);
  const __m128i acc_diff_lo = _mm_srai_epi16(
      _mm_unpacklo_epi8(acc_diff, acc_diff), 8);
  const __m128i acc_diff_hi = _mm_srai_epi16(
      _mm_unpackhi_epi8(acc_diff, acc_diff), 8);
  const __m128i acc_diff_16 = _mm_add_epi16(acc_diff_lo, acc_diff_hi);
  const __m128i hg_fe_dc_ba = _mm_madd_epi16(acc_diff_16, k_1);
  const __m128i hgfe_dcba = _mm_add_epi32(hg_fe_dc_ba,
                                          _mm_srli_si128(hg_fe_dc_ba, 8));
  const __m128i hgfedcba = _mm_add_epi32(hgfe_dcba,
                                         _mm_srli_si128(hgfe_dcba, 4));
  int sum_diff = _mm_cvtsi128_si32(hgfedcba);
  return sum_diff;
}

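// For reference, a scalar sketch of what sum_diff_16x1() computes: each byte
// of acc_diff holds a signed 8-bit per-pixel adjustment, and the intrinsics
// above sign-extend all 16 bytes to 16-bit lanes and reduce them to one
// 32-bit total. The helper below is illustrative only and is not compiled.
#if 0
static int sum_diff_16x1_scalar(const int8_t adj[16]) {
  int sum = 0, i;
  for (i = 0; i < 16; ++i)
    sum += adj[i];  // Signed accumulation, matching the SIMD reduction.
  return sum;
}
#endif
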
// Denoise a 16x1 vector.
static INLINE __m128i vp9_denoiser_16x1_sse2(const uint8_t *sig,
                                             const uint8_t *mc_running_avg_y,
                                             uint8_t *running_avg_y,
                                             const __m128i *k_0,
                                             const __m128i *k_4,
                                             const __m128i *k_8,
                                             const __m128i *k_16,
                                             const __m128i *l3,
                                             const __m128i *l32,
                                             const __m128i *l21,
                                             __m128i acc_diff) {
  // Calculate differences.
  const __m128i v_sig = _mm_loadu_si128((const __m128i *)(&sig[0]));
  const __m128i v_mc_running_avg_y = _mm_loadu_si128(
      (const __m128i *)(&mc_running_avg_y[0]));
  __m128i v_running_avg_y;
  const __m128i pdiff = _mm_subs_epu8(v_mc_running_avg_y, v_sig);
  const __m128i ndiff = _mm_subs_epu8(v_sig, v_mc_running_avg_y);
  // Obtain the sign. FF if diff is negative.
  const __m128i diff_sign = _mm_cmpeq_epi8(pdiff, *k_0);
  // Clamp absolute difference to 16 to be used to get mask. Doing this
  // allows us to use _mm_cmpgt_epi8, which operates on signed byte.
  const __m128i clamped_absdiff = _mm_min_epu8(
      _mm_or_si128(pdiff, ndiff), *k_16);
  // Get masks for l2, l1, and l0 adjustments.
  const __m128i mask2 = _mm_cmpgt_epi8(*k_16, clamped_absdiff);
  const __m128i mask1 = _mm_cmpgt_epi8(*k_8, clamped_absdiff);
  const __m128i mask0 = _mm_cmpgt_epi8(*k_4, clamped_absdiff);
  // Get adjustments for l2, l1, and l0.
  __m128i adj2 = _mm_and_si128(mask2, *l32);
  const __m128i adj1 = _mm_and_si128(mask1, *l21);
  const __m128i adj0 = _mm_and_si128(mask0, clamped_absdiff);
  __m128i adj, padj, nadj;

  // Combine the adjustments and get absolute adjustments.
  adj2 = _mm_add_epi8(adj2, adj1);
  adj = _mm_sub_epi8(*l3, adj2);
  adj = _mm_andnot_si128(mask0, adj);
  adj = _mm_or_si128(adj, adj0);

  // Restore the sign and get positive and negative adjustments.
  padj = _mm_andnot_si128(diff_sign, adj);
  nadj = _mm_and_si128(diff_sign, adj);

  // Calculate filtered value.
  v_running_avg_y = _mm_adds_epu8(v_sig, padj);
  v_running_avg_y = _mm_subs_epu8(v_running_avg_y, nadj);
  _mm_storeu_si128((__m128i *)running_avg_y, v_running_avg_y);

  // Adjustments <=7, and each element in acc_diff can fit in signed
  // char.
  acc_diff = _mm_adds_epi8(acc_diff, padj);
  acc_diff = _mm_subs_epi8(acc_diff, nadj);
  return acc_diff;
}

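// Illustrative scalar equivalent of the per-pixel logic above, documentation
// only (not compiled). The thresholds and the l3/l32/l21 step sizes mirror
// the constants set up by the callers below; the helper name and the plain
// int accumulator (the SIMD path saturates acc_diff per byte) are
// simplifications.
#if 0
static uint8_t denoise_pixel_scalar(uint8_t sig, uint8_t mc_avg, int l3,
                                    int shift_inc, int *acc) {
  const int diff = mc_avg - sig;
  const int absdiff = diff < 0 ? -diff : diff;
  int adj;
  if (absdiff < 4 + shift_inc)
    adj = absdiff;        // Small difference: take it entirely.
  else if (absdiff < 8)
    adj = l3 - 2 - 1;     // Level 1 adjustment.
  else if (absdiff < 16)
    adj = l3 - 2;         // Level 2 adjustment.
  else
    adj = l3;             // Level 3 adjustment (largest).
  *acc += (diff > 0) ? adj : -adj;  // Same quantity acc_diff accumulates.
  return (uint8_t)(diff > 0 ? sig + adj : sig - adj);
}
#endif
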
// Denoise a 16x1 vector with a weaker filter.
static INLINE __m128i vp9_denoiser_adj_16x1_sse2(const uint8_t *sig,
                                                 const uint8_t *mc_running_avg_y,
                                                 uint8_t *running_avg_y,
                                                 const __m128i k_0,
                                                 const __m128i k_delta,
                                                 __m128i acc_diff) {
  __m128i v_running_avg_y = _mm_loadu_si128((__m128i *)(&running_avg_y[0]));
  // Calculate differences.
  const __m128i v_sig = _mm_loadu_si128((const __m128i *)(&sig[0]));
  const __m128i v_mc_running_avg_y =
      _mm_loadu_si128((const __m128i *)(&mc_running_avg_y[0]));
  const __m128i pdiff = _mm_subs_epu8(v_mc_running_avg_y, v_sig);
  const __m128i ndiff = _mm_subs_epu8(v_sig, v_mc_running_avg_y);
  // Obtain the sign. FF if diff is negative.
  const __m128i diff_sign = _mm_cmpeq_epi8(pdiff, k_0);
  // Clamp absolute difference to delta to get the adjustment.
  const __m128i adj =
      _mm_min_epu8(_mm_or_si128(pdiff, ndiff), k_delta);
  // Restore the sign and get positive and negative adjustments.
  __m128i padj, nadj;
  padj = _mm_andnot_si128(diff_sign, adj);
  nadj = _mm_and_si128(diff_sign, adj);
  // Calculate filtered value.
  v_running_avg_y = _mm_subs_epu8(v_running_avg_y, padj);
  v_running_avg_y = _mm_adds_epu8(v_running_avg_y, nadj);
  _mm_storeu_si128((__m128i *)running_avg_y, v_running_avg_y);

  // Accumulate the adjustments.
  acc_diff = _mm_subs_epi8(acc_diff, padj);
  acc_diff = _mm_adds_epi8(acc_diff, nadj);
  return acc_diff;
}

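// Scalar sketch of the weaker filter above, again for documentation only
// (not compiled): running_avg_y is pulled back toward sig by at most delta
// per pixel, and the running sum of adjustments is updated to match.
#if 0
static uint8_t denoise_pixel_adj_scalar(uint8_t sig, uint8_t mc_avg,
                                        uint8_t avg, int delta, int *acc) {
  const int diff = mc_avg - sig;
  const int absdiff = diff < 0 ? -diff : diff;
  const int adj = absdiff < delta ? absdiff : delta;
  if (diff > 0) {
    *acc -= adj;
    return (uint8_t)(avg >= adj ? avg - adj : 0);          // Saturating sub.
  } else {
    *acc += adj;
    return (uint8_t)(avg + adj > 255 ? 255 : avg + adj);   // Saturating add.
  }
}
#endif
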
static int vp9_denoiser_4xM_sse2(const uint8_t *sig, int sig_stride,
                                 const uint8_t *mc_running_avg_y,
                                 int mc_avg_y_stride,
                                 uint8_t *running_avg_y, int avg_y_stride,
                                 int increase_denoising,
                                 BLOCK_SIZE bs,
                                 int motion_magnitude) {
  int sum_diff_thresh;
  int r;
  int shift_inc = (increase_denoising &&
                   motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD) ? 1 : 0;
  unsigned char sig_buffer[2][16], mc_running_buffer[2][16],
                running_buffer[2][16];
  __m128i acc_diff = _mm_setzero_si128();
  const __m128i k_0 = _mm_setzero_si128();
  const __m128i k_4 = _mm_set1_epi8(4 + shift_inc);
  const __m128i k_8 = _mm_set1_epi8(8);
  const __m128i k_16 = _mm_set1_epi8(16);
  // Modify each level's adjustment according to motion_magnitude.
  const __m128i l3 = _mm_set1_epi8(
      (motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD) ?
      7 + shift_inc : 6);
  // Difference between level 3 and level 2 is 2.
  const __m128i l32 = _mm_set1_epi8(2);
  // Difference between level 2 and level 1 is 1.
  const __m128i l21 = _mm_set1_epi8(1);
  int sum_diff = 0;

  for (r = 0; r < ((4 << b_height_log2_lookup[bs]) >> 2); ++r) {
    vpx_memcpy(sig_buffer[r], sig, 4);
    vpx_memcpy(sig_buffer[r] + 4, sig + sig_stride, 4);
    vpx_memcpy(sig_buffer[r] + 8, sig + sig_stride * 2, 4);
    vpx_memcpy(sig_buffer[r] + 12, sig + sig_stride * 3, 4);
    vpx_memcpy(mc_running_buffer[r], mc_running_avg_y, 4);
    vpx_memcpy(mc_running_buffer[r] + 4, mc_running_avg_y +
               mc_avg_y_stride, 4);
    vpx_memcpy(mc_running_buffer[r] + 8, mc_running_avg_y +
               mc_avg_y_stride * 2, 4);
    vpx_memcpy(mc_running_buffer[r] + 12, mc_running_avg_y +
               mc_avg_y_stride * 3, 4);
    vpx_memcpy(running_buffer[r], running_avg_y, 4);
    vpx_memcpy(running_buffer[r] + 4, running_avg_y +
               avg_y_stride, 4);
    vpx_memcpy(running_buffer[r] + 8, running_avg_y +
               avg_y_stride * 2, 4);
    vpx_memcpy(running_buffer[r] + 12, running_avg_y +
               avg_y_stride * 3, 4);
    acc_diff = vp9_denoiser_16x1_sse2(sig_buffer[r],
                                      mc_running_buffer[r],
                                      running_buffer[r],
                                      &k_0, &k_4, &k_8, &k_16,
                                      &l3, &l32, &l21, acc_diff);
    vpx_memcpy(running_avg_y, running_buffer[r], 4);
    vpx_memcpy(running_avg_y + avg_y_stride, running_buffer[r] + 4, 4);
    vpx_memcpy(running_avg_y + avg_y_stride * 2,
               running_buffer[r] + 8, 4);
    vpx_memcpy(running_avg_y + avg_y_stride * 3,
               running_buffer[r] + 12, 4);
    // Update pointers for next iteration.
    sig += (sig_stride << 2);
    mc_running_avg_y += (mc_avg_y_stride << 2);
    running_avg_y += (avg_y_stride << 2);
  }

  {
    sum_diff = sum_diff_16x1(acc_diff);
    sum_diff_thresh = total_adj_strong_thresh(bs, increase_denoising);
    if (abs(sum_diff) > sum_diff_thresh) {
      // Before returning to copy the block (i.e., apply no denoising),
      // check if we can still apply some (weaker) temporal filtering to
      // this block, which would otherwise not be denoised at all. Simplest
      // is to apply an additional adjustment to running_avg_y to bring it
      // closer to sig. The adjustment is capped by a maximum delta, and
      // chosen such that in most cases the resulting sum_diff will be
      // within the acceptable range given by sum_diff_thresh.

      // The delta is set by the excess of absolute pixel diff over the
      // threshold.
      int delta = ((abs(sum_diff) - sum_diff_thresh)
                   >> num_pels_log2_lookup[bs]) + 1;
      // Only apply the adjustment for max delta up to 3.
      if (delta < 4) {
        const __m128i k_delta = _mm_set1_epi8(delta);
        running_avg_y -= avg_y_stride * (4 << b_height_log2_lookup[bs]);
        sum_diff = 0;
        for (r = 0; r < ((4 << b_height_log2_lookup[bs]) >> 2); ++r) {
          acc_diff = vp9_denoiser_adj_16x1_sse2(
              sig_buffer[r], mc_running_buffer[r],
              running_buffer[r], k_0, k_delta,
              acc_diff);
          vpx_memcpy(running_avg_y, running_buffer[r], 4);
          vpx_memcpy(running_avg_y + avg_y_stride, running_buffer[r] + 4, 4);
          vpx_memcpy(running_avg_y + avg_y_stride * 2,
                     running_buffer[r] + 8, 4);
          vpx_memcpy(running_avg_y + avg_y_stride * 3,
                     running_buffer[r] + 12, 4);
          // Update pointers for next iteration.
          running_avg_y += (avg_y_stride << 2);
        }
        sum_diff = sum_diff_16x1(acc_diff);
        if (abs(sum_diff) > sum_diff_thresh) {
          return COPY_BLOCK;
        }
      } else {
        return COPY_BLOCK;
      }
    }
  }
  return FILTER_BLOCK;
}

static int vp9_denoiser_8xM_sse2(const uint8_t *sig, int sig_stride,
                                 const uint8_t *mc_running_avg_y,
                                 int mc_avg_y_stride,
                                 uint8_t *running_avg_y, int avg_y_stride,
                                 int increase_denoising,
                                 BLOCK_SIZE bs,
                                 int motion_magnitude) {
  int sum_diff_thresh;
  int r;
  int shift_inc = (increase_denoising &&
                   motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD) ? 1 : 0;
  unsigned char sig_buffer[8][16], mc_running_buffer[8][16],
                running_buffer[8][16];
  __m128i acc_diff = _mm_setzero_si128();
  const __m128i k_0 = _mm_setzero_si128();
  const __m128i k_4 = _mm_set1_epi8(4 + shift_inc);
  const __m128i k_8 = _mm_set1_epi8(8);
  const __m128i k_16 = _mm_set1_epi8(16);
  // Modify each level's adjustment according to motion_magnitude.
  const __m128i l3 = _mm_set1_epi8(
      (motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD) ?
      7 + shift_inc : 6);
  // Difference between level 3 and level 2 is 2.
  const __m128i l32 = _mm_set1_epi8(2);
  // Difference between level 2 and level 1 is 1.
  const __m128i l21 = _mm_set1_epi8(1);
  int sum_diff = 0;

  for (r = 0; r < ((4 << b_height_log2_lookup[bs]) >> 1); ++r) {
    vpx_memcpy(sig_buffer[r], sig, 8);
    vpx_memcpy(sig_buffer[r] + 8, sig + sig_stride, 8);
    vpx_memcpy(mc_running_buffer[r], mc_running_avg_y, 8);
    vpx_memcpy(mc_running_buffer[r] + 8, mc_running_avg_y +
               mc_avg_y_stride, 8);
    vpx_memcpy(running_buffer[r], running_avg_y, 8);
    vpx_memcpy(running_buffer[r] + 8, running_avg_y +
               avg_y_stride, 8);
    acc_diff = vp9_denoiser_16x1_sse2(sig_buffer[r],
                                      mc_running_buffer[r],
                                      running_buffer[r],
                                      &k_0, &k_4, &k_8, &k_16,
                                      &l3, &l32, &l21, acc_diff);
    vpx_memcpy(running_avg_y, running_buffer[r], 8);
    vpx_memcpy(running_avg_y + avg_y_stride, running_buffer[r] + 8, 8);
    // Update pointers for next iteration.
    sig += (sig_stride << 1);
    mc_running_avg_y += (mc_avg_y_stride << 1);
    running_avg_y += (avg_y_stride << 1);
  }

  {
    sum_diff = sum_diff_16x1(acc_diff);
    sum_diff_thresh = total_adj_strong_thresh(bs, increase_denoising);
    if (abs(sum_diff) > sum_diff_thresh) {
      // Before returning to copy the block (i.e., apply no denoising),
      // check if we can still apply some (weaker) temporal filtering to
      // this block, which would otherwise not be denoised at all. Simplest
      // is to apply an additional adjustment to running_avg_y to bring it
      // closer to sig. The adjustment is capped by a maximum delta, and
      // chosen such that in most cases the resulting sum_diff will be
      // within the acceptable range given by sum_diff_thresh.

      // The delta is set by the excess of absolute pixel diff over the
      // threshold.
      int delta = ((abs(sum_diff) - sum_diff_thresh)
                   >> num_pels_log2_lookup[bs]) + 1;
      // Only apply the adjustment for max delta up to 3.
      if (delta < 4) {
        const __m128i k_delta = _mm_set1_epi8(delta);
        running_avg_y -= avg_y_stride * (4 << b_height_log2_lookup[bs]);
        for (r = 0; r < ((4 << b_height_log2_lookup[bs]) >> 1); ++r) {
          acc_diff = vp9_denoiser_adj_16x1_sse2(
              sig_buffer[r], mc_running_buffer[r],
              running_buffer[r], k_0, k_delta,
              acc_diff);
          vpx_memcpy(running_avg_y, running_buffer[r], 8);
          vpx_memcpy(running_avg_y + avg_y_stride, running_buffer[r] + 8, 8);
          // Update pointers for next iteration.
          running_avg_y += (avg_y_stride << 1);
        }
        sum_diff = sum_diff_16x1(acc_diff);
        if (abs(sum_diff) > sum_diff_thresh) {
          return COPY_BLOCK;
        }
      } else {
        return COPY_BLOCK;
      }
    }
  }
  return FILTER_BLOCK;
}

static int vp9_denoiser_64_32_16xM_sse2(const uint8_t *sig, int sig_stride,
                                        const uint8_t *mc_running_avg_y,
                                        int mc_avg_y_stride,
                                        uint8_t *running_avg_y,
                                        int avg_y_stride,
                                        int increase_denoising, BLOCK_SIZE bs,
                                        int motion_magnitude) {
  int sum_diff_thresh;
  int r, c;
  int shift_inc = (increase_denoising &&
                   motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD) ? 1 : 0;
  __m128i acc_diff[4][4];
  const __m128i k_0 = _mm_setzero_si128();
  const __m128i k_4 = _mm_set1_epi8(4 + shift_inc);
  const __m128i k_8 = _mm_set1_epi8(8);
  const __m128i k_16 = _mm_set1_epi8(16);
  // Modify each level's adjustment according to motion_magnitude.
  const __m128i l3 = _mm_set1_epi8(
      (motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD) ?
      7 + shift_inc : 6);
  // Difference between level 3 and level 2 is 2.
  const __m128i l32 = _mm_set1_epi8(2);
  // Difference between level 2 and level 1 is 1.
  const __m128i l21 = _mm_set1_epi8(1);
  int sum_diff = 0;

  for (c = 0; c < 4; ++c) {
    for (r = 0; r < 4; ++r) {
      acc_diff[c][r] = _mm_setzero_si128();
    }
  }

  for (r = 0; r < (4 << b_height_log2_lookup[bs]); r++) {
    for (c = 0; c < (4 << b_width_log2_lookup[bs]); c += 16) {
      acc_diff[c >> 4][r >> 4] = vp9_denoiser_16x1_sse2(
          sig, mc_running_avg_y,
          running_avg_y,
          &k_0, &k_4, &k_8, &k_16,
          &l3, &l32, &l21, acc_diff[c >> 4][r >> 4]);
      // Update pointers for next iteration.
      sig += 16;
      mc_running_avg_y += 16;
      running_avg_y += 16;
    }

    if ((r + 1) % 16 == 0 || (bs == BLOCK_16X8 && r == 7)) {
      for (c = 0; c < (4 << b_width_log2_lookup[bs]); c += 16) {
        sum_diff += sum_diff_16x1(acc_diff[c >> 4][r >> 4]);
      }
    }

    // Update pointers for next iteration.
    sig = sig - 16 * ((4 << b_width_log2_lookup[bs]) >> 4) + sig_stride;
    mc_running_avg_y = mc_running_avg_y -
                       16 * ((4 << b_width_log2_lookup[bs]) >> 4) +
                       mc_avg_y_stride;
    running_avg_y = running_avg_y -
                    16 * ((4 << b_width_log2_lookup[bs]) >> 4) +
                    avg_y_stride;
  }

  {
    sum_diff_thresh = total_adj_strong_thresh(bs, increase_denoising);
    if (abs(sum_diff) > sum_diff_thresh) {
      int delta = ((abs(sum_diff) - sum_diff_thresh)
                   >> num_pels_log2_lookup[bs]) + 1;

      // Only apply the adjustment for max delta up to 3.
      if (delta < 4) {
        const __m128i k_delta = _mm_set1_epi8(delta);
        sig -= sig_stride * (4 << b_height_log2_lookup[bs]);
        mc_running_avg_y -= mc_avg_y_stride * (4 << b_height_log2_lookup[bs]);
        running_avg_y -= avg_y_stride * (4 << b_height_log2_lookup[bs]);
        sum_diff = 0;
        for (r = 0; r < (4 << b_height_log2_lookup[bs]); ++r) {
          for (c = 0; c < (4 << b_width_log2_lookup[bs]); c += 16) {
            acc_diff[c >> 4][r >> 4] = vp9_denoiser_adj_16x1_sse2(
                sig, mc_running_avg_y,
                running_avg_y, k_0,
                k_delta, acc_diff[c >> 4][r >> 4]);
            // Update pointers for next iteration.
            sig += 16;
            mc_running_avg_y += 16;
            running_avg_y += 16;
          }

          if ((r + 1) % 16 == 0 || (bs == BLOCK_16X8 && r == 7)) {
            for (c = 0; c < (4 << b_width_log2_lookup[bs]); c += 16) {
              sum_diff += sum_diff_16x1(acc_diff[c >> 4][r >> 4]);
            }
          }
          sig = sig - 16 * ((4 << b_width_log2_lookup[bs]) >> 4) + sig_stride;
          mc_running_avg_y = mc_running_avg_y -
                             16 * ((4 << b_width_log2_lookup[bs]) >> 4) +
                             mc_avg_y_stride;
          running_avg_y = running_avg_y -
                          16 * ((4 << b_width_log2_lookup[bs]) >> 4) +
                          avg_y_stride;
        }
        if (abs(sum_diff) > sum_diff_thresh) {
          return COPY_BLOCK;
        }
      } else {
        return COPY_BLOCK;
      }
    }
  }
  return FILTER_BLOCK;
}

int vp9_denoiser_filter_sse2(const uint8_t *sig, int sig_stride,
                             const uint8_t *mc_avg,
                             int mc_avg_stride,
                             uint8_t *avg, int avg_stride,
                             int increase_denoising,
                             BLOCK_SIZE bs,
                             int motion_magnitude) {
  if (bs == BLOCK_4X4 || bs == BLOCK_4X8) {
    return vp9_denoiser_4xM_sse2(sig, sig_stride,
                                 mc_avg, mc_avg_stride,
                                 avg, avg_stride,
                                 increase_denoising,
                                 bs, motion_magnitude);
  } else if (bs == BLOCK_8X4 || bs == BLOCK_8X8 || bs == BLOCK_8X16) {
    return vp9_denoiser_8xM_sse2(sig, sig_stride,
                                 mc_avg, mc_avg_stride,
                                 avg, avg_stride,
                                 increase_denoising,
                                 bs, motion_magnitude);
  } else if (bs == BLOCK_16X8 || bs == BLOCK_16X16 || bs == BLOCK_16X32 ||
             bs == BLOCK_32X16 || bs == BLOCK_32X32 || bs == BLOCK_32X64 ||
             bs == BLOCK_64X32 || bs == BLOCK_64X64) {
    return vp9_denoiser_64_32_16xM_sse2(sig, sig_stride,
                                        mc_avg, mc_avg_stride,
                                        avg, avg_stride,
                                        increase_denoising,
                                        bs, motion_magnitude);
  } else {
    return COPY_BLOCK;
  }
}
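
// A minimal usage sketch (hypothetical buffers, strides, and motion magnitude;
// in the encoder this entry point is normally reached through vp9_denoiser.c
// and the RTCD dispatch). FILTER_BLOCK means the filtered result in `avg`
// should be used; on COPY_BLOCK the caller typically falls back to copying
// the original `sig` block instead. Documentation only, not compiled.
#if 0
static void denoise_16x16_example(const uint8_t *src, int src_stride,
                                  const uint8_t *mc_avg, int mc_avg_stride,
                                  uint8_t *avg, int avg_stride) {
  const int decision =
      vp9_denoiser_filter_sse2(src, src_stride, mc_avg, mc_avg_stride,
                               avg, avg_stride, 0 /* increase_denoising */,
                               BLOCK_16X16, 8 /* motion_magnitude */);
  if (decision == COPY_BLOCK) {
    // Keep the original source block; `avg` should not be used as the
    // denoised output in this case.
  }
}
#endif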