Index: source/libvpx/vp9/encoder/x86/vp9_denoiser_sse2.c
===================================================================
--- source/libvpx/vp9/encoder/x86/vp9_denoiser_sse2.c	(revision 293081)
+++ source/libvpx/vp9/encoder/x86/vp9_denoiser_sse2.c	(working copy)
@@ -23,18 +23,17 @@
 // Compute the sum of all pixel differences of this MB.
 static INLINE int sum_diff_16x1(__m128i acc_diff) {
   const __m128i k_1 = _mm_set1_epi16(1);
-  const __m128i acc_diff_lo = _mm_srai_epi16(
-      _mm_unpacklo_epi8(acc_diff, acc_diff), 8);
-  const __m128i acc_diff_hi = _mm_srai_epi16(
-      _mm_unpackhi_epi8(acc_diff, acc_diff), 8);
+  const __m128i acc_diff_lo =
+      _mm_srai_epi16(_mm_unpacklo_epi8(acc_diff, acc_diff), 8);
+  const __m128i acc_diff_hi =
+      _mm_srai_epi16(_mm_unpackhi_epi8(acc_diff, acc_diff), 8);
   const __m128i acc_diff_16 = _mm_add_epi16(acc_diff_lo, acc_diff_hi);
   const __m128i hg_fe_dc_ba = _mm_madd_epi16(acc_diff_16, k_1);
-  const __m128i hgfe_dcba = _mm_add_epi32(hg_fe_dc_ba,
-                                          _mm_srli_si128(hg_fe_dc_ba, 8));
-  const __m128i hgfedcba = _mm_add_epi32(hgfe_dcba,
-                                         _mm_srli_si128(hgfe_dcba, 4));
-  int sum_diff = _mm_cvtsi128_si32(hgfedcba);
-  return sum_diff;
+  const __m128i hgfe_dcba =
+      _mm_add_epi32(hg_fe_dc_ba, _mm_srli_si128(hg_fe_dc_ba, 8));
+  const __m128i hgfedcba =
+      _mm_add_epi32(hgfe_dcba, _mm_srli_si128(hgfe_dcba, 4));
+  return _mm_cvtsi128_si32(hgfedcba);
 }
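
Aside: sum_diff_16x1() is a standard SSE2 horizontal reduction. Unpacking a byte against itself and arithmetic-shifting right by 8 sign-extends each of the 16 accumulated byte differences to 16 bits; _mm_madd_epi16 against a vector of ones folds adjacent pairs into four 32-bit sums, and the two _mm_srli_si128/_mm_add_epi32 steps fold those into lane 0, which _mm_cvtsi128_si32 extracts. A minimal scalar sketch of the same computation (the helper name is ours, for illustration only):

#include <stdint.h>

// Scalar equivalent of sum_diff_16x1(): total of 16 signed byte lanes.
// Illustrative helper, not part of the patch.
static int sum_diff_16x1_scalar(const int8_t lanes[16]) {
  int i;
  int sum = 0;
  for (i = 0; i < 16; ++i)
    sum += lanes[i];  // each lane holds an accumulated pixel difference
  return sum;
}
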
 // Denoise a 16x1 vector.
@@ -51,8 +50,8 @@
                                              __m128i acc_diff) {
   // Calculate differences
   const __m128i v_sig = _mm_loadu_si128((const __m128i *)(&sig[0]));
-  const __m128i v_mc_running_avg_y = _mm_loadu_si128(
-      (const __m128i *)(&mc_running_avg_y[0]));
+  const __m128i v_mc_running_avg_y =
+      _mm_loadu_si128((const __m128i *)(&mc_running_avg_y[0]));
   __m128i v_running_avg_y;
   const __m128i pdiff = _mm_subs_epu8(v_mc_running_avg_y, v_sig);
   const __m128i ndiff = _mm_subs_epu8(v_sig, v_mc_running_avg_y);
@@ -60,8 +59,8 @@
   const __m128i diff_sign = _mm_cmpeq_epi8(pdiff, *k_0);
   // Clamp absolute difference to 16 to be used to get mask. Doing this
   // allows us to use _mm_cmpgt_epi8, which operates on signed byte.
-  const __m128i clamped_absdiff = _mm_min_epu8(
-      _mm_or_si128(pdiff, ndiff), *k_16);
+  const __m128i clamped_absdiff =
+      _mm_min_epu8(_mm_or_si128(pdiff, ndiff), *k_16);
   // Get masks for l2 l1 and l0 adjustments.
   const __m128i mask2 = _mm_cmpgt_epi8(*k_16, clamped_absdiff);
   const __m128i mask1 = _mm_cmpgt_epi8(*k_8, clamped_absdiff);
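
Aside: pdiff/ndiff above is the usual SSE2 byte absolute-difference idiom. _mm_subs_epu8 saturates at zero, so at most one of the two results is nonzero and their OR equals the per-byte absolute difference; pdiff == 0 flags the lanes where the difference is negative (or zero, where the adjustment is zero anyway). Clamping to 16 then keeps every value inside signed-byte range so the _mm_cmpgt_epi8 mask comparisons are valid. A per-pixel scalar model (the helper name is ours):

#include <stdint.h>

// Scalar model of the pdiff/ndiff idiom; illustrative only.
static uint8_t abs_diff_u8(uint8_t sig, uint8_t mc_avg) {
  const uint8_t p = (mc_avg > sig) ? (uint8_t)(mc_avg - sig) : 0;  // subs_epu8
  const uint8_t n = (sig > mc_avg) ? (uint8_t)(sig - mc_avg) : 0;  // subs_epu8
  return (uint8_t)(p | n);  // at most one term is nonzero
}
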
@@ -95,17 +94,15 @@
 }
 // Denoise a 16x1 vector with a weaker filter.
-static INLINE __m128i vp9_denoiser_adj_16x1_sse2(const uint8_t *sig,
-                                                 const uint8_t *mc_running_avg_y,
-                                                 uint8_t *running_avg_y,
-                                                 const __m128i k_0,
-                                                 const __m128i k_delta,
-                                                 __m128i acc_diff) {
+static INLINE __m128i vp9_denoiser_adj_16x1_sse2(
+    const uint8_t *sig, const uint8_t *mc_running_avg_y,
+    uint8_t *running_avg_y, const __m128i k_0,
+    const __m128i k_delta, __m128i acc_diff) {
   __m128i v_running_avg_y = _mm_loadu_si128((__m128i *)(&running_avg_y[0]));
   // Calculate differences.
   const __m128i v_sig = _mm_loadu_si128((const __m128i *)(&sig[0]));
   const __m128i v_mc_running_avg_y =
-    _mm_loadu_si128((const __m128i *)(&mc_running_avg_y[0]));
+      _mm_loadu_si128((const __m128i *)(&mc_running_avg_y[0]));
   const __m128i pdiff = _mm_subs_epu8(v_mc_running_avg_y, v_sig);
   const __m128i ndiff = _mm_subs_epu8(v_sig, v_mc_running_avg_y);
   // Obtain the sign. FF if diff is negative.
@@ -112,7 +109,7 @@
   const __m128i diff_sign = _mm_cmpeq_epi8(pdiff, k_0);
   // Clamp absolute difference to delta to get the adjustment.
   const __m128i adj =
-    _mm_min_epu8(_mm_or_si128(pdiff, ndiff), k_delta);
+      _mm_min_epu8(_mm_or_si128(pdiff, ndiff), k_delta);
   // Restore the sign and get positive and negative adjustments.
   __m128i padj, nadj;
   padj = _mm_andnot_si128(diff_sign, adj);
@@ -128,19 +125,16 @@
   return acc_diff;
 }
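
Aside: the weaker filter reuses the same absolute-difference idiom but caps the magnitude at k_delta and applies it to running_avg_y with the sign restored, pulling the running average back toward the source signal. A per-pixel scalar sketch (the helper name is ours; the acc_diff bookkeeping that the vector code also performs is omitted):

#include <stdint.h>

// Scalar sketch of vp9_denoiser_adj_16x1_sse2() for a single pixel:
// move avg toward sig by at most delta, taking magnitude and sign
// from the mc_avg/sig difference as the vector code does.
static uint8_t adj_one_pixel(uint8_t sig, uint8_t mc_avg, uint8_t avg,
                             uint8_t delta) {
  const uint8_t p = (mc_avg > sig) ? (uint8_t)(mc_avg - sig) : 0;
  const uint8_t n = (sig > mc_avg) ? (uint8_t)(sig - mc_avg) : 0;
  const uint8_t adj = ((p | n) < delta) ? (p | n) : delta;  // min(|diff|, delta)
  if (p)  // mc_avg > sig: adjust downward, saturating at 0
    return (avg > adj) ? (uint8_t)(avg - adj) : 0;
  return (avg + adj > 255) ? 255 : (uint8_t)(avg + adj);  // upward, sat. at 255
}
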
-static int vp9_denoiser_4xM_sse2(const uint8_t *sig, int sig_stride,
-                                 const uint8_t *mc_running_avg_y,
-                                 int mc_avg_y_stride,
-                                 uint8_t *running_avg_y, int avg_y_stride,
-                                 int increase_denoising,
-                                 BLOCK_SIZE bs,
-                                 int motion_magnitude) {
-  int sum_diff_thresh;
-  int r;
-  int shift_inc = (increase_denoising &&
-                   motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD) ? 1 : 0;
-  unsigned char sig_buffer[2][16], mc_running_buffer[2][16],
-                running_buffer[2][16];
+// Denoiser for 4xM and 8xM blocks.
+static int vp9_denoiser_NxM_sse2_small(
+    const uint8_t *sig, int sig_stride, const uint8_t *mc_running_avg_y,
+    int mc_avg_y_stride, uint8_t *running_avg_y, int avg_y_stride,
+    int increase_denoising, BLOCK_SIZE bs, int motion_magnitude, int width) {
+  int sum_diff_thresh, r, sum_diff = 0;
+  const int shift_inc = (increase_denoising &&
+                         motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD) ?
+                        1 : 0;
+  uint8_t sig_buffer[8][16], mc_running_buffer[8][16], running_buffer[8][16];
   __m128i acc_diff = _mm_setzero_si128();
   const __m128i k_0 = _mm_setzero_si128();
   const __m128i k_4 = _mm_set1_epi8(4 + shift_inc);
@@ -148,48 +142,51 @@
   const __m128i k_16 = _mm_set1_epi8(16);
   // Modify each level's adjustment according to motion_magnitude.
   const __m128i l3 = _mm_set1_epi8(
-      (motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD) ?
-      7 + shift_inc : 6);
+      (motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD) ? 7 + shift_inc : 6);
   // Difference between level 3 and level 2 is 2.
   const __m128i l32 = _mm_set1_epi8(2);
   // Difference between level 2 and level 1 is 1.
   const __m128i l21 = _mm_set1_epi8(1);
-  int sum_diff = 0;
+  const uint8_t shift = (width == 4) ? 2 : 1;
-  for (r = 0; r < ((4 << b_height_log2_lookup[bs]) >> 2); ++r) {
-    vpx_memcpy(sig_buffer[r], sig, 4);
-    vpx_memcpy(sig_buffer[r] + 4, sig + sig_stride, 4);
-    vpx_memcpy(sig_buffer[r] + 8, sig + sig_stride * 2, 4);
-    vpx_memcpy(sig_buffer[r] + 12, sig + sig_stride * 3, 4);
-    vpx_memcpy(mc_running_buffer[r], mc_running_avg_y, 4);
-    vpx_memcpy(mc_running_buffer[r] + 4, mc_running_avg_y +
-               mc_avg_y_stride, 4);
-    vpx_memcpy(mc_running_buffer[r] + 8, mc_running_avg_y +
-               mc_avg_y_stride * 2, 4);
-    vpx_memcpy(mc_running_buffer[r] + 12, mc_running_avg_y +
-               mc_avg_y_stride * 3, 4);
-    vpx_memcpy(running_buffer[r], running_avg_y, 4);
-    vpx_memcpy(running_buffer[r] + 4, running_avg_y +
-               avg_y_stride, 4);
-    vpx_memcpy(running_buffer[r] + 8, running_avg_y +
-               avg_y_stride * 2, 4);
-    vpx_memcpy(running_buffer[r] + 12, running_avg_y +
-               avg_y_stride * 3, 4);
+  for (r = 0; r < ((4 << b_height_log2_lookup[bs]) >> shift); ++r) {
+    vpx_memcpy(sig_buffer[r], sig, width);
+    vpx_memcpy(sig_buffer[r] + width, sig + sig_stride, width);
+    vpx_memcpy(mc_running_buffer[r], mc_running_avg_y, width);
+    vpx_memcpy(mc_running_buffer[r] + width,
+               mc_running_avg_y + mc_avg_y_stride, width);
+    vpx_memcpy(running_buffer[r], running_avg_y, width);
+    vpx_memcpy(running_buffer[r] + width,
+               running_avg_y + avg_y_stride, width);
+    if (width == 4) {
+      vpx_memcpy(sig_buffer[r] + width * 2, sig + sig_stride * 2, width);
+      vpx_memcpy(sig_buffer[r] + width * 3, sig + sig_stride * 3, width);
+      vpx_memcpy(mc_running_buffer[r] + width * 2,
+                 mc_running_avg_y + mc_avg_y_stride * 2, width);
+      vpx_memcpy(mc_running_buffer[r] + width * 3,
+                 mc_running_avg_y + mc_avg_y_stride * 3, width);
+      vpx_memcpy(running_buffer[r] + width * 2,
+                 running_avg_y + avg_y_stride * 2, width);
+      vpx_memcpy(running_buffer[r] + width * 3,
+                 running_avg_y + avg_y_stride * 3, width);
+    }
     acc_diff = vp9_denoiser_16x1_sse2(sig_buffer[r],
                                       mc_running_buffer[r],
                                       running_buffer[r],
                                       &k_0, &k_4, &k_8, &k_16,
                                       &l3, &l32, &l21, acc_diff);
-    vpx_memcpy(running_avg_y, running_buffer[r], 4);
-    vpx_memcpy(running_avg_y + avg_y_stride, running_buffer[r] + 4, 4);
-    vpx_memcpy(running_avg_y + avg_y_stride * 2,
-               running_buffer[r] + 8, 4);
-    vpx_memcpy(running_avg_y + avg_y_stride * 3,
-               running_buffer[r] + 12, 4);
+    vpx_memcpy(running_avg_y, running_buffer[r], width);
+    vpx_memcpy(running_avg_y + avg_y_stride, running_buffer[r] + width, width);
+    if (width == 4) {
+      vpx_memcpy(running_avg_y + avg_y_stride * 2,
+                 running_buffer[r] + width * 2, width);
+      vpx_memcpy(running_avg_y + avg_y_stride * 3,
+                 running_buffer[r] + width * 3, width);
+    }
     // Update pointers for next iteration.
-    sig += (sig_stride << 2);
-    mc_running_avg_y += (mc_avg_y_stride << 2);
-    running_avg_y += (avg_y_stride << 2);
+    sig += (sig_stride << shift);
+    mc_running_avg_y += (mc_avg_y_stride << shift);
+    running_avg_y += (avg_y_stride << shift);
   }
   {
@@ -197,35 +194,36 @@
     sum_diff_thresh = total_adj_strong_thresh(bs, increase_denoising);
     if (abs(sum_diff) > sum_diff_thresh) {
       // Before returning to copy the block (i.e., apply no denoising),
-      // checK if we can still apply some (weaker) temporal filtering to
+      // check if we can still apply some (weaker) temporal filtering to
      // this block, that would otherwise not be denoised at all. Simplest
       // is to apply an additional adjustment to running_avg_y to bring it
       // closer to sig. The adjustment is capped by a maximum delta, and
       // chosen such that in most cases the resulting sum_diff will be
-      // within the accceptable range given by sum_diff_thresh.
+      // within the acceptable range given by sum_diff_thresh.
       // The delta is set by the excess of absolute pixel diff over the
       // threshold.
-      int delta = ((abs(sum_diff) - sum_diff_thresh)
-                   >> num_pels_log2_lookup[bs]) + 1;
+      const int delta = ((abs(sum_diff) - sum_diff_thresh) >>
+                         num_pels_log2_lookup[bs]) + 1;
       // Only apply the adjustment for max delta up to 3.
       if (delta < 4) {
         const __m128i k_delta = _mm_set1_epi8(delta);
         running_avg_y -= avg_y_stride * (4 << b_height_log2_lookup[bs]);
-        sum_diff = 0;
-        for (r = 0; r < ((4 << b_height_log2_lookup[bs]) >> 2); ++r) {
+        for (r = 0; r < ((4 << b_height_log2_lookup[bs]) >> shift); ++r) {
           acc_diff = vp9_denoiser_adj_16x1_sse2(
-              sig_buffer[r], mc_running_buffer[r],
-              running_buffer[r], k_0, k_delta,
-              acc_diff);
-          vpx_memcpy(running_avg_y, running_buffer[r], 4);
-          vpx_memcpy(running_avg_y + avg_y_stride, running_buffer[r] + 4, 4);
-          vpx_memcpy(running_avg_y + avg_y_stride * 2,
-                     running_buffer[r] + 8, 4);
-          vpx_memcpy(running_avg_y + avg_y_stride * 3,
-                     running_buffer[r] + 12, 4);
+              sig_buffer[r], mc_running_buffer[r], running_buffer[r],
+              k_0, k_delta, acc_diff);
+          vpx_memcpy(running_avg_y, running_buffer[r], width);
+          vpx_memcpy(running_avg_y + avg_y_stride,
+                     running_buffer[r] + width, width);
+          if (width == 4) {
+            vpx_memcpy(running_avg_y + avg_y_stride * 2,
+                       running_buffer[r] + width * 2, width);
+            vpx_memcpy(running_avg_y + avg_y_stride * 3,
+                       running_buffer[r] + width * 3, width);
+          }
           // Update pointers for next iteration.
-          running_avg_y += (avg_y_stride << 2);
+          running_avg_y += (avg_y_stride << shift);
         }
         sum_diff = sum_diff_16x1(acc_diff);
         if (abs(sum_diff) > sum_diff_thresh) {
@@ -239,109 +237,18 @@
   return FILTER_BLOCK;
 }
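
Aside: the small-block path exists because a 4- or 8-pixel row cannot fill an XMM register. It gathers 16 / width consecutive source rows into one 16-byte scratch row (four rows for width 4, two for width 8, hence the loop shift of 2 or 1), runs the 16-lane kernel on the scratch, and scatters the result back a row at a time. A minimal sketch of the gather step (the helper name is ours):

#include <stdint.h>
#include <string.h>

// Pack 16/width consecutive rows of a narrow block into one 16-byte
// scratch row, mirroring the vpx_memcpy sequences above. Illustrative
// helper; assumes width is 4 or 8.
static void pack_narrow_rows(uint8_t dst[16], const uint8_t *src,
                             int stride, int width) {
  const int rows_per_vec = 16 / width;  // 4 when width == 4, 2 when width == 8
  int i;
  for (i = 0; i < rows_per_vec; ++i)
    memcpy(dst + i * width, src + i * stride, width);
}
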
-static int vp9_denoiser_8xM_sse2(const uint8_t *sig, int sig_stride,
-                                 const uint8_t *mc_running_avg_y,
-                                 int mc_avg_y_stride,
-                                 uint8_t *running_avg_y, int avg_y_stride,
-                                 int increase_denoising,
-                                 BLOCK_SIZE bs,
-                                 int motion_magnitude) {
-  int sum_diff_thresh;
-  int r;
-  int shift_inc = (increase_denoising &&
-                   motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD) ? 1 : 0;
-  unsigned char sig_buffer[8][16], mc_running_buffer[8][16],
-                running_buffer[8][16];
-  __m128i acc_diff = _mm_setzero_si128();
-  const __m128i k_0 = _mm_setzero_si128();
-  const __m128i k_4 = _mm_set1_epi8(4 + shift_inc);
-  const __m128i k_8 = _mm_set1_epi8(8);
-  const __m128i k_16 = _mm_set1_epi8(16);
-  // Modify each level's adjustment according to motion_magnitude.
-  const __m128i l3 = _mm_set1_epi8(
-      (motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD) ?
-      7 + shift_inc : 6);
-  // Difference between level 3 and level 2 is 2.
-  const __m128i l32 = _mm_set1_epi8(2);
-  // Difference between level 2 and level 1 is 1.
-  const __m128i l21 = _mm_set1_epi8(1);
-  int sum_diff = 0;
-
-  for (r = 0; r < ((4 << b_height_log2_lookup[bs]) >> 1); ++r) {
-    vpx_memcpy(sig_buffer[r], sig, 8);
-    vpx_memcpy(sig_buffer[r] + 8, sig + sig_stride, 8);
-    vpx_memcpy(mc_running_buffer[r], mc_running_avg_y, 8);
-    vpx_memcpy(mc_running_buffer[r] + 8, mc_running_avg_y +
-               mc_avg_y_stride, 8);
-    vpx_memcpy(running_buffer[r], running_avg_y, 8);
-    vpx_memcpy(running_buffer[r] + 8, running_avg_y +
-               avg_y_stride, 8);
-    acc_diff = vp9_denoiser_16x1_sse2(sig_buffer[r],
-                                      mc_running_buffer[r],
-                                      running_buffer[r],
-                                      &k_0, &k_4, &k_8, &k_16,
-                                      &l3, &l32, &l21, acc_diff);
-    vpx_memcpy(running_avg_y, running_buffer[r], 8);
-    vpx_memcpy(running_avg_y + avg_y_stride, running_buffer[r] + 8, 8);
-    // Update pointers for next iteration.
-    sig += (sig_stride << 1);
-    mc_running_avg_y += (mc_avg_y_stride << 1);
-    running_avg_y += (avg_y_stride << 1);
-  }
-
-  {
-    sum_diff = sum_diff_16x1(acc_diff);
-    sum_diff_thresh = total_adj_strong_thresh(bs, increase_denoising);
-    if (abs(sum_diff) > sum_diff_thresh) {
-      // Before returning to copy the block (i.e., apply no denoising),
-      // checK if we can still apply some (weaker) temporal filtering to
-      // this block, that would otherwise not be denoised at all. Simplest
-      // is to apply an additional adjustment to running_avg_y to bring it
-      // closer to sig. The adjustment is capped by a maximum delta, and
-      // chosen such that in most cases the resulting sum_diff will be
-      // within the accceptable range given by sum_diff_thresh.
-
-      // The delta is set by the excess of absolute pixel diff over the
-      // threshold.
-      int delta = ((abs(sum_diff) - sum_diff_thresh)
-                   >> num_pels_log2_lookup[bs]) + 1;
-      // Only apply the adjustment for max delta up to 3.
-      if (delta < 4) {
-        const __m128i k_delta = _mm_set1_epi8(delta);
-        running_avg_y -= avg_y_stride * (4 << b_height_log2_lookup[bs]);
-        for (r = 0; r < ((4 << b_height_log2_lookup[bs]) >> 1); ++r) {
-          acc_diff = vp9_denoiser_adj_16x1_sse2(
-              sig_buffer[r], mc_running_buffer[r],
-              running_buffer[r], k_0, k_delta,
-              acc_diff);
-          vpx_memcpy(running_avg_y, running_buffer[r], 8);
-          vpx_memcpy(running_avg_y + avg_y_stride, running_buffer[r] + 8, 8);
-          // Update pointers for next iteration.
-          running_avg_y += (avg_y_stride << 1);
-        }
-        sum_diff = sum_diff_16x1(acc_diff);
-        if (abs(sum_diff) > sum_diff_thresh) {
-          return COPY_BLOCK;
-        }
-      } else {
-        return COPY_BLOCK;
-      }
-    }
-  }
-  return FILTER_BLOCK;
-}
-
-static int vp9_denoiser_64_32_16xM_sse2(const uint8_t *sig, int sig_stride,
-                                        const uint8_t *mc_running_avg_y,
-                                        int mc_avg_y_stride,
-                                        uint8_t *running_avg_y,
-                                        int avg_y_stride,
-                                        int increase_denoising, BLOCK_SIZE bs,
-                                        int motion_magnitude) {
-  int sum_diff_thresh;
-  int r, c;
-  int shift_inc = (increase_denoising &&
-                   motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD) ? 1 : 0;
+// Denoiser for 16xM, 32xM and 64xM blocks.
+static int vp9_denoiser_NxM_sse2_big(const uint8_t *sig, int sig_stride,
+                                     const uint8_t *mc_running_avg_y,
+                                     int mc_avg_y_stride,
+                                     uint8_t *running_avg_y,
+                                     int avg_y_stride,
+                                     int increase_denoising, BLOCK_SIZE bs,
+                                     int motion_magnitude) {
+  int sum_diff_thresh, r, c, sum_diff = 0;
+  const int shift_inc = (increase_denoising &&
+                         motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD) ?
+                        1 : 0;
   __m128i acc_diff[4][4];
   const __m128i k_0 = _mm_setzero_si128();
   const __m128i k_4 = _mm_set1_epi8(4 + shift_inc);
@@ -349,13 +256,11 @@
   const __m128i k_16 = _mm_set1_epi8(16);
   // Modify each level's adjustment according to motion_magnitude.
   const __m128i l3 = _mm_set1_epi8(
-      (motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD) ?
-      7 + shift_inc : 6);
+      (motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD) ? 7 + shift_inc : 6);
   // Difference between level 3 and level 2 is 2.
   const __m128i l32 = _mm_set1_epi8(2);
   // Difference between level 2 and level 1 is 1.
   const __m128i l21 = _mm_set1_epi8(1);
-  int sum_diff = 0;
   for (c = 0; c < 4; ++c) {
     for (r = 0; r < 4; ++r) {
@@ -363,13 +268,11 @@
     }
   }
-  for (r = 0; r < (4 << b_height_log2_lookup[bs]); r++) {
+  for (r = 0; r < (4 << b_height_log2_lookup[bs]); ++r) {
     for (c = 0; c < (4 << b_width_log2_lookup[bs]); c += 16) {
       acc_diff[c>>4][r>>4] = vp9_denoiser_16x1_sse2(
-          sig, mc_running_avg_y,
-          running_avg_y,
-          &k_0, &k_4, &k_8, &k_16,
-          &l3, &l32, &l21, acc_diff[c>>4][r>>4]);
+          sig, mc_running_avg_y, running_avg_y, &k_0, &k_4,
+          &k_8, &k_16, &l3, &l32, &l21, acc_diff[c>>4][r>>4]);
       // Update pointers for next iteration.
       sig += 16;
       mc_running_avg_y += 16;
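
Aside: the big-block path keeps one accumulator per 16x16 tile rather than a single running total. acc_diff[c>>4][r>>4] selects the accumulator by 16-wide column group and 16-row band, so a 64x64 block uses the full 4x4 grid and the per-lane running totals stay small enough for a saturating signed-byte accumulator. An index sketch (names ours, not part of the patch):

// Which acc_diff[group][band] accumulator the 16 pixels starting at
// column c in row r feed; illustrative only.
static void acc_diff_index(int r, int c, int *group, int *band) {
  *group = c >> 4;  // 16-wide column group, 0..3 for widths up to 64
  *band = r >> 4;   // 16-row band, 0..3 for heights up to 64
}
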
@@ -385,8 +288,8 @@
       // Update pointers for next iteration.
       sig = sig - 16 * ((4 << b_width_log2_lookup[bs]) >> 4) + sig_stride;
       mc_running_avg_y = mc_running_avg_y -
-          16 * ((4 << b_width_log2_lookup[bs]) >> 4) +
-          mc_avg_y_stride;
+                         16 * ((4 << b_width_log2_lookup[bs]) >> 4) +
+                         mc_avg_y_stride;
       running_avg_y = running_avg_y -
                       16 * ((4 << b_width_log2_lookup[bs]) >> 4) +
                       avg_y_stride;
@@ -395,8 +298,8 @@
   {
     sum_diff_thresh = total_adj_strong_thresh(bs, increase_denoising);
     if (abs(sum_diff) > sum_diff_thresh) {
-      int delta = ((abs(sum_diff) - sum_diff_thresh)
-                   >> num_pels_log2_lookup[bs]) + 1;
+      const int delta = ((abs(sum_diff) - sum_diff_thresh) >>
+                         num_pels_log2_lookup[bs]) + 1;
       // Only apply the adjustment for max delta up to 3.
       if (delta < 4) {
@@ -408,9 +311,8 @@
         for (r = 0; r < (4 << b_height_log2_lookup[bs]); ++r) {
           for (c = 0; c < (4 << b_width_log2_lookup[bs]); c += 16) {
             acc_diff[c>>4][r>>4] = vp9_denoiser_adj_16x1_sse2(
-                sig, mc_running_avg_y,
-                running_avg_y, k_0,
-                k_delta, acc_diff[c>>4][r>>4]);
+                sig, mc_running_avg_y, running_avg_y, k_0,
+                k_delta, acc_diff[c>>4][r>>4]);
             // Update pointers for next iteration.
             sig += 16;
             mc_running_avg_y += 16;
@@ -449,25 +351,25 @@
                              BLOCK_SIZE bs,
                              int motion_magnitude) {
   if (bs == BLOCK_4X4 || bs == BLOCK_4X8) {
-    return vp9_denoiser_4xM_sse2(sig, sig_stride,
-                                 mc_avg, mc_avg_stride,
-                                 avg, avg_stride,
-                                 increase_denoising,
-                                 bs, motion_magnitude);
+    return vp9_denoiser_NxM_sse2_small(sig, sig_stride,
+                                       mc_avg, mc_avg_stride,
+                                       avg, avg_stride,
+                                       increase_denoising,
+                                       bs, motion_magnitude, 4);
   } else if (bs == BLOCK_8X4 || bs == BLOCK_8X8 || bs == BLOCK_8X16) {
-    return vp9_denoiser_8xM_sse2(sig, sig_stride,
-                                 mc_avg, mc_avg_stride,
-                                 avg, avg_stride,
-                                 increase_denoising,
-                                 bs, motion_magnitude);
+    return vp9_denoiser_NxM_sse2_small(sig, sig_stride,
+                                       mc_avg, mc_avg_stride,
+                                       avg, avg_stride,
+                                       increase_denoising,
+                                       bs, motion_magnitude, 8);
   } else if (bs == BLOCK_16X8 || bs == BLOCK_16X16 || bs == BLOCK_16X32 ||
              bs == BLOCK_32X16|| bs == BLOCK_32X32 || bs == BLOCK_32X64 ||
              bs == BLOCK_64X32 || bs == BLOCK_64X64) {
-    return vp9_denoiser_64_32_16xM_sse2(sig, sig_stride,
-                                        mc_avg, mc_avg_stride,
-                                        avg, avg_stride,
-                                        increase_denoising,
-                                        bs, motion_magnitude);
+    return vp9_denoiser_NxM_sse2_big(sig, sig_stride,
+                                     mc_avg, mc_avg_stride,
+                                     avg, avg_stride,
+                                     increase_denoising,
+                                     bs, motion_magnitude);
   } else {
     return COPY_BLOCK;
   }
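
Aside: after this patch the dispatcher routes 4xM and 8xM blocks to the shared small-block kernel, differing only in the trailing width argument, and 16xM and larger to the big-block kernel; unsupported sizes fall through to COPY_BLOCK. A hypothetical call site (the wrapper name and parameters are ours; the real caller lives in vp9_denoiser.c, and COPY_BLOCK/FILTER_BLOCK come from vp9_denoiser.h):

#include <stdint.h>

// Illustrative wrapper showing the dispatch contract; not part of the patch.
static void denoise_one_block(const uint8_t *sig_y, int sig_stride,
                              const uint8_t *mc_avg_y, int mc_avg_stride,
                              uint8_t *avg_y, int avg_stride,
                              BLOCK_SIZE bs, int motion_magnitude) {
  const int decision = vp9_denoiser_filter_sse2(
      sig_y, sig_stride, mc_avg_y, mc_avg_stride, avg_y, avg_stride,
      0 /* increase_denoising */, bs, motion_magnitude);
  if (decision == COPY_BLOCK) {
    // Filtering was rejected; the real caller copies the source block instead.
  }
}
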