Index: source/libvpx/vpx_dsp/x86/convolve.h
diff --git a/source/libvpx/vpx_dsp/x86/convolve.h b/source/libvpx/vpx_dsp/x86/convolve.h
index c0144981b493526072200103f411bdb8f0c2cd3b..b6fbfcf928a92ab771a380625ea17a15ed6954b5 100644
--- a/source/libvpx/vpx_dsp/x86/convolve.h
+++ b/source/libvpx/vpx_dsp/x86/convolve.h
@@ -31,81 +31,76 @@ typedef void filter8_1dfunction (
                                   uint8_t *dst, ptrdiff_t dst_stride, \
                                   const int16_t *filter_x, int x_step_q4, \
                                   const int16_t *filter_y, int y_step_q4, \
                                   int w, int h) { \
-  if (step_q4 == 16 && filter[3] != 128) { \
-    if (filter[0] || filter[1] || filter[2]) { \
-      while (w >= 16) { \
-        vpx_filter_block1d16_##dir##8_##avg##opt(src_start, \
-                                                 src_stride, \
-                                                 dst, \
-                                                 dst_stride, \
-                                                 h, \
-                                                 filter); \
-        src += 16; \
-        dst += 16; \
-        w -= 16; \
-      } \
-      while (w >= 8) { \
-        vpx_filter_block1d8_##dir##8_##avg##opt(src_start, \
-                                                src_stride, \
-                                                dst, \
-                                                dst_stride, \
-                                                h, \
-                                                filter); \
-        src += 8; \
-        dst += 8; \
-        w -= 8; \
-      } \
-      while (w >= 4) { \
-        vpx_filter_block1d4_##dir##8_##avg##opt(src_start, \
-                                                src_stride, \
-                                                dst, \
-                                                dst_stride, \
-                                                h, \
-                                                filter); \
-        src += 4; \
-        dst += 4; \
-        w -= 4; \
-      } \
-    } else { \
-      while (w >= 16) { \
-        vpx_filter_block1d16_##dir##2_##avg##opt(src, \
-                                                 src_stride, \
-                                                 dst, \
-                                                 dst_stride, \
-                                                 h, \
-                                                 filter); \
-        src += 16; \
-        dst += 16; \
-        w -= 16; \
-      } \
-      while (w >= 8) { \
-        vpx_filter_block1d8_##dir##2_##avg##opt(src, \
-                                                src_stride, \
-                                                dst, \
-                                                dst_stride, \
-                                                h, \
-                                                filter); \
-        src += 8; \
-        dst += 8; \
-        w -= 8; \
-      } \
-      while (w >= 4) { \
-        vpx_filter_block1d4_##dir##2_##avg##opt(src, \
-                                                src_stride, \
-                                                dst, \
-                                                dst_stride, \
-                                                h, \
-                                                filter); \
-        src += 4; \
-        dst += 4; \
-        w -= 4; \
-      } \
+  assert(filter[3] != 128); \
+  assert(step_q4 == 16); \
+  if (filter[0] || filter[1] || filter[2]) { \
+    while (w >= 16) { \
+      vpx_filter_block1d16_##dir##8_##avg##opt(src_start, \
+                                               src_stride, \
+                                               dst, \
+                                               dst_stride, \
+                                               h, \
+                                               filter); \
+      src += 16; \
+      dst += 16; \
+      w -= 16; \
+    } \
+    while (w >= 8) { \
+      vpx_filter_block1d8_##dir##8_##avg##opt(src_start, \
+                                              src_stride, \
+                                              dst, \
+                                              dst_stride, \
+                                              h, \
+                                              filter); \
+      src += 8; \
+      dst += 8; \
+      w -= 8; \
+    } \
+    while (w >= 4) { \
+      vpx_filter_block1d4_##dir##8_##avg##opt(src_start, \
+                                              src_stride, \
+                                              dst, \
+                                              dst_stride, \
+                                              h, \
+                                              filter); \
+      src += 4; \
+      dst += 4; \
+      w -= 4; \
+    } \
+  } else { \
+    while (w >= 16) { \
+      vpx_filter_block1d16_##dir##2_##avg##opt(src, \
+                                               src_stride, \
+                                               dst, \
+                                               dst_stride, \
+                                               h, \
+                                               filter); \
+      src += 16; \
+      dst += 16; \
+      w -= 16; \
+    } \
+    while (w >= 8) { \
+      vpx_filter_block1d8_##dir##2_##avg##opt(src, \
+                                              src_stride, \
+                                              dst, \
+                                              dst_stride, \
+                                              h, \
+                                              filter); \
+      src += 8; \
+      dst += 8; \
+      w -= 8; \
+    } \
+    while (w >= 4) { \
+      vpx_filter_block1d4_##dir##2_##avg##opt(src, \
+                                              src_stride, \
+                                              dst, \
+                                              dst_stride, \
+                                              h, \
+                                              filter); \
+      src += 4; \
+      dst += 4; \
+      w -= 4; \
    } \
-  } \
-  if (w) { \
-    vpx_convolve8_##name##_c(src, src_stride, dst, dst_stride, \
-                             filter_x, x_step_q4, filter_y, y_step_q4, \
-                             w, h); \
  } \
}
@@ -115,30 +110,29 @@ void vpx_convolve8_##avg##opt(const uint8_t *src, ptrdiff_t src_stride, \
                               uint8_t *dst, ptrdiff_t dst_stride, \
                               const int16_t *filter_x, int x_step_q4, \
                               const int16_t *filter_y, int y_step_q4, \
                               int w, int h) { \
+  assert(filter_x[3] != 128); \
+  assert(filter_y[3] != 128); \
   assert(w <= 64); \
   assert(h <= 64); \
-  if (x_step_q4 == 16 && y_step_q4 == 16) { \
-    if (filter_x[0] || filter_x[1] || filter_x[2] || filter_x[3] == 128 || \
-        filter_y[0] || filter_y[1] || filter_y[2] || filter_y[3] == 128) { \
-      DECLARE_ALIGNED(16, uint8_t, fdata2[64 * 71]); \
-      vpx_convolve8_horiz_##opt(src - 3 * src_stride, src_stride, fdata2, 64, \
-                                filter_x, x_step_q4, filter_y, y_step_q4, \
-                                w, h + 7); \
-      vpx_convolve8_##avg##vert_##opt(fdata2 + 3 * 64, 64, dst, dst_stride, \
-                                      filter_x, x_step_q4, filter_y, \
-                                      y_step_q4, w, h); \
-    } else { \
-      DECLARE_ALIGNED(16, uint8_t, fdata2[64 * 65]); \
-      vpx_convolve8_horiz_##opt(src, src_stride, fdata2, 64, \
-                                filter_x, x_step_q4, filter_y, y_step_q4, \
-                                w, h + 1); \
-      vpx_convolve8_##avg##vert_##opt(fdata2, 64, dst, dst_stride, \
-                                      filter_x, x_step_q4, filter_y, \
-                                      y_step_q4, w, h); \
-    } \
+  assert(x_step_q4 == 16); \
+  assert(y_step_q4 == 16); \
+  if (filter_x[0] || filter_x[1] || filter_x[2] || \
+      filter_y[0] || filter_y[1] || filter_y[2]) { \
+    DECLARE_ALIGNED(16, uint8_t, fdata2[64 * 71]); \
+    vpx_convolve8_horiz_##opt(src - 3 * src_stride, src_stride, fdata2, 64, \
+                              filter_x, x_step_q4, filter_y, y_step_q4, \
+                              w, h + 7); \
+    vpx_convolve8_##avg##vert_##opt(fdata2 + 3 * 64, 64, dst, dst_stride, \
+                                    filter_x, x_step_q4, filter_y, \
+                                    y_step_q4, w, h); \
  } else { \
-    vpx_convolve8_##avg##c(src, src_stride, dst, dst_stride, \
-                           filter_x, x_step_q4, filter_y, y_step_q4, w, h); \
+    DECLARE_ALIGNED(16, uint8_t, fdata2[64 * 65]); \
+    vpx_convolve8_horiz_##opt(src, src_stride, fdata2, 64, \
+                              filter_x, x_step_q4, filter_y, y_step_q4, \
+                              w, h + 1); \
+    vpx_convolve8_##avg##vert_##opt(fdata2, 64, dst, dst_stride, \
+                                    filter_x, x_step_q4, filter_y, \
+                                    y_step_q4, w, h); \
  } \
}
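
Net effect of the two hunks above: the step_q4 == 16 and filter[3] != 128 checks that used to route odd cases to the C functions at runtime are now assert()-only preconditions, and the leftover-width C fallback in the 1D loops is gone, so every call site has to guarantee those conditions itself. Below is a minimal caller-side sketch of that contract, assuming the sse2 instantiation these macros generate (vpx_convolve8_sse2) and libvpx's plain C version (vpx_convolve8_c); convolve8_checked itself is a hypothetical name, not part of the patch.

#include <stddef.h>
#include <stdint.h>

/* Real libvpx entry points this sketch assumes; both take the same
 * argument list as the macro-generated functions in the patch. */
void vpx_convolve8_sse2(const uint8_t *src, ptrdiff_t src_stride,
                        uint8_t *dst, ptrdiff_t dst_stride,
                        const int16_t *filter_x, int x_step_q4,
                        const int16_t *filter_y, int y_step_q4,
                        int w, int h);
void vpx_convolve8_c(const uint8_t *src, ptrdiff_t src_stride,
                     uint8_t *dst, ptrdiff_t dst_stride,
                     const int16_t *filter_x, int x_step_q4,
                     const int16_t *filter_y, int y_step_q4,
                     int w, int h);

/* Hypothetical wrapper: performs up front the checks that this patch
 * demotes from runtime branches to asserts inside the SIMD functions. */
static void convolve8_checked(const uint8_t *src, ptrdiff_t src_stride,
                              uint8_t *dst, ptrdiff_t dst_stride,
                              const int16_t *filter_x, int x_step_q4,
                              const int16_t *filter_y, int y_step_q4,
                              int w, int h) {
  if (x_step_q4 == 16 && y_step_q4 == 16 &&      /* unscaled motion only */
      filter_x[3] != 128 && filter_y[3] != 128 && /* no 128 center tap */
      (w & 3) == 0 && w <= 64 && h <= 64) {      /* loops cover w = 16/8/4 */
    vpx_convolve8_sse2(src, src_stride, dst, dst_stride,
                       filter_x, x_step_q4, filter_y, y_step_q4, w, h);
  } else {
    /* Everything else takes the C path, mirroring the fallback branches
     * that the patch removes from inside the optimized functions. */
    vpx_convolve8_c(src, src_stride, dst, dst_stride,
                    filter_x, x_step_q4, filter_y, y_step_q4, w, h);
  }
}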
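
One detail the second hunk keeps is the sizing of the fdata2 scratch buffer: an 8-tap vertical pass reads 3 rows above and 4 rows below each output row, so the horizontal pass runs on h + 7 rows starting at src - 3 * src_stride and the vertical pass starts at fdata2 + 3 * 64; the 2-tap path needs only one extra row. With w and h capped at 64 and a fixed intermediate stride of 64, that gives the 64 * 71 and 64 * 65 byte buffers above. A one-function sketch of that arithmetic (fdata2_rows is a hypothetical name):

#include <assert.h>

/* Rows the horizontal pass must produce so the vertical pass never reads
 * outside fdata2; numbers taken from the DECLARE_ALIGNED sizes above. */
static int fdata2_rows(int h, int eight_tap) {
  assert(h <= 64);
  return h + (eight_tap ? 7 : 1); /* worst case 71 or 65 rows of 64 bytes */
}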