| Index: source/libvpx/vp9/common/vp9_reconinter.c
|
| ===================================================================
|
| --- source/libvpx/vp9/common/vp9_reconinter.c (revision 177019)
|
| +++ source/libvpx/vp9/common/vp9_reconinter.c (working copy)
|
| @@ -9,7 +9,7 @@
|
| */
|
|
|
|
|
| -#include "vpx_ports/config.h"
|
| +#include "./vpx_config.h"
|
| #include "vpx/vpx_integer.h"
|
| #include "vp9/common/vp9_blockd.h"
|
| #include "vp9/common/vp9_reconinter.h"
|
| @@ -18,45 +18,58 @@
|
| void vp9_setup_interp_filters(MACROBLOCKD *xd,
|
| INTERPOLATIONFILTERTYPE mcomp_filter_type,
|
| VP9_COMMON *cm) {
|
| +#if CONFIG_ENABLE_6TAP
|
| if (mcomp_filter_type == SIXTAP) {
|
| - xd->subpixel_predict = vp9_sixtap_predict;
|
| + xd->subpixel_predict4x4 = vp9_sixtap_predict4x4;
|
| xd->subpixel_predict8x4 = vp9_sixtap_predict8x4;
|
| xd->subpixel_predict8x8 = vp9_sixtap_predict8x8;
|
| xd->subpixel_predict16x16 = vp9_sixtap_predict16x16;
|
| - xd->subpixel_predict_avg = vp9_sixtap_predict_avg;
|
| + xd->subpixel_predict_avg4x4 = vp9_sixtap_predict_avg4x4;
|
| xd->subpixel_predict_avg8x8 = vp9_sixtap_predict_avg8x8;
|
| xd->subpixel_predict_avg16x16 = vp9_sixtap_predict_avg16x16;
|
| - } else if (mcomp_filter_type == EIGHTTAP || mcomp_filter_type == SWITCHABLE) {
|
| - xd->subpixel_predict = vp9_eighttap_predict;
|
| + } else {
|
| +#endif
|
| + if (mcomp_filter_type == EIGHTTAP || mcomp_filter_type == SWITCHABLE) {
|
| + xd->subpixel_predict4x4 = vp9_eighttap_predict4x4;
|
| xd->subpixel_predict8x4 = vp9_eighttap_predict8x4;
|
| xd->subpixel_predict8x8 = vp9_eighttap_predict8x8;
|
| xd->subpixel_predict16x16 = vp9_eighttap_predict16x16;
|
| - xd->subpixel_predict_avg = vp9_eighttap_predict_avg4x4;
|
| + xd->subpixel_predict_avg4x4 = vp9_eighttap_predict_avg4x4;
|
| xd->subpixel_predict_avg8x8 = vp9_eighttap_predict_avg8x8;
|
| xd->subpixel_predict_avg16x16 = vp9_eighttap_predict_avg16x16;
|
| + } else if (mcomp_filter_type == EIGHTTAP_SMOOTH) {
|
| + xd->subpixel_predict4x4 = vp9_eighttap_predict4x4_smooth;
|
| + xd->subpixel_predict8x4 = vp9_eighttap_predict8x4_smooth;
|
| + xd->subpixel_predict8x8 = vp9_eighttap_predict8x8_smooth;
|
| + xd->subpixel_predict16x16 = vp9_eighttap_predict16x16_smooth;
|
| + xd->subpixel_predict_avg4x4 = vp9_eighttap_predict_avg4x4_smooth;
|
| + xd->subpixel_predict_avg8x8 = vp9_eighttap_predict_avg8x8_smooth;
|
| + xd->subpixel_predict_avg16x16 = vp9_eighttap_predict_avg16x16_smooth;
|
| } else if (mcomp_filter_type == EIGHTTAP_SHARP) {
|
| - xd->subpixel_predict = vp9_eighttap_predict_sharp;
|
| + xd->subpixel_predict4x4 = vp9_eighttap_predict4x4_sharp;
|
| xd->subpixel_predict8x4 = vp9_eighttap_predict8x4_sharp;
|
| xd->subpixel_predict8x8 = vp9_eighttap_predict8x8_sharp;
|
| xd->subpixel_predict16x16 = vp9_eighttap_predict16x16_sharp;
|
| - xd->subpixel_predict_avg = vp9_eighttap_predict_avg4x4_sharp;
|
| + xd->subpixel_predict_avg4x4 = vp9_eighttap_predict_avg4x4_sharp;
|
| xd->subpixel_predict_avg8x8 = vp9_eighttap_predict_avg8x8_sharp;
|
| xd->subpixel_predict_avg16x16 = vp9_eighttap_predict_avg16x16_sharp_c;
|
| - }
|
| - else {
|
| - xd->subpixel_predict = vp9_bilinear_predict4x4;
|
| + } else {
|
| + xd->subpixel_predict4x4 = vp9_bilinear_predict4x4;
|
| xd->subpixel_predict8x4 = vp9_bilinear_predict8x4;
|
| xd->subpixel_predict8x8 = vp9_bilinear_predict8x8;
|
| xd->subpixel_predict16x16 = vp9_bilinear_predict16x16;
|
| - xd->subpixel_predict_avg = vp9_bilinear_predict_avg4x4;
|
| + xd->subpixel_predict_avg4x4 = vp9_bilinear_predict_avg4x4;
|
| xd->subpixel_predict_avg8x8 = vp9_bilinear_predict_avg8x8;
|
| xd->subpixel_predict_avg16x16 = vp9_bilinear_predict_avg16x16;
|
| }
|
| +#if CONFIG_ENABLE_6TAP
|
| + }
|
| +#endif
|
| }
|
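The per-size pointers installed above all share the vp9_subpix_fn_t shape used by the call sites later in this patch: (src, src_stride, xoffset, yoffset, dst, dst_stride), where the two offsets are 4-bit, 1/16-pel phases. A minimal sketch of how a caller picks between the filtered path and the plain copy path, assuming only what is visible in this file (the wrapper name predict_16x16_sketch is hypothetical):

  /* Sketch only: hypothetical wrapper; the callee signatures are taken
   * from the call sites further down in this file. */
  static void predict_16x16_sketch(MACROBLOCKD *xd,
                                   uint8_t *src, int src_stride,
                                   int mv_row, int mv_col,  /* 1/8-pel units */
                                   uint8_t *dst, int dst_stride) {
    if ((mv_row | mv_col) & 7) {
      /* Fractional motion: the 1/8-pel remainder is doubled to the
       * 1/16-pel phase the subpixel predictors expect. */
      xd->subpixel_predict16x16(src, src_stride,
                                (mv_col & 7) << 1, (mv_row & 7) << 1,
                                dst, dst_stride);
    } else {
      /* Integer motion: a straight copy is enough. */
      vp9_copy_mem16x16_c(src, src_stride, dst, dst_stride);
    }
  }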
|
|
| -void vp9_copy_mem16x16_c(unsigned char *src,
|
| +void vp9_copy_mem16x16_c(uint8_t *src,
|
| int src_stride,
|
| - unsigned char *dst,
|
| + uint8_t *dst,
|
| int dst_stride) {
|
| int r;
|
|
|
| @@ -91,9 +104,9 @@
|
| }
|
| }
|
|
|
| -void vp9_avg_mem16x16_c(unsigned char *src,
|
| +void vp9_avg_mem16x16_c(uint8_t *src,
|
| int src_stride,
|
| - unsigned char *dst,
|
| + uint8_t *dst,
|
| int dst_stride) {
|
| int r;
|
|
|
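The signature-only hunks around here leave the copy/avg loop bodies untouched, so they do not appear in the patch. For reference, the averaging variants are what the compound-prediction path further down relies on; their body is presumably the usual rounded per-pixel average, roughly as follows (an assumption about the unchanged code, not part of the patch; r, src and dst are the names already in scope in vp9_avg_mem16x16_c):

  /* Assumed body of vp9_avg_mem16x16_c; the 8x8 variant should differ
   * only in the loop bounds. */
  for (r = 0; r < 16; r++) {
    int c;
    for (c = 0; c < 16; c++)
      dst[c] = (dst[c] + src[c] + 1) >> 1;  /* round-to-nearest average */
    src += src_stride;
    dst += dst_stride;
  }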
| @@ -109,9 +122,9 @@
|
| }
|
| }
|
|
|
| -void vp9_copy_mem8x8_c(unsigned char *src,
|
| +void vp9_copy_mem8x8_c(uint8_t *src,
|
| int src_stride,
|
| - unsigned char *dst,
|
| + uint8_t *dst,
|
| int dst_stride) {
|
| int r;
|
|
|
| @@ -134,9 +147,9 @@
|
| }
|
| }
|
|
|
| -void vp9_avg_mem8x8_c(unsigned char *src,
|
| +void vp9_avg_mem8x8_c(uint8_t *src,
|
| int src_stride,
|
| - unsigned char *dst,
|
| + uint8_t *dst,
|
| int dst_stride) {
|
| int r;
|
|
|
| @@ -152,9 +165,9 @@
|
| }
|
| }
|
|
|
| -void vp9_copy_mem8x4_c(unsigned char *src,
|
| +void vp9_copy_mem8x4_c(uint8_t *src,
|
| int src_stride,
|
| - unsigned char *dst,
|
| + uint8_t *dst,
|
| int dst_stride) {
|
| int r;
|
|
|
| @@ -179,9 +192,9 @@
|
|
|
| void vp9_build_inter_predictors_b(BLOCKD *d, int pitch, vp9_subpix_fn_t sppf) {
|
| int r;
|
| - unsigned char *ptr_base;
|
| - unsigned char *ptr;
|
| - unsigned char *pred_ptr = d->predictor;
|
| + uint8_t *ptr_base;
|
| + uint8_t *ptr;
|
| + uint8_t *pred_ptr = d->predictor;
|
| int_mv mv;
|
|
|
| ptr_base = *(d->base_pre);
|
| @@ -221,9 +234,9 @@
|
| void vp9_build_2nd_inter_predictors_b(BLOCKD *d, int pitch,
|
| vp9_subpix_fn_t sppf) {
|
| int r;
|
| - unsigned char *ptr_base;
|
| - unsigned char *ptr;
|
| - unsigned char *pred_ptr = d->predictor;
|
| + uint8_t *ptr_base;
|
| + uint8_t *ptr;
|
| + uint8_t *pred_ptr = d->predictor;
|
| int_mv mv;
|
|
|
| ptr_base = *(d->base_second_pre);
|
| @@ -251,9 +264,9 @@
|
| }
|
|
|
| void vp9_build_inter_predictors4b(MACROBLOCKD *xd, BLOCKD *d, int pitch) {
|
| - unsigned char *ptr_base;
|
| - unsigned char *ptr;
|
| - unsigned char *pred_ptr = d->predictor;
|
| + uint8_t *ptr_base;
|
| + uint8_t *ptr;
|
| + uint8_t *pred_ptr = d->predictor;
|
| int_mv mv;
|
|
|
| ptr_base = *(d->base_pre);
|
| @@ -277,9 +290,9 @@
|
| */
|
| void vp9_build_2nd_inter_predictors4b(MACROBLOCKD *xd,
|
| BLOCKD *d, int pitch) {
|
| - unsigned char *ptr_base;
|
| - unsigned char *ptr;
|
| - unsigned char *pred_ptr = d->predictor;
|
| + uint8_t *ptr_base;
|
| + uint8_t *ptr;
|
| + uint8_t *pred_ptr = d->predictor;
|
| int_mv mv;
|
|
|
| ptr_base = *(d->base_second_pre);
|
| @@ -296,9 +309,9 @@
|
| }
|
|
|
| static void build_inter_predictors2b(MACROBLOCKD *xd, BLOCKD *d, int pitch) {
|
| - unsigned char *ptr_base;
|
| - unsigned char *ptr;
|
| - unsigned char *pred_ptr = d->predictor;
|
| + uint8_t *ptr_base;
|
| + uint8_t *ptr;
|
| + uint8_t *pred_ptr = d->predictor;
|
| int_mv mv;
|
|
|
| ptr_base = *(d->base_pre);
|
| @@ -314,133 +327,7 @@
|
| }
|
| }
|
|
|
| -
|
| /*encoder only*/
|
| -#if CONFIG_PRED_FILTER
|
| -
|
| -// Select the thresholded or non-thresholded filter
|
| -#define USE_THRESH_FILTER 0
|
| -
|
| -#define PRED_FILT_LEN 5
|
| -
|
| -static const int filt_shift = 4;
|
| -static const int pred_filter[PRED_FILT_LEN] = {1, 2, 10, 2, 1};
|
| -// Alternative filter {1, 1, 4, 1, 1}
|
| -
|
| -#if !USE_THRESH_FILTER
|
| -void filter_mb(unsigned char *src, int src_stride,
|
| - unsigned char *dst, int dst_stride,
|
| - int width, int height) {
|
| - int i, j, k;
|
| - unsigned int Temp[32 * 32];
|
| - unsigned int *pTmp = Temp;
|
| - unsigned char *pSrc = src - (1 + src_stride) * (PRED_FILT_LEN / 2);
|
| -
|
| - // Horizontal
|
| - for (i = 0; i < height + PRED_FILT_LEN - 1; i++) {
|
| - for (j = 0; j < width; j++) {
|
| - int sum = 0;
|
| - for (k = 0; k < PRED_FILT_LEN; k++)
|
| - sum += pSrc[j + k] * pred_filter[k];
|
| - pTmp[j] = sum;
|
| - }
|
| -
|
| - pSrc += src_stride;
|
| - pTmp += width;
|
| - }
|
| -
|
| - // Vertical
|
| - pTmp = Temp;
|
| - for (i = 0; i < width; i++) {
|
| - unsigned char *pDst = dst + i;
|
| - for (j = 0; j < height; j++) {
|
| - int sum = 0;
|
| - for (k = 0; k < PRED_FILT_LEN; k++)
|
| - sum += pTmp[(j + k) * width] * pred_filter[k];
|
| - // Round
|
| - sum = (sum + ((1 << (filt_shift << 1)) >> 1)) >> (filt_shift << 1);
|
| - pDst[j * dst_stride] = (sum < 0 ? 0 : sum > 255 ? 255 : sum);
|
| - }
|
| - ++pTmp;
|
| - }
|
| -}
|
| -#else
|
| -// Based on vp9_post_proc_down_and_across_c (vp9_postproc.c)
|
| -void filter_mb(unsigned char *src, int src_stride,
|
| - unsigned char *dst, int dst_stride,
|
| - int width, int height) {
|
| - unsigned char *pSrc, *pDst;
|
| - int row;
|
| - int col;
|
| - int i;
|
| - int v;
|
| - unsigned char d[8];
|
| -
|
| - /* TODO flimit should be linked to the quantizer value */
|
| - int flimit = 7;
|
| -
|
| - for (row = 0; row < height; row++) {
|
| - /* post_proc_down for one row */
|
| - pSrc = src;
|
| - pDst = dst;
|
| -
|
| - for (col = 0; col < width; col++) {
|
| - int kernel = (1 << (filt_shift - 1));
|
| - int v = pSrc[col];
|
| -
|
| - for (i = -2; i <= 2; i++) {
|
| - if (abs(v - pSrc[col + i * src_stride]) > flimit)
|
| - goto down_skip_convolve;
|
| -
|
| - kernel += pred_filter[2 + i] * pSrc[col + i * src_stride];
|
| - }
|
| -
|
| - v = (kernel >> filt_shift);
|
| - down_skip_convolve:
|
| - pDst[col] = v;
|
| - }
|
| -
|
| - /* now post_proc_across */
|
| - pSrc = dst;
|
| - pDst = dst;
|
| -
|
| - for (i = 0; i < 8; i++)
|
| - d[i] = pSrc[i];
|
| -
|
| - for (col = 0; col < width; col++) {
|
| - int kernel = (1 << (filt_shift - 1));
|
| - v = pSrc[col];
|
| -
|
| - d[col & 7] = v;
|
| -
|
| - for (i = -2; i <= 2; i++) {
|
| - if (abs(v - pSrc[col + i]) > flimit)
|
| - goto across_skip_convolve;
|
| -
|
| - kernel += pred_filter[2 + i] * pSrc[col + i];
|
| - }
|
| -
|
| - d[col & 7] = (kernel >> filt_shift);
|
| - across_skip_convolve:
|
| -
|
| - if (col >= 2)
|
| - pDst[col - 2] = d[(col - 2) & 7];
|
| - }
|
| -
|
| - /* handle the last two pixels */
|
| - pDst[col - 2] = d[(col - 2) & 7];
|
| - pDst[col - 1] = d[(col - 1) & 7];
|
| -
|
| - /* next row */
|
| - src += src_stride;
|
| - dst += dst_stride;
|
| - }
|
| -}
|
| -#endif // !USE_THRESH_FILTER
|
| -
|
| -#endif // CONFIG_PRED_FILTER
|
| -
|
| -/*encoder only*/
|
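A note on what the deleted CONFIG_PRED_FILTER block did, since its arithmetic is easy to misread: it ran a separable 5-tap smoothing filter {1, 2, 10, 2, 1} (taps sum to 16 = 1 << filt_shift) over the prediction, accumulating the horizontal pass unshifted and normalizing both passes in the vertical stage. Restating the deleted rounding line with filt_shift = 4, nothing new added:

  /* gain per pass = 16, applied twice -> combined gain 256              */
  /* sum = (sum + ((1 << (filt_shift << 1)) >> 1)) >> (filt_shift << 1)  */
  /*     = (sum + 128) >> 8     -- round to nearest, then clamp 0..255   */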
| void vp9_build_inter4x4_predictors_mbuv(MACROBLOCKD *xd) {
|
| int i, j;
|
| BLOCKD *blockd = xd->block;
|
| @@ -524,13 +411,13 @@
|
| if (d0->bmi.as_mv.first.as_int == d1->bmi.as_mv.first.as_int)
|
| build_inter_predictors2b(xd, d0, 8);
|
| else {
|
| - vp9_build_inter_predictors_b(d0, 8, xd->subpixel_predict);
|
| - vp9_build_inter_predictors_b(d1, 8, xd->subpixel_predict);
|
| + vp9_build_inter_predictors_b(d0, 8, xd->subpixel_predict4x4);
|
| + vp9_build_inter_predictors_b(d1, 8, xd->subpixel_predict4x4);
|
| }
|
|
|
| if (xd->mode_info_context->mbmi.second_ref_frame > 0) {
|
| - vp9_build_2nd_inter_predictors_b(d0, 8, xd->subpixel_predict_avg);
|
| - vp9_build_2nd_inter_predictors_b(d1, 8, xd->subpixel_predict_avg);
|
| + vp9_build_2nd_inter_predictors_b(d0, 8, xd->subpixel_predict_avg4x4);
|
| + vp9_build_2nd_inter_predictors_b(d1, 8, xd->subpixel_predict_avg4x4);
|
| }
|
| }
|
| }
|
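The 4x4 path above keeps the existing pairing idiom: when two adjacent blocks carry the same first motion vector, one 8x4 filter call via build_inter_predictors2b() covers both; otherwise each block goes through subpixel_predict4x4 on its own, and a second reference (if present) is blended in afterwards through the _avg4x4 variants. The same d0/d1 pattern recurs twice more near the end of this patch, with pitch 8 here and 16 at the later call sites. Condensed into one hypothetical helper (a sketch, not part of the change) it reads:

  static void predict_4x4_pair(MACROBLOCKD *xd, BLOCKD *d0, BLOCKD *d1,
                               int pitch) {
    if (d0->bmi.as_mv.first.as_int == d1->bmi.as_mv.first.as_int) {
      build_inter_predictors2b(xd, d0, pitch);    /* one 8x4 filter pass */
    } else {
      vp9_build_inter_predictors_b(d0, pitch, xd->subpixel_predict4x4);
      vp9_build_inter_predictors_b(d1, pitch, xd->subpixel_predict4x4);
    }
    if (xd->mode_info_context->mbmi.second_ref_frame > 0) {
      /* Second reference: average into the prediction written above. */
      vp9_build_2nd_inter_predictors_b(d0, pitch, xd->subpixel_predict_avg4x4);
      vp9_build_2nd_inter_predictors_b(d1, pitch, xd->subpixel_predict_avg4x4);
    }
  }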
| @@ -573,11 +460,11 @@
|
|
|
| /*encoder only*/
|
| void vp9_build_1st_inter16x16_predictors_mby(MACROBLOCKD *xd,
|
| - unsigned char *dst_y,
|
| + uint8_t *dst_y,
|
| int dst_ystride,
|
| int clamp_mvs) {
|
| - unsigned char *ptr_base = xd->pre.y_buffer;
|
| - unsigned char *ptr;
|
| + uint8_t *ptr_base = xd->pre.y_buffer;
|
| + uint8_t *ptr;
|
| int pre_stride = xd->block[0].pre_stride;
|
| int_mv ymv;
|
|
|
| @@ -588,30 +475,7 @@
|
|
|
| ptr = ptr_base + (ymv.as_mv.row >> 3) * pre_stride + (ymv.as_mv.col >> 3);
|
|
|
| -#if CONFIG_PRED_FILTER
|
| - if (xd->mode_info_context->mbmi.pred_filter_enabled) {
|
| if ((ymv.as_mv.row | ymv.as_mv.col) & 7) {
|
| - // Sub-pel filter needs extended input
|
| - int len = 15 + (VP9_INTERP_EXTEND << 1);
|
| - unsigned char Temp[32 * 32]; // Data required by sub-pel filter
|
| - unsigned char *pTemp = Temp + (VP9_INTERP_EXTEND - 1) * (len + 1);
|
| -
|
| - // Copy extended MB into Temp array, applying the spatial filter
|
| - filter_mb(ptr - (VP9_INTERP_EXTEND - 1) * (pre_stride + 1), pre_stride,
|
| - Temp, len, len, len);
|
| -
|
| - // Sub-pel interpolation
|
| - xd->subpixel_predict16x16(pTemp, len,
|
| - (ymv.as_mv.col & 7) << 1,
|
| - (ymv.as_mv.row & 7) << 1,
|
| - dst_y, dst_ystride);
|
| - } else {
|
| - // Apply spatial filter to create the prediction directly
|
| - filter_mb(ptr, pre_stride, dst_y, dst_ystride, 16, 16);
|
| - }
|
| - } else
|
| -#endif
|
| - if ((ymv.as_mv.row | ymv.as_mv.col) & 7) {
|
| xd->subpixel_predict16x16(ptr, pre_stride,
|
| (ymv.as_mv.col & 7) << 1,
|
| (ymv.as_mv.row & 7) << 1,
|
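The luma path above splits the 1/8-pel motion vector into an integer part that moves the reference pointer (the >> 3 in the ptr computation at the top of the hunk) and a fractional part that selects the interpolation phase. A worked example, with illustrative numbers only:

  int mv_col = 21;                 /* 2 + 5/8 pel of horizontal motion  */
  int full   = mv_col >> 3;        /* 2: whole-pixel step into the ref  */
  int phase  = (mv_col & 7) << 1;  /* 10: 5/8 pel as a 1/16-pel index   */
  /* If both components are multiples of 8, ((row | col) & 7) == 0 and  */
  /* the else branch (not shown in this hunk) takes the unfiltered      */
  /* copy path instead.                                                 */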
| @@ -622,11 +486,11 @@
|
| }
|
|
|
| void vp9_build_1st_inter16x16_predictors_mbuv(MACROBLOCKD *xd,
|
| - unsigned char *dst_u,
|
| - unsigned char *dst_v,
|
| + uint8_t *dst_u,
|
| + uint8_t *dst_v,
|
| int dst_uvstride) {
|
| int offset;
|
| - unsigned char *uptr, *vptr;
|
| + uint8_t *uptr, *vptr;
|
| int pre_stride = xd->block[0].pre_stride;
|
| int_mv _o16x16mv;
|
| int_mv _16x16mv;
|
| @@ -659,37 +523,6 @@
|
| uptr = xd->pre.u_buffer + offset;
|
| vptr = xd->pre.v_buffer + offset;
|
|
|
| -#if CONFIG_PRED_FILTER
|
| - if (xd->mode_info_context->mbmi.pred_filter_enabled) {
|
| - int i;
|
| - unsigned char *pSrc = uptr;
|
| - unsigned char *pDst = dst_u;
|
| - int len = 7 + (VP9_INTERP_EXTEND << 1);
|
| - unsigned char Temp[32 * 32]; // Data required by the sub-pel filter
|
| - unsigned char *pTemp = Temp + (VP9_INTERP_EXTEND - 1) * (len + 1);
|
| -
|
| - // U & V
|
| - for (i = 0; i < 2; i++) {
|
| - if (_o16x16mv.as_int & 0x000f000f) {
|
| - // Copy extended MB into Temp array, applying the spatial filter
|
| - filter_mb(pSrc - (VP9_INTERP_EXTEND - 1) * (pre_stride + 1), pre_stride,
|
| - Temp, len, len, len);
|
| -
|
| - // Sub-pel filter
|
| - xd->subpixel_predict8x8(pTemp, len,
|
| - _o16x16mv.as_mv.col & 15,
|
| - _o16x16mv.as_mv.row & 15,
|
| - pDst, dst_uvstride);
|
| - } else {
|
| - filter_mb(pSrc, pre_stride, pDst, dst_uvstride, 8, 8);
|
| - }
|
| -
|
| - // V
|
| - pSrc = vptr;
|
| - pDst = dst_v;
|
| - }
|
| - } else
|
| -#endif
|
| if (_o16x16mv.as_int & 0x000f000f) {
|
| xd->subpixel_predict8x8(uptr, pre_stride, _o16x16mv.as_mv.col & 15,
|
| _o16x16mv.as_mv.row & 15, dst_u, dst_uvstride);
|
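The chroma test above folds both vector components into one operation: int_mv stores the 16-bit row and column on top of a 32-bit as_int, so masking as_int with 0x000f000f checks the low four bits (the 1/16-pel remainder) of both fields at once, regardless of sign. Spelled out, this is only an equivalent form of the existing test:

  /* (_o16x16mv.as_int & 0x000f000f) != 0                             */
  /*   is the same test as                                            */
  /* ((_o16x16mv.as_mv.row & 15) | (_o16x16mv.as_mv.col & 15)) != 0   */

Chroma works at 1/16-pel granularity because the U/V planes are half the luma resolution, so a 1/8-pel luma vector lands on 1/16-pel chroma positions.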
| @@ -703,20 +536,19 @@
|
|
|
|
|
| void vp9_build_1st_inter16x16_predictors_mb(MACROBLOCKD *xd,
|
| - unsigned char *dst_y,
|
| - unsigned char *dst_u,
|
| - unsigned char *dst_v,
|
| + uint8_t *dst_y,
|
| + uint8_t *dst_u,
|
| + uint8_t *dst_v,
|
| int dst_ystride, int dst_uvstride) {
|
| vp9_build_1st_inter16x16_predictors_mby(xd, dst_y, dst_ystride,
|
| xd->mode_info_context->mbmi.need_to_clamp_mvs);
|
| vp9_build_1st_inter16x16_predictors_mbuv(xd, dst_u, dst_v, dst_uvstride);
|
| }
|
|
|
| -#if CONFIG_SUPERBLOCKS
|
| void vp9_build_inter32x32_predictors_sb(MACROBLOCKD *x,
|
| - unsigned char *dst_y,
|
| - unsigned char *dst_u,
|
| - unsigned char *dst_v,
|
| + uint8_t *dst_y,
|
| + uint8_t *dst_u,
|
| + uint8_t *dst_v,
|
| int dst_ystride,
|
| int dst_uvstride) {
|
| uint8_t *y1 = x->pre.y_buffer, *u1 = x->pre.u_buffer, *v1 = x->pre.v_buffer;
|
| @@ -781,14 +613,77 @@
|
| }
|
| #endif
|
| }
|
| +
|
| +void vp9_build_inter64x64_predictors_sb(MACROBLOCKD *x,
|
| + uint8_t *dst_y,
|
| + uint8_t *dst_u,
|
| + uint8_t *dst_v,
|
| + int dst_ystride,
|
| + int dst_uvstride) {
|
| + uint8_t *y1 = x->pre.y_buffer, *u1 = x->pre.u_buffer, *v1 = x->pre.v_buffer;
|
| + uint8_t *y2 = x->second_pre.y_buffer, *u2 = x->second_pre.u_buffer,
|
| + *v2 = x->second_pre.v_buffer;
|
| + int edge[4], n;
|
| +
|
| + edge[0] = x->mb_to_top_edge;
|
| + edge[1] = x->mb_to_bottom_edge;
|
| + edge[2] = x->mb_to_left_edge;
|
| + edge[3] = x->mb_to_right_edge;
|
| +
|
| + for (n = 0; n < 4; n++) {
|
| + const int x_idx = n & 1, y_idx = n >> 1;
|
| +
|
| + x->mb_to_top_edge = edge[0] - ((y_idx * 32) << 3);
|
| + x->mb_to_bottom_edge = edge[1] + (((1 - y_idx) * 32) << 3);
|
| + x->mb_to_left_edge = edge[2] - ((x_idx * 32) << 3);
|
| + x->mb_to_right_edge = edge[3] + (((1 - x_idx) * 32) << 3);
|
| +
|
| + x->pre.y_buffer = y1 + y_idx * 32 * x->pre.y_stride + x_idx * 32;
|
| + x->pre.u_buffer = u1 + y_idx * 16 * x->pre.uv_stride + x_idx * 16;
|
| + x->pre.v_buffer = v1 + y_idx * 16 * x->pre.uv_stride + x_idx * 16;
|
| +
|
| + if (x->mode_info_context->mbmi.second_ref_frame > 0) {
|
| + x->second_pre.y_buffer = y2 + y_idx * 32 * x->pre.y_stride + x_idx * 32;
|
| + x->second_pre.u_buffer = u2 + y_idx * 16 * x->pre.uv_stride + x_idx * 16;
|
| + x->second_pre.v_buffer = v2 + y_idx * 16 * x->pre.uv_stride + x_idx * 16;
|
| + }
|
| +
|
| + vp9_build_inter32x32_predictors_sb(x,
|
| + dst_y + y_idx * 32 * dst_ystride + x_idx * 32,
|
| + dst_u + y_idx * 16 * dst_uvstride + x_idx * 16,
|
| + dst_v + y_idx * 16 * dst_uvstride + x_idx * 16,
|
| + dst_ystride, dst_uvstride);
|
| + }
|
| +
|
| + x->mb_to_top_edge = edge[0];
|
| + x->mb_to_bottom_edge = edge[1];
|
| + x->mb_to_left_edge = edge[2];
|
| + x->mb_to_right_edge = edge[3];
|
| +
|
| + x->pre.y_buffer = y1;
|
| + x->pre.u_buffer = u1;
|
| + x->pre.v_buffer = v1;
|
| +
|
| + if (x->mode_info_context->mbmi.second_ref_frame > 0) {
|
| + x->second_pre.y_buffer = y2;
|
| + x->second_pre.u_buffer = u2;
|
| + x->second_pre.v_buffer = v2;
|
| + }
|
| +
|
| +#if CONFIG_COMP_INTERINTRA_PRED
|
| + if (x->mode_info_context->mbmi.second_ref_frame == INTRA_FRAME) {
|
| + vp9_build_interintra_64x64_predictors_sb(x, dst_y, dst_u, dst_v,
|
| + dst_ystride, dst_uvstride);
|
| + }
|
| #endif
|
| +}
|
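The new 64x64 entry point reuses the 32x32 builder on a 2x2 grid instead of duplicating any prediction logic; the only bookkeeping is clamp and pointer adjustment. The mb_to_*_edge fields are kept in 1/8-pel units, hence the << 3 on the 32-pixel steps, and the chroma offsets are 16 because U/V are at half resolution. For the top-right quadrant the adjustments expand to:

  /* x_idx = 1, y_idx = 0                                            */
  /* mb_to_left_edge  -= (1 * 32) << 3;   i.e. 256 eighth-pel units  */
  /* mb_to_right_edge += (0 * 32) << 3;   i.e. unchanged             */
  /* luma   buffer offset: 0 * 32 * y_stride  + 1 * 32               */
  /* chroma buffer offset: 0 * 16 * uv_stride + 1 * 16               */

The saved edge[] values and reference pointers are restored at the end, so the caller sees the MACROBLOCKD unchanged.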
|
|
| /*
|
| * The following functions should be called after an initial
|
| * call to vp9_build_1st_inter16x16_predictors_mb() or _mby()/_mbuv().
|
| - * It will run a second sixtap filter on a (different) ref
|
| + * It will run a second filter on a (different) ref
|
| * frame and average the result with the output of the
|
| - * first sixtap filter. The second reference frame is stored
|
| + * first filter. The second reference frame is stored
|
| * in x->second_pre (the reference frame index is in
|
| * x->mode_info_context->mbmi.second_ref_frame). The second
|
| * motion vector is x->mode_info_context->mbmi.second_mv.
|
| @@ -798,15 +693,15 @@
|
| * single reference framer.
|
| */
|
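The comment above describes a two-pass contract. As an illustrative sketch (the wrapper name is hypothetical; the two entry points and the second_ref_frame test are the ones this file already defines):

  static void build_compound_16x16_sketch(MACROBLOCKD *xd,
                                          uint8_t *dst_y, uint8_t *dst_u,
                                          uint8_t *dst_v,
                                          int dst_ystride, int dst_uvstride) {
    /* First pass: prediction from the first reference (xd->pre, mv[0]). */
    vp9_build_1st_inter16x16_predictors_mb(xd, dst_y, dst_u, dst_v,
                                           dst_ystride, dst_uvstride);
    /* Second pass: filter the second reference (xd->second_pre, mv[1])
     * and average the result into dst_*. */
    if (xd->mode_info_context->mbmi.second_ref_frame > 0)
      vp9_build_2nd_inter16x16_predictors_mb(xd, dst_y, dst_u, dst_v,
                                             dst_ystride, dst_uvstride);
  }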
| void vp9_build_2nd_inter16x16_predictors_mby(MACROBLOCKD *xd,
|
| - unsigned char *dst_y,
|
| + uint8_t *dst_y,
|
| int dst_ystride) {
|
| - unsigned char *ptr;
|
| + uint8_t *ptr;
|
|
|
| int_mv _16x16mv;
|
| int mv_row;
|
| int mv_col;
|
|
|
| - unsigned char *ptr_base = xd->second_pre.y_buffer;
|
| + uint8_t *ptr_base = xd->second_pre.y_buffer;
|
| int pre_stride = xd->block[0].pre_stride;
|
|
|
| _16x16mv.as_int = xd->mode_info_context->mbmi.mv[1].as_int;
|
| @@ -819,44 +714,20 @@
|
|
|
| ptr = ptr_base + (mv_row >> 3) * pre_stride + (mv_col >> 3);
|
|
|
| -#if CONFIG_PRED_FILTER
|
| - if (xd->mode_info_context->mbmi.pred_filter_enabled) {
|
| - if ((mv_row | mv_col) & 7) {
|
| - // Sub-pel filter needs extended input
|
| - int len = 15 + (VP9_INTERP_EXTEND << 1);
|
| - unsigned char Temp[32 * 32]; // Data required by sub-pel filter
|
| - unsigned char *pTemp = Temp + (VP9_INTERP_EXTEND - 1) * (len + 1);
|
| -
|
| - // Copy extended MB into Temp array, applying the spatial filter
|
| - filter_mb(ptr - (VP9_INTERP_EXTEND - 1) * (pre_stride + 1), pre_stride,
|
| - Temp, len, len, len);
|
| -
|
| - // Sub-pel filter
|
| - xd->subpixel_predict_avg16x16(pTemp, len, (mv_col & 7) << 1,
|
| - (mv_row & 7) << 1, dst_y, dst_ystride);
|
| - } else {
|
| - // TODO Needs to AVERAGE with the dst_y
|
| - // For now, do not apply the prediction filter in these cases!
|
| - vp9_avg_mem16x16(ptr, pre_stride, dst_y, dst_ystride);
|
| - }
|
| - } else
|
| -#endif // CONFIG_PRED_FILTER
|
| - {
|
| - if ((mv_row | mv_col) & 7) {
|
| - xd->subpixel_predict_avg16x16(ptr, pre_stride, (mv_col & 7) << 1,
|
| - (mv_row & 7) << 1, dst_y, dst_ystride);
|
| - } else {
|
| - vp9_avg_mem16x16(ptr, pre_stride, dst_y, dst_ystride);
|
| - }
|
| + if ((mv_row | mv_col) & 7) {
|
| + xd->subpixel_predict_avg16x16(ptr, pre_stride, (mv_col & 7) << 1,
|
| + (mv_row & 7) << 1, dst_y, dst_ystride);
|
| + } else {
|
| + vp9_avg_mem16x16(ptr, pre_stride, dst_y, dst_ystride);
|
| }
|
| }
|
|
|
| void vp9_build_2nd_inter16x16_predictors_mbuv(MACROBLOCKD *xd,
|
| - unsigned char *dst_u,
|
| - unsigned char *dst_v,
|
| + uint8_t *dst_u,
|
| + uint8_t *dst_v,
|
| int dst_uvstride) {
|
| int offset;
|
| - unsigned char *uptr, *vptr;
|
| + uint8_t *uptr, *vptr;
|
|
|
| int_mv _16x16mv;
|
| int mv_row;
|
| @@ -887,37 +758,6 @@
|
| uptr = xd->second_pre.u_buffer + offset;
|
| vptr = xd->second_pre.v_buffer + offset;
|
|
|
| -#if CONFIG_PRED_FILTER
|
| - if (xd->mode_info_context->mbmi.pred_filter_enabled) {
|
| - int i;
|
| - int len = 7 + (VP9_INTERP_EXTEND << 1);
|
| - unsigned char Temp[32 * 32]; // Data required by sub-pel filter
|
| - unsigned char *pTemp = Temp + (VP9_INTERP_EXTEND - 1) * (len + 1);
|
| - unsigned char *pSrc = uptr;
|
| - unsigned char *pDst = dst_u;
|
| -
|
| - // U & V
|
| - for (i = 0; i < 2; i++) {
|
| - if ((omv_row | omv_col) & 15) {
|
| - // Copy extended MB into Temp array, applying the spatial filter
|
| - filter_mb(pSrc - (VP9_INTERP_EXTEND - 1) * (pre_stride + 1), pre_stride,
|
| - Temp, len, len, len);
|
| -
|
| - // Sub-pel filter
|
| - xd->subpixel_predict_avg8x8(pTemp, len, omv_col & 15,
|
| - omv_row & 15, pDst, dst_uvstride);
|
| - } else {
|
| - // TODO Needs to AVERAGE with the dst_[u|v]
|
| - // For now, do not apply the prediction filter here!
|
| - vp9_avg_mem8x8(pSrc, pre_stride, pDst, dst_uvstride);
|
| - }
|
| -
|
| - // V
|
| - pSrc = vptr;
|
| - pDst = dst_v;
|
| - }
|
| - } else
|
| -#endif // CONFIG_PRED_FILTER
|
| if ((omv_row | omv_col) & 15) {
|
| xd->subpixel_predict_avg8x8(uptr, pre_stride, omv_col & 15,
|
| omv_row & 15, dst_u, dst_uvstride);
|
| @@ -930,9 +770,9 @@
|
| }
|
|
|
| void vp9_build_2nd_inter16x16_predictors_mb(MACROBLOCKD *xd,
|
| - unsigned char *dst_y,
|
| - unsigned char *dst_u,
|
| - unsigned char *dst_v,
|
| + uint8_t *dst_y,
|
| + uint8_t *dst_u,
|
| + uint8_t *dst_v,
|
| int dst_ystride,
|
| int dst_uvstride) {
|
| vp9_build_2nd_inter16x16_predictors_mby(xd, dst_y, dst_ystride);
|
| @@ -995,13 +835,13 @@
|
| if (d0->bmi.as_mv.first.as_int == d1->bmi.as_mv.first.as_int)
|
| build_inter_predictors2b(xd, d0, 16);
|
| else {
|
| - vp9_build_inter_predictors_b(d0, 16, xd->subpixel_predict);
|
| - vp9_build_inter_predictors_b(d1, 16, xd->subpixel_predict);
|
| + vp9_build_inter_predictors_b(d0, 16, xd->subpixel_predict4x4);
|
| + vp9_build_inter_predictors_b(d1, 16, xd->subpixel_predict4x4);
|
| }
|
|
|
| if (mbmi->second_ref_frame > 0) {
|
| - vp9_build_2nd_inter_predictors_b(d0, 16, xd->subpixel_predict_avg);
|
| - vp9_build_2nd_inter_predictors_b(d1, 16, xd->subpixel_predict_avg);
|
| + vp9_build_2nd_inter_predictors_b(d0, 16, xd->subpixel_predict_avg4x4);
|
| + vp9_build_2nd_inter_predictors_b(d1, 16, xd->subpixel_predict_avg4x4);
|
| }
|
| }
|
| }
|
| @@ -1013,13 +853,13 @@
|
| if (d0->bmi.as_mv.first.as_int == d1->bmi.as_mv.first.as_int)
|
| build_inter_predictors2b(xd, d0, 8);
|
| else {
|
| - vp9_build_inter_predictors_b(d0, 8, xd->subpixel_predict);
|
| - vp9_build_inter_predictors_b(d1, 8, xd->subpixel_predict);
|
| + vp9_build_inter_predictors_b(d0, 8, xd->subpixel_predict4x4);
|
| + vp9_build_inter_predictors_b(d1, 8, xd->subpixel_predict4x4);
|
| }
|
|
|
| if (mbmi->second_ref_frame > 0) {
|
| - vp9_build_2nd_inter_predictors_b(d0, 8, xd->subpixel_predict_avg);
|
| - vp9_build_2nd_inter_predictors_b(d1, 8, xd->subpixel_predict_avg);
|
| + vp9_build_2nd_inter_predictors_b(d0, 8, xd->subpixel_predict_avg4x4);
|
| + vp9_build_2nd_inter_predictors_b(d1, 8, xd->subpixel_predict_avg4x4);
|
| }
|
| }
|
| }
|
|
|