| Index: source/libvpx/vp9/common/vp9_reconinter.c
| ===================================================================
| --- source/libvpx/vp9/common/vp9_reconinter.c (revision 219822)
| +++ source/libvpx/vp9/common/vp9_reconinter.c (working copy)
| @@ -10,163 +10,17 @@
|
| #include <assert.h>
|
| +#include "./vpx_scale_rtcd.h"
| #include "./vpx_config.h"
| +
| #include "vpx/vpx_integer.h"
| +
| #include "vp9/common/vp9_blockd.h"
| #include "vp9/common/vp9_filter.h"
| #include "vp9/common/vp9_reconinter.h"
| #include "vp9/common/vp9_reconintra.h"
| -#include "./vpx_scale_rtcd.h"
|
| -static int scale_value_x_with_scaling(int val,
| -                                      const struct scale_factors *scale) {
| -  return (val * scale->x_scale_fp >> VP9_REF_SCALE_SHIFT);
| -}
|
| -static int scale_value_y_with_scaling(int val,
| -                                      const struct scale_factors *scale) {
| -  return (val * scale->y_scale_fp >> VP9_REF_SCALE_SHIFT);
| -}
| -
| -static int unscaled_value(int val, const struct scale_factors *scale) {
| -  (void) scale;
| -  return val;
| -}
| -
| -static MV32 mv_q3_to_q4_with_scaling(const MV *mv,
| -                                     const struct scale_factors *scale) {
| -  const MV32 res = {
| -    ((mv->row << 1) * scale->y_scale_fp >> VP9_REF_SCALE_SHIFT)
| -        + scale->y_offset_q4,
| -    ((mv->col << 1) * scale->x_scale_fp >> VP9_REF_SCALE_SHIFT)
| -        + scale->x_offset_q4
| -  };
| -  return res;
| -}
| -
| -static MV32 mv_q3_to_q4_without_scaling(const MV *mv,
| -                                        const struct scale_factors *scale) {
| -  const MV32 res = {
| -    mv->row << 1,
| -    mv->col << 1
| -  };
| -  return res;
| -}
| -
| -static MV32 mv_q4_with_scaling(const MV *mv,
| -                               const struct scale_factors *scale) {
| -  const MV32 res = {
| -    (mv->row * scale->y_scale_fp >> VP9_REF_SCALE_SHIFT) + scale->y_offset_q4,
| -    (mv->col * scale->x_scale_fp >> VP9_REF_SCALE_SHIFT) + scale->x_offset_q4
| -  };
| -  return res;
| -}
| -
| -static MV32 mv_q4_without_scaling(const MV *mv,
| -                                  const struct scale_factors *scale) {
| -  const MV32 res = {
| -    mv->row,
| -    mv->col
| -  };
| -  return res;
| -}
| -
| -static void set_offsets_with_scaling(struct scale_factors *scale,
| -                                     int row, int col) {
| -  const int x_q4 = 16 * col;
| -  const int y_q4 = 16 * row;
| -
| -  scale->x_offset_q4 = (x_q4 * scale->x_scale_fp >> VP9_REF_SCALE_SHIFT) & 0xf;
| -  scale->y_offset_q4 = (y_q4 * scale->y_scale_fp >> VP9_REF_SCALE_SHIFT) & 0xf;
| -}
| -
| -static void set_offsets_without_scaling(struct scale_factors *scale,
| -                                        int row, int col) {
| -  scale->x_offset_q4 = 0;
| -  scale->y_offset_q4 = 0;
| -}
| -
| -static int get_fixed_point_scale_factor(int other_size, int this_size) {
| -  // Calculate scaling factor once for each reference frame
| -  // and use fixed point scaling factors in decoding and encoding routines.
| -  // Hardware implementations can calculate scale factor in device driver
| -  // and use multiplication and shifting on hardware instead of division.
| -  return (other_size << VP9_REF_SCALE_SHIFT) / this_size;
| -}
| -
| -void vp9_setup_scale_factors_for_frame(struct scale_factors *scale,
| -                                       int other_w, int other_h,
| -                                       int this_w, int this_h) {
| -  scale->x_scale_fp = get_fixed_point_scale_factor(other_w, this_w);
| -  scale->x_offset_q4 = 0;  // calculated per-mb
| -  scale->x_step_q4 = (16 * scale->x_scale_fp >> VP9_REF_SCALE_SHIFT);
| -
| -  scale->y_scale_fp = get_fixed_point_scale_factor(other_h, this_h);
| -  scale->y_offset_q4 = 0;  // calculated per-mb
| -  scale->y_step_q4 = (16 * scale->y_scale_fp >> VP9_REF_SCALE_SHIFT);
| -
| -  if ((other_w == this_w) && (other_h == this_h)) {
| -    scale->scale_value_x = unscaled_value;
| -    scale->scale_value_y = unscaled_value;
| -    scale->set_scaled_offsets = set_offsets_without_scaling;
| -    scale->scale_mv_q3_to_q4 = mv_q3_to_q4_without_scaling;
| -    scale->scale_mv_q4 = mv_q4_without_scaling;
| -  } else {
| -    scale->scale_value_x = scale_value_x_with_scaling;
| -    scale->scale_value_y = scale_value_y_with_scaling;
| -    scale->set_scaled_offsets = set_offsets_with_scaling;
| -    scale->scale_mv_q3_to_q4 = mv_q3_to_q4_with_scaling;
| -    scale->scale_mv_q4 = mv_q4_with_scaling;
| -  }
| -
| -  // TODO(agrange): Investigate the best choice of functions to use here
| -  // for EIGHTTAP_SMOOTH. Since it is not interpolating, need to choose what
| -  // to do at full-pel offsets. The current selection, where the filter is
| -  // applied in one direction only, and not at all for 0,0, seems to give the
| -  // best quality, but it may be worth trying an additional mode that does
| -  // do the filtering on full-pel.
| -  if (scale->x_step_q4 == 16) {
| -    if (scale->y_step_q4 == 16) {
| -      // No scaling in either direction.
| -      scale->predict[0][0][0] = vp9_convolve_copy;
| -      scale->predict[0][0][1] = vp9_convolve_avg;
| -      scale->predict[0][1][0] = vp9_convolve8_vert;
| -      scale->predict[0][1][1] = vp9_convolve8_avg_vert;
| -      scale->predict[1][0][0] = vp9_convolve8_horiz;
| -      scale->predict[1][0][1] = vp9_convolve8_avg_horiz;
| -    } else {
| -      // No scaling in x direction. Must always scale in the y direction.
| -      scale->predict[0][0][0] = vp9_convolve8_vert;
| -      scale->predict[0][0][1] = vp9_convolve8_avg_vert;
| -      scale->predict[0][1][0] = vp9_convolve8_vert;
| -      scale->predict[0][1][1] = vp9_convolve8_avg_vert;
| -      scale->predict[1][0][0] = vp9_convolve8;
| -      scale->predict[1][0][1] = vp9_convolve8_avg;
| -    }
| -  } else {
| -    if (scale->y_step_q4 == 16) {
| -      // No scaling in the y direction. Must always scale in the x direction.
| -      scale->predict[0][0][0] = vp9_convolve8_horiz;
| -      scale->predict[0][0][1] = vp9_convolve8_avg_horiz;
| -      scale->predict[0][1][0] = vp9_convolve8;
| -      scale->predict[0][1][1] = vp9_convolve8_avg;
| -      scale->predict[1][0][0] = vp9_convolve8_horiz;
| -      scale->predict[1][0][1] = vp9_convolve8_avg_horiz;
| -    } else {
| -      // Must always scale in both directions.
| -      scale->predict[0][0][0] = vp9_convolve8;
| -      scale->predict[0][0][1] = vp9_convolve8_avg;
| -      scale->predict[0][1][0] = vp9_convolve8;
| -      scale->predict[0][1][1] = vp9_convolve8_avg;
| -      scale->predict[1][0][0] = vp9_convolve8;
| -      scale->predict[1][0][1] = vp9_convolve8_avg;
| -    }
| -  }
| -  // 2D subpel motion always gets filtered in both directions
| -  scale->predict[1][1][0] = vp9_convolve8;
| -  scale->predict[1][1][1] = vp9_convolve8_avg;
| -}
| -
| void vp9_setup_interp_filters(MACROBLOCKD *xd,
|                               INTERPOLATIONFILTERTYPE mcomp_filter_type,
|                               VP9_COMMON *cm) {
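
Everything deleted in the hunk above is the per-frame fixed-point scaling setup, which no longer lives in this file. The arithmetic is a plain q-format ratio: the reference-to-current size ratio is stored with a fractional shift, and positions are mapped with one multiply and one shift. A minimal standalone sketch, assuming the shift width of 14 that VP9_REF_SCALE_SHIFT has in vp9_blockd.h around this revision (the constant's value is not visible in the diff itself):

    /* q14 scaling sketch; REF_SCALE_SHIFT = 14 is an assumption, see above. */
    #include <assert.h>

    #define REF_SCALE_SHIFT 14                    /* q14: 1.0 == 1 << 14 */
    #define REF_NO_SCALE (1 << REF_SCALE_SHIFT)

    static int fixed_point_scale_factor(int other_size, int this_size) {
      /* Mirrors get_fixed_point_scale_factor() in the deleted block. */
      return (other_size << REF_SCALE_SHIFT) / this_size;
    }

    int main(void) {
      /* A 640-wide reference predicting a 320-wide frame: ratio 2.0 in q14. */
      const int x_scale_fp = fixed_point_scale_factor(640, 320);
      assert(x_scale_fp == 2 * REF_NO_SCALE);

      /* scale_value_x_with_scaling(): current-frame column 10 maps to
       * reference column 20. */
      assert((10 * x_scale_fp >> REF_SCALE_SHIFT) == 20);

      /* x_step_q4 as computed in the deleted code: 16 means unscaled; here
       * it is 32, i.e. the convolution advances two reference pels per
       * output pel. */
      assert((16 * x_scale_fp >> REF_SCALE_SHIFT) == 32);
      return 0;
    }

This is also why the deleted predictor-table selection keys on x_step_q4 == 16 and y_step_q4 == 16 to pick the unscaled fast paths.
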
| @@ -197,19 +51,20 @@
|
| void vp9_build_inter_predictor(const uint8_t *src, int src_stride,
|                                uint8_t *dst, int dst_stride,
| -                               const int_mv *src_mv,
| +                               const MV *src_mv,
|                                const struct scale_factors *scale,
| -                               int w, int h, int weight,
| +                               int w, int h, int ref,
|                                const struct subpix_fn_table *subpix,
|                                enum mv_precision precision) {
| -  const MV32 mv = precision == MV_PRECISION_Q4
| -                  ? scale->scale_mv_q4(&src_mv->as_mv, scale)
| -                  : scale->scale_mv_q3_to_q4(&src_mv->as_mv, scale);
| -  const int subpel_x = mv.col & 15;
| -  const int subpel_y = mv.row & 15;
| +  const int is_q4 = precision == MV_PRECISION_Q4;
| +  const MV mv_q4 = { is_q4 ? src_mv->row : src_mv->row << 1,
| +                     is_q4 ? src_mv->col : src_mv->col << 1 };
| +  const MV32 mv = scale->scale_mv(&mv_q4, scale);
| +  const int subpel_x = mv.col & SUBPEL_MASK;
| +  const int subpel_y = mv.row & SUBPEL_MASK;
|
| -  src += (mv.row >> 4) * src_stride + (mv.col >> 4);
| -  scale->predict[!!subpel_x][!!subpel_y][weight](
| +  src += (mv.row >> SUBPEL_BITS) * src_stride + (mv.col >> SUBPEL_BITS);
| +  scale->predict[subpel_x != 0][subpel_y != 0][ref](
|       src, src_stride, dst, dst_stride,
|       subpix->filter_x[subpel_x], scale->x_step_q4,
|       subpix->filter_y[subpel_y], scale->y_step_q4,
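
The rewritten vp9_build_inter_predictor first normalizes the MV to q4 (sixteenth-pel) units, shifting q3 (eighth-pel) components left by one, then splits the scaled result into a full-pel source offset and a subpel phase. SUBPEL_BITS and SUBPEL_MASK replace the old literals ">> 4" and "& 15", so their values (4 and 15) follow from the hunk itself. A self-contained sketch of the decomposition:

    /* q4 motion-vector decomposition as in the hunk above; the constant
     * values follow from the ">> 4" / "& 15" literals in the old code. */
    #define SUBPEL_BITS 4
    #define SUBPEL_MASK ((1 << SUBPEL_BITS) - 1)   /* 15 */

    typedef struct { int row, col; } MV32;

    /* Split a q4 position into a full-pel offset into the source buffer and
     * a subpel phase that selects one of the 16 interpolation filters. */
    static const unsigned char *subpel_split(const unsigned char *src,
                                             int src_stride, MV32 mv,
                                             int *subpel_x, int *subpel_y) {
      *subpel_x = mv.col & SUBPEL_MASK;
      *subpel_y = mv.row & SUBPEL_MASK;
      return src + (mv.row >> SUBPEL_BITS) * src_stride
                 + (mv.col >> SUBPEL_BITS);
    }

    int main(void) {
      unsigned char buf[64 * 64] = { 0 };
      int sx, sy;
      /* MV (row=35, col=18) in q4: full-pel offset (2, 1), phases (3, 2). */
      const MV32 mv = { 35, 18 };
      const unsigned char *p = subpel_split(buf, 64, mv, &sx, &sy);
      return !(p == buf + 2 * 64 + 1 && sy == 3 && sx == 2);
    }

The phase then indexes subpix->filter_x / subpix->filter_y, and the subpel_x != 0 / subpel_y != 0 pair picks the copy, horizontal-only, vertical-only, or 2D variant from the predict table.
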
| @@ -220,195 +75,187 @@
|   return (value < 0 ? value - 2 : value + 2) / 4;
| }
|
| -static int mi_mv_pred_row_q4(MACROBLOCKD *mb, int idx) {
| -  const int temp = mb->mode_info_context->bmi[0].as_mv[idx].as_mv.row +
| -                   mb->mode_info_context->bmi[1].as_mv[idx].as_mv.row +
| -                   mb->mode_info_context->bmi[2].as_mv[idx].as_mv.row +
| -                   mb->mode_info_context->bmi[3].as_mv[idx].as_mv.row;
| -  return round_mv_comp_q4(temp);
| +static MV mi_mv_pred_q4(const MODE_INFO *mi, int idx) {
| +  MV res = { round_mv_comp_q4(mi->bmi[0].as_mv[idx].as_mv.row +
| +                              mi->bmi[1].as_mv[idx].as_mv.row +
| +                              mi->bmi[2].as_mv[idx].as_mv.row +
| +                              mi->bmi[3].as_mv[idx].as_mv.row),
| +             round_mv_comp_q4(mi->bmi[0].as_mv[idx].as_mv.col +
| +                              mi->bmi[1].as_mv[idx].as_mv.col +
| +                              mi->bmi[2].as_mv[idx].as_mv.col +
| +                              mi->bmi[3].as_mv[idx].as_mv.col) };
| +  return res;
| }
|
| -static int mi_mv_pred_col_q4(MACROBLOCKD *mb, int idx) {
| -  const int temp = mb->mode_info_context->bmi[0].as_mv[idx].as_mv.col +
| -                   mb->mode_info_context->bmi[1].as_mv[idx].as_mv.col +
| -                   mb->mode_info_context->bmi[2].as_mv[idx].as_mv.col +
| -                   mb->mode_info_context->bmi[3].as_mv[idx].as_mv.col;
| -  return round_mv_comp_q4(temp);
| -}
| -
| // TODO(jkoleszar): yet another mv clamping function :-(
| -MV clamp_mv_to_umv_border_sb(const MV *src_mv,
| -                             int bwl, int bhl, int ss_x, int ss_y,
| -                             int mb_to_left_edge, int mb_to_top_edge,
| -                             int mb_to_right_edge, int mb_to_bottom_edge) {
| -  /* If the MV points so far into the UMV border that no visible pixels
| -   * are used for reconstruction, the subpel part of the MV can be
| -   * discarded and the MV limited to 16 pixels with equivalent results.
| -   */
| -  const int spel_left = (VP9_INTERP_EXTEND + (4 << bwl)) << 4;
| -  const int spel_right = spel_left - (1 << 4);
| -  const int spel_top = (VP9_INTERP_EXTEND + (4 << bhl)) << 4;
| -  const int spel_bottom = spel_top - (1 << 4);
| -  MV clamped_mv;
| -
| +MV clamp_mv_to_umv_border_sb(const MACROBLOCKD *xd, const MV *src_mv,
| +                             int bw, int bh, int ss_x, int ss_y) {
| +  // If the MV points so far into the UMV border that no visible pixels
| +  // are used for reconstruction, the subpel part of the MV can be
| +  // discarded and the MV limited to 16 pixels with equivalent results.
| +  const int spel_left = (VP9_INTERP_EXTEND + bw) << SUBPEL_BITS;
| +  const int spel_right = spel_left - SUBPEL_SHIFTS;
| +  const int spel_top = (VP9_INTERP_EXTEND + bh) << SUBPEL_BITS;
| +  const int spel_bottom = spel_top - SUBPEL_SHIFTS;
| +  MV clamped_mv = {
| +    src_mv->row << (1 - ss_y),
| +    src_mv->col << (1 - ss_x)
| +  };
|   assert(ss_x <= 1);
|   assert(ss_y <= 1);
| -  clamped_mv.col = clamp(src_mv->col << (1 - ss_x),
| -                         (mb_to_left_edge << (1 - ss_x)) - spel_left,
| -                         (mb_to_right_edge << (1 - ss_x)) + spel_right);
| -  clamped_mv.row = clamp(src_mv->row << (1 - ss_y),
| -                         (mb_to_top_edge << (1 - ss_y)) - spel_top,
| -                         (mb_to_bottom_edge << (1 - ss_y)) + spel_bottom);
| +
| +  clamp_mv(&clamped_mv, (xd->mb_to_left_edge << (1 - ss_x)) - spel_left,
| +           (xd->mb_to_right_edge << (1 - ss_x)) + spel_right,
| +           (xd->mb_to_top_edge << (1 - ss_y)) - spel_top,
| +           (xd->mb_to_bottom_edge << (1 - ss_y)) + spel_bottom);
| +
|   return clamped_mv;
| }
|
| struct build_inter_predictors_args {
|   MACROBLOCKD *xd;
| -  int x;
| -  int y;
| -  uint8_t* dst[MAX_MB_PLANE];
| -  int dst_stride[MAX_MB_PLANE];
| -  uint8_t* pre[2][MAX_MB_PLANE];
| -  int pre_stride[2][MAX_MB_PLANE];
| +  int x, y;
| };
| -static void build_inter_predictors(int plane, int block,
| -                                   BLOCK_SIZE_TYPE bsize,
| +
| +static void build_inter_predictors(int plane, int block, BLOCK_SIZE bsize,
|                                    int pred_w, int pred_h,
|                                    void *argv) {
|   const struct build_inter_predictors_args* const arg = argv;
| -  MACROBLOCKD * const xd = arg->xd;
| -  const int bwl = b_width_log2(bsize) - xd->plane[plane].subsampling_x;
| -  const int bhl = b_height_log2(bsize) - xd->plane[plane].subsampling_y;
| -  const int x = 4 * (block & ((1 << bwl) - 1)), y = 4 * (block >> bwl);
| -  const int use_second_ref = xd->mode_info_context->mbmi.ref_frame[1] > 0;
| -  int which_mv;
| +  MACROBLOCKD *const xd = arg->xd;
| +  struct macroblockd_plane *const pd = &xd->plane[plane];
| +  const int bwl = b_width_log2(bsize) - pd->subsampling_x;
| +  const int bw = 4 << bwl;
| +  const int bh = plane_block_height(bsize, pd);
| +  const int x = 4 * (block & ((1 << bwl) - 1));
| +  const int y = 4 * (block >> bwl);
| +  const MODE_INFO *const mi = xd->mode_info_context;
| +  const int use_second_ref = mi->mbmi.ref_frame[1] > 0;
| +  int ref;
|
| -  assert(x < (4 << bwl));
| -  assert(y < (4 << bhl));
| -  assert(xd->mode_info_context->mbmi.sb_type < BLOCK_SIZE_SB8X8 ||
| -         4 << pred_w == (4 << bwl));
| -  assert(xd->mode_info_context->mbmi.sb_type < BLOCK_SIZE_SB8X8 ||
| -         4 << pred_h == (4 << bhl));
| +  assert(x < bw);
| +  assert(y < bh);
| +  assert(mi->mbmi.sb_type < BLOCK_8X8 || 4 << pred_w == bw);
| +  assert(mi->mbmi.sb_type < BLOCK_8X8 || 4 << pred_h == bh);
|
| -  for (which_mv = 0; which_mv < 1 + use_second_ref; ++which_mv) {
| -    // source
| -    const uint8_t * const base_pre = arg->pre[which_mv][plane];
| -    const int pre_stride = arg->pre_stride[which_mv][plane];
| -    const uint8_t *const pre = base_pre +
| -        scaled_buffer_offset(x, y, pre_stride, &xd->scale_factor[which_mv]);
| -    struct scale_factors * const scale = &xd->scale_factor[which_mv];
| +  for (ref = 0; ref < 1 + use_second_ref; ++ref) {
| +    struct scale_factors *const scale = &xd->scale_factor[ref];
| +    struct buf_2d *const pre_buf = &pd->pre[ref];
| +    struct buf_2d *const dst_buf = &pd->dst;
|
| -    // dest
| -    uint8_t *const dst = arg->dst[plane] + arg->dst_stride[plane] * y + x;
| +    const uint8_t *const pre = pre_buf->buf + scaled_buffer_offset(x, y,
| +                                   pre_buf->stride, scale);
|
| -    // motion vector
| -    const MV *mv;
| -    MV split_chroma_mv;
| -    int_mv clamped_mv;
| +    uint8_t *const dst = dst_buf->buf + dst_buf->stride * y + x;
|
| -    if (xd->mode_info_context->mbmi.sb_type < BLOCK_SIZE_SB8X8) {
| -      if (plane == 0) {
| -        mv = &xd->mode_info_context->bmi[block].as_mv[which_mv].as_mv;
| -      } else {
| -        // TODO(jkoleszar): All chroma MVs in SPLITMV mode are taken as the
| -        // same MV (the average of the 4 luma MVs) but we could do something
| -        // smarter for non-4:2:0. Just punt for now, pending the changes to get
| -        // rid of SPLITMV mode entirely.
| -        split_chroma_mv.row = mi_mv_pred_row_q4(xd, which_mv);
| -        split_chroma_mv.col = mi_mv_pred_col_q4(xd, which_mv);
| -        mv = &split_chroma_mv;
| -      }
| -    } else {
| -      mv = &xd->mode_info_context->mbmi.mv[which_mv].as_mv;
| -    }
| +    // TODO(jkoleszar): All chroma MVs in SPLITMV mode are taken as the
| +    // same MV (the average of the 4 luma MVs) but we could do something
| +    // smarter for non-4:2:0. Just punt for now, pending the changes to get
| +    // rid of SPLITMV mode entirely.
| +    const MV mv = mi->mbmi.sb_type < BLOCK_8X8
| +               ? (plane == 0 ? mi->bmi[block].as_mv[ref].as_mv
| +                             : mi_mv_pred_q4(mi, ref))
| +               : mi->mbmi.mv[ref].as_mv;
|
| -    /* TODO(jkoleszar): This clamping is done in the incorrect place for the
| -     * scaling case. It needs to be done on the scaled MV, not the pre-scaling
| -     * MV. Note however that it performs the subsampling aware scaling so
| -     * that the result is always q4.
| -     */
| -    clamped_mv.as_mv = clamp_mv_to_umv_border_sb(mv, bwl, bhl,
| -                                                 xd->plane[plane].subsampling_x,
| -                                                 xd->plane[plane].subsampling_y,
| -                                                 xd->mb_to_left_edge,
| -                                                 xd->mb_to_top_edge,
| -                                                 xd->mb_to_right_edge,
| -                                                 xd->mb_to_bottom_edge);
| -    scale->set_scaled_offsets(scale, arg->y + y, arg->x + x);
| +    // TODO(jkoleszar): This clamping is done in the incorrect place for the
| +    // scaling case. It needs to be done on the scaled MV, not the pre-scaling
| +    // MV. Note however that it performs the subsampling aware scaling so
| +    // that the result is always q4.
| +    const MV res_mv = clamp_mv_to_umv_border_sb(xd, &mv, bw, bh,
| +                                                pd->subsampling_x,
| +                                                pd->subsampling_y);
|
| -    vp9_build_inter_predictor(pre, pre_stride,
| -                              dst, arg->dst_stride[plane],
| -                              &clamped_mv, &xd->scale_factor[which_mv],
| -                              4 << pred_w, 4 << pred_h, which_mv,
| +    scale->set_scaled_offsets(scale, arg->y + y, arg->x + x);
| +    vp9_build_inter_predictor(pre, pre_buf->stride, dst, dst_buf->stride,
| +                              &res_mv, scale,
| +                              4 << pred_w, 4 << pred_h, ref,
|                               &xd->subpix, MV_PRECISION_Q4);
|   }
| }
| -void vp9_build_inter_predictors_sby(MACROBLOCKD *xd,
| -                                    int mi_row,
| -                                    int mi_col,
| -                                    BLOCK_SIZE_TYPE bsize) {
| -  struct build_inter_predictors_args args = {
| -    xd, mi_col * MI_SIZE, mi_row * MI_SIZE,
| -    {xd->plane[0].dst.buf, NULL, NULL}, {xd->plane[0].dst.stride, 0, 0},
| -    {{xd->plane[0].pre[0].buf, NULL, NULL},
| -     {xd->plane[0].pre[1].buf, NULL, NULL}},
| -    {{xd->plane[0].pre[0].stride, 0, 0}, {xd->plane[0].pre[1].stride, 0, 0}},
| -  };
|
| -  foreach_predicted_block_in_plane(xd, bsize, 0, build_inter_predictors, &args);
| +// TODO(jkoleszar): In principle, pred_w, pred_h are unnecessary, as we could
| +// calculate the subsampled BLOCK_SIZE, but that type isn't defined for
| +// sizes smaller than 16x16 yet.
| +typedef void (*foreach_predicted_block_visitor)(int plane, int block,
| +                                                BLOCK_SIZE bsize,
| +                                                int pred_w, int pred_h,
| +                                                void *arg);
| +static INLINE void foreach_predicted_block_in_plane(
| +    const MACROBLOCKD* const xd, BLOCK_SIZE bsize, int plane,
| +    foreach_predicted_block_visitor visit, void *arg) {
| +  int i, x, y;
| +
| +  // block sizes in number of 4x4 blocks log 2 ("*_b")
| +  // 4x4=0, 8x8=2, 16x16=4, 32x32=6, 64x64=8
| +  // subsampled size of the block
| +  const int bwl = b_width_log2(bsize) - xd->plane[plane].subsampling_x;
| +  const int bhl = b_height_log2(bsize) - xd->plane[plane].subsampling_y;
| +
| +  // size of the predictor to use.
| +  int pred_w, pred_h;
| +
| +  if (xd->mode_info_context->mbmi.sb_type < BLOCK_8X8) {
| +    assert(bsize == BLOCK_8X8);
| +    pred_w = 0;
| +    pred_h = 0;
| +  } else {
| +    pred_w = bwl;
| +    pred_h = bhl;
| +  }
| +  assert(pred_w <= bwl);
| +  assert(pred_h <= bhl);
| +
| +  // visit each subblock in raster order
| +  i = 0;
| +  for (y = 0; y < 1 << bhl; y += 1 << pred_h) {
| +    for (x = 0; x < 1 << bwl; x += 1 << pred_w) {
| +      visit(plane, i, bsize, pred_w, pred_h, arg);
| +      i += 1 << pred_w;
| +    }
| +    i += (1 << (bwl + pred_h)) - (1 << bwl);
| +  }
| }
| -void vp9_build_inter_predictors_sbuv(MACROBLOCKD *xd,
| -                                     int mi_row,
| -                                     int mi_col,
| -                                     BLOCK_SIZE_TYPE bsize) {
| -  struct build_inter_predictors_args args = {
| -    xd, mi_col * MI_SIZE, mi_row * MI_SIZE,
| -#if CONFIG_ALPHA
| -    {NULL, xd->plane[1].dst.buf, xd->plane[2].dst.buf,
| -     xd->plane[3].dst.buf},
| -    {0, xd->plane[1].dst.stride, xd->plane[1].dst.stride,
| -     xd->plane[3].dst.stride},
| -    {{NULL, xd->plane[1].pre[0].buf, xd->plane[2].pre[0].buf,
| -      xd->plane[3].pre[0].buf},
| -     {NULL, xd->plane[1].pre[1].buf, xd->plane[2].pre[1].buf,
| -      xd->plane[3].pre[1].buf}},
| -    {{0, xd->plane[1].pre[0].stride, xd->plane[1].pre[0].stride,
| -      xd->plane[3].pre[0].stride},
| -     {0, xd->plane[1].pre[1].stride, xd->plane[1].pre[1].stride,
| -      xd->plane[3].pre[1].stride}},
| -#else
| -    {NULL, xd->plane[1].dst.buf, xd->plane[2].dst.buf},
| -    {0, xd->plane[1].dst.stride, xd->plane[1].dst.stride},
| -    {{NULL, xd->plane[1].pre[0].buf, xd->plane[2].pre[0].buf},
| -     {NULL, xd->plane[1].pre[1].buf, xd->plane[2].pre[1].buf}},
| -    {{0, xd->plane[1].pre[0].stride, xd->plane[1].pre[0].stride},
| -     {0, xd->plane[1].pre[1].stride, xd->plane[1].pre[1].stride}},
| -#endif
| -  };
| -  foreach_predicted_block_uv(xd, bsize, build_inter_predictors, &args);
| +
| +static void build_inter_predictors_for_planes(MACROBLOCKD *xd, BLOCK_SIZE bsize,
| +                                              int mi_row, int mi_col,
| +                                              int plane_from, int plane_to) {
| +  int plane;
| +  for (plane = plane_from; plane <= plane_to; ++plane) {
| +    struct build_inter_predictors_args args = {
| +      xd, mi_col * MI_SIZE, mi_row * MI_SIZE,
| +    };
| +    foreach_predicted_block_in_plane(xd, bsize, plane, build_inter_predictors,
| +                                     &args);
| +  }
| }
| -void vp9_build_inter_predictors_sb(MACROBLOCKD *xd,
| -                                   int mi_row, int mi_col,
| -                                   BLOCK_SIZE_TYPE bsize) {
|
| -  vp9_build_inter_predictors_sby(xd, mi_row, mi_col, bsize);
| -  vp9_build_inter_predictors_sbuv(xd, mi_row, mi_col, bsize);
| +void vp9_build_inter_predictors_sby(MACROBLOCKD *xd, int mi_row, int mi_col,
| +                                    BLOCK_SIZE bsize) {
| +  build_inter_predictors_for_planes(xd, bsize, mi_row, mi_col, 0, 0);
| }
| +void vp9_build_inter_predictors_sbuv(MACROBLOCKD *xd, int mi_row, int mi_col,
| +                                     BLOCK_SIZE bsize) {
| +  build_inter_predictors_for_planes(xd, bsize, mi_row, mi_col, 1,
| +                                    MAX_MB_PLANE - 1);
| +}
| +void vp9_build_inter_predictors_sb(MACROBLOCKD *xd, int mi_row, int mi_col,
| +                                   BLOCK_SIZE bsize) {
| +  build_inter_predictors_for_planes(xd, bsize, mi_row, mi_col, 0,
| +                                    MAX_MB_PLANE - 1);
| +}
|
| // TODO(dkovalev: find better place for this function)
| void vp9_setup_scale_factors(VP9_COMMON *cm, int i) {
|   const int ref = cm->active_ref_idx[i];
|   struct scale_factors *const sf = &cm->active_ref_scale[i];
|   if (ref >= NUM_YV12_BUFFERS) {
| -    memset(sf, 0, sizeof(*sf));
| +    vp9_zero(*sf);
|   } else {
|     YV12_BUFFER_CONFIG *const fb = &cm->yv12_fb[ref];
|     vp9_setup_scale_factors_for_frame(sf,
|                                       fb->y_crop_width, fb->y_crop_height,
|                                       cm->width, cm->height);
|
| -    if (sf->x_scale_fp != VP9_REF_NO_SCALE ||
| -        sf->y_scale_fp != VP9_REF_NO_SCALE)
| +    if (vp9_is_scaled(sf))
|       vp9_extend_frame_borders(fb, cm->subsampling_x, cm->subsampling_y);
|   }
| }
|
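
The last hunk folds the old per-plane buffer plumbing of the sby/sbuv helpers into build_inter_predictors_for_planes plus the raster-order visitor foreach_predicted_block_in_plane. The visitor's index arithmetic is terse: i advances by one predictor width per visit, then skips past the rows the predictor already covered at the end of each row of visits. A toy harness, where walk() is a hypothetical wrapper around the same loop body as the patch, that prints the visit order:

    /* Prints which 4x4 block index each predicted block starts at. */
    #include <stdio.h>

    static void walk(int bwl, int bhl, int pred_w, int pred_h) {
      /* bwl/bhl: plane size in 4x4 blocks, log2; pred_w/pred_h: predictor
       * size in 4x4 blocks, log2 -- same meaning as in the patch. */
      int i = 0, x, y;
      for (y = 0; y < 1 << bhl; y += 1 << pred_h) {
        for (x = 0; x < 1 << bwl; x += 1 << pred_w) {
          printf("visit block %d at (%d, %d)\n", i, x, y);
          i += 1 << pred_w;
        }
        i += (1 << (bwl + pred_h)) - (1 << bwl);
      }
    }

    int main(void) {
      walk(1, 1, 0, 0);  /* 8x8 split into four 4x4s: visits 0, 1, 2, 3 */
      walk(2, 2, 2, 2);  /* 16x16 predicted as a whole: one visit, block 0 */
      return 0;
    }

One more detail worth noting: round_mv_comp_q4 divides the four-term luma MV sum by 4, rounding to nearest with ties away from zero (a sum of 6 gives 2, -6 gives -2), which is how the new mi_mv_pred_q4 forms the single averaged chroma MV for sub-8x8 (former SPLITMV) blocks.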