Index: source/libvpx/vp9/common/vp9_blockd.h
===================================================================
--- source/libvpx/vp9/common/vp9_blockd.h (revision 251189)
+++ source/libvpx/vp9/common/vp9_blockd.h (working copy)
@@ -30,7 +30,7 @@
 #endif
 #define BLOCK_SIZE_GROUPS 4
-#define MBSKIP_CONTEXTS 3
+#define SKIP_CONTEXTS 3
 #define INTER_MODE_CONTEXTS 7
 /* Segment Feature Masks */
@@ -89,7 +89,6 @@
 #define INTER_OFFSET(mode) ((mode) - NEARESTMV)
-
 /* For keyframes, intra block modes are predicted by the (already decoded)
    modes for the Y blocks to the left and above us; for interframes, there
    is a single probability table. */
@@ -129,7 +128,7 @@
   uint8_t mode_context[MAX_REF_FRAMES];
-  unsigned char skip_coeff;  // 0=need to decode coeffs, 1=no coefficients
+  unsigned char skip;  // 0=need to decode coeffs, 1=no coefficients
   unsigned char segment_id;  // Segment id for this block.
   // Flags used for prediction status of various bit-stream signals
@@ -153,34 +152,12 @@
   return mbmi->ref_frame[1] > INTRA_FRAME;
 }
-static MB_PREDICTION_MODE left_block_mode(const MODE_INFO *cur_mi,
-                                          const MODE_INFO *left_mi, int b) {
-  if (b == 0 || b == 2) {
-    if (!left_mi || is_inter_block(&left_mi->mbmi))
-      return DC_PRED;
+MB_PREDICTION_MODE vp9_left_block_mode(const MODE_INFO *cur_mi,
+                                       const MODE_INFO *left_mi, int b);
-    return left_mi->mbmi.sb_type < BLOCK_8X8 ? left_mi->bmi[b + 1].as_mode
-                                             : left_mi->mbmi.mode;
-  } else {
-    assert(b == 1 || b == 3);
-    return cur_mi->bmi[b - 1].as_mode;
-  }
-}
+MB_PREDICTION_MODE vp9_above_block_mode(const MODE_INFO *cur_mi,
+                                        const MODE_INFO *above_mi, int b);
-static MB_PREDICTION_MODE above_block_mode(const MODE_INFO *cur_mi,
-                                           const MODE_INFO *above_mi, int b) {
-  if (b == 0 || b == 1) {
-    if (!above_mi || is_inter_block(&above_mi->mbmi))
-      return DC_PRED;
-
-    return above_mi->mbmi.sb_type < BLOCK_8X8 ? above_mi->bmi[b + 2].as_mode
-                                              : above_mi->mbmi.mode;
-  } else {
-    assert(b == 2 || b == 3);
-    return cur_mi->bmi[b - 2].as_mode;
-  }
-}
-
 enum mv_precision {
   MV_PRECISION_Q3,
   MV_PRECISION_Q4
@@ -204,7 +181,7 @@
   int subsampling_y;
   struct buf_2d dst;
   struct buf_2d pre[2];
-  int16_t *dequant;
+  const int16_t *dequant;
   ENTROPY_CONTEXT *above_context;
   ENTROPY_CONTEXT *left_context;
 };
@@ -252,7 +229,7 @@
   /* Inverse transform function pointers. */
   void (*itxm_add)(const int16_t *input, uint8_t *dest, int stride, int eob);
-  const interp_kernel *interp_kernel;
+  const InterpKernel *interp_kernel;
   int corrupted;
@@ -266,7 +243,8 @@
-static BLOCK_SIZE get_subsize(BLOCK_SIZE bsize, PARTITION_TYPE partition) {
+static INLINE BLOCK_SIZE get_subsize(BLOCK_SIZE bsize,
+                                     PARTITION_TYPE partition) {
   const BLOCK_SIZE subsize = subsize_lookup[partition][bsize];
   assert(subsize < BLOCK_SIZES);
   return subsize;
@@ -298,23 +276,9 @@
                         : DCT_DCT;
 }
-static void setup_block_dptrs(MACROBLOCKD *xd, int ss_x, int ss_y) {
-  int i;
+void vp9_setup_block_planes(MACROBLOCKD *xd, int ss_x, int ss_y);
-  for (i = 0; i < MAX_MB_PLANE; i++) {
-    xd->plane[i].plane_type = i ? PLANE_TYPE_UV : PLANE_TYPE_Y;
-    xd->plane[i].subsampling_x = i ? ss_x : 0;
-    xd->plane[i].subsampling_y = i ? ss_y : 0;
-  }
-#if CONFIG_ALPHA
-  // TODO(jkoleszar): Using the Y w/h for now
-  xd->plane[3].plane_type = PLANE_TYPE_Y;
-  xd->plane[3].subsampling_x = 0;
-  xd->plane[3].subsampling_y = 0;
-#endif
-}
-
-static TX_SIZE get_uv_tx_size_impl(TX_SIZE y_tx_size, BLOCK_SIZE bsize) {
+static INLINE TX_SIZE get_uv_tx_size_impl(TX_SIZE y_tx_size, BLOCK_SIZE bsize) {
   if (bsize < BLOCK_8X8) {
     return TX_4X4;
   } else {
@@ -324,12 +288,12 @@
   }
 }
-static TX_SIZE get_uv_tx_size(const MB_MODE_INFO *mbmi) {
+static INLINE TX_SIZE get_uv_tx_size(const MB_MODE_INFO *mbmi) {
   return get_uv_tx_size_impl(mbmi->tx_size, mbmi->sb_type);
 }
-static BLOCK_SIZE get_plane_block_size(BLOCK_SIZE bsize,
-                                       const struct macroblockd_plane *pd) {
+static INLINE BLOCK_SIZE get_plane_block_size(BLOCK_SIZE bsize,
+    const struct macroblockd_plane *pd) {
   BLOCK_SIZE bs = ss_size_lookup[bsize][pd->subsampling_x][pd->subsampling_y];
   assert(bs < BLOCK_SIZES);
   return bs;
@@ -340,77 +304,18 @@
                                                    TX_SIZE tx_size,
                                                    void *arg);
-static INLINE void foreach_transformed_block_in_plane(
+void vp9_foreach_transformed_block_in_plane(
     const MACROBLOCKD *const xd, BLOCK_SIZE bsize, int plane,
-    foreach_transformed_block_visitor visit, void *arg) {
-  const struct macroblockd_plane *const pd = &xd->plane[plane];
-  const MB_MODE_INFO* mbmi = &xd->mi_8x8[0]->mbmi;
-  // block and transform sizes, in number of 4x4 blocks log 2 ("*_b")
-  // 4x4=0, 8x8=2, 16x16=4, 32x32=6, 64x64=8
-  // transform size varies per plane, look it up in a common way.
-  const TX_SIZE tx_size = plane ? get_uv_tx_size(mbmi)
-                                : mbmi->tx_size;
-  const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize, pd);
-  const int num_4x4_w = num_4x4_blocks_wide_lookup[plane_bsize];
-  const int num_4x4_h = num_4x4_blocks_high_lookup[plane_bsize];
-  const int step = 1 << (tx_size << 1);
-  int i;
+    foreach_transformed_block_visitor visit, void *arg);
-  // If mb_to_right_edge is < 0 we are in a situation in which
-  // the current block size extends into the UMV and we won't
-  // visit the sub blocks that are wholly within the UMV.
-  if (xd->mb_to_right_edge < 0 || xd->mb_to_bottom_edge < 0) {
-    int r, c;
-    int max_blocks_wide = num_4x4_w;
-    int max_blocks_high = num_4x4_h;
-
-    // xd->mb_to_right_edge is in units of pixels * 8. This converts
-    // it to 4x4 block sizes.
-    if (xd->mb_to_right_edge < 0)
-      max_blocks_wide += (xd->mb_to_right_edge >> (5 + pd->subsampling_x));
-
-    if (xd->mb_to_bottom_edge < 0)
-      max_blocks_high += (xd->mb_to_bottom_edge >> (5 + pd->subsampling_y));
-
-    i = 0;
-    // Unlike the normal case - in here we have to keep track of the
-    // row and column of the blocks we use so that we know if we are in
-    // the unrestricted motion border.
-    for (r = 0; r < num_4x4_h; r += (1 << tx_size)) {
-      for (c = 0; c < num_4x4_w; c += (1 << tx_size)) {
-        if (r < max_blocks_high && c < max_blocks_wide)
-          visit(plane, i, plane_bsize, tx_size, arg);
-        i += step;
-      }
-    }
-  } else {
-    for (i = 0; i < num_4x4_w * num_4x4_h; i += step)
-      visit(plane, i, plane_bsize, tx_size, arg);
-  }
-}
-
-static INLINE void foreach_transformed_block(
+void vp9_foreach_transformed_block(
     const MACROBLOCKD* const xd, BLOCK_SIZE bsize,
-    foreach_transformed_block_visitor visit, void *arg) {
-  int plane;
+    foreach_transformed_block_visitor visit, void *arg);
-  for (plane = 0; plane < MAX_MB_PLANE; plane++)
-    foreach_transformed_block_in_plane(xd, bsize, plane, visit, arg);
-}
-
-static INLINE void foreach_transformed_block_uv(
-    const MACROBLOCKD* const xd, BLOCK_SIZE bsize,
-    foreach_transformed_block_visitor visit, void *arg) {
-  int plane;
-
-  for (plane = 1; plane < MAX_MB_PLANE; plane++)
-    foreach_transformed_block_in_plane(xd, bsize, plane, visit, arg);
-}
-
-static void txfrm_block_to_raster_xy(BLOCK_SIZE plane_bsize,
-                                     TX_SIZE tx_size, int block,
-                                     int *x, int *y) {
+static INLINE void txfrm_block_to_raster_xy(BLOCK_SIZE plane_bsize,
+                                            TX_SIZE tx_size, int block,
+                                            int *x, int *y) {
   const int bwl = b_width_log2(plane_bsize);
   const int tx_cols_log2 = bwl - tx_size;
   const int tx_cols = 1 << tx_cols_log2;
@@ -419,50 +324,13 @@
   *y = (raster_mb >> tx_cols_log2) << tx_size;
 }
-static void set_contexts(const MACROBLOCKD *xd, struct macroblockd_plane *pd,
-                         BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
-                         int has_eob, int aoff, int loff) {
-  ENTROPY_CONTEXT *const a = pd->above_context + aoff;
-  ENTROPY_CONTEXT *const l = pd->left_context + loff;
-  const int tx_size_in_blocks = 1 << tx_size;
+void vp9_set_contexts(const MACROBLOCKD *xd, struct macroblockd_plane *pd,
+                      BLOCK_SIZE plane_bsize, TX_SIZE tx_size, int has_eob,
+                      int aoff, int loff);
-
-  // above
-  if (has_eob && xd->mb_to_right_edge < 0) {
-    int i;
-    const int blocks_wide = num_4x4_blocks_wide_lookup[plane_bsize] +
-                            (xd->mb_to_right_edge >> (5 + pd->subsampling_x));
-    int above_contexts = tx_size_in_blocks;
-    if (above_contexts + aoff > blocks_wide)
-      above_contexts = blocks_wide - aoff;
-    for (i = 0; i < above_contexts; ++i)
-      a[i] = has_eob;
-    for (i = above_contexts; i < tx_size_in_blocks; ++i)
-      a[i] = 0;
-  } else {
-    vpx_memset(a, has_eob, sizeof(ENTROPY_CONTEXT) * tx_size_in_blocks);
-  }
-
-  // left
-  if (has_eob && xd->mb_to_bottom_edge < 0) {
-    int i;
-    const int blocks_high = num_4x4_blocks_high_lookup[plane_bsize] +
-                            (xd->mb_to_bottom_edge >> (5 + pd->subsampling_y));
-    int left_contexts = tx_size_in_blocks;
-    if (left_contexts + loff > blocks_high)
-      left_contexts = blocks_high - loff;
-
-    for (i = 0; i < left_contexts; ++i)
-      l[i] = has_eob;
-    for (i = left_contexts; i < tx_size_in_blocks; ++i)
-      l[i] = 0;
-  } else {
-    vpx_memset(l, has_eob, sizeof(ENTROPY_CONTEXT) * tx_size_in_blocks);
-  }
-}
-
-static int get_tx_eob(const struct segmentation *seg, int segment_id,
-                      TX_SIZE tx_size) {
+static INLINE int get_tx_eob(const struct segmentation *seg, int segment_id,
+                             TX_SIZE tx_size) {
   const int eob_max = 16 << (tx_size << 1);
   return vp9_segfeature_active(seg, segment_id, SEG_LVL_SKIP) ? 0 : eob_max;
 }