Index: source/libvpx/vp9/encoder/vp9_encodeframe.c |
=================================================================== |
--- source/libvpx/vp9/encoder/vp9_encodeframe.c (revision 223100) |
+++ source/libvpx/vp9/encoder/vp9_encodeframe.c (working copy) |
@@ -41,6 +41,15 @@ |
#define DBG_PRNT_SEGMAP 0 |
+ |
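+// The largest transform size permitted by each TX_MODE; TX_MODE_SELECT |
+// allows everything up to 32x32. |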
+static const TX_SIZE tx_mode_to_biggest_tx_size[TX_MODES] = { |
+ TX_4X4, // ONLY_4X4 |
+ TX_8X8, // ONLY_8X8 |
+ TX_16X16, // ONLY_16X16 |
+ TX_32X32, // ONLY_32X32 |
+ TX_32X32, // TX_MODE_SELECT |
+}; |
+ |
// #define ENC_DEBUG |
#ifdef ENC_DEBUG |
int enc_debug = 0; |
@@ -342,7 +351,8 @@ |
MACROBLOCK *const x = &cpi->mb; |
MACROBLOCKD *const xd = &x->e_mbd; |
MODE_INFO *mi = &ctx->mic; |
- MB_MODE_INFO *const mbmi = &xd->mode_info_context->mbmi; |
+ MB_MODE_INFO *const mbmi = &xd->this_mi->mbmi; |
+ MODE_INFO *mi_addr = xd->this_mi; |
int mb_mode_index = ctx->best_mode_index; |
const int mis = cm->mode_info_stride; |
@@ -355,13 +365,15 @@ |
assert(mi->mbmi.ref_frame[1] < MAX_REF_FRAMES); |
assert(mi->mbmi.sb_type == bsize); |
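+ // Copy the chosen mode info into the frame-level array entry; the grid |
+ // pointers updated below all reference this single copy. |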
+ *mi_addr = *mi; |
+ |
// Restore the coding context of the MB to that that was in place |
// when the mode was picked for it |
for (y = 0; y < mi_height; y++) |
for (x_idx = 0; x_idx < mi_width; x_idx++) |
if ((xd->mb_to_right_edge >> (3 + MI_SIZE_LOG2)) + mi_width > x_idx |
&& (xd->mb_to_bottom_edge >> (3 + MI_SIZE_LOG2)) + mi_height > y) |
- xd->mode_info_context[x_idx + y * mis] = *mi; |
+ xd->mi_8x8[x_idx + y * mis] = mi_addr; |
// FIXME(rbultje) I'm pretty sure this should go to the end of this block |
// (i.e. after the output_enabled) |
@@ -430,15 +442,6 @@ |
vp9_update_nmv_count(cpi, x, &best_mv, &best_second_mv); |
} |
- if (bsize > BLOCK_8X8 && mbmi->mode == NEWMV) { |
- int i, j; |
- for (j = 0; j < mi_height; ++j) |
- for (i = 0; i < mi_width; ++i) |
- if ((xd->mb_to_right_edge >> (3 + MI_SIZE_LOG2)) + mi_width > i |
- && (xd->mb_to_bottom_edge >> (3 + MI_SIZE_LOG2)) + mi_height > j) |
- xd->mode_info_context[mis * j + i].mbmi = *mbmi; |
- } |
- |
if (cm->mcomp_filter_type == SWITCHABLE && is_inter_mode(mbmi->mode)) { |
const int ctx = vp9_get_pred_context_switchable_interp(xd); |
++cm->counts.switchable_interp[ctx][mbmi->interp_filter]; |
@@ -492,12 +495,19 @@ |
/* pointers to mode info contexts */ |
x->partition_info = x->pi + idx_str; |
- xd->mode_info_context = cm->mi + idx_str; |
- mbmi = &xd->mode_info_context->mbmi; |
+ |
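+ // Hook the 8x8 pointer grids up to this position in the frame-level |
+ // mode info arrays. |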
+ xd->mi_8x8 = cm->mi_grid_visible + idx_str; |
+ xd->prev_mi_8x8 = cm->prev_mi_grid_visible + idx_str; |
+ |
// Special case: if prev_mi is NULL, the previous mode info context |
// cannot be used. |
- xd->prev_mode_info_context = cm->prev_mi ? cm->prev_mi + idx_str : NULL; |
+ xd->last_mi = cm->prev_mi ? xd->prev_mi_8x8[0] : NULL; |
+ xd->this_mi = |
+ xd->mi_8x8[0] = cm->mi + idx_str; |
+ |
+ mbmi = &xd->this_mi->mbmi; |
+ |
// Set up destination pointers |
setup_dst_planes(xd, &cm->yv12_fb[dst_fb_idx], mi_row, mi_col); |
@@ -573,10 +583,10 @@ |
} |
set_offsets(cpi, mi_row, mi_col, bsize); |
- xd->mode_info_context->mbmi.sb_type = bsize; |
+ xd->this_mi->mbmi.sb_type = bsize; |
// Set to zero to make sure we do not use the previous encoded frame stats |
- xd->mode_info_context->mbmi.skip_coeff = 0; |
+ xd->this_mi->mbmi.skip_coeff = 0; |
x->source_variance = get_sby_perpixel_variance(cpi, x, bsize); |
@@ -597,7 +607,7 @@ |
VP9_COMMON *const cm = &cpi->common; |
MACROBLOCK *const x = &cpi->mb; |
MACROBLOCKD *const xd = &x->e_mbd; |
- MODE_INFO *mi = xd->mode_info_context; |
+ MODE_INFO *mi = xd->this_mi; |
MB_MODE_INFO *const mbmi = &mi->mbmi; |
if (cm->frame_type != KEY_FRAME) { |
@@ -765,7 +775,7 @@ |
return; |
if (sub_index != -1) |
- *(get_sb_index(xd, bsize)) = sub_index; |
+ *get_sb_index(xd, bsize) = sub_index; |
if (bsize < BLOCK_8X8) { |
// When ab_index = 0 all sub-blocks are handled, so for ab_index != 0 |
@@ -792,7 +802,7 @@ |
MACROBLOCKD * const xd = &x->e_mbd; |
BLOCK_SIZE c1 = BLOCK_8X8; |
const int bsl = b_width_log2(bsize), bs = (1 << bsl) / 4; |
- int UNINITIALIZED_IS_SAFE(pl); |
+ int pl = 0; |
PARTITION_TYPE partition; |
BLOCK_SIZE subsize; |
int i; |
@@ -835,7 +845,7 @@ |
for (i = 0; i < 4; i++) { |
const int x_idx = i & 1, y_idx = i >> 1; |
- *(get_sb_index(xd, subsize)) = i; |
+ *get_sb_index(xd, subsize) = i; |
encode_sb(cpi, tp, mi_row + y_idx * bs, mi_col + x_idx * bs, |
output_enabled, subsize); |
} |
@@ -851,45 +861,108 @@ |
} |
} |
-static void set_partitioning(VP9_COMP *cpi, MODE_INFO *m, BLOCK_SIZE bsize) { |
+// Check whether the given partition size is allowed for the specified number |
+// of 8x8 block rows and columns remaining in the image. |
+// If it is not, return the largest partition size that does fit. |
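+// For example, a request for BLOCK_64X64 with only 3 rows and 5 columns of |
+// 8x8 blocks remaining steps down to BLOCK_32X16 (bh = 2, bw = 4). |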
+static BLOCK_SIZE find_partition_size(BLOCK_SIZE bsize, |
+ int rows_left, int cols_left, |
+ int *bh, int *bw) { |
+ if ((rows_left <= 0) || (cols_left <= 0)) { |
+ return MIN(bsize, BLOCK_8X8); |
+ } else { |
+ for (; bsize > 0; --bsize) { |
+ *bh = num_8x8_blocks_high_lookup[bsize]; |
+ *bw = num_8x8_blocks_wide_lookup[bsize]; |
+ if ((*bh <= rows_left) && (*bw <= cols_left)) { |
+ break; |
+ } |
+ } |
+ } |
+ return bsize; |
+} |
+ |
+// This function attempts to set all mode info entries in a given SB64 |
+// to the same block partition size. |
+// However, at the bottom and right borders of the image the requested size |
+// may not fit, in which case the largest allowable partition size is chosen |
+// instead. |
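+// (An SB64 covers MI_BLOCK_SIZE x MI_BLOCK_SIZE, i.e. an 8x8 grid of 8x8 |
+// mode info units.) |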
+static void set_partitioning(VP9_COMP *cpi, MODE_INFO **mi_8x8, |
+ int mi_row, int mi_col) { |
VP9_COMMON *const cm = &cpi->common; |
+ BLOCK_SIZE bsize = cpi->sf.always_this_block_size; |
const int mis = cm->mode_info_stride; |
+ int row8x8_remaining = cm->cur_tile_mi_row_end - mi_row; |
+ int col8x8_remaining = cm->cur_tile_mi_col_end - mi_col; |
int block_row, block_col; |
- for (block_row = 0; block_row < 8; ++block_row) { |
- for (block_col = 0; block_col < 8; ++block_col) { |
- m[block_row * mis + block_col].mbmi.sb_type = bsize; |
+ MODE_INFO *mi_upper_left = cm->mi + mi_row * mis + mi_col; |
+ int bh = num_8x8_blocks_high_lookup[bsize]; |
+ int bw = num_8x8_blocks_wide_lookup[bsize]; |
+ |
+ assert((row8x8_remaining > 0) && (col8x8_remaining > 0)); |
+ |
+ // Apply the requested partition size to the SB64 if it is all "in image" |
+ if ((col8x8_remaining >= MI_BLOCK_SIZE) && |
+ (row8x8_remaining >= MI_BLOCK_SIZE)) { |
+ for (block_row = 0; block_row < MI_BLOCK_SIZE; block_row += bh) { |
+ for (block_col = 0; block_col < MI_BLOCK_SIZE; block_col += bw) { |
+ int index = block_row * mis + block_col; |
+ mi_8x8[index] = mi_upper_left + index; |
+ mi_8x8[index]->mbmi.sb_type = bsize; |
+ } |
} |
+ } else { |
+ // Otherwise the SB64 is only partly in the image; pick sizes that fit. |
+ for (block_row = 0; block_row < MI_BLOCK_SIZE; block_row += bh) { |
+ for (block_col = 0; block_col < MI_BLOCK_SIZE; block_col += bw) { |
+ int index = block_row * mis + block_col; |
+ // Find a partition size that fits |
+ bsize = find_partition_size(cpi->sf.always_this_block_size, |
+ (row8x8_remaining - block_row), |
+ (col8x8_remaining - block_col), &bh, &bw); |
+ mi_8x8[index] = mi_upper_left + index; |
+ mi_8x8[index]->mbmi.sb_type = bsize; |
+ } |
+ } |
} |
} |
-static void copy_partitioning(VP9_COMP *cpi, MODE_INFO *m, MODE_INFO *p) { |
+ |
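+// Copy the previous frame's partitioning into the current mi grid. Entries |
+// with no previous mode info are left unchanged. |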
+static void copy_partitioning(VP9_COMP *cpi, MODE_INFO **mi_8x8, |
+ MODE_INFO **prev_mi_8x8) { |
VP9_COMMON *const cm = &cpi->common; |
const int mis = cm->mode_info_stride; |
int block_row, block_col; |
+ |
for (block_row = 0; block_row < 8; ++block_row) { |
for (block_col = 0; block_col < 8; ++block_col) { |
- m[block_row * mis + block_col].mbmi.sb_type = |
- p[block_row * mis + block_col].mbmi.sb_type; |
+ MODE_INFO *prev_mi = prev_mi_8x8[block_row * mis + block_col]; |
+ BLOCK_SIZE sb_type = prev_mi ? prev_mi->mbmi.sb_type : 0; |
+ int offset; |
+ |
+ if (prev_mi) { |
+ offset = prev_mi - cm->prev_mi; |
+ mi_8x8[block_row * mis + block_col] = cm->mi + offset; |
+ mi_8x8[block_row * mis + block_col]->mbmi.sb_type = sb_type; |
+ } |
} |
} |
} |
-static void set_block_size(VP9_COMMON * const cm, MODE_INFO *m, |
+static void set_block_size(VP9_COMMON * const cm, MODE_INFO **mi_8x8, |
BLOCK_SIZE bsize, int mis, int mi_row, |
int mi_col) { |
- int row, col; |
- int bwl = b_width_log2(bsize); |
- int bhl = b_height_log2(bsize); |
- int bsl = (bwl > bhl ? bwl : bhl); |
+ int r, c; |
+ const int bs = MAX(num_8x8_blocks_wide_lookup[bsize], |
+ num_8x8_blocks_high_lookup[bsize]); |
+ const int idx_str = mis * mi_row + mi_col; |
+ MODE_INFO **const mi2 = &mi_8x8[idx_str]; |
- int bs = (1 << bsl) / 2; // Block size in units of 8 pels. |
- MODE_INFO *m2 = m + mi_row * mis + mi_col; |
- for (row = 0; row < bs; row++) { |
- for (col = 0; col < bs; col++) { |
- if (mi_row + row >= cm->mi_rows || mi_col + col >= cm->mi_cols) |
- continue; |
- m2[row * mis + col].mbmi.sb_type = bsize; |
- } |
- } |
+ mi2[0] = cm->mi + idx_str; |
+ mi2[0]->mbmi.sb_type = bsize; |
+ |
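+ // Point every in-image 8x8 cell of this block at the same mode info. |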
+ for (r = 0; r < bs; r++) |
+ for (c = 0; c < bs; c++) |
+ if (mi_row + r < cm->mi_rows && mi_col + c < cm->mi_cols) |
+ mi2[r * mis + c] = mi2[0]; |
} |
typedef struct { |
@@ -1035,7 +1108,7 @@ |
#else // !PERFORM_RANDOM_PARTITIONING |
-static int set_vt_partitioning(VP9_COMP *cpi, void *data, MODE_INFO *m, |
+static int set_vt_partitioning(VP9_COMP *cpi, void *data, MODE_INFO **m, |
BLOCK_SIZE bsize, int mi_row, |
int mi_col, int mi_size) { |
VP9_COMMON * const cm = &cpi->common; |
@@ -1073,8 +1146,8 @@ |
} |
#endif // PERFORM_RANDOM_PARTITIONING |
-static void choose_partitioning(VP9_COMP *cpi, MODE_INFO *m, int mi_row, |
- int mi_col) { |
+static void choose_partitioning(VP9_COMP *cpi, MODE_INFO **mi_8x8, |
+ int mi_row, int mi_col) { |
VP9_COMMON * const cm = &cpi->common; |
MACROBLOCK *x = &cpi->mb; |
MACROBLOCKD *xd = &cpi->mb.e_mbd; |
@@ -1117,13 +1190,16 @@ |
&xd->scale_factor[0]); |
setup_pre_planes(xd, 1, second_ref_fb, mi_row, mi_col, |
&xd->scale_factor[1]); |
- xd->mode_info_context->mbmi.ref_frame[0] = LAST_FRAME; |
- xd->mode_info_context->mbmi.sb_type = BLOCK_64X64; |
- vp9_find_best_ref_mvs(xd, m->mbmi.ref_mvs[m->mbmi.ref_frame[0]], |
+ |
+ xd->this_mi->mbmi.ref_frame[0] = LAST_FRAME; |
+ xd->this_mi->mbmi.sb_type = BLOCK_64X64; |
+ vp9_find_best_ref_mvs(xd, |
+ mi_8x8[0]->mbmi.ref_mvs[mi_8x8[0]->mbmi.ref_frame[0]], |
&nearest_mv, &near_mv); |
- xd->mode_info_context->mbmi.mv[0] = nearest_mv; |
+ xd->this_mi->mbmi.mv[0] = nearest_mv; |
vp9_build_inter_predictors_sby(xd, mi_row, mi_col, BLOCK_64X64); |
+ |
d = xd->plane[0].dst.buf; |
dp = xd->plane[0].dst.stride; |
} |
@@ -1160,24 +1236,24 @@ |
// Now go through the entire structure, splitting every block size until |
// we get to one that's got a variance lower than our threshold, or we |
// hit 8x8. |
- if (!set_vt_partitioning(cpi, &vt, m, BLOCK_64X64, mi_row, mi_col, |
+ if (!set_vt_partitioning(cpi, &vt, mi_8x8, BLOCK_64X64, mi_row, mi_col, |
4)) { |
for (i = 0; i < 4; ++i) { |
const int x32_idx = ((i & 1) << 2); |
const int y32_idx = ((i >> 1) << 2); |
- if (!set_vt_partitioning(cpi, &vt.split[i], m, BLOCK_32X32, |
+ if (!set_vt_partitioning(cpi, &vt.split[i], mi_8x8, BLOCK_32X32, |
(mi_row + y32_idx), (mi_col + x32_idx), 2)) { |
for (j = 0; j < 4; ++j) { |
const int x16_idx = ((j & 1) << 1); |
const int y16_idx = ((j >> 1) << 1); |
- if (!set_vt_partitioning(cpi, &vt.split[i].split[j], m, |
+ if (!set_vt_partitioning(cpi, &vt.split[i].split[j], mi_8x8, |
BLOCK_16X16, |
(mi_row + y32_idx + y16_idx), |
(mi_col + x32_idx + x16_idx), 1)) { |
for (k = 0; k < 4; ++k) { |
const int x8_idx = (k & 1); |
const int y8_idx = (k >> 1); |
- set_block_size(cm, m, BLOCK_8X8, mis, |
+ set_block_size(cm, mi_8x8, BLOCK_8X8, mis, |
(mi_row + y32_idx + y16_idx + y8_idx), |
(mi_col + x32_idx + x16_idx + x8_idx)); |
} |
@@ -1188,9 +1264,10 @@ |
} |
} |
-static void rd_use_partition(VP9_COMP *cpi, MODE_INFO *m, TOKENEXTRA **tp, |
- int mi_row, int mi_col, BLOCK_SIZE bsize, |
- int *rate, int64_t *dist, int do_recon) { |
+static void rd_use_partition(VP9_COMP *cpi, MODE_INFO **mi_8x8, |
+ TOKENEXTRA **tp, int mi_row, int mi_col, |
+ BLOCK_SIZE bsize, int *rate, int64_t *dist, |
+ int do_recon) { |
VP9_COMMON * const cm = &cpi->common; |
MACROBLOCK * const x = &cpi->mb; |
MACROBLOCKD *xd = &cpi->mb.e_mbd; |
@@ -1216,7 +1293,7 @@ |
int64_t chosen_dist = INT_MAX; |
BLOCK_SIZE sub_subsize = BLOCK_4X4; |
int splits_below = 0; |
- BLOCK_SIZE bs_type = m->mbmi.sb_type; |
+ BLOCK_SIZE bs_type = mi_8x8[0]->mbmi.sb_type; |
if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) |
return; |
@@ -1249,7 +1326,8 @@ |
splits_below = 1; |
for (i = 0; i < 4; i++) { |
int jj = i >> 1, ii = i & 0x01; |
- if (m[jj * bss * mis + ii * bss].mbmi.sb_type >= sub_subsize) { |
+ MODE_INFO *this_mi = mi_8x8[jj * bss * mis + ii * bss]; |
+ if (this_mi && this_mi->mbmi.sb_type >= sub_subsize) { |
splits_below = 0; |
} |
} |
@@ -1269,7 +1347,7 @@ |
none_rate += x->partition_cost[pl][PARTITION_NONE]; |
restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize); |
- m->mbmi.sb_type = bs_type; |
+ mi_8x8[0]->mbmi.sb_type = bs_type; |
*(get_sb_partitioning(x, bsize)) = subsize; |
} |
} |
@@ -1280,7 +1358,7 @@ |
bsize, get_block_context(x, bsize), INT64_MAX); |
break; |
case PARTITION_HORZ: |
- *(get_sb_index(xd, subsize)) = 0; |
+ *get_sb_index(xd, subsize) = 0; |
pick_sb_modes(cpi, mi_row, mi_col, &last_part_rate, &last_part_dist, |
subsize, get_block_context(x, subsize), INT64_MAX); |
if (last_part_rate != INT_MAX && |
@@ -1289,7 +1367,7 @@ |
int64_t dt = 0; |
update_state(cpi, get_block_context(x, subsize), subsize, 0); |
encode_superblock(cpi, tp, 0, mi_row, mi_col, subsize); |
- *(get_sb_index(xd, subsize)) = 1; |
+ *get_sb_index(xd, subsize) = 1; |
pick_sb_modes(cpi, mi_row + (ms >> 1), mi_col, &rt, &dt, subsize, |
get_block_context(x, subsize), INT64_MAX); |
if (rt == INT_MAX || dt == INT_MAX) { |
@@ -1303,7 +1381,7 @@ |
} |
break; |
case PARTITION_VERT: |
- *(get_sb_index(xd, subsize)) = 0; |
+ *get_sb_index(xd, subsize) = 0; |
pick_sb_modes(cpi, mi_row, mi_col, &last_part_rate, &last_part_dist, |
subsize, get_block_context(x, subsize), INT64_MAX); |
if (last_part_rate != INT_MAX && |
@@ -1312,7 +1390,7 @@ |
int64_t dt = 0; |
update_state(cpi, get_block_context(x, subsize), subsize, 0); |
encode_superblock(cpi, tp, 0, mi_row, mi_col, subsize); |
- *(get_sb_index(xd, subsize)) = 1; |
+ *get_sb_index(xd, subsize) = 1; |
pick_sb_modes(cpi, mi_row, mi_col + (ms >> 1), &rt, &dt, subsize, |
get_block_context(x, subsize), INT64_MAX); |
if (rt == INT_MAX || dt == INT_MAX) { |
@@ -1338,10 +1416,11 @@ |
if ((mi_row + y_idx >= cm->mi_rows) || (mi_col + x_idx >= cm->mi_cols)) |
continue; |
- *(get_sb_index(xd, subsize)) = i; |
+ *get_sb_index(xd, subsize) = i; |
- rd_use_partition(cpi, m + jj * bss * mis + ii * bss, tp, mi_row + y_idx, |
- mi_col + x_idx, subsize, &rt, &dt, i != 3); |
+ rd_use_partition(cpi, mi_8x8 + jj * bss * mis + ii * bss, tp, |
+ mi_row + y_idx, mi_col + x_idx, subsize, &rt, &dt, |
+ i != 3); |
if (rt == INT_MAX || dt == INT_MAX) { |
last_part_rate = INT_MAX; |
last_part_dist = INT_MAX; |
@@ -1381,9 +1460,9 @@ |
|| (mi_col + x_idx >= cm->mi_cols)) |
continue; |
- *(get_sb_index(xd, split_subsize)) = i; |
- *(get_sb_partitioning(x, bsize)) = split_subsize; |
- *(get_sb_partitioning(x, split_subsize)) = split_subsize; |
+ *get_sb_index(xd, split_subsize) = i; |
+ *get_sb_partitioning(x, bsize) = split_subsize; |
+ *get_sb_partitioning(x, split_subsize) = split_subsize; |
save_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize); |
@@ -1422,7 +1501,7 @@ |
// If last_part is better set the partitioning to that... |
if (RDCOST(x->rdmult, x->rddiv, last_part_rate, last_part_dist) |
< RDCOST(x->rdmult, x->rddiv, chosen_rate, chosen_dist)) { |
- m->mbmi.sb_type = bsize; |
+ mi_8x8[0]->mbmi.sb_type = bsize; |
if (bsize >= BLOCK_8X8) |
*(get_sb_partitioning(x, bsize)) = subsize; |
chosen_rate = last_part_rate; |
@@ -1470,9 +1549,9 @@ |
// |
// The min and max are assumed to have been initialized prior to calling this |
// function so repeat calls can accumulate a min and max of more than one sb64. |
-static void get_sb_partition_size_range(VP9_COMP *cpi, MODE_INFO * mi, |
- BLOCK_SIZE *min_block_size, |
- BLOCK_SIZE *max_block_size ) { |
+static void get_sb_partition_size_range(VP9_COMP *cpi, MODE_INFO **mi_8x8, |
+ BLOCK_SIZE *min_block_size, |
+ BLOCK_SIZE *max_block_size) { |
MACROBLOCKD *const xd = &cpi->mb.e_mbd; |
int sb_width_in_blocks = MI_BLOCK_SIZE; |
int sb_height_in_blocks = MI_BLOCK_SIZE; |
@@ -1482,8 +1561,10 @@ |
// Check the sb_type for each block that belongs to this region. |
for (i = 0; i < sb_height_in_blocks; ++i) { |
for (j = 0; j < sb_width_in_blocks; ++j) { |
- *min_block_size = MIN(*min_block_size, mi[index + j].mbmi.sb_type); |
- *max_block_size = MAX(*max_block_size, mi[index + j].mbmi.sb_type); |
+ MODE_INFO *mi = mi_8x8[index + j]; |
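+ // A missing entry counts as the smallest size (BLOCK_4X4 == 0). |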
+ BLOCK_SIZE sb_type = mi ? mi->mbmi.sb_type : 0; |
+ *min_block_size = MIN(*min_block_size, sb_type); |
+ *max_block_size = MAX(*max_block_size, sb_type); |
} |
index += xd->mode_info_stride; |
} |
@@ -1491,17 +1572,16 @@ |
// Look at neighboring blocks and set a min and max partition size based on |
// what they chose. |
-static void rd_auto_partition_range(VP9_COMP *cpi, |
+static void rd_auto_partition_range(VP9_COMP *cpi, int row, int col, |
BLOCK_SIZE *min_block_size, |
BLOCK_SIZE *max_block_size) { |
MACROBLOCKD *const xd = &cpi->mb.e_mbd; |
- MODE_INFO *mi = xd->mode_info_context; |
- MODE_INFO *above_sb64_mi; |
- MODE_INFO *left_sb64_mi; |
- const MB_MODE_INFO *const above_mbmi = &mi[-xd->mode_info_stride].mbmi; |
- const MB_MODE_INFO *const left_mbmi = &mi[-1].mbmi; |
- const int left_in_image = xd->left_available && left_mbmi->in_image; |
- const int above_in_image = xd->up_available && above_mbmi->in_image; |
+ MODE_INFO **mi_8x8 = xd->mi_8x8; |
+ const int left_in_image = xd->left_available && mi_8x8[-1]; |
+ const int above_in_image = xd->up_available && |
+ mi_8x8[-xd->mode_info_stride]; |
+ MODE_INFO **above_sb64_mi_8x8; |
+ MODE_INFO **left_sb64_mi_8x8; |
// Frequency check |
if (cpi->sf.auto_min_max_partition_count <= 0) { |
@@ -1509,38 +1589,48 @@ |
cpi->sf.auto_min_max_partition_interval; |
*min_block_size = BLOCK_4X4; |
*max_block_size = BLOCK_64X64; |
- return; |
} else { |
--cpi->sf.auto_min_max_partition_count; |
- } |
- // Set default values if not left or above neighbour |
- if (!left_in_image && !above_in_image) { |
- *min_block_size = BLOCK_4X4; |
- *max_block_size = BLOCK_64X64; |
- } else { |
- // Default "min to max" and "max to min" |
- *min_block_size = BLOCK_64X64; |
- *max_block_size = BLOCK_4X4; |
+ // Set default values if no left or above neighbour |
+ if (!left_in_image && !above_in_image) { |
+ *min_block_size = BLOCK_4X4; |
+ *max_block_size = BLOCK_64X64; |
+ } else { |
+ VP9_COMMON *const cm = &cpi->common; |
+ int row8x8_remaining = cm->cur_tile_mi_row_end - row; |
+ int col8x8_remaining = cm->cur_tile_mi_col_end - col; |
+ int bh, bw; |
- // Find the min and max partition sizes used in the left SB64 |
- if (left_in_image) { |
- left_sb64_mi = &mi[-MI_BLOCK_SIZE]; |
- get_sb_partition_size_range(cpi, left_sb64_mi, |
- min_block_size, max_block_size); |
- } |
+ // Start min at the largest size and max at the smallest, then tighten. |
+ *min_block_size = BLOCK_64X64; |
+ *max_block_size = BLOCK_4X4; |
- // Find the min and max partition sizes used in the above SB64 taking |
- // the values found for left as a starting point. |
- if (above_in_image) { |
- above_sb64_mi = &mi[-xd->mode_info_stride * MI_BLOCK_SIZE]; |
- get_sb_partition_size_range(cpi, above_sb64_mi, |
- min_block_size, max_block_size); |
+ // Find the min and max partition sizes used in the left SB64 |
+ if (left_in_image) { |
+ left_sb64_mi_8x8 = &mi_8x8[-MI_BLOCK_SIZE]; |
+ get_sb_partition_size_range(cpi, left_sb64_mi_8x8, |
+ min_block_size, max_block_size); |
+ } |
+ |
+ // Find the min and max partition sizes used in the above SB64 taking |
+ // the values found for left as a starting point. |
+ if (above_in_image) { |
+ above_sb64_mi_8x8 = &mi_8x8[-xd->mode_info_stride * MI_BLOCK_SIZE]; |
+ get_sb_partition_size_range(cpi, above_sb64_mi_8x8, |
+ min_block_size, max_block_size); |
+ } |
+ |
+ // Give a bit of leeway on either side of the observed min and max. |
+ *min_block_size = min_partition_size[*min_block_size]; |
+ *max_block_size = max_partition_size[*max_block_size]; |
+ |
+ // Check border cases where max and min from neighbours may not be legal. |
+ *max_block_size = find_partition_size(*max_block_size, |
+ row8x8_remaining, col8x8_remaining, |
+ &bh, &bw); |
+ *min_block_size = MIN(*min_block_size, *max_block_size); |
} |
- |
- // give a bit of leaway either side of the observed min and max |
- *min_block_size = min_partition_size[*min_block_size]; |
- *max_block_size = max_partition_size[*max_block_size]; |
} |
} |
@@ -1648,8 +1738,7 @@ |
VP9_COMMON * const cm = &cpi->common; |
MACROBLOCK * const x = &cpi->mb; |
MACROBLOCKD * const xd = &x->e_mbd; |
- int bsl = b_width_log2(bsize), bs = 1 << bsl; |
- int ms = bs / 2; |
+ const int ms = num_8x8_blocks_wide_lookup[bsize] / 2; |
ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE]; |
PARTITION_CONTEXT sl[8], sa[8]; |
TOKENEXTRA *tp_orig = *tp; |
@@ -1661,8 +1750,8 @@ |
int do_split = bsize >= BLOCK_8X8; |
int do_rect = 1; |
// Override skipping rectangular partition operations for edge blocks |
- const int force_horz_split = (mi_row + (ms >> 1) >= cm->mi_rows); |
- const int force_vert_split = (mi_col + (ms >> 1) >= cm->mi_cols); |
+ const int force_horz_split = (mi_row + ms >= cm->mi_rows); |
+ const int force_vert_split = (mi_col + ms >= cm->mi_cols); |
int partition_none_allowed = !force_horz_split && !force_vert_split; |
int partition_horz_allowed = !force_vert_split && bsize >= BLOCK_8X8; |
@@ -1725,11 +1814,24 @@ |
} |
sum_rd = RDCOST(x->rdmult, x->rddiv, this_rate, this_dist); |
if (sum_rd < best_rd) { |
+ int64_t stop_thresh = 2048; |
+ |
best_rate = this_rate; |
best_dist = this_dist; |
best_rd = sum_rd; |
if (bsize >= BLOCK_8X8) |
*(get_sb_partitioning(x, bsize)) = bsize; |
+ |
+ // Adjust threshold according to partition size. |
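+ // (Roughly 2048 for a 64x64 block, scaling down to 32 for an 8x8 block.) |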
+ stop_thresh >>= 8 - (b_width_log2_lookup[bsize] + |
+ b_height_log2_lookup[bsize]); |
+ |
+ // If the distortion obtained is very small, choose the current partition |
+ // size and stop splitting. |
+ if (this_dist < stop_thresh) { |
+ do_split = 0; |
+ do_rect = 0; |
+ } |
} |
} |
restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize); |
@@ -1742,14 +1844,13 @@ |
if (do_split) { |
subsize = get_subsize(bsize, PARTITION_SPLIT); |
for (i = 0; i < 4 && sum_rd < best_rd; ++i) { |
- int x_idx = (i & 1) * (ms >> 1); |
- int y_idx = (i >> 1) * (ms >> 1); |
+ const int x_idx = (i & 1) * ms; |
+ const int y_idx = (i >> 1) * ms; |
- if ((mi_row + y_idx >= cm->mi_rows) || |
- (mi_col + x_idx >= cm->mi_cols)) |
+ if (mi_row + y_idx >= cm->mi_rows || mi_col + x_idx >= cm->mi_cols) |
continue; |
- *(get_sb_index(xd, subsize)) = i; |
+ *get_sb_index(xd, subsize) = i; |
rd_pick_partition(cpi, tp, mi_row + y_idx, mi_col + x_idx, subsize, |
&this_rate, &this_dist, i != 3, best_rd - sum_rd); |
@@ -1795,17 +1896,17 @@ |
// PARTITION_HORZ |
if (partition_horz_allowed && do_rect) { |
subsize = get_subsize(bsize, PARTITION_HORZ); |
- *(get_sb_index(xd, subsize)) = 0; |
+ *get_sb_index(xd, subsize) = 0; |
pick_sb_modes(cpi, mi_row, mi_col, &sum_rate, &sum_dist, subsize, |
get_block_context(x, subsize), best_rd); |
sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist); |
- if (sum_rd < best_rd && mi_row + (ms >> 1) < cm->mi_rows) { |
+ if (sum_rd < best_rd && mi_row + ms < cm->mi_rows) { |
update_state(cpi, get_block_context(x, subsize), subsize, 0); |
encode_superblock(cpi, tp, 0, mi_row, mi_col, subsize); |
- *(get_sb_index(xd, subsize)) = 1; |
- pick_sb_modes(cpi, mi_row + (ms >> 1), mi_col, &this_rate, |
+ *get_sb_index(xd, subsize) = 1; |
+ pick_sb_modes(cpi, mi_row + ms, mi_col, &this_rate, |
&this_dist, subsize, get_block_context(x, subsize), |
best_rd - sum_rd); |
if (this_rate == INT_MAX) { |
@@ -1835,16 +1936,16 @@ |
if (partition_vert_allowed && do_rect) { |
subsize = get_subsize(bsize, PARTITION_VERT); |
- *(get_sb_index(xd, subsize)) = 0; |
+ *get_sb_index(xd, subsize) = 0; |
pick_sb_modes(cpi, mi_row, mi_col, &sum_rate, &sum_dist, subsize, |
get_block_context(x, subsize), best_rd); |
sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist); |
- if (sum_rd < best_rd && mi_col + (ms >> 1) < cm->mi_cols) { |
+ if (sum_rd < best_rd && mi_col + ms < cm->mi_cols) { |
update_state(cpi, get_block_context(x, subsize), subsize, 0); |
encode_superblock(cpi, tp, 0, mi_row, mi_col, subsize); |
- *(get_sb_index(xd, subsize)) = 1; |
- pick_sb_modes(cpi, mi_row, mi_col + (ms >> 1), &this_rate, |
+ *get_sb_index(xd, subsize) = 1; |
+ pick_sb_modes(cpi, mi_row, mi_col + ms, &this_rate, |
&this_dist, subsize, get_block_context(x, subsize), |
best_rd - sum_rd); |
if (this_rate == INT_MAX) { |
@@ -1935,31 +2036,24 @@ |
int dummy_rate; |
int64_t dummy_dist; |
- // Initialize a mask of modes that we will not consider; |
- // cpi->unused_mode_skip_mask = 0x0000000AAE17F800 (test no golden) |
- if (cpi->common.frame_type == KEY_FRAME) |
- cpi->unused_mode_skip_mask = 0; |
- else |
- cpi->unused_mode_skip_mask = 0xFFFFFFFFFFFFFE00; |
- |
if (cpi->sf.reference_masking) |
rd_pick_reference_frame(cpi, mi_row, mi_col); |
if (cpi->sf.partition_by_variance || cpi->sf.use_lastframe_partitioning || |
cpi->sf.use_one_partition_size_always ) { |
const int idx_str = cm->mode_info_stride * mi_row + mi_col; |
- MODE_INFO *m = cm->mi + idx_str; |
- MODE_INFO *p = cm->prev_mi + idx_str; |
+ MODE_INFO **mi_8x8 = cm->mi_grid_visible + idx_str; |
+ MODE_INFO **prev_mi_8x8 = cm->prev_mi_grid_visible + idx_str; |
cpi->mb.source_variance = UINT_MAX; |
if (cpi->sf.use_one_partition_size_always) { |
set_offsets(cpi, mi_row, mi_col, BLOCK_64X64); |
- set_partitioning(cpi, m, cpi->sf.always_this_block_size); |
- rd_use_partition(cpi, m, tp, mi_row, mi_col, BLOCK_64X64, |
+ set_partitioning(cpi, mi_8x8, mi_row, mi_col); |
+ rd_use_partition(cpi, mi_8x8, tp, mi_row, mi_col, BLOCK_64X64, |
&dummy_rate, &dummy_dist, 1); |
} else if (cpi->sf.partition_by_variance) { |
- choose_partitioning(cpi, cm->mi, mi_row, mi_col); |
- rd_use_partition(cpi, m, tp, mi_row, mi_col, BLOCK_64X64, |
+ choose_partitioning(cpi, cm->mi_grid_visible, mi_row, mi_col); |
+ rd_use_partition(cpi, mi_8x8, tp, mi_row, mi_col, BLOCK_64X64, |
&dummy_rate, &dummy_dist, 1); |
} else { |
if ((cpi->common.current_video_frame |
@@ -1971,15 +2065,15 @@ |
// If required set upper and lower partition size limits |
if (cpi->sf.auto_min_max_partition_size) { |
set_offsets(cpi, mi_row, mi_col, BLOCK_64X64); |
- rd_auto_partition_range(cpi, |
+ rd_auto_partition_range(cpi, mi_row, mi_col, |
&cpi->sf.min_partition_size, |
&cpi->sf.max_partition_size); |
} |
rd_pick_partition(cpi, tp, mi_row, mi_col, BLOCK_64X64, |
&dummy_rate, &dummy_dist, 1, INT64_MAX); |
} else { |
- copy_partitioning(cpi, m, p); |
- rd_use_partition(cpi, m, tp, mi_row, mi_col, BLOCK_64X64, |
+ copy_partitioning(cpi, mi_8x8, prev_mi_8x8); |
+ rd_use_partition(cpi, mi_8x8, tp, mi_row, mi_col, BLOCK_64X64, |
&dummy_rate, &dummy_dist, 1); |
} |
} |
@@ -1987,10 +2081,10 @@ |
// If required set upper and lower partition size limits |
if (cpi->sf.auto_min_max_partition_size) { |
set_offsets(cpi, mi_row, mi_col, BLOCK_64X64); |
- rd_auto_partition_range(cpi, &cpi->sf.min_partition_size, |
+ rd_auto_partition_range(cpi, mi_row, mi_col, |
+ &cpi->sf.min_partition_size, |
&cpi->sf.max_partition_size); |
} |
- |
rd_pick_partition(cpi, tp, mi_row, mi_col, BLOCK_64X64, |
&dummy_rate, &dummy_dist, 1, INT64_MAX); |
} |
@@ -2022,8 +2116,8 @@ |
setup_block_dptrs(&x->e_mbd, cm->subsampling_x, cm->subsampling_y); |
- xd->mode_info_context->mbmi.mode = DC_PRED; |
- xd->mode_info_context->mbmi.uv_mode = DC_PRED; |
+ xd->this_mi->mbmi.mode = DC_PRED; |
+ xd->this_mi->mbmi.uv_mode = DC_PRED; |
vp9_zero(cpi->y_mode_count) |
vp9_zero(cpi->y_uv_mode_count) |
@@ -2099,9 +2193,15 @@ |
vp9_zero(cm->counts.switchable_interp); |
vp9_zero(cpi->txfm_stepdown_count); |
- xd->mode_info_context = cm->mi; |
- xd->prev_mode_info_context = cm->prev_mi; |
+ xd->mi_8x8 = cm->mi_grid_visible; |
+ // required for vp9_frame_init_quantizer |
+ xd->this_mi = |
+ xd->mi_8x8[0] = cm->mi; |
+ xd->mic_stream_ptr = cm->mi; |
+ xd->last_mi = cm->prev_mi; |
+ |
vp9_zero(cpi->NMVcount); |
vp9_zero(cpi->coef_counts); |
vp9_zero(cm->counts.eob_branch); |
@@ -2203,12 +2303,12 @@ |
} |
} |
-static int get_skip_flag(MODE_INFO *mi, int mis, int ymbs, int xmbs) { |
+static int get_skip_flag(MODE_INFO **mi_8x8, int mis, int ymbs, int xmbs) { |
int x, y; |
for (y = 0; y < ymbs; y++) { |
for (x = 0; x < xmbs; x++) { |
- if (!mi[y * mis + x].mbmi.skip_coeff) |
+ if (!mi_8x8[y * mis + x]->mbmi.skip_coeff) |
return 0; |
} |
} |
@@ -2216,42 +2316,39 @@ |
return 1; |
} |
-static void set_txfm_flag(MODE_INFO *mi, int mis, int ymbs, int xmbs, |
- TX_SIZE txfm_size) { |
+static void set_txfm_flag(MODE_INFO **mi_8x8, int mis, int ymbs, int xmbs, |
+ TX_SIZE tx_size) { |
int x, y; |
for (y = 0; y < ymbs; y++) { |
for (x = 0; x < xmbs; x++) |
- mi[y * mis + x].mbmi.txfm_size = txfm_size; |
+ mi_8x8[y * mis + x]->mbmi.tx_size = tx_size; |
} |
} |
-static void reset_skip_txfm_size_b(VP9_COMP *cpi, MODE_INFO *mi, int mis, |
- TX_SIZE max_tx_size, int bw, int bh, |
+static void reset_skip_txfm_size_b(VP9_COMP *cpi, MODE_INFO **mi_8x8, |
+ int mis, TX_SIZE max_tx_size, int bw, int bh, |
int mi_row, int mi_col, BLOCK_SIZE bsize) { |
- VP9_COMMON *const cm = &cpi->common; |
- MB_MODE_INFO *const mbmi = &mi->mbmi; |
+ VP9_COMMON *const cm = &cpi->common; |
+ MB_MODE_INFO *const mbmi = &mi_8x8[0]->mbmi; |
if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) |
return; |
- if (mbmi->txfm_size > max_tx_size) { |
- MACROBLOCK * const x = &cpi->mb; |
- MACROBLOCKD * const xd = &x->e_mbd; |
+ if (mbmi->tx_size > max_tx_size) { |
const int ymbs = MIN(bh, cm->mi_rows - mi_row); |
const int xmbs = MIN(bw, cm->mi_cols - mi_col); |
- xd->mode_info_context = mi; |
assert(vp9_segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP) || |
- get_skip_flag(mi, mis, ymbs, xmbs)); |
- set_txfm_flag(mi, mis, ymbs, xmbs, max_tx_size); |
+ get_skip_flag(mi_8x8, mis, ymbs, xmbs)); |
+ set_txfm_flag(mi_8x8, mis, ymbs, xmbs, max_tx_size); |
} |
} |
-static void reset_skip_txfm_size_sb(VP9_COMP *cpi, MODE_INFO *mi, |
+static void reset_skip_txfm_size_sb(VP9_COMP *cpi, MODE_INFO **mi_8x8, |
TX_SIZE max_tx_size, int mi_row, int mi_col, |
BLOCK_SIZE bsize) { |
- const VP9_COMMON *const cm = &cpi->common; |
+ VP9_COMMON *const cm = &cpi->common; |
const int mis = cm->mode_info_stride; |
int bw, bh; |
const int bs = num_8x8_blocks_wide_lookup[bsize], hbs = bs / 2; |
@@ -2259,22 +2356,23 @@ |
if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) |
return; |
- bw = num_8x8_blocks_wide_lookup[mi->mbmi.sb_type]; |
- bh = num_8x8_blocks_high_lookup[mi->mbmi.sb_type]; |
+ bw = num_8x8_blocks_wide_lookup[mi_8x8[0]->mbmi.sb_type]; |
+ bh = num_8x8_blocks_high_lookup[mi_8x8[0]->mbmi.sb_type]; |
if (bw == bs && bh == bs) { |
- reset_skip_txfm_size_b(cpi, mi, mis, max_tx_size, bs, bs, mi_row, |
+ reset_skip_txfm_size_b(cpi, mi_8x8, mis, max_tx_size, bs, bs, mi_row, |
mi_col, bsize); |
} else if (bw == bs && bh < bs) { |
- reset_skip_txfm_size_b(cpi, mi, mis, max_tx_size, bs, hbs, mi_row, mi_col, |
- bsize); |
- reset_skip_txfm_size_b(cpi, mi + hbs * mis, mis, max_tx_size, bs, hbs, |
+ reset_skip_txfm_size_b(cpi, mi_8x8, mis, max_tx_size, bs, hbs, mi_row, |
+ mi_col, bsize); |
+ reset_skip_txfm_size_b(cpi, mi_8x8 + hbs * mis, mis, max_tx_size, bs, hbs, |
mi_row + hbs, mi_col, bsize); |
} else if (bw < bs && bh == bs) { |
- reset_skip_txfm_size_b(cpi, mi, mis, max_tx_size, hbs, bs, mi_row, mi_col, |
- bsize); |
- reset_skip_txfm_size_b(cpi, mi + hbs, mis, max_tx_size, hbs, bs, mi_row, |
+ reset_skip_txfm_size_b(cpi, mi_8x8, mis, max_tx_size, hbs, bs, mi_row, |
+ mi_col, bsize); |
+ reset_skip_txfm_size_b(cpi, mi_8x8 + hbs, mis, max_tx_size, hbs, bs, mi_row, |
mi_col + hbs, bsize); |
} else { |
const BLOCK_SIZE subsize = subsize_lookup[PARTITION_SPLIT][bsize]; |
int n; |
@@ -2285,7 +2383,7 @@ |
const int mi_dc = hbs * (n & 1); |
const int mi_dr = hbs * (n >> 1); |
- reset_skip_txfm_size_sb(cpi, &mi[mi_dr * mis + mi_dc], max_tx_size, |
+ reset_skip_txfm_size_sb(cpi, &mi_8x8[mi_dr * mis + mi_dc], max_tx_size, |
mi_row + mi_dr, mi_col + mi_dc, subsize); |
} |
} |
@@ -2295,12 +2393,15 @@ |
VP9_COMMON * const cm = &cpi->common; |
int mi_row, mi_col; |
const int mis = cm->mode_info_stride; |
- MODE_INFO *mi, *mi_ptr = cm->mi; |
+ MODE_INFO **mi_8x8, **mi_ptr = cm->mi_grid_visible; |
for (mi_row = 0; mi_row < cm->mi_rows; mi_row += 8, mi_ptr += 8 * mis) { |
- mi = mi_ptr; |
- for (mi_col = 0; mi_col < cm->mi_cols; mi_col += 8, mi += 8) |
- reset_skip_txfm_size_sb(cpi, mi, txfm_max, mi_row, mi_col, BLOCK_64X64); |
+ mi_8x8 = mi_ptr; |
+ for (mi_col = 0; mi_col < cm->mi_cols; mi_col += 8, mi_8x8 += 8) { |
+ reset_skip_txfm_size_sb(cpi, mi_8x8, txfm_max, mi_row, mi_col, |
+ BLOCK_64X64); |
+ } |
} |
} |
@@ -2551,13 +2652,13 @@ |
x->act_zbin_adj = 1 - (int) (((int64_t) a + (b >> 1)) / b); |
#endif |
} |
- |
static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t, int output_enabled, |
int mi_row, int mi_col, BLOCK_SIZE bsize) { |
VP9_COMMON * const cm = &cpi->common; |
MACROBLOCK * const x = &cpi->mb; |
MACROBLOCKD * const xd = &x->e_mbd; |
- MODE_INFO *mi = xd->mode_info_context; |
+ MODE_INFO **mi_8x8 = xd->mi_8x8; |
+ MODE_INFO *mi = mi_8x8[0]; |
MB_MODE_INFO *mbmi = &mi->mbmi; |
unsigned int segment_id = mbmi->segment_id; |
const int mis = cm->mode_info_stride; |
@@ -2636,8 +2737,8 @@ |
vp9_encode_sb(x, MAX(bsize, BLOCK_8X8)); |
vp9_tokenize_sb(cpi, t, !output_enabled, MAX(bsize, BLOCK_8X8)); |
} else { |
- int mb_skip_context = xd->left_available ? (mi - 1)->mbmi.skip_coeff : 0; |
- mb_skip_context += (mi - mis)->mbmi.skip_coeff; |
+ int mb_skip_context = xd->left_available ? mi_8x8[-1]->mbmi.skip_coeff : 0; |
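+ // The entry above may be a NULL pointer (e.g. above the frame border), so |
+ // guard the dereference. |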
+ mb_skip_context += mi_8x8[-mis] ? mi_8x8[-mis]->mbmi.skip_coeff : 0; |
mbmi->skip_coeff = 1; |
if (output_enabled) |
@@ -2645,10 +2746,6 @@ |
reset_skip_context(xd, MAX(bsize, BLOCK_8X8)); |
} |
- // copy skip flag on all mb_mode_info contexts in this SB |
- // if this was a skip at this txfm size |
- vp9_set_pred_flag_mbskip(cm, bsize, mi_row, mi_col, mi->mbmi.skip_coeff); |
- |
if (output_enabled) { |
if (cm->tx_mode == TX_MODE_SELECT && |
mbmi->sb_type >= BLOCK_8X8 && |
@@ -2656,10 +2753,12 @@ |
(mbmi->skip_coeff || |
vp9_segfeature_active(&cm->seg, segment_id, SEG_LVL_SKIP)))) { |
const uint8_t context = vp9_get_pred_context_tx_size(xd); |
- update_tx_counts(bsize, context, mbmi->txfm_size, &cm->counts.tx); |
+ update_tx_counts(bsize, context, mbmi->tx_size, &cm->counts.tx); |
} else { |
int x, y; |
- TX_SIZE sz = (cm->tx_mode == TX_MODE_SELECT) ? TX_32X32 : cm->tx_mode; |
+ TX_SIZE sz = tx_mode_to_biggest_tx_size[cm->tx_mode]; |
+ assert(sizeof(tx_mode_to_biggest_tx_size) / |
+ sizeof(tx_mode_to_biggest_tx_size[0]) == TX_MODES); |
// The new intra coding scheme requires no change of transform size |
if (is_inter_block(&mi->mbmi)) { |
if (sz == TX_32X32 && bsize < BLOCK_32X32) |
@@ -2669,18 +2768,15 @@ |
if (sz == TX_8X8 && bsize < BLOCK_8X8) |
sz = TX_4X4; |
} else if (bsize >= BLOCK_8X8) { |
- sz = mbmi->txfm_size; |
+ sz = mbmi->tx_size; |
} else { |
sz = TX_4X4; |
} |
- for (y = 0; y < mi_height; y++) { |
- for (x = 0; x < mi_width; x++) { |
- if (mi_col + x < cm->mi_cols && mi_row + y < cm->mi_rows) { |
- mi[mis * y + x].mbmi.txfm_size = sz; |
- } |
- } |
- } |
+ for (y = 0; y < mi_height; y++) |
+ for (x = 0; x < mi_width; x++) |
+ if (mi_col + x < cm->mi_cols && mi_row + y < cm->mi_rows) |
+ mi_8x8[mis * y + x]->mbmi.tx_size = sz; |
} |
} |
} |