Chromium Code Reviews

Unified Diff: source/libvpx/vp9/encoder/vp9_bitstream.c

Issue 23530058: libvpx: Pull from upstream (Closed) Base URL: svn://chrome-svn/chrome/trunk/deps/third_party/libvpx/
Patch Set: Created 7 years, 3 months ago
Index: source/libvpx/vp9/encoder/vp9_bitstream.c
===================================================================
--- source/libvpx/vp9/encoder/vp9_bitstream.c (revision 223100)
+++ source/libvpx/vp9/encoder/vp9_bitstream.c (working copy)
@@ -198,10 +198,11 @@
(unsigned int *)cpi->y_mode_count[j]);
}
-static void write_selected_tx_size(const VP9_COMP *cpi, TX_SIZE tx_size,
- BLOCK_SIZE bsize, vp9_writer *w) {
+static void write_selected_tx_size(const VP9_COMP *cpi, MODE_INFO *m,
+ TX_SIZE tx_size, BLOCK_SIZE bsize,
+ vp9_writer *w) {
const MACROBLOCKD *const xd = &cpi->mb.e_mbd;
- const vp9_prob *tx_probs = get_tx_probs2(xd, &cpi->common.fc.tx_probs);
+ const vp9_prob *tx_probs = get_tx_probs2(xd, &cpi->common.fc.tx_probs, m);
vp9_write(w, tx_size != TX_4X4, tx_probs[0]);
if (bsize >= BLOCK_16X16 && tx_size != TX_4X4) {
vp9_write(w, tx_size != TX_8X8, tx_probs[1]);
@@ -237,7 +238,7 @@
static void update_switchable_interp_probs(VP9_COMP *const cpi,
vp9_writer* const bc) {
- VP9_COMMON *const pc = &cpi->common;
+ VP9_COMMON *const cm = &cpi->common;
unsigned int branch_ct[SWITCHABLE_FILTERS + 1]
[SWITCHABLE_FILTERS - 1][2];
vp9_prob new_prob[SWITCHABLE_FILTERS + 1][SWITCHABLE_FILTERS - 1];
@@ -246,21 +247,21 @@
vp9_tree_probs_from_distribution(
vp9_switchable_interp_tree,
new_prob[j], branch_ct[j],
- pc->counts.switchable_interp[j], 0);
+ cm->counts.switchable_interp[j], 0);
}
for (j = 0; j <= SWITCHABLE_FILTERS; ++j) {
for (i = 0; i < SWITCHABLE_FILTERS - 1; ++i) {
- vp9_cond_prob_diff_update(bc, &pc->fc.switchable_interp_prob[j][i],
+ vp9_cond_prob_diff_update(bc, &cm->fc.switchable_interp_prob[j][i],
MODE_UPDATE_PROB, branch_ct[j][i]);
}
}
#ifdef MODE_STATS
if (!cpi->dummy_packing)
- update_switchable_interp_stats(pc);
+ update_switchable_interp_stats(cm);
#endif
}
-static void update_inter_mode_probs(VP9_COMMON *pc, vp9_writer* const bc) {
+static void update_inter_mode_probs(VP9_COMMON *cm, vp9_writer* const bc) {
int i, j;
for (i = 0; i < INTER_MODE_CONTEXTS; ++i) {
@@ -269,10 +270,10 @@
vp9_tree_probs_from_distribution(vp9_inter_mode_tree,
new_prob, branch_ct,
- pc->counts.inter_mode[i], NEARESTMV);
+ cm->counts.inter_mode[i], NEARESTMV);
for (j = 0; j < INTER_MODES - 1; ++j)
- vp9_cond_prob_diff_update(bc, &pc->fc.inter_mode_probs[i][j],
+ vp9_cond_prob_diff_update(bc, &cm->fc.inter_mode_probs[i][j],
MODE_UPDATE_PROB, branch_ct[j]);
}
}
@@ -356,39 +357,39 @@
// This function encodes the reference frame
static void encode_ref_frame(VP9_COMP *cpi, vp9_writer *bc) {
- VP9_COMMON *const pc = &cpi->common;
+ VP9_COMMON *const cm = &cpi->common;
MACROBLOCK *const x = &cpi->mb;
MACROBLOCKD *const xd = &x->e_mbd;
- MB_MODE_INFO *mi = &xd->mode_info_context->mbmi;
+ MB_MODE_INFO *mi = &xd->this_mi->mbmi;
const int segment_id = mi->segment_id;
- int seg_ref_active = vp9_segfeature_active(&pc->seg, segment_id,
+ int seg_ref_active = vp9_segfeature_active(&cm->seg, segment_id,
SEG_LVL_REF_FRAME);
// If segment level coding of this signal is disabled...
// or the segment allows multiple reference frame options
if (!seg_ref_active) {
// does the feature use compound prediction or not
// (if not specified at the frame/segment level)
- if (pc->comp_pred_mode == HYBRID_PREDICTION) {
+ if (cm->comp_pred_mode == HYBRID_PREDICTION) {
vp9_write(bc, mi->ref_frame[1] > INTRA_FRAME,
- vp9_get_pred_prob_comp_inter_inter(pc, xd));
+ vp9_get_pred_prob_comp_inter_inter(cm, xd));
} else {
assert((mi->ref_frame[1] <= INTRA_FRAME) ==
- (pc->comp_pred_mode == SINGLE_PREDICTION_ONLY));
+ (cm->comp_pred_mode == SINGLE_PREDICTION_ONLY));
}
if (mi->ref_frame[1] > INTRA_FRAME) {
vp9_write(bc, mi->ref_frame[0] == GOLDEN_FRAME,
- vp9_get_pred_prob_comp_ref_p(pc, xd));
+ vp9_get_pred_prob_comp_ref_p(cm, xd));
} else {
vp9_write(bc, mi->ref_frame[0] != LAST_FRAME,
- vp9_get_pred_prob_single_ref_p1(pc, xd));
+ vp9_get_pred_prob_single_ref_p1(cm, xd));
if (mi->ref_frame[0] != LAST_FRAME)
vp9_write(bc, mi->ref_frame[0] != GOLDEN_FRAME,
- vp9_get_pred_prob_single_ref_p2(pc, xd));
+ vp9_get_pred_prob_single_ref_p2(cm, xd));
}
} else {
assert(mi->ref_frame[1] <= INTRA_FRAME);
- assert(vp9_get_segdata(&pc->seg, segment_id, SEG_LVL_REF_FRAME) ==
+ assert(vp9_get_segdata(&cm->seg, segment_id, SEG_LVL_REF_FRAME) ==
mi->ref_frame[0]);
}
@@ -397,11 +398,11 @@
}
static void pack_inter_mode_mvs(VP9_COMP *cpi, MODE_INFO *m, vp9_writer *bc) {
- VP9_COMMON *const pc = &cpi->common;
- const nmv_context *nmvc = &pc->fc.nmvc;
+ VP9_COMMON *const cm = &cpi->common;
+ const nmv_context *nmvc = &cm->fc.nmvc;
MACROBLOCK *const x = &cpi->mb;
MACROBLOCKD *const xd = &x->e_mbd;
- struct segmentation *seg = &pc->seg;
+ struct segmentation *seg = &cm->seg;
MB_MODE_INFO *const mi = &m->mbmi;
const MV_REFERENCE_FRAME rf = mi->ref_frame[0];
const MB_PREDICTION_MODE mode = mi->mode;
@@ -410,7 +411,7 @@
const BLOCK_SIZE bsize = mi->sb_type;
const int allow_hp = xd->allow_high_precision_mv;
- x->partition_info = x->pi + (m - pc->mi);
+ x->partition_info = x->pi + (m - cm->mi);
#ifdef ENTROPY_STATS
active_section = 9;
@@ -432,12 +433,12 @@
if (!vp9_segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME))
vp9_write(bc, rf != INTRA_FRAME,
- vp9_get_pred_prob_intra_inter(pc, xd));
+ vp9_get_pred_prob_intra_inter(cm, xd));
- if (bsize >= BLOCK_8X8 && pc->tx_mode == TX_MODE_SELECT &&
+ if (bsize >= BLOCK_8X8 && cm->tx_mode == TX_MODE_SELECT &&
!(rf != INTRA_FRAME &&
(skip_coeff || vp9_segfeature_active(seg, segment_id, SEG_LVL_SKIP)))) {
- write_selected_tx_size(cpi, mi->txfm_size, bsize, bc);
+ write_selected_tx_size(cpi, m, mi->tx_size, bsize, bc);
}
if (rf == INTRA_FRAME) {
@@ -446,7 +447,7 @@
#endif
if (bsize >= BLOCK_8X8) {
- write_intra_mode(bc, mode, pc->fc.y_mode_prob[size_group_lookup[bsize]]);
+ write_intra_mode(bc, mode, cm->fc.y_mode_prob[size_group_lookup[bsize]]);
} else {
int idx, idy;
const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize];
@@ -454,11 +455,11 @@
for (idy = 0; idy < 2; idy += num_4x4_blocks_high) {
for (idx = 0; idx < 2; idx += num_4x4_blocks_wide) {
const MB_PREDICTION_MODE bm = m->bmi[idy * 2 + idx].as_mode;
- write_intra_mode(bc, bm, pc->fc.y_mode_prob[0]);
+ write_intra_mode(bc, bm, cm->fc.y_mode_prob[0]);
}
}
}
- write_intra_mode(bc, mi->uv_mode, pc->fc.uv_mode_prob[mode]);
+ write_intra_mode(bc, mi->uv_mode, cm->fc.uv_mode_prob[mode]);
} else {
vp9_prob *mv_ref_p;
encode_ref_frame(cpi, bc);
@@ -472,18 +473,18 @@
if (!vp9_segfeature_active(seg, segment_id, SEG_LVL_SKIP)) {
if (bsize >= BLOCK_8X8) {
write_sb_mv_ref(bc, mode, mv_ref_p);
- ++pc->counts.inter_mode[mi->mode_context[rf]]
+ ++cm->counts.inter_mode[mi->mode_context[rf]]
[inter_mode_offset(mode)];
}
}
- if (pc->mcomp_filter_type == SWITCHABLE) {
+ if (cm->mcomp_filter_type == SWITCHABLE) {
const int ctx = vp9_get_pred_context_switchable_interp(xd);
write_token(bc, vp9_switchable_interp_tree,
- pc->fc.switchable_interp_prob[ctx],
+ cm->fc.switchable_interp_prob[ctx],
&vp9_switchable_interp_encodings[mi->interp_filter]);
} else {
- assert(mi->interp_filter == pc->mcomp_filter_type);
+ assert(mi->interp_filter == cm->mcomp_filter_type);
}
if (bsize < BLOCK_8X8) {
@@ -499,7 +500,7 @@
blockmode = x->partition_info->bmi[j].mode;
blockmv = m->bmi[j].as_mv[0];
write_sb_mv_ref(bc, blockmode, mv_ref_p);
- ++pc->counts.inter_mode[mi->mode_context[rf]]
+ ++cm->counts.inter_mode[mi->mode_context[rf]]
[inter_mode_offset(blockmode)];
if (blockmode == NEWMV) {
@@ -531,27 +532,29 @@
}
}
-static void write_mb_modes_kf(const VP9_COMP *cpi, MODE_INFO *m,
+static void write_mb_modes_kf(const VP9_COMP *cpi, MODE_INFO **mi_8x8,
vp9_writer *bc) {
- const VP9_COMMON *const c = &cpi->common;
+ const VP9_COMMON *const cm = &cpi->common;
const MACROBLOCKD *const xd = &cpi->mb.e_mbd;
- const struct segmentation *const seg = &c->seg;
+ const struct segmentation *const seg = &cm->seg;
+ MODE_INFO *m = mi_8x8[0];
const int ym = m->mbmi.mode;
- const int mis = c->mode_info_stride;
const int segment_id = m->mbmi.segment_id;
+ MODE_INFO *above_mi = mi_8x8[-xd->mode_info_stride];
+ MODE_INFO *left_mi = mi_8x8[-1];
if (seg->update_map)
write_segment_id(bc, seg, m->mbmi.segment_id);
write_skip_coeff(cpi, segment_id, m, bc);
- if (m->mbmi.sb_type >= BLOCK_8X8 && c->tx_mode == TX_MODE_SELECT)
- write_selected_tx_size(cpi, m->mbmi.txfm_size, m->mbmi.sb_type, bc);
+ if (m->mbmi.sb_type >= BLOCK_8X8 && cm->tx_mode == TX_MODE_SELECT)
+ write_selected_tx_size(cpi, m, m->mbmi.tx_size, m->mbmi.sb_type, bc);
if (m->mbmi.sb_type >= BLOCK_8X8) {
- const MB_PREDICTION_MODE A = above_block_mode(m, 0, mis);
+ const MB_PREDICTION_MODE A = above_block_mode(m, above_mi, 0);
const MB_PREDICTION_MODE L = xd->left_available ?
- left_block_mode(m, 0) : DC_PRED;
+ left_block_mode(m, left_mi, 0) : DC_PRED;
write_intra_mode(bc, ym, vp9_kf_y_mode_prob[A][L]);
} else {
int idx, idy;
@@ -559,10 +562,10 @@
const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[m->mbmi.sb_type];
for (idy = 0; idy < 2; idy += num_4x4_blocks_high) {
for (idx = 0; idx < 2; idx += num_4x4_blocks_wide) {
- const int i = idy * 2 + idx;
- const MB_PREDICTION_MODE A = above_block_mode(m, i, mis);
+ int i = idy * 2 + idx;
+ const MB_PREDICTION_MODE A = above_block_mode(m, above_mi, i);
const MB_PREDICTION_MODE L = (xd->left_available || idx) ?
- left_block_mode(m, i) : DC_PRED;
+ left_block_mode(m, left_mi, i) : DC_PRED;
const int bm = m->bmi[i].as_mode;
#ifdef ENTROPY_STATS
++intra_mode_stats[A][L][bm];
@@ -575,21 +578,25 @@
write_intra_mode(bc, m->mbmi.uv_mode, vp9_kf_uv_mode_prob[ym]);
}
-static void write_modes_b(VP9_COMP *cpi, MODE_INFO *m, vp9_writer *bc,
+static void write_modes_b(VP9_COMP *cpi, MODE_INFO **mi_8x8, vp9_writer *bc,
TOKENEXTRA **tok, TOKENEXTRA *tok_end,
int mi_row, int mi_col) {
VP9_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &cpi->mb.e_mbd;
+ MODE_INFO *m = mi_8x8[0];
if (m->mbmi.sb_type < BLOCK_8X8)
if (xd->ab_index > 0)
return;
- xd->mode_info_context = m;
- set_mi_row_col(&cpi->common, xd, mi_row,
- 1 << mi_height_log2(m->mbmi.sb_type),
- mi_col, 1 << mi_width_log2(m->mbmi.sb_type));
+
+ xd->this_mi = mi_8x8[0];
+ xd->mi_8x8 = mi_8x8;
+
+ set_mi_row_col(&cpi->common, xd,
+ mi_row, num_8x8_blocks_high_lookup[m->mbmi.sb_type],
+ mi_col, num_8x8_blocks_wide_lookup[m->mbmi.sb_type]);
if ((cm->frame_type == KEY_FRAME) || cm->intra_only) {
- write_mb_modes_kf(cpi, m, bc);
+ write_mb_modes_kf(cpi, mi_8x8, bc);
#ifdef ENTROPY_STATS
active_section = 8;
#endif
@@ -604,7 +611,7 @@
pack_mb_tokens(bc, tok, tok_end);
}
-static void write_modes_sb(VP9_COMP *cpi, MODE_INFO *m, vp9_writer *bc,
+static void write_modes_sb(VP9_COMP *cpi, MODE_INFO **mi_8x8, vp9_writer *bc,
TOKENEXTRA **tok, TOKENEXTRA *tok_end,
int mi_row, int mi_col, BLOCK_SIZE bsize) {
VP9_COMMON *const cm = &cpi->common;
@@ -615,6 +622,7 @@
int n;
PARTITION_TYPE partition = PARTITION_NONE;
BLOCK_SIZE subsize;
+ MODE_INFO *m = mi_8x8[0];
if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
return;
@@ -646,25 +654,26 @@
switch (partition) {
case PARTITION_NONE:
- write_modes_b(cpi, m, bc, tok, tok_end, mi_row, mi_col);
+ write_modes_b(cpi, mi_8x8, bc, tok, tok_end, mi_row, mi_col);
break;
case PARTITION_HORZ:
- write_modes_b(cpi, m, bc, tok, tok_end, mi_row, mi_col);
+ write_modes_b(cpi, mi_8x8, bc, tok, tok_end, mi_row, mi_col);
*(get_sb_index(xd, subsize)) = 1;
if ((mi_row + bs) < cm->mi_rows)
- write_modes_b(cpi, m + bs * mis, bc, tok, tok_end, mi_row + bs, mi_col);
+ write_modes_b(cpi, mi_8x8 + bs * mis, bc, tok, tok_end, mi_row + bs,
+ mi_col);
break;
case PARTITION_VERT:
- write_modes_b(cpi, m, bc, tok, tok_end, mi_row, mi_col);
+ write_modes_b(cpi, mi_8x8, bc, tok, tok_end, mi_row, mi_col);
*(get_sb_index(xd, subsize)) = 1;
if ((mi_col + bs) < cm->mi_cols)
- write_modes_b(cpi, m + bs, bc, tok, tok_end, mi_row, mi_col + bs);
+ write_modes_b(cpi, mi_8x8 + bs, bc, tok, tok_end, mi_row, mi_col + bs);
break;
case PARTITION_SPLIT:
for (n = 0; n < 4; n++) {
int j = n >> 1, i = n & 0x01;
*(get_sb_index(xd, subsize)) = n;
- write_modes_sb(cpi, m + j * bs * mis + i * bs, bc, tok, tok_end,
+ write_modes_sb(cpi, mi_8x8 + j * bs * mis + i * bs, bc, tok, tok_end,
mi_row + j * bs, mi_col + i * bs, subsize);
}
break;
@@ -682,20 +691,23 @@
static void write_modes(VP9_COMP *cpi, vp9_writer* const bc,
TOKENEXTRA **tok, TOKENEXTRA *tok_end) {
- VP9_COMMON *const c = &cpi->common;
- const int mis = c->mode_info_stride;
- MODE_INFO *m, *m_ptr = c->mi;
+ VP9_COMMON *const cm = &cpi->common;
+ const int mis = cm->mode_info_stride;
int mi_row, mi_col;
+ MODE_INFO **mi_8x8 = cm->mi_grid_visible;
+ MODE_INFO **m_8x8;
- m_ptr += c->cur_tile_mi_col_start + c->cur_tile_mi_row_start * mis;
+ mi_8x8 += cm->cur_tile_mi_col_start + cm->cur_tile_mi_row_start * mis;
- for (mi_row = c->cur_tile_mi_row_start; mi_row < c->cur_tile_mi_row_end;
- mi_row += 8, m_ptr += 8 * mis) {
- m = m_ptr;
- vp9_zero(c->left_seg_context);
- for (mi_col = c->cur_tile_mi_col_start; mi_col < c->cur_tile_mi_col_end;
- mi_col += MI_BLOCK_SIZE, m += MI_BLOCK_SIZE)
- write_modes_sb(cpi, m, bc, tok, tok_end, mi_row, mi_col, BLOCK_64X64);
+ for (mi_row = cm->cur_tile_mi_row_start; mi_row < cm->cur_tile_mi_row_end;
+ mi_row += 8, mi_8x8 += 8 * mis) {
+ m_8x8 = mi_8x8;
+ vp9_zero(cm->left_seg_context);
+ for (mi_col = cm->cur_tile_mi_col_start; mi_col < cm->cur_tile_mi_col_end;
+ mi_col += MI_BLOCK_SIZE, m_8x8 += MI_BLOCK_SIZE) {
+ write_modes_sb(cpi, m_8x8, bc, tok, tok_end, mi_row, mi_col,
+ BLOCK_64X64);
+ }
}
}
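
Note: the write_modes() rewrite in the hunk above replaces the single MODE_INFO *m_ptr cursor with a grid of MODE_INFO pointers (cm->mi_grid_visible) indexed by mode_info_stride, stepping one 64x64 superblock (8 mi-units) at a time and handing each superblock's grid slot to write_modes_sb(). The following is a minimal illustrative sketch of that traversal pattern only; the stand-in types and the stub in place of write_modes_sb() are assumptions, not the real libvpx definitions.

/* Illustrative only: stand-in types, not the real VP9 structures. */
#include <stdio.h>

#define MI_BLOCK_SIZE 8  /* mi-units along one side of a 64x64 superblock */

typedef struct MODE_INFO { int sb_type; } MODE_INFO;

/* Stub standing in for write_modes_sb(): receives a pointer into the grid. */
static void write_modes_sb_stub(MODE_INFO **mi_8x8, int mi_row, int mi_col) {
  printf("superblock at mi (%d, %d), grid slot %p\n",
         mi_row, mi_col, (void *)mi_8x8);
}

/* Walk one tile of the pointer grid, one superblock at a time, mirroring
   the pointer arithmetic in the new write_modes(). */
static void walk_tile(MODE_INFO **mi_grid_visible, int mode_info_stride,
                      int row_start, int row_end,
                      int col_start, int col_end) {
  /* Point at the tile's first mi-unit, as in:
     mi_8x8 += cm->cur_tile_mi_col_start + cm->cur_tile_mi_row_start * mis; */
  MODE_INFO **mi_8x8 =
      mi_grid_visible + col_start + row_start * mode_info_stride;
  int mi_row, mi_col;

  for (mi_row = row_start; mi_row < row_end;
       mi_row += MI_BLOCK_SIZE, mi_8x8 += MI_BLOCK_SIZE * mode_info_stride) {
    MODE_INFO **m_8x8 = mi_8x8;  /* per-row cursor, like m_8x8 in the patch */
    for (mi_col = col_start; mi_col < col_end;
         mi_col += MI_BLOCK_SIZE, m_8x8 += MI_BLOCK_SIZE)
      write_modes_sb_stub(m_8x8, mi_row, mi_col);
  }
}
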
@@ -782,94 +794,170 @@
vp9_coeff_probs_model *old_frame_coef_probs =
cpi->common.fc.coef_probs[tx_size];
vp9_coeff_stats *frame_branch_ct = cpi->frame_branch_ct[tx_size];
+ const vp9_prob upd = VP9_COEF_UPDATE_PROB;
+ const int entropy_nodes_update = UNCONSTRAINED_NODES;
int i, j, k, l, t;
- int update[2] = {0, 0};
- int savings;
+ switch (cpi->sf.use_fast_coef_updates) {
+ case 0: {
+ /* dry run to see if there is any update at all needed */
+ int savings = 0;
+ int update[2] = {0, 0};
+ for (i = 0; i < BLOCK_TYPES; ++i) {
+ for (j = 0; j < REF_TYPES; ++j) {
+ for (k = 0; k < COEF_BANDS; ++k) {
+ for (l = 0; l < PREV_COEF_CONTEXTS; ++l) {
+ for (t = 0; t < entropy_nodes_update; ++t) {
+ vp9_prob newp = new_frame_coef_probs[i][j][k][l][t];
+ const vp9_prob oldp = old_frame_coef_probs[i][j][k][l][t];
+ int s;
+ int u = 0;
- const int entropy_nodes_update = UNCONSTRAINED_NODES;
+ if (l >= 3 && k == 0)
+ continue;
+ if (t == PIVOT_NODE)
+ s = vp9_prob_diff_update_savings_search_model(
+ frame_branch_ct[i][j][k][l][0],
+ old_frame_coef_probs[i][j][k][l], &newp, upd, i, j);
+ else
+ s = vp9_prob_diff_update_savings_search(
+ frame_branch_ct[i][j][k][l][t], oldp, &newp, upd);
+ if (s > 0 && newp != oldp)
+ u = 1;
+ if (u)
+ savings += s - (int)(vp9_cost_zero(upd));
+ else
+ savings -= (int)(vp9_cost_zero(upd));
+ update[u]++;
+ }
+ }
+ }
+ }
+ }
- const int tstart = 0;
- /* dry run to see if there is any udpate at all needed */
- savings = 0;
- for (i = 0; i < BLOCK_TYPES; ++i) {
- for (j = 0; j < REF_TYPES; ++j) {
- for (k = 0; k < COEF_BANDS; ++k) {
- // int prev_coef_savings[ENTROPY_NODES] = {0};
- for (l = 0; l < PREV_COEF_CONTEXTS; ++l) {
- for (t = tstart; t < entropy_nodes_update; ++t) {
- vp9_prob newp = new_frame_coef_probs[i][j][k][l][t];
- const vp9_prob oldp = old_frame_coef_probs[i][j][k][l][t];
- const vp9_prob upd = VP9_COEF_UPDATE_PROB;
- int s;
- int u = 0;
-
- if (l >= 3 && k == 0)
- continue;
- if (t == PIVOT_NODE)
- s = vp9_prob_diff_update_savings_search_model(
- frame_branch_ct[i][j][k][l][0],
- old_frame_coef_probs[i][j][k][l], &newp, upd, i, j);
- else
- s = vp9_prob_diff_update_savings_search(
- frame_branch_ct[i][j][k][l][t], oldp, &newp, upd);
- if (s > 0 && newp != oldp)
- u = 1;
- if (u)
- savings += s - (int)(vp9_cost_zero(upd));
- else
- savings -= (int)(vp9_cost_zero(upd));
- update[u]++;
+ // printf("Update %d %d, savings %d\n", update[0], update[1], savings);
+ /* Is coef updated at all */
+ if (update[1] == 0 || savings < 0) {
+ vp9_write_bit(bc, 0);
+ return;
+ }
+ vp9_write_bit(bc, 1);
+ for (i = 0; i < BLOCK_TYPES; ++i) {
+ for (j = 0; j < REF_TYPES; ++j) {
+ for (k = 0; k < COEF_BANDS; ++k) {
+ for (l = 0; l < PREV_COEF_CONTEXTS; ++l) {
+ // calc probs and branch cts for this frame only
+ for (t = 0; t < entropy_nodes_update; ++t) {
+ vp9_prob newp = new_frame_coef_probs[i][j][k][l][t];
+ vp9_prob *oldp = old_frame_coef_probs[i][j][k][l] + t;
+ const vp9_prob upd = VP9_COEF_UPDATE_PROB;
+ int s;
+ int u = 0;
+ if (l >= 3 && k == 0)
+ continue;
+ if (t == PIVOT_NODE)
+ s = vp9_prob_diff_update_savings_search_model(
+ frame_branch_ct[i][j][k][l][0],
+ old_frame_coef_probs[i][j][k][l], &newp, upd, i, j);
+ else
+ s = vp9_prob_diff_update_savings_search(
+ frame_branch_ct[i][j][k][l][t],
+ *oldp, &newp, upd);
+ if (s > 0 && newp != *oldp)
+ u = 1;
+ vp9_write(bc, u, upd);
+#ifdef ENTROPY_STATS
+ if (!cpi->dummy_packing)
+ ++tree_update_hist[tx_size][i][j][k][l][t][u];
+#endif
+ if (u) {
+ /* send/use new probability */
+ vp9_write_prob_diff_update(bc, newp, *oldp);
+ *oldp = newp;
+ }
+ }
+ }
}
}
}
+ return;
}
- }
- // printf("Update %d %d, savings %d\n", update[0], update[1], savings);
- /* Is coef updated at all */
- if (update[1] == 0 || savings < 0) {
- vp9_write_bit(bc, 0);
- return;
- }
- vp9_write_bit(bc, 1);
- for (i = 0; i < BLOCK_TYPES; ++i) {
- for (j = 0; j < REF_TYPES; ++j) {
- for (k = 0; k < COEF_BANDS; ++k) {
- // int prev_coef_savings[ENTROPY_NODES] = {0};
- for (l = 0; l < PREV_COEF_CONTEXTS; ++l) {
- // calc probs and branch cts for this frame only
- for (t = tstart; t < entropy_nodes_update; ++t) {
- vp9_prob newp = new_frame_coef_probs[i][j][k][l][t];
- vp9_prob *oldp = old_frame_coef_probs[i][j][k][l] + t;
- const vp9_prob upd = VP9_COEF_UPDATE_PROB;
- int s;
- int u = 0;
- if (l >= 3 && k == 0)
- continue;
- if (t == PIVOT_NODE)
- s = vp9_prob_diff_update_savings_search_model(
- frame_branch_ct[i][j][k][l][0],
- old_frame_coef_probs[i][j][k][l], &newp, upd, i, j);
- else
- s = vp9_prob_diff_update_savings_search(
- frame_branch_ct[i][j][k][l][t],
- *oldp, &newp, upd);
- if (s > 0 && newp != *oldp)
- u = 1;
- vp9_write(bc, u, upd);
+ case 1:
+ case 2: {
+ const int prev_coef_contexts_to_update =
+ (cpi->sf.use_fast_coef_updates == 2 ?
+ PREV_COEF_CONTEXTS >> 1 : PREV_COEF_CONTEXTS);
+ const int coef_band_to_update =
+ (cpi->sf.use_fast_coef_updates == 2 ?
+ COEF_BANDS >> 1 : COEF_BANDS);
+ int updates = 0;
+ int noupdates_before_first = 0;
+ for (i = 0; i < BLOCK_TYPES; ++i) {
+ for (j = 0; j < REF_TYPES; ++j) {
+ for (k = 0; k < COEF_BANDS; ++k) {
+ for (l = 0; l < PREV_COEF_CONTEXTS; ++l) {
+ // calc probs and branch cts for this frame only
+ for (t = 0; t < entropy_nodes_update; ++t) {
+ vp9_prob newp = new_frame_coef_probs[i][j][k][l][t];
+ vp9_prob *oldp = old_frame_coef_probs[i][j][k][l] + t;
+ int s;
+ int u = 0;
+ if (l >= 3 && k == 0)
+ continue;
+ if (l >= prev_coef_contexts_to_update ||
+ k >= coef_band_to_update) {
+ u = 0;
+ } else {
+ if (t == PIVOT_NODE)
+ s = vp9_prob_diff_update_savings_search_model(
+ frame_branch_ct[i][j][k][l][0],
+ old_frame_coef_probs[i][j][k][l], &newp, upd, i, j);
+ else
+ s = vp9_prob_diff_update_savings_search(
+ frame_branch_ct[i][j][k][l][t],
+ *oldp, &newp, upd);
+ if (s > 0 && newp != *oldp)
+ u = 1;
+ }
+ updates += u;
+ if (u == 0 && updates == 0) {
+ noupdates_before_first++;
#ifdef ENTROPY_STATS
- if (!cpi->dummy_packing)
- ++tree_update_hist[tx_size][i][j][k][l][t][u];
+ if (!cpi->dummy_packing)
+ ++tree_update_hist[tx_size][i][j][k][l][t][u];
#endif
- if (u) {
- /* send/use new probability */
- vp9_write_prob_diff_update(bc, newp, *oldp);
- *oldp = newp;
+ continue;
+ }
+ if (u == 1 && updates == 1) {
+ int v;
+ // first update
+ vp9_write_bit(bc, 1);
+ for (v = 0; v < noupdates_before_first; ++v)
+ vp9_write(bc, 0, upd);
+ }
+ vp9_write(bc, u, upd);
+#ifdef ENTROPY_STATS
+ if (!cpi->dummy_packing)
+ ++tree_update_hist[tx_size][i][j][k][l][t][u];
+#endif
+ if (u) {
+ /* send/use new probability */
+ vp9_write_prob_diff_update(bc, newp, *oldp);
+ *oldp = newp;
+ }
+ }
}
}
}
}
+ if (updates == 0) {
+ vp9_write_bit(bc, 0); // no updates
+ }
+ return;
}
+
+ default:
+ assert(0);
}
}
@@ -1130,7 +1218,8 @@
if (!cpi->multi_arf_enabled && cpi->refresh_golden_frame &&
!cpi->refresh_alt_ref_frame) {
#else
- if (cpi->refresh_golden_frame && !cpi->refresh_alt_ref_frame) {
+ if (cpi->refresh_golden_frame && !cpi->refresh_alt_ref_frame &&
+ !cpi->use_svc) {
#endif
// Preserve the previously existing golden frame and update the frame in
// the alt ref slot instead. This is highly specific to the use of
@@ -1242,9 +1331,16 @@
YV12_BUFFER_CONFIG *cfg = &cm->yv12_fb[cm->ref_frame_map[refs[i]]];
found = cm->width == cfg->y_crop_width &&
cm->height == cfg->y_crop_height;
+
+ // TODO(ivan): This prevents a bug while more than 3 buffers are used. Do it
+ // in a better way.
+ if (cpi->use_svc) {
+ found = 0;
+ }
vp9_wb_write_bit(wb, found);
- if (found)
+ if (found) {
break;
+ }
}
if (!found) {
@@ -1456,7 +1552,7 @@
vp9_compute_update_table();
#ifdef ENTROPY_STATS
- if (pc->frame_type == INTER_FRAME)
+ if (cm->frame_type == INTER_FRAME)
active_section = 0;
else
active_section = 7;
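
Note: the coefficient-probability update hunk (case 0 of cpi->sf.use_fast_coef_updates) keeps the original two-pass scheme: a dry run sums the potential bit savings of every per-node probability update, and only when the net savings is positive is the update flag written, followed by the per-node decisions. The sketch below distills that control flow over a flat probability array, assuming simplified placeholder writer, cost, and savings-search helpers rather than the real vp9_* functions.

/* Illustrative only: placeholder types and helpers, not libvpx APIs. */
typedef unsigned char vp9_prob;

typedef struct { int bits; } writer;  /* stand-in for vp9_writer */

static void write_bit(writer *w, int bit) { w->bits += 1; (void)bit; }
static void write_update(writer *w, vp9_prob newp, vp9_prob oldp) {
  w->bits += 8; (void)newp; (void)oldp;  /* pretend cost of a prob diff */
}
/* Assumed cost of signalling "no update" for one probability node. */
static int cost_zero_upd(void) { return 1; }
/* Placeholder savings search: the real code searches for the best new
   probability and returns the bits saved by switching to it. */
static int savings_search(const unsigned int ct[2], vp9_prob oldp,
                          vp9_prob *newp) {
  (void)ct; (void)oldp; (void)newp;
  return 0;
}

static void update_probs(writer *w, vp9_prob *probs,
                         const unsigned int (*counts)[2], int n) {
  int i, savings = 0, any_update = 0;

  /* Pass 1: dry run, accumulate net savings without writing anything. */
  for (i = 0; i < n; ++i) {
    vp9_prob newp = probs[i];
    int s = savings_search(counts[i], probs[i], &newp);
    if (s > 0 && newp != probs[i]) {
      savings += s - cost_zero_upd();
      any_update = 1;
    } else {
      savings -= cost_zero_upd();
    }
  }

  /* Only spend bits if at least one update pays for itself overall. */
  if (!any_update || savings < 0) {
    write_bit(w, 0);
    return;
  }
  write_bit(w, 1);

  /* Pass 2: redo the search and write each per-node decision. */
  for (i = 0; i < n; ++i) {
    vp9_prob newp = probs[i];
    int s = savings_search(counts[i], probs[i], &newp);
    int u = (s > 0 && newp != probs[i]);
    write_bit(w, u);
    if (u) {
      write_update(w, newp, probs[i]);
      probs[i] = newp;  /* adopt the new probability for this frame */
    }
  }
}

The case 1/2 paths in the patch trade this exhaustive dry run for a single pass that restricts which coefficient bands and contexts are searched, buffering leading "no update" flags until the first real update is found.
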