Chromium Code Reviews

Side by Side Diff: source/libvpx/vp9/encoder/vp9_bitstream.c

Issue 756673003: libvpx: Pull from upstream (Closed)
Base URL: svn://svn.chromium.org/chrome/trunk/deps/third_party/libvpx/
Patch Set: Created 6 years ago
OLD | NEW
1 /* 1 /*
2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved. 2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
3 * 3 *
4 * Use of this source code is governed by a BSD-style license 4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source 5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found 6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may 7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree. 8 * be found in the AUTHORS file in the root of the source tree.
9 */ 9 */
10 10
(...skipping 61 matching lines...)
72 72
73 // Assuming max number of probabilities <= 32 73 // Assuming max number of probabilities <= 32
74 assert(n <= 32); 74 assert(n <= 32);
75 75
76 vp9_tree_probs_from_distribution(tree, branch_ct, counts); 76 vp9_tree_probs_from_distribution(tree, branch_ct, counts);
77 for (i = 0; i < n - 1; ++i) 77 for (i = 0; i < n - 1; ++i)
78 vp9_cond_prob_diff_update(w, &probs[i], branch_ct[i]); 78 vp9_cond_prob_diff_update(w, &probs[i], branch_ct[i]);
79 } 79 }
80 80
81 static void write_selected_tx_size(const VP9_COMMON *cm, 81 static void write_selected_tx_size(const VP9_COMMON *cm,
82 const MACROBLOCKD *xd, 82 const MACROBLOCKD *xd, vp9_writer *w) {
83 TX_SIZE tx_size, BLOCK_SIZE bsize, 83 TX_SIZE tx_size = xd->mi[0].src_mi->mbmi.tx_size;
84 vp9_writer *w) { 84 BLOCK_SIZE bsize = xd->mi[0].src_mi->mbmi.sb_type;
85 const TX_SIZE max_tx_size = max_txsize_lookup[bsize]; 85 const TX_SIZE max_tx_size = max_txsize_lookup[bsize];
86 const vp9_prob *const tx_probs = get_tx_probs2(max_tx_size, xd, 86 const vp9_prob *const tx_probs = get_tx_probs2(max_tx_size, xd,
87 &cm->fc.tx_probs); 87 &cm->fc->tx_probs);
88 vp9_write(w, tx_size != TX_4X4, tx_probs[0]); 88 vp9_write(w, tx_size != TX_4X4, tx_probs[0]);
89 if (tx_size != TX_4X4 && max_tx_size >= TX_16X16) { 89 if (tx_size != TX_4X4 && max_tx_size >= TX_16X16) {
90 vp9_write(w, tx_size != TX_8X8, tx_probs[1]); 90 vp9_write(w, tx_size != TX_8X8, tx_probs[1]);
91 if (tx_size != TX_8X8 && max_tx_size >= TX_32X32) 91 if (tx_size != TX_8X8 && max_tx_size >= TX_32X32)
92 vp9_write(w, tx_size != TX_16X16, tx_probs[2]); 92 vp9_write(w, tx_size != TX_16X16, tx_probs[2]);
93 } 93 }
94 } 94 }
95 95
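
[Editor's note] As an aside on how this helper signals the transform size: the selected size is written as up to three binary decisions against tx_probs[0..2], and each further decision is only emitted when max_tx_size permits it. Below is a minimal stand-alone sketch of that decision ladder, using hypothetical simplified enums and a plain bit printer in place of the real vp9_writer and probability model.

    #include <assert.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for the real TX_SIZE enum and vp9_writer. */
    typedef enum { TX_4X4, TX_8X8, TX_16X16, TX_32X32 } TX_SIZE;

    static void put_bit(int bit) {  /* stands in for vp9_write(w, bit, prob) */
      printf("%d", bit);
    }

    /* Mirrors the ladder in write_selected_tx_size: one bit per level,
     * gated by the largest transform the block size permits. */
    static void write_tx_size(TX_SIZE tx_size, TX_SIZE max_tx_size) {
      assert(tx_size <= max_tx_size);
      put_bit(tx_size != TX_4X4);
      if (tx_size != TX_4X4 && max_tx_size >= TX_16X16) {
        put_bit(tx_size != TX_8X8);
        if (tx_size != TX_8X8 && max_tx_size >= TX_32X32)
          put_bit(tx_size != TX_16X16);
      }
    }

    int main(void) {
      write_tx_size(TX_16X16, TX_32X32);  /* prints 110 */
      printf("\n");
      write_tx_size(TX_4X4, TX_8X8);      /* prints 0 */
      printf("\n");
      return 0;
    }
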
96 static int write_skip(const VP9_COMMON *cm, const MACROBLOCKD *xd, 96 static int write_skip(const VP9_COMMON *cm, const MACROBLOCKD *xd,
97 int segment_id, const MODE_INFO *mi, vp9_writer *w) { 97 int segment_id, const MODE_INFO *mi, vp9_writer *w) {
98 if (vp9_segfeature_active(&cm->seg, segment_id, SEG_LVL_SKIP)) { 98 if (vp9_segfeature_active(&cm->seg, segment_id, SEG_LVL_SKIP)) {
99 return 1; 99 return 1;
100 } else { 100 } else {
101 const int skip = mi->mbmi.skip; 101 const int skip = mi->mbmi.skip;
102 vp9_write(w, skip, vp9_get_skip_prob(cm, xd)); 102 vp9_write(w, skip, vp9_get_skip_prob(cm, xd));
103 return skip; 103 return skip;
104 } 104 }
105 } 105 }
106 106
107 static void update_skip_probs(VP9_COMMON *cm, vp9_writer *w) { 107 static void update_skip_probs(VP9_COMMON *cm, vp9_writer *w) {
108 int k; 108 int k;
109 109
110 for (k = 0; k < SKIP_CONTEXTS; ++k) 110 for (k = 0; k < SKIP_CONTEXTS; ++k)
111 vp9_cond_prob_diff_update(w, &cm->fc.skip_probs[k], cm->counts.skip[k]); 111 vp9_cond_prob_diff_update(w, &cm->fc->skip_probs[k], cm->counts.skip[k]);
112 } 112 }
113 113
114 static void update_switchable_interp_probs(VP9_COMMON *cm, vp9_writer *w) { 114 static void update_switchable_interp_probs(VP9_COMMON *cm, vp9_writer *w) {
115 int j; 115 int j;
116 for (j = 0; j < SWITCHABLE_FILTER_CONTEXTS; ++j) 116 for (j = 0; j < SWITCHABLE_FILTER_CONTEXTS; ++j)
117 prob_diff_update(vp9_switchable_interp_tree, 117 prob_diff_update(vp9_switchable_interp_tree,
118 cm->fc.switchable_interp_prob[j], 118 cm->fc->switchable_interp_prob[j],
119 cm->counts.switchable_interp[j], SWITCHABLE_FILTERS, w); 119 cm->counts.switchable_interp[j], SWITCHABLE_FILTERS, w);
120 } 120 }
121 121
122 static void pack_mb_tokens(vp9_writer *w, 122 static void pack_mb_tokens(vp9_writer *w,
123 TOKENEXTRA **tp, const TOKENEXTRA *const stop, 123 TOKENEXTRA **tp, const TOKENEXTRA *const stop,
124 vpx_bit_depth_t bit_depth) { 124 vpx_bit_depth_t bit_depth) {
125 TOKENEXTRA *p = *tp; 125 TOKENEXTRA *p = *tp;
126 126
127 while (p < stop && p->token != EOSB_TOKEN) { 127 while (p < stop && p->token != EOSB_TOKEN) {
128 const int t = p->token; 128 const int t = p->token;
(...skipping 101 matching lines...)
230 const int bit1 = mbmi->ref_frame[0] != GOLDEN_FRAME; 230 const int bit1 = mbmi->ref_frame[0] != GOLDEN_FRAME;
231 vp9_write(w, bit1, vp9_get_pred_prob_single_ref_p2(cm, xd)); 231 vp9_write(w, bit1, vp9_get_pred_prob_single_ref_p2(cm, xd));
232 } 232 }
233 } 233 }
234 } 234 }
235 } 235 }
236 236
237 static void pack_inter_mode_mvs(VP9_COMP *cpi, const MODE_INFO *mi, 237 static void pack_inter_mode_mvs(VP9_COMP *cpi, const MODE_INFO *mi,
238 vp9_writer *w) { 238 vp9_writer *w) {
239 VP9_COMMON *const cm = &cpi->common; 239 VP9_COMMON *const cm = &cpi->common;
240 const nmv_context *nmvc = &cm->fc.nmvc; 240 const nmv_context *nmvc = &cm->fc->nmvc;
241 const MACROBLOCK *const x = &cpi->mb; 241 const MACROBLOCK *const x = &cpi->mb;
242 const MACROBLOCKD *const xd = &x->e_mbd; 242 const MACROBLOCKD *const xd = &x->e_mbd;
243 const struct segmentation *const seg = &cm->seg; 243 const struct segmentation *const seg = &cm->seg;
244 const MB_MODE_INFO *const mbmi = &mi->mbmi; 244 const MB_MODE_INFO *const mbmi = &mi->mbmi;
245 const PREDICTION_MODE mode = mbmi->mode; 245 const PREDICTION_MODE mode = mbmi->mode;
246 const int segment_id = mbmi->segment_id; 246 const int segment_id = mbmi->segment_id;
247 const BLOCK_SIZE bsize = mbmi->sb_type; 247 const BLOCK_SIZE bsize = mbmi->sb_type;
248 const int allow_hp = cm->allow_high_precision_mv; 248 const int allow_hp = cm->allow_high_precision_mv;
249 const int is_inter = is_inter_block(mbmi); 249 const int is_inter = is_inter_block(mbmi);
250 const int is_compound = has_second_ref(mbmi); 250 const int is_compound = has_second_ref(mbmi);
(...skipping 12 matching lines...)
263 } 263 }
264 264
265 skip = write_skip(cm, xd, segment_id, mi, w); 265 skip = write_skip(cm, xd, segment_id, mi, w);
266 266
267 if (!vp9_segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME)) 267 if (!vp9_segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME))
268 vp9_write(w, is_inter, vp9_get_intra_inter_prob(cm, xd)); 268 vp9_write(w, is_inter, vp9_get_intra_inter_prob(cm, xd));
269 269
270 if (bsize >= BLOCK_8X8 && cm->tx_mode == TX_MODE_SELECT && 270 if (bsize >= BLOCK_8X8 && cm->tx_mode == TX_MODE_SELECT &&
271 !(is_inter && 271 !(is_inter &&
272 (skip || vp9_segfeature_active(seg, segment_id, SEG_LVL_SKIP)))) { 272 (skip || vp9_segfeature_active(seg, segment_id, SEG_LVL_SKIP)))) {
273 write_selected_tx_size(cm, xd, mbmi->tx_size, bsize, w); 273 write_selected_tx_size(cm, xd, w);
274 } 274 }
275 275
276 if (!is_inter) { 276 if (!is_inter) {
277 if (bsize >= BLOCK_8X8) { 277 if (bsize >= BLOCK_8X8) {
278 write_intra_mode(w, mode, cm->fc.y_mode_prob[size_group_lookup[bsize]]); 278 write_intra_mode(w, mode, cm->fc->y_mode_prob[size_group_lookup[bsize]]);
279 } else { 279 } else {
280 int idx, idy; 280 int idx, idy;
281 const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize]; 281 const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize];
282 const int num_4x4_h = num_4x4_blocks_high_lookup[bsize]; 282 const int num_4x4_h = num_4x4_blocks_high_lookup[bsize];
283 for (idy = 0; idy < 2; idy += num_4x4_h) { 283 for (idy = 0; idy < 2; idy += num_4x4_h) {
284 for (idx = 0; idx < 2; idx += num_4x4_w) { 284 for (idx = 0; idx < 2; idx += num_4x4_w) {
285 const PREDICTION_MODE b_mode = mi->bmi[idy * 2 + idx].as_mode; 285 const PREDICTION_MODE b_mode = mi->bmi[idy * 2 + idx].as_mode;
286 write_intra_mode(w, b_mode, cm->fc.y_mode_prob[0]); 286 write_intra_mode(w, b_mode, cm->fc->y_mode_prob[0]);
287 } 287 }
288 } 288 }
289 } 289 }
290 write_intra_mode(w, mbmi->uv_mode, cm->fc.uv_mode_prob[mode]); 290 write_intra_mode(w, mbmi->uv_mode, cm->fc->uv_mode_prob[mode]);
291 } else { 291 } else {
292 const int mode_ctx = mbmi->mode_context[mbmi->ref_frame[0]]; 292 const int mode_ctx = mbmi->mode_context[mbmi->ref_frame[0]];
293 const vp9_prob *const inter_probs = cm->fc.inter_mode_probs[mode_ctx]; 293 const vp9_prob *const inter_probs = cm->fc->inter_mode_probs[mode_ctx];
294 write_ref_frames(cm, xd, w); 294 write_ref_frames(cm, xd, w);
295 295
296 // If segment skip is not enabled code the mode. 296 // If segment skip is not enabled code the mode.
297 if (!vp9_segfeature_active(seg, segment_id, SEG_LVL_SKIP)) { 297 if (!vp9_segfeature_active(seg, segment_id, SEG_LVL_SKIP)) {
298 if (bsize >= BLOCK_8X8) { 298 if (bsize >= BLOCK_8X8) {
299 write_inter_mode(w, mode, inter_probs); 299 write_inter_mode(w, mode, inter_probs);
300 ++cm->counts.inter_mode[mode_ctx][INTER_OFFSET(mode)]; 300 ++cm->counts.inter_mode[mode_ctx][INTER_OFFSET(mode)];
301 } 301 }
302 } 302 }
303 303
304 if (cm->interp_filter == SWITCHABLE) { 304 if (cm->interp_filter == SWITCHABLE) {
305 const int ctx = vp9_get_pred_context_switchable_interp(xd); 305 const int ctx = vp9_get_pred_context_switchable_interp(xd);
306 vp9_write_token(w, vp9_switchable_interp_tree, 306 vp9_write_token(w, vp9_switchable_interp_tree,
307 cm->fc.switchable_interp_prob[ctx], 307 cm->fc->switchable_interp_prob[ctx],
308 &switchable_interp_encodings[mbmi->interp_filter]); 308 &switchable_interp_encodings[mbmi->interp_filter]);
309 ++cpi->interp_filter_selected[0][mbmi->interp_filter]; 309 ++cpi->interp_filter_selected[0][mbmi->interp_filter];
310 } else { 310 } else {
311 assert(mbmi->interp_filter == cm->interp_filter); 311 assert(mbmi->interp_filter == cm->interp_filter);
312 } 312 }
313 313
314 if (bsize < BLOCK_8X8) { 314 if (bsize < BLOCK_8X8) {
315 const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize]; 315 const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize];
316 const int num_4x4_h = num_4x4_blocks_high_lookup[bsize]; 316 const int num_4x4_h = num_4x4_blocks_high_lookup[bsize];
317 int idx, idy; 317 int idx, idy;
(...skipping 31 matching lines...)
349 xd->left_available ? mi_8x8[-1].src_mi : NULL; 349 xd->left_available ? mi_8x8[-1].src_mi : NULL;
350 const MB_MODE_INFO *const mbmi = &mi->mbmi; 350 const MB_MODE_INFO *const mbmi = &mi->mbmi;
351 const BLOCK_SIZE bsize = mbmi->sb_type; 351 const BLOCK_SIZE bsize = mbmi->sb_type;
352 352
353 if (seg->update_map) 353 if (seg->update_map)
354 write_segment_id(w, seg, mbmi->segment_id); 354 write_segment_id(w, seg, mbmi->segment_id);
355 355
356 write_skip(cm, xd, mbmi->segment_id, mi, w); 356 write_skip(cm, xd, mbmi->segment_id, mi, w);
357 357
358 if (bsize >= BLOCK_8X8 && cm->tx_mode == TX_MODE_SELECT) 358 if (bsize >= BLOCK_8X8 && cm->tx_mode == TX_MODE_SELECT)
359 write_selected_tx_size(cm, xd, mbmi->tx_size, bsize, w); 359 write_selected_tx_size(cm, xd, w);
360 360
361 if (bsize >= BLOCK_8X8) { 361 if (bsize >= BLOCK_8X8) {
362 write_intra_mode(w, mbmi->mode, get_y_mode_probs(mi, above_mi, left_mi, 0)); 362 write_intra_mode(w, mbmi->mode, get_y_mode_probs(mi, above_mi, left_mi, 0));
363 } else { 363 } else {
364 const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize]; 364 const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize];
365 const int num_4x4_h = num_4x4_blocks_high_lookup[bsize]; 365 const int num_4x4_h = num_4x4_blocks_high_lookup[bsize];
366 int idx, idy; 366 int idx, idy;
367 367
368 for (idy = 0; idy < 2; idy += num_4x4_h) { 368 for (idy = 0; idy < 2; idy += num_4x4_h) {
369 for (idx = 0; idx < 2; idx += num_4x4_w) { 369 for (idx = 0; idx < 2; idx += num_4x4_w) {
(...skipping 123 matching lines...)
493 for (mi_col = tile->mi_col_start; mi_col < tile->mi_col_end; 493 for (mi_col = tile->mi_col_start; mi_col < tile->mi_col_end;
494 mi_col += MI_BLOCK_SIZE) 494 mi_col += MI_BLOCK_SIZE)
495 write_modes_sb(cpi, tile, w, tok, tok_end, mi_row, mi_col, 495 write_modes_sb(cpi, tile, w, tok, tok_end, mi_row, mi_col,
496 BLOCK_64X64); 496 BLOCK_64X64);
497 } 497 }
498 } 498 }
499 499
500 static void build_tree_distribution(VP9_COMP *cpi, TX_SIZE tx_size, 500 static void build_tree_distribution(VP9_COMP *cpi, TX_SIZE tx_size,
501 vp9_coeff_stats *coef_branch_ct, 501 vp9_coeff_stats *coef_branch_ct,
502 vp9_coeff_probs_model *coef_probs) { 502 vp9_coeff_probs_model *coef_probs) {
503 vp9_coeff_count *coef_counts = cpi->coef_counts[tx_size]; 503 vp9_coeff_count *coef_counts = cpi->frame_counts->coef_counts[tx_size];
504 unsigned int (*eob_branch_ct)[REF_TYPES][COEF_BANDS][COEFF_CONTEXTS] = 504 unsigned int (*eob_branch_ct)[REF_TYPES][COEF_BANDS][COEFF_CONTEXTS] =
505 cpi->common.counts.eob_branch[tx_size]; 505 cpi->common.counts.eob_branch[tx_size];
506 int i, j, k, l, m; 506 int i, j, k, l, m;
507 507
508 for (i = 0; i < PLANE_TYPES; ++i) { 508 for (i = 0; i < PLANE_TYPES; ++i) {
509 for (j = 0; j < REF_TYPES; ++j) { 509 for (j = 0; j < REF_TYPES; ++j) {
510 for (k = 0; k < COEF_BANDS; ++k) { 510 for (k = 0; k < COEF_BANDS; ++k) {
511 for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) { 511 for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) {
512 vp9_tree_probs_from_distribution(vp9_coef_tree, 512 vp9_tree_probs_from_distribution(vp9_coef_tree,
513 coef_branch_ct[i][j][k][l], 513 coef_branch_ct[i][j][k][l],
514 coef_counts[i][j][k][l]); 514 coef_counts[i][j][k][l]);
515 coef_branch_ct[i][j][k][l][0][1] = eob_branch_ct[i][j][k][l] - 515 coef_branch_ct[i][j][k][l][0][1] = eob_branch_ct[i][j][k][l] -
516 coef_branch_ct[i][j][k][l][0][0]; 516 coef_branch_ct[i][j][k][l][0][0];
517 for (m = 0; m < UNCONSTRAINED_NODES; ++m) 517 for (m = 0; m < UNCONSTRAINED_NODES; ++m)
518 coef_probs[i][j][k][l][m] = get_binary_prob( 518 coef_probs[i][j][k][l][m] = get_binary_prob(
519 coef_branch_ct[i][j][k][l][m][0], 519 coef_branch_ct[i][j][k][l][m][0],
520 coef_branch_ct[i][j][k][l][m][1]); 520 coef_branch_ct[i][j][k][l][m][1]);
521 } 521 }
522 } 522 }
523 } 523 }
524 } 524 }
525 } 525 }
526 526
527 static void update_coef_probs_common(vp9_writer* const bc, VP9_COMP *cpi, 527 static void update_coef_probs_common(vp9_writer* const bc, VP9_COMP *cpi,
528 TX_SIZE tx_size, 528 TX_SIZE tx_size,
529 vp9_coeff_stats *frame_branch_ct, 529 vp9_coeff_stats *frame_branch_ct,
530 vp9_coeff_probs_model *new_coef_probs) { 530 vp9_coeff_probs_model *new_coef_probs) {
531 vp9_coeff_probs_model *old_coef_probs = cpi->common.fc.coef_probs[tx_size]; 531 vp9_coeff_probs_model *old_coef_probs = cpi->common.fc->coef_probs[tx_size];
532 const vp9_prob upd = DIFF_UPDATE_PROB; 532 const vp9_prob upd = DIFF_UPDATE_PROB;
533 const int entropy_nodes_update = UNCONSTRAINED_NODES; 533 const int entropy_nodes_update = UNCONSTRAINED_NODES;
534 int i, j, k, l, t; 534 int i, j, k, l, t;
535 switch (cpi->sf.use_fast_coef_updates) { 535 switch (cpi->sf.use_fast_coef_updates) {
536 case TWO_LOOP: { 536 case TWO_LOOP: {
537 /* dry run to see if there is any update at all needed */ 537 /* dry run to see if there is any update at all needed */
538 int savings = 0; 538 int savings = 0;
539 int update[2] = {0, 0}; 539 int update[2] = {0, 0};
540 for (i = 0; i < PLANE_TYPES; ++i) { 540 for (i = 0; i < PLANE_TYPES; ++i) {
541 for (j = 0; j < REF_TYPES; ++j) { 541 for (j = 0; j < REF_TYPES; ++j) {
(...skipping 281 matching lines...)
823 if (cm->tx_mode == TX_MODE_SELECT) { 823 if (cm->tx_mode == TX_MODE_SELECT) {
824 int i, j; 824 int i, j;
825 unsigned int ct_8x8p[TX_SIZES - 3][2]; 825 unsigned int ct_8x8p[TX_SIZES - 3][2];
826 unsigned int ct_16x16p[TX_SIZES - 2][2]; 826 unsigned int ct_16x16p[TX_SIZES - 2][2];
827 unsigned int ct_32x32p[TX_SIZES - 1][2]; 827 unsigned int ct_32x32p[TX_SIZES - 1][2];
828 828
829 829
830 for (i = 0; i < TX_SIZE_CONTEXTS; i++) { 830 for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
831 tx_counts_to_branch_counts_8x8(cm->counts.tx.p8x8[i], ct_8x8p); 831 tx_counts_to_branch_counts_8x8(cm->counts.tx.p8x8[i], ct_8x8p);
832 for (j = 0; j < TX_SIZES - 3; j++) 832 for (j = 0; j < TX_SIZES - 3; j++)
833 vp9_cond_prob_diff_update(w, &cm->fc.tx_probs.p8x8[i][j], ct_8x8p[j]); 833 vp9_cond_prob_diff_update(w, &cm->fc->tx_probs.p8x8[i][j], ct_8x8p[j]);
834 } 834 }
835 835
836 for (i = 0; i < TX_SIZE_CONTEXTS; i++) { 836 for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
837 tx_counts_to_branch_counts_16x16(cm->counts.tx.p16x16[i], ct_16x16p); 837 tx_counts_to_branch_counts_16x16(cm->counts.tx.p16x16[i], ct_16x16p);
838 for (j = 0; j < TX_SIZES - 2; j++) 838 for (j = 0; j < TX_SIZES - 2; j++)
839 vp9_cond_prob_diff_update(w, &cm->fc.tx_probs.p16x16[i][j], 839 vp9_cond_prob_diff_update(w, &cm->fc->tx_probs.p16x16[i][j],
840 ct_16x16p[j]); 840 ct_16x16p[j]);
841 } 841 }
842 842
843 for (i = 0; i < TX_SIZE_CONTEXTS; i++) { 843 for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
844 tx_counts_to_branch_counts_32x32(cm->counts.tx.p32x32[i], ct_32x32p); 844 tx_counts_to_branch_counts_32x32(cm->counts.tx.p32x32[i], ct_32x32p);
845 for (j = 0; j < TX_SIZES - 1; j++) 845 for (j = 0; j < TX_SIZES - 1; j++)
846 vp9_cond_prob_diff_update(w, &cm->fc.tx_probs.p32x32[i][j], 846 vp9_cond_prob_diff_update(w, &cm->fc->tx_probs.p32x32[i][j],
847 ct_32x32p[j]); 847 ct_32x32p[j]);
848 } 848 }
849 } 849 }
850 } 850 }
851 851
852 static void write_interp_filter(INTERP_FILTER filter, 852 static void write_interp_filter(INTERP_FILTER filter,
853 struct vp9_write_bit_buffer *wb) { 853 struct vp9_write_bit_buffer *wb) {
854 const int filter_to_literal[] = { 1, 0, 2, 3 }; 854 const int filter_to_literal[] = { 1, 0, 2, 3 };
855 855
856 vp9_wb_write_bit(wb, filter == SWITCHABLE); 856 vp9_wb_write_bit(wb, filter == SWITCHABLE);
(...skipping 65 matching lines...)
922 } 922 }
923 return (cpi->refresh_last_frame << cpi->lst_fb_idx) | 923 return (cpi->refresh_last_frame << cpi->lst_fb_idx) |
924 (cpi->refresh_golden_frame << cpi->gld_fb_idx) | 924 (cpi->refresh_golden_frame << cpi->gld_fb_idx) |
925 (cpi->refresh_alt_ref_frame << arf_idx); 925 (cpi->refresh_alt_ref_frame << arf_idx);
926 } 926 }
927 } 927 }
928 928
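
[Editor's note] For context on the value returned just above: the refresh mask packs one bit per reference buffer slot, shifted by that slot's index. A tiny sketch with hypothetical slot assignments (the real indices live in VP9_COMP):

    #include <stdio.h>

    int main(void) {
      /* Hypothetical slot assignments for illustration only. */
      const int lst_fb_idx = 0, gld_fb_idx = 1, alt_fb_idx = 2;
      const int refresh_last = 1, refresh_golden = 0, refresh_alt = 1;

      const int mask = (refresh_last << lst_fb_idx) |
                       (refresh_golden << gld_fb_idx) |
                       (refresh_alt << alt_fb_idx);
      printf("refresh mask = 0x%x\n", mask);  /* prints 0x5 */
      return 0;
    }
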
929 static size_t encode_tiles(VP9_COMP *cpi, uint8_t *data_ptr) { 929 static size_t encode_tiles(VP9_COMP *cpi, uint8_t *data_ptr) {
930 VP9_COMMON *const cm = &cpi->common; 930 VP9_COMMON *const cm = &cpi->common;
931 vp9_writer residual_bc; 931 vp9_writer residual_bc;
932
933 int tile_row, tile_col; 932 int tile_row, tile_col;
934 TOKENEXTRA *tok[4][1 << 6], *tok_end; 933 TOKENEXTRA *tok[4][1 << 6], *tok_end;
935 size_t total_size = 0; 934 size_t total_size = 0;
936 const int tile_cols = 1 << cm->log2_tile_cols; 935 const int tile_cols = 1 << cm->log2_tile_cols;
937 const int tile_rows = 1 << cm->log2_tile_rows; 936 const int tile_rows = 1 << cm->log2_tile_rows;
938 TileInfo tile[4][1 << 6];
939 TOKENEXTRA *pre_tok = cpi->tok; 937 TOKENEXTRA *pre_tok = cpi->tok;
940 int tile_tok = 0; 938 int tile_tok = 0;
941 939
942 vpx_memset(cm->above_seg_context, 0, sizeof(*cm->above_seg_context) * 940 vpx_memset(cm->above_seg_context, 0, sizeof(*cm->above_seg_context) *
943 mi_cols_aligned_to_sb(cm->mi_cols)); 941 mi_cols_aligned_to_sb(cm->mi_cols));
944 942
945 for (tile_row = 0; tile_row < tile_rows; ++tile_row) { 943 for (tile_row = 0; tile_row < tile_rows; ++tile_row) {
946 for (tile_col = 0; tile_col < tile_cols; ++tile_col) { 944 for (tile_col = 0; tile_col < tile_cols; ++tile_col) {
947 vp9_tile_init(&tile[tile_row][tile_col], cm, tile_row, tile_col); 945 int tile_idx = tile_row * tile_cols + tile_col;
948
949 tok[tile_row][tile_col] = pre_tok + tile_tok; 946 tok[tile_row][tile_col] = pre_tok + tile_tok;
950 pre_tok = tok[tile_row][tile_col]; 947 pre_tok = tok[tile_row][tile_col];
951 tile_tok = allocated_tokens(tile[tile_row][tile_col]); 948 tile_tok = allocated_tokens(cpi->tile_data[tile_idx].tile_info);
952 } 949 }
953 } 950 }
954 951
955 for (tile_row = 0; tile_row < tile_rows; tile_row++) { 952 for (tile_row = 0; tile_row < tile_rows; tile_row++) {
956 for (tile_col = 0; tile_col < tile_cols; tile_col++) { 953 for (tile_col = 0; tile_col < tile_cols; tile_col++) {
957 const TileInfo * const ptile = &tile[tile_row][tile_col]; 954 int tile_idx = tile_row * tile_cols + tile_col;
958
959 tok_end = tok[tile_row][tile_col] + cpi->tok_count[tile_row][tile_col]; 955 tok_end = tok[tile_row][tile_col] + cpi->tok_count[tile_row][tile_col];
960 956
961 if (tile_col < tile_cols - 1 || tile_row < tile_rows - 1) 957 if (tile_col < tile_cols - 1 || tile_row < tile_rows - 1)
962 vp9_start_encode(&residual_bc, data_ptr + total_size + 4); 958 vp9_start_encode(&residual_bc, data_ptr + total_size + 4);
963 else 959 else
964 vp9_start_encode(&residual_bc, data_ptr + total_size); 960 vp9_start_encode(&residual_bc, data_ptr + total_size);
965 961
966 write_modes(cpi, ptile, &residual_bc, &tok[tile_row][tile_col], tok_end); 962 write_modes(cpi, &cpi->tile_data[tile_idx].tile_info,
963 &residual_bc, &tok[tile_row][tile_col], tok_end);
967 assert(tok[tile_row][tile_col] == tok_end); 964 assert(tok[tile_row][tile_col] == tok_end);
968 vp9_stop_encode(&residual_bc); 965 vp9_stop_encode(&residual_bc);
969 if (tile_col < tile_cols - 1 || tile_row < tile_rows - 1) { 966 if (tile_col < tile_cols - 1 || tile_row < tile_rows - 1) {
970 // size of this tile 967 // size of this tile
971 mem_put_be32(data_ptr + total_size, residual_bc.pos); 968 mem_put_be32(data_ptr + total_size, residual_bc.pos);
972 total_size += 4; 969 total_size += 4;
973 } 970 }
974 971
975 total_size += residual_bc.pos; 972 total_size += residual_bc.pos;
976 } 973 }
(...skipping 177 matching lines...)
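
[Editor's note] The reworked encode_tiles loop above addresses per-tile state through a flat, row-major index (tile_idx = tile_row * tile_cols + tile_col) instead of a local 2-D TileInfo array. A small stand-alone sketch of that indexing scheme, with cpi->tile_data replaced by a plain array and tile counts derived from the log2 fields as in the diff:

    #include <stdio.h>

    /* Hypothetical, much-reduced stand-in for the per-tile state kept in
     * cpi->tile_data[] after this change. */
    typedef struct {
      int mi_row_start, mi_col_start;  /* placeholder tile bounds */
    } TileData;

    int main(void) {
      const int log2_tile_cols = 2, log2_tile_rows = 1;
      const int tile_cols = 1 << log2_tile_cols;  /* 4 */
      const int tile_rows = 1 << log2_tile_rows;  /* 2 */
      TileData tile_data[2 * 4];
      int tile_row, tile_col;

      /* Row-major flattening: tile_idx = tile_row * tile_cols + tile_col. */
      for (tile_row = 0; tile_row < tile_rows; ++tile_row) {
        for (tile_col = 0; tile_col < tile_cols; ++tile_col) {
          const int tile_idx = tile_row * tile_cols + tile_col;
          tile_data[tile_idx].mi_row_start = tile_row;  /* illustrative only */
          tile_data[tile_idx].mi_col_start = tile_col;
          printf("tile (%d,%d) -> idx %d\n", tile_row, tile_col, tile_idx);
        }
      }
      return 0;
    }
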
1154 encode_loopfilter(&cm->lf, wb); 1151 encode_loopfilter(&cm->lf, wb);
1155 encode_quantization(cm, wb); 1152 encode_quantization(cm, wb);
1156 encode_segmentation(cm, &cpi->mb.e_mbd, wb); 1153 encode_segmentation(cm, &cpi->mb.e_mbd, wb);
1157 1154
1158 write_tile_info(cm, wb); 1155 write_tile_info(cm, wb);
1159 } 1156 }
1160 1157
1161 static size_t write_compressed_header(VP9_COMP *cpi, uint8_t *data) { 1158 static size_t write_compressed_header(VP9_COMP *cpi, uint8_t *data) {
1162 VP9_COMMON *const cm = &cpi->common; 1159 VP9_COMMON *const cm = &cpi->common;
1163 MACROBLOCKD *const xd = &cpi->mb.e_mbd; 1160 MACROBLOCKD *const xd = &cpi->mb.e_mbd;
1164 FRAME_CONTEXT *const fc = &cm->fc; 1161 FRAME_CONTEXT *const fc = cm->fc;
1165 vp9_writer header_bc; 1162 vp9_writer header_bc;
1166 1163
1167 vp9_start_encode(&header_bc, data); 1164 vp9_start_encode(&header_bc, data);
1168 1165
1169 if (xd->lossless) 1166 if (xd->lossless)
1170 cm->tx_mode = ONLY_4X4; 1167 cm->tx_mode = ONLY_4X4;
1171 else 1168 else
1172 encode_txfm_probs(cm, &header_bc); 1169 encode_txfm_probs(cm, &header_bc);
1173 1170
1174 update_coef_probs(cpi, &header_bc); 1171 update_coef_probs(cpi, &header_bc);
1175 update_skip_probs(cm, &header_bc); 1172 update_skip_probs(cm, &header_bc);
1176 1173
1177 if (!frame_is_intra_only(cm)) { 1174 if (!frame_is_intra_only(cm)) {
1178 int i; 1175 int i;
1179 1176
1180 for (i = 0; i < INTER_MODE_CONTEXTS; ++i) 1177 for (i = 0; i < INTER_MODE_CONTEXTS; ++i)
1181 prob_diff_update(vp9_inter_mode_tree, cm->fc.inter_mode_probs[i], 1178 prob_diff_update(vp9_inter_mode_tree, cm->fc->inter_mode_probs[i],
1182 cm->counts.inter_mode[i], INTER_MODES, &header_bc); 1179 cm->counts.inter_mode[i], INTER_MODES, &header_bc);
1183 1180
1184 vp9_zero(cm->counts.inter_mode); 1181 vp9_zero(cm->counts.inter_mode);
1185 1182
1186 if (cm->interp_filter == SWITCHABLE) 1183 if (cm->interp_filter == SWITCHABLE)
1187 update_switchable_interp_probs(cm, &header_bc); 1184 update_switchable_interp_probs(cm, &header_bc);
1188 1185
1189 for (i = 0; i < INTRA_INTER_CONTEXTS; i++) 1186 for (i = 0; i < INTRA_INTER_CONTEXTS; i++)
1190 vp9_cond_prob_diff_update(&header_bc, &fc->intra_inter_prob[i], 1187 vp9_cond_prob_diff_update(&header_bc, &fc->intra_inter_prob[i],
1191 cm->counts.intra_inter[i]); 1188 cm->counts.intra_inter[i]);
(...skipping 20 matching lines...)
1212 cm->counts.single_ref[i][1]); 1209 cm->counts.single_ref[i][1]);
1213 } 1210 }
1214 } 1211 }
1215 1212
1216 if (cm->reference_mode != SINGLE_REFERENCE) 1213 if (cm->reference_mode != SINGLE_REFERENCE)
1217 for (i = 0; i < REF_CONTEXTS; i++) 1214 for (i = 0; i < REF_CONTEXTS; i++)
1218 vp9_cond_prob_diff_update(&header_bc, &fc->comp_ref_prob[i], 1215 vp9_cond_prob_diff_update(&header_bc, &fc->comp_ref_prob[i],
1219 cm->counts.comp_ref[i]); 1216 cm->counts.comp_ref[i]);
1220 1217
1221 for (i = 0; i < BLOCK_SIZE_GROUPS; ++i) 1218 for (i = 0; i < BLOCK_SIZE_GROUPS; ++i)
1222 prob_diff_update(vp9_intra_mode_tree, cm->fc.y_mode_prob[i], 1219 prob_diff_update(vp9_intra_mode_tree, cm->fc->y_mode_prob[i],
1223 cm->counts.y_mode[i], INTRA_MODES, &header_bc); 1220 cm->counts.y_mode[i], INTRA_MODES, &header_bc);
1224 1221
1225 for (i = 0; i < PARTITION_CONTEXTS; ++i) 1222 for (i = 0; i < PARTITION_CONTEXTS; ++i)
1226 prob_diff_update(vp9_partition_tree, fc->partition_prob[i], 1223 prob_diff_update(vp9_partition_tree, fc->partition_prob[i],
1227 cm->counts.partition[i], PARTITION_TYPES, &header_bc); 1224 cm->counts.partition[i], PARTITION_TYPES, &header_bc);
1228 1225
1229 vp9_write_nmv_probs(cm, cm->allow_high_precision_mv, &header_bc); 1226 vp9_write_nmv_probs(cm, cm->allow_high_precision_mv, &header_bc);
1230 } 1227 }
1231 1228
1232 vp9_stop_encode(&header_bc); 1229 vp9_stop_encode(&header_bc);
(...skipping 19 matching lines...)
1252 1249
1253 first_part_size = write_compressed_header(cpi, data); 1250 first_part_size = write_compressed_header(cpi, data);
1254 data += first_part_size; 1251 data += first_part_size;
1255 // TODO(jbb): Figure out what to do if first_part_size > 16 bits. 1252 // TODO(jbb): Figure out what to do if first_part_size > 16 bits.
1256 vp9_wb_write_literal(&saved_wb, (int)first_part_size, 16); 1253 vp9_wb_write_literal(&saved_wb, (int)first_part_size, 16);
1257 1254
1258 data += encode_tiles(cpi, data); 1255 data += encode_tiles(cpi, data);
1259 1256
1260 *size = data - dest; 1257 *size = data - dest;
1261 } 1258 }
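
[Editor's note] To make the packing in encode_tiles easier to follow: every tile except the last is preceded by a 4-byte big-endian length (written with mem_put_be32 in the diff), and the compressed-header size is stored in the uncompressed header as a 16-bit literal. A minimal sketch of the length-prefix convention, using a hand-rolled big-endian store in place of libvpx's mem_put_be32:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Hand-rolled equivalent of a 4-byte big-endian store. */
    static void put_be32(uint8_t *mem, uint32_t val) {
      mem[0] = (uint8_t)(val >> 24);
      mem[1] = (uint8_t)(val >> 16);
      mem[2] = (uint8_t)(val >> 8);
      mem[3] = (uint8_t)(val >> 0);
    }

    int main(void) {
      /* Two fake "tiles"; only the first gets a length prefix, as in the diff. */
      const uint8_t tile0[] = { 0xAA, 0xBB, 0xCC };
      const uint8_t tile1[] = { 0xDD, 0xEE };
      uint8_t buf[32];
      size_t total = 0;

      put_be32(buf + total, (uint32_t)sizeof(tile0));   /* size of this tile */
      total += 4;
      memcpy(buf + total, tile0, sizeof(tile0));
      total += sizeof(tile0);

      memcpy(buf + total, tile1, sizeof(tile1));        /* last tile: no prefix */
      total += sizeof(tile1);

      printf("packed %zu bytes\n", total);              /* prints 9 */
      return 0;
    }
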