Chromium Code Reviews

Side by Side Diff: source/libvpx/vp9/encoder/vp9_bitstream.c

Issue 478033002: libvpx: Pull from upstream (Closed)
Base URL: svn://svn.chromium.org/chrome/trunk/deps/third_party/libvpx/
Patch Set: Created 6 years, 4 months ago
1 /* 1 /*
2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved. 2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
3 * 3 *
4 * Use of this source code is governed by a BSD-style license 4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source 5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found 6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may 7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree. 8 * be found in the AUTHORS file in the root of the source tree.
9 */ 9 */
10 10
(...skipping 60 matching lines...)
71 unsigned int branch_ct[32][2]; 71 unsigned int branch_ct[32][2];
72 72
73 // Assuming max number of probabilities <= 32 73 // Assuming max number of probabilities <= 32
74 assert(n <= 32); 74 assert(n <= 32);
75 75
76 vp9_tree_probs_from_distribution(tree, branch_ct, counts); 76 vp9_tree_probs_from_distribution(tree, branch_ct, counts);
77 for (i = 0; i < n - 1; ++i) 77 for (i = 0; i < n - 1; ++i)
78 vp9_cond_prob_diff_update(w, &probs[i], branch_ct[i]); 78 vp9_cond_prob_diff_update(w, &probs[i], branch_ct[i]);
79 } 79 }
80 80
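
For context on the counts-to-probabilities step above: vp9_tree_probs_from_distribution reduces per-symbol counts to per-branch (0/1) counts, and each branch probability is then derived from its two counts. A minimal sketch of that last step; the exact rounding and clamping used by libvpx's helper are an assumption here.

    #include <stdio.h>

    typedef unsigned char vp9_prob;

    /* Derive a binary branch probability from observed branch counts.
     * Probabilities are 8-bit, clamped to [1, 255] so neither branch is
     * ever considered impossible. */
    static vp9_prob binary_prob(unsigned int ct0, unsigned int ct1) {
      const unsigned int den = ct0 + ct1;
      unsigned int p;
      if (den == 0) return 128;              /* no evidence: stay neutral */
      p = (ct0 * 256 + (den >> 1)) / den;    /* rounded scaling (assumed) */
      if (p < 1) p = 1;
      if (p > 255) p = 255;
      return (vp9_prob)p;
    }

    int main(void) {
      /* branch went toward "0" in 30 of 40 observations */
      printf("%u\n", binary_prob(30, 10));   /* prints 192 */
      return 0;
    }
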
81 static void write_selected_tx_size(const VP9_COMP *cpi, 81 static void write_selected_tx_size(const VP9_COMMON *cm,
82 const MACROBLOCKD *xd,
82 TX_SIZE tx_size, BLOCK_SIZE bsize, 83 TX_SIZE tx_size, BLOCK_SIZE bsize,
83 vp9_writer *w) { 84 vp9_writer *w) {
84 const TX_SIZE max_tx_size = max_txsize_lookup[bsize]; 85 const TX_SIZE max_tx_size = max_txsize_lookup[bsize];
85 const MACROBLOCKD *const xd = &cpi->mb.e_mbd;
86 const vp9_prob *const tx_probs = get_tx_probs2(max_tx_size, xd, 86 const vp9_prob *const tx_probs = get_tx_probs2(max_tx_size, xd,
87 &cpi->common.fc.tx_probs); 87 &cm->fc.tx_probs);
88 vp9_write(w, tx_size != TX_4X4, tx_probs[0]); 88 vp9_write(w, tx_size != TX_4X4, tx_probs[0]);
89 if (tx_size != TX_4X4 && max_tx_size >= TX_16X16) { 89 if (tx_size != TX_4X4 && max_tx_size >= TX_16X16) {
90 vp9_write(w, tx_size != TX_8X8, tx_probs[1]); 90 vp9_write(w, tx_size != TX_8X8, tx_probs[1]);
91 if (tx_size != TX_8X8 && max_tx_size >= TX_32X32) 91 if (tx_size != TX_8X8 && max_tx_size >= TX_32X32)
92 vp9_write(w, tx_size != TX_16X16, tx_probs[2]); 92 vp9_write(w, tx_size != TX_16X16, tx_probs[2]);
93 } 93 }
94 } 94 }
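
The cascade above writes at most three binary decisions, each emitted only while the size is still unresolved and the block is large enough to allow a bigger transform. A standalone sketch (probabilities and the arithmetic writer omitted) of which bits each size produces when max_tx_size is TX_32X32:

    #include <stdio.h>

    typedef enum { TX_4X4, TX_8X8, TX_16X16, TX_32X32 } TX_SIZE;

    /* Mirror of write_selected_tx_size(): print the decisions instead of
     * arithmetic-coding them. */
    static void show_tx_bits(TX_SIZE tx_size, TX_SIZE max_tx_size) {
      printf("tx_size=%d:", (int)tx_size);
      printf(" %d", tx_size != TX_4X4);
      if (tx_size != TX_4X4 && max_tx_size >= TX_16X16) {
        printf(" %d", tx_size != TX_8X8);
        if (tx_size != TX_8X8 && max_tx_size >= TX_32X32)
          printf(" %d", tx_size != TX_16X16);
      }
      printf("\n");
    }

    int main(void) {
      int t;
      for (t = TX_4X4; t <= TX_32X32; ++t)
        show_tx_bits((TX_SIZE)t, TX_32X32);  /* 0 / 1 0 / 1 1 0 / 1 1 1 */
      return 0;
    }
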
95 95
96 static int write_skip(const VP9_COMP *cpi, int segment_id, const MODE_INFO *mi, 96 static int write_skip(const VP9_COMMON *cm, const MACROBLOCKD *xd,
97 vp9_writer *w) { 97 int segment_id, const MODE_INFO *mi, vp9_writer *w) {
98 const MACROBLOCKD *const xd = &cpi->mb.e_mbd; 98 if (vp9_segfeature_active(&cm->seg, segment_id, SEG_LVL_SKIP)) {
99 if (vp9_segfeature_active(&cpi->common.seg, segment_id, SEG_LVL_SKIP)) {
100 return 1; 99 return 1;
101 } else { 100 } else {
102 const int skip = mi->mbmi.skip; 101 const int skip = mi->mbmi.skip;
103 vp9_write(w, skip, vp9_get_skip_prob(&cpi->common, xd)); 102 vp9_write(w, skip, vp9_get_skip_prob(cm, xd));
104 return skip; 103 return skip;
105 } 104 }
106 } 105 }
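
write_selected_tx_size and write_skip above both show the pattern this patch applies across the file: helpers that used to take the whole encoder context (VP9_COMP) now receive only the frame state (VP9_COMMON) and block descriptor (MACROBLOCKD) they actually read. A hypothetical before/after illustration with stand-in types (the real ones live in vp9_onyxc_int.h, vp9_blockd.h and vp9_encoder.h):

    #include <stdio.h>

    /* Stand-in types, not the real libvpx definitions. */
    typedef struct { int frame_state; } VP9_COMMON;
    typedef struct { int block_state; } MACROBLOCKD;
    typedef struct {
      VP9_COMMON common;
      struct { MACROBLOCKD e_mbd; } mb;
    } VP9_COMP;

    /* Before: the helper digs its inputs out of the encoder itself. */
    static int helper_old(const VP9_COMP *cpi) {
      return cpi->common.frame_state + cpi->mb.e_mbd.block_state;
    }

    /* After: the caller hands over exactly what is read, so the helper
     * no longer depends on encoder-only plumbing. */
    static int helper_new(const VP9_COMMON *cm, const MACROBLOCKD *xd) {
      return cm->frame_state + xd->block_state;
    }

    int main(void) {
      VP9_COMP cpi = { {1}, { {2} } };
      printf("%d %d\n", helper_old(&cpi),
             helper_new(&cpi.common, &cpi.mb.e_mbd));
      return 0;
    }

Call sites change accordingly, e.g. from helper_old(cpi) to helper_new(&cpi->common, &cpi->mb.e_mbd), which is exactly the shape of the edits to write_skip's callers further down.
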
107 106
108 static void update_skip_probs(VP9_COMMON *cm, vp9_writer *w) { 107 static void update_skip_probs(VP9_COMMON *cm, vp9_writer *w) {
109 int k; 108 int k;
110 109
111 for (k = 0; k < SKIP_CONTEXTS; ++k) 110 for (k = 0; k < SKIP_CONTEXTS; ++k)
112 vp9_cond_prob_diff_update(w, &cm->fc.skip_probs[k], cm->counts.skip[k]); 111 vp9_cond_prob_diff_update(w, &cm->fc.skip_probs[k], cm->counts.skip[k]);
113 } 112 }
114 113
115 static void update_switchable_interp_probs(VP9_COMMON *cm, vp9_writer *w) { 114 static void update_switchable_interp_probs(VP9_COMMON *cm, vp9_writer *w) {
116 int j; 115 int j;
117 for (j = 0; j < SWITCHABLE_FILTER_CONTEXTS; ++j) 116 for (j = 0; j < SWITCHABLE_FILTER_CONTEXTS; ++j)
118 prob_diff_update(vp9_switchable_interp_tree, 117 prob_diff_update(vp9_switchable_interp_tree,
119 cm->fc.switchable_interp_prob[j], 118 cm->fc.switchable_interp_prob[j],
120 cm->counts.switchable_interp[j], SWITCHABLE_FILTERS, w); 119 cm->counts.switchable_interp[j], SWITCHABLE_FILTERS, w);
121 } 120 }
122 121
123 static void pack_mb_tokens(vp9_writer *w, 122 static void pack_mb_tokens(vp9_writer *w,
124 TOKENEXTRA **tp, const TOKENEXTRA *stop) { 123 TOKENEXTRA **tp, const TOKENEXTRA *const stop) {
125 TOKENEXTRA *p = *tp; 124 TOKENEXTRA *p = *tp;
126 125
127 while (p < stop && p->token != EOSB_TOKEN) { 126 while (p < stop && p->token != EOSB_TOKEN) {
128 const int t = p->token; 127 const int t = p->token;
129 const struct vp9_token *const a = &vp9_coef_encodings[t]; 128 const struct vp9_token *const a = &vp9_coef_encodings[t];
130 const vp9_extra_bit *const b = &vp9_extra_bits[t]; 129 const vp9_extra_bit *const b = &vp9_extra_bits[t];
131 int i = 0; 130 int i = 0;
132 int v = a->value; 131 int v = a->value;
133 int n = a->len; 132 int n = a->len;
134 133
(...skipping 46 matching lines...)
181 *tp = p + (p->token == EOSB_TOKEN); 180 *tp = p + (p->token == EOSB_TOKEN);
182 } 181 }
183 182
184 static void write_segment_id(vp9_writer *w, const struct segmentation *seg, 183 static void write_segment_id(vp9_writer *w, const struct segmentation *seg,
185 int segment_id) { 184 int segment_id) {
186 if (seg->enabled && seg->update_map) 185 if (seg->enabled && seg->update_map)
187 vp9_write_tree(w, vp9_segment_tree, seg->tree_probs, segment_id, 3, 0); 186 vp9_write_tree(w, vp9_segment_tree, seg->tree_probs, segment_id, 3, 0);
188 } 187 }
189 188
190 // This function encodes the reference frame 189 // This function encodes the reference frame
191 static void write_ref_frames(const VP9_COMP *cpi, vp9_writer *w) { 190 static void write_ref_frames(const VP9_COMMON *cm, const MACROBLOCKD *xd,
192 const VP9_COMMON *const cm = &cpi->common; 191 vp9_writer *w) {
193 const MACROBLOCKD *const xd = &cpi->mb.e_mbd;
194 const MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi; 192 const MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
195 const int is_compound = has_second_ref(mbmi); 193 const int is_compound = has_second_ref(mbmi);
196 const int segment_id = mbmi->segment_id; 194 const int segment_id = mbmi->segment_id;
197 195
198 // If segment level coding of this signal is disabled... 196 // If segment level coding of this signal is disabled...
199 // or the segment allows multiple reference frame options 197 // or the segment allows multiple reference frame options
200 if (vp9_segfeature_active(&cm->seg, segment_id, SEG_LVL_REF_FRAME)) { 198 if (vp9_segfeature_active(&cm->seg, segment_id, SEG_LVL_REF_FRAME)) {
201 assert(!is_compound); 199 assert(!is_compound);
202 assert(mbmi->ref_frame[0] == 200 assert(mbmi->ref_frame[0] ==
203 vp9_get_segdata(&cm->seg, segment_id, SEG_LVL_REF_FRAME)); 201 vp9_get_segdata(&cm->seg, segment_id, SEG_LVL_REF_FRAME));
(...skipping 41 matching lines...)
245 const int pred_flag = mbmi->seg_id_predicted; 243 const int pred_flag = mbmi->seg_id_predicted;
246 vp9_prob pred_prob = vp9_get_pred_prob_seg_id(seg, xd); 244 vp9_prob pred_prob = vp9_get_pred_prob_seg_id(seg, xd);
247 vp9_write(w, pred_flag, pred_prob); 245 vp9_write(w, pred_flag, pred_prob);
248 if (!pred_flag) 246 if (!pred_flag)
249 write_segment_id(w, seg, segment_id); 247 write_segment_id(w, seg, segment_id);
250 } else { 248 } else {
251 write_segment_id(w, seg, segment_id); 249 write_segment_id(w, seg, segment_id);
252 } 250 }
253 } 251 }
254 252
255 skip = write_skip(cpi, segment_id, mi, w); 253 skip = write_skip(cm, xd, segment_id, mi, w);
256 254
257 if (!vp9_segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME)) 255 if (!vp9_segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME))
258 vp9_write(w, is_inter, vp9_get_intra_inter_prob(cm, xd)); 256 vp9_write(w, is_inter, vp9_get_intra_inter_prob(cm, xd));
259 257
260 if (bsize >= BLOCK_8X8 && cm->tx_mode == TX_MODE_SELECT && 258 if (bsize >= BLOCK_8X8 && cm->tx_mode == TX_MODE_SELECT &&
261 !(is_inter && 259 !(is_inter &&
262 (skip || vp9_segfeature_active(seg, segment_id, SEG_LVL_SKIP)))) { 260 (skip || vp9_segfeature_active(seg, segment_id, SEG_LVL_SKIP)))) {
263 write_selected_tx_size(cpi, mbmi->tx_size, bsize, w); 261 write_selected_tx_size(cm, xd, mbmi->tx_size, bsize, w);
264 } 262 }
265 263
266 if (!is_inter) { 264 if (!is_inter) {
267 if (bsize >= BLOCK_8X8) { 265 if (bsize >= BLOCK_8X8) {
268 write_intra_mode(w, mode, cm->fc.y_mode_prob[size_group_lookup[bsize]]); 266 write_intra_mode(w, mode, cm->fc.y_mode_prob[size_group_lookup[bsize]]);
269 } else { 267 } else {
270 int idx, idy; 268 int idx, idy;
271 const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize]; 269 const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize];
272 const int num_4x4_h = num_4x4_blocks_high_lookup[bsize]; 270 const int num_4x4_h = num_4x4_blocks_high_lookup[bsize];
273 for (idy = 0; idy < 2; idy += num_4x4_h) { 271 for (idy = 0; idy < 2; idy += num_4x4_h) {
274 for (idx = 0; idx < 2; idx += num_4x4_w) { 272 for (idx = 0; idx < 2; idx += num_4x4_w) {
275 const PREDICTION_MODE b_mode = mi->bmi[idy * 2 + idx].as_mode; 273 const PREDICTION_MODE b_mode = mi->bmi[idy * 2 + idx].as_mode;
276 write_intra_mode(w, b_mode, cm->fc.y_mode_prob[0]); 274 write_intra_mode(w, b_mode, cm->fc.y_mode_prob[0]);
277 } 275 }
278 } 276 }
279 } 277 }
280 write_intra_mode(w, mbmi->uv_mode, cm->fc.uv_mode_prob[mode]); 278 write_intra_mode(w, mbmi->uv_mode, cm->fc.uv_mode_prob[mode]);
281 } else { 279 } else {
282 const int mode_ctx = mbmi->mode_context[mbmi->ref_frame[0]]; 280 const int mode_ctx = mbmi->mode_context[mbmi->ref_frame[0]];
283 const vp9_prob *const inter_probs = cm->fc.inter_mode_probs[mode_ctx]; 281 const vp9_prob *const inter_probs = cm->fc.inter_mode_probs[mode_ctx];
284 write_ref_frames(cpi, w); 282 write_ref_frames(cm, xd, w);
285 283
286 // If segment skip is not enabled code the mode. 284 // If segment skip is not enabled code the mode.
287 if (!vp9_segfeature_active(seg, segment_id, SEG_LVL_SKIP)) { 285 if (!vp9_segfeature_active(seg, segment_id, SEG_LVL_SKIP)) {
288 if (bsize >= BLOCK_8X8) { 286 if (bsize >= BLOCK_8X8) {
289 write_inter_mode(w, mode, inter_probs); 287 write_inter_mode(w, mode, inter_probs);
290 ++cm->counts.inter_mode[mode_ctx][INTER_OFFSET(mode)]; 288 ++cm->counts.inter_mode[mode_ctx][INTER_OFFSET(mode)];
291 } 289 }
292 } 290 }
293 291
294 if (cm->interp_filter == SWITCHABLE) { 292 if (cm->interp_filter == SWITCHABLE) {
(...skipping 27 matching lines...)
322 if (mode == NEWMV) { 320 if (mode == NEWMV) {
323 for (ref = 0; ref < 1 + is_compound; ++ref) 321 for (ref = 0; ref < 1 + is_compound; ++ref)
324 vp9_encode_mv(cpi, w, &mbmi->mv[ref].as_mv, 322 vp9_encode_mv(cpi, w, &mbmi->mv[ref].as_mv,
325 &mbmi->ref_mvs[mbmi->ref_frame[ref]][0].as_mv, nmvc, 323 &mbmi->ref_mvs[mbmi->ref_frame[ref]][0].as_mv, nmvc,
326 allow_hp); 324 allow_hp);
327 } 325 }
328 } 326 }
329 } 327 }
330 } 328 }
331 329
332 static void write_mb_modes_kf(const VP9_COMP *cpi, MODE_INFO **mi_8x8, 330 static void write_mb_modes_kf(const VP9_COMMON *cm, const MACROBLOCKD *xd,
333 vp9_writer *w) { 331 MODE_INFO **mi_8x8, vp9_writer *w) {
334 const VP9_COMMON *const cm = &cpi->common;
335 const MACROBLOCKD *const xd = &cpi->mb.e_mbd;
336 const struct segmentation *const seg = &cm->seg; 332 const struct segmentation *const seg = &cm->seg;
337 const MODE_INFO *const mi = mi_8x8[0]; 333 const MODE_INFO *const mi = mi_8x8[0];
338 const MODE_INFO *const above_mi = mi_8x8[-xd->mi_stride]; 334 const MODE_INFO *const above_mi = mi_8x8[-xd->mi_stride];
339 const MODE_INFO *const left_mi = xd->left_available ? mi_8x8[-1] : NULL; 335 const MODE_INFO *const left_mi = xd->left_available ? mi_8x8[-1] : NULL;
340 const MB_MODE_INFO *const mbmi = &mi->mbmi; 336 const MB_MODE_INFO *const mbmi = &mi->mbmi;
341 const BLOCK_SIZE bsize = mbmi->sb_type; 337 const BLOCK_SIZE bsize = mbmi->sb_type;
342 338
343 if (seg->update_map) 339 if (seg->update_map)
344 write_segment_id(w, seg, mbmi->segment_id); 340 write_segment_id(w, seg, mbmi->segment_id);
345 341
346 write_skip(cpi, mbmi->segment_id, mi, w); 342 write_skip(cm, xd, mbmi->segment_id, mi, w);
347 343
348 if (bsize >= BLOCK_8X8 && cm->tx_mode == TX_MODE_SELECT) 344 if (bsize >= BLOCK_8X8 && cm->tx_mode == TX_MODE_SELECT)
349 write_selected_tx_size(cpi, mbmi->tx_size, bsize, w); 345 write_selected_tx_size(cm, xd, mbmi->tx_size, bsize, w);
350 346
351 if (bsize >= BLOCK_8X8) { 347 if (bsize >= BLOCK_8X8) {
352 write_intra_mode(w, mbmi->mode, get_y_mode_probs(mi, above_mi, left_mi, 0)); 348 write_intra_mode(w, mbmi->mode, get_y_mode_probs(mi, above_mi, left_mi, 0));
353 } else { 349 } else {
354 const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize]; 350 const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize];
355 const int num_4x4_h = num_4x4_blocks_high_lookup[bsize]; 351 const int num_4x4_h = num_4x4_blocks_high_lookup[bsize];
356 int idx, idy; 352 int idx, idy;
357 353
358 for (idy = 0; idy < 2; idy += num_4x4_h) { 354 for (idy = 0; idy < 2; idy += num_4x4_h) {
359 for (idx = 0; idx < 2; idx += num_4x4_w) { 355 for (idx = 0; idx < 2; idx += num_4x4_w) {
360 const int block = idy * 2 + idx; 356 const int block = idy * 2 + idx;
361 write_intra_mode(w, mi->bmi[block].as_mode, 357 write_intra_mode(w, mi->bmi[block].as_mode,
362 get_y_mode_probs(mi, above_mi, left_mi, block)); 358 get_y_mode_probs(mi, above_mi, left_mi, block));
363 } 359 }
364 } 360 }
365 } 361 }
366 362
367 write_intra_mode(w, mbmi->uv_mode, vp9_kf_uv_mode_prob[mbmi->mode]); 363 write_intra_mode(w, mbmi->uv_mode, vp9_kf_uv_mode_prob[mbmi->mode]);
368 } 364 }
369 365
370 static void write_modes_b(VP9_COMP *cpi, const TileInfo *const tile, 366 static void write_modes_b(VP9_COMP *cpi, const TileInfo *const tile,
371 vp9_writer *w, TOKENEXTRA **tok, TOKENEXTRA *tok_end, 367 vp9_writer *w, TOKENEXTRA **tok,
368 const TOKENEXTRA *const tok_end,
372 int mi_row, int mi_col) { 369 int mi_row, int mi_col) {
373 VP9_COMMON *const cm = &cpi->common; 370 const VP9_COMMON *const cm = &cpi->common;
374 MACROBLOCKD *const xd = &cpi->mb.e_mbd; 371 MACROBLOCKD *const xd = &cpi->mb.e_mbd;
375 MODE_INFO *m; 372 MODE_INFO *m;
376 373
377 xd->mi = cm->mi_grid_visible + (mi_row * cm->mi_stride + mi_col); 374 xd->mi = cm->mi_grid_visible + (mi_row * cm->mi_stride + mi_col);
378 m = xd->mi[0]; 375 m = xd->mi[0];
379 376
380 set_mi_row_col(xd, tile, 377 set_mi_row_col(xd, tile,
381 mi_row, num_8x8_blocks_high_lookup[m->mbmi.sb_type], 378 mi_row, num_8x8_blocks_high_lookup[m->mbmi.sb_type],
382 mi_col, num_8x8_blocks_wide_lookup[m->mbmi.sb_type], 379 mi_col, num_8x8_blocks_wide_lookup[m->mbmi.sb_type],
383 cm->mi_rows, cm->mi_cols); 380 cm->mi_rows, cm->mi_cols);
384 if (frame_is_intra_only(cm)) { 381 if (frame_is_intra_only(cm)) {
385 write_mb_modes_kf(cpi, xd->mi, w); 382 write_mb_modes_kf(cm, xd, xd->mi, w);
386 } else { 383 } else {
387 pack_inter_mode_mvs(cpi, m, w); 384 pack_inter_mode_mvs(cpi, m, w);
388 } 385 }
389 386
390 assert(*tok < tok_end); 387 assert(*tok < tok_end);
391 pack_mb_tokens(w, tok, tok_end); 388 pack_mb_tokens(w, tok, tok_end);
392 } 389 }
393 390
394 static void write_partition(VP9_COMMON *cm, MACROBLOCKD *xd, 391 static void write_partition(const VP9_COMMON *const cm,
392 const MACROBLOCKD *const xd,
395 int hbs, int mi_row, int mi_col, 393 int hbs, int mi_row, int mi_col,
396 PARTITION_TYPE p, BLOCK_SIZE bsize, vp9_writer *w) { 394 PARTITION_TYPE p, BLOCK_SIZE bsize, vp9_writer *w) {
397 const int ctx = partition_plane_context(xd, mi_row, mi_col, bsize); 395 const int ctx = partition_plane_context(xd, mi_row, mi_col, bsize);
398 const vp9_prob *const probs = get_partition_probs(cm, ctx); 396 const vp9_prob *const probs = get_partition_probs(cm, ctx);
399 const int has_rows = (mi_row + hbs) < cm->mi_rows; 397 const int has_rows = (mi_row + hbs) < cm->mi_rows;
400 const int has_cols = (mi_col + hbs) < cm->mi_cols; 398 const int has_cols = (mi_col + hbs) < cm->mi_cols;
401 399
402 if (has_rows && has_cols) { 400 if (has_rows && has_cols) {
403 vp9_write_token(w, vp9_partition_tree, probs, &partition_encodings[p]); 401 vp9_write_token(w, vp9_partition_tree, probs, &partition_encodings[p]);
404 } else if (!has_rows && has_cols) { 402 } else if (!has_rows && has_cols) {
405 assert(p == PARTITION_SPLIT || p == PARTITION_HORZ); 403 assert(p == PARTITION_SPLIT || p == PARTITION_HORZ);
406 vp9_write(w, p == PARTITION_SPLIT, probs[1]); 404 vp9_write(w, p == PARTITION_SPLIT, probs[1]);
407 } else if (has_rows && !has_cols) { 405 } else if (has_rows && !has_cols) {
408 assert(p == PARTITION_SPLIT || p == PARTITION_VERT); 406 assert(p == PARTITION_SPLIT || p == PARTITION_VERT);
409 vp9_write(w, p == PARTITION_SPLIT, probs[2]); 407 vp9_write(w, p == PARTITION_SPLIT, probs[2]);
410 } else { 408 } else {
411 assert(p == PARTITION_SPLIT); 409 assert(p == PARTITION_SPLIT);
412 } 410 }
413 } 411 }
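
The edge handling above saves bits the decoder can infer on its own: a block straddling the bottom or right frame edge restricts which partitions are legal, and the bottom-right corner forces PARTITION_SPLIT with nothing coded at all. A sketch with the entropy coder replaced by printf:

    #include <assert.h>
    #include <stdio.h>

    typedef enum { PARTITION_NONE, PARTITION_HORZ, PARTITION_VERT,
                   PARTITION_SPLIT } PARTITION_TYPE;

    static void show_partition(int has_rows, int has_cols, PARTITION_TYPE p) {
      if (has_rows && has_cols) {
        printf("full tree token for p=%d\n", (int)p);
      } else if (!has_rows && has_cols) {
        assert(p == PARTITION_SPLIT || p == PARTITION_HORZ);  /* bottom edge */
        printf("one bit: %d\n", p == PARTITION_SPLIT);
      } else if (has_rows && !has_cols) {
        assert(p == PARTITION_SPLIT || p == PARTITION_VERT);  /* right edge */
        printf("one bit: %d\n", p == PARTITION_SPLIT);
      } else {
        assert(p == PARTITION_SPLIT);  /* corner: split is implied */
        printf("no bits\n");
      }
    }

    int main(void) {
      show_partition(1, 1, PARTITION_VERT);   /* interior block */
      show_partition(0, 1, PARTITION_HORZ);   /* bottom edge -> bit 0 */
      show_partition(0, 0, PARTITION_SPLIT);  /* corner -> nothing coded */
      return 0;
    }
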
414 412
415 static void write_modes_sb(VP9_COMP *cpi, 413 static void write_modes_sb(VP9_COMP *cpi,
416 const TileInfo *const tile, 414 const TileInfo *const tile, vp9_writer *w,
417 vp9_writer *w, TOKENEXTRA **tok, TOKENEXTRA *tok_end, 415 TOKENEXTRA **tok, const TOKENEXTRA *const tok_end,
418 int mi_row, int mi_col, BLOCK_SIZE bsize) { 416 int mi_row, int mi_col, BLOCK_SIZE bsize) {
419 VP9_COMMON *const cm = &cpi->common; 417 const VP9_COMMON *const cm = &cpi->common;
420 MACROBLOCKD *const xd = &cpi->mb.e_mbd; 418 MACROBLOCKD *const xd = &cpi->mb.e_mbd;
421 419
422 const int bsl = b_width_log2(bsize); 420 const int bsl = b_width_log2(bsize);
423 const int bs = (1 << bsl) / 4; 421 const int bs = (1 << bsl) / 4;
424 PARTITION_TYPE partition; 422 PARTITION_TYPE partition;
425 BLOCK_SIZE subsize; 423 BLOCK_SIZE subsize;
426 MODE_INFO *m = cm->mi_grid_visible[mi_row * cm->mi_stride + mi_col]; 424 const MODE_INFO *m = NULL;
427 425
428 if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) 426 if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
429 return; 427 return;
430 428
429 m = cm->mi_grid_visible[mi_row * cm->mi_stride + mi_col];
430
431 partition = partition_lookup[bsl][m->mbmi.sb_type]; 431 partition = partition_lookup[bsl][m->mbmi.sb_type];
432 write_partition(cm, xd, bs, mi_row, mi_col, partition, bsize, w); 432 write_partition(cm, xd, bs, mi_row, mi_col, partition, bsize, w);
433 subsize = get_subsize(bsize, partition); 433 subsize = get_subsize(bsize, partition);
434 if (subsize < BLOCK_8X8) { 434 if (subsize < BLOCK_8X8) {
435 write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col); 435 write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col);
436 } else { 436 } else {
437 switch (partition) { 437 switch (partition) {
438 case PARTITION_NONE: 438 case PARTITION_NONE:
439 write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col); 439 write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col);
440 break; 440 break;
(...skipping 21 matching lines...)
462 } 462 }
463 } 463 }
464 464
465 // update partition context 465 // update partition context
466 if (bsize >= BLOCK_8X8 && 466 if (bsize >= BLOCK_8X8 &&
467 (bsize == BLOCK_8X8 || partition != PARTITION_SPLIT)) 467 (bsize == BLOCK_8X8 || partition != PARTITION_SPLIT))
468 update_partition_context(xd, mi_row, mi_col, subsize, bsize); 468 update_partition_context(xd, mi_row, mi_col, subsize, bsize);
469 } 469 }
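
The switch arms elided above recurse into write_modes_sb once per quadrant on PARTITION_SPLIT. A toy walk of the same quadtree shape, with a fixed depth standing in for the real per-block partition decisions stored in the mode-info grid:

    #include <stdio.h>

    /* Visit a square block; either "code" it or split into four quadrants.
     * bs is the block size in 8x8 mode-info units. */
    static void walk(int mi_row, int mi_col, int bs, int depth) {
      if (depth == 0 || bs == 1) {
        printf("code block at (%d,%d), %d MI units wide\n", mi_row, mi_col, bs);
        return;
      }
      walk(mi_row,          mi_col,          bs / 2, depth - 1);
      walk(mi_row,          mi_col + bs / 2, bs / 2, depth - 1);
      walk(mi_row + bs / 2, mi_col,          bs / 2, depth - 1);
      walk(mi_row + bs / 2, mi_col + bs / 2, bs / 2, depth - 1);
    }

    int main(void) {
      walk(0, 0, 8, 2);  /* one 64x64 superblock = 8x8 MI units */
      return 0;
    }
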
470 470
471 static void write_modes(VP9_COMP *cpi, 471 static void write_modes(VP9_COMP *cpi,
472 const TileInfo *const tile, 472 const TileInfo *const tile, vp9_writer *w,
473 vp9_writer *w, TOKENEXTRA **tok, TOKENEXTRA *tok_end) { 473 TOKENEXTRA **tok, const TOKENEXTRA *const tok_end) {
474 int mi_row, mi_col; 474 int mi_row, mi_col;
475 475
476 for (mi_row = tile->mi_row_start; mi_row < tile->mi_row_end; 476 for (mi_row = tile->mi_row_start; mi_row < tile->mi_row_end;
477 mi_row += MI_BLOCK_SIZE) { 477 mi_row += MI_BLOCK_SIZE) {
478 vp9_zero(cpi->mb.e_mbd.left_seg_context); 478 vp9_zero(cpi->mb.e_mbd.left_seg_context);
479 for (mi_col = tile->mi_col_start; mi_col < tile->mi_col_end; 479 for (mi_col = tile->mi_col_start; mi_col < tile->mi_col_end;
480 mi_col += MI_BLOCK_SIZE) 480 mi_col += MI_BLOCK_SIZE)
481 write_modes_sb(cpi, tile, w, tok, tok_end, mi_row, mi_col, 481 write_modes_sb(cpi, tile, w, tok, tok_end, mi_row, mi_col,
482 BLOCK_64X64); 482 BLOCK_64X64);
483 } 483 }
(...skipping 240 matching lines...)
724 static void write_delta_q(struct vp9_write_bit_buffer *wb, int delta_q) { 724 static void write_delta_q(struct vp9_write_bit_buffer *wb, int delta_q) {
725 if (delta_q != 0) { 725 if (delta_q != 0) {
726 vp9_wb_write_bit(wb, 1); 726 vp9_wb_write_bit(wb, 1);
727 vp9_wb_write_literal(wb, abs(delta_q), 4); 727 vp9_wb_write_literal(wb, abs(delta_q), 4);
728 vp9_wb_write_bit(wb, delta_q < 0); 728 vp9_wb_write_bit(wb, delta_q < 0);
729 } else { 729 } else {
730 vp9_wb_write_bit(wb, 0); 730 vp9_wb_write_bit(wb, 0);
731 } 731 }
732 } 732 }
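
write_delta_q codes each quantizer delta in sign-magnitude form behind a presence flag: one bit for zero/nonzero, then four magnitude bits and a sign bit. A sketch with the bit writer swapped for printf:

    #include <stdio.h>
    #include <stdlib.h>

    static void show_delta_q(int delta_q) {
      if (delta_q != 0)
        printf("delta_q=%3d -> 1, magnitude %d in 4 bits, sign bit %d\n",
               delta_q, abs(delta_q), delta_q < 0);
      else
        printf("delta_q=  0 -> single 0 bit\n");
    }

    int main(void) {
      show_delta_q(0);    /* 0        */
      show_delta_q(5);    /* 1 0101 0 */
      show_delta_q(-3);   /* 1 0011 1 */
      return 0;
    }
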
733 733
734 static void encode_quantization(VP9_COMMON *cm, 734 static void encode_quantization(const VP9_COMMON *const cm,
735 struct vp9_write_bit_buffer *wb) { 735 struct vp9_write_bit_buffer *wb) {
736 vp9_wb_write_literal(wb, cm->base_qindex, QINDEX_BITS); 736 vp9_wb_write_literal(wb, cm->base_qindex, QINDEX_BITS);
737 write_delta_q(wb, cm->y_dc_delta_q); 737 write_delta_q(wb, cm->y_dc_delta_q);
738 write_delta_q(wb, cm->uv_dc_delta_q); 738 write_delta_q(wb, cm->uv_dc_delta_q);
739 write_delta_q(wb, cm->uv_ac_delta_q); 739 write_delta_q(wb, cm->uv_ac_delta_q);
740 } 740 }
741 741
742 static void encode_segmentation(VP9_COMP *cpi, 742 static void encode_segmentation(VP9_COMMON *cm, MACROBLOCKD *xd,
743 struct vp9_write_bit_buffer *wb) { 743 struct vp9_write_bit_buffer *wb) {
744 int i, j; 744 int i, j;
745 745
746 struct segmentation *seg = &cpi->common.seg; 746 const struct segmentation *seg = &cm->seg;
747 747
748 vp9_wb_write_bit(wb, seg->enabled); 748 vp9_wb_write_bit(wb, seg->enabled);
749 if (!seg->enabled) 749 if (!seg->enabled)
750 return; 750 return;
751 751
752 // Segmentation map 752 // Segmentation map
753 vp9_wb_write_bit(wb, seg->update_map); 753 vp9_wb_write_bit(wb, seg->update_map);
754 if (seg->update_map) { 754 if (seg->update_map) {
755 // Select the coding strategy (temporal or spatial) 755 // Select the coding strategy (temporal or spatial)
756 vp9_choose_segmap_coding_method(cpi); 756 vp9_choose_segmap_coding_method(cm, xd);
757 // Write out probabilities used to decode unpredicted macro-block segments 757 // Write out probabilities used to decode unpredicted macro-block segments
758 for (i = 0; i < SEG_TREE_PROBS; i++) { 758 for (i = 0; i < SEG_TREE_PROBS; i++) {
759 const int prob = seg->tree_probs[i]; 759 const int prob = seg->tree_probs[i];
760 const int update = prob != MAX_PROB; 760 const int update = prob != MAX_PROB;
761 vp9_wb_write_bit(wb, update); 761 vp9_wb_write_bit(wb, update);
762 if (update) 762 if (update)
763 vp9_wb_write_literal(wb, prob, 8); 763 vp9_wb_write_literal(wb, prob, 8);
764 } 764 }
765 765
766 // Write out the chosen coding method. 766 // Write out the chosen coding method.
(...skipping 95 matching lines...)
862 for (i = 0; i < SWITCHABLE_FILTERS; ++i) { 862 for (i = 0; i < SWITCHABLE_FILTERS; ++i) {
863 if (count[i]) { 863 if (count[i]) {
864 cm->interp_filter = i; 864 cm->interp_filter = i;
865 break; 865 break;
866 } 866 }
867 } 867 }
868 } 868 }
869 } 869 }
870 } 870 }
871 871
872 static void write_tile_info(VP9_COMMON *cm, struct vp9_write_bit_buffer *wb) { 872 static void write_tile_info(const VP9_COMMON *const cm,
873 struct vp9_write_bit_buffer *wb) {
873 int min_log2_tile_cols, max_log2_tile_cols, ones; 874 int min_log2_tile_cols, max_log2_tile_cols, ones;
874 vp9_get_tile_n_bits(cm->mi_cols, &min_log2_tile_cols, &max_log2_tile_cols); 875 vp9_get_tile_n_bits(cm->mi_cols, &min_log2_tile_cols, &max_log2_tile_cols);
875 876
876 // columns 877 // columns
877 ones = cm->log2_tile_cols - min_log2_tile_cols; 878 ones = cm->log2_tile_cols - min_log2_tile_cols;
878 while (ones--) 879 while (ones--)
879 vp9_wb_write_bit(wb, 1); 880 vp9_wb_write_bit(wb, 1);
880 881
881 if (cm->log2_tile_cols < max_log2_tile_cols) 882 if (cm->log2_tile_cols < max_log2_tile_cols)
882 vp9_wb_write_bit(wb, 0); 883 vp9_wb_write_bit(wb, 0);
883 884
884 // rows 885 // rows
885 vp9_wb_write_bit(wb, cm->log2_tile_rows != 0); 886 vp9_wb_write_bit(wb, cm->log2_tile_rows != 0);
886 if (cm->log2_tile_rows != 0) 887 if (cm->log2_tile_rows != 0)
887 vp9_wb_write_bit(wb, cm->log2_tile_rows != 1); 888 vp9_wb_write_bit(wb, cm->log2_tile_rows != 1);
888 } 889 }
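
The column loop in write_tile_info is a unary code: one 1 bit per doubling of tile columns beyond the minimum, terminated by a 0 unless the maximum was reached (in which case the terminator is implicit). A sketch, with the min/max bounds assumed rather than derived from the frame width as vp9_get_tile_n_bits does:

    #include <stdio.h>

    static void show_tile_cols(int log2_tile_cols, int min_log2, int max_log2) {
      int ones = log2_tile_cols - min_log2;
      while (ones--)
        printf("1");                     /* one 1 per extra doubling */
      if (log2_tile_cols < max_log2)
        printf("0");                     /* terminator, unless at max */
      printf("\n");
    }

    int main(void) {
      show_tile_cols(0, 0, 4);  /* "0"    */
      show_tile_cols(2, 0, 4);  /* "110"  */
      show_tile_cols(4, 0, 4);  /* "1111" */
      return 0;
    }
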
889 890
890 static int get_refresh_mask(VP9_COMP *cpi) { 891 static int get_refresh_mask(VP9_COMP *cpi) {
891 if (!cpi->multi_arf_allowed && cpi->refresh_golden_frame && 892 if (vp9_preserve_existing_gf(cpi)) {
892 cpi->rc.is_src_frame_alt_ref && !cpi->use_svc) { 893 // We have decided to preserve the previously existing golden frame as our
893 // Preserve the previously existing golden frame and update the frame in 894 // new ARF frame. However, in the short term we leave it in the GF slot and,
894 // the alt ref slot instead. This is highly specific to the use of 895 // if we're updating the GF with the current decoded frame, we save it
895 // alt-ref as a forward reference, and this needs to be generalized as 896 // instead to the ARF slot.
896 // other uses are implemented (like RTC/temporal scaling) 897 // Later, in the function vp9_encoder.c:vp9_update_reference_frames() we
897 // 898 // will swap gld_fb_idx and alt_fb_idx to achieve our objective. We do it
898 // gld_fb_idx and alt_fb_idx need to be swapped for future frames, but 899 // there so that it can be done outside of the recode loop.
899 // that happens in vp9_encoder.c:update_reference_frames() so that it can 900 // Note: This is highly specific to the use of ARF as a forward reference,
900 // be done outside of the recode loop. 901 // and this needs to be generalized as other uses are implemented
902 // (like RTC/temporal scalability).
901 return (cpi->refresh_last_frame << cpi->lst_fb_idx) | 903 return (cpi->refresh_last_frame << cpi->lst_fb_idx) |
902 (cpi->refresh_golden_frame << cpi->alt_fb_idx); 904 (cpi->refresh_golden_frame << cpi->alt_fb_idx);
903 } else { 905 } else {
904 int arf_idx = cpi->alt_fb_idx; 906 int arf_idx = cpi->alt_fb_idx;
905 if ((cpi->pass == 2) && cpi->multi_arf_allowed) { 907 if ((cpi->oxcf.pass == 2) && cpi->multi_arf_allowed) {
906 const GF_GROUP *const gf_group = &cpi->twopass.gf_group; 908 const GF_GROUP *const gf_group = &cpi->twopass.gf_group;
907 arf_idx = gf_group->arf_update_idx[gf_group->index]; 909 arf_idx = gf_group->arf_update_idx[gf_group->index];
908 } 910 }
909 return (cpi->refresh_last_frame << cpi->lst_fb_idx) | 911 return (cpi->refresh_last_frame << cpi->lst_fb_idx) |
910 (cpi->refresh_golden_frame << cpi->gld_fb_idx) | 912 (cpi->refresh_golden_frame << cpi->gld_fb_idx) |
911 (cpi->refresh_alt_ref_frame << arf_idx); 913 (cpi->refresh_alt_ref_frame << arf_idx);
912 } 914 }
913 } 915 }
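
get_refresh_mask composes one bit per slot of VP9's eight-entry reference buffer (REF_FRAMES). A worked example with illustrative slot indices, including the preserve-golden path where the golden-frame refresh is redirected into the ARF slot:

    #include <stdio.h>

    int main(void) {
      const int lst_fb_idx = 0, gld_fb_idx = 1, alt_fb_idx = 2;  /* assumed */
      const int refresh_last = 1, refresh_golden = 1;

      /* Normal path: refresh the last and golden slots. */
      int mask = (refresh_last << lst_fb_idx) | (refresh_golden << gld_fb_idx);
      printf("mask = 0x%02x\n", mask);              /* 0x03 */

      /* Preserve-GF path: the golden update lands in the ARF slot; the
       * indices are swapped later in vp9_update_reference_frames(). */
      mask = (refresh_last << lst_fb_idx) | (refresh_golden << alt_fb_idx);
      printf("preserve-GF mask = 0x%02x\n", mask);  /* 0x05 */
      return 0;
    }
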
914 916
915 static size_t encode_tiles(VP9_COMP *cpi, uint8_t *data_ptr) { 917 static size_t encode_tiles(VP9_COMP *cpi, uint8_t *data_ptr) {
(...skipping 117 matching lines...)
1033 vp9_wb_write_literal(wb, 1, 2); 1035 vp9_wb_write_literal(wb, 1, 2);
1034 break; 1036 break;
1035 case PROFILE_3: 1037 case PROFILE_3:
1036 vp9_wb_write_literal(wb, 6, 3); 1038 vp9_wb_write_literal(wb, 6, 3);
1037 break; 1039 break;
1038 default: 1040 default:
1039 assert(0); 1041 assert(0);
1040 } 1042 }
1041 } 1043 }
1042 1044
1045 static void write_bitdepth_colorspace_sampling(
1046 VP9_COMMON *const cm, struct vp9_write_bit_buffer *wb) {
1047 if (cm->profile >= PROFILE_2) {
1048 assert(cm->bit_depth > BITS_8);
1049 vp9_wb_write_bit(wb, cm->bit_depth - BITS_10);
1050 }
1051 vp9_wb_write_literal(wb, cm->color_space, 3);
1052 if (cm->color_space != SRGB) {
1053 vp9_wb_write_bit(wb, 0); // 0: [16, 235] (i.e. xvYCC), 1: [0, 255]
1054 if (cm->profile == PROFILE_1 || cm->profile == PROFILE_3) {
1055 assert(cm->subsampling_x != 1 || cm->subsampling_y != 1);
1056 vp9_wb_write_bit(wb, cm->subsampling_x);
1057 vp9_wb_write_bit(wb, cm->subsampling_y);
1058 vp9_wb_write_bit(wb, 0); // unused
1059 } else {
1060 assert(cm->subsampling_x == 1 && cm->subsampling_y == 1);
1061 }
1062 } else {
1063 assert(cm->profile == PROFILE_1 || cm->profile == PROFILE_3);
1064 vp9_wb_write_bit(wb, 0); // unused
1065 }
1066 }
1067
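The new helper gathers signalling that only applies beyond 8-bit 4:2:0: for profiles 2 and 3 a single bit distinguishes 10-bit from 12-bit (8-bit needs no bit, since profiles 0 and 1 are 8-bit only), followed by the color space and, where the profile allows it, the subsampling bits. A sketch of just the depth bit, with stand-in enum values:

    #include <stdio.h>

    typedef enum { BITS_8, BITS_10, BITS_12 } BIT_DEPTH;  /* stand-ins */

    static void show_depth_bit(int profile, BIT_DEPTH bd) {
      if (profile >= 2)
        printf("profile %d: depth bit %d\n", profile, (int)(bd - BITS_10));
      else
        printf("profile %d: no depth bit, 8-bit implied\n", profile);
    }

    int main(void) {
      show_depth_bit(0, BITS_8);   /* nothing coded   */
      show_depth_bit(2, BITS_10);  /* bit 0 -> 10-bit */
      show_depth_bit(2, BITS_12);  /* bit 1 -> 12-bit */
      return 0;
    }
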
1043 static void write_uncompressed_header(VP9_COMP *cpi, 1068 static void write_uncompressed_header(VP9_COMP *cpi,
1044 struct vp9_write_bit_buffer *wb) { 1069 struct vp9_write_bit_buffer *wb) {
1045 VP9_COMMON *const cm = &cpi->common; 1070 VP9_COMMON *const cm = &cpi->common;
1046 1071
1047 vp9_wb_write_literal(wb, VP9_FRAME_MARKER, 2); 1072 vp9_wb_write_literal(wb, VP9_FRAME_MARKER, 2);
1048 1073
1049 write_profile(cm->profile, wb); 1074 write_profile(cm->profile, wb);
1050 1075
1051 vp9_wb_write_bit(wb, 0); // show_existing_frame 1076 vp9_wb_write_bit(wb, 0); // show_existing_frame
1052 vp9_wb_write_bit(wb, cm->frame_type); 1077 vp9_wb_write_bit(wb, cm->frame_type);
1053 vp9_wb_write_bit(wb, cm->show_frame); 1078 vp9_wb_write_bit(wb, cm->show_frame);
1054 vp9_wb_write_bit(wb, cm->error_resilient_mode); 1079 vp9_wb_write_bit(wb, cm->error_resilient_mode);
1055 1080
1056 if (cm->frame_type == KEY_FRAME) { 1081 if (cm->frame_type == KEY_FRAME) {
1057 const COLOR_SPACE cs = UNKNOWN;
1058 write_sync_code(wb); 1082 write_sync_code(wb);
1059 if (cm->profile > PROFILE_1) { 1083 write_bitdepth_colorspace_sampling(cm, wb);
1060 assert(cm->bit_depth > BITS_8);
1061 vp9_wb_write_bit(wb, cm->bit_depth - BITS_10);
1062 }
1063 vp9_wb_write_literal(wb, cs, 3);
1064 if (cs != SRGB) {
1065 vp9_wb_write_bit(wb, 0); // 0: [16, 235] (i.e. xvYCC), 1: [0, 255]
1066 if (cm->profile == PROFILE_1 || cm->profile == PROFILE_3) {
1067 vp9_wb_write_bit(wb, cm->subsampling_x);
1068 vp9_wb_write_bit(wb, cm->subsampling_y);
1069 vp9_wb_write_bit(wb, 0); // unused
1070 }
1071 } else {
1072 assert(cm->profile == PROFILE_1 || cm->profile == PROFILE_3);
1073 vp9_wb_write_bit(wb, 0); // unused
1074 }
1075
1076 write_frame_size(cm, wb); 1084 write_frame_size(cm, wb);
1077 } else { 1085 } else {
1078 if (!cm->show_frame) 1086 if (!cm->show_frame)
1079 vp9_wb_write_bit(wb, cm->intra_only); 1087 vp9_wb_write_bit(wb, cm->intra_only);
1080 1088
1081 if (!cm->error_resilient_mode) 1089 if (!cm->error_resilient_mode)
1082 vp9_wb_write_literal(wb, cm->reset_frame_context, 2); 1090 vp9_wb_write_literal(wb, cm->reset_frame_context, 2);
1083 1091
1084 if (cm->intra_only) { 1092 if (cm->intra_only) {
1085 write_sync_code(wb); 1093 write_sync_code(wb);
1086 1094
1095 // Note for profile 0, 420 8bpp is assumed.
1096 if (cm->profile > PROFILE_0) {
1097 write_bitdepth_colorspace_sampling(cm, wb);
1098 }
1099
1087 vp9_wb_write_literal(wb, get_refresh_mask(cpi), REF_FRAMES); 1100 vp9_wb_write_literal(wb, get_refresh_mask(cpi), REF_FRAMES);
1088 write_frame_size(cm, wb); 1101 write_frame_size(cm, wb);
1089 } else { 1102 } else {
1090 MV_REFERENCE_FRAME ref_frame; 1103 MV_REFERENCE_FRAME ref_frame;
1091 vp9_wb_write_literal(wb, get_refresh_mask(cpi), REF_FRAMES); 1104 vp9_wb_write_literal(wb, get_refresh_mask(cpi), REF_FRAMES);
1092 for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) { 1105 for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) {
1093 vp9_wb_write_literal(wb, get_ref_frame_idx(cpi, ref_frame), 1106 vp9_wb_write_literal(wb, get_ref_frame_idx(cpi, ref_frame),
1094 REF_FRAMES_LOG2); 1107 REF_FRAMES_LOG2);
1095 vp9_wb_write_bit(wb, cm->ref_frame_sign_bias[ref_frame]); 1108 vp9_wb_write_bit(wb, cm->ref_frame_sign_bias[ref_frame]);
1096 } 1109 }
1097 1110
1098 write_frame_size_with_refs(cpi, wb); 1111 write_frame_size_with_refs(cpi, wb);
1099 1112
1100 vp9_wb_write_bit(wb, cm->allow_high_precision_mv); 1113 vp9_wb_write_bit(wb, cm->allow_high_precision_mv);
1101 1114
1102 fix_interp_filter(cm); 1115 fix_interp_filter(cm);
1103 write_interp_filter(cm->interp_filter, wb); 1116 write_interp_filter(cm->interp_filter, wb);
1104 } 1117 }
1105 } 1118 }
1106 1119
1107 if (!cm->error_resilient_mode) { 1120 if (!cm->error_resilient_mode) {
1108 vp9_wb_write_bit(wb, cm->refresh_frame_context); 1121 vp9_wb_write_bit(wb, cm->refresh_frame_context);
1109 vp9_wb_write_bit(wb, cm->frame_parallel_decoding_mode); 1122 vp9_wb_write_bit(wb, cm->frame_parallel_decoding_mode);
1110 } 1123 }
1111 1124
1112 vp9_wb_write_literal(wb, cm->frame_context_idx, FRAME_CONTEXTS_LOG2); 1125 vp9_wb_write_literal(wb, cm->frame_context_idx, FRAME_CONTEXTS_LOG2);
1113 1126
1114 encode_loopfilter(&cm->lf, wb); 1127 encode_loopfilter(&cm->lf, wb);
1115 encode_quantization(cm, wb); 1128 encode_quantization(cm, wb);
1116 encode_segmentation(cpi, wb); 1129 encode_segmentation(cm, &cpi->mb.e_mbd, wb);
1117 1130
1118 write_tile_info(cm, wb); 1131 write_tile_info(cm, wb);
1119 } 1132 }
1120 1133
1121 static size_t write_compressed_header(VP9_COMP *cpi, uint8_t *data) { 1134 static size_t write_compressed_header(VP9_COMP *cpi, uint8_t *data) {
1122 VP9_COMMON *const cm = &cpi->common; 1135 VP9_COMMON *const cm = &cpi->common;
1123 MACROBLOCKD *const xd = &cpi->mb.e_mbd; 1136 MACROBLOCKD *const xd = &cpi->mb.e_mbd;
1124 FRAME_CONTEXT *const fc = &cm->fc; 1137 FRAME_CONTEXT *const fc = &cm->fc;
1125 vp9_writer header_bc; 1138 vp9_writer header_bc;
1126 1139
(...skipping 71 matching lines...)
1198 void vp9_pack_bitstream(VP9_COMP *cpi, uint8_t *dest, size_t *size) { 1211 void vp9_pack_bitstream(VP9_COMP *cpi, uint8_t *dest, size_t *size) {
1199 uint8_t *data = dest; 1212 uint8_t *data = dest;
1200 size_t first_part_size, uncompressed_hdr_size; 1213 size_t first_part_size, uncompressed_hdr_size;
1201 struct vp9_write_bit_buffer wb = {data, 0}; 1214 struct vp9_write_bit_buffer wb = {data, 0};
1202 struct vp9_write_bit_buffer saved_wb; 1215 struct vp9_write_bit_buffer saved_wb;
1203 1216
1204 write_uncompressed_header(cpi, &wb); 1217 write_uncompressed_header(cpi, &wb);
1205 saved_wb = wb; 1218 saved_wb = wb;
1206 vp9_wb_write_literal(&wb, 0, 16); // don't know in advance first part. size 1219 vp9_wb_write_literal(&wb, 0, 16); // don't know in advance first part. size
1207 1220
1208 uncompressed_hdr_size = vp9_rb_bytes_written(&wb); 1221 uncompressed_hdr_size = vp9_wb_bytes_written(&wb);
1209 data += uncompressed_hdr_size; 1222 data += uncompressed_hdr_size;
1210 1223
1211 vp9_compute_update_table();
1212
1213 vp9_clear_system_state(); 1224 vp9_clear_system_state();
1214 1225
1215 first_part_size = write_compressed_header(cpi, data); 1226 first_part_size = write_compressed_header(cpi, data);
1216 data += first_part_size; 1227 data += first_part_size;
1217 // TODO(jbb): Figure out what to do if first_part_size > 16 bits. 1228 // TODO(jbb): Figure out what to do if first_part_size > 16 bits.
1218 vp9_wb_write_literal(&saved_wb, (int)first_part_size, 16); 1229 vp9_wb_write_literal(&saved_wb, (int)first_part_size, 16);
1219 1230
1220 data += encode_tiles(cpi, data); 1231 data += encode_tiles(cpi, data);
1221 1232
1222 *size = data - dest; 1233 *size = data - dest;
1223 } 1234 }
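
For orientation, vp9_pack_bitstream lays the frame out as: uncompressed header, a 16-bit first-part size that is back-patched via saved_wb once the compressed header has been written, then the compressed header and the tile data. A sketch of that back-patching under assumed sizes (the byte order of the patched field is illustrative only):

    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
      uint8_t buf[64] = {0};
      size_t off = 0;
      uint8_t *size_field;

      const size_t uncompressed_hdr_size = 10;  /* assumed */
      off += uncompressed_hdr_size;

      size_field = buf + off;  /* remember the placeholder position */
      off += 2;                /* 16-bit size, value not yet known */

      const size_t first_part_size = 25;  /* known only after writing */
      off += first_part_size;

      /* back-patch, as vp9_wb_write_literal(&saved_wb, ...) does above */
      size_field[0] = (uint8_t)(first_part_size >> 8);
      size_field[1] = (uint8_t)(first_part_size & 0xff);

      printf("tile data starts at byte %zu\n", off);
      return 0;
    }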