OLD | NEW |
1 /* | 1 /* |
2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved. | 2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
11 #include <assert.h> | 11 #include <assert.h> |
12 #include <stdio.h> | 12 #include <stdio.h> |
13 #include <limits.h> | 13 #include <limits.h> |
14 | 14 |
15 #include "vpx/vpx_encoder.h" | 15 #include "vpx/vpx_encoder.h" |
16 #include "vpx_mem/vpx_mem.h" | 16 #include "vpx_mem/vpx_mem.h" |
17 #include "vpx_ports/mem_ops.h" | 17 #include "vpx_ports/mem_ops.h" |
18 | 18 |
19 #include "vp9/common/vp9_entropy.h" | 19 #include "vp9/common/vp9_entropy.h" |
20 #include "vp9/common/vp9_entropymode.h" | 20 #include "vp9/common/vp9_entropymode.h" |
21 #include "vp9/common/vp9_entropymv.h" | 21 #include "vp9/common/vp9_entropymv.h" |
22 #include "vp9/common/vp9_mvref_common.h" | 22 #include "vp9/common/vp9_mvref_common.h" |
23 #include "vp9/common/vp9_pragmas.h" | 23 #include "vp9/common/vp9_pragmas.h" |
24 #include "vp9/common/vp9_pred_common.h" | 24 #include "vp9/common/vp9_pred_common.h" |
25 #include "vp9/common/vp9_seg_common.h" | 25 #include "vp9/common/vp9_seg_common.h" |
26 #include "vp9/common/vp9_systemdependent.h" | 26 #include "vp9/common/vp9_systemdependent.h" |
27 #include "vp9/common/vp9_tile_common.h" | 27 #include "vp9/common/vp9_tile_common.h" |
28 | 28 |
| 29 #include "vp9/encoder/vp9_cost.h" |
29 #include "vp9/encoder/vp9_bitstream.h" | 30 #include "vp9/encoder/vp9_bitstream.h" |
30 #include "vp9/encoder/vp9_encodemv.h" | 31 #include "vp9/encoder/vp9_encodemv.h" |
31 #include "vp9/encoder/vp9_mcomp.h" | 32 #include "vp9/encoder/vp9_mcomp.h" |
32 #include "vp9/encoder/vp9_segmentation.h" | 33 #include "vp9/encoder/vp9_segmentation.h" |
33 #include "vp9/encoder/vp9_subexp.h" | 34 #include "vp9/encoder/vp9_subexp.h" |
34 #include "vp9/encoder/vp9_tokenize.h" | 35 #include "vp9/encoder/vp9_tokenize.h" |
35 #include "vp9/encoder/vp9_write_bit_buffer.h" | 36 #include "vp9/encoder/vp9_write_bit_buffer.h" |
36 | 37 |
37 #ifdef ENTROPY_STATS | |
38 extern unsigned int active_section; | |
39 #endif | |
40 | |
41 static struct vp9_token intra_mode_encodings[INTRA_MODES]; | 38 static struct vp9_token intra_mode_encodings[INTRA_MODES]; |
42 static struct vp9_token switchable_interp_encodings[SWITCHABLE_FILTERS]; | 39 static struct vp9_token switchable_interp_encodings[SWITCHABLE_FILTERS]; |
43 static struct vp9_token partition_encodings[PARTITION_TYPES]; | 40 static struct vp9_token partition_encodings[PARTITION_TYPES]; |
44 static struct vp9_token inter_mode_encodings[INTER_MODES]; | 41 static struct vp9_token inter_mode_encodings[INTER_MODES]; |
45 | 42 |
46 void vp9_entropy_mode_init() { | 43 void vp9_entropy_mode_init() { |
47 vp9_tokens_from_tree(intra_mode_encodings, vp9_intra_mode_tree); | 44 vp9_tokens_from_tree(intra_mode_encodings, vp9_intra_mode_tree); |
48 vp9_tokens_from_tree(switchable_interp_encodings, vp9_switchable_interp_tree); | 45 vp9_tokens_from_tree(switchable_interp_encodings, vp9_switchable_interp_tree); |
49 vp9_tokens_from_tree(partition_encodings, vp9_partition_tree); | 46 vp9_tokens_from_tree(partition_encodings, vp9_partition_tree); |
50 vp9_tokens_from_tree(inter_mode_encodings, vp9_inter_mode_tree); | 47 vp9_tokens_from_tree(inter_mode_encodings, vp9_inter_mode_tree); |
(...skipping 39 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
90 const vp9_prob *const tx_probs = get_tx_probs2(max_tx_size, xd, | 87 const vp9_prob *const tx_probs = get_tx_probs2(max_tx_size, xd, |
91 &cpi->common.fc.tx_probs); | 88 &cpi->common.fc.tx_probs); |
92 vp9_write(w, tx_size != TX_4X4, tx_probs[0]); | 89 vp9_write(w, tx_size != TX_4X4, tx_probs[0]); |
93 if (tx_size != TX_4X4 && max_tx_size >= TX_16X16) { | 90 if (tx_size != TX_4X4 && max_tx_size >= TX_16X16) { |
94 vp9_write(w, tx_size != TX_8X8, tx_probs[1]); | 91 vp9_write(w, tx_size != TX_8X8, tx_probs[1]); |
95 if (tx_size != TX_8X8 && max_tx_size >= TX_32X32) | 92 if (tx_size != TX_8X8 && max_tx_size >= TX_32X32) |
96 vp9_write(w, tx_size != TX_16X16, tx_probs[2]); | 93 vp9_write(w, tx_size != TX_16X16, tx_probs[2]); |
97 } | 94 } |
98 } | 95 } |
99 | 96 |
100 static int write_skip(const VP9_COMP *cpi, int segment_id, MODE_INFO *m, | 97 static int write_skip(const VP9_COMP *cpi, int segment_id, const MODE_INFO *mi, |
101 vp9_writer *w) { | 98 vp9_writer *w) { |
102 const MACROBLOCKD *const xd = &cpi->mb.e_mbd; | 99 const MACROBLOCKD *const xd = &cpi->mb.e_mbd; |
103 if (vp9_segfeature_active(&cpi->common.seg, segment_id, SEG_LVL_SKIP)) { | 100 if (vp9_segfeature_active(&cpi->common.seg, segment_id, SEG_LVL_SKIP)) { |
104 return 1; | 101 return 1; |
105 } else { | 102 } else { |
106 const int skip = m->mbmi.skip; | 103 const int skip = mi->mbmi.skip; |
107 vp9_write(w, skip, vp9_get_skip_prob(&cpi->common, xd)); | 104 vp9_write(w, skip, vp9_get_skip_prob(&cpi->common, xd)); |
108 return skip; | 105 return skip; |
109 } | 106 } |
110 } | 107 } |
111 | 108 |
112 static void update_skip_probs(VP9_COMMON *cm, vp9_writer *w) { | 109 static void update_skip_probs(VP9_COMMON *cm, vp9_writer *w) { |
113 int k; | 110 int k; |
114 | 111 |
115 for (k = 0; k < SKIP_CONTEXTS; ++k) | 112 for (k = 0; k < SKIP_CONTEXTS; ++k) |
116 vp9_cond_prob_diff_update(w, &cm->fc.skip_probs[k], cm->counts.skip[k]); | 113 vp9_cond_prob_diff_update(w, &cm->fc.skip_probs[k], cm->counts.skip[k]); |
(...skipping 71 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
188 static void write_segment_id(vp9_writer *w, const struct segmentation *seg, | 185 static void write_segment_id(vp9_writer *w, const struct segmentation *seg, |
189 int segment_id) { | 186 int segment_id) { |
190 if (seg->enabled && seg->update_map) | 187 if (seg->enabled && seg->update_map) |
191 vp9_write_tree(w, vp9_segment_tree, seg->tree_probs, segment_id, 3, 0); | 188 vp9_write_tree(w, vp9_segment_tree, seg->tree_probs, segment_id, 3, 0); |
192 } | 189 } |
193 | 190 |
194 // This function encodes the reference frame | 191 // This function encodes the reference frame |
195 static void write_ref_frames(const VP9_COMP *cpi, vp9_writer *w) { | 192 static void write_ref_frames(const VP9_COMP *cpi, vp9_writer *w) { |
196 const VP9_COMMON *const cm = &cpi->common; | 193 const VP9_COMMON *const cm = &cpi->common; |
197 const MACROBLOCKD *const xd = &cpi->mb.e_mbd; | 194 const MACROBLOCKD *const xd = &cpi->mb.e_mbd; |
198 const MB_MODE_INFO *const mbmi = &xd->mi_8x8[0]->mbmi; | 195 const MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi; |
199 const int is_compound = has_second_ref(mbmi); | 196 const int is_compound = has_second_ref(mbmi); |
200 const int segment_id = mbmi->segment_id; | 197 const int segment_id = mbmi->segment_id; |
201 | 198 |
202 // If segment level coding of this signal is disabled... | 199 // If segment level coding of this signal is disabled... |
203 // or the segment allows multiple reference frame options | 200 // or the segment allows multiple reference frame options |
204 if (vp9_segfeature_active(&cm->seg, segment_id, SEG_LVL_REF_FRAME)) { | 201 if (vp9_segfeature_active(&cm->seg, segment_id, SEG_LVL_REF_FRAME)) { |
205 assert(!is_compound); | 202 assert(!is_compound); |
206 assert(mbmi->ref_frame[0] == | 203 assert(mbmi->ref_frame[0] == |
207 vp9_get_segdata(&cm->seg, segment_id, SEG_LVL_REF_FRAME)); | 204 vp9_get_segdata(&cm->seg, segment_id, SEG_LVL_REF_FRAME)); |
208 } else { | 205 } else { |
(...skipping 12 matching lines...) Expand all Loading... |
221 const int bit0 = mbmi->ref_frame[0] != LAST_FRAME; | 218 const int bit0 = mbmi->ref_frame[0] != LAST_FRAME; |
222 vp9_write(w, bit0, vp9_get_pred_prob_single_ref_p1(cm, xd)); | 219 vp9_write(w, bit0, vp9_get_pred_prob_single_ref_p1(cm, xd)); |
223 if (bit0) { | 220 if (bit0) { |
224 const int bit1 = mbmi->ref_frame[0] != GOLDEN_FRAME; | 221 const int bit1 = mbmi->ref_frame[0] != GOLDEN_FRAME; |
225 vp9_write(w, bit1, vp9_get_pred_prob_single_ref_p2(cm, xd)); | 222 vp9_write(w, bit1, vp9_get_pred_prob_single_ref_p2(cm, xd)); |
226 } | 223 } |
227 } | 224 } |
228 } | 225 } |
229 } | 226 } |
230 | 227 |
231 static void pack_inter_mode_mvs(VP9_COMP *cpi, MODE_INFO *m, vp9_writer *bc) { | 228 static void pack_inter_mode_mvs(VP9_COMP *cpi, const MODE_INFO *mi, |
| 229 vp9_writer *w) { |
232 VP9_COMMON *const cm = &cpi->common; | 230 VP9_COMMON *const cm = &cpi->common; |
233 const nmv_context *nmvc = &cm->fc.nmvc; | 231 const nmv_context *nmvc = &cm->fc.nmvc; |
234 MACROBLOCK *const x = &cpi->mb; | 232 const MACROBLOCK *const x = &cpi->mb; |
235 MACROBLOCKD *const xd = &x->e_mbd; | 233 const MACROBLOCKD *const xd = &x->e_mbd; |
236 const struct segmentation *const seg = &cm->seg; | 234 const struct segmentation *const seg = &cm->seg; |
237 const MB_MODE_INFO *const mi = &m->mbmi; | 235 const MB_MODE_INFO *const mbmi = &mi->mbmi; |
238 const MV_REFERENCE_FRAME ref0 = mi->ref_frame[0]; | 236 const MB_PREDICTION_MODE mode = mbmi->mode; |
239 const MV_REFERENCE_FRAME ref1 = mi->ref_frame[1]; | 237 const int segment_id = mbmi->segment_id; |
240 const MB_PREDICTION_MODE mode = mi->mode; | 238 const BLOCK_SIZE bsize = mbmi->sb_type; |
241 const int segment_id = mi->segment_id; | |
242 const BLOCK_SIZE bsize = mi->sb_type; | |
243 const int allow_hp = cm->allow_high_precision_mv; | 239 const int allow_hp = cm->allow_high_precision_mv; |
244 int skip; | 240 const int is_inter = is_inter_block(mbmi); |
245 | 241 const int is_compound = has_second_ref(mbmi); |
246 #ifdef ENTROPY_STATS | 242 int skip, ref; |
247 active_section = 9; | |
248 #endif | |
249 | 243 |
250 if (seg->update_map) { | 244 if (seg->update_map) { |
251 if (seg->temporal_update) { | 245 if (seg->temporal_update) { |
252 const int pred_flag = mi->seg_id_predicted; | 246 const int pred_flag = mbmi->seg_id_predicted; |
253 vp9_prob pred_prob = vp9_get_pred_prob_seg_id(seg, xd); | 247 vp9_prob pred_prob = vp9_get_pred_prob_seg_id(seg, xd); |
254 vp9_write(bc, pred_flag, pred_prob); | 248 vp9_write(w, pred_flag, pred_prob); |
255 if (!pred_flag) | 249 if (!pred_flag) |
256 write_segment_id(bc, seg, segment_id); | 250 write_segment_id(w, seg, segment_id); |
257 } else { | 251 } else { |
258 write_segment_id(bc, seg, segment_id); | 252 write_segment_id(w, seg, segment_id); |
259 } | 253 } |
260 } | 254 } |
261 | 255 |
262 skip = write_skip(cpi, segment_id, m, bc); | 256 skip = write_skip(cpi, segment_id, mi, w); |
263 | 257 |
264 if (!vp9_segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME)) | 258 if (!vp9_segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME)) |
265 vp9_write(bc, ref0 != INTRA_FRAME, vp9_get_intra_inter_prob(cm, xd)); | 259 vp9_write(w, is_inter, vp9_get_intra_inter_prob(cm, xd)); |
266 | 260 |
267 if (bsize >= BLOCK_8X8 && cm->tx_mode == TX_MODE_SELECT && | 261 if (bsize >= BLOCK_8X8 && cm->tx_mode == TX_MODE_SELECT && |
268 !(ref0 != INTRA_FRAME && | 262 !(is_inter && |
269 (skip || vp9_segfeature_active(seg, segment_id, SEG_LVL_SKIP)))) { | 263 (skip || vp9_segfeature_active(seg, segment_id, SEG_LVL_SKIP)))) { |
270 write_selected_tx_size(cpi, mi->tx_size, bsize, bc); | 264 write_selected_tx_size(cpi, mbmi->tx_size, bsize, w); |
271 } | 265 } |
272 | 266 |
273 if (ref0 == INTRA_FRAME) { | 267 if (!is_inter) { |
274 #ifdef ENTROPY_STATS | |
275 active_section = 6; | |
276 #endif | |
277 | |
278 if (bsize >= BLOCK_8X8) { | 268 if (bsize >= BLOCK_8X8) { |
279 write_intra_mode(bc, mode, cm->fc.y_mode_prob[size_group_lookup[bsize]]); | 269 write_intra_mode(w, mode, cm->fc.y_mode_prob[size_group_lookup[bsize]]); |
280 } else { | 270 } else { |
281 int idx, idy; | 271 int idx, idy; |
282 const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize]; | 272 const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize]; |
283 const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize]; | 273 const int num_4x4_h = num_4x4_blocks_high_lookup[bsize]; |
284 for (idy = 0; idy < 2; idy += num_4x4_blocks_high) { | 274 for (idy = 0; idy < 2; idy += num_4x4_h) { |
285 for (idx = 0; idx < 2; idx += num_4x4_blocks_wide) { | 275 for (idx = 0; idx < 2; idx += num_4x4_w) { |
286 const MB_PREDICTION_MODE bm = m->bmi[idy * 2 + idx].as_mode; | 276 const MB_PREDICTION_MODE b_mode = mi->bmi[idy * 2 + idx].as_mode; |
287 write_intra_mode(bc, bm, cm->fc.y_mode_prob[0]); | 277 write_intra_mode(w, b_mode, cm->fc.y_mode_prob[0]); |
288 } | 278 } |
289 } | 279 } |
290 } | 280 } |
291 write_intra_mode(bc, mi->uv_mode, cm->fc.uv_mode_prob[mode]); | 281 write_intra_mode(w, mbmi->uv_mode, cm->fc.uv_mode_prob[mode]); |
292 } else { | 282 } else { |
293 vp9_prob *mv_ref_p; | 283 const int mode_ctx = mbmi->mode_context[mbmi->ref_frame[0]]; |
294 write_ref_frames(cpi, bc); | 284 const vp9_prob *const inter_probs = cm->fc.inter_mode_probs[mode_ctx]; |
295 mv_ref_p = cm->fc.inter_mode_probs[mi->mode_context[ref0]]; | 285 write_ref_frames(cpi, w); |
296 | |
297 #ifdef ENTROPY_STATS | |
298 active_section = 3; | |
299 #endif | |
300 | 286 |
301 // If segment skip is not enabled code the mode. | 287 // If segment skip is not enabled code the mode. |
302 if (!vp9_segfeature_active(seg, segment_id, SEG_LVL_SKIP)) { | 288 if (!vp9_segfeature_active(seg, segment_id, SEG_LVL_SKIP)) { |
303 if (bsize >= BLOCK_8X8) { | 289 if (bsize >= BLOCK_8X8) { |
304 write_inter_mode(bc, mode, mv_ref_p); | 290 write_inter_mode(w, mode, inter_probs); |
305 ++cm->counts.inter_mode[mi->mode_context[ref0]][INTER_OFFSET(mode)]; | 291 ++cm->counts.inter_mode[mode_ctx][INTER_OFFSET(mode)]; |
306 } | 292 } |
307 } | 293 } |
308 | 294 |
309 if (cm->interp_filter == SWITCHABLE) { | 295 if (cm->interp_filter == SWITCHABLE) { |
310 const int ctx = vp9_get_pred_context_switchable_interp(xd); | 296 const int ctx = vp9_get_pred_context_switchable_interp(xd); |
311 vp9_write_token(bc, vp9_switchable_interp_tree, | 297 vp9_write_token(w, vp9_switchable_interp_tree, |
312 cm->fc.switchable_interp_prob[ctx], | 298 cm->fc.switchable_interp_prob[ctx], |
313 &switchable_interp_encodings[mi->interp_filter]); | 299 &switchable_interp_encodings[mbmi->interp_filter]); |
314 } else { | 300 } else { |
315 assert(mi->interp_filter == cm->interp_filter); | 301 assert(mbmi->interp_filter == cm->interp_filter); |
316 } | 302 } |
317 | 303 |
318 if (bsize < BLOCK_8X8) { | 304 if (bsize < BLOCK_8X8) { |
319 const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize]; | 305 const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize]; |
320 const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize]; | 306 const int num_4x4_h = num_4x4_blocks_high_lookup[bsize]; |
321 int idx, idy; | 307 int idx, idy; |
322 for (idy = 0; idy < 2; idy += num_4x4_blocks_high) { | 308 for (idy = 0; idy < 2; idy += num_4x4_h) { |
323 for (idx = 0; idx < 2; idx += num_4x4_blocks_wide) { | 309 for (idx = 0; idx < 2; idx += num_4x4_w) { |
324 const int j = idy * 2 + idx; | 310 const int j = idy * 2 + idx; |
325 const MB_PREDICTION_MODE b_mode = m->bmi[j].as_mode; | 311 const MB_PREDICTION_MODE b_mode = mi->bmi[j].as_mode; |
326 write_inter_mode(bc, b_mode, mv_ref_p); | 312 write_inter_mode(w, b_mode, inter_probs); |
327 ++cm->counts.inter_mode[mi->mode_context[ref0]][INTER_OFFSET(b_mode)]; | 313 ++cm->counts.inter_mode[mode_ctx][INTER_OFFSET(b_mode)]; |
328 if (b_mode == NEWMV) { | 314 if (b_mode == NEWMV) { |
329 #ifdef ENTROPY_STATS | 315 for (ref = 0; ref < 1 + is_compound; ++ref) |
330 active_section = 11; | 316 vp9_encode_mv(cpi, w, &mi->bmi[j].as_mv[ref].as_mv, |
331 #endif | 317 &mbmi->ref_mvs[mbmi->ref_frame[ref]][0].as_mv, |
332 vp9_encode_mv(cpi, bc, &m->bmi[j].as_mv[0].as_mv, | 318 nmvc, allow_hp); |
333 &mi->ref_mvs[ref0][0].as_mv, nmvc, allow_hp); | |
334 | |
335 if (has_second_ref(mi)) | |
336 vp9_encode_mv(cpi, bc, &m->bmi[j].as_mv[1].as_mv, | |
337 &mi->ref_mvs[ref1][0].as_mv, nmvc, allow_hp); | |
338 } | 319 } |
339 } | 320 } |
340 } | 321 } |
341 } else if (mode == NEWMV) { | 322 } else { |
342 #ifdef ENTROPY_STATS | 323 if (mode == NEWMV) { |
343 active_section = 5; | 324 for (ref = 0; ref < 1 + is_compound; ++ref) |
344 #endif | 325 vp9_encode_mv(cpi, w, &mbmi->mv[ref].as_mv, |
345 vp9_encode_mv(cpi, bc, &mi->mv[0].as_mv, | 326 &mbmi->ref_mvs[mbmi->ref_frame[ref]][0].as_mv, nmvc, |
346 &mi->ref_mvs[ref0][0].as_mv, nmvc, allow_hp); | 327 allow_hp); |
347 | 328 } |
348 if (has_second_ref(mi)) | |
349 vp9_encode_mv(cpi, bc, &mi->mv[1].as_mv, | |
350 &mi->ref_mvs[ref1][0].as_mv, nmvc, allow_hp); | |
351 } | 329 } |
352 } | 330 } |
353 } | 331 } |
354 | 332 |
355 static void write_mb_modes_kf(const VP9_COMP *cpi, MODE_INFO **mi_8x8, | 333 static void write_mb_modes_kf(const VP9_COMP *cpi, MODE_INFO **mi_8x8, |
356 vp9_writer *bc) { | 334 vp9_writer *w) { |
357 const VP9_COMMON *const cm = &cpi->common; | 335 const VP9_COMMON *const cm = &cpi->common; |
358 const MACROBLOCKD *const xd = &cpi->mb.e_mbd; | 336 const MACROBLOCKD *const xd = &cpi->mb.e_mbd; |
359 const struct segmentation *const seg = &cm->seg; | 337 const struct segmentation *const seg = &cm->seg; |
360 MODE_INFO *m = mi_8x8[0]; | 338 const MODE_INFO *const mi = mi_8x8[0]; |
361 const int ym = m->mbmi.mode; | 339 const MODE_INFO *const above_mi = mi_8x8[-xd->mi_stride]; |
362 const int segment_id = m->mbmi.segment_id; | 340 const MODE_INFO *const left_mi = xd->left_available ? mi_8x8[-1] : NULL; |
363 MODE_INFO *above_mi = mi_8x8[-xd->mode_info_stride]; | 341 const MB_MODE_INFO *const mbmi = &mi->mbmi; |
364 MODE_INFO *left_mi = xd->left_available ? mi_8x8[-1] : NULL; | 342 const BLOCK_SIZE bsize = mbmi->sb_type; |
365 | 343 |
366 if (seg->update_map) | 344 if (seg->update_map) |
367 write_segment_id(bc, seg, m->mbmi.segment_id); | 345 write_segment_id(w, seg, mbmi->segment_id); |
368 | 346 |
369 write_skip(cpi, segment_id, m, bc); | 347 write_skip(cpi, mbmi->segment_id, mi, w); |
370 | 348 |
371 if (m->mbmi.sb_type >= BLOCK_8X8 && cm->tx_mode == TX_MODE_SELECT) | 349 if (bsize >= BLOCK_8X8 && cm->tx_mode == TX_MODE_SELECT) |
372 write_selected_tx_size(cpi, m->mbmi.tx_size, m->mbmi.sb_type, bc); | 350 write_selected_tx_size(cpi, mbmi->tx_size, bsize, w); |
373 | 351 |
374 if (m->mbmi.sb_type >= BLOCK_8X8) { | 352 if (bsize >= BLOCK_8X8) { |
375 const MB_PREDICTION_MODE A = vp9_above_block_mode(m, above_mi, 0); | 353 write_intra_mode(w, mbmi->mode, get_y_mode_probs(mi, above_mi, left_mi, 0)); |
376 const MB_PREDICTION_MODE L = vp9_left_block_mode(m, left_mi, 0); | |
377 write_intra_mode(bc, ym, vp9_kf_y_mode_prob[A][L]); | |
378 } else { | 354 } else { |
| 355 const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize]; |
| 356 const int num_4x4_h = num_4x4_blocks_high_lookup[bsize]; |
379 int idx, idy; | 357 int idx, idy; |
380 const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[m->mbmi.sb_type]; | 358 |
381 const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[m->mbmi.sb_type]; | 359 for (idy = 0; idy < 2; idy += num_4x4_h) { |
382 for (idy = 0; idy < 2; idy += num_4x4_blocks_high) { | 360 for (idx = 0; idx < 2; idx += num_4x4_w) { |
383 for (idx = 0; idx < 2; idx += num_4x4_blocks_wide) { | 361 const int block = idy * 2 + idx; |
384 int i = idy * 2 + idx; | 362 write_intra_mode(w, mi->bmi[block].as_mode, |
385 const MB_PREDICTION_MODE A = vp9_above_block_mode(m, above_mi, i); | 363 get_y_mode_probs(mi, above_mi, left_mi, block)); |
386 const MB_PREDICTION_MODE L = vp9_left_block_mode(m, left_mi, i); | |
387 const int bm = m->bmi[i].as_mode; | |
388 write_intra_mode(bc, bm, vp9_kf_y_mode_prob[A][L]); | |
389 } | 364 } |
390 } | 365 } |
391 } | 366 } |
392 | 367 |
393 write_intra_mode(bc, m->mbmi.uv_mode, vp9_kf_uv_mode_prob[ym]); | 368 write_intra_mode(w, mbmi->uv_mode, vp9_kf_uv_mode_prob[mbmi->mode]); |
394 } | 369 } |
395 | 370 |
396 static void write_modes_b(VP9_COMP *cpi, const TileInfo *const tile, | 371 static void write_modes_b(VP9_COMP *cpi, const TileInfo *const tile, |
397 vp9_writer *w, TOKENEXTRA **tok, TOKENEXTRA *tok_end, | 372 vp9_writer *w, TOKENEXTRA **tok, TOKENEXTRA *tok_end, |
398 int mi_row, int mi_col) { | 373 int mi_row, int mi_col) { |
399 VP9_COMMON *const cm = &cpi->common; | 374 VP9_COMMON *const cm = &cpi->common; |
400 MACROBLOCKD *const xd = &cpi->mb.e_mbd; | 375 MACROBLOCKD *const xd = &cpi->mb.e_mbd; |
401 MODE_INFO *m; | 376 MODE_INFO *m; |
402 | 377 |
403 xd->mi_8x8 = cm->mi_grid_visible + (mi_row * cm->mode_info_stride + mi_col); | 378 xd->mi = cm->mi_grid_visible + (mi_row * cm->mi_stride + mi_col); |
404 m = xd->mi_8x8[0]; | 379 m = xd->mi[0]; |
405 | 380 |
406 set_mi_row_col(xd, tile, | 381 set_mi_row_col(xd, tile, |
407 mi_row, num_8x8_blocks_high_lookup[m->mbmi.sb_type], | 382 mi_row, num_8x8_blocks_high_lookup[m->mbmi.sb_type], |
408 mi_col, num_8x8_blocks_wide_lookup[m->mbmi.sb_type], | 383 mi_col, num_8x8_blocks_wide_lookup[m->mbmi.sb_type], |
409 cm->mi_rows, cm->mi_cols); | 384 cm->mi_rows, cm->mi_cols); |
410 if (frame_is_intra_only(cm)) { | 385 if (frame_is_intra_only(cm)) { |
411 write_mb_modes_kf(cpi, xd->mi_8x8, w); | 386 write_mb_modes_kf(cpi, xd->mi, w); |
412 #ifdef ENTROPY_STATS | |
413 active_section = 8; | |
414 #endif | |
415 } else { | 387 } else { |
416 pack_inter_mode_mvs(cpi, m, w); | 388 pack_inter_mode_mvs(cpi, m, w); |
417 #ifdef ENTROPY_STATS | |
418 active_section = 1; | |
419 #endif | |
420 } | 389 } |
421 | 390 |
422 assert(*tok < tok_end); | 391 assert(*tok < tok_end); |
423 pack_mb_tokens(w, tok, tok_end); | 392 pack_mb_tokens(w, tok, tok_end); |
424 } | 393 } |
425 | 394 |
426 static void write_partition(VP9_COMP *cpi, int hbs, int mi_row, int mi_col, | 395 static void write_partition(VP9_COMMON *cm, MACROBLOCKD *xd, |
| 396 int hbs, int mi_row, int mi_col, |
427 PARTITION_TYPE p, BLOCK_SIZE bsize, vp9_writer *w) { | 397 PARTITION_TYPE p, BLOCK_SIZE bsize, vp9_writer *w) { |
428 VP9_COMMON *const cm = &cpi->common; | 398 const int ctx = partition_plane_context(xd, mi_row, mi_col, bsize); |
429 const int ctx = partition_plane_context(cpi->above_seg_context, | |
430 cpi->left_seg_context, | |
431 mi_row, mi_col, bsize); | |
432 const vp9_prob *const probs = get_partition_probs(cm, ctx); | 399 const vp9_prob *const probs = get_partition_probs(cm, ctx); |
433 const int has_rows = (mi_row + hbs) < cm->mi_rows; | 400 const int has_rows = (mi_row + hbs) < cm->mi_rows; |
434 const int has_cols = (mi_col + hbs) < cm->mi_cols; | 401 const int has_cols = (mi_col + hbs) < cm->mi_cols; |
435 | 402 |
436 if (has_rows && has_cols) { | 403 if (has_rows && has_cols) { |
437 vp9_write_token(w, vp9_partition_tree, probs, &partition_encodings[p]); | 404 vp9_write_token(w, vp9_partition_tree, probs, &partition_encodings[p]); |
438 } else if (!has_rows && has_cols) { | 405 } else if (!has_rows && has_cols) { |
439 assert(p == PARTITION_SPLIT || p == PARTITION_HORZ); | 406 assert(p == PARTITION_SPLIT || p == PARTITION_HORZ); |
440 vp9_write(w, p == PARTITION_SPLIT, probs[1]); | 407 vp9_write(w, p == PARTITION_SPLIT, probs[1]); |
441 } else if (has_rows && !has_cols) { | 408 } else if (has_rows && !has_cols) { |
442 assert(p == PARTITION_SPLIT || p == PARTITION_VERT); | 409 assert(p == PARTITION_SPLIT || p == PARTITION_VERT); |
443 vp9_write(w, p == PARTITION_SPLIT, probs[2]); | 410 vp9_write(w, p == PARTITION_SPLIT, probs[2]); |
444 } else { | 411 } else { |
445 assert(p == PARTITION_SPLIT); | 412 assert(p == PARTITION_SPLIT); |
446 } | 413 } |
447 } | 414 } |
448 | 415 |
449 static void write_modes_sb(VP9_COMP *cpi, const TileInfo *const tile, | 416 static void write_modes_sb(VP9_COMP *cpi, |
| 417 const TileInfo *const tile, |
450 vp9_writer *w, TOKENEXTRA **tok, TOKENEXTRA *tok_end, | 418 vp9_writer *w, TOKENEXTRA **tok, TOKENEXTRA *tok_end, |
451 int mi_row, int mi_col, BLOCK_SIZE bsize) { | 419 int mi_row, int mi_col, BLOCK_SIZE bsize) { |
452 VP9_COMMON *const cm = &cpi->common; | 420 VP9_COMMON *const cm = &cpi->common; |
| 421 MACROBLOCKD *const xd = &cpi->mb.e_mbd; |
| 422 |
453 const int bsl = b_width_log2(bsize); | 423 const int bsl = b_width_log2(bsize); |
454 const int bs = (1 << bsl) / 4; | 424 const int bs = (1 << bsl) / 4; |
455 PARTITION_TYPE partition; | 425 PARTITION_TYPE partition; |
456 BLOCK_SIZE subsize; | 426 BLOCK_SIZE subsize; |
457 MODE_INFO *m = cm->mi_grid_visible[mi_row * cm->mode_info_stride + mi_col]; | 427 MODE_INFO *m = cm->mi_grid_visible[mi_row * cm->mi_stride + mi_col]; |
458 | 428 |
459 if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) | 429 if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) |
460 return; | 430 return; |
461 | 431 |
462 partition = partition_lookup[bsl][m->mbmi.sb_type]; | 432 partition = partition_lookup[bsl][m->mbmi.sb_type]; |
463 write_partition(cpi, bs, mi_row, mi_col, partition, bsize, w); | 433 write_partition(cm, xd, bs, mi_row, mi_col, partition, bsize, w); |
464 subsize = get_subsize(bsize, partition); | 434 subsize = get_subsize(bsize, partition); |
465 if (subsize < BLOCK_8X8) { | 435 if (subsize < BLOCK_8X8) { |
466 write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col); | 436 write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col); |
467 } else { | 437 } else { |
468 switch (partition) { | 438 switch (partition) { |
469 case PARTITION_NONE: | 439 case PARTITION_NONE: |
470 write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col); | 440 write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col); |
471 break; | 441 break; |
472 case PARTITION_HORZ: | 442 case PARTITION_HORZ: |
473 write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col); | 443 write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col); |
(...skipping 15 matching lines...) Expand all Loading... |
489 subsize); | 459 subsize); |
490 break; | 460 break; |
491 default: | 461 default: |
492 assert(0); | 462 assert(0); |
493 } | 463 } |
494 } | 464 } |
495 | 465 |
496 // update partition context | 466 // update partition context |
497 if (bsize >= BLOCK_8X8 && | 467 if (bsize >= BLOCK_8X8 && |
498 (bsize == BLOCK_8X8 || partition != PARTITION_SPLIT)) | 468 (bsize == BLOCK_8X8 || partition != PARTITION_SPLIT)) |
499 update_partition_context(cpi->above_seg_context, cpi->left_seg_context, | 469 update_partition_context(xd, mi_row, mi_col, subsize, bsize); |
500 mi_row, mi_col, subsize, bsize); | |
501 } | 470 } |
502 | 471 |
503 static void write_modes(VP9_COMP *cpi, const TileInfo *const tile, | 472 static void write_modes(VP9_COMP *cpi, |
| 473 const TileInfo *const tile, |
504 vp9_writer *w, TOKENEXTRA **tok, TOKENEXTRA *tok_end) { | 474 vp9_writer *w, TOKENEXTRA **tok, TOKENEXTRA *tok_end) { |
505 int mi_row, mi_col; | 475 int mi_row, mi_col; |
506 | 476 |
507 for (mi_row = tile->mi_row_start; mi_row < tile->mi_row_end; | 477 for (mi_row = tile->mi_row_start; mi_row < tile->mi_row_end; |
508 mi_row += MI_BLOCK_SIZE) { | 478 mi_row += MI_BLOCK_SIZE) { |
509 vp9_zero(cpi->left_seg_context); | 479 vp9_zero(cpi->mb.e_mbd.left_seg_context); |
510 for (mi_col = tile->mi_col_start; mi_col < tile->mi_col_end; | 480 for (mi_col = tile->mi_col_start; mi_col < tile->mi_col_end; |
511 mi_col += MI_BLOCK_SIZE) | 481 mi_col += MI_BLOCK_SIZE) |
512 write_modes_sb(cpi, tile, w, tok, tok_end, mi_row, mi_col, BLOCK_64X64); | 482 write_modes_sb(cpi, tile, w, tok, tok_end, mi_row, mi_col, |
| 483 BLOCK_64X64); |
513 } | 484 } |
514 } | 485 } |
515 | 486 |
516 static void build_tree_distribution(VP9_COMP *cpi, TX_SIZE tx_size) { | 487 static void build_tree_distribution(VP9_COMP *cpi, TX_SIZE tx_size, |
| 488 vp9_coeff_stats *coef_branch_ct) { |
517 vp9_coeff_probs_model *coef_probs = cpi->frame_coef_probs[tx_size]; | 489 vp9_coeff_probs_model *coef_probs = cpi->frame_coef_probs[tx_size]; |
518 vp9_coeff_count *coef_counts = cpi->coef_counts[tx_size]; | 490 vp9_coeff_count *coef_counts = cpi->coef_counts[tx_size]; |
519 unsigned int (*eob_branch_ct)[REF_TYPES][COEF_BANDS][COEFF_CONTEXTS] = | 491 unsigned int (*eob_branch_ct)[REF_TYPES][COEF_BANDS][COEFF_CONTEXTS] = |
520 cpi->common.counts.eob_branch[tx_size]; | 492 cpi->common.counts.eob_branch[tx_size]; |
521 vp9_coeff_stats *coef_branch_ct = cpi->frame_branch_ct[tx_size]; | |
522 int i, j, k, l, m; | 493 int i, j, k, l, m; |
523 | 494 |
524 for (i = 0; i < PLANE_TYPES; ++i) { | 495 for (i = 0; i < PLANE_TYPES; ++i) { |
525 for (j = 0; j < REF_TYPES; ++j) { | 496 for (j = 0; j < REF_TYPES; ++j) { |
526 for (k = 0; k < COEF_BANDS; ++k) { | 497 for (k = 0; k < COEF_BANDS; ++k) { |
527 for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) { | 498 for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) { |
528 vp9_tree_probs_from_distribution(vp9_coef_tree, | 499 vp9_tree_probs_from_distribution(vp9_coef_tree, |
529 coef_branch_ct[i][j][k][l], | 500 coef_branch_ct[i][j][k][l], |
530 coef_counts[i][j][k][l]); | 501 coef_counts[i][j][k][l]); |
531 coef_branch_ct[i][j][k][l][0][1] = eob_branch_ct[i][j][k][l] - | 502 coef_branch_ct[i][j][k][l][0][1] = eob_branch_ct[i][j][k][l] - |
532 coef_branch_ct[i][j][k][l][0][0]; | 503 coef_branch_ct[i][j][k][l][0][0]; |
533 for (m = 0; m < UNCONSTRAINED_NODES; ++m) | 504 for (m = 0; m < UNCONSTRAINED_NODES; ++m) |
534 coef_probs[i][j][k][l][m] = get_binary_prob( | 505 coef_probs[i][j][k][l][m] = get_binary_prob( |
535 coef_branch_ct[i][j][k][l][m][0], | 506 coef_branch_ct[i][j][k][l][m][0], |
536 coef_branch_ct[i][j][k][l][m][1]); | 507 coef_branch_ct[i][j][k][l][m][1]); |
537 } | 508 } |
538 } | 509 } |
539 } | 510 } |
540 } | 511 } |
541 } | 512 } |
542 | 513 |
543 static void update_coef_probs_common(vp9_writer* const bc, VP9_COMP *cpi, | 514 static void update_coef_probs_common(vp9_writer* const bc, VP9_COMP *cpi, |
544 TX_SIZE tx_size) { | 515 TX_SIZE tx_size, |
| 516 vp9_coeff_stats *frame_branch_ct) { |
545 vp9_coeff_probs_model *new_frame_coef_probs = cpi->frame_coef_probs[tx_size]; | 517 vp9_coeff_probs_model *new_frame_coef_probs = cpi->frame_coef_probs[tx_size]; |
546 vp9_coeff_probs_model *old_frame_coef_probs = | 518 vp9_coeff_probs_model *old_frame_coef_probs = |
547 cpi->common.fc.coef_probs[tx_size]; | 519 cpi->common.fc.coef_probs[tx_size]; |
548 vp9_coeff_stats *frame_branch_ct = cpi->frame_branch_ct[tx_size]; | |
549 const vp9_prob upd = DIFF_UPDATE_PROB; | 520 const vp9_prob upd = DIFF_UPDATE_PROB; |
550 const int entropy_nodes_update = UNCONSTRAINED_NODES; | 521 const int entropy_nodes_update = UNCONSTRAINED_NODES; |
551 int i, j, k, l, t; | 522 int i, j, k, l, t; |
552 switch (cpi->sf.use_fast_coef_updates) { | 523 switch (cpi->sf.use_fast_coef_updates) { |
553 case 0: { | 524 case TWO_LOOP: { |
554 /* dry run to see if there is any update at all needed */ | 525 /* dry run to see if there is any update at all needed */ |
555 int savings = 0; | 526 int savings = 0; |
556 int update[2] = {0, 0}; | 527 int update[2] = {0, 0}; |
557 for (i = 0; i < PLANE_TYPES; ++i) { | 528 for (i = 0; i < PLANE_TYPES; ++i) { |
558 for (j = 0; j < REF_TYPES; ++j) { | 529 for (j = 0; j < REF_TYPES; ++j) { |
559 for (k = 0; k < COEF_BANDS; ++k) { | 530 for (k = 0; k < COEF_BANDS; ++k) { |
560 for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) { | 531 for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) { |
561 for (t = 0; t < entropy_nodes_update; ++t) { | 532 for (t = 0; t < entropy_nodes_update; ++t) { |
562 vp9_prob newp = new_frame_coef_probs[i][j][k][l][t]; | 533 vp9_prob newp = new_frame_coef_probs[i][j][k][l][t]; |
563 const vp9_prob oldp = old_frame_coef_probs[i][j][k][l][t]; | 534 const vp9_prob oldp = old_frame_coef_probs[i][j][k][l][t]; |
(...skipping 54 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
618 *oldp = newp; | 589 *oldp = newp; |
619 } | 590 } |
620 } | 591 } |
621 } | 592 } |
622 } | 593 } |
623 } | 594 } |
624 } | 595 } |
625 return; | 596 return; |
626 } | 597 } |
627 | 598 |
628 case 1: | 599 case ONE_LOOP: |
629 case 2: { | 600 case ONE_LOOP_REDUCED: { |
630 const int prev_coef_contexts_to_update = | 601 const int prev_coef_contexts_to_update = |
631 cpi->sf.use_fast_coef_updates == 2 ? COEFF_CONTEXTS >> 1 | 602 cpi->sf.use_fast_coef_updates == ONE_LOOP_REDUCED ? |
632 : COEFF_CONTEXTS; | 603 COEFF_CONTEXTS >> 1 : COEFF_CONTEXTS; |
633 const int coef_band_to_update = | 604 const int coef_band_to_update = |
634 cpi->sf.use_fast_coef_updates == 2 ? COEF_BANDS >> 1 | 605 cpi->sf.use_fast_coef_updates == ONE_LOOP_REDUCED ? |
635 : COEF_BANDS; | 606 COEF_BANDS >> 1 : COEF_BANDS; |
636 int updates = 0; | 607 int updates = 0; |
637 int noupdates_before_first = 0; | 608 int noupdates_before_first = 0; |
638 for (i = 0; i < PLANE_TYPES; ++i) { | 609 for (i = 0; i < PLANE_TYPES; ++i) { |
639 for (j = 0; j < REF_TYPES; ++j) { | 610 for (j = 0; j < REF_TYPES; ++j) { |
640 for (k = 0; k < COEF_BANDS; ++k) { | 611 for (k = 0; k < COEF_BANDS; ++k) { |
641 for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) { | 612 for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) { |
642 // calc probs and branch cts for this frame only | 613 // calc probs and branch cts for this frame only |
643 for (t = 0; t < entropy_nodes_update; ++t) { | 614 for (t = 0; t < entropy_nodes_update; ++t) { |
644 vp9_prob newp = new_frame_coef_probs[i][j][k][l][t]; | 615 vp9_prob newp = new_frame_coef_probs[i][j][k][l][t]; |
645 vp9_prob *oldp = old_frame_coef_probs[i][j][k][l] + t; | 616 vp9_prob *oldp = old_frame_coef_probs[i][j][k][l] + t; |
(...skipping 45 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
691 | 662 |
692 default: | 663 default: |
693 assert(0); | 664 assert(0); |
694 } | 665 } |
695 } | 666 } |
696 | 667 |
697 static void update_coef_probs(VP9_COMP *cpi, vp9_writer* w) { | 668 static void update_coef_probs(VP9_COMP *cpi, vp9_writer* w) { |
698 const TX_MODE tx_mode = cpi->common.tx_mode; | 669 const TX_MODE tx_mode = cpi->common.tx_mode; |
699 const TX_SIZE max_tx_size = tx_mode_to_biggest_tx_size[tx_mode]; | 670 const TX_SIZE max_tx_size = tx_mode_to_biggest_tx_size[tx_mode]; |
700 TX_SIZE tx_size; | 671 TX_SIZE tx_size; |
| 672 vp9_coeff_stats frame_branch_ct[TX_SIZES][PLANE_TYPES]; |
| 673 |
701 vp9_clear_system_state(); | 674 vp9_clear_system_state(); |
702 | 675 |
703 for (tx_size = TX_4X4; tx_size <= TX_32X32; ++tx_size) | 676 for (tx_size = TX_4X4; tx_size <= TX_32X32; ++tx_size) |
704 build_tree_distribution(cpi, tx_size); | 677 build_tree_distribution(cpi, tx_size, frame_branch_ct[tx_size]); |
705 | 678 |
706 for (tx_size = TX_4X4; tx_size <= max_tx_size; ++tx_size) | 679 for (tx_size = TX_4X4; tx_size <= max_tx_size; ++tx_size) |
707 update_coef_probs_common(w, cpi, tx_size); | 680 update_coef_probs_common(w, cpi, tx_size, frame_branch_ct[tx_size]); |
708 } | 681 } |
709 | 682 |
710 static void encode_loopfilter(struct loopfilter *lf, | 683 static void encode_loopfilter(struct loopfilter *lf, |
711 struct vp9_write_bit_buffer *wb) { | 684 struct vp9_write_bit_buffer *wb) { |
712 int i; | 685 int i; |
713 | 686 |
714 // Encode the loop filter level and type | 687 // Encode the loop filter level and type |
715 vp9_wb_write_literal(wb, lf->filter_level, 6); | 688 vp9_wb_write_literal(wb, lf->filter_level, 6); |
716 vp9_wb_write_literal(wb, lf->sharpness_level, 3); | 689 vp9_wb_write_literal(wb, lf->sharpness_level, 3); |
717 | 690 |
(...skipping 236 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
954 static size_t encode_tiles(VP9_COMP *cpi, uint8_t *data_ptr) { | 927 static size_t encode_tiles(VP9_COMP *cpi, uint8_t *data_ptr) { |
955 VP9_COMMON *const cm = &cpi->common; | 928 VP9_COMMON *const cm = &cpi->common; |
956 vp9_writer residual_bc; | 929 vp9_writer residual_bc; |
957 | 930 |
958 int tile_row, tile_col; | 931 int tile_row, tile_col; |
959 TOKENEXTRA *tok[4][1 << 6], *tok_end; | 932 TOKENEXTRA *tok[4][1 << 6], *tok_end; |
960 size_t total_size = 0; | 933 size_t total_size = 0; |
961 const int tile_cols = 1 << cm->log2_tile_cols; | 934 const int tile_cols = 1 << cm->log2_tile_cols; |
962 const int tile_rows = 1 << cm->log2_tile_rows; | 935 const int tile_rows = 1 << cm->log2_tile_rows; |
963 | 936 |
964 vpx_memset(cpi->above_seg_context, 0, sizeof(*cpi->above_seg_context) * | 937 vpx_memset(cm->above_seg_context, 0, sizeof(*cm->above_seg_context) * |
965 mi_cols_aligned_to_sb(cm->mi_cols)); | 938 mi_cols_aligned_to_sb(cm->mi_cols)); |
966 | 939 |
967 tok[0][0] = cpi->tok; | 940 tok[0][0] = cpi->tok; |
968 for (tile_row = 0; tile_row < tile_rows; tile_row++) { | 941 for (tile_row = 0; tile_row < tile_rows; tile_row++) { |
969 if (tile_row) | 942 if (tile_row) |
970 tok[tile_row][0] = tok[tile_row - 1][tile_cols - 1] + | 943 tok[tile_row][0] = tok[tile_row - 1][tile_cols - 1] + |
971 cpi->tok_count[tile_row - 1][tile_cols - 1]; | 944 cpi->tok_count[tile_row - 1][tile_cols - 1]; |
972 | 945 |
973 for (tile_col = 1; tile_col < tile_cols; tile_col++) | 946 for (tile_col = 1; tile_col < tile_cols; tile_col++) |
974 tok[tile_row][tile_col] = tok[tile_row][tile_col - 1] + | 947 tok[tile_row][tile_col] = tok[tile_row][tile_col - 1] + |
(...skipping 76 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1051 | 1024 |
1052 write_display_size(cm, wb); | 1025 write_display_size(cm, wb); |
1053 } | 1026 } |
1054 | 1027 |
1055 static void write_sync_code(struct vp9_write_bit_buffer *wb) { | 1028 static void write_sync_code(struct vp9_write_bit_buffer *wb) { |
1056 vp9_wb_write_literal(wb, VP9_SYNC_CODE_0, 8); | 1029 vp9_wb_write_literal(wb, VP9_SYNC_CODE_0, 8); |
1057 vp9_wb_write_literal(wb, VP9_SYNC_CODE_1, 8); | 1030 vp9_wb_write_literal(wb, VP9_SYNC_CODE_1, 8); |
1058 vp9_wb_write_literal(wb, VP9_SYNC_CODE_2, 8); | 1031 vp9_wb_write_literal(wb, VP9_SYNC_CODE_2, 8); |
1059 } | 1032 } |
1060 | 1033 |
| 1034 static void write_profile(BITSTREAM_PROFILE profile, |
| 1035 struct vp9_write_bit_buffer *wb) { |
| 1036 assert(profile < MAX_PROFILES); |
| 1037 vp9_wb_write_bit(wb, profile & 1); |
| 1038 vp9_wb_write_bit(wb, profile >> 1); |
| 1039 } |
| 1040 |
1061 static void write_uncompressed_header(VP9_COMP *cpi, | 1041 static void write_uncompressed_header(VP9_COMP *cpi, |
1062 struct vp9_write_bit_buffer *wb) { | 1042 struct vp9_write_bit_buffer *wb) { |
1063 VP9_COMMON *const cm = &cpi->common; | 1043 VP9_COMMON *const cm = &cpi->common; |
1064 | 1044 |
1065 vp9_wb_write_literal(wb, VP9_FRAME_MARKER, 2); | 1045 vp9_wb_write_literal(wb, VP9_FRAME_MARKER, 2); |
1066 | 1046 |
1067 // bitstream version. | 1047 write_profile(cm->profile, wb); |
1068 // 00 - profile 0. 4:2:0 only | |
1069 // 10 - profile 1. adds 4:4:4, 4:2:2, alpha | |
1070 vp9_wb_write_bit(wb, cm->version); | |
1071 vp9_wb_write_bit(wb, 0); | |
1072 | 1048 |
1073 vp9_wb_write_bit(wb, 0); | 1049 vp9_wb_write_bit(wb, 0); // show_existing_frame |
1074 vp9_wb_write_bit(wb, cm->frame_type); | 1050 vp9_wb_write_bit(wb, cm->frame_type); |
1075 vp9_wb_write_bit(wb, cm->show_frame); | 1051 vp9_wb_write_bit(wb, cm->show_frame); |
1076 vp9_wb_write_bit(wb, cm->error_resilient_mode); | 1052 vp9_wb_write_bit(wb, cm->error_resilient_mode); |
1077 | 1053 |
1078 if (cm->frame_type == KEY_FRAME) { | 1054 if (cm->frame_type == KEY_FRAME) { |
1079 const COLOR_SPACE cs = UNKNOWN; | 1055 const COLOR_SPACE cs = UNKNOWN; |
1080 write_sync_code(wb); | 1056 write_sync_code(wb); |
| 1057 if (cm->profile > PROFILE_1) { |
| 1058 assert(cm->bit_depth > BITS_8); |
| 1059 vp9_wb_write_bit(wb, cm->bit_depth - BITS_10); |
| 1060 } |
1081 vp9_wb_write_literal(wb, cs, 3); | 1061 vp9_wb_write_literal(wb, cs, 3); |
1082 if (cs != SRGB) { | 1062 if (cs != SRGB) { |
1083 vp9_wb_write_bit(wb, 0); // 0: [16, 235] (i.e. xvYCC), 1: [0, 255] | 1063 vp9_wb_write_bit(wb, 0); // 0: [16, 235] (i.e. xvYCC), 1: [0, 255] |
1084 if (cm->version == 1) { | 1064 if (cm->profile >= PROFILE_1) { |
1085 vp9_wb_write_bit(wb, cm->subsampling_x); | 1065 vp9_wb_write_bit(wb, cm->subsampling_x); |
1086 vp9_wb_write_bit(wb, cm->subsampling_y); | 1066 vp9_wb_write_bit(wb, cm->subsampling_y); |
1087 vp9_wb_write_bit(wb, 0); // has extra plane | 1067 vp9_wb_write_bit(wb, 0); // has extra plane |
1088 } | 1068 } |
1089 } else { | 1069 } else { |
1090 assert(cm->version == 1); | 1070 assert(cm->profile == PROFILE_1); |
1091 vp9_wb_write_bit(wb, 0); // has extra plane | 1071 vp9_wb_write_bit(wb, 0); // has extra plane |
1092 } | 1072 } |
1093 | 1073 |
1094 write_frame_size(cm, wb); | 1074 write_frame_size(cm, wb); |
1095 } else { | 1075 } else { |
1096 if (!cm->show_frame) | 1076 if (!cm->show_frame) |
1097 vp9_wb_write_bit(wb, cm->intra_only); | 1077 vp9_wb_write_bit(wb, cm->intra_only); |
1098 | 1078 |
1099 if (!cm->error_resilient_mode) | 1079 if (!cm->error_resilient_mode) |
1100 vp9_wb_write_literal(wb, cm->reset_frame_context, 2); | 1080 vp9_wb_write_literal(wb, cm->reset_frame_context, 2); |
(...skipping 42 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1143 vp9_writer header_bc; | 1123 vp9_writer header_bc; |
1144 | 1124 |
1145 vp9_start_encode(&header_bc, data); | 1125 vp9_start_encode(&header_bc, data); |
1146 | 1126 |
1147 if (xd->lossless) | 1127 if (xd->lossless) |
1148 cm->tx_mode = ONLY_4X4; | 1128 cm->tx_mode = ONLY_4X4; |
1149 else | 1129 else |
1150 encode_txfm_probs(cm, &header_bc); | 1130 encode_txfm_probs(cm, &header_bc); |
1151 | 1131 |
1152 update_coef_probs(cpi, &header_bc); | 1132 update_coef_probs(cpi, &header_bc); |
1153 | |
1154 #ifdef ENTROPY_STATS | |
1155 active_section = 2; | |
1156 #endif | |
1157 | |
1158 update_skip_probs(cm, &header_bc); | 1133 update_skip_probs(cm, &header_bc); |
1159 | 1134 |
1160 if (!frame_is_intra_only(cm)) { | 1135 if (!frame_is_intra_only(cm)) { |
1161 int i; | 1136 int i; |
1162 #ifdef ENTROPY_STATS | |
1163 active_section = 1; | |
1164 #endif | |
1165 | 1137 |
1166 for (i = 0; i < INTER_MODE_CONTEXTS; ++i) | 1138 for (i = 0; i < INTER_MODE_CONTEXTS; ++i) |
1167 prob_diff_update(vp9_inter_mode_tree, cm->fc.inter_mode_probs[i], | 1139 prob_diff_update(vp9_inter_mode_tree, cm->fc.inter_mode_probs[i], |
1168 cm->counts.inter_mode[i], INTER_MODES, &header_bc); | 1140 cm->counts.inter_mode[i], INTER_MODES, &header_bc); |
1169 | 1141 |
1170 vp9_zero(cm->counts.inter_mode); | 1142 vp9_zero(cm->counts.inter_mode); |
1171 | 1143 |
1172 if (cm->interp_filter == SWITCHABLE) | 1144 if (cm->interp_filter == SWITCHABLE) |
1173 update_switchable_interp_probs(cm, &header_bc); | 1145 update_switchable_interp_probs(cm, &header_bc); |
1174 | 1146 |
(...skipping 41 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1216 } | 1188 } |
1217 | 1189 |
1218 vp9_stop_encode(&header_bc); | 1190 vp9_stop_encode(&header_bc); |
1219 assert(header_bc.pos <= 0xffff); | 1191 assert(header_bc.pos <= 0xffff); |
1220 | 1192 |
1221 return header_bc.pos; | 1193 return header_bc.pos; |
1222 } | 1194 } |
1223 | 1195 |
1224 void vp9_pack_bitstream(VP9_COMP *cpi, uint8_t *dest, size_t *size) { | 1196 void vp9_pack_bitstream(VP9_COMP *cpi, uint8_t *dest, size_t *size) { |
1225 uint8_t *data = dest; | 1197 uint8_t *data = dest; |
1226 size_t first_part_size; | 1198 size_t first_part_size, uncompressed_hdr_size; |
1227 struct vp9_write_bit_buffer wb = {data, 0}; | 1199 struct vp9_write_bit_buffer wb = {data, 0}; |
1228 struct vp9_write_bit_buffer saved_wb; | 1200 struct vp9_write_bit_buffer saved_wb; |
1229 | 1201 |
1230 write_uncompressed_header(cpi, &wb); | 1202 write_uncompressed_header(cpi, &wb); |
1231 saved_wb = wb; | 1203 saved_wb = wb; |
1232 vp9_wb_write_literal(&wb, 0, 16); // don't know in advance first part. size | 1204 vp9_wb_write_literal(&wb, 0, 16); // don't know in advance first part. size |
1233 | 1205 |
1234 data += vp9_rb_bytes_written(&wb); | 1206 uncompressed_hdr_size = vp9_rb_bytes_written(&wb); |
| 1207 data += uncompressed_hdr_size; |
1235 | 1208 |
1236 vp9_compute_update_table(); | 1209 vp9_compute_update_table(); |
1237 | 1210 |
1238 #ifdef ENTROPY_STATS | |
1239 if (cm->frame_type == INTER_FRAME) | |
1240 active_section = 0; | |
1241 else | |
1242 active_section = 7; | |
1243 #endif | |
1244 | |
1245 vp9_clear_system_state(); | 1211 vp9_clear_system_state(); |
1246 | 1212 |
1247 first_part_size = write_compressed_header(cpi, data); | 1213 first_part_size = write_compressed_header(cpi, data); |
1248 data += first_part_size; | 1214 data += first_part_size; |
1249 // TODO(jbb): Figure out what to do if first_part_size > 16 bits. | 1215 // TODO(jbb): Figure out what to do if first_part_size > 16 bits. |
1250 vp9_wb_write_literal(&saved_wb, (int)first_part_size, 16); | 1216 vp9_wb_write_literal(&saved_wb, (int)first_part_size, 16); |
1251 | 1217 |
1252 data += encode_tiles(cpi, data); | 1218 data += encode_tiles(cpi, data); |
1253 | 1219 |
1254 *size = data - dest; | 1220 *size = data - dest; |
1255 } | 1221 } |
1256 | 1222 |
OLD | NEW |