OLD | NEW |
1 /* | 1 /* |
2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved. | 2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
11 #include <assert.h> | 11 #include <assert.h> |
12 #include <stdio.h> | 12 #include <stdio.h> |
13 #include <limits.h> | 13 #include <limits.h> |
14 | 14 |
15 #include "vpx/vpx_encoder.h" | 15 #include "vpx/vpx_encoder.h" |
16 #include "vpx_mem/vpx_mem.h" | 16 #include "vpx_mem/vpx_mem.h" |
17 | 17 |
18 #include "vp9/common/vp9_entropymode.h" | 18 #include "vp9/common/vp9_entropymode.h" |
19 #include "vp9/common/vp9_entropymv.h" | 19 #include "vp9/common/vp9_entropymv.h" |
20 #include "vp9/common/vp9_findnearmv.h" | 20 #include "vp9/common/vp9_findnearmv.h" |
21 #include "vp9/common/vp9_tile_common.h" | 21 #include "vp9/common/vp9_tile_common.h" |
22 #include "vp9/common/vp9_seg_common.h" | 22 #include "vp9/common/vp9_seg_common.h" |
23 #include "vp9/common/vp9_pred_common.h" | 23 #include "vp9/common/vp9_pred_common.h" |
24 #include "vp9/common/vp9_entropy.h" | 24 #include "vp9/common/vp9_entropy.h" |
25 #include "vp9/common/vp9_entropymv.h" | |
26 #include "vp9/common/vp9_mvref_common.h" | 25 #include "vp9/common/vp9_mvref_common.h" |
27 #include "vp9/common/vp9_treecoder.h" | 26 #include "vp9/common/vp9_treecoder.h" |
28 #include "vp9/common/vp9_systemdependent.h" | 27 #include "vp9/common/vp9_systemdependent.h" |
29 #include "vp9/common/vp9_pragmas.h" | 28 #include "vp9/common/vp9_pragmas.h" |
30 | 29 |
31 #include "vp9/encoder/vp9_mcomp.h" | 30 #include "vp9/encoder/vp9_mcomp.h" |
32 #include "vp9/encoder/vp9_encodemv.h" | 31 #include "vp9/encoder/vp9_encodemv.h" |
33 #include "vp9/encoder/vp9_bitstream.h" | 32 #include "vp9/encoder/vp9_bitstream.h" |
34 #include "vp9/encoder/vp9_segmentation.h" | 33 #include "vp9/encoder/vp9_segmentation.h" |
35 #include "vp9/encoder/vp9_subexp.h" | 34 #include "vp9/encoder/vp9_subexp.h" |
(...skipping 11 matching lines...) |
47 vp9_coeff_stats tree_update_hist[TX_SIZES][BLOCK_TYPES]; | 46 vp9_coeff_stats tree_update_hist[TX_SIZES][BLOCK_TYPES]; |
48 | 47 |
49 extern unsigned int active_section; | 48 extern unsigned int active_section; |
50 #endif | 49 #endif |
51 | 50 |
52 | 51 |
53 #ifdef MODE_STATS | 52 #ifdef MODE_STATS |
54 int64_t tx_count_32x32p_stats[TX_SIZE_CONTEXTS][TX_SIZES]; | 53 int64_t tx_count_32x32p_stats[TX_SIZE_CONTEXTS][TX_SIZES]; |
55 int64_t tx_count_16x16p_stats[TX_SIZE_CONTEXTS][TX_SIZES - 1]; | 54 int64_t tx_count_16x16p_stats[TX_SIZE_CONTEXTS][TX_SIZES - 1]; |
56 int64_t tx_count_8x8p_stats[TX_SIZE_CONTEXTS][TX_SIZES - 2]; | 55 int64_t tx_count_8x8p_stats[TX_SIZE_CONTEXTS][TX_SIZES - 2]; |
57 int64_t switchable_interp_stats[SWITCHABLE_FILTERS+1] | 56 int64_t switchable_interp_stats[SWITCHABLE_FILTER_CONTEXTS][SWITCHABLE_FILTERS]; |
58 [SWITCHABLE_FILTERS]; | |
59 | 57 |
60 void init_tx_count_stats() { | 58 void init_tx_count_stats() { |
61 vp9_zero(tx_count_32x32p_stats); | 59 vp9_zero(tx_count_32x32p_stats); |
62 vp9_zero(tx_count_16x16p_stats); | 60 vp9_zero(tx_count_16x16p_stats); |
63 vp9_zero(tx_count_8x8p_stats); | 61 vp9_zero(tx_count_8x8p_stats); |
64 } | 62 } |
65 | 63 |
66 void init_switchable_interp_stats() { | 64 void init_switchable_interp_stats() { |
67 vp9_zero(switchable_interp_stats); | 65 vp9_zero(switchable_interp_stats); |
68 } | 66 } |
(...skipping 12 matching lines...) |
81 } | 79 } |
82 for (i = 0; i < TX_SIZE_CONTEXTS; i++) { | 80 for (i = 0; i < TX_SIZE_CONTEXTS; i++) { |
83 for (j = 0; j < TX_SIZES - 2; j++) { | 81 for (j = 0; j < TX_SIZES - 2; j++) { |
84 tx_count_8x8p_stats[i][j] += cm->fc.tx_count_8x8p[i][j]; | 82 tx_count_8x8p_stats[i][j] += cm->fc.tx_count_8x8p[i][j]; |
85 } | 83 } |
86 } | 84 } |
87 } | 85 } |
88 | 86 |
89 static void update_switchable_interp_stats(VP9_COMMON *cm) { | 87 static void update_switchable_interp_stats(VP9_COMMON *cm) { |
90 int i, j; | 88 int i, j; |
91 for (i = 0; i < SWITCHABLE_FILTERS+1; ++i) | 89 for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i) |
92 for (j = 0; j < SWITCHABLE_FILTERS; ++j) { | 90 for (j = 0; j < SWITCHABLE_FILTERS; ++j) |
93 switchable_interp_stats[i][j] += cm->fc.switchable_interp_count[i][j]; | 91 switchable_interp_stats[i][j] += cm->fc.switchable_interp_count[i][j]; |
94 } | |
95 } | 92 } |
96 | 93 |
97 void write_tx_count_stats() { | 94 void write_tx_count_stats() { |
98 int i, j; | 95 int i, j; |
99 FILE *fp = fopen("tx_count.bin", "wb"); | 96 FILE *fp = fopen("tx_count.bin", "wb"); |
100 fwrite(tx_count_32x32p_stats, sizeof(tx_count_32x32p_stats), 1, fp); | 97 fwrite(tx_count_32x32p_stats, sizeof(tx_count_32x32p_stats), 1, fp); |
101 fwrite(tx_count_16x16p_stats, sizeof(tx_count_16x16p_stats), 1, fp); | 98 fwrite(tx_count_16x16p_stats, sizeof(tx_count_16x16p_stats), 1, fp); |
102 fwrite(tx_count_8x8p_stats, sizeof(tx_count_8x8p_stats), 1, fp); | 99 fwrite(tx_count_8x8p_stats, sizeof(tx_count_8x8p_stats), 1, fp); |
103 fclose(fp); | 100 fclose(fp); |
104 | 101 |
(...skipping 29 matching lines...) |
134 printf("};\n"); | 131 printf("};\n"); |
135 } | 132 } |
136 | 133 |
137 void write_switchable_interp_stats() { | 134 void write_switchable_interp_stats() { |
138 int i, j; | 135 int i, j; |
139 FILE *fp = fopen("switchable_interp.bin", "wb"); | 136 FILE *fp = fopen("switchable_interp.bin", "wb"); |
140 fwrite(switchable_interp_stats, sizeof(switchable_interp_stats), 1, fp); | 137 fwrite(switchable_interp_stats, sizeof(switchable_interp_stats), 1, fp); |
141 fclose(fp); | 138 fclose(fp); |
142 | 139 |
143 printf( | 140 printf( |
144 "vp9_default_switchable_filter_count[SWITCHABLE_FILTERS+1]" | 141 "vp9_default_switchable_filter_count[SWITCHABLE_FILTER_CONTEXTS]" |
145 "[SWITCHABLE_FILTERS] = {\n"); | 142 "[SWITCHABLE_FILTERS] = {\n"); |
146 for (i = 0; i < SWITCHABLE_FILTERS+1; i++) { | 143 for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++) { |
147 printf(" { "); | 144 printf(" { "); |
148 for (j = 0; j < SWITCHABLE_FILTERS; j++) { | 145 for (j = 0; j < SWITCHABLE_FILTERS; j++) { |
149 printf("%"PRId64", ", switchable_interp_stats[i][j]); | 146 printf("%"PRId64", ", switchable_interp_stats[i][j]); |
150 } | 147 } |
151 printf("},\n"); | 148 printf("},\n"); |
152 } | 149 } |
153 printf("};\n"); | 150 printf("};\n"); |
154 } | 151 } |
155 #endif | 152 #endif |
156 | 153 |
(...skipping 16 matching lines...) |
173 vp9_prob Pnew[/* n-1 */], | 170 vp9_prob Pnew[/* n-1 */], |
174 vp9_prob Pcur[/* n-1 */], | 171 vp9_prob Pcur[/* n-1 */], |
175 unsigned int bct[/* n-1 */] [2], | 172 unsigned int bct[/* n-1 */] [2], |
176 const unsigned int num_events[/* n */] | 173 const unsigned int num_events[/* n */] |
177 ) { | 174 ) { |
178 int i = 0; | 175 int i = 0; |
179 | 176 |
180 vp9_tree_probs_from_distribution(tree, Pnew, bct, num_events, 0); | 177 vp9_tree_probs_from_distribution(tree, Pnew, bct, num_events, 0); |
181 n--; | 178 n--; |
182 | 179 |
183 for (i = 0; i < n; ++i) { | 180 for (i = 0; i < n; ++i) |
184 vp9_cond_prob_diff_update(w, &Pcur[i], MODE_UPDATE_PROB, bct[i]); | 181 vp9_cond_prob_diff_update(w, &Pcur[i], bct[i]); |
185 } | |
186 } | 182 } |
187 | 183 |
188 static void update_mbintra_mode_probs(VP9_COMP* const cpi, | 184 static void update_mbintra_mode_probs(VP9_COMP* const cpi, |
189 vp9_writer* const bc) { | 185 vp9_writer* const bc) { |
190 VP9_COMMON *const cm = &cpi->common; | 186 VP9_COMMON *const cm = &cpi->common; |
191 int j; | 187 int j; |
192 vp9_prob pnew[INTRA_MODES - 1]; | 188 vp9_prob pnew[INTRA_MODES - 1]; |
193 unsigned int bct[INTRA_MODES - 1][2]; | 189 unsigned int bct[INTRA_MODES - 1][2]; |
194 | 190 |
195 for (j = 0; j < BLOCK_SIZE_GROUPS; j++) | 191 for (j = 0; j < BLOCK_SIZE_GROUPS; j++) |
(...skipping 25 matching lines...) |
221 vp9_write(w, skip_coeff, vp9_get_pred_prob_mbskip(&cpi->common, xd)); | 217 vp9_write(w, skip_coeff, vp9_get_pred_prob_mbskip(&cpi->common, xd)); |
222 return skip_coeff; | 218 return skip_coeff; |
223 } | 219 } |
224 } | 220 } |
225 | 221 |
226 void vp9_update_skip_probs(VP9_COMP *cpi, vp9_writer *w) { | 222 void vp9_update_skip_probs(VP9_COMP *cpi, vp9_writer *w) { |
227 VP9_COMMON *cm = &cpi->common; | 223 VP9_COMMON *cm = &cpi->common; |
228 int k; | 224 int k; |
229 | 225 |
230 for (k = 0; k < MBSKIP_CONTEXTS; ++k) | 226 for (k = 0; k < MBSKIP_CONTEXTS; ++k) |
231 vp9_cond_prob_diff_update(w, &cm->fc.mbskip_probs[k], | 227 vp9_cond_prob_diff_update(w, &cm->fc.mbskip_probs[k], cm->counts.mbskip[k]); |
232 MODE_UPDATE_PROB, cm->counts.mbskip[k]); | |
233 } | 228 } |
234 | 229 |
235 static void write_intra_mode(vp9_writer *bc, int m, const vp9_prob *p) { | 230 static void write_intra_mode(vp9_writer *bc, int m, const vp9_prob *p) { |
236 write_token(bc, vp9_intra_mode_tree, p, vp9_intra_mode_encodings + m); | 231 write_token(bc, vp9_intra_mode_tree, p, vp9_intra_mode_encodings + m); |
237 } | 232 } |
238 | 233 |
239 static void update_switchable_interp_probs(VP9_COMP *const cpi, | 234 static void update_switchable_interp_probs(VP9_COMP *const cpi, |
240 vp9_writer* const bc) { | 235 vp9_writer* const bc) { |
241 VP9_COMMON *const cm = &cpi->common; | 236 VP9_COMMON *const cm = &cpi->common; |
242 unsigned int branch_ct[SWITCHABLE_FILTERS + 1] | 237 unsigned int branch_ct[SWITCHABLE_FILTER_CONTEXTS][SWITCHABLE_FILTERS - 1][2]; |
243 [SWITCHABLE_FILTERS - 1][2]; | 238 vp9_prob new_prob[SWITCHABLE_FILTER_CONTEXTS][SWITCHABLE_FILTERS - 1]; |
244 vp9_prob new_prob[SWITCHABLE_FILTERS + 1][SWITCHABLE_FILTERS - 1]; | |
245 int i, j; | 239 int i, j; |
246 for (j = 0; j <= SWITCHABLE_FILTERS; ++j) { | 240 for (j = 0; j < SWITCHABLE_FILTER_CONTEXTS; ++j) { |
247 vp9_tree_probs_from_distribution( | 241 vp9_tree_probs_from_distribution( |
248 vp9_switchable_interp_tree, | 242 vp9_switchable_interp_tree, |
249 new_prob[j], branch_ct[j], | 243 new_prob[j], branch_ct[j], |
250 cm->counts.switchable_interp[j], 0); | 244 cm->counts.switchable_interp[j], 0); |
251 } | 245 } |
252 for (j = 0; j <= SWITCHABLE_FILTERS; ++j) { | 246 for (j = 0; j < SWITCHABLE_FILTER_CONTEXTS; ++j) { |
253 for (i = 0; i < SWITCHABLE_FILTERS - 1; ++i) { | 247 for (i = 0; i < SWITCHABLE_FILTERS - 1; ++i) { |
254 vp9_cond_prob_diff_update(bc, &cm->fc.switchable_interp_prob[j][i], | 248 vp9_cond_prob_diff_update(bc, &cm->fc.switchable_interp_prob[j][i], |
255 MODE_UPDATE_PROB, branch_ct[j][i]); | 249 branch_ct[j][i]); |
256 } | 250 } |
257 } | 251 } |
258 #ifdef MODE_STATS | 252 #ifdef MODE_STATS |
259 if (!cpi->dummy_packing) | 253 if (!cpi->dummy_packing) |
260 update_switchable_interp_stats(cm); | 254 update_switchable_interp_stats(cm); |
261 #endif | 255 #endif |
262 } | 256 } |
263 | 257 |
264 static void update_inter_mode_probs(VP9_COMMON *cm, vp9_writer* const bc) { | 258 static void update_inter_mode_probs(VP9_COMMON *cm, vp9_writer* const bc) { |
265 int i, j; | 259 int i, j; |
266 | 260 |
267 for (i = 0; i < INTER_MODE_CONTEXTS; ++i) { | 261 for (i = 0; i < INTER_MODE_CONTEXTS; ++i) { |
268 unsigned int branch_ct[INTER_MODES - 1][2]; | 262 unsigned int branch_ct[INTER_MODES - 1][2]; |
269 vp9_prob new_prob[INTER_MODES - 1]; | 263 vp9_prob new_prob[INTER_MODES - 1]; |
270 | 264 |
271 vp9_tree_probs_from_distribution(vp9_inter_mode_tree, | 265 vp9_tree_probs_from_distribution(vp9_inter_mode_tree, |
272 new_prob, branch_ct, | 266 new_prob, branch_ct, |
273 cm->counts.inter_mode[i], NEARESTMV); | 267 cm->counts.inter_mode[i], NEARESTMV); |
274 | 268 |
275 for (j = 0; j < INTER_MODES - 1; ++j) | 269 for (j = 0; j < INTER_MODES - 1; ++j) |
276 vp9_cond_prob_diff_update(bc, &cm->fc.inter_mode_probs[i][j], | 270 vp9_cond_prob_diff_update(bc, &cm->fc.inter_mode_probs[i][j], |
277 MODE_UPDATE_PROB, branch_ct[j]); | 271 branch_ct[j]); |
278 } | 272 } |
279 } | 273 } |
280 | 274 |
281 static void pack_mb_tokens(vp9_writer* const bc, | 275 static void pack_mb_tokens(vp9_writer* const bc, |
282 TOKENEXTRA **tp, | 276 TOKENEXTRA **tp, |
283 const TOKENEXTRA *const stop) { | 277 const TOKENEXTRA *const stop) { |
284 TOKENEXTRA *p = *tp; | 278 TOKENEXTRA *p = *tp; |
285 | 279 |
286 while (p < stop) { | 280 while (p < stop && p->token != EOSB_TOKEN) { |
287 const int t = p->token; | 281 const int t = p->token; |
288 const struct vp9_token *const a = vp9_coef_encodings + t; | 282 const struct vp9_token *const a = vp9_coef_encodings + t; |
289 const vp9_extra_bit *const b = vp9_extra_bits + t; | 283 const vp9_extra_bit *const b = vp9_extra_bits + t; |
290 int i = 0; | 284 int i = 0; |
291 const vp9_prob *pp; | 285 const vp9_prob *pp; |
292 int v = a->value; | 286 int v = a->value; |
293 int n = a->len; | 287 int n = a->len; |
294 vp9_prob probs[ENTROPY_NODES]; | 288 vp9_prob probs[ENTROPY_NODES]; |
295 | 289 |
296 if (t == EOSB_TOKEN) { | |
297 ++p; | |
298 break; | |
299 } | |
300 if (t >= TWO_TOKEN) { | 290 if (t >= TWO_TOKEN) { |
301 vp9_model_to_full_probs(p->context_tree, probs); | 291 vp9_model_to_full_probs(p->context_tree, probs); |
302 pp = probs; | 292 pp = probs; |
303 } else { | 293 } else { |
304 pp = p->context_tree; | 294 pp = p->context_tree; |
305 } | 295 } |
306 assert(pp != 0); | 296 assert(pp != 0); |
307 | 297 |
308 /* skip one or two nodes */ | 298 /* skip one or two nodes */ |
309 if (p->skip_eob_node) { | 299 if (p->skip_eob_node) { |
(...skipping 21 matching lines...) |
331 vp9_write(bc, bb, pb[i >> 1]); | 321 vp9_write(bc, bb, pb[i >> 1]); |
332 i = b->tree[i + bb]; | 322 i = b->tree[i + bb]; |
333 } while (n); | 323 } while (n); |
334 } | 324 } |
335 | 325 |
336 vp9_write_bit(bc, e & 1); | 326 vp9_write_bit(bc, e & 1); |
337 } | 327 } |
338 ++p; | 328 ++p; |
339 } | 329 } |
340 | 330 |
341 *tp = p; | 331 *tp = p + (p->token == EOSB_TOKEN); |
342 } | 332 } |
343 | 333 |
344 static void write_sb_mv_ref(vp9_writer *w, MB_PREDICTION_MODE mode, | 334 static void write_sb_mv_ref(vp9_writer *w, MB_PREDICTION_MODE mode, |
345 const vp9_prob *p) { | 335 const vp9_prob *p) { |
346 assert(is_inter_mode(mode)); | 336 assert(is_inter_mode(mode)); |
347 write_token(w, vp9_inter_mode_tree, p, | 337 write_token(w, vp9_inter_mode_tree, p, |
348 &vp9_inter_mode_encodings[mode - NEARESTMV]); | 338 &vp9_inter_mode_encodings[inter_mode_offset(mode)]); |
349 } | 339 } |
350 | 340 |
351 | 341 |
352 static void write_segment_id(vp9_writer *w, const struct segmentation *seg, | 342 static void write_segment_id(vp9_writer *w, const struct segmentation *seg, |
353 int segment_id) { | 343 int segment_id) { |
354 if (seg->enabled && seg->update_map) | 344 if (seg->enabled && seg->update_map) |
355 treed_write(w, vp9_segment_tree, seg->tree_probs, segment_id, 3); | 345 treed_write(w, vp9_segment_tree, seg->tree_probs, segment_id, 3); |
356 } | 346 } |
357 | 347 |
358 // This function encodes the reference frame | 348 // This function encodes the reference frame |
359 static void encode_ref_frame(VP9_COMP *cpi, vp9_writer *bc) { | 349 static void encode_ref_frame(VP9_COMP *cpi, vp9_writer *bc) { |
360 VP9_COMMON *const cm = &cpi->common; | 350 VP9_COMMON *const cm = &cpi->common; |
361 MACROBLOCK *const x = &cpi->mb; | 351 MACROBLOCK *const x = &cpi->mb; |
362 MACROBLOCKD *const xd = &x->e_mbd; | 352 MACROBLOCKD *const xd = &x->e_mbd; |
363 MB_MODE_INFO *mi = &xd->this_mi->mbmi; | 353 MB_MODE_INFO *mi = &xd->mi_8x8[0]->mbmi; |
364 const int segment_id = mi->segment_id; | 354 const int segment_id = mi->segment_id; |
365 int seg_ref_active = vp9_segfeature_active(&cm->seg, segment_id, | 355 int seg_ref_active = vp9_segfeature_active(&cm->seg, segment_id, |
366 SEG_LVL_REF_FRAME); | 356 SEG_LVL_REF_FRAME); |
367 // If segment level coding of this signal is disabled... | 357 // If segment level coding of this signal is disabled... |
368 // or the segment allows multiple reference frame options | 358 // or the segment allows multiple reference frame options |
369 if (!seg_ref_active) { | 359 if (!seg_ref_active) { |
370 // does the feature use compound prediction or not | 360 // does the feature use compound prediction or not |
371 // (if not specified at the frame/segment level) | 361 // (if not specified at the frame/segment level) |
372 if (cm->comp_pred_mode == HYBRID_PREDICTION) { | 362 if (cm->comp_pred_mode == HYBRID_PREDICTION) { |
373 vp9_write(bc, mi->ref_frame[1] > INTRA_FRAME, | 363 vp9_write(bc, mi->ref_frame[1] > INTRA_FRAME, |
(...skipping 12 matching lines...) |
386 if (mi->ref_frame[0] != LAST_FRAME) | 376 if (mi->ref_frame[0] != LAST_FRAME) |
387 vp9_write(bc, mi->ref_frame[0] != GOLDEN_FRAME, | 377 vp9_write(bc, mi->ref_frame[0] != GOLDEN_FRAME, |
388 vp9_get_pred_prob_single_ref_p2(cm, xd)); | 378 vp9_get_pred_prob_single_ref_p2(cm, xd)); |
389 } | 379 } |
390 } else { | 380 } else { |
391 assert(mi->ref_frame[1] <= INTRA_FRAME); | 381 assert(mi->ref_frame[1] <= INTRA_FRAME); |
392 assert(vp9_get_segdata(&cm->seg, segment_id, SEG_LVL_REF_FRAME) == | 382 assert(vp9_get_segdata(&cm->seg, segment_id, SEG_LVL_REF_FRAME) == |
393 mi->ref_frame[0]); | 383 mi->ref_frame[0]); |
394 } | 384 } |
395 | 385 |
396 // if using the prediction mdoel we have nothing further to do because | 386 // If using the prediction model we have nothing further to do because |
397 // the reference frame is fully coded by the segment | 387 // the reference frame is fully coded by the segment. |
398 } | 388 } |
399 | 389 |
400 static void pack_inter_mode_mvs(VP9_COMP *cpi, MODE_INFO *m, vp9_writer *bc) { | 390 static void pack_inter_mode_mvs(VP9_COMP *cpi, MODE_INFO *m, vp9_writer *bc) { |
401 VP9_COMMON *const cm = &cpi->common; | 391 VP9_COMMON *const cm = &cpi->common; |
402 const nmv_context *nmvc = &cm->fc.nmvc; | 392 const nmv_context *nmvc = &cm->fc.nmvc; |
403 MACROBLOCK *const x = &cpi->mb; | 393 MACROBLOCK *const x = &cpi->mb; |
404 MACROBLOCKD *const xd = &x->e_mbd; | 394 MACROBLOCKD *const xd = &x->e_mbd; |
405 struct segmentation *seg = &cm->seg; | 395 struct segmentation *seg = &cm->seg; |
406 MB_MODE_INFO *const mi = &m->mbmi; | 396 MB_MODE_INFO *const mi = &m->mbmi; |
407 const MV_REFERENCE_FRAME rf = mi->ref_frame[0]; | 397 const MV_REFERENCE_FRAME rf = mi->ref_frame[0]; |
408 const MB_PREDICTION_MODE mode = mi->mode; | 398 const MB_PREDICTION_MODE mode = mi->mode; |
409 const int segment_id = mi->segment_id; | 399 const int segment_id = mi->segment_id; |
410 int skip_coeff; | 400 int skip_coeff; |
411 const BLOCK_SIZE bsize = mi->sb_type; | 401 const BLOCK_SIZE bsize = mi->sb_type; |
412 const int allow_hp = xd->allow_high_precision_mv; | 402 const int allow_hp = cm->allow_high_precision_mv; |
413 | |
414 x->partition_info = x->pi + (m - cm->mi); | |
415 | 403 |
416 #ifdef ENTROPY_STATS | 404 #ifdef ENTROPY_STATS |
417 active_section = 9; | 405 active_section = 9; |
418 #endif | 406 #endif |
419 | 407 |
420 if (seg->update_map) { | 408 if (seg->update_map) { |
421 if (seg->temporal_update) { | 409 if (seg->temporal_update) { |
422 const int pred_flag = mi->seg_id_predicted; | 410 const int pred_flag = mi->seg_id_predicted; |
423 vp9_prob pred_prob = vp9_get_pred_prob_seg_id(seg, xd); | 411 vp9_prob pred_prob = vp9_get_pred_prob_seg_id(seg, xd); |
424 vp9_write(bc, pred_flag, pred_prob); | 412 vp9_write(bc, pred_flag, pred_prob); |
(...skipping 56 matching lines...) |
481 if (cm->mcomp_filter_type == SWITCHABLE) { | 469 if (cm->mcomp_filter_type == SWITCHABLE) { |
482 const int ctx = vp9_get_pred_context_switchable_interp(xd); | 470 const int ctx = vp9_get_pred_context_switchable_interp(xd); |
483 write_token(bc, vp9_switchable_interp_tree, | 471 write_token(bc, vp9_switchable_interp_tree, |
484 cm->fc.switchable_interp_prob[ctx], | 472 cm->fc.switchable_interp_prob[ctx], |
485 &vp9_switchable_interp_encodings[mi->interp_filter]); | 473 &vp9_switchable_interp_encodings[mi->interp_filter]); |
486 } else { | 474 } else { |
487 assert(mi->interp_filter == cm->mcomp_filter_type); | 475 assert(mi->interp_filter == cm->mcomp_filter_type); |
488 } | 476 } |
489 | 477 |
490 if (bsize < BLOCK_8X8) { | 478 if (bsize < BLOCK_8X8) { |
491 int j; | |
492 MB_PREDICTION_MODE blockmode; | |
493 int_mv blockmv; | |
494 const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize]; | 479 const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize]; |
495 const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize]; | 480 const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize]; |
496 int idx, idy; | 481 int idx, idy; |
497 for (idy = 0; idy < 2; idy += num_4x4_blocks_high) { | 482 for (idy = 0; idy < 2; idy += num_4x4_blocks_high) { |
498 for (idx = 0; idx < 2; idx += num_4x4_blocks_wide) { | 483 for (idx = 0; idx < 2; idx += num_4x4_blocks_wide) { |
499 j = idy * 2 + idx; | 484 const int j = idy * 2 + idx; |
500 blockmode = x->partition_info->bmi[j].mode; | 485 const MB_PREDICTION_MODE blockmode = m->bmi[j].as_mode; |
501 blockmv = m->bmi[j].as_mv[0]; | |
502 write_sb_mv_ref(bc, blockmode, mv_ref_p); | 486 write_sb_mv_ref(bc, blockmode, mv_ref_p); |
503 ++cm->counts.inter_mode[mi->mode_context[rf]] | 487 ++cm->counts.inter_mode[mi->mode_context[rf]] |
504 [inter_mode_offset(blockmode)]; | 488 [inter_mode_offset(blockmode)]; |
505 | 489 |
506 if (blockmode == NEWMV) { | 490 if (blockmode == NEWMV) { |
507 #ifdef ENTROPY_STATS | 491 #ifdef ENTROPY_STATS |
508 active_section = 11; | 492 active_section = 11; |
509 #endif | 493 #endif |
510 vp9_encode_mv(cpi, bc, &blockmv.as_mv, &mi->best_mv.as_mv, | 494 vp9_encode_mv(cpi, bc, &m->bmi[j].as_mv[0].as_mv, |
511 nmvc, allow_hp); | 495 &mi->best_mv[0].as_mv, nmvc, allow_hp); |
512 | 496 |
513 if (mi->ref_frame[1] > INTRA_FRAME) | 497 if (has_second_ref(mi)) |
514 vp9_encode_mv(cpi, bc, | 498 vp9_encode_mv(cpi, bc, &m->bmi[j].as_mv[1].as_mv, |
515 &m->bmi[j].as_mv[1].as_mv, | 499 &mi->best_mv[1].as_mv, nmvc, allow_hp); |
516 &mi->best_second_mv.as_mv, | |
517 nmvc, allow_hp); | |
518 } | 500 } |
519 } | 501 } |
520 } | 502 } |
521 } else if (mode == NEWMV) { | 503 } else if (mode == NEWMV) { |
522 #ifdef ENTROPY_STATS | 504 #ifdef ENTROPY_STATS |
523 active_section = 5; | 505 active_section = 5; |
524 #endif | 506 #endif |
525 vp9_encode_mv(cpi, bc, &mi->mv[0].as_mv, &mi->best_mv.as_mv, | 507 vp9_encode_mv(cpi, bc, &mi->mv[0].as_mv, |
526 nmvc, allow_hp); | 508 &mi->best_mv[0].as_mv, nmvc, allow_hp); |
527 | 509 |
528 if (mi->ref_frame[1] > INTRA_FRAME) | 510 if (has_second_ref(mi)) |
529 vp9_encode_mv(cpi, bc, &mi->mv[1].as_mv, &mi->best_second_mv.as_mv, | 511 vp9_encode_mv(cpi, bc, &mi->mv[1].as_mv, |
530 nmvc, allow_hp); | 512 &mi->best_mv[1].as_mv, nmvc, allow_hp); |
531 } | 513 } |
532 } | 514 } |
533 } | 515 } |
534 | 516 |
535 static void write_mb_modes_kf(const VP9_COMP *cpi, MODE_INFO **mi_8x8, | 517 static void write_mb_modes_kf(const VP9_COMP *cpi, MODE_INFO **mi_8x8, |
536 vp9_writer *bc) { | 518 vp9_writer *bc) { |
537 const VP9_COMMON *const cm = &cpi->common; | 519 const VP9_COMMON *const cm = &cpi->common; |
538 const MACROBLOCKD *const xd = &cpi->mb.e_mbd; | 520 const MACROBLOCKD *const xd = &cpi->mb.e_mbd; |
539 const struct segmentation *const seg = &cm->seg; | 521 const struct segmentation *const seg = &cm->seg; |
540 MODE_INFO *m = mi_8x8[0]; | 522 MODE_INFO *m = mi_8x8[0]; |
541 const int ym = m->mbmi.mode; | 523 const int ym = m->mbmi.mode; |
542 const int segment_id = m->mbmi.segment_id; | 524 const int segment_id = m->mbmi.segment_id; |
543 MODE_INFO *above_mi = mi_8x8[-xd->mode_info_stride]; | 525 MODE_INFO *above_mi = mi_8x8[-xd->mode_info_stride]; |
544 MODE_INFO *left_mi = mi_8x8[-1]; | 526 MODE_INFO *left_mi = xd->left_available ? mi_8x8[-1] : NULL; |
545 | 527 |
546 if (seg->update_map) | 528 if (seg->update_map) |
547 write_segment_id(bc, seg, m->mbmi.segment_id); | 529 write_segment_id(bc, seg, m->mbmi.segment_id); |
548 | 530 |
549 write_skip_coeff(cpi, segment_id, m, bc); | 531 write_skip_coeff(cpi, segment_id, m, bc); |
550 | 532 |
551 if (m->mbmi.sb_type >= BLOCK_8X8 && cm->tx_mode == TX_MODE_SELECT) | 533 if (m->mbmi.sb_type >= BLOCK_8X8 && cm->tx_mode == TX_MODE_SELECT) |
552 write_selected_tx_size(cpi, m, m->mbmi.tx_size, m->mbmi.sb_type, bc); | 534 write_selected_tx_size(cpi, m, m->mbmi.tx_size, m->mbmi.sb_type, bc); |
553 | 535 |
554 if (m->mbmi.sb_type >= BLOCK_8X8) { | 536 if (m->mbmi.sb_type >= BLOCK_8X8) { |
555 const MB_PREDICTION_MODE A = above_block_mode(m, above_mi, 0); | 537 const MB_PREDICTION_MODE A = above_block_mode(m, above_mi, 0); |
556 const MB_PREDICTION_MODE L = xd->left_available ? | 538 const MB_PREDICTION_MODE L = left_block_mode(m, left_mi, 0); |
557 left_block_mode(m, left_mi, 0) : DC_PRED; | |
558 write_intra_mode(bc, ym, vp9_kf_y_mode_prob[A][L]); | 539 write_intra_mode(bc, ym, vp9_kf_y_mode_prob[A][L]); |
559 } else { | 540 } else { |
560 int idx, idy; | 541 int idx, idy; |
561 const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[m->mbmi.sb_type]; | 542 const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[m->mbmi.sb_type]; |
562 const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[m->mbmi.sb_type]; | 543 const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[m->mbmi.sb_type]; |
563 for (idy = 0; idy < 2; idy += num_4x4_blocks_high) { | 544 for (idy = 0; idy < 2; idy += num_4x4_blocks_high) { |
564 for (idx = 0; idx < 2; idx += num_4x4_blocks_wide) { | 545 for (idx = 0; idx < 2; idx += num_4x4_blocks_wide) { |
565 int i = idy * 2 + idx; | 546 int i = idy * 2 + idx; |
566 const MB_PREDICTION_MODE A = above_block_mode(m, above_mi, i); | 547 const MB_PREDICTION_MODE A = above_block_mode(m, above_mi, i); |
567 const MB_PREDICTION_MODE L = (xd->left_available || idx) ? | 548 const MB_PREDICTION_MODE L = left_block_mode(m, left_mi, i); |
568 left_block_mode(m, left_mi, i) : DC_PRED; | |
569 const int bm = m->bmi[i].as_mode; | 549 const int bm = m->bmi[i].as_mode; |
570 #ifdef ENTROPY_STATS | 550 #ifdef ENTROPY_STATS |
571 ++intra_mode_stats[A][L][bm]; | 551 ++intra_mode_stats[A][L][bm]; |
572 #endif | 552 #endif |
573 write_intra_mode(bc, bm, vp9_kf_y_mode_prob[A][L]); | 553 write_intra_mode(bc, bm, vp9_kf_y_mode_prob[A][L]); |
574 } | 554 } |
575 } | 555 } |
576 } | 556 } |
577 | 557 |
578 write_intra_mode(bc, m->mbmi.uv_mode, vp9_kf_uv_mode_prob[ym]); | 558 write_intra_mode(bc, m->mbmi.uv_mode, vp9_kf_uv_mode_prob[ym]); |
579 } | 559 } |
580 | 560 |
581 static void write_modes_b(VP9_COMP *cpi, MODE_INFO **mi_8x8, vp9_writer *bc, | 561 static void write_modes_b(VP9_COMP *cpi, const TileInfo *const tile, |
| 562 MODE_INFO **mi_8x8, vp9_writer *bc, |
582 TOKENEXTRA **tok, TOKENEXTRA *tok_end, | 563 TOKENEXTRA **tok, TOKENEXTRA *tok_end, |
583 int mi_row, int mi_col) { | 564 int mi_row, int mi_col, int index) { |
584 VP9_COMMON *const cm = &cpi->common; | 565 VP9_COMMON *const cm = &cpi->common; |
585 MACROBLOCKD *const xd = &cpi->mb.e_mbd; | 566 MACROBLOCKD *const xd = &cpi->mb.e_mbd; |
586 MODE_INFO *m = mi_8x8[0]; | 567 MODE_INFO *m = mi_8x8[0]; |
587 | 568 |
588 if (m->mbmi.sb_type < BLOCK_8X8) | 569 if (m->mbmi.sb_type < BLOCK_8X8) |
589 if (xd->ab_index > 0) | 570 if (index > 0) |
590 return; | 571 return; |
591 | 572 |
592 xd->this_mi = mi_8x8[0]; | |
593 xd->mi_8x8 = mi_8x8; | 573 xd->mi_8x8 = mi_8x8; |
594 | 574 |
595 set_mi_row_col(&cpi->common, xd, | 575 set_mi_row_col(xd, tile, |
596 mi_row, num_8x8_blocks_high_lookup[m->mbmi.sb_type], | 576 mi_row, num_8x8_blocks_high_lookup[m->mbmi.sb_type], |
597 mi_col, num_8x8_blocks_wide_lookup[m->mbmi.sb_type]); | 577 mi_col, num_8x8_blocks_wide_lookup[m->mbmi.sb_type], |
598 if ((cm->frame_type == KEY_FRAME) || cm->intra_only) { | 578 cm->mi_rows, cm->mi_cols); |
| 579 if (frame_is_intra_only(cm)) { |
599 write_mb_modes_kf(cpi, mi_8x8, bc); | 580 write_mb_modes_kf(cpi, mi_8x8, bc); |
600 #ifdef ENTROPY_STATS | 581 #ifdef ENTROPY_STATS |
601 active_section = 8; | 582 active_section = 8; |
602 #endif | 583 #endif |
603 } else { | 584 } else { |
604 pack_inter_mode_mvs(cpi, m, bc); | 585 pack_inter_mode_mvs(cpi, m, bc); |
605 #ifdef ENTROPY_STATS | 586 #ifdef ENTROPY_STATS |
606 active_section = 1; | 587 active_section = 1; |
607 #endif | 588 #endif |
608 } | 589 } |
609 | 590 |
610 assert(*tok < tok_end); | 591 assert(*tok < tok_end); |
611 pack_mb_tokens(bc, tok, tok_end); | 592 pack_mb_tokens(bc, tok, tok_end); |
612 } | 593 } |
613 | 594 |
614 static void write_modes_sb(VP9_COMP *cpi, MODE_INFO **mi_8x8, vp9_writer *bc, | 595 static void write_partition(PARTITION_TYPE partition, |
| 596 int hbs, int mi_rows, int mi_cols, |
| 597 int mi_row, int mi_col, |
| 598 vp9_prob probs[PARTITION_TYPES - 1], |
| 599 vp9_writer *w) { |
| 600 const int has_rows = (mi_row + hbs) < mi_rows; |
| 601 const int has_cols = (mi_col + hbs) < mi_cols; |
| 602 |
| 603 if (has_rows && has_cols) { |
| 604 write_token(w, vp9_partition_tree, probs, |
| 605 &vp9_partition_encodings[partition]); |
| 606 } else if (!has_rows && has_cols) { |
| 607 assert(partition == PARTITION_SPLIT || partition == PARTITION_HORZ); |
| 608 vp9_write(w, partition == PARTITION_SPLIT, probs[1]); |
| 609 } else if (has_rows && !has_cols) { |
| 610 assert(partition == PARTITION_SPLIT || partition == PARTITION_VERT); |
| 611 vp9_write(w, partition == PARTITION_SPLIT, probs[2]); |
| 612 } else { |
| 613 assert(partition == PARTITION_SPLIT); |
| 614 } |
| 615 } |
| 616 |
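Note (editor's addition, not part of the CL): the new write_partition helper only spends bits on choices the decoder cannot infer from the frame size. If the block's lower half lies below the bottom edge or its right half lies past the right edge, the choice collapses to a single yes/no bit (split vs. the one legal non-split type), and a block that is off both edges is always split and costs nothing. A self-contained sketch of the legality rule implied by the asserts above (hypothetical helper, not a libvpx API):

#include <stdbool.h>

typedef enum { PART_NONE, PART_HORZ, PART_VERT, PART_SPLIT } part_type;

/* Partition choices that remain codable when the block straddles the
 * bottom (!has_rows) and/or right (!has_cols) frame edge. */
static bool partition_is_codable(part_type p, bool has_rows, bool has_cols) {
  if (has_rows && has_cols)
    return true;                               /* full token: any of the four */
  if (!has_rows && has_cols)
    return p == PART_SPLIT || p == PART_HORZ;  /* one bit: split vs. horz */
  if (has_rows && !has_cols)
    return p == PART_SPLIT || p == PART_VERT;  /* one bit: split vs. vert */
  return p == PART_SPLIT;                      /* off both edges: inferred, zero bits */
}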
| 617 static void write_modes_sb(VP9_COMP *cpi, const TileInfo *const tile, |
| 618 MODE_INFO **mi_8x8, vp9_writer *bc, |
615 TOKENEXTRA **tok, TOKENEXTRA *tok_end, | 619 TOKENEXTRA **tok, TOKENEXTRA *tok_end, |
616 int mi_row, int mi_col, BLOCK_SIZE bsize) { | 620 int mi_row, int mi_col, BLOCK_SIZE bsize, |
| 621 int index) { |
617 VP9_COMMON *const cm = &cpi->common; | 622 VP9_COMMON *const cm = &cpi->common; |
618 MACROBLOCKD *xd = &cpi->mb.e_mbd; | |
619 const int mis = cm->mode_info_stride; | 623 const int mis = cm->mode_info_stride; |
620 int bsl = b_width_log2(bsize); | 624 int bsl = b_width_log2(bsize); |
621 int bs = (1 << bsl) / 4; // mode_info step for subsize | 625 int bs = (1 << bsl) / 4; // mode_info step for subsize |
622 int n; | 626 int n; |
623 PARTITION_TYPE partition = PARTITION_NONE; | 627 PARTITION_TYPE partition = PARTITION_NONE; |
624 BLOCK_SIZE subsize; | 628 BLOCK_SIZE subsize; |
625 MODE_INFO *m = mi_8x8[0]; | 629 MODE_INFO *m = mi_8x8[0]; |
626 | 630 |
627 if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) | 631 if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) |
628 return; | 632 return; |
629 | 633 |
630 partition = partition_lookup[bsl][m->mbmi.sb_type]; | 634 partition = partition_lookup[bsl][m->mbmi.sb_type]; |
631 | 635 |
632 if (bsize < BLOCK_8X8) | 636 if (bsize < BLOCK_8X8) { |
633 if (xd->ab_index > 0) | 637 if (index > 0) |
634 return; | 638 return; |
635 | 639 } else { |
636 if (bsize >= BLOCK_8X8) { | 640 const int ctx = partition_plane_context(cpi->above_seg_context, |
637 int pl; | 641 cpi->left_seg_context, |
638 const int idx = check_bsize_coverage(bs, cm->mi_rows, cm->mi_cols, | 642 mi_row, mi_col, bsize); |
639 mi_row, mi_col); | 643 write_partition(partition, bs, cm->mi_rows, cm->mi_cols, mi_row, mi_col, |
640 set_partition_seg_context(cm, xd, mi_row, mi_col); | 644 cm->fc.partition_prob[cm->frame_type][ctx], bc); |
641 pl = partition_plane_context(xd, bsize); | |
642 // encode the partition information | |
643 if (idx == 0) | |
644 write_token(bc, vp9_partition_tree, | |
645 cm->fc.partition_prob[cm->frame_type][pl], | |
646 vp9_partition_encodings + partition); | |
647 else if (idx > 0) | |
648 vp9_write(bc, partition == PARTITION_SPLIT, | |
649 cm->fc.partition_prob[cm->frame_type][pl][idx]); | |
650 } | 645 } |
651 | 646 |
652 subsize = get_subsize(bsize, partition); | 647 subsize = get_subsize(bsize, partition); |
653 *(get_sb_index(xd, subsize)) = 0; | |
654 | 648 |
655 switch (partition) { | 649 switch (partition) { |
656 case PARTITION_NONE: | 650 case PARTITION_NONE: |
657 write_modes_b(cpi, mi_8x8, bc, tok, tok_end, mi_row, mi_col); | 651 write_modes_b(cpi, tile, mi_8x8, bc, tok, tok_end, mi_row, mi_col, 0); |
658 break; | 652 break; |
659 case PARTITION_HORZ: | 653 case PARTITION_HORZ: |
660 write_modes_b(cpi, mi_8x8, bc, tok, tok_end, mi_row, mi_col); | 654 write_modes_b(cpi, tile, mi_8x8, bc, tok, tok_end, mi_row, mi_col, 0); |
661 *(get_sb_index(xd, subsize)) = 1; | |
662 if ((mi_row + bs) < cm->mi_rows) | 655 if ((mi_row + bs) < cm->mi_rows) |
663 write_modes_b(cpi, mi_8x8 + bs * mis, bc, tok, tok_end, mi_row + bs, | 656 write_modes_b(cpi, tile, mi_8x8 + bs * mis, bc, tok, tok_end, |
664 mi_col); | 657 mi_row + bs, mi_col, 1); |
665 break; | 658 break; |
666 case PARTITION_VERT: | 659 case PARTITION_VERT: |
667 write_modes_b(cpi, mi_8x8, bc, tok, tok_end, mi_row, mi_col); | 660 write_modes_b(cpi, tile, mi_8x8, bc, tok, tok_end, mi_row, mi_col, 0); |
668 *(get_sb_index(xd, subsize)) = 1; | |
669 if ((mi_col + bs) < cm->mi_cols) | 661 if ((mi_col + bs) < cm->mi_cols) |
670 write_modes_b(cpi, mi_8x8 + bs, bc, tok, tok_end, mi_row, mi_col + bs); | 662 write_modes_b(cpi, tile, mi_8x8 + bs, bc, tok, tok_end, |
| 663 mi_row, mi_col + bs, 1); |
671 break; | 664 break; |
672 case PARTITION_SPLIT: | 665 case PARTITION_SPLIT: |
673 for (n = 0; n < 4; n++) { | 666 for (n = 0; n < 4; n++) { |
674 int j = n >> 1, i = n & 0x01; | 667 const int j = n >> 1, i = n & 1; |
675 *(get_sb_index(xd, subsize)) = n; | 668 write_modes_sb(cpi, tile, mi_8x8 + j * bs * mis + i * bs, bc, |
676 write_modes_sb(cpi, mi_8x8 + j * bs * mis + i * bs, bc, tok, tok_end, | 669 tok, tok_end, |
677 mi_row + j * bs, mi_col + i * bs, subsize); | 670 mi_row + j * bs, mi_col + i * bs, subsize, n); |
678 } | 671 } |
679 break; | 672 break; |
680 default: | 673 default: |
681 assert(0); | 674 assert(0); |
682 } | 675 } |
683 | 676 |
684 // update partition context | 677 // update partition context |
685 if (bsize >= BLOCK_8X8 && | 678 if (bsize >= BLOCK_8X8 && |
686 (bsize == BLOCK_8X8 || partition != PARTITION_SPLIT)) { | 679 (bsize == BLOCK_8X8 || partition != PARTITION_SPLIT)) |
687 set_partition_seg_context(cm, xd, mi_row, mi_col); | 680 update_partition_context(cpi->above_seg_context, cpi->left_seg_context, |
688 update_partition_context(xd, subsize, bsize); | 681 mi_row, mi_col, subsize, bsize); |
689 } | |
690 } | 682 } |
691 | 683 |
692 static void write_modes(VP9_COMP *cpi, vp9_writer* const bc, | 684 static void write_modes(VP9_COMP *cpi, const TileInfo *const tile, |
| 685 vp9_writer* const bc, |
693 TOKENEXTRA **tok, TOKENEXTRA *tok_end) { | 686 TOKENEXTRA **tok, TOKENEXTRA *tok_end) { |
694 VP9_COMMON *const cm = &cpi->common; | 687 VP9_COMMON *const cm = &cpi->common; |
695 const int mis = cm->mode_info_stride; | 688 const int mis = cm->mode_info_stride; |
696 int mi_row, mi_col; | 689 int mi_row, mi_col; |
697 MODE_INFO **mi_8x8 = cm->mi_grid_visible; | 690 MODE_INFO **mi_8x8 = cm->mi_grid_visible; |
698 MODE_INFO **m_8x8; | 691 MODE_INFO **m_8x8; |
699 | 692 |
700 mi_8x8 += cm->cur_tile_mi_col_start + cm->cur_tile_mi_row_start * mis; | 693 mi_8x8 += tile->mi_col_start + tile->mi_row_start * mis; |
701 | 694 |
702 for (mi_row = cm->cur_tile_mi_row_start; mi_row < cm->cur_tile_mi_row_end; | 695 for (mi_row = tile->mi_row_start; mi_row < tile->mi_row_end; |
703 mi_row += 8, mi_8x8 += 8 * mis) { | 696 mi_row += 8, mi_8x8 += 8 * mis) { |
704 m_8x8 = mi_8x8; | 697 m_8x8 = mi_8x8; |
705 vp9_zero(cm->left_seg_context); | 698 vp9_zero(cpi->left_seg_context); |
706 for (mi_col = cm->cur_tile_mi_col_start; mi_col < cm->cur_tile_mi_col_end; | 699 for (mi_col = tile->mi_col_start; mi_col < tile->mi_col_end; |
707 mi_col += MI_BLOCK_SIZE, m_8x8 += MI_BLOCK_SIZE) { | 700 mi_col += MI_BLOCK_SIZE, m_8x8 += MI_BLOCK_SIZE) { |
708 write_modes_sb(cpi, m_8x8, bc, tok, tok_end, mi_row, mi_col, | 701 write_modes_sb(cpi, tile, m_8x8, bc, tok, tok_end, mi_row, mi_col, |
709 BLOCK_64X64); | 702 BLOCK_64X64, 0); |
710 } | 703 } |
711 } | 704 } |
712 } | 705 } |
713 | 706 |
714 /* This function is used for debugging probability trees. */ | |
715 static void print_prob_tree(vp9_coeff_probs *coef_probs, int block_types) { | |
716 /* print coef probability tree */ | |
717 int i, j, k, l, m; | |
718 FILE *f = fopen("enc_tree_probs.txt", "a"); | |
719 fprintf(f, "{\n"); | |
720 for (i = 0; i < block_types; i++) { | |
721 fprintf(f, " {\n"); | |
722 for (j = 0; j < REF_TYPES; ++j) { | |
723 fprintf(f, " {\n"); | |
724 for (k = 0; k < COEF_BANDS; k++) { | |
725 fprintf(f, " {\n"); | |
726 for (l = 0; l < PREV_COEF_CONTEXTS; l++) { | |
727 fprintf(f, " {"); | |
728 for (m = 0; m < ENTROPY_NODES; m++) { | |
729 fprintf(f, "%3u, ", | |
730 (unsigned int)(coef_probs[i][j][k][l][m])); | |
731 } | |
732 } | |
733 fprintf(f, " }\n"); | |
734 } | |
735 fprintf(f, " }\n"); | |
736 } | |
737 fprintf(f, " }\n"); | |
738 } | |
739 fprintf(f, "}\n"); | |
740 fclose(f); | |
741 } | |
742 | |
743 static void build_tree_distribution(VP9_COMP *cpi, TX_SIZE tx_size) { | 707 static void build_tree_distribution(VP9_COMP *cpi, TX_SIZE tx_size) { |
744 vp9_coeff_probs_model *coef_probs = cpi->frame_coef_probs[tx_size]; | 708 vp9_coeff_probs_model *coef_probs = cpi->frame_coef_probs[tx_size]; |
745 vp9_coeff_count *coef_counts = cpi->coef_counts[tx_size]; | 709 vp9_coeff_count *coef_counts = cpi->coef_counts[tx_size]; |
746 unsigned int (*eob_branch_ct)[REF_TYPES][COEF_BANDS][PREV_COEF_CONTEXTS] = | 710 unsigned int (*eob_branch_ct)[REF_TYPES][COEF_BANDS][PREV_COEF_CONTEXTS] = |
747 cpi->common.counts.eob_branch[tx_size]; | 711 cpi->common.counts.eob_branch[tx_size]; |
748 vp9_coeff_stats *coef_branch_ct = cpi->frame_branch_ct[tx_size]; | 712 vp9_coeff_stats *coef_branch_ct = cpi->frame_branch_ct[tx_size]; |
749 vp9_prob full_probs[ENTROPY_NODES]; | 713 vp9_prob full_probs[ENTROPY_NODES]; |
750 int i, j, k, l; | 714 int i, j, k, l; |
751 | 715 |
752 for (i = 0; i < BLOCK_TYPES; ++i) { | 716 for (i = 0; i < BLOCK_TYPES; ++i) { |
(...skipping 34 matching lines...) |
787 for (t = TX_4X4; t <= TX_32X32; t++) | 751 for (t = TX_4X4; t <= TX_32X32; t++) |
788 build_tree_distribution(cpi, t); | 752 build_tree_distribution(cpi, t); |
789 } | 753 } |
790 | 754 |
791 static void update_coef_probs_common(vp9_writer* const bc, VP9_COMP *cpi, | 755 static void update_coef_probs_common(vp9_writer* const bc, VP9_COMP *cpi, |
792 TX_SIZE tx_size) { | 756 TX_SIZE tx_size) { |
793 vp9_coeff_probs_model *new_frame_coef_probs = cpi->frame_coef_probs[tx_size]; | 757 vp9_coeff_probs_model *new_frame_coef_probs = cpi->frame_coef_probs[tx_size]; |
794 vp9_coeff_probs_model *old_frame_coef_probs = | 758 vp9_coeff_probs_model *old_frame_coef_probs = |
795 cpi->common.fc.coef_probs[tx_size]; | 759 cpi->common.fc.coef_probs[tx_size]; |
796 vp9_coeff_stats *frame_branch_ct = cpi->frame_branch_ct[tx_size]; | 760 vp9_coeff_stats *frame_branch_ct = cpi->frame_branch_ct[tx_size]; |
797 const vp9_prob upd = VP9_COEF_UPDATE_PROB; | 761 const vp9_prob upd = DIFF_UPDATE_PROB; |
798 const int entropy_nodes_update = UNCONSTRAINED_NODES; | 762 const int entropy_nodes_update = UNCONSTRAINED_NODES; |
799 int i, j, k, l, t; | 763 int i, j, k, l, t; |
800 switch (cpi->sf.use_fast_coef_updates) { | 764 switch (cpi->sf.use_fast_coef_updates) { |
801 case 0: { | 765 case 0: { |
802 /* dry run to see if there is any udpate at all needed */ | 766 /* dry run to see if there is any udpate at all needed */ |
803 int savings = 0; | 767 int savings = 0; |
804 int update[2] = {0, 0}; | 768 int update[2] = {0, 0}; |
805 for (i = 0; i < BLOCK_TYPES; ++i) { | 769 for (i = 0; i < BLOCK_TYPES; ++i) { |
806 for (j = 0; j < REF_TYPES; ++j) { | 770 for (j = 0; j < REF_TYPES; ++j) { |
807 for (k = 0; k < COEF_BANDS; ++k) { | 771 for (k = 0; k < COEF_BANDS; ++k) { |
(...skipping 34 matching lines...) |
842 } | 806 } |
843 vp9_write_bit(bc, 1); | 807 vp9_write_bit(bc, 1); |
844 for (i = 0; i < BLOCK_TYPES; ++i) { | 808 for (i = 0; i < BLOCK_TYPES; ++i) { |
845 for (j = 0; j < REF_TYPES; ++j) { | 809 for (j = 0; j < REF_TYPES; ++j) { |
846 for (k = 0; k < COEF_BANDS; ++k) { | 810 for (k = 0; k < COEF_BANDS; ++k) { |
847 for (l = 0; l < PREV_COEF_CONTEXTS; ++l) { | 811 for (l = 0; l < PREV_COEF_CONTEXTS; ++l) { |
848 // calc probs and branch cts for this frame only | 812 // calc probs and branch cts for this frame only |
849 for (t = 0; t < entropy_nodes_update; ++t) { | 813 for (t = 0; t < entropy_nodes_update; ++t) { |
850 vp9_prob newp = new_frame_coef_probs[i][j][k][l][t]; | 814 vp9_prob newp = new_frame_coef_probs[i][j][k][l][t]; |
851 vp9_prob *oldp = old_frame_coef_probs[i][j][k][l] + t; | 815 vp9_prob *oldp = old_frame_coef_probs[i][j][k][l] + t; |
852 const vp9_prob upd = VP9_COEF_UPDATE_PROB; | 816 const vp9_prob upd = DIFF_UPDATE_PROB; |
853 int s; | 817 int s; |
854 int u = 0; | 818 int u = 0; |
855 if (l >= 3 && k == 0) | 819 if (l >= 3 && k == 0) |
856 continue; | 820 continue; |
857 if (t == PIVOT_NODE) | 821 if (t == PIVOT_NODE) |
858 s = vp9_prob_diff_update_savings_search_model( | 822 s = vp9_prob_diff_update_savings_search_model( |
859 frame_branch_ct[i][j][k][l][0], | 823 frame_branch_ct[i][j][k][l][0], |
860 old_frame_coef_probs[i][j][k][l], &newp, upd, i, j); | 824 old_frame_coef_probs[i][j][k][l], &newp, upd, i, j); |
861 else | 825 else |
862 s = vp9_prob_diff_update_savings_search( | 826 s = vp9_prob_diff_update_savings_search( |
(...skipping 262 matching lines...) |
1125 | 1089 |
1126 // Probabilities | 1090 // Probabilities |
1127 if (cm->tx_mode == TX_MODE_SELECT) { | 1091 if (cm->tx_mode == TX_MODE_SELECT) { |
1128 int i, j; | 1092 int i, j; |
1129 unsigned int ct_8x8p[TX_SIZES - 3][2]; | 1093 unsigned int ct_8x8p[TX_SIZES - 3][2]; |
1130 unsigned int ct_16x16p[TX_SIZES - 2][2]; | 1094 unsigned int ct_16x16p[TX_SIZES - 2][2]; |
1131 unsigned int ct_32x32p[TX_SIZES - 1][2]; | 1095 unsigned int ct_32x32p[TX_SIZES - 1][2]; |
1132 | 1096 |
1133 | 1097 |
1134 for (i = 0; i < TX_SIZE_CONTEXTS; i++) { | 1098 for (i = 0; i < TX_SIZE_CONTEXTS; i++) { |
1135 tx_counts_to_branch_counts_8x8(cm->counts.tx.p8x8[i], | 1099 tx_counts_to_branch_counts_8x8(cm->counts.tx.p8x8[i], ct_8x8p); |
1136 ct_8x8p); | |
1137 for (j = 0; j < TX_SIZES - 3; j++) | 1100 for (j = 0; j < TX_SIZES - 3; j++) |
1138 vp9_cond_prob_diff_update(w, &cm->fc.tx_probs.p8x8[i][j], | 1101 vp9_cond_prob_diff_update(w, &cm->fc.tx_probs.p8x8[i][j], ct_8x8p[j]); |
1139 MODE_UPDATE_PROB, ct_8x8p[j]); | |
1140 } | 1102 } |
1141 | 1103 |
1142 for (i = 0; i < TX_SIZE_CONTEXTS; i++) { | 1104 for (i = 0; i < TX_SIZE_CONTEXTS; i++) { |
1143 tx_counts_to_branch_counts_16x16(cm->counts.tx.p16x16[i], | 1105 tx_counts_to_branch_counts_16x16(cm->counts.tx.p16x16[i], ct_16x16p); |
1144 ct_16x16p); | |
1145 for (j = 0; j < TX_SIZES - 2; j++) | 1106 for (j = 0; j < TX_SIZES - 2; j++) |
1146 vp9_cond_prob_diff_update(w, &cm->fc.tx_probs.p16x16[i][j], | 1107 vp9_cond_prob_diff_update(w, &cm->fc.tx_probs.p16x16[i][j], |
1147 MODE_UPDATE_PROB, ct_16x16p[j]); | 1108 ct_16x16p[j]); |
1148 } | 1109 } |
1149 | 1110 |
1150 for (i = 0; i < TX_SIZE_CONTEXTS; i++) { | 1111 for (i = 0; i < TX_SIZE_CONTEXTS; i++) { |
1151 tx_counts_to_branch_counts_32x32(cm->counts.tx.p32x32[i], ct_32x32p); | 1112 tx_counts_to_branch_counts_32x32(cm->counts.tx.p32x32[i], ct_32x32p); |
1152 for (j = 0; j < TX_SIZES - 1; j++) | 1113 for (j = 0; j < TX_SIZES - 1; j++) |
1153 vp9_cond_prob_diff_update(w, &cm->fc.tx_probs.p32x32[i][j], | 1114 vp9_cond_prob_diff_update(w, &cm->fc.tx_probs.p32x32[i][j], |
1154 MODE_UPDATE_PROB, ct_32x32p[j]); | 1115 ct_32x32p[j]); |
1155 } | 1116 } |
1156 #ifdef MODE_STATS | 1117 #ifdef MODE_STATS |
1157 if (!cpi->dummy_packing) | 1118 if (!cpi->dummy_packing) |
1158 update_tx_count_stats(cm); | 1119 update_tx_count_stats(cm); |
1159 #endif | 1120 #endif |
1160 } | 1121 } |
1161 } | 1122 } |
1162 | 1123 |
1163 static void write_interp_filter_type(INTERPOLATIONFILTERTYPE type, | 1124 static void write_interp_filter_type(INTERPOLATION_TYPE type, |
1164 struct vp9_write_bit_buffer *wb) { | 1125 struct vp9_write_bit_buffer *wb) { |
1165 const int type_to_literal[] = { 1, 0, 2, 3 }; | 1126 const int type_to_literal[] = { 1, 0, 2, 3 }; |
1166 | 1127 |
1167 vp9_wb_write_bit(wb, type == SWITCHABLE); | 1128 vp9_wb_write_bit(wb, type == SWITCHABLE); |
1168 if (type != SWITCHABLE) | 1129 if (type != SWITCHABLE) |
1169 vp9_wb_write_literal(wb, type_to_literal[type], 2); | 1130 vp9_wb_write_literal(wb, type_to_literal[type], 2); |
1170 } | 1131 } |
1171 | 1132 |
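Note (editor's addition, not part of the CL): the 2-bit literal written by write_interp_filter_type does not follow the internal filter enum order; type_to_literal[] = { 1, 0, 2, 3 } swaps the first two entries, and because that permutation is its own inverse the matching decode table holds the same values. A tiny round-trip check under that assumption:

#include <assert.h>

int main(void) {
  static const int type_to_literal[] = { 1, 0, 2, 3 };  /* table from write_interp_filter_type */
  static const int literal_to_type[] = { 1, 0, 2, 3 };  /* inverse: swapping 0 and 1 is self-inverse */
  int t;
  for (t = 0; t < 4; ++t)
    assert(literal_to_type[type_to_literal[t]] == t);   /* encode -> decode round-trips */
  return 0;
}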
1172 static void fix_mcomp_filter_type(VP9_COMP *cpi) { | 1133 static void fix_mcomp_filter_type(VP9_COMP *cpi) { |
1173 VP9_COMMON *const cm = &cpi->common; | 1134 VP9_COMMON *const cm = &cpi->common; |
1174 | 1135 |
1175 if (cm->mcomp_filter_type == SWITCHABLE) { | 1136 if (cm->mcomp_filter_type == SWITCHABLE) { |
1176 // Check to see if only one of the filters is actually used | 1137 // Check to see if only one of the filters is actually used |
1177 int count[SWITCHABLE_FILTERS]; | 1138 int count[SWITCHABLE_FILTERS]; |
1178 int i, j, c = 0; | 1139 int i, j, c = 0; |
1179 for (i = 0; i < SWITCHABLE_FILTERS; ++i) { | 1140 for (i = 0; i < SWITCHABLE_FILTERS; ++i) { |
1180 count[i] = 0; | 1141 count[i] = 0; |
1181 for (j = 0; j <= SWITCHABLE_FILTERS; ++j) | 1142 for (j = 0; j < SWITCHABLE_FILTER_CONTEXTS; ++j) |
1182 count[i] += cm->counts.switchable_interp[j][i]; | 1143 count[i] += cm->counts.switchable_interp[j][i]; |
1183 c += (count[i] > 0); | 1144 c += (count[i] > 0); |
1184 } | 1145 } |
1185 if (c == 1) { | 1146 if (c == 1) { |
1186 // Only one filter is used. So set the filter at frame level | 1147 // Only one filter is used. So set the filter at frame level |
1187 for (i = 0; i < SWITCHABLE_FILTERS; ++i) { | 1148 for (i = 0; i < SWITCHABLE_FILTERS; ++i) { |
1188 if (count[i]) { | 1149 if (count[i]) { |
1189 cm->mcomp_filter_type = i; | 1150 cm->mcomp_filter_type = i; |
1190 break; | 1151 break; |
1191 } | 1152 } |
(...skipping 59 matching lines...) |
1251 static size_t encode_tiles(VP9_COMP *cpi, uint8_t *data_ptr) { | 1212 static size_t encode_tiles(VP9_COMP *cpi, uint8_t *data_ptr) { |
1252 VP9_COMMON *const cm = &cpi->common; | 1213 VP9_COMMON *const cm = &cpi->common; |
1253 vp9_writer residual_bc; | 1214 vp9_writer residual_bc; |
1254 | 1215 |
1255 int tile_row, tile_col; | 1216 int tile_row, tile_col; |
1256 TOKENEXTRA *tok[4][1 << 6], *tok_end; | 1217 TOKENEXTRA *tok[4][1 << 6], *tok_end; |
1257 size_t total_size = 0; | 1218 size_t total_size = 0; |
1258 const int tile_cols = 1 << cm->log2_tile_cols; | 1219 const int tile_cols = 1 << cm->log2_tile_cols; |
1259 const int tile_rows = 1 << cm->log2_tile_rows; | 1220 const int tile_rows = 1 << cm->log2_tile_rows; |
1260 | 1221 |
1261 vpx_memset(cm->above_seg_context, 0, sizeof(PARTITION_CONTEXT) * | 1222 vpx_memset(cpi->above_seg_context, 0, sizeof(*cpi->above_seg_context) * |
1262 mi_cols_aligned_to_sb(cm->mi_cols)); | 1223 mi_cols_aligned_to_sb(cm->mi_cols)); |
1263 | 1224 |
1264 tok[0][0] = cpi->tok; | 1225 tok[0][0] = cpi->tok; |
1265 for (tile_row = 0; tile_row < tile_rows; tile_row++) { | 1226 for (tile_row = 0; tile_row < tile_rows; tile_row++) { |
1266 if (tile_row) | 1227 if (tile_row) |
1267 tok[tile_row][0] = tok[tile_row - 1][tile_cols - 1] + | 1228 tok[tile_row][0] = tok[tile_row - 1][tile_cols - 1] + |
1268 cpi->tok_count[tile_row - 1][tile_cols - 1]; | 1229 cpi->tok_count[tile_row - 1][tile_cols - 1]; |
1269 | 1230 |
1270 for (tile_col = 1; tile_col < tile_cols; tile_col++) | 1231 for (tile_col = 1; tile_col < tile_cols; tile_col++) |
1271 tok[tile_row][tile_col] = tok[tile_row][tile_col - 1] + | 1232 tok[tile_row][tile_col] = tok[tile_row][tile_col - 1] + |
1272 cpi->tok_count[tile_row][tile_col - 1]; | 1233 cpi->tok_count[tile_row][tile_col - 1]; |
1273 } | 1234 } |
1274 | 1235 |
1275 for (tile_row = 0; tile_row < tile_rows; tile_row++) { | 1236 for (tile_row = 0; tile_row < tile_rows; tile_row++) { |
1276 vp9_get_tile_row_offsets(cm, tile_row); | |
1277 for (tile_col = 0; tile_col < tile_cols; tile_col++) { | 1237 for (tile_col = 0; tile_col < tile_cols; tile_col++) { |
1278 vp9_get_tile_col_offsets(cm, tile_col); | 1238 TileInfo tile; |
| 1239 |
| 1240 vp9_tile_init(&tile, cm, 0, tile_col); |
1279 tok_end = tok[tile_row][tile_col] + cpi->tok_count[tile_row][tile_col]; | 1241 tok_end = tok[tile_row][tile_col] + cpi->tok_count[tile_row][tile_col]; |
1280 | 1242 |
1281 if (tile_col < tile_cols - 1 || tile_row < tile_rows - 1) | 1243 if (tile_col < tile_cols - 1 || tile_row < tile_rows - 1) |
1282 vp9_start_encode(&residual_bc, data_ptr + total_size + 4); | 1244 vp9_start_encode(&residual_bc, data_ptr + total_size + 4); |
1283 else | 1245 else |
1284 vp9_start_encode(&residual_bc, data_ptr + total_size); | 1246 vp9_start_encode(&residual_bc, data_ptr + total_size); |
1285 | 1247 |
1286 write_modes(cpi, &residual_bc, &tok[tile_row][tile_col], tok_end); | 1248 write_modes(cpi, &tile, &residual_bc, &tok[tile_row][tile_col], tok_end); |
1287 assert(tok[tile_row][tile_col] == tok_end); | 1249 assert(tok[tile_row][tile_col] == tok_end); |
1288 vp9_stop_encode(&residual_bc); | 1250 vp9_stop_encode(&residual_bc); |
1289 if (tile_col < tile_cols - 1 || tile_row < tile_rows - 1) { | 1251 if (tile_col < tile_cols - 1 || tile_row < tile_rows - 1) { |
1290 // size of this tile | 1252 // size of this tile |
1291 write_be32(data_ptr + total_size, residual_bc.pos); | 1253 write_be32(data_ptr + total_size, residual_bc.pos); |
1292 total_size += 4; | 1254 total_size += 4; |
1293 } | 1255 } |
1294 | 1256 |
1295 total_size += residual_bc.pos; | 1257 total_size += residual_bc.pos; |
1296 } | 1258 } |
(...skipping 48 matching lines...) |
1345 | 1307 |
1346 if (!found) { | 1308 if (!found) { |
1347 vp9_wb_write_literal(wb, cm->width - 1, 16); | 1309 vp9_wb_write_literal(wb, cm->width - 1, 16); |
1348 vp9_wb_write_literal(wb, cm->height - 1, 16); | 1310 vp9_wb_write_literal(wb, cm->height - 1, 16); |
1349 } | 1311 } |
1350 | 1312 |
1351 write_display_size(cpi, wb); | 1313 write_display_size(cpi, wb); |
1352 } | 1314 } |
1353 | 1315 |
1354 static void write_sync_code(struct vp9_write_bit_buffer *wb) { | 1316 static void write_sync_code(struct vp9_write_bit_buffer *wb) { |
1355 vp9_wb_write_literal(wb, SYNC_CODE_0, 8); | 1317 vp9_wb_write_literal(wb, VP9_SYNC_CODE_0, 8); |
1356 vp9_wb_write_literal(wb, SYNC_CODE_1, 8); | 1318 vp9_wb_write_literal(wb, VP9_SYNC_CODE_1, 8); |
1357 vp9_wb_write_literal(wb, SYNC_CODE_2, 8); | 1319 vp9_wb_write_literal(wb, VP9_SYNC_CODE_2, 8); |
1358 } | 1320 } |
1359 | 1321 |
1360 static void write_uncompressed_header(VP9_COMP *cpi, | 1322 static void write_uncompressed_header(VP9_COMP *cpi, |
1361 struct vp9_write_bit_buffer *wb) { | 1323 struct vp9_write_bit_buffer *wb) { |
1362 VP9_COMMON *const cm = &cpi->common; | 1324 VP9_COMMON *const cm = &cpi->common; |
1363 MACROBLOCKD *const xd = &cpi->mb.e_mbd; | |
1364 | 1325 |
1365 // frame marker bits | 1326 vp9_wb_write_literal(wb, VP9_FRAME_MARKER, 2); |
1366 vp9_wb_write_literal(wb, 0x2, 2); | |
1367 | 1327 |
1368 // bitstream version. | 1328 // bitstream version. |
1369 // 00 - profile 0. 4:2:0 only | 1329 // 00 - profile 0. 4:2:0 only |
1370 // 10 - profile 1. adds 4:4:4, 4:2:2, alpha | 1330 // 10 - profile 1. adds 4:4:4, 4:2:2, alpha |
1371 vp9_wb_write_bit(wb, cm->version); | 1331 vp9_wb_write_bit(wb, cm->version); |
1372 vp9_wb_write_bit(wb, 0); | 1332 vp9_wb_write_bit(wb, 0); |
1373 | 1333 |
1374 vp9_wb_write_bit(wb, 0); | 1334 vp9_wb_write_bit(wb, 0); |
1375 vp9_wb_write_bit(wb, cm->frame_type); | 1335 vp9_wb_write_bit(wb, cm->frame_type); |
1376 vp9_wb_write_bit(wb, cm->show_frame); | 1336 vp9_wb_write_bit(wb, cm->show_frame); |
1377 vp9_wb_write_bit(wb, cm->error_resilient_mode); | 1337 vp9_wb_write_bit(wb, cm->error_resilient_mode); |
1378 | 1338 |
1379 if (cm->frame_type == KEY_FRAME) { | 1339 if (cm->frame_type == KEY_FRAME) { |
| 1340 const COLOR_SPACE cs = UNKNOWN; |
1380 write_sync_code(wb); | 1341 write_sync_code(wb); |
1381 // colorspaces | 1342 vp9_wb_write_literal(wb, cs, 3); |
1382 // 000 - Unknown | 1343 if (cs != SRGB) { |
1383 // 001 - BT.601 | |
1384 // 010 - BT.709 | |
1385 // 011 - SMPTE-170 | |
1386 // 100 - SMPTE-240 | |
1387 // 101 - Reserved | |
1388 // 110 - Reserved | |
1389 // 111 - sRGB (RGB) | |
1390 vp9_wb_write_literal(wb, 0, 3); | |
1391 if (1 /* colorspace != sRGB */) { | |
1392 vp9_wb_write_bit(wb, 0); // 0: [16, 235] (i.e. xvYCC), 1: [0, 255] | 1344 vp9_wb_write_bit(wb, 0); // 0: [16, 235] (i.e. xvYCC), 1: [0, 255] |
1393 if (cm->version == 1) { | 1345 if (cm->version == 1) { |
1394 vp9_wb_write_bit(wb, cm->subsampling_x); | 1346 vp9_wb_write_bit(wb, cm->subsampling_x); |
1395 vp9_wb_write_bit(wb, cm->subsampling_y); | 1347 vp9_wb_write_bit(wb, cm->subsampling_y); |
1396 vp9_wb_write_bit(wb, 0); // has extra plane | 1348 vp9_wb_write_bit(wb, 0); // has extra plane |
1397 } | 1349 } |
1398 } else { | 1350 } else { |
1399 assert(cm->version == 1); | 1351 assert(cm->version == 1); |
1400 vp9_wb_write_bit(wb, 0); // has extra plane | 1352 vp9_wb_write_bit(wb, 0); // has extra plane |
1401 } | 1353 } |
(...skipping 16 matching lines...) |
1418 } else { | 1370 } else { |
1419 int i; | 1371 int i; |
1420 vp9_wb_write_literal(wb, get_refresh_mask(cpi), NUM_REF_FRAMES); | 1372 vp9_wb_write_literal(wb, get_refresh_mask(cpi), NUM_REF_FRAMES); |
1421 for (i = 0; i < ALLOWED_REFS_PER_FRAME; ++i) { | 1373 for (i = 0; i < ALLOWED_REFS_PER_FRAME; ++i) { |
1422 vp9_wb_write_literal(wb, refs[i], NUM_REF_FRAMES_LOG2); | 1374 vp9_wb_write_literal(wb, refs[i], NUM_REF_FRAMES_LOG2); |
1423 vp9_wb_write_bit(wb, cm->ref_frame_sign_bias[LAST_FRAME + i]); | 1375 vp9_wb_write_bit(wb, cm->ref_frame_sign_bias[LAST_FRAME + i]); |
1424 } | 1376 } |
1425 | 1377 |
1426 write_frame_size_with_refs(cpi, wb); | 1378 write_frame_size_with_refs(cpi, wb); |
1427 | 1379 |
1428 vp9_wb_write_bit(wb, xd->allow_high_precision_mv); | 1380 vp9_wb_write_bit(wb, cm->allow_high_precision_mv); |
1429 | 1381 |
1430 fix_mcomp_filter_type(cpi); | 1382 fix_mcomp_filter_type(cpi); |
1431 write_interp_filter_type(cm->mcomp_filter_type, wb); | 1383 write_interp_filter_type(cm->mcomp_filter_type, wb); |
1432 } | 1384 } |
1433 } | 1385 } |
1434 | 1386 |
1435 if (!cm->error_resilient_mode) { | 1387 if (!cm->error_resilient_mode) { |
1436 vp9_wb_write_bit(wb, cm->refresh_frame_context); | 1388 vp9_wb_write_bit(wb, cm->refresh_frame_context); |
1437 vp9_wb_write_bit(wb, cm->frame_parallel_decoding_mode); | 1389 vp9_wb_write_bit(wb, cm->frame_parallel_decoding_mode); |
1438 } | 1390 } |
(...skipping 21 matching lines...) |
1460 encode_txfm_probs(cpi, &header_bc); | 1412 encode_txfm_probs(cpi, &header_bc); |
1461 | 1413 |
1462 update_coef_probs(cpi, &header_bc); | 1414 update_coef_probs(cpi, &header_bc); |
1463 | 1415 |
1464 #ifdef ENTROPY_STATS | 1416 #ifdef ENTROPY_STATS |
1465 active_section = 2; | 1417 active_section = 2; |
1466 #endif | 1418 #endif |
1467 | 1419 |
1468 vp9_update_skip_probs(cpi, &header_bc); | 1420 vp9_update_skip_probs(cpi, &header_bc); |
1469 | 1421 |
1470 if (cm->frame_type != KEY_FRAME) { | 1422 if (!frame_is_intra_only(cm)) { |
1471 int i; | 1423 int i; |
1472 #ifdef ENTROPY_STATS | 1424 #ifdef ENTROPY_STATS |
1473 active_section = 1; | 1425 active_section = 1; |
1474 #endif | 1426 #endif |
1475 | 1427 |
1476 update_inter_mode_probs(cm, &header_bc); | 1428 update_inter_mode_probs(cm, &header_bc); |
1477 vp9_zero(cm->counts.inter_mode); | 1429 vp9_zero(cm->counts.inter_mode); |
1478 | 1430 |
1479 if (cm->mcomp_filter_type == SWITCHABLE) | 1431 if (cm->mcomp_filter_type == SWITCHABLE) |
1480 update_switchable_interp_probs(cpi, &header_bc); | 1432 update_switchable_interp_probs(cpi, &header_bc); |
1481 | 1433 |
1482 for (i = 0; i < INTRA_INTER_CONTEXTS; i++) | 1434 for (i = 0; i < INTRA_INTER_CONTEXTS; i++) |
1483 vp9_cond_prob_diff_update(&header_bc, &fc->intra_inter_prob[i], | 1435 vp9_cond_prob_diff_update(&header_bc, &fc->intra_inter_prob[i], |
1484 MODE_UPDATE_PROB, | |
1485 cpi->intra_inter_count[i]); | 1436 cpi->intra_inter_count[i]); |
1486 | 1437 |
1487 if (cm->allow_comp_inter_inter) { | 1438 if (cm->allow_comp_inter_inter) { |
1488 const int comp_pred_mode = cpi->common.comp_pred_mode; | 1439 const int comp_pred_mode = cpi->common.comp_pred_mode; |
1489 const int use_compound_pred = comp_pred_mode != SINGLE_PREDICTION_ONLY; | 1440 const int use_compound_pred = comp_pred_mode != SINGLE_PREDICTION_ONLY; |
1490 const int use_hybrid_pred = comp_pred_mode == HYBRID_PREDICTION; | 1441 const int use_hybrid_pred = comp_pred_mode == HYBRID_PREDICTION; |
1491 | 1442 |
1492 vp9_write_bit(&header_bc, use_compound_pred); | 1443 vp9_write_bit(&header_bc, use_compound_pred); |
1493 if (use_compound_pred) { | 1444 if (use_compound_pred) { |
1494 vp9_write_bit(&header_bc, use_hybrid_pred); | 1445 vp9_write_bit(&header_bc, use_hybrid_pred); |
1495 if (use_hybrid_pred) | 1446 if (use_hybrid_pred) |
1496 for (i = 0; i < COMP_INTER_CONTEXTS; i++) | 1447 for (i = 0; i < COMP_INTER_CONTEXTS; i++) |
1497 vp9_cond_prob_diff_update(&header_bc, &fc->comp_inter_prob[i], | 1448 vp9_cond_prob_diff_update(&header_bc, &fc->comp_inter_prob[i], |
1498 MODE_UPDATE_PROB, | |
1499 cpi->comp_inter_count[i]); | 1449 cpi->comp_inter_count[i]); |
1500 } | 1450 } |
1501 } | 1451 } |
1502 | 1452 |
1503 if (cm->comp_pred_mode != COMP_PREDICTION_ONLY) { | 1453 if (cm->comp_pred_mode != COMP_PREDICTION_ONLY) { |
1504 for (i = 0; i < REF_CONTEXTS; i++) { | 1454 for (i = 0; i < REF_CONTEXTS; i++) { |
1505 vp9_cond_prob_diff_update(&header_bc, &fc->single_ref_prob[i][0], | 1455 vp9_cond_prob_diff_update(&header_bc, &fc->single_ref_prob[i][0], |
1506 MODE_UPDATE_PROB, | |
1507 cpi->single_ref_count[i][0]); | 1456 cpi->single_ref_count[i][0]); |
1508 vp9_cond_prob_diff_update(&header_bc, &fc->single_ref_prob[i][1], | 1457 vp9_cond_prob_diff_update(&header_bc, &fc->single_ref_prob[i][1], |
1509 MODE_UPDATE_PROB, | |
1510 cpi->single_ref_count[i][1]); | 1458 cpi->single_ref_count[i][1]); |
1511 } | 1459 } |
1512 } | 1460 } |
1513 | 1461 |
1514 if (cm->comp_pred_mode != SINGLE_PREDICTION_ONLY) | 1462 if (cm->comp_pred_mode != SINGLE_PREDICTION_ONLY) |
1515 for (i = 0; i < REF_CONTEXTS; i++) | 1463 for (i = 0; i < REF_CONTEXTS; i++) |
1516 vp9_cond_prob_diff_update(&header_bc, &fc->comp_ref_prob[i], | 1464 vp9_cond_prob_diff_update(&header_bc, &fc->comp_ref_prob[i], |
1517 MODE_UPDATE_PROB, | |
1518 cpi->comp_ref_count[i]); | 1465 cpi->comp_ref_count[i]); |
1519 | 1466 |
1520 update_mbintra_mode_probs(cpi, &header_bc); | 1467 update_mbintra_mode_probs(cpi, &header_bc); |
1521 | 1468 |
1522 for (i = 0; i < NUM_PARTITION_CONTEXTS; ++i) { | 1469 for (i = 0; i < PARTITION_CONTEXTS; ++i) { |
1523 vp9_prob pnew[PARTITION_TYPES - 1]; | 1470 vp9_prob pnew[PARTITION_TYPES - 1]; |
1524 unsigned int bct[PARTITION_TYPES - 1][2]; | 1471 unsigned int bct[PARTITION_TYPES - 1][2]; |
1525 update_mode(&header_bc, PARTITION_TYPES, | 1472 update_mode(&header_bc, PARTITION_TYPES, |
1526 vp9_partition_tree, pnew, | 1473 vp9_partition_tree, pnew, |
1527 fc->partition_prob[cm->frame_type][i], bct, | 1474 fc->partition_prob[cm->frame_type][i], bct, |
1528 (unsigned int *)cpi->partition_count[i]); | 1475 (unsigned int *)cpi->partition_count[i]); |
1529 } | 1476 } |
1530 | 1477 |
1531 vp9_write_nmv_probs(cpi, xd->allow_high_precision_mv, &header_bc); | 1478 vp9_write_nmv_probs(cpi, cm->allow_high_precision_mv, &header_bc); |
1532 } | 1479 } |
1533 | 1480 |
1534 vp9_stop_encode(&header_bc); | 1481 vp9_stop_encode(&header_bc); |
1535 assert(header_bc.pos <= 0xffff); | 1482 assert(header_bc.pos <= 0xffff); |
1536 | 1483 |
1537 return header_bc.pos; | 1484 return header_bc.pos; |
1538 } | 1485 } |
1539 | 1486 |
1540 void vp9_pack_bitstream(VP9_COMP *cpi, uint8_t *dest, unsigned long *size) { | 1487 void vp9_pack_bitstream(VP9_COMP *cpi, uint8_t *dest, unsigned long *size) { |
1541 uint8_t *data = dest; | 1488 uint8_t *data = dest; |
(...skipping 70 matching lines...) |
1612 "vp9_coef_update_probs_16x16[BLOCK_TYPES]"); | 1559 "vp9_coef_update_probs_16x16[BLOCK_TYPES]"); |
1613 print_tree_update_for_type(f, tree_update_hist[TX_32X32], BLOCK_TYPES, | 1560 print_tree_update_for_type(f, tree_update_hist[TX_32X32], BLOCK_TYPES, |
1614 "vp9_coef_update_probs_32x32[BLOCK_TYPES]"); | 1561 "vp9_coef_update_probs_32x32[BLOCK_TYPES]"); |
1615 | 1562 |
1616 fclose(f); | 1563 fclose(f); |
1617 f = fopen("treeupdate.bin", "wb"); | 1564 f = fopen("treeupdate.bin", "wb"); |
1618 fwrite(tree_update_hist, sizeof(tree_update_hist), 1, f); | 1565 fwrite(tree_update_hist, sizeof(tree_update_hist), 1, f); |
1619 fclose(f); | 1566 fclose(f); |
1620 } | 1567 } |
1621 #endif | 1568 #endif |