Chromium Code Reviews

Side by Side Diff: source/libvpx/vp9/encoder/vp9_bitstream.c

Issue 111463005: libvpx: Pull from upstream (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/deps/third_party/libvpx/
Patch Set: Created 7 years ago
1 /* 1 /*
2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved. 2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
3 * 3 *
4 * Use of this source code is governed by a BSD-style license 4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source 5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found 6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may 7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree. 8 * be found in the AUTHORS file in the root of the source tree.
9 */ 9 */
10 10
(...skipping 14 matching lines...)
25 #include "vp9/common/vp9_mvref_common.h" 25 #include "vp9/common/vp9_mvref_common.h"
26 #include "vp9/common/vp9_treecoder.h" 26 #include "vp9/common/vp9_treecoder.h"
27 #include "vp9/common/vp9_systemdependent.h" 27 #include "vp9/common/vp9_systemdependent.h"
28 #include "vp9/common/vp9_pragmas.h" 28 #include "vp9/common/vp9_pragmas.h"
29 29
30 #include "vp9/encoder/vp9_mcomp.h" 30 #include "vp9/encoder/vp9_mcomp.h"
31 #include "vp9/encoder/vp9_encodemv.h" 31 #include "vp9/encoder/vp9_encodemv.h"
32 #include "vp9/encoder/vp9_bitstream.h" 32 #include "vp9/encoder/vp9_bitstream.h"
33 #include "vp9/encoder/vp9_segmentation.h" 33 #include "vp9/encoder/vp9_segmentation.h"
34 #include "vp9/encoder/vp9_subexp.h" 34 #include "vp9/encoder/vp9_subexp.h"
35 #include "vp9/encoder/vp9_tokenize.h"
35 #include "vp9/encoder/vp9_write_bit_buffer.h" 36 #include "vp9/encoder/vp9_write_bit_buffer.h"
36 37
37 38
38 #if defined(SECTIONBITS_OUTPUT) 39 #if defined(SECTIONBITS_OUTPUT)
39 unsigned __int64 Sectionbits[500]; 40 unsigned __int64 Sectionbits[500];
40 #endif 41 #endif
41 42
42 #ifdef ENTROPY_STATS 43 #ifdef ENTROPY_STATS
43 int intra_mode_stats[INTRA_MODES] 44 int intra_mode_stats[INTRA_MODES]
44 [INTRA_MODES] 45 [INTRA_MODES]
45 [INTRA_MODES]; 46 [INTRA_MODES];
46 vp9_coeff_stats tree_update_hist[TX_SIZES][BLOCK_TYPES]; 47 vp9_coeff_stats tree_update_hist[TX_SIZES][PLANE_TYPES];
47 48
48 extern unsigned int active_section; 49 extern unsigned int active_section;
49 #endif 50 #endif
50 51
52 static struct vp9_token intra_mode_encodings[INTRA_MODES];
53 static struct vp9_token switchable_interp_encodings[SWITCHABLE_FILTERS];
54 static struct vp9_token partition_encodings[PARTITION_TYPES];
55 static struct vp9_token inter_mode_encodings[INTER_MODES];
51 56
52 #ifdef MODE_STATS 57 void vp9_entropy_mode_init() {
53 int64_t tx_count_32x32p_stats[TX_SIZE_CONTEXTS][TX_SIZES]; 58 vp9_tokens_from_tree(intra_mode_encodings, vp9_intra_mode_tree);
54 int64_t tx_count_16x16p_stats[TX_SIZE_CONTEXTS][TX_SIZES - 1]; 59 vp9_tokens_from_tree(switchable_interp_encodings, vp9_switchable_interp_tree);
55 int64_t tx_count_8x8p_stats[TX_SIZE_CONTEXTS][TX_SIZES - 2]; 60 vp9_tokens_from_tree(partition_encodings, vp9_partition_tree);
56 int64_t switchable_interp_stats[SWITCHABLE_FILTER_CONTEXTS][SWITCHABLE_FILTERS]; 61 vp9_tokens_from_tree(inter_mode_encodings, vp9_inter_mode_tree);
57
58 void init_tx_count_stats() {
59 vp9_zero(tx_count_32x32p_stats);
60 vp9_zero(tx_count_16x16p_stats);
61 vp9_zero(tx_count_8x8p_stats);
62 } 62 }
63 63
64 void init_switchable_interp_stats() { 64 static void write_intra_mode(vp9_writer *w, MB_PREDICTION_MODE mode,
65 vp9_zero(switchable_interp_stats); 65 const vp9_prob *probs) {
66 vp9_write_token(w, vp9_intra_mode_tree, probs, &intra_mode_encodings[mode]);
66 } 67 }
67 68
68 static void update_tx_count_stats(VP9_COMMON *cm) { 69 static void write_inter_mode(vp9_writer *w, MB_PREDICTION_MODE mode,
69 int i, j; 70 const vp9_prob *probs) {
70 for (i = 0; i < TX_SIZE_CONTEXTS; i++) { 71 assert(is_inter_mode(mode));
71 for (j = 0; j < TX_SIZES; j++) { 72 vp9_write_token(w, vp9_inter_mode_tree, probs,
72 tx_count_32x32p_stats[i][j] += cm->fc.tx_count_32x32p[i][j]; 73 &inter_mode_encodings[INTER_OFFSET(mode)]);
73 }
74 }
75 for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
76 for (j = 0; j < TX_SIZES - 1; j++) {
77 tx_count_16x16p_stats[i][j] += cm->fc.tx_count_16x16p[i][j];
78 }
79 }
80 for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
81 for (j = 0; j < TX_SIZES - 2; j++) {
82 tx_count_8x8p_stats[i][j] += cm->fc.tx_count_8x8p[i][j];
83 }
84 }
85 } 74 }
86 75
87 static void update_switchable_interp_stats(VP9_COMMON *cm) {
88 int i, j;
89 for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i)
90 for (j = 0; j < SWITCHABLE_FILTERS; ++j)
91 switchable_interp_stats[i][j] += cm->fc.switchable_interp_count[i][j];
92 }
93
94 void write_tx_count_stats() {
95 int i, j;
96 FILE *fp = fopen("tx_count.bin", "wb");
97 fwrite(tx_count_32x32p_stats, sizeof(tx_count_32x32p_stats), 1, fp);
98 fwrite(tx_count_16x16p_stats, sizeof(tx_count_16x16p_stats), 1, fp);
99 fwrite(tx_count_8x8p_stats, sizeof(tx_count_8x8p_stats), 1, fp);
100 fclose(fp);
101
102 printf(
103 "vp9_default_tx_count_32x32p[TX_SIZE_CONTEXTS][TX_SIZES] = {\n");
104 for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
105 printf(" { ");
106 for (j = 0; j < TX_SIZES; j++) {
107 printf("%"PRId64", ", tx_count_32x32p_stats[i][j]);
108 }
109 printf("},\n");
110 }
111 printf("};\n");
112 printf(
113 "vp9_default_tx_count_16x16p[TX_SIZE_CONTEXTS][TX_SIZES-1] = {\n");
114 for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
115 printf(" { ");
116 for (j = 0; j < TX_SIZES - 1; j++) {
117 printf("%"PRId64", ", tx_count_16x16p_stats[i][j]);
118 }
119 printf("},\n");
120 }
121 printf("};\n");
122 printf(
123 "vp9_default_tx_count_8x8p[TX_SIZE_CONTEXTS][TX_SIZES-2] = {\n");
124 for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
125 printf(" { ");
126 for (j = 0; j < TX_SIZES - 2; j++) {
127 printf("%"PRId64", ", tx_count_8x8p_stats[i][j]);
128 }
129 printf("},\n");
130 }
131 printf("};\n");
132 }
133
134 void write_switchable_interp_stats() {
135 int i, j;
136 FILE *fp = fopen("switchable_interp.bin", "wb");
137 fwrite(switchable_interp_stats, sizeof(switchable_interp_stats), 1, fp);
138 fclose(fp);
139
140 printf(
141 "vp9_default_switchable_filter_count[SWITCHABLE_FILTER_CONTEXTS]"
142 "[SWITCHABLE_FILTERS] = {\n");
143 for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++) {
144 printf(" { ");
145 for (j = 0; j < SWITCHABLE_FILTERS; j++) {
146 printf("%"PRId64", ", switchable_interp_stats[i][j]);
147 }
148 printf("},\n");
149 }
150 printf("};\n");
151 }
152 #endif
153
154 static INLINE void write_be32(uint8_t *p, int value) { 76 static INLINE void write_be32(uint8_t *p, int value) {
155 p[0] = value >> 24; 77 p[0] = value >> 24;
156 p[1] = value >> 16; 78 p[1] = value >> 16;
157 p[2] = value >> 8; 79 p[2] = value >> 8;
158 p[3] = value; 80 p[3] = value;
159 } 81 }
160 82
161 void vp9_encode_unsigned_max(struct vp9_write_bit_buffer *wb, 83 void vp9_encode_unsigned_max(struct vp9_write_bit_buffer *wb,
162 int data, int max) { 84 int data, int max) {
163 vp9_wb_write_literal(wb, data, get_unsigned_bits(max)); 85 vp9_wb_write_literal(wb, data, get_unsigned_bits(max));
164 } 86 }
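
Note: vp9_encode_unsigned_max() writes `data` as a fixed-width literal just wide enough for `max`, via get_unsigned_bits(). A minimal standalone sketch of that width computation, under the assumption that it is simply the bit-length of max (the toy_ name is hypothetical, not libvpx API):

#include <assert.h>

/* Bit-length of max: the smallest width that can hold any value in [0, max]. */
static int toy_unsigned_bits(unsigned int max) {
  int bits = 0;
  while (max) {
    ++bits;
    max >>= 1;
  }
  return bits;
}

int main(void) {
  assert(toy_unsigned_bits(7) == 3);    /* values 0..7 fit in 3 bits */
  assert(toy_unsigned_bits(100) == 7);  /* values 0..100 fit in 7 bits */
  return 0;
}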
165 87
166 static void update_mode( 88 static void prob_diff_update(const vp9_tree_index *tree,
167 vp9_writer *w, 89 vp9_prob probs[/*n - 1*/],
168 int n, 90 const unsigned int counts[/*n - 1*/],
169 vp9_tree tree, 91 int n, vp9_writer *w) {
170 vp9_prob Pnew[/* n-1 */], 92 int i;
171 vp9_prob Pcur[/* n-1 */], 93 unsigned int branch_ct[32][2];
172 unsigned int bct[/* n-1 */] [2],
173 const unsigned int num_events[/* n */]
174 ) {
175 int i = 0;
176 94
177 vp9_tree_probs_from_distribution(tree, Pnew, bct, num_events, 0); 95 // Assuming max number of probabilities <= 32
178 n--; 96 assert(n <= 32);
179 97
180 for (i = 0; i < n; ++i) 98 vp9_tree_probs_from_distribution(tree, branch_ct, counts);
181 vp9_cond_prob_diff_update(w, &Pcur[i], bct[i]); 99 for (i = 0; i < n - 1; ++i)
182 } 100 vp9_cond_prob_diff_update(w, &probs[i], branch_ct[i]);
183
184 static void update_mbintra_mode_probs(VP9_COMP* const cpi,
185 vp9_writer* const bc) {
186 VP9_COMMON *const cm = &cpi->common;
187 int j;
188 vp9_prob pnew[INTRA_MODES - 1];
189 unsigned int bct[INTRA_MODES - 1][2];
190
191 for (j = 0; j < BLOCK_SIZE_GROUPS; j++)
192 update_mode(bc, INTRA_MODES, vp9_intra_mode_tree, pnew,
193 cm->fc.y_mode_prob[j], bct,
194 (unsigned int *)cpi->y_mode_count[j]);
195 } 101 }
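
Note: the new prob_diff_update() folds the old update_mode() into one generic helper: rebuild per-node branch counts from the raw symbol counts, then conditionally signal a delta for each node probability. The count-to-probability step it ultimately rests on (get_binary_prob(), used later in this file) behaves roughly like this sketch; the rounding details are illustrative, not copied from libvpx:

#include <stdio.h>

/* Map a (zero-branch, one-branch) count pair to an 8-bit probability of
 * taking the zero branch, clamped to [1, 255] so neither branch becomes
 * impossible for the arithmetic coder. */
static unsigned char toy_binary_prob(unsigned int ct0, unsigned int ct1) {
  const unsigned int den = ct0 + ct1;
  unsigned int p;
  if (den == 0)
    return 128;                         /* no observations: assume 50/50 */
  p = (ct0 * 256 + (den >> 1)) / den;   /* rounded scale to 1/256 units */
  return (unsigned char)(p < 1 ? 1 : (p > 255 ? 255 : p));
}

int main(void) {
  printf("%u\n", toy_binary_prob(300, 100));  /* ~0.75 -> prints 192 */
  return 0;
}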
196 102
197 static void write_selected_tx_size(const VP9_COMP *cpi, MODE_INFO *m, 103 static void write_selected_tx_size(const VP9_COMP *cpi, MODE_INFO *m,
198 TX_SIZE tx_size, BLOCK_SIZE bsize, 104 TX_SIZE tx_size, BLOCK_SIZE bsize,
199 vp9_writer *w) { 105 vp9_writer *w) {
106 const TX_SIZE max_tx_size = max_txsize_lookup[bsize];
200 const MACROBLOCKD *const xd = &cpi->mb.e_mbd; 107 const MACROBLOCKD *const xd = &cpi->mb.e_mbd;
201 const vp9_prob *tx_probs = get_tx_probs2(xd, &cpi->common.fc.tx_probs, m); 108 const vp9_prob *const tx_probs = get_tx_probs2(max_tx_size, xd,
109 &cpi->common.fc.tx_probs);
202 vp9_write(w, tx_size != TX_4X4, tx_probs[0]); 110 vp9_write(w, tx_size != TX_4X4, tx_probs[0]);
203 if (bsize >= BLOCK_16X16 && tx_size != TX_4X4) { 111 if (tx_size != TX_4X4 && max_tx_size >= TX_16X16) {
204 vp9_write(w, tx_size != TX_8X8, tx_probs[1]); 112 vp9_write(w, tx_size != TX_8X8, tx_probs[1]);
205 if (bsize >= BLOCK_32X32 && tx_size != TX_8X8) 113 if (tx_size != TX_8X8 && max_tx_size >= TX_32X32)
206 vp9_write(w, tx_size != TX_16X16, tx_probs[2]); 114 vp9_write(w, tx_size != TX_16X16, tx_probs[2]);
207 } 115 }
208 } 116 }
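
Note: write_selected_tx_size() codes the transform size as a short unary-style ladder, sending each extra bit only while a larger size is still admissible for the block, which is why the new code gates on max_tx_size rather than bsize. A self-contained sketch of the ladder (toy_ names are hypothetical; toy_write_bit stands in for the arithmetic coder's vp9_write):

#include <stdio.h>

static void toy_write_bit(int bit, unsigned char prob) {
  printf("bit=%d prob=%u\n", bit, prob);  /* a real coder would entropy-code this */
}

/* tx_size and max_tx_size: 0=4x4, 1=8x8, 2=16x16, 3=32x32. */
static void toy_write_tx_size(int tx_size, int max_tx_size,
                              const unsigned char probs[3]) {
  toy_write_bit(tx_size != 0, probs[0]);
  if (tx_size != 0 && max_tx_size >= 2) {
    toy_write_bit(tx_size != 1, probs[1]);
    if (tx_size != 1 && max_tx_size >= 3)
      toy_write_bit(tx_size != 2, probs[2]);
  }
}

int main(void) {
  const unsigned char probs[3] = { 100, 140, 180 };
  toy_write_tx_size(2, 3, probs);  /* 16x16 in a 64x64 block: three bits */
  return 0;
}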
209 117
210 static int write_skip_coeff(const VP9_COMP *cpi, int segment_id, MODE_INFO *m, 118 static int write_skip_coeff(const VP9_COMP *cpi, int segment_id, MODE_INFO *m,
211 vp9_writer *w) { 119 vp9_writer *w) {
212 const MACROBLOCKD *const xd = &cpi->mb.e_mbd; 120 const MACROBLOCKD *const xd = &cpi->mb.e_mbd;
213 if (vp9_segfeature_active(&cpi->common.seg, segment_id, SEG_LVL_SKIP)) { 121 if (vp9_segfeature_active(&cpi->common.seg, segment_id, SEG_LVL_SKIP)) {
214 return 1; 122 return 1;
215 } else { 123 } else {
216 const int skip_coeff = m->mbmi.skip_coeff; 124 const int skip = m->mbmi.skip_coeff;
217 vp9_write(w, skip_coeff, vp9_get_pred_prob_mbskip(&cpi->common, xd)); 125 vp9_write(w, skip, vp9_get_skip_prob(&cpi->common, xd));
218 return skip_coeff; 126 return skip;
219 } 127 }
220 } 128 }
221 129
222 void vp9_update_skip_probs(VP9_COMP *cpi, vp9_writer *w) { 130 void vp9_update_skip_probs(VP9_COMP *cpi, vp9_writer *w) {
223 VP9_COMMON *cm = &cpi->common; 131 VP9_COMMON *cm = &cpi->common;
224 int k; 132 int k;
225 133
226 for (k = 0; k < MBSKIP_CONTEXTS; ++k) 134 for (k = 0; k < MBSKIP_CONTEXTS; ++k)
227 vp9_cond_prob_diff_update(w, &cm->fc.mbskip_probs[k], cm->counts.mbskip[k]); 135 vp9_cond_prob_diff_update(w, &cm->fc.mbskip_probs[k], cm->counts.mbskip[k]);
228 } 136 }
229 137
230 static void write_intra_mode(vp9_writer *bc, int m, const vp9_prob *p) { 138 static void update_switchable_interp_probs(VP9_COMP *cpi, vp9_writer *w) {
231 write_token(bc, vp9_intra_mode_tree, p, vp9_intra_mode_encodings + m); 139 VP9_COMMON *const cm = &cpi->common;
232 } 140 int j;
141 for (j = 0; j < SWITCHABLE_FILTER_CONTEXTS; ++j)
142 prob_diff_update(vp9_switchable_interp_tree,
143 cm->fc.switchable_interp_prob[j],
144 cm->counts.switchable_interp[j], SWITCHABLE_FILTERS, w);
233 145
234 static void update_switchable_interp_probs(VP9_COMP *const cpi,
235 vp9_writer* const bc) {
236 VP9_COMMON *const cm = &cpi->common;
237 unsigned int branch_ct[SWITCHABLE_FILTER_CONTEXTS][SWITCHABLE_FILTERS - 1][2];
238 vp9_prob new_prob[SWITCHABLE_FILTER_CONTEXTS][SWITCHABLE_FILTERS - 1];
239 int i, j;
240 for (j = 0; j < SWITCHABLE_FILTER_CONTEXTS; ++j) {
241 vp9_tree_probs_from_distribution(
242 vp9_switchable_interp_tree,
243 new_prob[j], branch_ct[j],
244 cm->counts.switchable_interp[j], 0);
245 }
246 for (j = 0; j < SWITCHABLE_FILTER_CONTEXTS; ++j) {
247 for (i = 0; i < SWITCHABLE_FILTERS - 1; ++i) {
248 vp9_cond_prob_diff_update(bc, &cm->fc.switchable_interp_prob[j][i],
249 branch_ct[j][i]);
250 }
251 }
252 #ifdef MODE_STATS 146 #ifdef MODE_STATS
253 if (!cpi->dummy_packing) 147 if (!cpi->dummy_packing)
254 update_switchable_interp_stats(cm); 148 update_switchable_interp_stats(cm);
255 #endif 149 #endif
256 } 150 }
257 151
258 static void update_inter_mode_probs(VP9_COMMON *cm, vp9_writer* const bc) { 152 static void pack_mb_tokens(vp9_writer* const w,
259 int i, j;
260
261 for (i = 0; i < INTER_MODE_CONTEXTS; ++i) {
262 unsigned int branch_ct[INTER_MODES - 1][2];
263 vp9_prob new_prob[INTER_MODES - 1];
264
265 vp9_tree_probs_from_distribution(vp9_inter_mode_tree,
266 new_prob, branch_ct,
267 cm->counts.inter_mode[i], NEARESTMV);
268
269 for (j = 0; j < INTER_MODES - 1; ++j)
270 vp9_cond_prob_diff_update(bc, &cm->fc.inter_mode_probs[i][j],
271 branch_ct[j]);
272 }
273 }
274
275 static void pack_mb_tokens(vp9_writer* const bc,
276 TOKENEXTRA **tp, 153 TOKENEXTRA **tp,
277 const TOKENEXTRA *const stop) { 154 const TOKENEXTRA *const stop) {
278 TOKENEXTRA *p = *tp; 155 TOKENEXTRA *p = *tp;
279 156
280 while (p < stop && p->token != EOSB_TOKEN) { 157 while (p < stop && p->token != EOSB_TOKEN) {
281 const int t = p->token; 158 const int t = p->token;
282 const struct vp9_token *const a = vp9_coef_encodings + t; 159 const struct vp9_token *const a = &vp9_coef_encodings[t];
283 const vp9_extra_bit *const b = vp9_extra_bits + t; 160 const vp9_extra_bit *const b = &vp9_extra_bits[t];
284 int i = 0; 161 int i = 0;
285 const vp9_prob *pp;
286 int v = a->value; 162 int v = a->value;
287 int n = a->len; 163 int n = a->len;
288 vp9_prob probs[ENTROPY_NODES];
289
290 if (t >= TWO_TOKEN) {
291 vp9_model_to_full_probs(p->context_tree, probs);
292 pp = probs;
293 } else {
294 pp = p->context_tree;
295 }
296 assert(pp != 0);
297 164
298 /* skip one or two nodes */ 165 /* skip one or two nodes */
299 if (p->skip_eob_node) { 166 if (p->skip_eob_node) {
300 n -= p->skip_eob_node; 167 n -= p->skip_eob_node;
301 i = 2 * p->skip_eob_node; 168 i = 2 * p->skip_eob_node;
302 } 169 }
303 170
304 do { 171 // TODO(jbb): expanding this can lead to big gains. It allows
305 const int bb = (v >> --n) & 1; 172 // much better branch prediction and would enable us to avoid numerous
306 vp9_write(bc, bb, pp[i >> 1]); 173 // lookups and compares.
307 i = vp9_coef_tree[i + bb]; 174
308 } while (n); 175 // If we have a token that's in the constrained set, the coefficient tree
176 // is split into two treed writes. The first treed write takes care of the
177 // unconstrained nodes. The second treed write takes care of the
178 // constrained nodes.
179 if (t >= TWO_TOKEN && t < EOB_TOKEN) {
180 int len = UNCONSTRAINED_NODES - p->skip_eob_node;
181 int bits = v >> (n - len);
182 vp9_write_tree(w, vp9_coef_tree, p->context_tree, bits, len, i);
183 vp9_write_tree(w, vp9_coef_con_tree,
184 vp9_pareto8_full[p->context_tree[PIVOT_NODE] - 1],
185 v, n - len, 0);
186 } else {
187 vp9_write_tree(w, vp9_coef_tree, p->context_tree, v, n, i);
188 }
309 189
310 if (b->base_val) { 190 if (b->base_val) {
311 const int e = p->extra, l = b->len; 191 const int e = p->extra, l = b->len;
312 192
313 if (l) { 193 if (l) {
314 const unsigned char *pb = b->prob; 194 const unsigned char *pb = b->prob;
315 int v = e >> 1; 195 int v = e >> 1;
316 int n = l; /* number of bits in v, assumed nonzero */ 196 int n = l; /* number of bits in v, assumed nonzero */
317 int i = 0; 197 int i = 0;
318 198
319 do { 199 do {
320 const int bb = (v >> --n) & 1; 200 const int bb = (v >> --n) & 1;
321 vp9_write(bc, bb, pb[i >> 1]); 201 vp9_write(w, bb, pb[i >> 1]);
322 i = b->tree[i + bb]; 202 i = b->tree[i + bb];
323 } while (n); 203 } while (n);
324 } 204 }
325 205
326 vp9_write_bit(bc, e & 1); 206 vp9_write_bit(w, e & 1);
327 } 207 }
328 ++p; 208 ++p;
329 } 209 }
330 210
331 *tp = p + (p->token == EOSB_TOKEN); 211 *tp = p + (p->token == EOSB_TOKEN);
332 } 212 }
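
Note: both treed writes above reduce to one primitive: walk the token's value MSB-first, coding each bit against the probability of the current tree node and descending to the child that bit selects. A self-contained sketch of that primitive under the usual libvpx tree layout (one probability per internal node, children at tree[node] and tree[node + 1]; toy_ names are hypothetical):

#include <stdio.h>

static void toy_write_bit(int bit, unsigned char prob) {
  printf("bit=%d prob=%u\n", bit, prob);
}

/* tree[] stores, for each internal node pair, the index of the child
 * reached by bit 0 and bit 1; values <= 0 mark leaves. probs[] holds one
 * probability per internal node (indexed by node >> 1). */
static void toy_write_tree(const signed char *tree, const unsigned char *probs,
                           int value, int nbits, int node) {
  while (nbits--) {
    const int bit = (value >> nbits) & 1;
    toy_write_bit(bit, probs[node >> 1]);
    node = tree[node + bit];
  }
}

int main(void) {
  /* Three symbols: A (bit 0), B (bits 1,0), C (bits 1,1). */
  static const signed char tree[4] = { 0, 2, -1, -2 };
  static const unsigned char probs[2] = { 200, 120 };
  toy_write_tree(tree, probs, 3 /* C = binary 11 */, 2, 0);
  return 0;
}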
333 213
334 static void write_sb_mv_ref(vp9_writer *w, MB_PREDICTION_MODE mode,
335 const vp9_prob *p) {
336 assert(is_inter_mode(mode));
337 write_token(w, vp9_inter_mode_tree, p,
338 &vp9_inter_mode_encodings[inter_mode_offset(mode)]);
339 }
340
341
342 static void write_segment_id(vp9_writer *w, const struct segmentation *seg, 214 static void write_segment_id(vp9_writer *w, const struct segmentation *seg,
343 int segment_id) { 215 int segment_id) {
344 if (seg->enabled && seg->update_map) 216 if (seg->enabled && seg->update_map)
345 treed_write(w, vp9_segment_tree, seg->tree_probs, segment_id, 3); 217 vp9_write_tree(w, vp9_segment_tree, seg->tree_probs, segment_id, 3, 0);
346 } 218 }
347 219
348 // This function encodes the reference frame 220 // This function encodes the reference frame
349 static void encode_ref_frame(VP9_COMP *cpi, vp9_writer *bc) { 221 static void encode_ref_frame(VP9_COMP *cpi, vp9_writer *bc) {
350 VP9_COMMON *const cm = &cpi->common; 222 VP9_COMMON *const cm = &cpi->common;
351 MACROBLOCK *const x = &cpi->mb; 223 MACROBLOCK *const x = &cpi->mb;
352 MACROBLOCKD *const xd = &x->e_mbd; 224 MACROBLOCKD *const xd = &x->e_mbd;
353 MB_MODE_INFO *mi = &xd->mi_8x8[0]->mbmi; 225 MB_MODE_INFO *mi = &xd->mi_8x8[0]->mbmi;
354 const int segment_id = mi->segment_id; 226 const int segment_id = mi->segment_id;
355 int seg_ref_active = vp9_segfeature_active(&cm->seg, segment_id, 227 int seg_ref_active = vp9_segfeature_active(&cm->seg, segment_id,
356 SEG_LVL_REF_FRAME); 228 SEG_LVL_REF_FRAME);
357 // If segment level coding of this signal is disabled... 229 // If segment level coding of this signal is disabled...
358 // or the segment allows multiple reference frame options 230 // or the segment allows multiple reference frame options
359 if (!seg_ref_active) { 231 if (!seg_ref_active) {
360 // does the feature use compound prediction or not 232 // does the feature use compound prediction or not
361 // (if not specified at the frame/segment level) 233 // (if not specified at the frame/segment level)
362 if (cm->comp_pred_mode == HYBRID_PREDICTION) { 234 if (cm->reference_mode == REFERENCE_MODE_SELECT) {
363 vp9_write(bc, mi->ref_frame[1] > INTRA_FRAME, 235 vp9_write(bc, mi->ref_frame[1] > INTRA_FRAME,
364 vp9_get_pred_prob_comp_inter_inter(cm, xd)); 236 vp9_get_reference_mode_prob(cm, xd));
365 } else { 237 } else {
366 assert((mi->ref_frame[1] <= INTRA_FRAME) == 238 assert((mi->ref_frame[1] <= INTRA_FRAME) ==
367 (cm->comp_pred_mode == SINGLE_PREDICTION_ONLY)); 239 (cm->reference_mode == SINGLE_REFERENCE));
368 } 240 }
369 241
370 if (mi->ref_frame[1] > INTRA_FRAME) { 242 if (mi->ref_frame[1] > INTRA_FRAME) {
371 vp9_write(bc, mi->ref_frame[0] == GOLDEN_FRAME, 243 vp9_write(bc, mi->ref_frame[0] == GOLDEN_FRAME,
372 vp9_get_pred_prob_comp_ref_p(cm, xd)); 244 vp9_get_pred_prob_comp_ref_p(cm, xd));
373 } else { 245 } else {
374 vp9_write(bc, mi->ref_frame[0] != LAST_FRAME, 246 vp9_write(bc, mi->ref_frame[0] != LAST_FRAME,
375 vp9_get_pred_prob_single_ref_p1(cm, xd)); 247 vp9_get_pred_prob_single_ref_p1(cm, xd));
376 if (mi->ref_frame[0] != LAST_FRAME) 248 if (mi->ref_frame[0] != LAST_FRAME)
377 vp9_write(bc, mi->ref_frame[0] != GOLDEN_FRAME, 249 vp9_write(bc, mi->ref_frame[0] != GOLDEN_FRAME,
(...skipping 35 matching lines...)
413 if (!pred_flag) 285 if (!pred_flag)
414 write_segment_id(bc, seg, segment_id); 286 write_segment_id(bc, seg, segment_id);
415 } else { 287 } else {
416 write_segment_id(bc, seg, segment_id); 288 write_segment_id(bc, seg, segment_id);
417 } 289 }
418 } 290 }
419 291
420 skip_coeff = write_skip_coeff(cpi, segment_id, m, bc); 292 skip_coeff = write_skip_coeff(cpi, segment_id, m, bc);
421 293
422 if (!vp9_segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME)) 294 if (!vp9_segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME))
423 vp9_write(bc, rf != INTRA_FRAME, 295 vp9_write(bc, rf != INTRA_FRAME, vp9_get_intra_inter_prob(cm, xd));
424 vp9_get_pred_prob_intra_inter(cm, xd));
425 296
426 if (bsize >= BLOCK_8X8 && cm->tx_mode == TX_MODE_SELECT && 297 if (bsize >= BLOCK_8X8 && cm->tx_mode == TX_MODE_SELECT &&
427 !(rf != INTRA_FRAME && 298 !(rf != INTRA_FRAME &&
428 (skip_coeff || vp9_segfeature_active(seg, segment_id, SEG_LVL_SKIP)))) { 299 (skip_coeff || vp9_segfeature_active(seg, segment_id, SEG_LVL_SKIP)))) {
429 write_selected_tx_size(cpi, m, mi->tx_size, bsize, bc); 300 write_selected_tx_size(cpi, m, mi->tx_size, bsize, bc);
430 } 301 }
431 302
432 if (rf == INTRA_FRAME) { 303 if (rf == INTRA_FRAME) {
433 #ifdef ENTROPY_STATS 304 #ifdef ENTROPY_STATS
434 active_section = 6; 305 active_section = 6;
(...skipping 18 matching lines...)
453 encode_ref_frame(cpi, bc); 324 encode_ref_frame(cpi, bc);
454 mv_ref_p = cpi->common.fc.inter_mode_probs[mi->mode_context[rf]]; 325 mv_ref_p = cpi->common.fc.inter_mode_probs[mi->mode_context[rf]];
455 326
456 #ifdef ENTROPY_STATS 327 #ifdef ENTROPY_STATS
457 active_section = 3; 328 active_section = 3;
458 #endif 329 #endif
459 330
460 // If segment skip is not enabled code the mode. 331 // If segment skip is not enabled code the mode.
461 if (!vp9_segfeature_active(seg, segment_id, SEG_LVL_SKIP)) { 332 if (!vp9_segfeature_active(seg, segment_id, SEG_LVL_SKIP)) {
462 if (bsize >= BLOCK_8X8) { 333 if (bsize >= BLOCK_8X8) {
463 write_sb_mv_ref(bc, mode, mv_ref_p); 334 write_inter_mode(bc, mode, mv_ref_p);
464 ++cm->counts.inter_mode[mi->mode_context[rf]] 335 ++cm->counts.inter_mode[mi->mode_context[rf]][INTER_OFFSET(mode)];
465 [inter_mode_offset(mode)];
466 } 336 }
467 } 337 }
468 338
469 if (cm->mcomp_filter_type == SWITCHABLE) { 339 if (cm->mcomp_filter_type == SWITCHABLE) {
470 const int ctx = vp9_get_pred_context_switchable_interp(xd); 340 const int ctx = vp9_get_pred_context_switchable_interp(xd);
471 write_token(bc, vp9_switchable_interp_tree, 341 vp9_write_token(bc, vp9_switchable_interp_tree,
472 cm->fc.switchable_interp_prob[ctx], 342 cm->fc.switchable_interp_prob[ctx],
473 &vp9_switchable_interp_encodings[mi->interp_filter]); 343 &switchable_interp_encodings[mi->interp_filter]);
474 } else { 344 } else {
475 assert(mi->interp_filter == cm->mcomp_filter_type); 345 assert(mi->interp_filter == cm->mcomp_filter_type);
476 } 346 }
477 347
478 if (bsize < BLOCK_8X8) { 348 if (bsize < BLOCK_8X8) {
479 const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize]; 349 const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize];
480 const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize]; 350 const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize];
481 int idx, idy; 351 int idx, idy;
482 for (idy = 0; idy < 2; idy += num_4x4_blocks_high) { 352 for (idy = 0; idy < 2; idy += num_4x4_blocks_high) {
483 for (idx = 0; idx < 2; idx += num_4x4_blocks_wide) { 353 for (idx = 0; idx < 2; idx += num_4x4_blocks_wide) {
484 const int j = idy * 2 + idx; 354 const int j = idy * 2 + idx;
485 const MB_PREDICTION_MODE blockmode = m->bmi[j].as_mode; 355 const MB_PREDICTION_MODE blockmode = m->bmi[j].as_mode;
486 write_sb_mv_ref(bc, blockmode, mv_ref_p); 356 write_inter_mode(bc, blockmode, mv_ref_p);
487 ++cm->counts.inter_mode[mi->mode_context[rf]] 357 ++cm->counts.inter_mode[mi->mode_context[rf]]
488 [inter_mode_offset(blockmode)]; 358 [INTER_OFFSET(blockmode)];
489 359
490 if (blockmode == NEWMV) { 360 if (blockmode == NEWMV) {
491 #ifdef ENTROPY_STATS 361 #ifdef ENTROPY_STATS
492 active_section = 11; 362 active_section = 11;
493 #endif 363 #endif
494 vp9_encode_mv(cpi, bc, &m->bmi[j].as_mv[0].as_mv, 364 vp9_encode_mv(cpi, bc, &m->bmi[j].as_mv[0].as_mv,
495 &mi->best_mv[0].as_mv, nmvc, allow_hp); 365 &mi->best_mv[0].as_mv, nmvc, allow_hp);
496 366
497 if (has_second_ref(mi)) 367 if (has_second_ref(mi))
498 vp9_encode_mv(cpi, bc, &m->bmi[j].as_mv[1].as_mv, 368 vp9_encode_mv(cpi, bc, &m->bmi[j].as_mv[1].as_mv,
(...skipping 53 matching lines...)
552 #endif 422 #endif
553 write_intra_mode(bc, bm, vp9_kf_y_mode_prob[A][L]); 423 write_intra_mode(bc, bm, vp9_kf_y_mode_prob[A][L]);
554 } 424 }
555 } 425 }
556 } 426 }
557 427
558 write_intra_mode(bc, m->mbmi.uv_mode, vp9_kf_uv_mode_prob[ym]); 428 write_intra_mode(bc, m->mbmi.uv_mode, vp9_kf_uv_mode_prob[ym]);
559 } 429 }
560 430
561 static void write_modes_b(VP9_COMP *cpi, const TileInfo *const tile, 431 static void write_modes_b(VP9_COMP *cpi, const TileInfo *const tile,
562 MODE_INFO **mi_8x8, vp9_writer *bc, 432 vp9_writer *w, TOKENEXTRA **tok, TOKENEXTRA *tok_end,
563 TOKENEXTRA **tok, TOKENEXTRA *tok_end, 433 int mi_row, int mi_col) {
564 int mi_row, int mi_col, int index) {
565 VP9_COMMON *const cm = &cpi->common; 434 VP9_COMMON *const cm = &cpi->common;
566 MACROBLOCKD *const xd = &cpi->mb.e_mbd; 435 MACROBLOCKD *const xd = &cpi->mb.e_mbd;
567 MODE_INFO *m = mi_8x8[0]; 436 MODE_INFO *m;
568 437
569 if (m->mbmi.sb_type < BLOCK_8X8) 438 xd->mi_8x8 = cm->mi_grid_visible + (mi_row * cm->mode_info_stride + mi_col);
570 if (index > 0) 439 m = xd->mi_8x8[0];
571 return;
572
573 xd->mi_8x8 = mi_8x8;
574 440
575 set_mi_row_col(xd, tile, 441 set_mi_row_col(xd, tile,
576 mi_row, num_8x8_blocks_high_lookup[m->mbmi.sb_type], 442 mi_row, num_8x8_blocks_high_lookup[m->mbmi.sb_type],
577 mi_col, num_8x8_blocks_wide_lookup[m->mbmi.sb_type], 443 mi_col, num_8x8_blocks_wide_lookup[m->mbmi.sb_type],
578 cm->mi_rows, cm->mi_cols); 444 cm->mi_rows, cm->mi_cols);
579 if (frame_is_intra_only(cm)) { 445 if (frame_is_intra_only(cm)) {
580 write_mb_modes_kf(cpi, mi_8x8, bc); 446 write_mb_modes_kf(cpi, xd->mi_8x8, w);
581 #ifdef ENTROPY_STATS 447 #ifdef ENTROPY_STATS
582 active_section = 8; 448 active_section = 8;
583 #endif 449 #endif
584 } else { 450 } else {
585 pack_inter_mode_mvs(cpi, m, bc); 451 pack_inter_mode_mvs(cpi, m, w);
586 #ifdef ENTROPY_STATS 452 #ifdef ENTROPY_STATS
587 active_section = 1; 453 active_section = 1;
588 #endif 454 #endif
589 } 455 }
590 456
591 assert(*tok < tok_end); 457 assert(*tok < tok_end);
592 pack_mb_tokens(bc, tok, tok_end); 458 pack_mb_tokens(w, tok, tok_end);
593 } 459 }
594 460
595 static void write_partition(PARTITION_TYPE partition, 461 static void write_partition(VP9_COMP *cpi, int hbs, int mi_row, int mi_col,
596 int hbs, int mi_rows, int mi_cols, 462 PARTITION_TYPE p, BLOCK_SIZE bsize, vp9_writer *w) {
597 int mi_row, int mi_col, 463 VP9_COMMON *const cm = &cpi->common;
598 vp9_prob probs[PARTITION_TYPES - 1], 464 const int ctx = partition_plane_context(cpi->above_seg_context,
599 vp9_writer *w) { 465 cpi->left_seg_context,
600 const int has_rows = (mi_row + hbs) < mi_rows; 466 mi_row, mi_col, bsize);
601 const int has_cols = (mi_col + hbs) < mi_cols; 467 const vp9_prob *const probs = get_partition_probs(cm, ctx);
468 const int has_rows = (mi_row + hbs) < cm->mi_rows;
469 const int has_cols = (mi_col + hbs) < cm->mi_cols;
602 470
603 if (has_rows && has_cols) { 471 if (has_rows && has_cols) {
604 write_token(w, vp9_partition_tree, probs, 472 vp9_write_token(w, vp9_partition_tree, probs, &partition_encodings[p]);
605 &vp9_partition_encodings[partition]);
606 } else if (!has_rows && has_cols) { 473 } else if (!has_rows && has_cols) {
607 assert(partition == PARTITION_SPLIT || partition == PARTITION_HORZ); 474 assert(p == PARTITION_SPLIT || p == PARTITION_HORZ);
608 vp9_write(w, partition == PARTITION_SPLIT, probs[1]); 475 vp9_write(w, p == PARTITION_SPLIT, probs[1]);
609 } else if (has_rows && !has_cols) { 476 } else if (has_rows && !has_cols) {
610 assert(partition == PARTITION_SPLIT || partition == PARTITION_VERT); 477 assert(p == PARTITION_SPLIT || p == PARTITION_VERT);
611 vp9_write(w, partition == PARTITION_SPLIT, probs[2]); 478 vp9_write(w, p == PARTITION_SPLIT, probs[2]);
612 } else { 479 } else {
613 assert(partition == PARTITION_SPLIT); 480 assert(p == PARTITION_SPLIT);
614 } 481 }
615 } 482 }
616 483
617 static void write_modes_sb(VP9_COMP *cpi, const TileInfo *const tile, 484 static void write_modes_sb(VP9_COMP *cpi, const TileInfo *const tile,
618 MODE_INFO **mi_8x8, vp9_writer *bc, 485 vp9_writer *w, TOKENEXTRA **tok, TOKENEXTRA *tok_end,
619 TOKENEXTRA **tok, TOKENEXTRA *tok_end, 486 int mi_row, int mi_col, BLOCK_SIZE bsize) {
620 int mi_row, int mi_col, BLOCK_SIZE bsize,
621 int index) {
622 VP9_COMMON *const cm = &cpi->common; 487 VP9_COMMON *const cm = &cpi->common;
623 const int mis = cm->mode_info_stride; 488 const int bsl = b_width_log2(bsize);
624 int bsl = b_width_log2(bsize); 489 const int bs = (1 << bsl) / 4;
625 int bs = (1 << bsl) / 4; // mode_info step for subsize 490 PARTITION_TYPE partition;
626 int n;
627 PARTITION_TYPE partition = PARTITION_NONE;
628 BLOCK_SIZE subsize; 491 BLOCK_SIZE subsize;
629 MODE_INFO *m = mi_8x8[0]; 492 MODE_INFO *m = cm->mi_grid_visible[mi_row * cm->mode_info_stride + mi_col];
630 493
631 if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) 494 if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
632 return; 495 return;
633 496
634 partition = partition_lookup[bsl][m->mbmi.sb_type]; 497 partition = partition_lookup[bsl][m->mbmi.sb_type];
635 498 write_partition(cpi, bs, mi_row, mi_col, partition, bsize, w);
636 if (bsize < BLOCK_8X8) { 499 subsize = get_subsize(bsize, partition);
637 if (index > 0) 500 if (subsize < BLOCK_8X8) {
638 return; 501 write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col);
639 } else { 502 } else {
640 const int ctx = partition_plane_context(cpi->above_seg_context, 503 switch (partition) {
641 cpi->left_seg_context, 504 case PARTITION_NONE:
642 mi_row, mi_col, bsize); 505 write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col);
643 write_partition(partition, bs, cm->mi_rows, cm->mi_cols, mi_row, mi_col, 506 break;
644 cm->fc.partition_prob[cm->frame_type][ctx], bc); 507 case PARTITION_HORZ:
645 } 508 write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col);
646 509 if (mi_row + bs < cm->mi_rows)
647 subsize = get_subsize(bsize, partition); 510 write_modes_b(cpi, tile, w, tok, tok_end, mi_row + bs, mi_col);
648 511 break;
649 switch (partition) { 512 case PARTITION_VERT:
650 case PARTITION_NONE: 513 write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col);
651 write_modes_b(cpi, tile, mi_8x8, bc, tok, tok_end, mi_row, mi_col, 0); 514 if (mi_col + bs < cm->mi_cols)
652 break; 515 write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col + bs);
653 case PARTITION_HORZ: 516 break;
654 write_modes_b(cpi, tile, mi_8x8, bc, tok, tok_end, mi_row, mi_col, 0); 517 case PARTITION_SPLIT:
655 if ((mi_row + bs) < cm->mi_rows) 518 write_modes_sb(cpi, tile, w, tok, tok_end, mi_row, mi_col, subsize);
656 write_modes_b(cpi, tile, mi_8x8 + bs * mis, bc, tok, tok_end, 519 write_modes_sb(cpi, tile, w, tok, tok_end, mi_row, mi_col + bs,
657 mi_row + bs, mi_col, 1); 520 subsize);
658 break; 521 write_modes_sb(cpi, tile, w, tok, tok_end, mi_row + bs, mi_col,
659 case PARTITION_VERT: 522 subsize);
660 write_modes_b(cpi, tile, mi_8x8, bc, tok, tok_end, mi_row, mi_col, 0); 523 write_modes_sb(cpi, tile, w, tok, tok_end, mi_row + bs, mi_col + bs,
661 if ((mi_col + bs) < cm->mi_cols) 524 subsize);
662 write_modes_b(cpi, tile, mi_8x8 + bs, bc, tok, tok_end, 525 break;
663 mi_row, mi_col + bs, 1); 526 default:
664 break; 527 assert(0);
665 case PARTITION_SPLIT: 528 }
666 for (n = 0; n < 4; n++) {
667 const int j = n >> 1, i = n & 1;
668 write_modes_sb(cpi, tile, mi_8x8 + j * bs * mis + i * bs, bc,
669 tok, tok_end,
670 mi_row + j * bs, mi_col + i * bs, subsize, n);
671 }
672 break;
673 default:
674 assert(0);
675 } 529 }
676 530
677 // update partition context 531 // update partition context
678 if (bsize >= BLOCK_8X8 && 532 if (bsize >= BLOCK_8X8 &&
679 (bsize == BLOCK_8X8 || partition != PARTITION_SPLIT)) 533 (bsize == BLOCK_8X8 || partition != PARTITION_SPLIT))
680 update_partition_context(cpi->above_seg_context, cpi->left_seg_context, 534 update_partition_context(cpi->above_seg_context, cpi->left_seg_context,
681 mi_row, mi_col, subsize, bsize); 535 mi_row, mi_col, subsize, bsize);
682 } 536 }
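
Note: write_modes_sb() is a quadtree walk: code the partition choice, then either emit one block or recurse into the four quadrants, skipping any quadrant whose origin lies outside the frame (frame sizes need not be multiples of 64 pixels). A minimal sketch of the traversal shape, with sizes in 8x8 mode-info units and hypothetical toy_ names:

#include <stdio.h>

/* Visit a size x size block at (row, col); recurse into quadrants while
 * size > 2, skipping quadrants whose origin lies outside the frame. */
static void toy_walk(int row, int col, int size, int mi_rows, int mi_cols) {
  if (row >= mi_rows || col >= mi_cols)
    return;                                 /* quadrant entirely off-frame */
  printf("block (%2d,%2d) size %d\n", row, col, size);
  if (size > 2) {                           /* pretend every block splits */
    const int h = size / 2;
    toy_walk(row,     col,     h, mi_rows, mi_cols);
    toy_walk(row,     col + h, h, mi_rows, mi_cols);
    toy_walk(row + h, col,     h, mi_rows, mi_cols);
    toy_walk(row + h, col + h, h, mi_rows, mi_cols);
  }
}

int main(void) {
  /* A 12x12-unit frame: the 64x64 superblock at (0,0) covers it only
   * partially, so quadrants starting at or beyond row/col 12 are skipped. */
  toy_walk(0, 0, 8, 12, 12);
  return 0;
}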
683 537
684 static void write_modes(VP9_COMP *cpi, const TileInfo *const tile, 538 static void write_modes(VP9_COMP *cpi, const TileInfo *const tile,
685 vp9_writer* const bc, 539 vp9_writer *w, TOKENEXTRA **tok, TOKENEXTRA *tok_end) {
686 TOKENEXTRA **tok, TOKENEXTRA *tok_end) {
687 VP9_COMMON *const cm = &cpi->common;
688 const int mis = cm->mode_info_stride;
689 int mi_row, mi_col; 540 int mi_row, mi_col;
690 MODE_INFO **mi_8x8 = cm->mi_grid_visible;
691 MODE_INFO **m_8x8;
692
693 mi_8x8 += tile->mi_col_start + tile->mi_row_start * mis;
694 541
695 for (mi_row = tile->mi_row_start; mi_row < tile->mi_row_end; 542 for (mi_row = tile->mi_row_start; mi_row < tile->mi_row_end;
696 mi_row += 8, mi_8x8 += 8 * mis) { 543 mi_row += MI_BLOCK_SIZE) {
697 m_8x8 = mi_8x8; 544 vp9_zero(cpi->left_seg_context);
698 vp9_zero(cpi->left_seg_context);
699 for (mi_col = tile->mi_col_start; mi_col < tile->mi_col_end; 545 for (mi_col = tile->mi_col_start; mi_col < tile->mi_col_end;
700 mi_col += MI_BLOCK_SIZE, m_8x8 += MI_BLOCK_SIZE) { 546 mi_col += MI_BLOCK_SIZE)
701 write_modes_sb(cpi, tile, m_8x8, bc, tok, tok_end, mi_row, mi_col, 547 write_modes_sb(cpi, tile, w, tok, tok_end, mi_row, mi_col, BLOCK_64X64);
702 BLOCK_64X64, 0);
703 }
704 } 548 }
705 } 549 }
706 550
707 static void build_tree_distribution(VP9_COMP *cpi, TX_SIZE tx_size) { 551 static void build_tree_distribution(VP9_COMP *cpi, TX_SIZE tx_size) {
708 vp9_coeff_probs_model *coef_probs = cpi->frame_coef_probs[tx_size]; 552 vp9_coeff_probs_model *coef_probs = cpi->frame_coef_probs[tx_size];
709 vp9_coeff_count *coef_counts = cpi->coef_counts[tx_size]; 553 vp9_coeff_count *coef_counts = cpi->coef_counts[tx_size];
710 unsigned int (*eob_branch_ct)[REF_TYPES][COEF_BANDS][PREV_COEF_CONTEXTS] = 554 unsigned int (*eob_branch_ct)[REF_TYPES][COEF_BANDS][COEFF_CONTEXTS] =
711 cpi->common.counts.eob_branch[tx_size]; 555 cpi->common.counts.eob_branch[tx_size];
712 vp9_coeff_stats *coef_branch_ct = cpi->frame_branch_ct[tx_size]; 556 vp9_coeff_stats *coef_branch_ct = cpi->frame_branch_ct[tx_size];
713 vp9_prob full_probs[ENTROPY_NODES]; 557 int i, j, k, l, m;
714 int i, j, k, l;
715 558
716 for (i = 0; i < BLOCK_TYPES; ++i) { 559 for (i = 0; i < PLANE_TYPES; ++i) {
717 for (j = 0; j < REF_TYPES; ++j) { 560 for (j = 0; j < REF_TYPES; ++j) {
718 for (k = 0; k < COEF_BANDS; ++k) { 561 for (k = 0; k < COEF_BANDS; ++k) {
719 for (l = 0; l < PREV_COEF_CONTEXTS; ++l) { 562 for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) {
720 if (l >= 3 && k == 0)
721 continue;
722 vp9_tree_probs_from_distribution(vp9_coef_tree, 563 vp9_tree_probs_from_distribution(vp9_coef_tree,
723 full_probs,
724 coef_branch_ct[i][j][k][l], 564 coef_branch_ct[i][j][k][l],
725 coef_counts[i][j][k][l], 0); 565 coef_counts[i][j][k][l]);
726 vpx_memcpy(coef_probs[i][j][k][l], full_probs,
727 sizeof(vp9_prob) * UNCONSTRAINED_NODES);
728 coef_branch_ct[i][j][k][l][0][1] = eob_branch_ct[i][j][k][l] - 566 coef_branch_ct[i][j][k][l][0][1] = eob_branch_ct[i][j][k][l] -
729 coef_branch_ct[i][j][k][l][0][0]; 567 coef_branch_ct[i][j][k][l][0][0];
730 coef_probs[i][j][k][l][0] = 568 for (m = 0; m < UNCONSTRAINED_NODES; ++m)
731 get_binary_prob(coef_branch_ct[i][j][k][l][0][0], 569 coef_probs[i][j][k][l][m] = get_binary_prob(
732 coef_branch_ct[i][j][k][l][0][1]); 570 coef_branch_ct[i][j][k][l][m][0],
571 coef_branch_ct[i][j][k][l][m][1]);
733 #ifdef ENTROPY_STATS 572 #ifdef ENTROPY_STATS
734 if (!cpi->dummy_packing) { 573 if (!cpi->dummy_packing) {
735 int t; 574 int t;
736 for (t = 0; t < MAX_ENTROPY_TOKENS; ++t) 575 for (t = 0; t < ENTROPY_TOKENS; ++t)
737 context_counters[tx_size][i][j][k][l][t] += 576 context_counters[tx_size][i][j][k][l][t] +=
738 coef_counts[i][j][k][l][t]; 577 coef_counts[i][j][k][l][t];
739 context_counters[tx_size][i][j][k][l][MAX_ENTROPY_TOKENS] += 578 context_counters[tx_size][i][j][k][l][ENTROPY_TOKENS] +=
740 eob_branch_ct[i][j][k][l]; 579 eob_branch_ct[i][j][k][l];
741 } 580 }
742 #endif 581 #endif
743 } 582 }
744 } 583 }
745 } 584 }
746 } 585 }
747 } 586 }
748 587
749 static void build_coeff_contexts(VP9_COMP *cpi) {
750 TX_SIZE t;
751 for (t = TX_4X4; t <= TX_32X32; t++)
752 build_tree_distribution(cpi, t);
753 }
754
755 static void update_coef_probs_common(vp9_writer* const bc, VP9_COMP *cpi, 588 static void update_coef_probs_common(vp9_writer* const bc, VP9_COMP *cpi,
756 TX_SIZE tx_size) { 589 TX_SIZE tx_size) {
757 vp9_coeff_probs_model *new_frame_coef_probs = cpi->frame_coef_probs[tx_size]; 590 vp9_coeff_probs_model *new_frame_coef_probs = cpi->frame_coef_probs[tx_size];
758 vp9_coeff_probs_model *old_frame_coef_probs = 591 vp9_coeff_probs_model *old_frame_coef_probs =
759 cpi->common.fc.coef_probs[tx_size]; 592 cpi->common.fc.coef_probs[tx_size];
760 vp9_coeff_stats *frame_branch_ct = cpi->frame_branch_ct[tx_size]; 593 vp9_coeff_stats *frame_branch_ct = cpi->frame_branch_ct[tx_size];
761 const vp9_prob upd = DIFF_UPDATE_PROB; 594 const vp9_prob upd = DIFF_UPDATE_PROB;
762 const int entropy_nodes_update = UNCONSTRAINED_NODES; 595 const int entropy_nodes_update = UNCONSTRAINED_NODES;
763 int i, j, k, l, t; 596 int i, j, k, l, t;
764 switch (cpi->sf.use_fast_coef_updates) { 597 switch (cpi->sf.use_fast_coef_updates) {
765 case 0: { 598 case 0: {
766 /* dry run to see if there is any update at all needed */ 599 /* dry run to see if there is any update at all needed */
767 int savings = 0; 600 int savings = 0;
768 int update[2] = {0, 0}; 601 int update[2] = {0, 0};
769 for (i = 0; i < BLOCK_TYPES; ++i) { 602 for (i = 0; i < PLANE_TYPES; ++i) {
770 for (j = 0; j < REF_TYPES; ++j) { 603 for (j = 0; j < REF_TYPES; ++j) {
771 for (k = 0; k < COEF_BANDS; ++k) { 604 for (k = 0; k < COEF_BANDS; ++k) {
772 for (l = 0; l < PREV_COEF_CONTEXTS; ++l) { 605 for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) {
773 for (t = 0; t < entropy_nodes_update; ++t) { 606 for (t = 0; t < entropy_nodes_update; ++t) {
774 vp9_prob newp = new_frame_coef_probs[i][j][k][l][t]; 607 vp9_prob newp = new_frame_coef_probs[i][j][k][l][t];
775 const vp9_prob oldp = old_frame_coef_probs[i][j][k][l][t]; 608 const vp9_prob oldp = old_frame_coef_probs[i][j][k][l][t];
776 int s; 609 int s;
777 int u = 0; 610 int u = 0;
778
779 if (l >= 3 && k == 0)
780 continue;
781 if (t == PIVOT_NODE) 611 if (t == PIVOT_NODE)
782 s = vp9_prob_diff_update_savings_search_model( 612 s = vp9_prob_diff_update_savings_search_model(
783 frame_branch_ct[i][j][k][l][0], 613 frame_branch_ct[i][j][k][l][0],
784 old_frame_coef_probs[i][j][k][l], &newp, upd, i, j); 614 old_frame_coef_probs[i][j][k][l], &newp, upd, i, j);
785 else 615 else
786 s = vp9_prob_diff_update_savings_search( 616 s = vp9_prob_diff_update_savings_search(
787 frame_branch_ct[i][j][k][l][t], oldp, &newp, upd); 617 frame_branch_ct[i][j][k][l][t], oldp, &newp, upd);
788 if (s > 0 && newp != oldp) 618 if (s > 0 && newp != oldp)
789 u = 1; 619 u = 1;
790 if (u) 620 if (u)
791 savings += s - (int)(vp9_cost_zero(upd)); 621 savings += s - (int)(vp9_cost_zero(upd));
792 else 622 else
793 savings -= (int)(vp9_cost_zero(upd)); 623 savings -= (int)(vp9_cost_zero(upd));
794 update[u]++; 624 update[u]++;
795 } 625 }
796 } 626 }
797 } 627 }
798 } 628 }
799 } 629 }
800 630
801 // printf("Update %d %d, savings %d\n", update[0], update[1], savings); 631 // printf("Update %d %d, savings %d\n", update[0], update[1], savings);
802 /* Is coef updated at all */ 632 /* Is coef updated at all */
803 if (update[1] == 0 || savings < 0) { 633 if (update[1] == 0 || savings < 0) {
804 vp9_write_bit(bc, 0); 634 vp9_write_bit(bc, 0);
805 return; 635 return;
806 } 636 }
807 vp9_write_bit(bc, 1); 637 vp9_write_bit(bc, 1);
808 for (i = 0; i < BLOCK_TYPES; ++i) { 638 for (i = 0; i < PLANE_TYPES; ++i) {
809 for (j = 0; j < REF_TYPES; ++j) { 639 for (j = 0; j < REF_TYPES; ++j) {
810 for (k = 0; k < COEF_BANDS; ++k) { 640 for (k = 0; k < COEF_BANDS; ++k) {
811 for (l = 0; l < PREV_COEF_CONTEXTS; ++l) { 641 for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) {
812 // calc probs and branch cts for this frame only 642 // calc probs and branch cts for this frame only
813 for (t = 0; t < entropy_nodes_update; ++t) { 643 for (t = 0; t < entropy_nodes_update; ++t) {
814 vp9_prob newp = new_frame_coef_probs[i][j][k][l][t]; 644 vp9_prob newp = new_frame_coef_probs[i][j][k][l][t];
815 vp9_prob *oldp = old_frame_coef_probs[i][j][k][l] + t; 645 vp9_prob *oldp = old_frame_coef_probs[i][j][k][l] + t;
816 const vp9_prob upd = DIFF_UPDATE_PROB; 646 const vp9_prob upd = DIFF_UPDATE_PROB;
817 int s; 647 int s;
818 int u = 0; 648 int u = 0;
819 if (l >= 3 && k == 0)
820 continue;
821 if (t == PIVOT_NODE) 649 if (t == PIVOT_NODE)
822 s = vp9_prob_diff_update_savings_search_model( 650 s = vp9_prob_diff_update_savings_search_model(
823 frame_branch_ct[i][j][k][l][0], 651 frame_branch_ct[i][j][k][l][0],
824 old_frame_coef_probs[i][j][k][l], &newp, upd, i, j); 652 old_frame_coef_probs[i][j][k][l], &newp, upd, i, j);
825 else 653 else
826 s = vp9_prob_diff_update_savings_search( 654 s = vp9_prob_diff_update_savings_search(
827 frame_branch_ct[i][j][k][l][t], 655 frame_branch_ct[i][j][k][l][t],
828 *oldp, &newp, upd); 656 *oldp, &newp, upd);
829 if (s > 0 && newp != *oldp) 657 if (s > 0 && newp != *oldp)
830 u = 1; 658 u = 1;
(...skipping 11 matching lines...)
842 } 670 }
843 } 671 }
844 } 672 }
845 } 673 }
846 return; 674 return;
847 } 675 }
848 676
849 case 1: 677 case 1:
850 case 2: { 678 case 2: {
851 const int prev_coef_contexts_to_update = 679 const int prev_coef_contexts_to_update =
852 (cpi->sf.use_fast_coef_updates == 2 ? 680 cpi->sf.use_fast_coef_updates == 2 ? COEFF_CONTEXTS >> 1
853 PREV_COEF_CONTEXTS >> 1 : PREV_COEF_CONTEXTS); 681 : COEFF_CONTEXTS;
854 const int coef_band_to_update = 682 const int coef_band_to_update =
855 (cpi->sf.use_fast_coef_updates == 2 ? 683 cpi->sf.use_fast_coef_updates == 2 ? COEF_BANDS >> 1
856 COEF_BANDS >> 1 : COEF_BANDS); 684 : COEF_BANDS;
857 int updates = 0; 685 int updates = 0;
858 int noupdates_before_first = 0; 686 int noupdates_before_first = 0;
859 for (i = 0; i < BLOCK_TYPES; ++i) { 687 for (i = 0; i < PLANE_TYPES; ++i) {
860 for (j = 0; j < REF_TYPES; ++j) { 688 for (j = 0; j < REF_TYPES; ++j) {
861 for (k = 0; k < COEF_BANDS; ++k) { 689 for (k = 0; k < COEF_BANDS; ++k) {
862 for (l = 0; l < PREV_COEF_CONTEXTS; ++l) { 690 for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) {
863 // calc probs and branch cts for this frame only 691 // calc probs and branch cts for this frame only
864 for (t = 0; t < entropy_nodes_update; ++t) { 692 for (t = 0; t < entropy_nodes_update; ++t) {
865 vp9_prob newp = new_frame_coef_probs[i][j][k][l][t]; 693 vp9_prob newp = new_frame_coef_probs[i][j][k][l][t];
866 vp9_prob *oldp = old_frame_coef_probs[i][j][k][l] + t; 694 vp9_prob *oldp = old_frame_coef_probs[i][j][k][l] + t;
867 int s; 695 int s;
868 int u = 0; 696 int u = 0;
869 if (l >= 3 && k == 0)
870 continue;
871 if (l >= prev_coef_contexts_to_update || 697 if (l >= prev_coef_contexts_to_update ||
872 k >= coef_band_to_update) { 698 k >= coef_band_to_update) {
873 u = 0; 699 u = 0;
874 } else { 700 } else {
875 if (t == PIVOT_NODE) 701 if (t == PIVOT_NODE)
876 s = vp9_prob_diff_update_savings_search_model( 702 s = vp9_prob_diff_update_savings_search_model(
877 frame_branch_ct[i][j][k][l][0], 703 frame_branch_ct[i][j][k][l][0],
878 old_frame_coef_probs[i][j][k][l], &newp, upd, i, j); 704 old_frame_coef_probs[i][j][k][l], &newp, upd, i, j);
879 else 705 else
880 s = vp9_prob_diff_update_savings_search( 706 s = vp9_prob_diff_update_savings_search(
(...skipping 37 matching lines...)
918 vp9_write_bit(bc, 0); // no updates 744 vp9_write_bit(bc, 0); // no updates
919 } 745 }
920 return; 746 return;
921 } 747 }
922 748
923 default: 749 default:
924 assert(0); 750 assert(0);
925 } 751 }
926 } 752 }
927 753
928 static void update_coef_probs(VP9_COMP* const cpi, vp9_writer* const bc) { 754 static void update_coef_probs(VP9_COMP* cpi, vp9_writer* w) {
929 const TX_MODE tx_mode = cpi->common.tx_mode; 755 const TX_MODE tx_mode = cpi->common.tx_mode;
930 756 const TX_SIZE max_tx_size = tx_mode_to_biggest_tx_size[tx_mode];
757 TX_SIZE tx_size;
931 vp9_clear_system_state(); 758 vp9_clear_system_state();
932 759
933 // Build the coefficient contexts based on counts collected in encode loop 760 for (tx_size = TX_4X4; tx_size <= TX_32X32; ++tx_size)
934 build_coeff_contexts(cpi); 761 build_tree_distribution(cpi, tx_size);
935 762
936 update_coef_probs_common(bc, cpi, TX_4X4); 763 for (tx_size = TX_4X4; tx_size <= max_tx_size; ++tx_size)
937 764 update_coef_probs_common(w, cpi, tx_size);
938 // do not do this if not even allowed
939 if (tx_mode > ONLY_4X4)
940 update_coef_probs_common(bc, cpi, TX_8X8);
941
942 if (tx_mode > ALLOW_8X8)
943 update_coef_probs_common(bc, cpi, TX_16X16);
944
945 if (tx_mode > ALLOW_16X16)
946 update_coef_probs_common(bc, cpi, TX_32X32);
947 } 765 }
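
Note: the rewritten update_coef_probs() replaces the cascaded tx_mode ifs with a lookup (tx_mode_to_biggest_tx_size) plus one loop from TX_4X4 up to the largest permitted transform. Conceptually the table is just this mapping (a sketch; the enum spellings mirror, rather than reproduce, the libvpx headers):

#include <stdio.h>

typedef enum { ONLY_4X4, ALLOW_8X8, ALLOW_16X16, ALLOW_32X32,
               TX_MODE_SELECT, TX_MODES } toy_tx_mode;
typedef enum { T_4X4, T_8X8, T_16X16, T_32X32 } toy_tx_size;

/* Largest transform size usable under each tx_mode. */
static const toy_tx_size toy_biggest_tx[TX_MODES] = {
  T_4X4,    /* ONLY_4X4 */
  T_8X8,    /* ALLOW_8X8 */
  T_16X16,  /* ALLOW_16X16 */
  T_32X32,  /* ALLOW_32X32 */
  T_32X32,  /* TX_MODE_SELECT: per-block choice, so all sizes possible */
};

int main(void) {
  int t;
  for (t = T_4X4; t <= (int)toy_biggest_tx[ALLOW_16X16]; ++t)
    printf("update probs for tx size %d\n", t);  /* 4x4, 8x8, 16x16 */
  return 0;
}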
948 766
949 static void encode_loopfilter(struct loopfilter *lf, 767 static void encode_loopfilter(struct loopfilter *lf,
950 struct vp9_write_bit_buffer *wb) { 768 struct vp9_write_bit_buffer *wb) {
951 int i; 769 int i;
952 770
953 // Encode the loop filter level and type 771 // Encode the loop filter level and type
954 vp9_wb_write_literal(wb, lf->filter_level, 6); 772 vp9_wb_write_literal(wb, lf->filter_level, 6);
955 vp9_wb_write_literal(wb, lf->sharpness_level, 3); 773 vp9_wb_write_literal(wb, lf->sharpness_level, 3);
956 774
(...skipping 273 matching lines...)
1230 1048
1231 for (tile_col = 1; tile_col < tile_cols; tile_col++) 1049 for (tile_col = 1; tile_col < tile_cols; tile_col++)
1232 tok[tile_row][tile_col] = tok[tile_row][tile_col - 1] + 1050 tok[tile_row][tile_col] = tok[tile_row][tile_col - 1] +
1233 cpi->tok_count[tile_row][tile_col - 1]; 1051 cpi->tok_count[tile_row][tile_col - 1];
1234 } 1052 }
1235 1053
1236 for (tile_row = 0; tile_row < tile_rows; tile_row++) { 1054 for (tile_row = 0; tile_row < tile_rows; tile_row++) {
1237 for (tile_col = 0; tile_col < tile_cols; tile_col++) { 1055 for (tile_col = 0; tile_col < tile_cols; tile_col++) {
1238 TileInfo tile; 1056 TileInfo tile;
1239 1057
1240 vp9_tile_init(&tile, cm, 0, tile_col); 1058 vp9_tile_init(&tile, cm, tile_row, tile_col);
1241 tok_end = tok[tile_row][tile_col] + cpi->tok_count[tile_row][tile_col]; 1059 tok_end = tok[tile_row][tile_col] + cpi->tok_count[tile_row][tile_col];
1242 1060
1243 if (tile_col < tile_cols - 1 || tile_row < tile_rows - 1) 1061 if (tile_col < tile_cols - 1 || tile_row < tile_rows - 1)
1244 vp9_start_encode(&residual_bc, data_ptr + total_size + 4); 1062 vp9_start_encode(&residual_bc, data_ptr + total_size + 4);
1245 else 1063 else
1246 vp9_start_encode(&residual_bc, data_ptr + total_size); 1064 vp9_start_encode(&residual_bc, data_ptr + total_size);
1247 1065
1248 write_modes(cpi, &tile, &residual_bc, &tok[tile_row][tile_col], tok_end); 1066 write_modes(cpi, &tile, &residual_bc, &tok[tile_row][tile_col], tok_end);
1249 assert(tok[tile_row][tile_col] == tok_end); 1067 assert(tok[tile_row][tile_col] == tok_end);
1250 vp9_stop_encode(&residual_bc); 1068 vp9_stop_encode(&residual_bc);
(...skipping 27 matching lines...)
1278 VP9_COMMON *const cm = &cpi->common; 1096 VP9_COMMON *const cm = &cpi->common;
1279 vp9_wb_write_literal(wb, cm->width - 1, 16); 1097 vp9_wb_write_literal(wb, cm->width - 1, 16);
1280 vp9_wb_write_literal(wb, cm->height - 1, 16); 1098 vp9_wb_write_literal(wb, cm->height - 1, 16);
1281 1099
1282 write_display_size(cpi, wb); 1100 write_display_size(cpi, wb);
1283 } 1101 }
1284 1102
1285 static void write_frame_size_with_refs(VP9_COMP *cpi, 1103 static void write_frame_size_with_refs(VP9_COMP *cpi,
1286 struct vp9_write_bit_buffer *wb) { 1104 struct vp9_write_bit_buffer *wb) {
1287 VP9_COMMON *const cm = &cpi->common; 1105 VP9_COMMON *const cm = &cpi->common;
1288 int refs[ALLOWED_REFS_PER_FRAME] = {cpi->lst_fb_idx, cpi->gld_fb_idx, 1106 int refs[REFS_PER_FRAME] = {cpi->lst_fb_idx, cpi->gld_fb_idx,
1289 cpi->alt_fb_idx}; 1107 cpi->alt_fb_idx};
1290 int i, found = 0; 1108 int i, found = 0;
1291 1109
1292 for (i = 0; i < ALLOWED_REFS_PER_FRAME; ++i) { 1110 for (i = 0; i < REFS_PER_FRAME; ++i) {
1293 YV12_BUFFER_CONFIG *cfg = &cm->yv12_fb[cm->ref_frame_map[refs[i]]]; 1111 YV12_BUFFER_CONFIG *cfg = &cm->yv12_fb[cm->ref_frame_map[refs[i]]];
1294 found = cm->width == cfg->y_crop_width && 1112 found = cm->width == cfg->y_crop_width &&
1295 cm->height == cfg->y_crop_height; 1113 cm->height == cfg->y_crop_height;
1296 1114
1297 // TODO(ivan): This prevents a bug while more than 3 buffers are used. Do it 1115 // TODO(ivan): This prevents a bug while more than 3 buffers are used. Do it
1298 // in a better way. 1116 // in a better way.
1299 if (cpi->use_svc) { 1117 if (cpi->use_svc) {
1300 found = 0; 1118 found = 0;
1301 } 1119 }
1302 vp9_wb_write_bit(wb, found); 1120 vp9_wb_write_bit(wb, found);
(...skipping 44 matching lines...)
1347 vp9_wb_write_bit(wb, cm->subsampling_y); 1165 vp9_wb_write_bit(wb, cm->subsampling_y);
1348 vp9_wb_write_bit(wb, 0); // has extra plane 1166 vp9_wb_write_bit(wb, 0); // has extra plane
1349 } 1167 }
1350 } else { 1168 } else {
1351 assert(cm->version == 1); 1169 assert(cm->version == 1);
1352 vp9_wb_write_bit(wb, 0); // has extra plane 1170 vp9_wb_write_bit(wb, 0); // has extra plane
1353 } 1171 }
1354 1172
1355 write_frame_size(cpi, wb); 1173 write_frame_size(cpi, wb);
1356 } else { 1174 } else {
1357 const int refs[ALLOWED_REFS_PER_FRAME] = {cpi->lst_fb_idx, cpi->gld_fb_idx, 1175 const int refs[REFS_PER_FRAME] = {cpi->lst_fb_idx, cpi->gld_fb_idx,
1358 cpi->alt_fb_idx}; 1176 cpi->alt_fb_idx};
1359 if (!cm->show_frame) 1177 if (!cm->show_frame)
1360 vp9_wb_write_bit(wb, cm->intra_only); 1178 vp9_wb_write_bit(wb, cm->intra_only);
1361 1179
1362 if (!cm->error_resilient_mode) 1180 if (!cm->error_resilient_mode)
1363 vp9_wb_write_literal(wb, cm->reset_frame_context, 2); 1181 vp9_wb_write_literal(wb, cm->reset_frame_context, 2);
1364 1182
1365 if (cm->intra_only) { 1183 if (cm->intra_only) {
1366 write_sync_code(wb); 1184 write_sync_code(wb);
1367 1185
1368 vp9_wb_write_literal(wb, get_refresh_mask(cpi), NUM_REF_FRAMES); 1186 vp9_wb_write_literal(wb, get_refresh_mask(cpi), REF_FRAMES);
1369 write_frame_size(cpi, wb); 1187 write_frame_size(cpi, wb);
1370 } else { 1188 } else {
1371 int i; 1189 int i;
1372 vp9_wb_write_literal(wb, get_refresh_mask(cpi), NUM_REF_FRAMES); 1190 vp9_wb_write_literal(wb, get_refresh_mask(cpi), REF_FRAMES);
1373 for (i = 0; i < ALLOWED_REFS_PER_FRAME; ++i) { 1191 for (i = 0; i < REFS_PER_FRAME; ++i) {
1374 vp9_wb_write_literal(wb, refs[i], NUM_REF_FRAMES_LOG2); 1192 vp9_wb_write_literal(wb, refs[i], REF_FRAMES_LOG2);
1375 vp9_wb_write_bit(wb, cm->ref_frame_sign_bias[LAST_FRAME + i]); 1193 vp9_wb_write_bit(wb, cm->ref_frame_sign_bias[LAST_FRAME + i]);
1376 } 1194 }
1377 1195
1378 write_frame_size_with_refs(cpi, wb); 1196 write_frame_size_with_refs(cpi, wb);
1379 1197
1380 vp9_wb_write_bit(wb, cm->allow_high_precision_mv); 1198 vp9_wb_write_bit(wb, cm->allow_high_precision_mv);
1381 1199
1382 fix_mcomp_filter_type(cpi); 1200 fix_mcomp_filter_type(cpi);
1383 write_interp_filter_type(cm->mcomp_filter_type, wb); 1201 write_interp_filter_type(cm->mcomp_filter_type, wb);
1384 } 1202 }
1385 } 1203 }
1386 1204
1387 if (!cm->error_resilient_mode) { 1205 if (!cm->error_resilient_mode) {
1388 vp9_wb_write_bit(wb, cm->refresh_frame_context); 1206 vp9_wb_write_bit(wb, cm->refresh_frame_context);
1389 vp9_wb_write_bit(wb, cm->frame_parallel_decoding_mode); 1207 vp9_wb_write_bit(wb, cm->frame_parallel_decoding_mode);
1390 } 1208 }
1391 1209
1392 vp9_wb_write_literal(wb, cm->frame_context_idx, NUM_FRAME_CONTEXTS_LOG2); 1210 vp9_wb_write_literal(wb, cm->frame_context_idx, FRAME_CONTEXTS_LOG2);
1393 1211
1394 encode_loopfilter(&cm->lf, wb); 1212 encode_loopfilter(&cm->lf, wb);
1395 encode_quantization(cm, wb); 1213 encode_quantization(cm, wb);
1396 encode_segmentation(cpi, wb); 1214 encode_segmentation(cpi, wb);
1397 1215
1398 write_tile_info(cm, wb); 1216 write_tile_info(cm, wb);
1399 } 1217 }
1400 1218
1401 static size_t write_compressed_header(VP9_COMP *cpi, uint8_t *data) { 1219 static size_t write_compressed_header(VP9_COMP *cpi, uint8_t *data) {
1402 VP9_COMMON *const cm = &cpi->common; 1220 VP9_COMMON *const cm = &cpi->common;
(...skipping 15 matching lines...)
1418 #endif 1236 #endif
1419 1237
1420 vp9_update_skip_probs(cpi, &header_bc); 1238 vp9_update_skip_probs(cpi, &header_bc);
1421 1239
1422 if (!frame_is_intra_only(cm)) { 1240 if (!frame_is_intra_only(cm)) {
1423 int i; 1241 int i;
1424 #ifdef ENTROPY_STATS 1242 #ifdef ENTROPY_STATS
1425 active_section = 1; 1243 active_section = 1;
1426 #endif 1244 #endif
1427 1245
1428 update_inter_mode_probs(cm, &header_bc); 1246 for (i = 0; i < INTER_MODE_CONTEXTS; ++i)
1247 prob_diff_update(vp9_inter_mode_tree, cm->fc.inter_mode_probs[i],
1248 cm->counts.inter_mode[i], INTER_MODES, &header_bc);
1249
1429 vp9_zero(cm->counts.inter_mode); 1250 vp9_zero(cm->counts.inter_mode);
1430 1251
1431 if (cm->mcomp_filter_type == SWITCHABLE) 1252 if (cm->mcomp_filter_type == SWITCHABLE)
1432 update_switchable_interp_probs(cpi, &header_bc); 1253 update_switchable_interp_probs(cpi, &header_bc);
1433 1254
1434 for (i = 0; i < INTRA_INTER_CONTEXTS; i++) 1255 for (i = 0; i < INTRA_INTER_CONTEXTS; i++)
1435 vp9_cond_prob_diff_update(&header_bc, &fc->intra_inter_prob[i], 1256 vp9_cond_prob_diff_update(&header_bc, &fc->intra_inter_prob[i],
1436 cpi->intra_inter_count[i]); 1257 cpi->intra_inter_count[i]);
1437 1258
1438 if (cm->allow_comp_inter_inter) { 1259 if (cm->allow_comp_inter_inter) {
1439 const int comp_pred_mode = cpi->common.comp_pred_mode; 1260 const int reference_mode = cpi->common.reference_mode;
1440 const int use_compound_pred = comp_pred_mode != SINGLE_PREDICTION_ONLY; 1261 const int use_compound_pred = reference_mode != SINGLE_REFERENCE;
1441 const int use_hybrid_pred = comp_pred_mode == HYBRID_PREDICTION; 1262 const int use_hybrid_pred = reference_mode == REFERENCE_MODE_SELECT;
1442 1263
1443 vp9_write_bit(&header_bc, use_compound_pred); 1264 vp9_write_bit(&header_bc, use_compound_pred);
1444 if (use_compound_pred) { 1265 if (use_compound_pred) {
1445 vp9_write_bit(&header_bc, use_hybrid_pred); 1266 vp9_write_bit(&header_bc, use_hybrid_pred);
1446 if (use_hybrid_pred) 1267 if (use_hybrid_pred)
1447 for (i = 0; i < COMP_INTER_CONTEXTS; i++) 1268 for (i = 0; i < COMP_INTER_CONTEXTS; i++)
1448 vp9_cond_prob_diff_update(&header_bc, &fc->comp_inter_prob[i], 1269 vp9_cond_prob_diff_update(&header_bc, &fc->comp_inter_prob[i],
1449 cpi->comp_inter_count[i]); 1270 cpi->comp_inter_count[i]);
1450 } 1271 }
1451 } 1272 }
1452 1273
1453 if (cm->comp_pred_mode != COMP_PREDICTION_ONLY) { 1274 if (cm->reference_mode != COMPOUND_REFERENCE) {
1454 for (i = 0; i < REF_CONTEXTS; i++) { 1275 for (i = 0; i < REF_CONTEXTS; i++) {
1455 vp9_cond_prob_diff_update(&header_bc, &fc->single_ref_prob[i][0], 1276 vp9_cond_prob_diff_update(&header_bc, &fc->single_ref_prob[i][0],
1456 cpi->single_ref_count[i][0]); 1277 cpi->single_ref_count[i][0]);
1457 vp9_cond_prob_diff_update(&header_bc, &fc->single_ref_prob[i][1], 1278 vp9_cond_prob_diff_update(&header_bc, &fc->single_ref_prob[i][1],
1458 cpi->single_ref_count[i][1]); 1279 cpi->single_ref_count[i][1]);
1459 } 1280 }
1460 } 1281 }
1461 1282
1462 if (cm->comp_pred_mode != SINGLE_PREDICTION_ONLY) 1283 if (cm->reference_mode != SINGLE_REFERENCE)
1463 for (i = 0; i < REF_CONTEXTS; i++) 1284 for (i = 0; i < REF_CONTEXTS; i++)
1464 vp9_cond_prob_diff_update(&header_bc, &fc->comp_ref_prob[i], 1285 vp9_cond_prob_diff_update(&header_bc, &fc->comp_ref_prob[i],
1465 cpi->comp_ref_count[i]); 1286 cpi->comp_ref_count[i]);
1466 1287
1467 update_mbintra_mode_probs(cpi, &header_bc); 1288 for (i = 0; i < BLOCK_SIZE_GROUPS; ++i)
1289 prob_diff_update(vp9_intra_mode_tree, cm->fc.y_mode_prob[i],
1290 (unsigned int *)cpi->y_mode_count[i], INTRA_MODES,
1291 &header_bc);
1468 1292
1469 for (i = 0; i < PARTITION_CONTEXTS; ++i) { 1293 for (i = 0; i < PARTITION_CONTEXTS; ++i)
1470 vp9_prob pnew[PARTITION_TYPES - 1]; 1294 prob_diff_update(vp9_partition_tree, fc->partition_prob[i],
1471 unsigned int bct[PARTITION_TYPES - 1][2]; 1295 cm->counts.partition[i], PARTITION_TYPES, &header_bc);
1472 update_mode(&header_bc, PARTITION_TYPES,
1473 vp9_partition_tree, pnew,
1474 fc->partition_prob[cm->frame_type][i], bct,
1475 (unsigned int *)cpi->partition_count[i]);
1476 }
1477 1296
1478 vp9_write_nmv_probs(cpi, cm->allow_high_precision_mv, &header_bc); 1297 vp9_write_nmv_probs(cpi, cm->allow_high_precision_mv, &header_bc);
1479 } 1298 }
1480 1299
1481 vp9_stop_encode(&header_bc); 1300 vp9_stop_encode(&header_bc);
1482 assert(header_bc.pos <= 0xffff); 1301 assert(header_bc.pos <= 0xffff);
1483 1302
1484 return header_bc.pos; 1303 return header_bc.pos;
1485 } 1304 }
1486 1305
1487 void vp9_pack_bitstream(VP9_COMP *cpi, uint8_t *dest, unsigned long *size) { 1306 void vp9_pack_bitstream(VP9_COMP *cpi, uint8_t *dest, size_t *size) {
1488 uint8_t *data = dest; 1307 uint8_t *data = dest;
1489 size_t first_part_size; 1308 size_t first_part_size;
1490 struct vp9_write_bit_buffer wb = {data, 0}; 1309 struct vp9_write_bit_buffer wb = {data, 0};
1491 struct vp9_write_bit_buffer saved_wb; 1310 struct vp9_write_bit_buffer saved_wb;
1492 1311
1493 write_uncompressed_header(cpi, &wb); 1312 write_uncompressed_header(cpi, &wb);
1494 saved_wb = wb; 1313 saved_wb = wb;
1495 vp9_wb_write_literal(&wb, 0, 16); // don't know in advance first part size 1314 vp9_wb_write_literal(&wb, 0, 16); // don't know in advance first part size
1496 1315
1497 data += vp9_rb_bytes_written(&wb); 1316 data += vp9_rb_bytes_written(&wb);
(...skipping 11 matching lines...) Expand all
1509 1328
1510 first_part_size = write_compressed_header(cpi, data); 1329 first_part_size = write_compressed_header(cpi, data);
1511 data += first_part_size; 1330 data += first_part_size;
1512 vp9_wb_write_literal(&saved_wb, first_part_size, 16); 1331 vp9_wb_write_literal(&saved_wb, first_part_size, 16);
1513 1332
1514 data += encode_tiles(cpi, data); 1333 data += encode_tiles(cpi, data);
1515 1334
1516 *size = data - dest; 1335 *size = data - dest;
1517 } 1336 }
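
Note: vp9_pack_bitstream() uses a reserve-and-patch layout: save the bit-buffer position (saved_wb), write a zero 16-bit literal where the first-part size belongs, emit the compressed header, then rewrite the reserved field once the real size is known. The same pattern over a plain byte buffer (illustrative only, not the libvpx write_bit_buffer API):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void) {
  uint8_t buf[64] = {0};
  size_t pos = 0;

  buf[pos++] = 0x82;                 /* stand-in for the uncompressed header */

  const size_t size_slot = pos;      /* remember where the size field lives */
  pos += 2;                          /* reserve 16 bits; value unknown yet */

  const size_t header_start = pos;
  memset(buf + pos, 0xAB, 5);        /* stand-in for the compressed header */
  pos += 5;

  const uint16_t first_part = (uint16_t)(pos - header_start);
  buf[size_slot] = (uint8_t)(first_part >> 8);       /* patch, big-endian */
  buf[size_slot + 1] = (uint8_t)(first_part & 0xff);

  printf("first_part_size = %u, total = %zu bytes\n", first_part, pos);
  return 0;
}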
1518 1337
1519 #ifdef ENTROPY_STATS
1520 static void print_tree_update_for_type(FILE *f,
1521 vp9_coeff_stats *tree_update_hist,
1522 int block_types, const char *header) {
1523 int i, j, k, l, m;
1524
1525 fprintf(f, "const vp9_coeff_prob %s = {\n", header);
1526 for (i = 0; i < block_types; i++) {
1527 fprintf(f, " { \n");
1528 for (j = 0; j < REF_TYPES; j++) {
1529 fprintf(f, " { \n");
1530 for (k = 0; k < COEF_BANDS; k++) {
1531 fprintf(f, " {\n");
1532 for (l = 0; l < PREV_COEF_CONTEXTS; l++) {
1533 fprintf(f, " {");
1534 for (m = 0; m < ENTROPY_NODES; m++) {
1535 fprintf(f, "%3d, ",
1536 get_binary_prob(tree_update_hist[i][j][k][l][m][0],
1537 tree_update_hist[i][j][k][l][m][1]));
1538 }
1539 fprintf(f, "},\n");
1540 }
1541 fprintf(f, "},\n");
1542 }
1543 fprintf(f, " },\n");
1544 }
1545 fprintf(f, " },\n");
1546 }
1547 fprintf(f, "};\n");
1548 }
1549
1550 void print_tree_update_probs() {
1551 FILE *f = fopen("coefupdprob.h", "w");
1552 fprintf(f, "\n/* Update probabilities for token entropy tree. */\n\n");
1553
1554 print_tree_update_for_type(f, tree_update_hist[TX_4X4], BLOCK_TYPES,
1555 "vp9_coef_update_probs_4x4[BLOCK_TYPES]");
1556 print_tree_update_for_type(f, tree_update_hist[TX_8X8], BLOCK_TYPES,
1557 "vp9_coef_update_probs_8x8[BLOCK_TYPES]");
1558 print_tree_update_for_type(f, tree_update_hist[TX_16X16], BLOCK_TYPES,
1559 "vp9_coef_update_probs_16x16[BLOCK_TYPES]");
1560 print_tree_update_for_type(f, tree_update_hist[TX_32X32], BLOCK_TYPES,
1561 "vp9_coef_update_probs_32x32[BLOCK_TYPES]");
1562
1563 fclose(f);
1564 f = fopen("treeupdate.bin", "wb");
1565 fwrite(tree_update_hist, sizeof(tree_update_hist), 1, f);
1566 fclose(f);
1567 }
1568 #endif