| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved. | 2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
| 5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
| 6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
| 7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
| 8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
| 9 */ | 9 */ |
| 10 | 10 |
| (...skipping 23 matching lines...) Expand all Loading... |
| 34 #include "vp9/encoder/vp9_segmentation.h" | 34 #include "vp9/encoder/vp9_segmentation.h" |
| 35 #include "vp9/encoder/vp9_subexp.h" | 35 #include "vp9/encoder/vp9_subexp.h" |
| 36 #include "vp9/encoder/vp9_write_bit_buffer.h" | 36 #include "vp9/encoder/vp9_write_bit_buffer.h" |
| 37 | 37 |
| 38 | 38 |
| 39 #if defined(SECTIONBITS_OUTPUT) | 39 #if defined(SECTIONBITS_OUTPUT) |
| 40 unsigned __int64 Sectionbits[500]; | 40 unsigned __int64 Sectionbits[500]; |
| 41 #endif | 41 #endif |
| 42 | 42 |
| 43 #ifdef ENTROPY_STATS | 43 #ifdef ENTROPY_STATS |
| 44 int intra_mode_stats[VP9_INTRA_MODES] | 44 int intra_mode_stats[INTRA_MODES] |
| 45 [VP9_INTRA_MODES] | 45 [INTRA_MODES] |
| 46 [VP9_INTRA_MODES]; | 46 [INTRA_MODES]; |
| 47 vp9_coeff_stats tree_update_hist[TX_SIZE_MAX_SB][BLOCK_TYPES]; | 47 vp9_coeff_stats tree_update_hist[TX_SIZES][BLOCK_TYPES]; |
| 48 | 48 |
| 49 extern unsigned int active_section; | 49 extern unsigned int active_section; |
| 50 #endif | 50 #endif |
| 51 | 51 |
| 52 | 52 |
| 53 #ifdef MODE_STATS | 53 #ifdef MODE_STATS |
| 54 int64_t tx_count_32x32p_stats[TX_SIZE_CONTEXTS][TX_SIZE_MAX_SB]; | 54 int64_t tx_count_32x32p_stats[TX_SIZE_CONTEXTS][TX_SIZES]; |
| 55 int64_t tx_count_16x16p_stats[TX_SIZE_CONTEXTS][TX_SIZE_MAX_SB - 1]; | 55 int64_t tx_count_16x16p_stats[TX_SIZE_CONTEXTS][TX_SIZES - 1]; |
| 56 int64_t tx_count_8x8p_stats[TX_SIZE_CONTEXTS][TX_SIZE_MAX_SB - 2]; | 56 int64_t tx_count_8x8p_stats[TX_SIZE_CONTEXTS][TX_SIZES - 2]; |
| 57 int64_t switchable_interp_stats[VP9_SWITCHABLE_FILTERS+1] | 57 int64_t switchable_interp_stats[SWITCHABLE_FILTERS+1] |
| 58 [VP9_SWITCHABLE_FILTERS]; | 58 [SWITCHABLE_FILTERS]; |
| 59 | 59 |
| 60 void init_tx_count_stats() { | 60 void init_tx_count_stats() { |
| 61 vp9_zero(tx_count_32x32p_stats); | 61 vp9_zero(tx_count_32x32p_stats); |
| 62 vp9_zero(tx_count_16x16p_stats); | 62 vp9_zero(tx_count_16x16p_stats); |
| 63 vp9_zero(tx_count_8x8p_stats); | 63 vp9_zero(tx_count_8x8p_stats); |
| 64 } | 64 } |
| 65 | 65 |
| 66 void init_switchable_interp_stats() { | 66 void init_switchable_interp_stats() { |
| 67 vp9_zero(switchable_interp_stats); | 67 vp9_zero(switchable_interp_stats); |
| 68 } | 68 } |
| 69 | 69 |
| 70 static void update_tx_count_stats(VP9_COMMON *cm) { | 70 static void update_tx_count_stats(VP9_COMMON *cm) { |
| 71 int i, j; | 71 int i, j; |
| 72 for (i = 0; i < TX_SIZE_CONTEXTS; i++) { | 72 for (i = 0; i < TX_SIZE_CONTEXTS; i++) { |
| 73 for (j = 0; j < TX_SIZE_MAX_SB; j++) { | 73 for (j = 0; j < TX_SIZES; j++) { |
| 74 tx_count_32x32p_stats[i][j] += cm->fc.tx_count_32x32p[i][j]; | 74 tx_count_32x32p_stats[i][j] += cm->fc.tx_count_32x32p[i][j]; |
| 75 } | 75 } |
| 76 } | 76 } |
| 77 for (i = 0; i < TX_SIZE_CONTEXTS; i++) { | 77 for (i = 0; i < TX_SIZE_CONTEXTS; i++) { |
| 78 for (j = 0; j < TX_SIZE_MAX_SB - 1; j++) { | 78 for (j = 0; j < TX_SIZES - 1; j++) { |
| 79 tx_count_16x16p_stats[i][j] += cm->fc.tx_count_16x16p[i][j]; | 79 tx_count_16x16p_stats[i][j] += cm->fc.tx_count_16x16p[i][j]; |
| 80 } | 80 } |
| 81 } | 81 } |
| 82 for (i = 0; i < TX_SIZE_CONTEXTS; i++) { | 82 for (i = 0; i < TX_SIZE_CONTEXTS; i++) { |
| 83 for (j = 0; j < TX_SIZE_MAX_SB - 2; j++) { | 83 for (j = 0; j < TX_SIZES - 2; j++) { |
| 84 tx_count_8x8p_stats[i][j] += cm->fc.tx_count_8x8p[i][j]; | 84 tx_count_8x8p_stats[i][j] += cm->fc.tx_count_8x8p[i][j]; |
| 85 } | 85 } |
| 86 } | 86 } |
| 87 } | 87 } |
| 88 | 88 |
| 89 static void update_switchable_interp_stats(VP9_COMMON *cm) { | 89 static void update_switchable_interp_stats(VP9_COMMON *cm) { |
| 90 int i, j; | 90 int i, j; |
| 91 for (i = 0; i < VP9_SWITCHABLE_FILTERS+1; ++i) | 91 for (i = 0; i < SWITCHABLE_FILTERS+1; ++i) |
| 92 for (j = 0; j < VP9_SWITCHABLE_FILTERS; ++j) { | 92 for (j = 0; j < SWITCHABLE_FILTERS; ++j) { |
| 93 switchable_interp_stats[i][j] += cm->fc.switchable_interp_count[i][j]; | 93 switchable_interp_stats[i][j] += cm->fc.switchable_interp_count[i][j]; |
| 94 } | 94 } |
| 95 } | 95 } |
| 96 | 96 |
| 97 void write_tx_count_stats() { | 97 void write_tx_count_stats() { |
| 98 int i, j; | 98 int i, j; |
| 99 FILE *fp = fopen("tx_count.bin", "wb"); | 99 FILE *fp = fopen("tx_count.bin", "wb"); |
| 100 fwrite(tx_count_32x32p_stats, sizeof(tx_count_32x32p_stats), 1, fp); | 100 fwrite(tx_count_32x32p_stats, sizeof(tx_count_32x32p_stats), 1, fp); |
| 101 fwrite(tx_count_16x16p_stats, sizeof(tx_count_16x16p_stats), 1, fp); | 101 fwrite(tx_count_16x16p_stats, sizeof(tx_count_16x16p_stats), 1, fp); |
| 102 fwrite(tx_count_8x8p_stats, sizeof(tx_count_8x8p_stats), 1, fp); | 102 fwrite(tx_count_8x8p_stats, sizeof(tx_count_8x8p_stats), 1, fp); |
| 103 fclose(fp); | 103 fclose(fp); |
| 104 | 104 |
| 105 printf( | 105 printf( |
| 106 "vp9_default_tx_count_32x32p[TX_SIZE_CONTEXTS][TX_SIZE_MAX_SB] = {\n"); | 106 "vp9_default_tx_count_32x32p[TX_SIZE_CONTEXTS][TX_SIZES] = {\n"); |
| 107 for (i = 0; i < TX_SIZE_CONTEXTS; i++) { | 107 for (i = 0; i < TX_SIZE_CONTEXTS; i++) { |
| 108 printf(" { "); | 108 printf(" { "); |
| 109 for (j = 0; j < TX_SIZE_MAX_SB; j++) { | 109 for (j = 0; j < TX_SIZES; j++) { |
| 110 printf("%"PRId64", ", tx_count_32x32p_stats[i][j]); | 110 printf("%"PRId64", ", tx_count_32x32p_stats[i][j]); |
| 111 } | 111 } |
| 112 printf("},\n"); | 112 printf("},\n"); |
| 113 } | 113 } |
| 114 printf("};\n"); | 114 printf("};\n"); |
| 115 printf( | 115 printf( |
| 116 "vp9_default_tx_count_16x16p[TX_SIZE_CONTEXTS][TX_SIZE_MAX_SB-1] = {\n"); | 116 "vp9_default_tx_count_16x16p[TX_SIZE_CONTEXTS][TX_SIZES-1] = {\n"); |
| 117 for (i = 0; i < TX_SIZE_CONTEXTS; i++) { | 117 for (i = 0; i < TX_SIZE_CONTEXTS; i++) { |
| 118 printf(" { "); | 118 printf(" { "); |
| 119 for (j = 0; j < TX_SIZE_MAX_SB - 1; j++) { | 119 for (j = 0; j < TX_SIZES - 1; j++) { |
| 120 printf("%"PRId64", ", tx_count_16x16p_stats[i][j]); | 120 printf("%"PRId64", ", tx_count_16x16p_stats[i][j]); |
| 121 } | 121 } |
| 122 printf("},\n"); | 122 printf("},\n"); |
| 123 } | 123 } |
| 124 printf("};\n"); | 124 printf("};\n"); |
| 125 printf( | 125 printf( |
| 126 "vp9_default_tx_count_8x8p[TX_SIZE_CONTEXTS][TX_SIZE_MAX_SB-2] = {\n"); | 126 "vp9_default_tx_count_8x8p[TX_SIZE_CONTEXTS][TX_SIZES-2] = {\n"); |
| 127 for (i = 0; i < TX_SIZE_CONTEXTS; i++) { | 127 for (i = 0; i < TX_SIZE_CONTEXTS; i++) { |
| 128 printf(" { "); | 128 printf(" { "); |
| 129 for (j = 0; j < TX_SIZE_MAX_SB - 2; j++) { | 129 for (j = 0; j < TX_SIZES - 2; j++) { |
| 130 printf("%"PRId64", ", tx_count_8x8p_stats[i][j]); | 130 printf("%"PRId64", ", tx_count_8x8p_stats[i][j]); |
| 131 } | 131 } |
| 132 printf("},\n"); | 132 printf("},\n"); |
| 133 } | 133 } |
| 134 printf("};\n"); | 134 printf("};\n"); |
| 135 } | 135 } |
| 136 | 136 |
| 137 void write_switchable_interp_stats() { | 137 void write_switchable_interp_stats() { |
| 138 int i, j; | 138 int i, j; |
| 139 FILE *fp = fopen("switchable_interp.bin", "wb"); | 139 FILE *fp = fopen("switchable_interp.bin", "wb"); |
| 140 fwrite(switchable_interp_stats, sizeof(switchable_interp_stats), 1, fp); | 140 fwrite(switchable_interp_stats, sizeof(switchable_interp_stats), 1, fp); |
| 141 fclose(fp); | 141 fclose(fp); |
| 142 | 142 |
| 143 printf( | 143 printf( |
| 144 "vp9_default_switchable_filter_count[VP9_SWITCHABLE_FILTERS+1]" | 144 "vp9_default_switchable_filter_count[SWITCHABLE_FILTERS+1]" |
| 145 "[VP9_SWITCHABLE_FILTERS] = {\n"); | 145 "[SWITCHABLE_FILTERS] = {\n"); |
| 146 for (i = 0; i < VP9_SWITCHABLE_FILTERS+1; i++) { | 146 for (i = 0; i < SWITCHABLE_FILTERS+1; i++) { |
| 147 printf(" { "); | 147 printf(" { "); |
| 148 for (j = 0; j < VP9_SWITCHABLE_FILTERS; j++) { | 148 for (j = 0; j < SWITCHABLE_FILTERS; j++) { |
| 149 printf("%"PRId64", ", switchable_interp_stats[i][j]); | 149 printf("%"PRId64", ", switchable_interp_stats[i][j]); |
| 150 } | 150 } |
| 151 printf("},\n"); | 151 printf("},\n"); |
| 152 } | 152 } |
| 153 printf("};\n"); | 153 printf("};\n"); |
| 154 } | 154 } |
| 155 #endif | 155 #endif |
| 156 | 156 |
| 157 static INLINE void write_be32(uint8_t *p, int value) { | 157 static INLINE void write_be32(uint8_t *p, int value) { |
| 158 p[0] = value >> 24; | 158 p[0] = value >> 24; |
| 159 p[1] = value >> 16; | 159 p[1] = value >> 16; |
| 160 p[2] = value >> 8; | 160 p[2] = value >> 8; |
| 161 p[3] = value; | 161 p[3] = value; |
| 162 } | 162 } |
| 163 | 163 |
| 164 void vp9_encode_unsigned_max(struct vp9_write_bit_buffer *wb, | 164 void vp9_encode_unsigned_max(struct vp9_write_bit_buffer *wb, |
| 165 int data, int max) { | 165 int data, int max) { |
| 166 vp9_wb_write_literal(wb, data, get_unsigned_bits(max)); | 166 vp9_wb_write_literal(wb, data, get_unsigned_bits(max)); |
| 167 } | 167 } |
| 168 | 168 |
| 169 static void update_mode( | 169 static void update_mode( |
| 170 vp9_writer *w, | 170 vp9_writer *w, |
| 171 int n, | 171 int n, |
| 172 const struct vp9_token tok[/* n */], | |
| 173 vp9_tree tree, | 172 vp9_tree tree, |
| 174 vp9_prob Pnew[/* n-1 */], | 173 vp9_prob Pnew[/* n-1 */], |
| 175 vp9_prob Pcur[/* n-1 */], | 174 vp9_prob Pcur[/* n-1 */], |
| 176 unsigned int bct[/* n-1 */] [2], | 175 unsigned int bct[/* n-1 */] [2], |
| 177 const unsigned int num_events[/* n */] | 176 const unsigned int num_events[/* n */] |
| 178 ) { | 177 ) { |
| 179 int i = 0; | 178 int i = 0; |
| 180 | 179 |
| 181 vp9_tree_probs_from_distribution(tree, Pnew, bct, num_events, 0); | 180 vp9_tree_probs_from_distribution(tree, Pnew, bct, num_events, 0); |
| 182 n--; | 181 n--; |
| 183 | 182 |
| 184 for (i = 0; i < n; ++i) { | 183 for (i = 0; i < n; ++i) { |
| 185 vp9_cond_prob_diff_update(w, &Pcur[i], VP9_MODE_UPDATE_PROB, bct[i]); | 184 vp9_cond_prob_diff_update(w, &Pcur[i], MODE_UPDATE_PROB, bct[i]); |
| 186 } | 185 } |
| 187 } | 186 } |
| 188 | 187 |
| 189 static void update_mbintra_mode_probs(VP9_COMP* const cpi, | 188 static void update_mbintra_mode_probs(VP9_COMP* const cpi, |
| 190 vp9_writer* const bc) { | 189 vp9_writer* const bc) { |
| 191 VP9_COMMON *const cm = &cpi->common; | 190 VP9_COMMON *const cm = &cpi->common; |
| 192 int j; | 191 int j; |
| 193 vp9_prob pnew[VP9_INTRA_MODES - 1]; | 192 vp9_prob pnew[INTRA_MODES - 1]; |
| 194 unsigned int bct[VP9_INTRA_MODES - 1][2]; | 193 unsigned int bct[INTRA_MODES - 1][2]; |
| 195 | 194 |
| 196 for (j = 0; j < BLOCK_SIZE_GROUPS; j++) | 195 for (j = 0; j < BLOCK_SIZE_GROUPS; j++) |
| 197 update_mode(bc, VP9_INTRA_MODES, vp9_intra_mode_encodings, | 196 update_mode(bc, INTRA_MODES, vp9_intra_mode_tree, pnew, |
| 198 vp9_intra_mode_tree, pnew, | |
| 199 cm->fc.y_mode_prob[j], bct, | 197 cm->fc.y_mode_prob[j], bct, |
| 200 (unsigned int *)cpi->y_mode_count[j]); | 198 (unsigned int *)cpi->y_mode_count[j]); |
| 201 } | 199 } |
| 202 | 200 |
| 203 static void write_selected_txfm_size(const VP9_COMP *cpi, TX_SIZE tx_size, | 201 static void write_selected_tx_size(const VP9_COMP *cpi, TX_SIZE tx_size, |
| 204 BLOCK_SIZE_TYPE bsize, vp9_writer *w) { | 202 BLOCK_SIZE bsize, vp9_writer *w) { |
| 205 const MACROBLOCKD *const xd = &cpi->mb.e_mbd; | 203 const MACROBLOCKD *const xd = &cpi->mb.e_mbd; |
| 206 const vp9_prob *tx_probs = get_tx_probs2(xd, &cpi->common.fc.tx_probs); | 204 const vp9_prob *tx_probs = get_tx_probs2(xd, &cpi->common.fc.tx_probs); |
| 207 vp9_write(w, tx_size != TX_4X4, tx_probs[0]); | 205 vp9_write(w, tx_size != TX_4X4, tx_probs[0]); |
| 208 if (bsize >= BLOCK_SIZE_MB16X16 && tx_size != TX_4X4) { | 206 if (bsize >= BLOCK_16X16 && tx_size != TX_4X4) { |
| 209 vp9_write(w, tx_size != TX_8X8, tx_probs[1]); | 207 vp9_write(w, tx_size != TX_8X8, tx_probs[1]); |
| 210 if (bsize >= BLOCK_SIZE_SB32X32 && tx_size != TX_8X8) | 208 if (bsize >= BLOCK_32X32 && tx_size != TX_8X8) |
| 211 vp9_write(w, tx_size != TX_16X16, tx_probs[2]); | 209 vp9_write(w, tx_size != TX_16X16, tx_probs[2]); |
| 212 } | 210 } |
| 213 } | 211 } |
| 214 | 212 |
| 215 static int write_skip_coeff(const VP9_COMP *cpi, int segment_id, MODE_INFO *m, | 213 static int write_skip_coeff(const VP9_COMP *cpi, int segment_id, MODE_INFO *m, |
| 216 vp9_writer *w) { | 214 vp9_writer *w) { |
| 217 const MACROBLOCKD *const xd = &cpi->mb.e_mbd; | 215 const MACROBLOCKD *const xd = &cpi->mb.e_mbd; |
| 218 if (vp9_segfeature_active(&xd->seg, segment_id, SEG_LVL_SKIP)) { | 216 if (vp9_segfeature_active(&cpi->common.seg, segment_id, SEG_LVL_SKIP)) { |
| 219 return 1; | 217 return 1; |
| 220 } else { | 218 } else { |
| 221 const int skip_coeff = m->mbmi.mb_skip_coeff; | 219 const int skip_coeff = m->mbmi.skip_coeff; |
| 222 vp9_write(w, skip_coeff, vp9_get_pred_prob_mbskip(&cpi->common, xd)); | 220 vp9_write(w, skip_coeff, vp9_get_pred_prob_mbskip(&cpi->common, xd)); |
| 223 return skip_coeff; | 221 return skip_coeff; |
| 224 } | 222 } |
| 225 } | 223 } |
| 226 | 224 |
| 227 void vp9_update_skip_probs(VP9_COMP *cpi, vp9_writer *w) { | 225 void vp9_update_skip_probs(VP9_COMP *cpi, vp9_writer *w) { |
| 228 VP9_COMMON *cm = &cpi->common; | 226 VP9_COMMON *cm = &cpi->common; |
| 229 int k; | 227 int k; |
| 230 | 228 |
| 231 for (k = 0; k < MBSKIP_CONTEXTS; ++k) | 229 for (k = 0; k < MBSKIP_CONTEXTS; ++k) |
| 232 vp9_cond_prob_diff_update(w, &cm->fc.mbskip_probs[k], | 230 vp9_cond_prob_diff_update(w, &cm->fc.mbskip_probs[k], |
| 233 VP9_MODE_UPDATE_PROB, cm->counts.mbskip[k]); | 231 MODE_UPDATE_PROB, cm->counts.mbskip[k]); |
| 234 } | 232 } |
| 235 | 233 |
| 236 static void write_intra_mode(vp9_writer *bc, int m, const vp9_prob *p) { | 234 static void write_intra_mode(vp9_writer *bc, int m, const vp9_prob *p) { |
| 237 write_token(bc, vp9_intra_mode_tree, p, vp9_intra_mode_encodings + m); | 235 write_token(bc, vp9_intra_mode_tree, p, vp9_intra_mode_encodings + m); |
| 238 } | 236 } |
| 239 | 237 |
| 240 static void update_switchable_interp_probs(VP9_COMP *const cpi, | 238 static void update_switchable_interp_probs(VP9_COMP *const cpi, |
| 241 vp9_writer* const bc) { | 239 vp9_writer* const bc) { |
| 242 VP9_COMMON *const pc = &cpi->common; | 240 VP9_COMMON *const pc = &cpi->common; |
| 243 unsigned int branch_ct[VP9_SWITCHABLE_FILTERS + 1] | 241 unsigned int branch_ct[SWITCHABLE_FILTERS + 1] |
| 244 [VP9_SWITCHABLE_FILTERS - 1][2]; | 242 [SWITCHABLE_FILTERS - 1][2]; |
| 245 vp9_prob new_prob[VP9_SWITCHABLE_FILTERS + 1][VP9_SWITCHABLE_FILTERS - 1]; | 243 vp9_prob new_prob[SWITCHABLE_FILTERS + 1][SWITCHABLE_FILTERS - 1]; |
| 246 int i, j; | 244 int i, j; |
| 247 for (j = 0; j <= VP9_SWITCHABLE_FILTERS; ++j) { | 245 for (j = 0; j <= SWITCHABLE_FILTERS; ++j) { |
| 248 vp9_tree_probs_from_distribution( | 246 vp9_tree_probs_from_distribution( |
| 249 vp9_switchable_interp_tree, | 247 vp9_switchable_interp_tree, |
| 250 new_prob[j], branch_ct[j], | 248 new_prob[j], branch_ct[j], |
| 251 pc->counts.switchable_interp[j], 0); | 249 pc->counts.switchable_interp[j], 0); |
| 252 } | 250 } |
| 253 for (j = 0; j <= VP9_SWITCHABLE_FILTERS; ++j) { | 251 for (j = 0; j <= SWITCHABLE_FILTERS; ++j) { |
| 254 for (i = 0; i < VP9_SWITCHABLE_FILTERS - 1; ++i) { | 252 for (i = 0; i < SWITCHABLE_FILTERS - 1; ++i) { |
| 255 vp9_cond_prob_diff_update(bc, &pc->fc.switchable_interp_prob[j][i], | 253 vp9_cond_prob_diff_update(bc, &pc->fc.switchable_interp_prob[j][i], |
| 256 VP9_MODE_UPDATE_PROB, branch_ct[j][i]); | 254 MODE_UPDATE_PROB, branch_ct[j][i]); |
| 257 } | 255 } |
| 258 } | 256 } |
| 259 #ifdef MODE_STATS | 257 #ifdef MODE_STATS |
| 260 if (!cpi->dummy_packing) | 258 if (!cpi->dummy_packing) |
| 261 update_switchable_interp_stats(pc); | 259 update_switchable_interp_stats(pc); |
| 262 #endif | 260 #endif |
| 263 } | 261 } |
| 264 | 262 |
| 265 static void update_inter_mode_probs(VP9_COMMON *pc, vp9_writer* const bc) { | 263 static void update_inter_mode_probs(VP9_COMMON *pc, vp9_writer* const bc) { |
| 266 int i, j; | 264 int i, j; |
| 267 | 265 |
| 268 for (i = 0; i < INTER_MODE_CONTEXTS; i++) { | 266 for (i = 0; i < INTER_MODE_CONTEXTS; ++i) { |
| 269 for (j = 0; j < VP9_INTER_MODES - 1; j++) { | 267 unsigned int branch_ct[INTER_MODES - 1][2]; |
| 268 vp9_prob new_prob[INTER_MODES - 1]; |
| 269 |
| 270 vp9_tree_probs_from_distribution(vp9_inter_mode_tree, |
| 271 new_prob, branch_ct, |
| 272 pc->counts.inter_mode[i], NEARESTMV); |
| 273 |
| 274 for (j = 0; j < INTER_MODES - 1; ++j) |
| 270 vp9_cond_prob_diff_update(bc, &pc->fc.inter_mode_probs[i][j], | 275 vp9_cond_prob_diff_update(bc, &pc->fc.inter_mode_probs[i][j], |
| 271 VP9_MODE_UPDATE_PROB, | 276 MODE_UPDATE_PROB, branch_ct[j]); |
| 272 pc->counts.inter_mode[i][j]); | |
| 273 } | |
| 274 } | 277 } |
| 275 } | 278 } |
| 276 | 279 |
| 277 static void pack_mb_tokens(vp9_writer* const bc, | 280 static void pack_mb_tokens(vp9_writer* const bc, |
| 278 TOKENEXTRA **tp, | 281 TOKENEXTRA **tp, |
| 279 const TOKENEXTRA *const stop) { | 282 const TOKENEXTRA *const stop) { |
| 280 TOKENEXTRA *p = *tp; | 283 TOKENEXTRA *p = *tp; |
| 281 | 284 |
| 282 while (p < stop) { | 285 while (p < stop) { |
| 283 const int t = p->token; | 286 const int t = p->token; |
| (...skipping 67 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 351 treed_write(w, vp9_segment_tree, seg->tree_probs, segment_id, 3); | 354 treed_write(w, vp9_segment_tree, seg->tree_probs, segment_id, 3); |
| 352 } | 355 } |
| 353 | 356 |
| 354 // This function encodes the reference frame | 357 // This function encodes the reference frame |
| 355 static void encode_ref_frame(VP9_COMP *cpi, vp9_writer *bc) { | 358 static void encode_ref_frame(VP9_COMP *cpi, vp9_writer *bc) { |
| 356 VP9_COMMON *const pc = &cpi->common; | 359 VP9_COMMON *const pc = &cpi->common; |
| 357 MACROBLOCK *const x = &cpi->mb; | 360 MACROBLOCK *const x = &cpi->mb; |
| 358 MACROBLOCKD *const xd = &x->e_mbd; | 361 MACROBLOCKD *const xd = &x->e_mbd; |
| 359 MB_MODE_INFO *mi = &xd->mode_info_context->mbmi; | 362 MB_MODE_INFO *mi = &xd->mode_info_context->mbmi; |
| 360 const int segment_id = mi->segment_id; | 363 const int segment_id = mi->segment_id; |
| 361 int seg_ref_active = vp9_segfeature_active(&xd->seg, segment_id, | 364 int seg_ref_active = vp9_segfeature_active(&pc->seg, segment_id, |
| 362 SEG_LVL_REF_FRAME); | 365 SEG_LVL_REF_FRAME); |
| 363 // If segment level coding of this signal is disabled... | 366 // If segment level coding of this signal is disabled... |
| 364 // or the segment allows multiple reference frame options | 367 // or the segment allows multiple reference frame options |
| 365 if (!seg_ref_active) { | 368 if (!seg_ref_active) { |
| 366 // does the feature use compound prediction or not | 369 // does the feature use compound prediction or not |
| 367 // (if not specified at the frame/segment level) | 370 // (if not specified at the frame/segment level) |
| 368 if (pc->comp_pred_mode == HYBRID_PREDICTION) { | 371 if (pc->comp_pred_mode == HYBRID_PREDICTION) { |
| 369 vp9_write(bc, mi->ref_frame[1] > INTRA_FRAME, | 372 vp9_write(bc, mi->ref_frame[1] > INTRA_FRAME, |
| 370 vp9_get_pred_prob_comp_inter_inter(pc, xd)); | 373 vp9_get_pred_prob_comp_inter_inter(pc, xd)); |
| 371 } else { | 374 } else { |
| 372 assert((mi->ref_frame[1] <= INTRA_FRAME) == | 375 assert((mi->ref_frame[1] <= INTRA_FRAME) == |
| 373 (pc->comp_pred_mode == SINGLE_PREDICTION_ONLY)); | 376 (pc->comp_pred_mode == SINGLE_PREDICTION_ONLY)); |
| 374 } | 377 } |
| 375 | 378 |
| 376 if (mi->ref_frame[1] > INTRA_FRAME) { | 379 if (mi->ref_frame[1] > INTRA_FRAME) { |
| 377 vp9_write(bc, mi->ref_frame[0] == GOLDEN_FRAME, | 380 vp9_write(bc, mi->ref_frame[0] == GOLDEN_FRAME, |
| 378 vp9_get_pred_prob_comp_ref_p(pc, xd)); | 381 vp9_get_pred_prob_comp_ref_p(pc, xd)); |
| 379 } else { | 382 } else { |
| 380 vp9_write(bc, mi->ref_frame[0] != LAST_FRAME, | 383 vp9_write(bc, mi->ref_frame[0] != LAST_FRAME, |
| 381 vp9_get_pred_prob_single_ref_p1(pc, xd)); | 384 vp9_get_pred_prob_single_ref_p1(pc, xd)); |
| 382 if (mi->ref_frame[0] != LAST_FRAME) | 385 if (mi->ref_frame[0] != LAST_FRAME) |
| 383 vp9_write(bc, mi->ref_frame[0] != GOLDEN_FRAME, | 386 vp9_write(bc, mi->ref_frame[0] != GOLDEN_FRAME, |
| 384 vp9_get_pred_prob_single_ref_p2(pc, xd)); | 387 vp9_get_pred_prob_single_ref_p2(pc, xd)); |
| 385 } | 388 } |
| 386 } else { | 389 } else { |
| 387 assert(mi->ref_frame[1] <= INTRA_FRAME); | 390 assert(mi->ref_frame[1] <= INTRA_FRAME); |
| 388 assert(vp9_get_segdata(&xd->seg, segment_id, SEG_LVL_REF_FRAME) == | 391 assert(vp9_get_segdata(&pc->seg, segment_id, SEG_LVL_REF_FRAME) == |
| 389 mi->ref_frame[0]); | 392 mi->ref_frame[0]); |
| 390 } | 393 } |
| 391 | 394 |
| 392 // if using the prediction model we have nothing further to do because | 395 // if using the prediction model we have nothing further to do because |
| 393 // the reference frame is fully coded by the segment | 396 // the reference frame is fully coded by the segment |
| 394 } | 397 } |
| 395 | 398 |
| 396 static void pack_inter_mode_mvs(VP9_COMP *cpi, MODE_INFO *m, | 399 static void pack_inter_mode_mvs(VP9_COMP *cpi, MODE_INFO *m, vp9_writer *bc) { |
| 397 vp9_writer *bc, int mi_row, int mi_col) { | |
| 398 VP9_COMMON *const pc = &cpi->common; | 400 VP9_COMMON *const pc = &cpi->common; |
| 399 const nmv_context *nmvc = &pc->fc.nmvc; | 401 const nmv_context *nmvc = &pc->fc.nmvc; |
| 400 MACROBLOCK *const x = &cpi->mb; | 402 MACROBLOCK *const x = &cpi->mb; |
| 401 MACROBLOCKD *const xd = &x->e_mbd; | 403 MACROBLOCKD *const xd = &x->e_mbd; |
| 402 struct segmentation *seg = &xd->seg; | 404 struct segmentation *seg = &pc->seg; |
| 403 MB_MODE_INFO *const mi = &m->mbmi; | 405 MB_MODE_INFO *const mi = &m->mbmi; |
| 404 const MV_REFERENCE_FRAME rf = mi->ref_frame[0]; | 406 const MV_REFERENCE_FRAME rf = mi->ref_frame[0]; |
| 405 const MB_PREDICTION_MODE mode = mi->mode; | 407 const MB_PREDICTION_MODE mode = mi->mode; |
| 406 const int segment_id = mi->segment_id; | 408 const int segment_id = mi->segment_id; |
| 407 int skip_coeff; | 409 int skip_coeff; |
| 408 const BLOCK_SIZE_TYPE bsize = mi->sb_type; | 410 const BLOCK_SIZE bsize = mi->sb_type; |
| 411 const int allow_hp = xd->allow_high_precision_mv; |
| 409 | 412 |
| 410 x->partition_info = x->pi + (m - pc->mi); | 413 x->partition_info = x->pi + (m - pc->mi); |
| 411 | 414 |
| 412 #ifdef ENTROPY_STATS | 415 #ifdef ENTROPY_STATS |
| 413 active_section = 9; | 416 active_section = 9; |
| 414 #endif | 417 #endif |
| 415 | 418 |
| 416 if (seg->update_map) { | 419 if (seg->update_map) { |
| 417 if (seg->temporal_update) { | 420 if (seg->temporal_update) { |
| 418 const int pred_flag = mi->seg_id_predicted; | 421 const int pred_flag = mi->seg_id_predicted; |
| 419 vp9_prob pred_prob = vp9_get_pred_prob_seg_id(xd); | 422 vp9_prob pred_prob = vp9_get_pred_prob_seg_id(seg, xd); |
| 420 vp9_write(bc, pred_flag, pred_prob); | 423 vp9_write(bc, pred_flag, pred_prob); |
| 421 if (!pred_flag) | 424 if (!pred_flag) |
| 422 write_segment_id(bc, seg, segment_id); | 425 write_segment_id(bc, seg, segment_id); |
| 423 } else { | 426 } else { |
| 424 write_segment_id(bc, seg, segment_id); | 427 write_segment_id(bc, seg, segment_id); |
| 425 } | 428 } |
| 426 } | 429 } |
| 427 | 430 |
| 428 skip_coeff = write_skip_coeff(cpi, segment_id, m, bc); | 431 skip_coeff = write_skip_coeff(cpi, segment_id, m, bc); |
| 429 | 432 |
| 430 if (!vp9_segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME)) | 433 if (!vp9_segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME)) |
| 431 vp9_write(bc, rf != INTRA_FRAME, | 434 vp9_write(bc, rf != INTRA_FRAME, |
| 432 vp9_get_pred_prob_intra_inter(pc, xd)); | 435 vp9_get_pred_prob_intra_inter(pc, xd)); |
| 433 | 436 |
| 434 if (bsize >= BLOCK_SIZE_SB8X8 && pc->tx_mode == TX_MODE_SELECT && | 437 if (bsize >= BLOCK_8X8 && pc->tx_mode == TX_MODE_SELECT && |
| 435 !(rf != INTRA_FRAME && | 438 !(rf != INTRA_FRAME && |
| 436 (skip_coeff || vp9_segfeature_active(seg, segment_id, SEG_LVL_SKIP)))) { | 439 (skip_coeff || vp9_segfeature_active(seg, segment_id, SEG_LVL_SKIP)))) { |
| 437 write_selected_txfm_size(cpi, mi->txfm_size, bsize, bc); | 440 write_selected_tx_size(cpi, mi->txfm_size, bsize, bc); |
| 438 } | 441 } |
| 439 | 442 |
| 440 if (rf == INTRA_FRAME) { | 443 if (rf == INTRA_FRAME) { |
| 441 #ifdef ENTROPY_STATS | 444 #ifdef ENTROPY_STATS |
| 442 active_section = 6; | 445 active_section = 6; |
| 443 #endif | 446 #endif |
| 444 | 447 |
| 445 if (bsize >= BLOCK_SIZE_SB8X8) { | 448 if (bsize >= BLOCK_8X8) { |
| 446 const int bwl = b_width_log2(bsize), bhl = b_height_log2(bsize); | 449 write_intra_mode(bc, mode, pc->fc.y_mode_prob[size_group_lookup[bsize]]); |
| 447 const int bsl = MIN(bwl, bhl); | |
| 448 write_intra_mode(bc, mode, pc->fc.y_mode_prob[MIN(3, bsl)]); | |
| 449 } else { | 450 } else { |
| 450 int idx, idy; | 451 int idx, idy; |
| 451 int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize]; | 452 const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize]; |
| 452 int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize]; | 453 const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize]; |
| 453 for (idy = 0; idy < 2; idy += num_4x4_blocks_high) | 454 for (idy = 0; idy < 2; idy += num_4x4_blocks_high) { |
| 454 for (idx = 0; idx < 2; idx += num_4x4_blocks_wide) { | 455 for (idx = 0; idx < 2; idx += num_4x4_blocks_wide) { |
| 455 const MB_PREDICTION_MODE bm = m->bmi[idy * 2 + idx].as_mode; | 456 const MB_PREDICTION_MODE bm = m->bmi[idy * 2 + idx].as_mode; |
| 456 write_intra_mode(bc, bm, pc->fc.y_mode_prob[0]); | 457 write_intra_mode(bc, bm, pc->fc.y_mode_prob[0]); |
| 457 } | 458 } |
| 459 } |
| 458 } | 460 } |
| 459 write_intra_mode(bc, mi->uv_mode, pc->fc.uv_mode_prob[mode]); | 461 write_intra_mode(bc, mi->uv_mode, pc->fc.uv_mode_prob[mode]); |
| 460 } else { | 462 } else { |
| 461 vp9_prob *mv_ref_p; | 463 vp9_prob *mv_ref_p; |
| 462 encode_ref_frame(cpi, bc); | 464 encode_ref_frame(cpi, bc); |
| 463 mv_ref_p = cpi->common.fc.inter_mode_probs[mi->mb_mode_context[rf]]; | 465 mv_ref_p = cpi->common.fc.inter_mode_probs[mi->mode_context[rf]]; |
| 464 | 466 |
| 465 #ifdef ENTROPY_STATS | 467 #ifdef ENTROPY_STATS |
| 466 active_section = 3; | 468 active_section = 3; |
| 467 #endif | 469 #endif |
| 468 | 470 |
| 469 // If segment skip is not enabled code the mode. | 471 // If segment skip is not enabled code the mode. |
| 470 if (!vp9_segfeature_active(seg, segment_id, SEG_LVL_SKIP)) { | 472 if (!vp9_segfeature_active(seg, segment_id, SEG_LVL_SKIP)) { |
| 471 if (bsize >= BLOCK_SIZE_SB8X8) { | 473 if (bsize >= BLOCK_8X8) { |
| 472 write_sb_mv_ref(bc, mode, mv_ref_p); | 474 write_sb_mv_ref(bc, mode, mv_ref_p); |
| 473 vp9_accum_mv_refs(&cpi->common, mode, mi->mb_mode_context[rf]); | 475 ++pc->counts.inter_mode[mi->mode_context[rf]] |
| 476 [inter_mode_offset(mode)]; |
| 474 } | 477 } |
| 475 } | 478 } |
| 476 | 479 |
| 477 if (cpi->common.mcomp_filter_type == SWITCHABLE) { | 480 if (pc->mcomp_filter_type == SWITCHABLE) { |
| 481 const int ctx = vp9_get_pred_context_switchable_interp(xd); |
| 478 write_token(bc, vp9_switchable_interp_tree, | 482 write_token(bc, vp9_switchable_interp_tree, |
| 479 vp9_get_pred_probs_switchable_interp(&cpi->common, xd), | 483 pc->fc.switchable_interp_prob[ctx], |
| 480 vp9_switchable_interp_encodings + | 484 &vp9_switchable_interp_encodings[mi->interp_filter]); |
| 481 vp9_switchable_interp_map[mi->interp_filter]); | |
| 482 } else { | 485 } else { |
| 483 assert(mi->interp_filter == cpi->common.mcomp_filter_type); | 486 assert(mi->interp_filter == pc->mcomp_filter_type); |
| 484 } | 487 } |
| 485 | 488 |
| 486 if (bsize < BLOCK_SIZE_SB8X8) { | 489 if (bsize < BLOCK_8X8) { |
| 487 int j; | 490 int j; |
| 488 MB_PREDICTION_MODE blockmode; | 491 MB_PREDICTION_MODE blockmode; |
| 489 int_mv blockmv; | 492 int_mv blockmv; |
| 490 int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize]; | 493 const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize]; |
| 491 int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize]; | 494 const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize]; |
| 492 int idx, idy; | 495 int idx, idy; |
| 493 for (idy = 0; idy < 2; idy += num_4x4_blocks_high) { | 496 for (idy = 0; idy < 2; idy += num_4x4_blocks_high) { |
| 494 for (idx = 0; idx < 2; idx += num_4x4_blocks_wide) { | 497 for (idx = 0; idx < 2; idx += num_4x4_blocks_wide) { |
| 495 j = idy * 2 + idx; | 498 j = idy * 2 + idx; |
| 496 blockmode = x->partition_info->bmi[j].mode; | 499 blockmode = x->partition_info->bmi[j].mode; |
| 497 blockmv = m->bmi[j].as_mv[0]; | 500 blockmv = m->bmi[j].as_mv[0]; |
| 498 write_sb_mv_ref(bc, blockmode, mv_ref_p); | 501 write_sb_mv_ref(bc, blockmode, mv_ref_p); |
| 499 vp9_accum_mv_refs(&cpi->common, blockmode, mi->mb_mode_context[rf]); | 502 ++pc->counts.inter_mode[mi->mode_context[rf]] |
| 503 [inter_mode_offset(blockmode)]; |
| 504 |
| 500 if (blockmode == NEWMV) { | 505 if (blockmode == NEWMV) { |
| 501 #ifdef ENTROPY_STATS | 506 #ifdef ENTROPY_STATS |
| 502 active_section = 11; | 507 active_section = 11; |
| 503 #endif | 508 #endif |
| 504 vp9_encode_mv(cpi, bc, &blockmv.as_mv, &mi->best_mv.as_mv, | 509 vp9_encode_mv(cpi, bc, &blockmv.as_mv, &mi->best_mv.as_mv, |
| 505 nmvc, xd->allow_high_precision_mv); | 510 nmvc, allow_hp); |
| 506 | 511 |
| 507 if (mi->ref_frame[1] > INTRA_FRAME) | 512 if (mi->ref_frame[1] > INTRA_FRAME) |
| 508 vp9_encode_mv(cpi, bc, | 513 vp9_encode_mv(cpi, bc, |
| 509 &m->bmi[j].as_mv[1].as_mv, | 514 &m->bmi[j].as_mv[1].as_mv, |
| 510 &mi->best_second_mv.as_mv, | 515 &mi->best_second_mv.as_mv, |
| 511 nmvc, xd->allow_high_precision_mv); | 516 nmvc, allow_hp); |
| 512 } | 517 } |
| 513 } | 518 } |
| 514 } | 519 } |
| 515 } else if (mode == NEWMV) { | 520 } else if (mode == NEWMV) { |
| 516 #ifdef ENTROPY_STATS | 521 #ifdef ENTROPY_STATS |
| 517 active_section = 5; | 522 active_section = 5; |
| 518 #endif | 523 #endif |
| 519 vp9_encode_mv(cpi, bc, | 524 vp9_encode_mv(cpi, bc, &mi->mv[0].as_mv, &mi->best_mv.as_mv, |
| 520 &mi->mv[0].as_mv, &mi->best_mv.as_mv, | 525 nmvc, allow_hp); |
| 521 nmvc, xd->allow_high_precision_mv); | |
| 522 | 526 |
| 523 if (mi->ref_frame[1] > INTRA_FRAME) | 527 if (mi->ref_frame[1] > INTRA_FRAME) |
| 524 vp9_encode_mv(cpi, bc, | 528 vp9_encode_mv(cpi, bc, &mi->mv[1].as_mv, &mi->best_second_mv.as_mv, |
| 525 &mi->mv[1].as_mv, &mi->best_second_mv.as_mv, | 529 nmvc, allow_hp); |
| 526 nmvc, xd->allow_high_precision_mv); | |
| 527 } | 530 } |
| 528 } | 531 } |
| 529 } | 532 } |
| 530 | 533 |
| 531 static void write_mb_modes_kf(const VP9_COMP *cpi, | 534 static void write_mb_modes_kf(const VP9_COMP *cpi, MODE_INFO *m, |
| 532 MODE_INFO *m, | 535 vp9_writer *bc) { |
| 533 vp9_writer *bc, int mi_row, int mi_col) { | |
| 534 const VP9_COMMON *const c = &cpi->common; | 536 const VP9_COMMON *const c = &cpi->common; |
| 535 const MACROBLOCKD *const xd = &cpi->mb.e_mbd; | 537 const MACROBLOCKD *const xd = &cpi->mb.e_mbd; |
| 538 const struct segmentation *const seg = &c->seg; |
| 536 const int ym = m->mbmi.mode; | 539 const int ym = m->mbmi.mode; |
| 537 const int mis = c->mode_info_stride; | 540 const int mis = c->mode_info_stride; |
| 538 const int segment_id = m->mbmi.segment_id; | 541 const int segment_id = m->mbmi.segment_id; |
| 539 | 542 |
| 540 if (xd->seg.update_map) | 543 if (seg->update_map) |
| 541 write_segment_id(bc, &xd->seg, m->mbmi.segment_id); | 544 write_segment_id(bc, seg, m->mbmi.segment_id); |
| 542 | 545 |
| 543 write_skip_coeff(cpi, segment_id, m, bc); | 546 write_skip_coeff(cpi, segment_id, m, bc); |
| 544 | 547 |
| 545 if (m->mbmi.sb_type >= BLOCK_SIZE_SB8X8 && c->tx_mode == TX_MODE_SELECT) | 548 if (m->mbmi.sb_type >= BLOCK_8X8 && c->tx_mode == TX_MODE_SELECT) |
| 546 write_selected_txfm_size(cpi, m->mbmi.txfm_size, m->mbmi.sb_type, bc); | 549 write_selected_tx_size(cpi, m->mbmi.txfm_size, m->mbmi.sb_type, bc); |
| 547 | 550 |
| 548 if (m->mbmi.sb_type >= BLOCK_SIZE_SB8X8) { | 551 if (m->mbmi.sb_type >= BLOCK_8X8) { |
| 549 const MB_PREDICTION_MODE A = above_block_mode(m, 0, mis); | 552 const MB_PREDICTION_MODE A = above_block_mode(m, 0, mis); |
| 550 const MB_PREDICTION_MODE L = xd->left_available ? | 553 const MB_PREDICTION_MODE L = xd->left_available ? |
| 551 left_block_mode(m, 0) : DC_PRED; | 554 left_block_mode(m, 0) : DC_PRED; |
| 552 write_intra_mode(bc, ym, vp9_kf_y_mode_prob[A][L]); | 555 write_intra_mode(bc, ym, vp9_kf_y_mode_prob[A][L]); |
| 553 } else { | 556 } else { |
| 554 int idx, idy; | 557 int idx, idy; |
| 555 int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[m->mbmi.sb_type]; | 558 const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[m->mbmi.sb_type]; |
| 556 int num_4x4_blocks_high = num_4x4_blocks_high_lookup[m->mbmi.sb_type]; | 559 const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[m->mbmi.sb_type]; |
| 557 for (idy = 0; idy < 2; idy += num_4x4_blocks_high) { | 560 for (idy = 0; idy < 2; idy += num_4x4_blocks_high) { |
| 558 for (idx = 0; idx < 2; idx += num_4x4_blocks_wide) { | 561 for (idx = 0; idx < 2; idx += num_4x4_blocks_wide) { |
| 559 int i = idy * 2 + idx; | 562 const int i = idy * 2 + idx; |
| 560 const MB_PREDICTION_MODE A = above_block_mode(m, i, mis); | 563 const MB_PREDICTION_MODE A = above_block_mode(m, i, mis); |
| 561 const MB_PREDICTION_MODE L = (xd->left_available || idx) ? | 564 const MB_PREDICTION_MODE L = (xd->left_available || idx) ? |
| 562 left_block_mode(m, i) : DC_PRED; | 565 left_block_mode(m, i) : DC_PRED; |
| 563 const int bm = m->bmi[i].as_mode; | 566 const int bm = m->bmi[i].as_mode; |
| 564 #ifdef ENTROPY_STATS | 567 #ifdef ENTROPY_STATS |
| 565 ++intra_mode_stats[A][L][bm]; | 568 ++intra_mode_stats[A][L][bm]; |
| 566 #endif | 569 #endif |
| 567 write_intra_mode(bc, bm, vp9_kf_y_mode_prob[A][L]); | 570 write_intra_mode(bc, bm, vp9_kf_y_mode_prob[A][L]); |
| 568 } | 571 } |
| 569 } | 572 } |
| 570 } | 573 } |
| 571 | 574 |
| 572 write_intra_mode(bc, m->mbmi.uv_mode, vp9_kf_uv_mode_prob[ym]); | 575 write_intra_mode(bc, m->mbmi.uv_mode, vp9_kf_uv_mode_prob[ym]); |
| 573 } | 576 } |
| 574 | 577 |
| 575 static void write_modes_b(VP9_COMP *cpi, MODE_INFO *m, vp9_writer *bc, | 578 static void write_modes_b(VP9_COMP *cpi, MODE_INFO *m, vp9_writer *bc, |
| 576 TOKENEXTRA **tok, TOKENEXTRA *tok_end, | 579 TOKENEXTRA **tok, TOKENEXTRA *tok_end, |
| 577 int mi_row, int mi_col) { | 580 int mi_row, int mi_col) { |
| 578 VP9_COMMON *const cm = &cpi->common; | 581 VP9_COMMON *const cm = &cpi->common; |
| 579 MACROBLOCKD *const xd = &cpi->mb.e_mbd; | 582 MACROBLOCKD *const xd = &cpi->mb.e_mbd; |
| 580 | 583 |
| 581 if (m->mbmi.sb_type < BLOCK_SIZE_SB8X8) | 584 if (m->mbmi.sb_type < BLOCK_8X8) |
| 582 if (xd->ab_index > 0) | 585 if (xd->ab_index > 0) |
| 583 return; | 586 return; |
| 584 xd->mode_info_context = m; | 587 xd->mode_info_context = m; |
| 585 set_mi_row_col(&cpi->common, xd, mi_row, | 588 set_mi_row_col(&cpi->common, xd, mi_row, |
| 586 1 << mi_height_log2(m->mbmi.sb_type), | 589 1 << mi_height_log2(m->mbmi.sb_type), |
| 587 mi_col, 1 << mi_width_log2(m->mbmi.sb_type)); | 590 mi_col, 1 << mi_width_log2(m->mbmi.sb_type)); |
| 588 if ((cm->frame_type == KEY_FRAME) || cm->intra_only) { | 591 if ((cm->frame_type == KEY_FRAME) || cm->intra_only) { |
| 589 write_mb_modes_kf(cpi, m, bc, mi_row, mi_col); | 592 write_mb_modes_kf(cpi, m, bc); |
| 590 #ifdef ENTROPY_STATS | 593 #ifdef ENTROPY_STATS |
| 591 active_section = 8; | 594 active_section = 8; |
| 592 #endif | 595 #endif |
| 593 } else { | 596 } else { |
| 594 pack_inter_mode_mvs(cpi, m, bc, mi_row, mi_col); | 597 pack_inter_mode_mvs(cpi, m, bc); |
| 595 #ifdef ENTROPY_STATS | 598 #ifdef ENTROPY_STATS |
| 596 active_section = 1; | 599 active_section = 1; |
| 597 #endif | 600 #endif |
| 598 } | 601 } |
| 599 | 602 |
| 600 assert(*tok < tok_end); | 603 assert(*tok < tok_end); |
| 601 pack_mb_tokens(bc, tok, tok_end); | 604 pack_mb_tokens(bc, tok, tok_end); |
| 602 } | 605 } |
| 603 | 606 |
| 604 static void write_modes_sb(VP9_COMP *cpi, MODE_INFO *m, vp9_writer *bc, | 607 static void write_modes_sb(VP9_COMP *cpi, MODE_INFO *m, vp9_writer *bc, |
| 605 TOKENEXTRA **tok, TOKENEXTRA *tok_end, | 608 TOKENEXTRA **tok, TOKENEXTRA *tok_end, |
| 606 int mi_row, int mi_col, | 609 int mi_row, int mi_col, BLOCK_SIZE bsize) { |
| 607 BLOCK_SIZE_TYPE bsize) { | |
| 608 VP9_COMMON *const cm = &cpi->common; | 610 VP9_COMMON *const cm = &cpi->common; |
| 609 MACROBLOCKD *xd = &cpi->mb.e_mbd; | 611 MACROBLOCKD *xd = &cpi->mb.e_mbd; |
| 610 const int mis = cm->mode_info_stride; | 612 const int mis = cm->mode_info_stride; |
| 611 int bsl = b_width_log2(bsize); | 613 int bsl = b_width_log2(bsize); |
| 612 int bs = (1 << bsl) / 4; // mode_info step for subsize | 614 int bs = (1 << bsl) / 4; // mode_info step for subsize |
| 613 int n; | 615 int n; |
| 614 PARTITION_TYPE partition = PARTITION_NONE; | 616 PARTITION_TYPE partition = PARTITION_NONE; |
| 615 BLOCK_SIZE_TYPE subsize; | 617 BLOCK_SIZE subsize; |
| 616 | 618 |
| 617 if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) | 619 if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) |
| 618 return; | 620 return; |
| 619 | 621 |
| 620 partition = partition_lookup[bsl][m->mbmi.sb_type]; | 622 partition = partition_lookup[bsl][m->mbmi.sb_type]; |
| 621 | 623 |
| 622 if (bsize < BLOCK_SIZE_SB8X8) | 624 if (bsize < BLOCK_8X8) |
| 623 if (xd->ab_index > 0) | 625 if (xd->ab_index > 0) |
| 624 return; | 626 return; |
| 625 | 627 |
| 626 if (bsize >= BLOCK_SIZE_SB8X8) { | 628 if (bsize >= BLOCK_8X8) { |
| 627 int pl; | 629 int pl; |
| 628 const int idx = check_bsize_coverage(cm, xd, mi_row, mi_col, bsize); | 630 const int idx = check_bsize_coverage(bs, cm->mi_rows, cm->mi_cols, |
| 631 mi_row, mi_col); |
| 629 set_partition_seg_context(cm, xd, mi_row, mi_col); | 632 set_partition_seg_context(cm, xd, mi_row, mi_col); |
| 630 pl = partition_plane_context(xd, bsize); | 633 pl = partition_plane_context(xd, bsize); |
| 631 // encode the partition information | 634 // encode the partition information |
| 632 if (idx == 0) | 635 if (idx == 0) |
| 633 write_token(bc, vp9_partition_tree, | 636 write_token(bc, vp9_partition_tree, |
| 634 cm->fc.partition_prob[cm->frame_type][pl], | 637 cm->fc.partition_prob[cm->frame_type][pl], |
| 635 vp9_partition_encodings + partition); | 638 vp9_partition_encodings + partition); |
| 636 else if (idx > 0) | 639 else if (idx > 0) |
| 637 vp9_write(bc, partition == PARTITION_SPLIT, | 640 vp9_write(bc, partition == PARTITION_SPLIT, |
| 638 cm->fc.partition_prob[cm->frame_type][pl][idx]); | 641 cm->fc.partition_prob[cm->frame_type][pl][idx]); |
| (...skipping 24 matching lines...) Expand all Loading... |
| 663 *(get_sb_index(xd, subsize)) = n; | 666 *(get_sb_index(xd, subsize)) = n; |
| 664 write_modes_sb(cpi, m + j * bs * mis + i * bs, bc, tok, tok_end, | 667 write_modes_sb(cpi, m + j * bs * mis + i * bs, bc, tok, tok_end, |
| 665 mi_row + j * bs, mi_col + i * bs, subsize); | 668 mi_row + j * bs, mi_col + i * bs, subsize); |
| 666 } | 669 } |
| 667 break; | 670 break; |
| 668 default: | 671 default: |
| 669 assert(0); | 672 assert(0); |
| 670 } | 673 } |
| 671 | 674 |
| 672 // update partition context | 675 // update partition context |
| 673 if (bsize >= BLOCK_SIZE_SB8X8 && | 676 if (bsize >= BLOCK_8X8 && |
| 674 (bsize == BLOCK_SIZE_SB8X8 || partition != PARTITION_SPLIT)) { | 677 (bsize == BLOCK_8X8 || partition != PARTITION_SPLIT)) { |
| 675 set_partition_seg_context(cm, xd, mi_row, mi_col); | 678 set_partition_seg_context(cm, xd, mi_row, mi_col); |
| 676 update_partition_context(xd, subsize, bsize); | 679 update_partition_context(xd, subsize, bsize); |
| 677 } | 680 } |
| 678 } | 681 } |
| 679 | 682 |
| 680 static void write_modes(VP9_COMP *cpi, vp9_writer* const bc, | 683 static void write_modes(VP9_COMP *cpi, vp9_writer* const bc, |
| 681 TOKENEXTRA **tok, TOKENEXTRA *tok_end) { | 684 TOKENEXTRA **tok, TOKENEXTRA *tok_end) { |
| 682 VP9_COMMON *const c = &cpi->common; | 685 VP9_COMMON *const c = &cpi->common; |
| 683 const int mis = c->mode_info_stride; | 686 const int mis = c->mode_info_stride; |
| 684 MODE_INFO *m, *m_ptr = c->mi; | 687 MODE_INFO *m, *m_ptr = c->mi; |
| 685 int mi_row, mi_col; | 688 int mi_row, mi_col; |
| 686 | 689 |
| 687 m_ptr += c->cur_tile_mi_col_start + c->cur_tile_mi_row_start * mis; | 690 m_ptr += c->cur_tile_mi_col_start + c->cur_tile_mi_row_start * mis; |
| 688 | 691 |
| 689 for (mi_row = c->cur_tile_mi_row_start; mi_row < c->cur_tile_mi_row_end; | 692 for (mi_row = c->cur_tile_mi_row_start; mi_row < c->cur_tile_mi_row_end; |
| 690 mi_row += 8, m_ptr += 8 * mis) { | 693 mi_row += 8, m_ptr += 8 * mis) { |
| 691 m = m_ptr; | 694 m = m_ptr; |
| 692 vp9_zero(c->left_seg_context); | 695 vp9_zero(c->left_seg_context); |
| 693 for (mi_col = c->cur_tile_mi_col_start; mi_col < c->cur_tile_mi_col_end; | 696 for (mi_col = c->cur_tile_mi_col_start; mi_col < c->cur_tile_mi_col_end; |
| 694 mi_col += MI_BLOCK_SIZE, m += MI_BLOCK_SIZE) | 697 mi_col += MI_BLOCK_SIZE, m += MI_BLOCK_SIZE) |
| 695 write_modes_sb(cpi, m, bc, tok, tok_end, mi_row, mi_col, | 698 write_modes_sb(cpi, m, bc, tok, tok_end, mi_row, mi_col, BLOCK_64X64); |
| 696 BLOCK_SIZE_SB64X64); | |
| 697 } | 699 } |
| 698 } | 700 } |
| 699 | 701 |
| 700 /* This function is used for debugging probability trees. */ | 702 /* This function is used for debugging probability trees. */ |
| 701 static void print_prob_tree(vp9_coeff_probs *coef_probs, int block_types) { | 703 static void print_prob_tree(vp9_coeff_probs *coef_probs, int block_types) { |
| 702 /* print coef probability tree */ | 704 /* print coef probability tree */ |
| 703 int i, j, k, l, m; | 705 int i, j, k, l, m; |
| 704 FILE *f = fopen("enc_tree_probs.txt", "a"); | 706 FILE *f = fopen("enc_tree_probs.txt", "a"); |
| 705 fprintf(f, "{\n"); | 707 fprintf(f, "{\n"); |
| 706 for (i = 0; i < block_types; i++) { | 708 for (i = 0; i < block_types; i++) { |
| (...skipping 12 matching lines...) Expand all Loading... |
| 719 fprintf(f, " }\n"); | 721 fprintf(f, " }\n"); |
| 720 } | 722 } |
| 721 fprintf(f, " }\n"); | 723 fprintf(f, " }\n"); |
| 722 } | 724 } |
| 723 fprintf(f, " }\n"); | 725 fprintf(f, " }\n"); |
| 724 } | 726 } |
| 725 fprintf(f, "}\n"); | 727 fprintf(f, "}\n"); |
| 726 fclose(f); | 728 fclose(f); |
| 727 } | 729 } |
| 728 | 730 |
| 729 static void build_tree_distribution(VP9_COMP *cpi, TX_SIZE txfm_size) { | 731 static void build_tree_distribution(VP9_COMP *cpi, TX_SIZE tx_size) { |
| 730 vp9_coeff_probs_model *coef_probs = cpi->frame_coef_probs[txfm_size]; | 732 vp9_coeff_probs_model *coef_probs = cpi->frame_coef_probs[tx_size]; |
| 731 vp9_coeff_count *coef_counts = cpi->coef_counts[txfm_size]; | 733 vp9_coeff_count *coef_counts = cpi->coef_counts[tx_size]; |
| 732 unsigned int (*eob_branch_ct)[REF_TYPES][COEF_BANDS][PREV_COEF_CONTEXTS] = | 734 unsigned int (*eob_branch_ct)[REF_TYPES][COEF_BANDS][PREV_COEF_CONTEXTS] = |
| 733 cpi->common.counts.eob_branch[txfm_size]; | 735 cpi->common.counts.eob_branch[tx_size]; |
| 734 vp9_coeff_stats *coef_branch_ct = cpi->frame_branch_ct[txfm_size]; | 736 vp9_coeff_stats *coef_branch_ct = cpi->frame_branch_ct[tx_size]; |
| 735 vp9_prob full_probs[ENTROPY_NODES]; | 737 vp9_prob full_probs[ENTROPY_NODES]; |
| 736 int i, j, k, l; | 738 int i, j, k, l; |
| 737 | 739 |
| 738 for (i = 0; i < BLOCK_TYPES; ++i) { | 740 for (i = 0; i < BLOCK_TYPES; ++i) { |
| 739 for (j = 0; j < REF_TYPES; ++j) { | 741 for (j = 0; j < REF_TYPES; ++j) { |
| 740 for (k = 0; k < COEF_BANDS; ++k) { | 742 for (k = 0; k < COEF_BANDS; ++k) { |
| 741 for (l = 0; l < PREV_COEF_CONTEXTS; ++l) { | 743 for (l = 0; l < PREV_COEF_CONTEXTS; ++l) { |
| 742 if (l >= 3 && k == 0) | 744 if (l >= 3 && k == 0) |
| 743 continue; | 745 continue; |
| 744 vp9_tree_probs_from_distribution(vp9_coef_tree, | 746 vp9_tree_probs_from_distribution(vp9_coef_tree, |
| 745 full_probs, | 747 full_probs, |
| 746 coef_branch_ct[i][j][k][l], | 748 coef_branch_ct[i][j][k][l], |
| 747 coef_counts[i][j][k][l], 0); | 749 coef_counts[i][j][k][l], 0); |
| 748 vpx_memcpy(coef_probs[i][j][k][l], full_probs, | 750 vpx_memcpy(coef_probs[i][j][k][l], full_probs, |
| 749 sizeof(vp9_prob) * UNCONSTRAINED_NODES); | 751 sizeof(vp9_prob) * UNCONSTRAINED_NODES); |
| 750 coef_branch_ct[i][j][k][l][0][1] = eob_branch_ct[i][j][k][l] - | 752 coef_branch_ct[i][j][k][l][0][1] = eob_branch_ct[i][j][k][l] - |
| 751 coef_branch_ct[i][j][k][l][0][0]; | 753 coef_branch_ct[i][j][k][l][0][0]; |
| 752 coef_probs[i][j][k][l][0] = | 754 coef_probs[i][j][k][l][0] = |
| 753 get_binary_prob(coef_branch_ct[i][j][k][l][0][0], | 755 get_binary_prob(coef_branch_ct[i][j][k][l][0][0], |
| 754 coef_branch_ct[i][j][k][l][0][1]); | 756 coef_branch_ct[i][j][k][l][0][1]); |
| 755 #ifdef ENTROPY_STATS | 757 #ifdef ENTROPY_STATS |
| 756 if (!cpi->dummy_packing) { | 758 if (!cpi->dummy_packing) { |
| 757 int t; | 759 int t; |
| 758 for (t = 0; t < MAX_ENTROPY_TOKENS; ++t) | 760 for (t = 0; t < MAX_ENTROPY_TOKENS; ++t) |
| 759 context_counters[txfm_size][i][j][k][l][t] += | 761 context_counters[tx_size][i][j][k][l][t] += |
| 760 coef_counts[i][j][k][l][t]; | 762 coef_counts[i][j][k][l][t]; |
| 761 context_counters[txfm_size][i][j][k][l][MAX_ENTROPY_TOKENS] += | 763 context_counters[tx_size][i][j][k][l][MAX_ENTROPY_TOKENS] += |
| 762 eob_branch_ct[i][j][k][l]; | 764 eob_branch_ct[i][j][k][l]; |
| 763 } | 765 } |
| 764 #endif | 766 #endif |
| 765 } | 767 } |
| 766 } | 768 } |
| 767 } | 769 } |
| 768 } | 770 } |
| 769 } | 771 } |
| 770 | 772 |
| 771 static void build_coeff_contexts(VP9_COMP *cpi) { | 773 static void build_coeff_contexts(VP9_COMP *cpi) { |
| (...skipping 187 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 959 write_delta_q(wb, cm->y_dc_delta_q); | 961 write_delta_q(wb, cm->y_dc_delta_q); |
| 960 write_delta_q(wb, cm->uv_dc_delta_q); | 962 write_delta_q(wb, cm->uv_dc_delta_q); |
| 961 write_delta_q(wb, cm->uv_ac_delta_q); | 963 write_delta_q(wb, cm->uv_ac_delta_q); |
| 962 } | 964 } |
| 963 | 965 |
| 964 | 966 |
| 965 static void encode_segmentation(VP9_COMP *cpi, | 967 static void encode_segmentation(VP9_COMP *cpi, |
| 966 struct vp9_write_bit_buffer *wb) { | 968 struct vp9_write_bit_buffer *wb) { |
| 967 int i, j; | 969 int i, j; |
| 968 | 970 |
| 969 struct segmentation *seg = &cpi->mb.e_mbd.seg; | 971 struct segmentation *seg = &cpi->common.seg; |
| 970 | 972 |
| 971 vp9_wb_write_bit(wb, seg->enabled); | 973 vp9_wb_write_bit(wb, seg->enabled); |
| 972 if (!seg->enabled) | 974 if (!seg->enabled) |
| 973 return; | 975 return; |
| 974 | 976 |
| 975 // Segmentation map | 977 // Segmentation map |
| 976 vp9_wb_write_bit(wb, seg->update_map); | 978 vp9_wb_write_bit(wb, seg->update_map); |
| 977 if (seg->update_map) { | 979 if (seg->update_map) { |
| 978 // Select the coding strategy (temporal or spatial) | 980 // Select the coding strategy (temporal or spatial) |
| 979 vp9_choose_segmap_coding_method(cpi); | 981 vp9_choose_segmap_coding_method(cpi); |
| (...skipping 49 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1029 VP9_COMMON *const cm = &cpi->common; | 1031 VP9_COMMON *const cm = &cpi->common; |
| 1030 | 1032 |
| 1031 // Mode | 1033 // Mode |
| 1032 vp9_write_literal(w, MIN(cm->tx_mode, ALLOW_32X32), 2); | 1034 vp9_write_literal(w, MIN(cm->tx_mode, ALLOW_32X32), 2); |
| 1033 if (cm->tx_mode >= ALLOW_32X32) | 1035 if (cm->tx_mode >= ALLOW_32X32) |
| 1034 vp9_write_bit(w, cm->tx_mode == TX_MODE_SELECT); | 1036 vp9_write_bit(w, cm->tx_mode == TX_MODE_SELECT); |
| 1035 | 1037 |
| 1036 // Probabilities | 1038 // Probabilities |
| 1037 if (cm->tx_mode == TX_MODE_SELECT) { | 1039 if (cm->tx_mode == TX_MODE_SELECT) { |
| 1038 int i, j; | 1040 int i, j; |
| 1039 unsigned int ct_8x8p[TX_SIZE_MAX_SB - 3][2]; | 1041 unsigned int ct_8x8p[TX_SIZES - 3][2]; |
| 1040 unsigned int ct_16x16p[TX_SIZE_MAX_SB - 2][2]; | 1042 unsigned int ct_16x16p[TX_SIZES - 2][2]; |
| 1041 unsigned int ct_32x32p[TX_SIZE_MAX_SB - 1][2]; | 1043 unsigned int ct_32x32p[TX_SIZES - 1][2]; |
| 1042 | 1044 |
| 1043 | 1045 |
| 1044 for (i = 0; i < TX_SIZE_CONTEXTS; i++) { | 1046 for (i = 0; i < TX_SIZE_CONTEXTS; i++) { |
| 1045 tx_counts_to_branch_counts_8x8(cm->counts.tx.p8x8[i], | 1047 tx_counts_to_branch_counts_8x8(cm->counts.tx.p8x8[i], |
| 1046 ct_8x8p); | 1048 ct_8x8p); |
| 1047 for (j = 0; j < TX_SIZE_MAX_SB - 3; j++) | 1049 for (j = 0; j < TX_SIZES - 3; j++) |
| 1048 vp9_cond_prob_diff_update(w, &cm->fc.tx_probs.p8x8[i][j], | 1050 vp9_cond_prob_diff_update(w, &cm->fc.tx_probs.p8x8[i][j], |
| 1049 VP9_MODE_UPDATE_PROB, ct_8x8p[j]); | 1051 MODE_UPDATE_PROB, ct_8x8p[j]); |
| 1050 } | 1052 } |
| 1051 | 1053 |
| 1052 for (i = 0; i < TX_SIZE_CONTEXTS; i++) { | 1054 for (i = 0; i < TX_SIZE_CONTEXTS; i++) { |
| 1053 tx_counts_to_branch_counts_16x16(cm->counts.tx.p16x16[i], | 1055 tx_counts_to_branch_counts_16x16(cm->counts.tx.p16x16[i], |
| 1054 ct_16x16p); | 1056 ct_16x16p); |
| 1055 for (j = 0; j < TX_SIZE_MAX_SB - 2; j++) | 1057 for (j = 0; j < TX_SIZES - 2; j++) |
| 1056 vp9_cond_prob_diff_update(w, &cm->fc.tx_probs.p16x16[i][j], | 1058 vp9_cond_prob_diff_update(w, &cm->fc.tx_probs.p16x16[i][j], |
| 1057 VP9_MODE_UPDATE_PROB, ct_16x16p[j]); | 1059 MODE_UPDATE_PROB, ct_16x16p[j]); |
| 1058 } | 1060 } |
| 1059 | 1061 |
| 1060 for (i = 0; i < TX_SIZE_CONTEXTS; i++) { | 1062 for (i = 0; i < TX_SIZE_CONTEXTS; i++) { |
| 1061 tx_counts_to_branch_counts_32x32(cm->counts.tx.p32x32[i], ct_32x32p); | 1063 tx_counts_to_branch_counts_32x32(cm->counts.tx.p32x32[i], ct_32x32p); |
| 1062 for (j = 0; j < TX_SIZE_MAX_SB - 1; j++) | 1064 for (j = 0; j < TX_SIZES - 1; j++) |
| 1063 vp9_cond_prob_diff_update(w, &cm->fc.tx_probs.p32x32[i][j], | 1065 vp9_cond_prob_diff_update(w, &cm->fc.tx_probs.p32x32[i][j], |
| 1064 VP9_MODE_UPDATE_PROB, ct_32x32p[j]); | 1066 MODE_UPDATE_PROB, ct_32x32p[j]); |
| 1065 } | 1067 } |
| 1066 #ifdef MODE_STATS | 1068 #ifdef MODE_STATS |
| 1067 if (!cpi->dummy_packing) | 1069 if (!cpi->dummy_packing) |
| 1068 update_tx_count_stats(cm); | 1070 update_tx_count_stats(cm); |
| 1069 #endif | 1071 #endif |
| 1070 } | 1072 } |
| 1071 } | 1073 } |
| 1072 | 1074 |
| 1073 static void write_interp_filter_type(INTERPOLATIONFILTERTYPE type, | 1075 static void write_interp_filter_type(INTERPOLATIONFILTERTYPE type, |
| 1074 struct vp9_write_bit_buffer *wb) { | 1076 struct vp9_write_bit_buffer *wb) { |
| 1077 const int type_to_literal[] = { 1, 0, 2 }; |
| 1078 |
| 1075 vp9_wb_write_bit(wb, type == SWITCHABLE); | 1079 vp9_wb_write_bit(wb, type == SWITCHABLE); |
| 1076 if (type != SWITCHABLE) | 1080 if (type != SWITCHABLE) |
| 1077 vp9_wb_write_literal(wb, type, 2); | 1081 vp9_wb_write_literal(wb, type_to_literal[type], 2); |
| 1078 } | 1082 } |
| 1079 | 1083 |
| 1080 static void fix_mcomp_filter_type(VP9_COMP *cpi) { | 1084 static void fix_mcomp_filter_type(VP9_COMP *cpi) { |
| 1081 VP9_COMMON *const cm = &cpi->common; | 1085 VP9_COMMON *const cm = &cpi->common; |
| 1082 | 1086 |
| 1083 if (cm->mcomp_filter_type == SWITCHABLE) { | 1087 if (cm->mcomp_filter_type == SWITCHABLE) { |
| 1084 // Check to see if only one of the filters is actually used | 1088 // Check to see if only one of the filters is actually used |
| 1085 int count[VP9_SWITCHABLE_FILTERS]; | 1089 int count[SWITCHABLE_FILTERS]; |
| 1086 int i, j, c = 0; | 1090 int i, j, c = 0; |
| 1087 for (i = 0; i < VP9_SWITCHABLE_FILTERS; ++i) { | 1091 for (i = 0; i < SWITCHABLE_FILTERS; ++i) { |
| 1088 count[i] = 0; | 1092 count[i] = 0; |
| 1089 for (j = 0; j <= VP9_SWITCHABLE_FILTERS; ++j) | 1093 for (j = 0; j <= SWITCHABLE_FILTERS; ++j) |
| 1090 count[i] += cm->counts.switchable_interp[j][i]; | 1094 count[i] += cm->counts.switchable_interp[j][i]; |
| 1091 c += (count[i] > 0); | 1095 c += (count[i] > 0); |
| 1092 } | 1096 } |
| 1093 if (c == 1) { | 1097 if (c == 1) { |
| 1094 // Only one filter is used. So set the filter at frame level | 1098 // Only one filter is used. So set the filter at frame level |
| 1095 for (i = 0; i < VP9_SWITCHABLE_FILTERS; ++i) { | 1099 for (i = 0; i < SWITCHABLE_FILTERS; ++i) { |
| 1096 if (count[i]) { | 1100 if (count[i]) { |
| 1097 cm->mcomp_filter_type = vp9_switchable_interp[i]; | 1101 cm->mcomp_filter_type = i; |
| 1098 break; | 1102 break; |
| 1099 } | 1103 } |
| 1100 } | 1104 } |
| 1101 } | 1105 } |
| 1102 } | 1106 } |
| 1103 } | 1107 } |
| 1104 | 1108 |
| 1105 static void write_tile_info(VP9_COMMON *cm, struct vp9_write_bit_buffer *wb) { | 1109 static void write_tile_info(VP9_COMMON *cm, struct vp9_write_bit_buffer *wb) { |
| 1106 int min_log2_tile_cols, max_log2_tile_cols, ones; | 1110 int min_log2_tile_cols, max_log2_tile_cols, ones; |
| 1107 vp9_get_tile_n_bits(cm->mi_cols, &min_log2_tile_cols, &max_log2_tile_cols); | 1111 vp9_get_tile_n_bits(cm->mi_cols, &min_log2_tile_cols, &max_log2_tile_cols); |
| (...skipping 224 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1332 } | 1336 } |
| 1333 } | 1337 } |
| 1334 | 1338 |
| 1335 if (!cm->error_resilient_mode) { | 1339 if (!cm->error_resilient_mode) { |
| 1336 vp9_wb_write_bit(wb, cm->refresh_frame_context); | 1340 vp9_wb_write_bit(wb, cm->refresh_frame_context); |
| 1337 vp9_wb_write_bit(wb, cm->frame_parallel_decoding_mode); | 1341 vp9_wb_write_bit(wb, cm->frame_parallel_decoding_mode); |
| 1338 } | 1342 } |
| 1339 | 1343 |
| 1340 vp9_wb_write_literal(wb, cm->frame_context_idx, NUM_FRAME_CONTEXTS_LOG2); | 1344 vp9_wb_write_literal(wb, cm->frame_context_idx, NUM_FRAME_CONTEXTS_LOG2); |
| 1341 | 1345 |
| 1342 encode_loopfilter(&xd->lf, wb); | 1346 encode_loopfilter(&cm->lf, wb); |
| 1343 encode_quantization(cm, wb); | 1347 encode_quantization(cm, wb); |
| 1344 encode_segmentation(cpi, wb); | 1348 encode_segmentation(cpi, wb); |
| 1345 | 1349 |
| 1346 write_tile_info(cm, wb); | 1350 write_tile_info(cm, wb); |
| 1347 } | 1351 } |
| 1348 | 1352 |
| 1349 static size_t write_compressed_header(VP9_COMP *cpi, uint8_t *data) { | 1353 static size_t write_compressed_header(VP9_COMP *cpi, uint8_t *data) { |
| 1350 VP9_COMMON *const cm = &cpi->common; | 1354 VP9_COMMON *const cm = &cpi->common; |
| 1351 MACROBLOCKD *const xd = &cpi->mb.e_mbd; | 1355 MACROBLOCKD *const xd = &cpi->mb.e_mbd; |
| 1352 FRAME_CONTEXT *const fc = &cm->fc; | 1356 FRAME_CONTEXT *const fc = &cm->fc; |
| (...skipping 21 matching lines...) Expand all Loading... |
| 1374 #endif | 1378 #endif |
| 1375 | 1379 |
| 1376 update_inter_mode_probs(cm, &header_bc); | 1380 update_inter_mode_probs(cm, &header_bc); |
| 1377 vp9_zero(cm->counts.inter_mode); | 1381 vp9_zero(cm->counts.inter_mode); |
| 1378 | 1382 |
| 1379 if (cm->mcomp_filter_type == SWITCHABLE) | 1383 if (cm->mcomp_filter_type == SWITCHABLE) |
| 1380 update_switchable_interp_probs(cpi, &header_bc); | 1384 update_switchable_interp_probs(cpi, &header_bc); |
| 1381 | 1385 |
| 1382 for (i = 0; i < INTRA_INTER_CONTEXTS; i++) | 1386 for (i = 0; i < INTRA_INTER_CONTEXTS; i++) |
| 1383 vp9_cond_prob_diff_update(&header_bc, &fc->intra_inter_prob[i], | 1387 vp9_cond_prob_diff_update(&header_bc, &fc->intra_inter_prob[i], |
| 1384 VP9_MODE_UPDATE_PROB, | 1388 MODE_UPDATE_PROB, |
| 1385 cpi->intra_inter_count[i]); | 1389 cpi->intra_inter_count[i]); |
| 1386 | 1390 |
| 1387 if (cm->allow_comp_inter_inter) { | 1391 if (cm->allow_comp_inter_inter) { |
| 1388 const int comp_pred_mode = cpi->common.comp_pred_mode; | 1392 const int comp_pred_mode = cpi->common.comp_pred_mode; |
| 1389 const int use_compound_pred = comp_pred_mode != SINGLE_PREDICTION_ONLY; | 1393 const int use_compound_pred = comp_pred_mode != SINGLE_PREDICTION_ONLY; |
| 1390 const int use_hybrid_pred = comp_pred_mode == HYBRID_PREDICTION; | 1394 const int use_hybrid_pred = comp_pred_mode == HYBRID_PREDICTION; |
| 1391 | 1395 |
| 1392 vp9_write_bit(&header_bc, use_compound_pred); | 1396 vp9_write_bit(&header_bc, use_compound_pred); |
| 1393 if (use_compound_pred) { | 1397 if (use_compound_pred) { |
| 1394 vp9_write_bit(&header_bc, use_hybrid_pred); | 1398 vp9_write_bit(&header_bc, use_hybrid_pred); |
| 1395 if (use_hybrid_pred) | 1399 if (use_hybrid_pred) |
| 1396 for (i = 0; i < COMP_INTER_CONTEXTS; i++) | 1400 for (i = 0; i < COMP_INTER_CONTEXTS; i++) |
| 1397 vp9_cond_prob_diff_update(&header_bc, &fc->comp_inter_prob[i], | 1401 vp9_cond_prob_diff_update(&header_bc, &fc->comp_inter_prob[i], |
| 1398 VP9_MODE_UPDATE_PROB, | 1402 MODE_UPDATE_PROB, |
| 1399 cpi->comp_inter_count[i]); | 1403 cpi->comp_inter_count[i]); |
| 1400 } | 1404 } |
| 1401 } | 1405 } |
| 1402 | 1406 |
| 1403 if (cm->comp_pred_mode != COMP_PREDICTION_ONLY) { | 1407 if (cm->comp_pred_mode != COMP_PREDICTION_ONLY) { |
| 1404 for (i = 0; i < REF_CONTEXTS; i++) { | 1408 for (i = 0; i < REF_CONTEXTS; i++) { |
| 1405 vp9_cond_prob_diff_update(&header_bc, &fc->single_ref_prob[i][0], | 1409 vp9_cond_prob_diff_update(&header_bc, &fc->single_ref_prob[i][0], |
| 1406 VP9_MODE_UPDATE_PROB, | 1410 MODE_UPDATE_PROB, |
| 1407 cpi->single_ref_count[i][0]); | 1411 cpi->single_ref_count[i][0]); |
| 1408 vp9_cond_prob_diff_update(&header_bc, &fc->single_ref_prob[i][1], | 1412 vp9_cond_prob_diff_update(&header_bc, &fc->single_ref_prob[i][1], |
| 1409 VP9_MODE_UPDATE_PROB, | 1413 MODE_UPDATE_PROB, |
| 1410 cpi->single_ref_count[i][1]); | 1414 cpi->single_ref_count[i][1]); |
| 1411 } | 1415 } |
| 1412 } | 1416 } |
| 1413 | 1417 |
| 1414 if (cm->comp_pred_mode != SINGLE_PREDICTION_ONLY) | 1418 if (cm->comp_pred_mode != SINGLE_PREDICTION_ONLY) |
| 1415 for (i = 0; i < REF_CONTEXTS; i++) | 1419 for (i = 0; i < REF_CONTEXTS; i++) |
| 1416 vp9_cond_prob_diff_update(&header_bc, &fc->comp_ref_prob[i], | 1420 vp9_cond_prob_diff_update(&header_bc, &fc->comp_ref_prob[i], |
| 1417 VP9_MODE_UPDATE_PROB, | 1421 MODE_UPDATE_PROB, |
| 1418 cpi->comp_ref_count[i]); | 1422 cpi->comp_ref_count[i]); |
| 1419 | 1423 |
| 1420 update_mbintra_mode_probs(cpi, &header_bc); | 1424 update_mbintra_mode_probs(cpi, &header_bc); |
| 1421 | 1425 |
| 1422 for (i = 0; i < NUM_PARTITION_CONTEXTS; ++i) { | 1426 for (i = 0; i < NUM_PARTITION_CONTEXTS; ++i) { |
| 1423 vp9_prob pnew[PARTITION_TYPES - 1]; | 1427 vp9_prob pnew[PARTITION_TYPES - 1]; |
| 1424 unsigned int bct[PARTITION_TYPES - 1][2]; | 1428 unsigned int bct[PARTITION_TYPES - 1][2]; |
| 1425 update_mode(&header_bc, PARTITION_TYPES, vp9_partition_encodings, | 1429 update_mode(&header_bc, PARTITION_TYPES, |
| 1426 vp9_partition_tree, pnew, | 1430 vp9_partition_tree, pnew, |
| 1427 fc->partition_prob[cm->frame_type][i], bct, | 1431 fc->partition_prob[cm->frame_type][i], bct, |
| 1428 (unsigned int *)cpi->partition_count[i]); | 1432 (unsigned int *)cpi->partition_count[i]); |
| 1429 } | 1433 } |
| 1430 | 1434 |
| 1431 vp9_write_nmv_probs(cpi, xd->allow_high_precision_mv, &header_bc); | 1435 vp9_write_nmv_probs(cpi, xd->allow_high_precision_mv, &header_bc); |
| 1432 } | 1436 } |
| 1433 | 1437 |
| 1434 vp9_stop_encode(&header_bc); | 1438 vp9_stop_encode(&header_bc); |
| 1435 assert(header_bc.pos <= 0xffff); | 1439 assert(header_bc.pos <= 0xffff); |
| (...skipping 76 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1512 "vp9_coef_update_probs_16x16[BLOCK_TYPES]"); | 1516 "vp9_coef_update_probs_16x16[BLOCK_TYPES]"); |
| 1513 print_tree_update_for_type(f, tree_update_hist[TX_32X32], BLOCK_TYPES, | 1517 print_tree_update_for_type(f, tree_update_hist[TX_32X32], BLOCK_TYPES, |
| 1514 "vp9_coef_update_probs_32x32[BLOCK_TYPES]"); | 1518 "vp9_coef_update_probs_32x32[BLOCK_TYPES]"); |
| 1515 | 1519 |
| 1516 fclose(f); | 1520 fclose(f); |
| 1517 f = fopen("treeupdate.bin", "wb"); | 1521 f = fopen("treeupdate.bin", "wb"); |
| 1518 fwrite(tree_update_hist, sizeof(tree_update_hist), 1, f); | 1522 fwrite(tree_update_hist, sizeof(tree_update_hist), 1, f); |
| 1519 fclose(f); | 1523 fclose(f); |
| 1520 } | 1524 } |
| 1521 #endif | 1525 #endif |
| OLD | NEW |