| OLD | NEW |
| 1 /* | 1 /* |
| 2 Copyright (c) 2010 The WebM project authors. All Rights Reserved. | 2 Copyright (c) 2010 The WebM project authors. All Rights Reserved. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
| 5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
| 6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
| 7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
| 8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
| 9 */ | 9 */ |
| 10 | 10 |
| (...skipping 12 matching lines...) Expand all Loading... |
| 23 #include "vp9/decoder/vp9_decodemv.h" | 23 #include "vp9/decoder/vp9_decodemv.h" |
| 24 #include "vp9/decoder/vp9_decodframe.h" | 24 #include "vp9/decoder/vp9_decodframe.h" |
| 25 #include "vp9/decoder/vp9_onyxd_int.h" | 25 #include "vp9/decoder/vp9_onyxd_int.h" |
| 26 #include "vp9/decoder/vp9_dsubexp.h" | 26 #include "vp9/decoder/vp9_dsubexp.h" |
| 27 #include "vp9/decoder/vp9_treereader.h" | 27 #include "vp9/decoder/vp9_treereader.h" |
| 28 | 28 |
| 29 static MB_PREDICTION_MODE read_intra_mode(vp9_reader *r, const vp9_prob *p) { | 29 static MB_PREDICTION_MODE read_intra_mode(vp9_reader *r, const vp9_prob *p) { |
| 30 return (MB_PREDICTION_MODE)treed_read(r, vp9_intra_mode_tree, p); | 30 return (MB_PREDICTION_MODE)treed_read(r, vp9_intra_mode_tree, p); |
| 31 } | 31 } |
| 32 | 32 |
| 33 static MB_PREDICTION_MODE read_inter_mode(vp9_reader *r, const vp9_prob *p) { | 33 static MB_PREDICTION_MODE read_inter_mode(VP9_COMMON *cm, vp9_reader *r, |
| 34 return (MB_PREDICTION_MODE)treed_read(r, vp9_inter_mode_tree, p); | 34 uint8_t context) { |
| 35 MB_PREDICTION_MODE mode = treed_read(r, vp9_inter_mode_tree, |
| 36 cm->fc.inter_mode_probs[context]); |
| 37 ++cm->counts.inter_mode[context][inter_mode_offset(mode)]; |
| 38 return mode; |
| 35 } | 39 } |
| 36 | 40 |
| 37 static int read_segment_id(vp9_reader *r, const struct segmentation *seg) { | 41 static int read_segment_id(vp9_reader *r, const struct segmentation *seg) { |
| 38 return treed_read(r, vp9_segment_tree, seg->tree_probs); | 42 return treed_read(r, vp9_segment_tree, seg->tree_probs); |
| 39 } | 43 } |
| 40 | 44 |
| 41 static TX_SIZE read_selected_tx_size(VP9_COMMON *cm, MACROBLOCKD *xd, | 45 static TX_SIZE read_selected_tx_size(VP9_COMMON *cm, MACROBLOCKD *xd, |
| 42 BLOCK_SIZE_TYPE bsize, vp9_reader *r) { | 46 BLOCK_SIZE bsize, vp9_reader *r) { |
| 43 const uint8_t context = vp9_get_pred_context_tx_size(xd); | 47 const uint8_t context = vp9_get_pred_context_tx_size(xd); |
| 44 const vp9_prob *tx_probs = get_tx_probs(bsize, context, &cm->fc.tx_probs); | 48 const vp9_prob *tx_probs = get_tx_probs(bsize, context, &cm->fc.tx_probs); |
| 45 TX_SIZE tx_size = vp9_read(r, tx_probs[0]); | 49 TX_SIZE tx_size = vp9_read(r, tx_probs[0]); |
| 46 if (tx_size != TX_4X4 && bsize >= BLOCK_SIZE_MB16X16) { | 50 if (tx_size != TX_4X4 && bsize >= BLOCK_16X16) { |
| 47 tx_size += vp9_read(r, tx_probs[1]); | 51 tx_size += vp9_read(r, tx_probs[1]); |
| 48 if (tx_size != TX_8X8 && bsize >= BLOCK_SIZE_SB32X32) | 52 if (tx_size != TX_8X8 && bsize >= BLOCK_32X32) |
| 49 tx_size += vp9_read(r, tx_probs[2]); | 53 tx_size += vp9_read(r, tx_probs[2]); |
| 50 } | 54 } |
| 51 | 55 |
| 52 update_tx_counts(bsize, context, tx_size, &cm->counts.tx); | 56 update_tx_counts(bsize, context, tx_size, &cm->counts.tx); |
| 53 return tx_size; | 57 return tx_size; |
| 54 } | 58 } |
| 55 | 59 |
| 56 static TX_SIZE read_tx_size(VP9D_COMP *pbi, TX_MODE tx_mode, | 60 static TX_SIZE read_tx_size(VP9D_COMP *pbi, TX_MODE tx_mode, |
| 57 BLOCK_SIZE_TYPE bsize, int select_cond, | 61 BLOCK_SIZE bsize, int allow_select, |
| 58 vp9_reader *r) { | 62 vp9_reader *r) { |
| 59 VP9_COMMON *const cm = &pbi->common; | 63 VP9_COMMON *const cm = &pbi->common; |
| 60 MACROBLOCKD *const xd = &pbi->mb; | 64 MACROBLOCKD *const xd = &pbi->mb; |
| 61 | 65 |
| 62 if (tx_mode == TX_MODE_SELECT && bsize >= BLOCK_SIZE_SB8X8 && select_cond) | 66 if (allow_select && tx_mode == TX_MODE_SELECT && bsize >= BLOCK_8X8) |
| 63 return read_selected_tx_size(cm, xd, bsize, r); | 67 return read_selected_tx_size(cm, xd, bsize, r); |
| 64 else if (tx_mode >= ALLOW_32X32 && bsize >= BLOCK_SIZE_SB32X32) | 68 else if (tx_mode >= ALLOW_32X32 && bsize >= BLOCK_32X32) |
| 65 return TX_32X32; | 69 return TX_32X32; |
| 66 else if (tx_mode >= ALLOW_16X16 && bsize >= BLOCK_SIZE_MB16X16) | 70 else if (tx_mode >= ALLOW_16X16 && bsize >= BLOCK_16X16) |
| 67 return TX_16X16; | 71 return TX_16X16; |
| 68 else if (tx_mode >= ALLOW_8X8 && bsize >= BLOCK_SIZE_SB8X8) | 72 else if (tx_mode >= ALLOW_8X8 && bsize >= BLOCK_8X8) |
| 69 return TX_8X8; | 73 return TX_8X8; |
| 70 else | 74 else |
| 71 return TX_4X4; | 75 return TX_4X4; |
| 72 } | 76 } |
| 73 | 77 |
| 74 static void set_segment_id(VP9_COMMON *cm, BLOCK_SIZE_TYPE bsize, | 78 static void set_segment_id(VP9_COMMON *cm, BLOCK_SIZE bsize, |
| 75 int mi_row, int mi_col, int segment_id) { | 79 int mi_row, int mi_col, int segment_id) { |
| 76 const int mi_offset = mi_row * cm->mi_cols + mi_col; | 80 const int mi_offset = mi_row * cm->mi_cols + mi_col; |
| 77 const int bw = 1 << mi_width_log2(bsize); | 81 const int bw = 1 << mi_width_log2(bsize); |
| 78 const int bh = 1 << mi_height_log2(bsize); | 82 const int bh = 1 << mi_height_log2(bsize); |
| 79 const int xmis = MIN(cm->mi_cols - mi_col, bw); | 83 const int xmis = MIN(cm->mi_cols - mi_col, bw); |
| 80 const int ymis = MIN(cm->mi_rows - mi_row, bh); | 84 const int ymis = MIN(cm->mi_rows - mi_row, bh); |
| 81 int x, y; | 85 int x, y; |
| 82 | 86 |
| 83 assert(segment_id >= 0 && segment_id < MAX_SEGMENTS); | 87 assert(segment_id >= 0 && segment_id < MAX_SEGMENTS); |
| 84 | 88 |
| 85 for (y = 0; y < ymis; y++) | 89 for (y = 0; y < ymis; y++) |
| 86 for (x = 0; x < xmis; x++) | 90 for (x = 0; x < xmis; x++) |
| 87 cm->last_frame_seg_map[mi_offset + y * cm->mi_cols + x] = segment_id; | 91 cm->last_frame_seg_map[mi_offset + y * cm->mi_cols + x] = segment_id; |
| 88 } | 92 } |
| 89 | 93 |
| 90 static int read_intra_segment_id(VP9D_COMP *pbi, int mi_row, int mi_col, | 94 static int read_intra_segment_id(VP9D_COMP *pbi, int mi_row, int mi_col, |
| 91 vp9_reader *r) { | 95 vp9_reader *r) { |
| 92 MACROBLOCKD *const xd = &pbi->mb; | 96 MACROBLOCKD *const xd = &pbi->mb; |
| 93 struct segmentation *const seg = &xd->seg; | 97 struct segmentation *const seg = &pbi->common.seg; |
| 94 const BLOCK_SIZE_TYPE bsize = xd->mode_info_context->mbmi.sb_type; | 98 const BLOCK_SIZE bsize = xd->mode_info_context->mbmi.sb_type; |
| 95 int segment_id; | 99 int segment_id; |
| 96 | 100 |
| 97 if (!seg->enabled) | 101 if (!seg->enabled) |
| 98 return 0; // Default for disabled segmentation | 102 return 0; // Default for disabled segmentation |
| 99 | 103 |
| 100 if (!seg->update_map) | 104 if (!seg->update_map) |
| 101 return 0; | 105 return 0; |
| 102 | 106 |
| 103 segment_id = read_segment_id(r, seg); | 107 segment_id = read_segment_id(r, seg); |
| 104 set_segment_id(&pbi->common, bsize, mi_row, mi_col, segment_id); | 108 set_segment_id(&pbi->common, bsize, mi_row, mi_col, segment_id); |
| 105 return segment_id; | 109 return segment_id; |
| 106 } | 110 } |
| 107 | 111 |
| 108 static int read_inter_segment_id(VP9D_COMP *pbi, int mi_row, int mi_col, | 112 static int read_inter_segment_id(VP9D_COMP *pbi, int mi_row, int mi_col, |
| 109 vp9_reader *r) { | 113 vp9_reader *r) { |
| 110 VP9_COMMON *const cm = &pbi->common; | 114 VP9_COMMON *const cm = &pbi->common; |
| 111 MACROBLOCKD *const xd = &pbi->mb; | 115 MACROBLOCKD *const xd = &pbi->mb; |
| 112 struct segmentation *const seg = &xd->seg; | 116 struct segmentation *const seg = &cm->seg; |
| 113 const BLOCK_SIZE_TYPE bsize = xd->mode_info_context->mbmi.sb_type; | 117 const BLOCK_SIZE bsize = xd->mode_info_context->mbmi.sb_type; |
| 114 int pred_segment_id, segment_id; | 118 int pred_segment_id, segment_id; |
| 115 | 119 |
| 116 if (!seg->enabled) | 120 if (!seg->enabled) |
| 117 return 0; // Default for disabled segmentation | 121 return 0; // Default for disabled segmentation |
| 118 | 122 |
| 119 pred_segment_id = vp9_get_segment_id(cm, cm->last_frame_seg_map, | 123 pred_segment_id = vp9_get_segment_id(cm, cm->last_frame_seg_map, |
| 120 bsize, mi_row, mi_col); | 124 bsize, mi_row, mi_col); |
| 121 if (!seg->update_map) | 125 if (!seg->update_map) |
| 122 return pred_segment_id; | 126 return pred_segment_id; |
| 123 | 127 |
| 124 if (seg->temporal_update) { | 128 if (seg->temporal_update) { |
| 125 const vp9_prob pred_prob = vp9_get_pred_prob_seg_id(xd); | 129 const vp9_prob pred_prob = vp9_get_pred_prob_seg_id(seg, xd); |
| 126 const int pred_flag = vp9_read(r, pred_prob); | 130 const int pred_flag = vp9_read(r, pred_prob); |
| 127 vp9_set_pred_flag_seg_id(cm, bsize, mi_row, mi_col, pred_flag); | 131 vp9_set_pred_flag_seg_id(cm, bsize, mi_row, mi_col, pred_flag); |
| 128 segment_id = pred_flag ? pred_segment_id | 132 segment_id = pred_flag ? pred_segment_id |
| 129 : read_segment_id(r, seg); | 133 : read_segment_id(r, seg); |
| 130 } else { | 134 } else { |
| 131 segment_id = read_segment_id(r, seg); | 135 segment_id = read_segment_id(r, seg); |
| 132 } | 136 } |
| 133 set_segment_id(cm, bsize, mi_row, mi_col, segment_id); | 137 set_segment_id(cm, bsize, mi_row, mi_col, segment_id); |
| 134 return segment_id; | 138 return segment_id; |
| 135 } | 139 } |
| 136 | 140 |
| 137 static uint8_t read_skip_coeff(VP9D_COMP *pbi, int segment_id, vp9_reader *r) { | 141 static uint8_t read_skip_coeff(VP9D_COMP *pbi, int segment_id, vp9_reader *r) { |
| 138 VP9_COMMON *const cm = &pbi->common; | 142 VP9_COMMON *const cm = &pbi->common; |
| 139 MACROBLOCKD *const xd = &pbi->mb; | 143 MACROBLOCKD *const xd = &pbi->mb; |
| 140 int skip_coeff = vp9_segfeature_active(&xd->seg, segment_id, SEG_LVL_SKIP); | 144 int skip_coeff = vp9_segfeature_active(&cm->seg, segment_id, SEG_LVL_SKIP); |
| 141 if (!skip_coeff) { | 145 if (!skip_coeff) { |
| 142 const int ctx = vp9_get_pred_context_mbskip(xd); | 146 const int ctx = vp9_get_pred_context_mbskip(xd); |
| 143 skip_coeff = vp9_read(r, vp9_get_pred_prob_mbskip(cm, xd)); | 147 skip_coeff = vp9_read(r, vp9_get_pred_prob_mbskip(cm, xd)); |
| 144 cm->counts.mbskip[ctx][skip_coeff]++; | 148 cm->counts.mbskip[ctx][skip_coeff]++; |
| 145 } | 149 } |
| 146 return skip_coeff; | 150 return skip_coeff; |
| 147 } | 151 } |
| 148 | 152 |
| 149 static void read_intra_mode_info(VP9D_COMP *pbi, MODE_INFO *m, | 153 static void read_intra_frame_mode_info(VP9D_COMP *pbi, MODE_INFO *m, |
| 150 int mi_row, int mi_col, vp9_reader *r) { | 154 int mi_row, int mi_col, vp9_reader *r) { |
| 151 VP9_COMMON *const cm = &pbi->common; | 155 VP9_COMMON *const cm = &pbi->common; |
| 152 MACROBLOCKD *const xd = &pbi->mb; | 156 MACROBLOCKD *const xd = &pbi->mb; |
| 153 MB_MODE_INFO *const mbmi = &m->mbmi; | 157 MB_MODE_INFO *const mbmi = &m->mbmi; |
| 154 const BLOCK_SIZE_TYPE bsize = mbmi->sb_type; | 158 const BLOCK_SIZE bsize = mbmi->sb_type; |
| 155 const int mis = cm->mode_info_stride; | 159 const int mis = cm->mode_info_stride; |
| 156 | 160 |
| 157 mbmi->segment_id = read_intra_segment_id(pbi, mi_row, mi_col, r); | 161 mbmi->segment_id = read_intra_segment_id(pbi, mi_row, mi_col, r); |
| 158 mbmi->mb_skip_coeff = read_skip_coeff(pbi, mbmi->segment_id, r); | 162 mbmi->skip_coeff = read_skip_coeff(pbi, mbmi->segment_id, r); |
| 159 mbmi->txfm_size = read_tx_size(pbi, cm->tx_mode, bsize, 1, r); | 163 mbmi->txfm_size = read_tx_size(pbi, cm->tx_mode, bsize, 1, r); |
| 160 mbmi->ref_frame[0] = INTRA_FRAME; | 164 mbmi->ref_frame[0] = INTRA_FRAME; |
| 165 mbmi->ref_frame[1] = NONE; |
| 161 | 166 |
| 162 if (bsize >= BLOCK_SIZE_SB8X8) { | 167 if (bsize >= BLOCK_8X8) { |
| 163 const MB_PREDICTION_MODE A = above_block_mode(m, 0, mis); | 168 const MB_PREDICTION_MODE A = above_block_mode(m, 0, mis); |
| 164 const MB_PREDICTION_MODE L = xd->left_available ? | 169 const MB_PREDICTION_MODE L = xd->left_available ? |
| 165 left_block_mode(m, 0) : DC_PRED; | 170 left_block_mode(m, 0) : DC_PRED; |
| 166 mbmi->mode = read_intra_mode(r, vp9_kf_y_mode_prob[A][L]); | 171 mbmi->mode = read_intra_mode(r, vp9_kf_y_mode_prob[A][L]); |
| 167 } else { | 172 } else { |
| 168 // Only 4x4, 4x8, 8x4 blocks | 173 // Only 4x4, 4x8, 8x4 blocks |
| 169 const int bw = 1 << b_width_log2(bsize); | 174 const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize]; // 1 or 2 |
| 170 const int bh = 1 << b_height_log2(bsize); | 175 const int num_4x4_h = num_4x4_blocks_high_lookup[bsize]; // 1 or 2 |
| 171 int idx, idy; | 176 int idx, idy; |
| 172 | 177 |
| 173 for (idy = 0; idy < 2; idy += bh) { | 178 for (idy = 0; idy < 2; idy += num_4x4_h) { |
| 174 for (idx = 0; idx < 2; idx += bw) { | 179 for (idx = 0; idx < 2; idx += num_4x4_w) { |
| 175 const int ib = idy * 2 + idx; | 180 const int ib = idy * 2 + idx; |
| 176 const MB_PREDICTION_MODE A = above_block_mode(m, ib, mis); | 181 const MB_PREDICTION_MODE A = above_block_mode(m, ib, mis); |
| 177 const MB_PREDICTION_MODE L = (xd->left_available || idx) ? | 182 const MB_PREDICTION_MODE L = (xd->left_available || idx) ? |
| 178 left_block_mode(m, ib) : DC_PRED; | 183 left_block_mode(m, ib) : DC_PRED; |
| 179 const MB_PREDICTION_MODE b_mode = read_intra_mode(r, | 184 const MB_PREDICTION_MODE b_mode = read_intra_mode(r, |
| 180 vp9_kf_y_mode_prob[A][L]); | 185 vp9_kf_y_mode_prob[A][L]); |
| 181 m->bmi[ib].as_mode = b_mode; | 186 m->bmi[ib].as_mode = b_mode; |
| 182 if (bh == 2) | 187 if (num_4x4_h == 2) |
| 183 m->bmi[ib + 2].as_mode = b_mode; | 188 m->bmi[ib + 2].as_mode = b_mode; |
| 184 if (bw == 2) | 189 if (num_4x4_w == 2) |
| 185 m->bmi[ib + 1].as_mode = b_mode; | 190 m->bmi[ib + 1].as_mode = b_mode; |
| 186 } | 191 } |
| 187 } | 192 } |
| 188 | 193 |
| 189 mbmi->mode = m->bmi[3].as_mode; | 194 mbmi->mode = m->bmi[3].as_mode; |
| 190 } | 195 } |
| 191 | 196 |
| 192 mbmi->uv_mode = read_intra_mode(r, vp9_kf_uv_mode_prob[mbmi->mode]); | 197 mbmi->uv_mode = read_intra_mode(r, vp9_kf_uv_mode_prob[mbmi->mode]); |
| 193 } | 198 } |
| 194 | 199 |
| (...skipping 26 matching lines...) Expand all Loading... |
| 221 hp = usehp ? vp9_read(r, class0 ? mvcomp->class0_hp : mvcomp->hp) | 226 hp = usehp ? vp9_read(r, class0 ? mvcomp->class0_hp : mvcomp->hp) |
| 222 : 1; | 227 : 1; |
| 223 | 228 |
| 224 // Result | 229 // Result |
| 225 mag = vp9_get_mv_mag(mv_class, (d << 3) | (fr << 1) | hp) + 1; | 230 mag = vp9_get_mv_mag(mv_class, (d << 3) | (fr << 1) | hp) + 1; |
| 226 return sign ? -mag : mag; | 231 return sign ? -mag : mag; |
| 227 } | 232 } |
| 228 | 233 |
| 229 static INLINE void read_mv(vp9_reader *r, MV *mv, const MV *ref, | 234 static INLINE void read_mv(vp9_reader *r, MV *mv, const MV *ref, |
| 230 const nmv_context *ctx, | 235 const nmv_context *ctx, |
| 231 nmv_context_counts *counts, int usehp) { | 236 nmv_context_counts *counts, int allow_hp) { |
| 232 const MV_JOINT_TYPE j = treed_read(r, vp9_mv_joint_tree, ctx->joints); | 237 const MV_JOINT_TYPE j = treed_read(r, vp9_mv_joint_tree, ctx->joints); |
| 238 const int use_hp = allow_hp && vp9_use_mv_hp(ref); |
| 233 MV diff = {0, 0}; | 239 MV diff = {0, 0}; |
| 234 | 240 |
| 235 usehp = usehp && vp9_use_mv_hp(ref); | |
| 236 if (mv_joint_vertical(j)) | 241 if (mv_joint_vertical(j)) |
| 237 diff.row = read_mv_component(r, &ctx->comps[0], usehp); | 242 diff.row = read_mv_component(r, &ctx->comps[0], use_hp); |
| 238 | 243 |
| 239 if (mv_joint_horizontal(j)) | 244 if (mv_joint_horizontal(j)) |
| 240 diff.col = read_mv_component(r, &ctx->comps[1], usehp); | 245 diff.col = read_mv_component(r, &ctx->comps[1], use_hp); |
| 241 | 246 |
| 242 vp9_inc_mv(&diff, counts); | 247 vp9_inc_mv(&diff, counts); |
| 243 | 248 |
| 244 mv->row = ref->row + diff.row; | 249 mv->row = ref->row + diff.row; |
| 245 mv->col = ref->col + diff.col; | 250 mv->col = ref->col + diff.col; |
| 246 } | 251 } |
| 247 | 252 |
| 248 static void update_mv(vp9_reader *r, vp9_prob *p, vp9_prob upd_p) { | 253 static void update_mv(vp9_reader *r, vp9_prob *p) { |
| 249 if (vp9_read(r, upd_p)) | 254 if (vp9_read(r, NMV_UPDATE_PROB)) |
| 250 *p = (vp9_read_literal(r, 7) << 1) | 1; | 255 *p = (vp9_read_literal(r, 7) << 1) | 1; |
| 251 } | 256 } |
| 252 | 257 |
| 253 static void read_mv_probs(vp9_reader *r, nmv_context *mvc, int usehp) { | 258 static void read_mv_probs(vp9_reader *r, nmv_context *mvc, int allow_hp) { |
| 254 int i, j, k; | 259 int i, j, k; |
| 255 | 260 |
| 256 for (j = 0; j < MV_JOINTS - 1; ++j) | 261 for (j = 0; j < MV_JOINTS - 1; ++j) |
| 257 update_mv(r, &mvc->joints[j], VP9_NMV_UPDATE_PROB); | 262 update_mv(r, &mvc->joints[j]); |
| 258 | 263 |
| 259 for (i = 0; i < 2; ++i) { | 264 for (i = 0; i < 2; ++i) { |
| 260 nmv_component *const comp = &mvc->comps[i]; | 265 nmv_component *const comp = &mvc->comps[i]; |
| 261 | 266 |
| 262 update_mv(r, &comp->sign, VP9_NMV_UPDATE_PROB); | 267 update_mv(r, &comp->sign); |
| 268 |
| 263 for (j = 0; j < MV_CLASSES - 1; ++j) | 269 for (j = 0; j < MV_CLASSES - 1; ++j) |
| 264 update_mv(r, &comp->classes[j], VP9_NMV_UPDATE_PROB); | 270 update_mv(r, &comp->classes[j]); |
| 265 | 271 |
| 266 for (j = 0; j < CLASS0_SIZE - 1; ++j) | 272 for (j = 0; j < CLASS0_SIZE - 1; ++j) |
| 267 update_mv(r, &comp->class0[j], VP9_NMV_UPDATE_PROB); | 273 update_mv(r, &comp->class0[j]); |
| 268 | 274 |
| 269 for (j = 0; j < MV_OFFSET_BITS; ++j) | 275 for (j = 0; j < MV_OFFSET_BITS; ++j) |
| 270 update_mv(r, &comp->bits[j], VP9_NMV_UPDATE_PROB); | 276 update_mv(r, &comp->bits[j]); |
| 271 } | 277 } |
| 272 | 278 |
| 273 for (i = 0; i < 2; ++i) { | 279 for (i = 0; i < 2; ++i) { |
| 274 nmv_component *const comp = &mvc->comps[i]; | 280 nmv_component *const comp = &mvc->comps[i]; |
| 275 | 281 |
| 276 for (j = 0; j < CLASS0_SIZE; ++j) | 282 for (j = 0; j < CLASS0_SIZE; ++j) |
| 277 for (k = 0; k < 3; ++k) | 283 for (k = 0; k < 3; ++k) |
| 278 update_mv(r, &comp->class0_fp[j][k], VP9_NMV_UPDATE_PROB); | 284 update_mv(r, &comp->class0_fp[j][k]); |
| 279 | 285 |
| 280 for (j = 0; j < 3; ++j) | 286 for (j = 0; j < 3; ++j) |
| 281 update_mv(r, &comp->fp[j], VP9_NMV_UPDATE_PROB); | 287 update_mv(r, &comp->fp[j]); |
| 282 } | 288 } |
| 283 | 289 |
| 284 if (usehp) { | 290 if (allow_hp) { |
| 285 for (i = 0; i < 2; ++i) { | 291 for (i = 0; i < 2; ++i) { |
| 286 update_mv(r, &mvc->comps[i].class0_hp, VP9_NMV_UPDATE_PROB); | 292 update_mv(r, &mvc->comps[i].class0_hp); |
| 287 update_mv(r, &mvc->comps[i].hp, VP9_NMV_UPDATE_PROB); | 293 update_mv(r, &mvc->comps[i].hp); |
| 288 } | 294 } |
| 289 } | 295 } |
| 290 } | 296 } |
| 291 | 297 |
| 292 // Read the reference frame | 298 // Read the reference frame |
| 293 static void read_ref_frame(VP9D_COMP *pbi, vp9_reader *r, | 299 static void read_ref_frames(VP9D_COMP *pbi, vp9_reader *r, |
| 294 int segment_id, MV_REFERENCE_FRAME ref_frame[2]) { | 300 int segment_id, MV_REFERENCE_FRAME ref_frame[2]) { |
| 295 VP9_COMMON *const cm = &pbi->common; | 301 VP9_COMMON *const cm = &pbi->common; |
| 296 MACROBLOCKD *const xd = &pbi->mb; | 302 MACROBLOCKD *const xd = &pbi->mb; |
| 297 FRAME_CONTEXT *const fc = &cm->fc; | 303 FRAME_CONTEXT *const fc = &cm->fc; |
| 298 FRAME_COUNTS *const counts = &cm->counts; | 304 FRAME_COUNTS *const counts = &cm->counts; |
| 299 | 305 |
| 300 if (vp9_segfeature_active(&xd->seg, segment_id, SEG_LVL_REF_FRAME)) { | 306 if (vp9_segfeature_active(&cm->seg, segment_id, SEG_LVL_REF_FRAME)) { |
| 301 ref_frame[0] = vp9_get_segdata(&xd->seg, segment_id, SEG_LVL_REF_FRAME); | 307 ref_frame[0] = vp9_get_segdata(&cm->seg, segment_id, SEG_LVL_REF_FRAME); |
| 302 ref_frame[1] = NONE; | 308 ref_frame[1] = NONE; |
| 303 } else { | 309 } else { |
| 304 const int comp_ctx = vp9_get_pred_context_comp_inter_inter(cm, xd); | 310 const int comp_ctx = vp9_get_pred_context_comp_inter_inter(cm, xd); |
| 305 int is_comp; | 311 int is_comp; |
| 306 | 312 |
| 307 if (cm->comp_pred_mode == HYBRID_PREDICTION) { | 313 if (cm->comp_pred_mode == HYBRID_PREDICTION) { |
| 308 is_comp = vp9_read(r, fc->comp_inter_prob[comp_ctx]); | 314 is_comp = vp9_read(r, fc->comp_inter_prob[comp_ctx]); |
| 309 counts->comp_inter[comp_ctx][is_comp]++; | 315 counts->comp_inter[comp_ctx][is_comp]++; |
| 310 } else { | 316 } else { |
| 311 is_comp = cm->comp_pred_mode == COMP_PREDICTION_ONLY; | 317 is_comp = cm->comp_pred_mode == COMP_PREDICTION_ONLY; |
| 312 } | 318 } |
| 313 | 319 |
| 314 // FIXME(rbultje) I'm pretty sure this breaks segmentation ref frame coding | 320 // FIXME(rbultje) I'm pretty sure this breaks segmentation ref frame coding |
| 315 if (is_comp) { | 321 if (is_comp) { |
| 316 const int fix_ref_idx = cm->ref_frame_sign_bias[cm->comp_fixed_ref]; | 322 const int fix_ref_idx = cm->ref_frame_sign_bias[cm->comp_fixed_ref]; |
| 317 const int ref_ctx = vp9_get_pred_context_comp_ref_p(cm, xd); | 323 const int ref_ctx = vp9_get_pred_context_comp_ref_p(cm, xd); |
| 318 const int b = vp9_read(r, fc->comp_ref_prob[ref_ctx]); | 324 const int b = vp9_read(r, fc->comp_ref_prob[ref_ctx]); |
| 319 counts->comp_ref[ref_ctx][b]++; | 325 counts->comp_ref[ref_ctx][b]++; |
| 320 ref_frame[fix_ref_idx] = cm->comp_fixed_ref; | 326 ref_frame[fix_ref_idx] = cm->comp_fixed_ref; |
| 321 ref_frame[!fix_ref_idx] = cm->comp_var_ref[b]; | 327 ref_frame[!fix_ref_idx] = cm->comp_var_ref[b]; |
| 322 } else { | 328 } else { |
| 323 const int ref1_ctx = vp9_get_pred_context_single_ref_p1(xd); | 329 const int ctx0 = vp9_get_pred_context_single_ref_p1(xd); |
| 324 ref_frame[1] = NONE; | 330 const int bit0 = vp9_read(r, fc->single_ref_prob[ctx0][0]); |
| 325 if (vp9_read(r, fc->single_ref_prob[ref1_ctx][0])) { | 331 ++counts->single_ref[ctx0][0][bit0]; |
| 326 const int ref2_ctx = vp9_get_pred_context_single_ref_p2(xd); | 332 if (bit0) { |
| 327 const int b = vp9_read(r, fc->single_ref_prob[ref2_ctx][1]); | 333 const int ctx1 = vp9_get_pred_context_single_ref_p2(xd); |
| 328 ref_frame[0] = b ? ALTREF_FRAME : GOLDEN_FRAME; | 334 const int bit1 = vp9_read(r, fc->single_ref_prob[ctx1][1]); |
| 329 counts->single_ref[ref1_ctx][0][1]++; | 335 ref_frame[0] = bit1 ? ALTREF_FRAME : GOLDEN_FRAME; |
| 330 counts->single_ref[ref2_ctx][1][b]++; | 336 ++counts->single_ref[ctx1][1][bit1]; |
| 331 } else { | 337 } else { |
| 332 ref_frame[0] = LAST_FRAME; | 338 ref_frame[0] = LAST_FRAME; |
| 333 counts->single_ref[ref1_ctx][0][0]++; | |
| 334 } | 339 } |
| 340 |
| 341 ref_frame[1] = NONE; |
| 335 } | 342 } |
| 336 } | 343 } |
| 337 } | 344 } |
| 338 | 345 |
| 339 static void read_switchable_interp_probs(FRAME_CONTEXT *fc, vp9_reader *r) { | 346 static void read_switchable_interp_probs(FRAME_CONTEXT *fc, vp9_reader *r) { |
| 340 int i, j; | 347 int i, j; |
| 341 for (j = 0; j < VP9_SWITCHABLE_FILTERS + 1; ++j) | 348 for (j = 0; j < SWITCHABLE_FILTERS + 1; ++j) |
| 342 for (i = 0; i < VP9_SWITCHABLE_FILTERS - 1; ++i) | 349 for (i = 0; i < SWITCHABLE_FILTERS - 1; ++i) |
| 343 if (vp9_read(r, VP9_MODE_UPDATE_PROB)) | 350 if (vp9_read(r, MODE_UPDATE_PROB)) |
| 344 vp9_diff_update_prob(r, &fc->switchable_interp_prob[j][i]); | 351 vp9_diff_update_prob(r, &fc->switchable_interp_prob[j][i]); |
| 345 } | 352 } |
| 346 | 353 |
| 347 static void read_inter_mode_probs(FRAME_CONTEXT *fc, vp9_reader *r) { | 354 static void read_inter_mode_probs(FRAME_CONTEXT *fc, vp9_reader *r) { |
| 348 int i, j; | 355 int i, j; |
| 349 for (i = 0; i < INTER_MODE_CONTEXTS; ++i) | 356 for (i = 0; i < INTER_MODE_CONTEXTS; ++i) |
| 350 for (j = 0; j < VP9_INTER_MODES - 1; ++j) | 357 for (j = 0; j < INTER_MODES - 1; ++j) |
| 351 if (vp9_read(r, VP9_MODE_UPDATE_PROB)) | 358 if (vp9_read(r, MODE_UPDATE_PROB)) |
| 352 vp9_diff_update_prob(r, &fc->inter_mode_probs[i][j]); | 359 vp9_diff_update_prob(r, &fc->inter_mode_probs[i][j]); |
| 353 } | 360 } |
| 354 | 361 |
| 355 static INLINE COMPPREDMODE_TYPE read_comp_pred_mode(vp9_reader *r) { | 362 static INLINE COMPPREDMODE_TYPE read_comp_pred_mode(vp9_reader *r) { |
| 356 COMPPREDMODE_TYPE mode = vp9_read_bit(r); | 363 COMPPREDMODE_TYPE mode = vp9_read_bit(r); |
| 357 if (mode) | 364 if (mode) |
| 358 mode += vp9_read_bit(r); | 365 mode += vp9_read_bit(r); |
| 359 return mode; | 366 return mode; |
| 360 } | 367 } |
| 361 | 368 |
| 362 static INLINE void assign_and_clamp_mv(int_mv *dst, const int_mv *src, | |
| 363 int mb_to_left_edge, | |
| 364 int mb_to_right_edge, | |
| 365 int mb_to_top_edge, | |
| 366 int mb_to_bottom_edge) { | |
| 367 dst->as_int = src->as_int; | |
| 368 clamp_mv(dst, mb_to_left_edge, mb_to_right_edge, mb_to_top_edge, | |
| 369 mb_to_bottom_edge); | |
| 370 } | |
| 371 | |
| 372 static INLINE INTERPOLATIONFILTERTYPE read_switchable_filter_type( | 369 static INLINE INTERPOLATIONFILTERTYPE read_switchable_filter_type( |
| 373 VP9D_COMP *pbi, vp9_reader *r) { | 370 VP9D_COMP *pbi, vp9_reader *r) { |
| 374 VP9_COMMON *const cm = &pbi->common; | 371 VP9_COMMON *const cm = &pbi->common; |
| 375 MACROBLOCKD *const xd = &pbi->mb; | 372 MACROBLOCKD *const xd = &pbi->mb; |
| 376 const vp9_prob *probs = vp9_get_pred_probs_switchable_interp(cm, xd); | |
| 377 const int index = treed_read(r, vp9_switchable_interp_tree, probs); | |
| 378 const int ctx = vp9_get_pred_context_switchable_interp(xd); | 373 const int ctx = vp9_get_pred_context_switchable_interp(xd); |
| 379 ++cm->counts.switchable_interp[ctx][index]; | 374 const int type = treed_read(r, vp9_switchable_interp_tree, |
| 380 return vp9_switchable_interp[index]; | 375 cm->fc.switchable_interp_prob[ctx]); |
| 376 ++cm->counts.switchable_interp[ctx][type]; |
| 377 return type; |
| 381 } | 378 } |
| 382 | 379 |
| 383 static void read_intra_block_modes(VP9D_COMP *pbi, MODE_INFO *mi, | 380 static void read_intra_block_mode_info(VP9D_COMP *pbi, MODE_INFO *mi, |
| 384 vp9_reader *r) { | 381 vp9_reader *r) { |
| 385 VP9_COMMON *const cm = &pbi->common; | 382 VP9_COMMON *const cm = &pbi->common; |
| 386 MB_MODE_INFO *const mbmi = &mi->mbmi; | 383 MB_MODE_INFO *const mbmi = &mi->mbmi; |
| 387 const BLOCK_SIZE_TYPE bsize = mi->mbmi.sb_type; | 384 const BLOCK_SIZE bsize = mi->mbmi.sb_type; |
| 388 const int bwl = b_width_log2(bsize), bhl = b_height_log2(bsize); | |
| 389 | 385 |
| 390 if (bsize >= BLOCK_SIZE_SB8X8) { | 386 mbmi->ref_frame[0] = INTRA_FRAME; |
| 391 const int size_group = MIN(3, MIN(bwl, bhl)); | 387 mbmi->ref_frame[1] = NONE; |
| 388 |
| 389 if (bsize >= BLOCK_8X8) { |
| 390 const int size_group = size_group_lookup[bsize]; |
| 392 mbmi->mode = read_intra_mode(r, cm->fc.y_mode_prob[size_group]); | 391 mbmi->mode = read_intra_mode(r, cm->fc.y_mode_prob[size_group]); |
| 393 cm->counts.y_mode[size_group][mbmi->mode]++; | 392 cm->counts.y_mode[size_group][mbmi->mode]++; |
| 394 } else { | 393 } else { |
| 395 // Only 4x4, 4x8, 8x4 blocks | 394 // Only 4x4, 4x8, 8x4 blocks |
| 396 const int bw = 1 << bwl, bh = 1 << bhl; | 395 const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize]; // 1 or 2 |
| 396 const int num_4x4_h = num_4x4_blocks_high_lookup[bsize]; // 1 or 2 |
| 397 int idx, idy; | 397 int idx, idy; |
| 398 | 398 |
| 399 for (idy = 0; idy < 2; idy += bh) { | 399 for (idy = 0; idy < 2; idy += num_4x4_h) { |
| 400 for (idx = 0; idx < 2; idx += bw) { | 400 for (idx = 0; idx < 2; idx += num_4x4_w) { |
| 401 const int ib = idy * 2 + idx; | 401 const int ib = idy * 2 + idx; |
| 402 const int b_mode = read_intra_mode(r, cm->fc.y_mode_prob[0]); | 402 const int b_mode = read_intra_mode(r, cm->fc.y_mode_prob[0]); |
| 403 mi->bmi[ib].as_mode = b_mode; | 403 mi->bmi[ib].as_mode = b_mode; |
| 404 cm->counts.y_mode[0][b_mode]++; | 404 cm->counts.y_mode[0][b_mode]++; |
| 405 | 405 |
| 406 if (bh == 2) | 406 if (num_4x4_h == 2) |
| 407 mi->bmi[ib + 2].as_mode = b_mode; | 407 mi->bmi[ib + 2].as_mode = b_mode; |
| 408 if (bw == 2) | 408 if (num_4x4_w == 2) |
| 409 mi->bmi[ib + 1].as_mode = b_mode; | 409 mi->bmi[ib + 1].as_mode = b_mode; |
| 410 } | 410 } |
| 411 } | 411 } |
| 412 mbmi->mode = mi->bmi[3].as_mode; | 412 mbmi->mode = mi->bmi[3].as_mode; |
| 413 } | 413 } |
| 414 | 414 |
| 415 mbmi->uv_mode = read_intra_mode(r, cm->fc.uv_mode_prob[mbmi->mode]); | 415 mbmi->uv_mode = read_intra_mode(r, cm->fc.uv_mode_prob[mbmi->mode]); |
| 416 cm->counts.uv_mode[mbmi->mode][mbmi->uv_mode]++; | 416 cm->counts.uv_mode[mbmi->mode][mbmi->uv_mode]++; |
| 417 } | 417 } |
| 418 | 418 |
| 419 static MV_REFERENCE_FRAME read_reference_frame(VP9D_COMP *pbi, int segment_id, | 419 static int read_is_inter_block(VP9D_COMP *pbi, int segment_id, vp9_reader *r) { |
| 420 vp9_reader *r) { | |
| 421 VP9_COMMON *const cm = &pbi->common; | 420 VP9_COMMON *const cm = &pbi->common; |
| 422 MACROBLOCKD *const xd = &pbi->mb; | 421 MACROBLOCKD *const xd = &pbi->mb; |
| 423 | 422 |
| 424 MV_REFERENCE_FRAME ref; | 423 if (vp9_segfeature_active(&cm->seg, segment_id, SEG_LVL_REF_FRAME)) { |
| 425 if (!vp9_segfeature_active(&xd->seg, segment_id, SEG_LVL_REF_FRAME)) { | 424 return vp9_get_segdata(&cm->seg, segment_id, SEG_LVL_REF_FRAME) != |
| 425 INTRA_FRAME; |
| 426 } else { |
| 426 const int ctx = vp9_get_pred_context_intra_inter(xd); | 427 const int ctx = vp9_get_pred_context_intra_inter(xd); |
| 427 ref = (MV_REFERENCE_FRAME) | 428 const int is_inter = vp9_read(r, vp9_get_pred_prob_intra_inter(cm, xd)); |
| 428 vp9_read(r, vp9_get_pred_prob_intra_inter(cm, xd)); | 429 ++cm->counts.intra_inter[ctx][is_inter]; |
| 429 cm->counts.intra_inter[ctx][ref != INTRA_FRAME]++; | 430 return is_inter; |
| 430 } else { | |
| 431 ref = (MV_REFERENCE_FRAME) vp9_get_segdata(&xd->seg, segment_id, | |
| 432 SEG_LVL_REF_FRAME) != INTRA_FRAME; | |
| 433 } | 431 } |
| 434 return ref; | |
| 435 } | 432 } |
| 436 | 433 |
| 437 static void read_inter_mode_info(VP9D_COMP *pbi, MODE_INFO *mi, | 434 static void read_inter_block_mode_info(VP9D_COMP *pbi, MODE_INFO *mi, |
| 438 int mi_row, int mi_col, vp9_reader *r) { | 435 int mi_row, int mi_col, vp9_reader *r) { |
| 439 VP9_COMMON *const cm = &pbi->common; | 436 VP9_COMMON *const cm = &pbi->common; |
| 440 MACROBLOCKD *const xd = &pbi->mb; | 437 MACROBLOCKD *const xd = &pbi->mb; |
| 441 nmv_context *const nmvc = &cm->fc.nmvc; | 438 nmv_context *const nmvc = &cm->fc.nmvc; |
| 442 MB_MODE_INFO *const mbmi = &mi->mbmi; | 439 MB_MODE_INFO *const mbmi = &mi->mbmi; |
| 443 | |
| 444 int_mv *const mv0 = &mbmi->mv[0]; | 440 int_mv *const mv0 = &mbmi->mv[0]; |
| 445 int_mv *const mv1 = &mbmi->mv[1]; | 441 int_mv *const mv1 = &mbmi->mv[1]; |
| 446 const BLOCK_SIZE_TYPE bsize = mi->mbmi.sb_type; | 442 const BLOCK_SIZE bsize = mbmi->sb_type; |
| 447 const int bw = 1 << b_width_log2(bsize); | 443 const int allow_hp = xd->allow_high_precision_mv; |
| 448 const int bh = 1 << b_height_log2(bsize); | |
| 449 | 444 |
| 450 int idx, idy; | 445 int_mv nearest, nearby, best_mv; |
| 446 int_mv nearest_second, nearby_second, best_mv_second; |
| 447 uint8_t inter_mode_ctx; |
| 448 MV_REFERENCE_FRAME ref0; |
| 449 int is_compound; |
| 451 | 450 |
| 452 mbmi->segment_id = read_inter_segment_id(pbi, mi_row, mi_col, r); | 451 mbmi->uv_mode = DC_PRED; |
| 453 mbmi->mb_skip_coeff = read_skip_coeff(pbi, mbmi->segment_id, r); | 452 read_ref_frames(pbi, r, mbmi->segment_id, mbmi->ref_frame); |
| 454 mbmi->ref_frame[0] = read_reference_frame(pbi, mbmi->segment_id, r); | 453 ref0 = mbmi->ref_frame[0]; |
| 455 mbmi->ref_frame[1] = NONE; | 454 is_compound = has_second_ref(mbmi); |
| 456 mbmi->txfm_size = read_tx_size(pbi, cm->tx_mode, bsize, | |
| 457 (!mbmi->mb_skip_coeff || mbmi->ref_frame[0] == INTRA_FRAME), r); | |
| 458 | 455 |
| 459 if (mbmi->ref_frame[0] != INTRA_FRAME) { | 456 vp9_find_mv_refs(cm, xd, mi, xd->prev_mode_info_context, |
| 460 int_mv nearest, nearby, best_mv; | 457 ref0, mbmi->ref_mvs[ref0], mi_row, mi_col); |
| 461 int_mv nearest_second, nearby_second, best_mv_second; | |
| 462 vp9_prob *mv_ref_p; | |
| 463 MV_REFERENCE_FRAME ref0, ref1; | |
| 464 | 458 |
| 465 read_ref_frame(pbi, r, mbmi->segment_id, mbmi->ref_frame); | 459 inter_mode_ctx = mbmi->mode_context[ref0]; |
| 466 ref0 = mbmi->ref_frame[0]; | |
| 467 ref1 = mbmi->ref_frame[1]; | |
| 468 | 460 |
| 461 if (vp9_segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) { |
| 462 mbmi->mode = ZEROMV; |
| 463 } else { |
| 464 if (bsize >= BLOCK_8X8) |
| 465 mbmi->mode = read_inter_mode(cm, r, inter_mode_ctx); |
| 466 } |
| 467 |
| 468 // nearest, nearby |
| 469 if (bsize < BLOCK_8X8 || mbmi->mode != ZEROMV) { |
| 470 vp9_find_best_ref_mvs(xd, mbmi->ref_mvs[ref0], &nearest, &nearby); |
| 471 best_mv.as_int = mbmi->ref_mvs[ref0][0].as_int; |
| 472 } |
| 473 |
| 474 if (is_compound) { |
| 475 const MV_REFERENCE_FRAME ref1 = mbmi->ref_frame[1]; |
| 469 vp9_find_mv_refs(cm, xd, mi, xd->prev_mode_info_context, | 476 vp9_find_mv_refs(cm, xd, mi, xd->prev_mode_info_context, |
| 470 ref0, mbmi->ref_mvs[ref0], cm->ref_frame_sign_bias); | 477 ref1, mbmi->ref_mvs[ref1], mi_row, mi_col); |
| 471 | 478 |
| 472 mv_ref_p = cm->fc.inter_mode_probs[mbmi->mb_mode_context[ref0]]; | 479 if (bsize < BLOCK_8X8 || mbmi->mode != ZEROMV) { |
| 480 vp9_find_best_ref_mvs(xd, mbmi->ref_mvs[ref1], |
| 481 &nearest_second, &nearby_second); |
| 482 best_mv_second.as_int = mbmi->ref_mvs[ref1][0].as_int; |
| 483 } |
| 484 } |
| 473 | 485 |
| 474 if (vp9_segfeature_active(&xd->seg, mbmi->segment_id, SEG_LVL_SKIP)) { | 486 mbmi->interp_filter = cm->mcomp_filter_type == SWITCHABLE |
| 475 mbmi->mode = ZEROMV; | |
| 476 } else if (bsize >= BLOCK_SIZE_SB8X8) { | |
| 477 mbmi->mode = read_inter_mode(r, mv_ref_p); | |
| 478 vp9_accum_mv_refs(cm, mbmi->mode, mbmi->mb_mode_context[ref0]); | |
| 479 } | |
| 480 mbmi->uv_mode = DC_PRED; | |
| 481 | |
| 482 // nearest, nearby | |
| 483 if (bsize < BLOCK_SIZE_SB8X8 || mbmi->mode != ZEROMV) { | |
| 484 vp9_find_best_ref_mvs(xd, mbmi->ref_mvs[ref0], &nearest, &nearby); | |
| 485 best_mv.as_int = mbmi->ref_mvs[ref0][0].as_int; | |
| 486 } | |
| 487 | |
| 488 mbmi->interp_filter = cm->mcomp_filter_type == SWITCHABLE | |
| 489 ? read_switchable_filter_type(pbi, r) | 487 ? read_switchable_filter_type(pbi, r) |
| 490 : cm->mcomp_filter_type; | 488 : cm->mcomp_filter_type; |
| 491 | 489 |
| 492 if (ref1 > INTRA_FRAME) { | 490 if (bsize < BLOCK_8X8) { |
| 493 vp9_find_mv_refs(cm, xd, mi, xd->prev_mode_info_context, | 491 const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize]; // 1 or 2 |
| 494 ref1, mbmi->ref_mvs[ref1], cm->ref_frame_sign_bias); | 492 const int num_4x4_h = num_4x4_blocks_high_lookup[bsize]; // 1 or 2 |
| 493 int idx, idy; |
| 494 for (idy = 0; idy < 2; idy += num_4x4_h) { |
| 495 for (idx = 0; idx < 2; idx += num_4x4_w) { |
| 496 int_mv blockmv, secondmv; |
| 497 const int j = idy * 2 + idx; |
| 498 const int b_mode = read_inter_mode(cm, r, inter_mode_ctx); |
| 495 | 499 |
| 496 if (bsize < BLOCK_SIZE_SB8X8 || mbmi->mode != ZEROMV) { | 500 if (b_mode == NEARESTMV || b_mode == NEARMV) { |
| 497 vp9_find_best_ref_mvs(xd, mbmi->ref_mvs[ref1], | 501 vp9_append_sub8x8_mvs_for_idx(cm, xd, &nearest, &nearby, j, 0, |
| 498 &nearest_second, &nearby_second); | 502 mi_row, mi_col); |
| 499 best_mv_second.as_int = mbmi->ref_mvs[ref1][0].as_int; | 503 |
| 504 if (is_compound) |
| 505 vp9_append_sub8x8_mvs_for_idx(cm, xd, &nearest_second, |
| 506 &nearby_second, j, 1, |
| 507 mi_row, mi_col); |
| 508 } |
| 509 |
| 510 switch (b_mode) { |
| 511 case NEWMV: |
| 512 read_mv(r, &blockmv.as_mv, &best_mv.as_mv, nmvc, |
| 513 &cm->counts.mv, allow_hp); |
| 514 |
| 515 if (is_compound) |
| 516 read_mv(r, &secondmv.as_mv, &best_mv_second.as_mv, nmvc, |
| 517 &cm->counts.mv, allow_hp); |
| 518 break; |
| 519 case NEARESTMV: |
| 520 blockmv.as_int = nearest.as_int; |
| 521 if (is_compound) |
| 522 secondmv.as_int = nearest_second.as_int; |
| 523 break; |
| 524 case NEARMV: |
| 525 blockmv.as_int = nearby.as_int; |
| 526 if (is_compound) |
| 527 secondmv.as_int = nearby_second.as_int; |
| 528 break; |
| 529 case ZEROMV: |
| 530 blockmv.as_int = 0; |
| 531 if (is_compound) |
| 532 secondmv.as_int = 0; |
| 533 break; |
| 534 default: |
| 535 assert(!"Invalid inter mode value"); |
| 536 } |
| 537 mi->bmi[j].as_mv[0].as_int = blockmv.as_int; |
| 538 if (is_compound) |
| 539 mi->bmi[j].as_mv[1].as_int = secondmv.as_int; |
| 540 |
| 541 if (num_4x4_h == 2) |
| 542 mi->bmi[j + 2] = mi->bmi[j]; |
| 543 if (num_4x4_w == 2) |
| 544 mi->bmi[j + 1] = mi->bmi[j]; |
| 545 mi->mbmi.mode = b_mode; |
| 500 } | 546 } |
| 501 } | 547 } |
| 502 | 548 |
| 549 mv0->as_int = mi->bmi[3].as_mv[0].as_int; |
| 550 mv1->as_int = mi->bmi[3].as_mv[1].as_int; |
| 551 } else { |
| 552 switch (mbmi->mode) { |
| 553 case NEARMV: |
| 554 mv0->as_int = nearby.as_int; |
| 555 if (is_compound) |
| 556 mv1->as_int = nearby_second.as_int; |
| 557 break; |
| 503 | 558 |
| 504 if (mbmi->sb_type < BLOCK_SIZE_SB8X8) { | 559 case NEARESTMV: |
| 505 for (idy = 0; idy < 2; idy += bh) { | 560 mv0->as_int = nearest.as_int; |
| 506 for (idx = 0; idx < 2; idx += bw) { | 561 if (is_compound) |
| 507 int_mv blockmv, secondmv; | 562 mv1->as_int = nearest_second.as_int; |
| 508 const int j = idy * 2 + idx; | 563 break; |
| 509 const int blockmode = read_inter_mode(r, mv_ref_p); | |
| 510 | 564 |
| 511 vp9_accum_mv_refs(cm, blockmode, mbmi->mb_mode_context[ref0]); | 565 case ZEROMV: |
| 512 if (blockmode == NEARESTMV || blockmode == NEARMV) { | 566 mv0->as_int = 0; |
| 513 vp9_append_sub8x8_mvs_for_idx(cm, xd, &nearest, &nearby, j, 0); | 567 if (is_compound) |
| 514 if (ref1 > 0) | 568 mv1->as_int = 0; |
| 515 vp9_append_sub8x8_mvs_for_idx(cm, xd, &nearest_second, | 569 break; |
| 516 &nearby_second, j, 1); | |
| 517 } | |
| 518 | 570 |
| 519 switch (blockmode) { | 571 case NEWMV: |
| 520 case NEWMV: | 572 read_mv(r, &mv0->as_mv, &best_mv.as_mv, nmvc, &cm->counts.mv, allow_hp); |
| 521 read_mv(r, &blockmv.as_mv, &best_mv.as_mv, nmvc, | 573 if (is_compound) |
| 522 &cm->counts.mv, xd->allow_high_precision_mv); | 574 read_mv(r, &mv1->as_mv, &best_mv_second.as_mv, nmvc, &cm->counts.mv, |
| 575 allow_hp); |
| 576 break; |
| 577 default: |
| 578 assert(!"Invalid inter mode value"); |
| 579 } |
| 580 } |
| 581 } |
| 523 | 582 |
| 524 if (ref1 > 0) | 583 static void read_inter_frame_mode_info(VP9D_COMP *pbi, MODE_INFO *mi, |
| 525 read_mv(r, &secondmv.as_mv, &best_mv_second.as_mv, nmvc, | 584 int mi_row, int mi_col, vp9_reader *r) { |
| 526 &cm->counts.mv, xd->allow_high_precision_mv); | 585 VP9_COMMON *const cm = &pbi->common; |
| 527 break; | 586 MB_MODE_INFO *const mbmi = &mi->mbmi; |
| 528 case NEARESTMV: | 587 int inter_block; |
| 529 blockmv.as_int = nearest.as_int; | |
| 530 if (ref1 > 0) | |
| 531 secondmv.as_int = nearest_second.as_int; | |
| 532 break; | |
| 533 case NEARMV: | |
| 534 blockmv.as_int = nearby.as_int; | |
| 535 if (ref1 > 0) | |
| 536 secondmv.as_int = nearby_second.as_int; | |
| 537 break; | |
| 538 case ZEROMV: | |
| 539 blockmv.as_int = 0; | |
| 540 if (ref1 > 0) | |
| 541 secondmv.as_int = 0; | |
| 542 break; | |
| 543 default: | |
| 544 assert(!"Invalid inter mode value"); | |
| 545 } | |
| 546 mi->bmi[j].as_mv[0].as_int = blockmv.as_int; | |
| 547 if (ref1 > 0) | |
| 548 mi->bmi[j].as_mv[1].as_int = secondmv.as_int; | |
| 549 | 588 |
| 550 if (bh == 2) | 589 mbmi->mv[0].as_int = 0; |
| 551 mi->bmi[j + 2] = mi->bmi[j]; | 590 mbmi->mv[1].as_int = 0; |
| 552 if (bw == 2) | 591 mbmi->segment_id = read_inter_segment_id(pbi, mi_row, mi_col, r); |
| 553 mi->bmi[j + 1] = mi->bmi[j]; | 592 mbmi->skip_coeff = read_skip_coeff(pbi, mbmi->segment_id, r); |
| 554 mi->mbmi.mode = blockmode; | 593 inter_block = read_is_inter_block(pbi, mbmi->segment_id, r); |
| 555 } | 594 mbmi->txfm_size = read_tx_size(pbi, cm->tx_mode, mbmi->sb_type, |
| 556 } | 595 !mbmi->skip_coeff || !inter_block, r); |
| 557 | 596 |
| 558 mv0->as_int = mi->bmi[3].as_mv[0].as_int; | 597 if (inter_block) |
| 559 mv1->as_int = mi->bmi[3].as_mv[1].as_int; | 598 read_inter_block_mode_info(pbi, mi, mi_row, mi_col, r); |
| 560 } else { | 599 else |
| 561 const int mb_to_top_edge = xd->mb_to_top_edge - LEFT_TOP_MARGIN; | 600 read_intra_block_mode_info(pbi, mi, r); |
| 562 const int mb_to_bottom_edge = xd->mb_to_bottom_edge + RIGHT_BOTTOM_MARGIN; | |
| 563 const int mb_to_left_edge = xd->mb_to_left_edge - LEFT_TOP_MARGIN; | |
| 564 const int mb_to_right_edge = xd->mb_to_right_edge + RIGHT_BOTTOM_MARGIN; | |
| 565 | |
| 566 switch (mbmi->mode) { | |
| 567 case NEARMV: | |
| 568 // Clip "next_nearest" so that it does not extend to far out of image | |
| 569 assign_and_clamp_mv(mv0, &nearby, mb_to_left_edge, | |
| 570 mb_to_right_edge, | |
| 571 mb_to_top_edge, | |
| 572 mb_to_bottom_edge); | |
| 573 if (ref1 > 0) | |
| 574 assign_and_clamp_mv(mv1, &nearby_second, mb_to_left_edge, | |
| 575 mb_to_right_edge, | |
| 576 mb_to_top_edge, | |
| 577 mb_to_bottom_edge); | |
| 578 break; | |
| 579 | |
| 580 case NEARESTMV: | |
| 581 // Clip "next_nearest" so that it does not extend to far out of image | |
| 582 assign_and_clamp_mv(mv0, &nearest, mb_to_left_edge, | |
| 583 mb_to_right_edge, | |
| 584 mb_to_top_edge, | |
| 585 mb_to_bottom_edge); | |
| 586 if (ref1 > 0) | |
| 587 assign_and_clamp_mv(mv1, &nearest_second, mb_to_left_edge, | |
| 588 mb_to_right_edge, | |
| 589 mb_to_top_edge, | |
| 590 mb_to_bottom_edge); | |
| 591 break; | |
| 592 | |
| 593 case ZEROMV: | |
| 594 mv0->as_int = 0; | |
| 595 if (ref1 > 0) | |
| 596 mv1->as_int = 0; | |
| 597 break; | |
| 598 | |
| 599 case NEWMV: | |
| 600 read_mv(r, &mv0->as_mv, &best_mv.as_mv, nmvc, &cm->counts.mv, | |
| 601 xd->allow_high_precision_mv); | |
| 602 if (ref1 > 0) | |
| 603 read_mv(r, &mv1->as_mv, &best_mv_second.as_mv, nmvc, | |
| 604 &cm->counts.mv, xd->allow_high_precision_mv); | |
| 605 break; | |
| 606 default: | |
| 607 assert(!"Invalid inter mode value"); | |
| 608 } | |
| 609 } | |
| 610 } else { | |
| 611 mv0->as_int = 0; // required for left and above block mv | |
| 612 read_intra_block_modes(pbi, mi, r); | |
| 613 } | |
| 614 } | 601 } |
| 615 | 602 |
| 616 static void read_comp_pred(VP9_COMMON *cm, vp9_reader *r) { | 603 static void read_comp_pred(VP9_COMMON *cm, vp9_reader *r) { |
| 617 int i; | 604 int i; |
| 618 | 605 |
| 619 cm->comp_pred_mode = cm->allow_comp_inter_inter ? read_comp_pred_mode(r) | 606 cm->comp_pred_mode = cm->allow_comp_inter_inter ? read_comp_pred_mode(r) |
| 620 : SINGLE_PREDICTION_ONLY; | 607 : SINGLE_PREDICTION_ONLY; |
| 621 | 608 |
| 622 if (cm->comp_pred_mode == HYBRID_PREDICTION) | 609 if (cm->comp_pred_mode == HYBRID_PREDICTION) |
| 623 for (i = 0; i < COMP_INTER_CONTEXTS; i++) | 610 for (i = 0; i < COMP_INTER_CONTEXTS; i++) |
| 624 if (vp9_read(r, VP9_MODE_UPDATE_PROB)) | 611 if (vp9_read(r, MODE_UPDATE_PROB)) |
| 625 vp9_diff_update_prob(r, &cm->fc.comp_inter_prob[i]); | 612 vp9_diff_update_prob(r, &cm->fc.comp_inter_prob[i]); |
| 626 | 613 |
| 627 if (cm->comp_pred_mode != COMP_PREDICTION_ONLY) | 614 if (cm->comp_pred_mode != COMP_PREDICTION_ONLY) |
| 628 for (i = 0; i < REF_CONTEXTS; i++) { | 615 for (i = 0; i < REF_CONTEXTS; i++) { |
| 629 if (vp9_read(r, VP9_MODE_UPDATE_PROB)) | 616 if (vp9_read(r, MODE_UPDATE_PROB)) |
| 630 vp9_diff_update_prob(r, &cm->fc.single_ref_prob[i][0]); | 617 vp9_diff_update_prob(r, &cm->fc.single_ref_prob[i][0]); |
| 631 if (vp9_read(r, VP9_MODE_UPDATE_PROB)) | 618 if (vp9_read(r, MODE_UPDATE_PROB)) |
| 632 vp9_diff_update_prob(r, &cm->fc.single_ref_prob[i][1]); | 619 vp9_diff_update_prob(r, &cm->fc.single_ref_prob[i][1]); |
| 633 } | 620 } |
| 634 | 621 |
| 635 if (cm->comp_pred_mode != SINGLE_PREDICTION_ONLY) | 622 if (cm->comp_pred_mode != SINGLE_PREDICTION_ONLY) |
| 636 for (i = 0; i < REF_CONTEXTS; i++) | 623 for (i = 0; i < REF_CONTEXTS; i++) |
| 637 if (vp9_read(r, VP9_MODE_UPDATE_PROB)) | 624 if (vp9_read(r, MODE_UPDATE_PROB)) |
| 638 vp9_diff_update_prob(r, &cm->fc.comp_ref_prob[i]); | 625 vp9_diff_update_prob(r, &cm->fc.comp_ref_prob[i]); |
| 639 } | 626 } |
| 640 | 627 |
| 641 void vp9_prepare_read_mode_info(VP9D_COMP* pbi, vp9_reader *r) { | 628 void vp9_prepare_read_mode_info(VP9D_COMP* pbi, vp9_reader *r) { |
| 642 VP9_COMMON *const cm = &pbi->common; | 629 VP9_COMMON *const cm = &pbi->common; |
| 643 int k; | 630 int k; |
| 644 | 631 |
| 645 // TODO(jkoleszar): does this clear more than MBSKIP_CONTEXTS? Maybe remove. | 632 // TODO(jkoleszar): does this clear more than MBSKIP_CONTEXTS? Maybe remove. |
| 646 // vpx_memset(cm->fc.mbskip_probs, 0, sizeof(cm->fc.mbskip_probs)); | 633 // vpx_memset(cm->fc.mbskip_probs, 0, sizeof(cm->fc.mbskip_probs)); |
| 647 for (k = 0; k < MBSKIP_CONTEXTS; ++k) | 634 for (k = 0; k < MBSKIP_CONTEXTS; ++k) |
| 648 if (vp9_read(r, VP9_MODE_UPDATE_PROB)) | 635 if (vp9_read(r, MODE_UPDATE_PROB)) |
| 649 vp9_diff_update_prob(r, &cm->fc.mbskip_probs[k]); | 636 vp9_diff_update_prob(r, &cm->fc.mbskip_probs[k]); |
| 650 | 637 |
| 651 if (cm->frame_type != KEY_FRAME && !cm->intra_only) { | 638 if (cm->frame_type != KEY_FRAME && !cm->intra_only) { |
| 652 nmv_context *const nmvc = &pbi->common.fc.nmvc; | 639 nmv_context *const nmvc = &pbi->common.fc.nmvc; |
| 653 MACROBLOCKD *const xd = &pbi->mb; | 640 MACROBLOCKD *const xd = &pbi->mb; |
| 654 int i, j; | 641 int i, j; |
| 655 | 642 |
| 656 read_inter_mode_probs(&cm->fc, r); | 643 read_inter_mode_probs(&cm->fc, r); |
| 657 | 644 |
| 658 if (cm->mcomp_filter_type == SWITCHABLE) | 645 if (cm->mcomp_filter_type == SWITCHABLE) |
| 659 read_switchable_interp_probs(&cm->fc, r); | 646 read_switchable_interp_probs(&cm->fc, r); |
| 660 | 647 |
| 661 for (i = 0; i < INTRA_INTER_CONTEXTS; i++) | 648 for (i = 0; i < INTRA_INTER_CONTEXTS; i++) |
| 662 if (vp9_read(r, VP9_MODE_UPDATE_PROB)) | 649 if (vp9_read(r, MODE_UPDATE_PROB)) |
| 663 vp9_diff_update_prob(r, &cm->fc.intra_inter_prob[i]); | 650 vp9_diff_update_prob(r, &cm->fc.intra_inter_prob[i]); |
| 664 | 651 |
| 665 read_comp_pred(cm, r); | 652 read_comp_pred(cm, r); |
| 666 | 653 |
| 667 for (j = 0; j < BLOCK_SIZE_GROUPS; j++) | 654 for (j = 0; j < BLOCK_SIZE_GROUPS; j++) |
| 668 for (i = 0; i < VP9_INTRA_MODES - 1; ++i) | 655 for (i = 0; i < INTRA_MODES - 1; ++i) |
| 669 if (vp9_read(r, VP9_MODE_UPDATE_PROB)) | 656 if (vp9_read(r, MODE_UPDATE_PROB)) |
| 670 vp9_diff_update_prob(r, &cm->fc.y_mode_prob[j][i]); | 657 vp9_diff_update_prob(r, &cm->fc.y_mode_prob[j][i]); |
| 671 | 658 |
| 672 for (j = 0; j < NUM_PARTITION_CONTEXTS; ++j) | 659 for (j = 0; j < NUM_PARTITION_CONTEXTS; ++j) |
| 673 for (i = 0; i < PARTITION_TYPES - 1; ++i) | 660 for (i = 0; i < PARTITION_TYPES - 1; ++i) |
| 674 if (vp9_read(r, VP9_MODE_UPDATE_PROB)) | 661 if (vp9_read(r, MODE_UPDATE_PROB)) |
| 675 vp9_diff_update_prob(r, &cm->fc.partition_prob[INTER_FRAME][j][i]); | 662 vp9_diff_update_prob(r, &cm->fc.partition_prob[INTER_FRAME][j][i]); |
| 676 | 663 |
| 677 read_mv_probs(r, nmvc, xd->allow_high_precision_mv); | 664 read_mv_probs(r, nmvc, xd->allow_high_precision_mv); |
| 678 } | 665 } |
| 679 } | 666 } |
| 680 | 667 |
| 681 void vp9_read_mode_info(VP9D_COMP* pbi, int mi_row, int mi_col, vp9_reader *r) { | 668 void vp9_read_mode_info(VP9D_COMP* pbi, int mi_row, int mi_col, vp9_reader *r) { |
| 682 VP9_COMMON *const cm = &pbi->common; | 669 VP9_COMMON *const cm = &pbi->common; |
| 683 MACROBLOCKD *const xd = &pbi->mb; | 670 MACROBLOCKD *const xd = &pbi->mb; |
| 684 MODE_INFO *mi = xd->mode_info_context; | 671 MODE_INFO *mi = xd->mode_info_context; |
| 685 const BLOCK_SIZE_TYPE bsize = mi->mbmi.sb_type; | 672 const BLOCK_SIZE bsize = mi->mbmi.sb_type; |
| 686 const int bw = 1 << mi_width_log2(bsize); | 673 const int bw = 1 << mi_width_log2(bsize); |
| 687 const int bh = 1 << mi_height_log2(bsize); | 674 const int bh = 1 << mi_height_log2(bsize); |
| 688 const int y_mis = MIN(bh, cm->mi_rows - mi_row); | 675 const int y_mis = MIN(bh, cm->mi_rows - mi_row); |
| 689 const int x_mis = MIN(bw, cm->mi_cols - mi_col); | 676 const int x_mis = MIN(bw, cm->mi_cols - mi_col); |
| 690 int x, y; | 677 int x, y; |
| 691 | 678 |
| 692 if (cm->frame_type == KEY_FRAME || cm->intra_only) | 679 if (cm->frame_type == KEY_FRAME || cm->intra_only) |
| 693 read_intra_mode_info(pbi, mi, mi_row, mi_col, r); | 680 read_intra_frame_mode_info(pbi, mi, mi_row, mi_col, r); |
| 694 else | 681 else |
| 695 read_inter_mode_info(pbi, mi, mi_row, mi_col, r); | 682 read_inter_frame_mode_info(pbi, mi, mi_row, mi_col, r); |
| 696 | 683 |
| 697 for (y = 0; y < y_mis; y++) | 684 for (y = 0; y < y_mis; y++) |
| 698 for (x = !y; x < x_mis; x++) | 685 for (x = !y; x < x_mis; x++) |
| 699 mi[y * cm->mode_info_stride + x] = *mi; | 686 mi[y * cm->mode_info_stride + x] = *mi; |
| 700 } | 687 } |
| OLD | NEW |