OLD | NEW |
1 /* | 1 /* |
2 * Copyright (c) 2010 The VP8 project authors. All Rights Reserved. | 2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
11 | 11 |
12 #include "treereader.h" | 12 #include "treereader.h" |
13 #include "entropymv.h" | 13 #include "entropymv.h" |
14 #include "entropymode.h" | 14 #include "entropymode.h" |
15 #include "onyxd_int.h" | 15 #include "onyxd_int.h" |
16 #include "findnearmv.h" | 16 #include "findnearmv.h" |
17 #include "demode.h" | 17 |
18 #if CONFIG_DEBUG | 18 #if CONFIG_DEBUG |
19 #include <assert.h> | 19 #include <assert.h> |
20 #endif | 20 #endif |
| 21 static int vp8_read_bmode(vp8_reader *bc, const vp8_prob *p) |
| 22 { |
| 23 const int i = vp8_treed_read(bc, vp8_bmode_tree, p); |
| 24 |
| 25 return i; |
| 26 } |
| 27 |
| 28 |
| 29 static int vp8_read_ymode(vp8_reader *bc, const vp8_prob *p) |
| 30 { |
| 31 const int i = vp8_treed_read(bc, vp8_ymode_tree, p); |
| 32 |
| 33 return i; |
| 34 } |
| 35 |
| 36 static int vp8_kfread_ymode(vp8_reader *bc, const vp8_prob *p) |
| 37 { |
| 38 const int i = vp8_treed_read(bc, vp8_kf_ymode_tree, p); |
| 39 |
| 40 return i; |
| 41 } |
| 42 |
| 43 |
| 44 |
| 45 static int vp8_read_uv_mode(vp8_reader *bc, const vp8_prob *p) |
| 46 { |
| 47 const int i = vp8_treed_read(bc, vp8_uv_mode_tree, p); |
| 48 |
| 49 return i; |
| 50 } |
| 51 |
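For reference, each helper above decodes one symbol from a tree-coded alphabet. A sketch of what vp8_treed_read amounts to (the real helper lives in treereader.h; details may differ slightly):

    static int vp8_treed_read(vp8_reader *const r, vp8_tree t, const vp8_prob *const p)
    {
        /* Start at the root; each vp8_read() consumes one bool whose
           probability is taken from p[]. Positive entries in t index the
           next node pair, nonpositive entries are negated leaf tokens. */
        register vp8_tree_index i = 0;

        while ((i = t[i + vp8_read(r, p[i >> 1])]) > 0)
            continue;

        return -i;
    }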
| 52 static void vp8_read_mb_features(vp8_reader *r, MB_MODE_INFO *mi, MACROBLOCKD *x) |
| 53 { |
| 54 // Is segmentation enabled |
| 55 if (x->segmentation_enabled && x->update_mb_segmentation_map) |
| 56 { |
| 57 // If so then read the segment id. |
| 58 if (vp8_read(r, x->mb_segment_tree_probs[0])) |
| 59 mi->segment_id = (unsigned char)(2 + vp8_read(r, x->mb_segment_tree_probs[2])); |
| 60 else |
| 61 mi->segment_id = (unsigned char)(vp8_read(r, x->mb_segment_tree_probs[1])); |
| 62 } |
| 63 } |
| 64 |
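The segment-id read above is a fixed two-level tree, so it always costs exactly two bool reads; an equivalent standalone form (hypothetical helper name, same mb_segment_tree_probs layout):

    static int read_segment_id(vp8_reader *r, const vp8_prob probs[3])
    {
        /* probs[0] picks the half (ids 0-1 vs 2-3); probs[1] or probs[2]
           then supplies the low bit of the segment id. */
        return vp8_read(r, probs[0]) ? 2 + vp8_read(r, probs[2])
                                     : vp8_read(r, probs[1]);
    }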
| 65 static void vp8_kfread_modes(VP8D_COMP *pbi, MODE_INFO *m, int mb_row, int mb_col) |
| 66 { |
| 67 vp8_reader *const bc = & pbi->bc; |
| 68 const int mis = pbi->common.mode_info_stride; |
| 69 |
| 70 { |
| 71 MB_PREDICTION_MODE y_mode; |
| 72 |
| 73 // Read the Macroblock segmentation map if it is being updated explicitly this frame (reset to 0 above by default) |
| 74 // By default on a key frame reset all MBs to segment 0 |
| 75 m->mbmi.segment_id = 0; |
| 76 |
| 77 if (pbi->mb.update_mb_segmentation_map) |
| 78 vp8_read_mb_features(bc, &m->mbmi, &pbi->mb); |
| 79 |
| 80 // Read the macroblock coeff skip flag if this feature is in use, else default to 0 |
| 81 if (pbi->common.mb_no_coeff_skip) |
| 82 m->mbmi.mb_skip_coeff = vp8_read(bc, pbi->prob_skip_false); |
| 83 else |
| 84 m->mbmi.mb_skip_coeff = 0; |
| 85 |
| 86 y_mode = (MB_PREDICTION_MODE) vp8_kfread_ymode(bc, pbi->common.kf_ymode_prob); |
| 87 |
| 88 m->mbmi.ref_frame = INTRA_FRAME; |
| 89 |
| 90 if ((m->mbmi.mode = y_mode) == B_PRED) |
| 91 { |
| 92 int i = 0; |
| 93 |
| 94 do |
| 95 { |
| 96 const B_PREDICTION_MODE A = vp8_above_bmi(m, i, mis)->mode; |
| 97 const B_PREDICTION_MODE L = vp8_left_bmi(m, i)->mode; |
| 98 |
| 99 m->bmi[i].mode = (B_PREDICTION_MODE) vp8_read_bmode(bc, pbi->common.kf_bmode_prob [A] [L]); |
| 100 } |
| 101 while (++i < 16); |
| 102 } |
| 103 else |
| 104 { |
| 105 int BMode; |
| 106 int i = 0; |
| 107 |
| 108 switch (y_mode) |
| 109 { |
| 110 case DC_PRED: |
| 111 BMode = B_DC_PRED; |
| 112 break; |
| 113 case V_PRED: |
| 114 BMode = B_VE_PRED; |
| 115 break; |
| 116 case H_PRED: |
| 117 BMode = B_HE_PRED; |
| 118 break; |
| 119 case TM_PRED: |
| 120 BMode = B_TM_PRED; |
| 121 break; |
| 122 default: |
| 123 BMode = B_DC_PRED; |
| 124 break; |
| 125 } |
| 126 |
| 127 do |
| 128 { |
| 129 m->bmi[i].mode = (B_PREDICTION_MODE)BMode; |
| 130 } |
| 131 while (++i < 16); |
| 132 } |
| 133 |
| 134 m->mbmi.uv_mode = (MB_PREDICTION_MODE)vp8_read_uv_mode(bc, pbi->common.kf_uv_mode_prob); |
| 135 } |
| 136 } |
21 | 137 |
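Note on the key-frame B_PRED loop above: each 4x4 mode is conditioned on the modes of the above and left neighbours, which is why kf_bmode_prob is indexed as [A][L]. Assuming the table shape from entropymode.h (VP8_BINTRAMODES == 10):

    /* One probability vector per (above, left) pair; a tree with 10
       leaves (B_DC_PRED .. B_HU_PRED) needs 9 internal-node probs. */
    vp8_prob kf_bmode_prob [VP8_BINTRAMODES] [VP8_BINTRAMODES] [VP8_BINTRAMODES - 1];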
22 static int read_mvcomponent(vp8_reader *r, const MV_CONTEXT *mvc) | 138 static int read_mvcomponent(vp8_reader *r, const MV_CONTEXT *mvc) |
23 { | 139 { |
24 const vp8_prob *const p = (const vp8_prob *) mvc; | 140 const vp8_prob *const p = (const vp8_prob *) mvc; |
25 int x = 0; | 141 int x = 0; |
26 | 142 |
27 if (vp8_read(r, p [mvpis_short])) /* Large */ | 143 if (vp8_read(r, p [mvpis_short])) /* Large */ |
28 { | 144 { |
29 int i = 0; | 145 int i = 0; |
30 | 146 |
(...skipping 61 matching lines...)
92 | 208 |
93 return (MB_PREDICTION_MODE)i; | 209 return (MB_PREDICTION_MODE)i; |
94 } | 210 } |
95 | 211 |
96 static MB_PREDICTION_MODE sub_mv_ref(vp8_reader *bc, const vp8_prob *p) | 212 static MB_PREDICTION_MODE sub_mv_ref(vp8_reader *bc, const vp8_prob *p) |
97 { | 213 { |
98 const int i = vp8_treed_read(bc, vp8_sub_mv_ref_tree, p); | 214 const int i = vp8_treed_read(bc, vp8_sub_mv_ref_tree, p); |
99 | 215 |
100 return (MB_PREDICTION_MODE)i; | 216 return (MB_PREDICTION_MODE)i; |
101 } | 217 } |
| 218 |
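sub_mv_ref is conditioned on the left/above sub-block MVs: vp8_mv_cont (from findnearmv.h) maps the pair to one of five contexts, and vp8_mv_cont_count below (when VPX_MODE_COUNT is defined) histograms context against the decoded choice (LEFT4X4, ABOVE4X4, ZERO4X4, NEW4X4). A sketch of the classification, assuming the SUBMVREF_* names from findnearmv.h:

    static int vp8_mv_cont(const MV *l, const MV *a)
    {
        const int lez = (l->row == 0 && l->col == 0);  /* left is zero  */
        const int aez = (a->row == 0 && a->col == 0);  /* above is zero */
        const int lea = (l->row == a->row && l->col == a->col);

        if (lea && lez) return SUBMVREF_LEFT_ABOVE_ZED;
        if (lea)        return SUBMVREF_LEFT_ABOVE_SAME;
        if (aez)        return SUBMVREF_ABOVE_ZED;
        if (lez)        return SUBMVREF_LEFT_ZED;
        return SUBMVREF_NORMAL;
    }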
| 219 #ifdef VPX_MODE_COUNT |
102 unsigned int vp8_mv_cont_count[5][4] = | 220 unsigned int vp8_mv_cont_count[5][4] = |
103 { | 221 { |
104 { 0, 0, 0, 0 }, | 222 { 0, 0, 0, 0 }, |
105 { 0, 0, 0, 0 }, | 223 { 0, 0, 0, 0 }, |
106 { 0, 0, 0, 0 }, | 224 { 0, 0, 0, 0 }, |
107 { 0, 0, 0, 0 }, | 225 { 0, 0, 0, 0 }, |
108 { 0, 0, 0, 0 } | 226 { 0, 0, 0, 0 } |
109 }; | 227 }; |
110 | 228 #endif |
111 void vp8_decode_mode_mvs(VP8D_COMP *pbi) | 229 |
| 230 unsigned char vp8_mbsplit_offset[4][16] = { |
| 231 { 0, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, |
| 232 { 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, |
| 233 { 0, 2, 8, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, |
| 234 { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15} |
| 235 }; |
| 236 |
| 237 unsigned char vp8_mbsplit_fill_count[4] = {8, 8, 4, 1}; |
| 238 unsigned char vp8_mbsplit_fill_offset[4][16] = { |
| 239 { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, |
| 240 { 0, 1, 4, 5, 8, 9, 12, 13, 2, 3, 6, 7, 10, 11, 14, 15}, |
| 241 { 0, 1, 4, 5, 2, 3, 6, 7, 8, 9, 12, 13, 10, 11, 14, 15}, |
| 242 { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15} |
| 243 }; |
| 244 |
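These tables drive the SPLITMV fill loop below: vp8_mbsplit_offset[s][j] is the first 4x4 block of partition j under split configuration s, vp8_mbsplit_fill_count[s] is how many blocks each partition covers (two halves of 8, four quarters of 4, sixteen singletons), and vp8_mbsplit_fill_offset[s] lists the blocks partition by partition. The inline fill in vp8_read_mb_modes_mv is equivalent to this hypothetical helper:

    /* Copy one decoded B_MODE_INFO into every 4x4 block of partition j
       under split configuration s. For s == 2, j == 3 this writes
       blocks {10, 11, 14, 15}, the bottom-right 8x8 quarter. */
    static void fill_partition(MODE_INFO *mi, const B_MODE_INFO *bmi, int s, int j)
    {
        const unsigned char *offset =
            &vp8_mbsplit_fill_offset[s][j * vp8_mbsplit_fill_count[s]];
        unsigned int count = vp8_mbsplit_fill_count[s];

        do
            mi->bmi[*offset++] = *bmi;
        while (--count);
    }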
| 245 |
| 246 |
| 247 |
| 248 void vp8_mb_mode_mv_init(VP8D_COMP *pbi) |
| 249 { |
| 250 vp8_reader *const bc = & pbi->bc; |
| 251 MV_CONTEXT *const mvc = pbi->common.fc.mvc; |
| 252 |
| 253 pbi->prob_skip_false = 0; |
| 254 if (pbi->common.mb_no_coeff_skip) |
| 255 pbi->prob_skip_false = (vp8_prob)vp8_read_literal(bc, 8); |
| 256 |
| 257 if(pbi->common.frame_type != KEY_FRAME) |
| 258 { |
| 259 pbi->prob_intra = (vp8_prob)vp8_read_literal(bc, 8); |
| 260 pbi->prob_last = (vp8_prob)vp8_read_literal(bc, 8); |
| 261 pbi->prob_gf = (vp8_prob)vp8_read_literal(bc, 8); |
| 262 |
| 263 if (vp8_read_bit(bc)) |
| 264 { |
| 265 int i = 0; |
| 266 |
| 267 do |
| 268 { |
| 269 pbi->common.fc.ymode_prob[i] = (vp8_prob) vp8_read_literal(bc, 8); |
| 270 } |
| 271 while (++i < 4); |
| 272 } |
| 273 |
| 274 if (vp8_read_bit(bc)) |
| 275 { |
| 276 int i = 0; |
| 277 |
| 278 do |
| 279 { |
| 280 pbi->common.fc.uv_mode_prob[i] = (vp8_prob) vp8_read_literal(bc, 8); |
| 281 } |
| 282 while (++i < 3); |
| 283 } |
| 284 |
| 285 read_mvcontexts(bc, mvc); |
| 286 } |
| 287 } |
| 288 |
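Splitting the old monolithic loop into vp8_mb_mode_mv_init() plus a per-macroblock reader means the frame-level reads (prob_skip_false, prob_intra, prob_last, prob_gf, the mode-prob updates and read_mvcontexts) happen exactly once per frame and are stored on pbi instead of in locals. The driver then reduces to (simplified sketch of vp8_decode_mode_mvs at the end of the file):

    vp8_mb_mode_mv_init(pbi);   /* frame-header probabilities, read once */

    while (++mb_row < pbi->common.mb_rows)
        while (++mb_col < pbi->common.mb_cols)
            vp8_read_mb_modes_mv(pbi, mi, &mi->mbmi, mb_row, mb_col);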
| 289 void vp8_read_mb_modes_mv(VP8D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi, |
| 290 int mb_row, int mb_col) |
112 { | 291 { |
113 const MV Zero = { 0, 0}; | 292 const MV Zero = { 0, 0}; |
114 | |
115 VP8_COMMON *const pc = & pbi->common; | |
116 vp8_reader *const bc = & pbi->bc; | 293 vp8_reader *const bc = & pbi->bc; |
117 | 294 MV_CONTEXT *const mvc = pbi->common.fc.mvc; |
118 MODE_INFO *mi = pc->mi, *ms; | 295 const int mis = pbi->common.mode_info_stride; |
119 const int mis = pc->mode_info_stride; | 296 |
120 | 297 MV *const mv = & mbmi->mv.as_mv; |
121 MV_CONTEXT *const mvc = pc->fc.mvc; | 298 int mb_to_left_edge; |
122 | 299 int mb_to_right_edge; |
123 int mb_row = -1; | 300 int mb_to_top_edge; |
124 | 301 int mb_to_bottom_edge; |
125 vp8_prob prob_intra; | 302 |
126 vp8_prob prob_last; | 303 mb_to_top_edge = pbi->mb.mb_to_top_edge; |
127 vp8_prob prob_gf; | 304 mb_to_bottom_edge = pbi->mb.mb_to_bottom_edge; |
128 vp8_prob prob_skip_false = 0; | 305 mb_to_top_edge -= LEFT_TOP_MARGIN; |
129 | 306 mb_to_bottom_edge += RIGHT_BOTTOM_MARGIN; |
130 if (pc->mb_no_coeff_skip) | 307 |
131 prob_skip_false = (vp8_prob)vp8_read_literal(bc, 8); | 308 mbmi->need_to_clamp_mvs = 0; |
132 | 309 // Distance of Mb to the various image edges. |
133 prob_intra = (vp8_prob)vp8_read_literal(bc, 8); | 310 // These are specified to 1/8th pel as they are always compared to MV values that are in 1/8th pel units |
134 prob_last = (vp8_prob)vp8_read_literal(bc, 8); | 311 pbi->mb.mb_to_left_edge = |
135 prob_gf = (vp8_prob)vp8_read_literal(bc, 8); | 312 mb_to_left_edge = -((mb_col * 16) << 3); |
136 | 313 mb_to_left_edge -= LEFT_TOP_MARGIN; |
137 ms = pc->mi - 1; | 314 |
138 | 315 pbi->mb.mb_to_right_edge = |
139 if (vp8_read_bit(bc)) | 316 mb_to_right_edge = ((pbi->common.mb_cols - 1 - mb_col) * 16) << 3; |
| 317 mb_to_right_edge += RIGHT_BOTTOM_MARGIN; |
| 318 |
| 319 // If required read in new segmentation data for this MB |
| 320 if (pbi->mb.update_mb_segmentation_map) |
| 321 vp8_read_mb_features(bc, mbmi, &pbi->mb); |
| 322 |
| 323 // Read the macroblock coeff skip flag if this feature is in use, else default to 0 |
| 324 if (pbi->common.mb_no_coeff_skip) |
| 325 mbmi->mb_skip_coeff = vp8_read(bc, pbi->prob_skip_false); |
| 326 else |
| 327 mbmi->mb_skip_coeff = 0; |
| 328 |
| 329 if ((mbmi->ref_frame = (MV_REFERENCE_FRAME) vp8_read(bc, pbi->prob_intra))) /* inter MB */ |
140 { | 330 { |
141 int i = 0; | 331 int rct[4]; |
142 | 332 vp8_prob mv_ref_p [VP8_MVREFS-1]; |
143 do | 333 MV nearest, nearby, best_mv; |
144 { | 334 |
145 pc->fc.ymode_prob[i] = (vp8_prob) vp8_read_literal(bc, 8); | 335 if (vp8_read(bc, pbi->prob_last)) |
146 } | 336 { |
147 while (++i < 4); | 337 mbmi->ref_frame = (MV_REFERENCE_FRAME)((int)mbmi->ref_frame + (int)(1 + vp8_read(bc, pbi->prob_gf))); |
148 } | 338 } |
149 | 339 |
150 if (vp8_read_bit(bc)) | 340 vp8_find_near_mvs(&pbi->mb, mi, &nearest, &nearby, &best_mv, rct, mbmi->ref_frame, pbi->common.ref_frame_sign_bias); |
151 { | 341 |
152 int i = 0; | 342 vp8_mv_ref_probs(mv_ref_p, rct); |
153 | 343 |
154 do | 344 mbmi->uv_mode = DC_PRED; |
155 { | 345 switch (mbmi->mode = read_mv_ref(bc, mv_ref_p)) |
156 pc->fc.uv_mode_prob[i] = (vp8_prob) vp8_read_literal(bc, 8); | 346 { |
157 } | 347 case SPLITMV: |
158 while (++i < 3); | 348 { |
159 } | 349 const int s = mbmi->partitioning = |
160 | 350 vp8_treed_read(bc, vp8_mbsplit_tree, vp8_mbsplit_probs); |
161 read_mvcontexts(bc, mvc); | 351 const int num_p = vp8_mbsplit_count [s]; |
162 | 352 int j = 0; |
163 while (++mb_row < pc->mb_rows) | 353 |
164 { | 354 do /* for each subset j */ |
165 int mb_col = -1; | 355 { |
166 | 356 B_MODE_INFO bmi; |
167 while (++mb_col < pc->mb_cols) | 357 MV *const mv = & bmi.mv.as_mv; |
168 { | 358 |
169 MB_MODE_INFO *const mbmi = & mi->mbmi; | 359 int k; /* first block in subset j */ |
170 MV *const mv = & mbmi->mv.as_mv; | 360 int mv_contz; |
171 VP8_COMMON *const pc = &pbi->common; | 361 k = vp8_mbsplit_offset[s][j]; |
172 MACROBLOCKD *xd = &pbi->mb; | 362 |
173 | 363 mv_contz = vp8_mv_cont(&(vp8_left_bmi(mi, k)->mv.as_mv), &(vp8_above_bmi(mi, k, mis)->mv.as_mv)); |
174 mbmi->need_to_clamp_mvs = 0; | 364 |
175 // Distance of Mb to the various image edges. | 365 switch (bmi.mode = (B_PREDICTION_MODE) sub_mv_ref(bc, vp8_sub_mv_ref_prob2 [mv_contz])) //pc->fc.sub_mv_ref_prob)) |
176 // These are specified to 1/8th pel as they are always compared to MV values that are in 1/8th pel units | |
177 xd->mb_to_left_edge = -((mb_col * 16) << 3); | |
178 xd->mb_to_right_edge = ((pc->mb_cols - 1 - mb_col) * 16) << 3; | |
179 xd->mb_to_top_edge = -((mb_row * 16)) << 3; | |
180 xd->mb_to_bottom_edge = ((pc->mb_rows - 1 - mb_row) * 16) << 3; | |
181 | |
182 // If required read in new segmentation data for this MB | |
183 if (pbi->mb.update_mb_segmentation_map) | |
184 vp8_read_mb_features(bc, mbmi, &pbi->mb); | |
185 | |
186 // Read the macroblock coeff skip flag if this feature is in use, else default to 0 | |
187 if (pc->mb_no_coeff_skip) | |
188 mbmi->mb_skip_coeff = vp8_read(bc, prob_skip_false); | |
189 else | |
190 mbmi->mb_skip_coeff = 0; | |
191 | |
192 mbmi->uv_mode = DC_PRED; | |
193 | |
194 if ((mbmi->ref_frame = (MV_REFERENCE_FRAME) vp8_read(bc, prob_intra))) /* inter MB */ | |
195 { | |
196 int rct[4]; | |
197 vp8_prob mv_ref_p [VP8_MVREFS-1]; | |
198 MV nearest, nearby, best_mv; | |
199 | |
200 if (vp8_read(bc, prob_last)) | |
201 { | 366 { |
202 mbmi->ref_frame = (MV_REFERENCE_FRAME)((int)mbmi->ref_frame + (int)(1 + vp8_read(bc, prob_gf))); | 367 case NEW4X4: |
203 } | |
204 | |
205 vp8_find_near_mvs(xd, mi, &nearest, &nearby, &best_mv, rct, mbmi->ref_frame, pbi->common.ref_frame_sign_bias); | |
206 | |
207 vp8_mv_ref_probs(mv_ref_p, rct); | |
208 | |
209 switch (mbmi->mode = read_mv_ref(bc, mv_ref_p)) | |
210 { | |
211 case SPLITMV: | |
212 { | |
213 const int s = mbmi->partitioning = vp8_treed_read( | |
214 bc, vp8_mbsplit_tree,
vp8_mbsplit_probs | |
215 ); | |
216 const int num_p = vp8_mbsplit_count [s]; | |
217 const int *const L = vp8_mbsplits [s]; | |
218 int j = 0; | |
219 | |
220 do /* for each subset j */ | |
221 { | |
222 B_MODE_INFO bmi; | |
223 MV *const mv = & bmi.mv.as_mv; | |
224 | |
225 int k = -1; /* first block in subset j */ | |
226 int mv_contz; | |
227 | |
228 while (j != L[++k]) | |
229 { | |
230 #if CONFIG_DEBUG | |
231 if (k >= 16) | |
232 { | |
233 assert(0); | |
234 } | |
235 #endif | |
236 } | |
237 | |
238 mv_contz = vp8_mv_cont(&(vp8_left_bmi(mi, k)->mv.as_mv),
&(vp8_above_bmi(mi, k, mis)->mv.as_mv)); | |
239 | |
240 switch (bmi.mode = (B_PREDICTION_MODE) sub_mv_ref(bc, vp
8_sub_mv_ref_prob2 [mv_contz])) //pc->fc.sub_mv_ref_prob)) | |
241 { | |
242 case NEW4X4: | |
243 read_mv(bc, mv, (const MV_CONTEXT *) mvc); | |
244 mv->row += best_mv.row; | |
245 mv->col += best_mv.col; | |
246 #ifdef VPX_MODE_COUNT | |
247 vp8_mv_cont_count[mv_contz][3]++; | |
248 #endif | |
249 break; | |
250 case LEFT4X4: | |
251 *mv = vp8_left_bmi(mi, k)->mv.as_mv; | |
252 #ifdef VPX_MODE_COUNT | |
253 vp8_mv_cont_count[mv_contz][0]++; | |
254 #endif | |
255 break; | |
256 case ABOVE4X4: | |
257 *mv = vp8_above_bmi(mi, k, mis)->mv.as_mv; | |
258 #ifdef VPX_MODE_COUNT | |
259 vp8_mv_cont_count[mv_contz][1]++; | |
260 #endif | |
261 break; | |
262 case ZERO4X4: | |
263 *mv = Zero; | |
264 #ifdef VPX_MODE_COUNT | |
265 vp8_mv_cont_count[mv_contz][2]++; | |
266 #endif | |
267 break; | |
268 default: | |
269 break; | |
270 } | |
271 | |
272 if (mv->col < xd->mb_to_left_edge | |
273 - LEFT_TOP_MARGIN | |
274 || mv->col > xd->mb_to_right_edge | |
275 + RIGHT_BOTTOM_MARGIN | |
276 || mv->row < xd->mb_to_top_edge | |
277 - LEFT_TOP_MARGIN | |
278 || mv->row > xd->mb_to_bottom_edge | |
279 + RIGHT_BOTTOM_MARGIN | |
280 ) | |
281 mbmi->need_to_clamp_mvs = 1; | |
282 | |
283 /* Fill (uniform) modes, mvs of jth subset. | |
284 Must do it here because ensuing subsets can | |
285 refer back to us via "left" or "above". */ | |
286 do | |
287 if (j == L[k]) | |
288 mi->bmi[k] = bmi; | |
289 | |
290 while (++k < 16); | |
291 } | |
292 while (++j < num_p); | |
293 } | |
294 | |
295 *mv = mi->bmi[15].mv.as_mv; | |
296 | |
297 break; /* done with SPLITMV */ | |
298 | |
299 case NEARMV: | |
300 *mv = nearby; | |
301 | |
302 // Clip "next_nearest" so that it does not extend to far out
of image | |
303 if (mv->col < (xd->mb_to_left_edge - LEFT_TOP_MARGIN)) | |
304 mv->col = xd->mb_to_left_edge - LEFT_TOP_MARGIN; | |
305 else if (mv->col > xd->mb_to_right_edge + RIGHT_BOTTOM_MARGI
N) | |
306 mv->col = xd->mb_to_right_edge + RIGHT_BOTTOM_MARGIN; | |
307 | |
308 if (mv->row < (xd->mb_to_top_edge - LEFT_TOP_MARGIN)) | |
309 mv->row = xd->mb_to_top_edge - LEFT_TOP_MARGIN; | |
310 else if (mv->row > xd->mb_to_bottom_edge + RIGHT_BOTTOM_MARG
IN) | |
311 mv->row = xd->mb_to_bottom_edge + RIGHT_BOTTOM_MARGIN; | |
312 | |
313 goto propagate_mv; | |
314 | |
315 case NEARESTMV: | |
316 *mv = nearest; | |
317 | |
318 // Clip "next_nearest" so that it does not extend to far out
of image | |
319 if (mv->col < (xd->mb_to_left_edge - LEFT_TOP_MARGIN)) | |
320 mv->col = xd->mb_to_left_edge - LEFT_TOP_MARGIN; | |
321 else if (mv->col > xd->mb_to_right_edge + RIGHT_BOTTOM_MARGI
N) | |
322 mv->col = xd->mb_to_right_edge + RIGHT_BOTTOM_MARGIN; | |
323 | |
324 if (mv->row < (xd->mb_to_top_edge - LEFT_TOP_MARGIN)) | |
325 mv->row = xd->mb_to_top_edge - LEFT_TOP_MARGIN; | |
326 else if (mv->row > xd->mb_to_bottom_edge + RIGHT_BOTTOM_MARG
IN) | |
327 mv->row = xd->mb_to_bottom_edge + RIGHT_BOTTOM_MARGIN; | |
328 | |
329 goto propagate_mv; | |
330 | |
331 case ZEROMV: | |
332 *mv = Zero; | |
333 goto propagate_mv; | |
334 | |
335 case NEWMV: | |
336 read_mv(bc, mv, (const MV_CONTEXT *) mvc); | 368 read_mv(bc, mv, (const MV_CONTEXT *) mvc); |
337 mv->row += best_mv.row; | 369 mv->row += best_mv.row; |
338 mv->col += best_mv.col; | 370 mv->col += best_mv.col; |
339 | 371 #ifdef VPX_MODE_COUNT |
340 /* Don't need to check this on NEARMV and NEARESTMV modes | 372 vp8_mv_cont_count[mv_contz][3]++; |
341 * since those modes clamp the MV. The NEWMV mode does not, | 373 #endif |
342 * so signal to the prediction stage whether special | 374 break; |
343 * handling may be required. | 375 case LEFT4X4: |
344 */ | 376 *mv = vp8_left_bmi(mi, k)->mv.as_mv; |
345 if (mv->col < xd->mb_to_left_edge - LEFT_TOP_MARGIN | 377 #ifdef VPX_MODE_COUNT |
346 || mv->col > xd->mb_to_right_edge + RIGHT_BOTTOM_MARGIN | 378 vp8_mv_cont_count[mv_contz][0]++; |
347 || mv->row < xd->mb_to_top_edge - LEFT_TOP_MARGIN | 379 #endif |
348 || mv->row > xd->mb_to_bottom_edge + RIGHT_BOTTOM_MARGIN | 380 break; |
349 ) | 381 case ABOVE4X4: |
350 mbmi->need_to_clamp_mvs = 1; | 382 *mv = vp8_above_bmi(mi, k, mis)->mv.as_mv; |
351 | 383 #ifdef VPX_MODE_COUNT |
352 propagate_mv: /* same MV throughout */ | 384 vp8_mv_cont_count[mv_contz][1]++; |
353 { | 385 #endif |
354 //int i=0; | 386 break; |
355 //do | 387 case ZERO4X4: |
356 //{ | 388 *mv = Zero; |
357 // mi->bmi[i].mv.as_mv = *mv; | 389 #ifdef VPX_MODE_COUNT |
358 //} | 390 vp8_mv_cont_count[mv_contz][2]++; |
359 //while( ++i < 16); | 391 #endif |
360 | 392 break; |
361 mi->bmi[0].mv.as_mv = *mv; | 393 default: |
362 mi->bmi[1].mv.as_mv = *mv; | 394 break; |
363 mi->bmi[2].mv.as_mv = *mv; | |
364 mi->bmi[3].mv.as_mv = *mv; | |
365 mi->bmi[4].mv.as_mv = *mv; | |
366 mi->bmi[5].mv.as_mv = *mv; | |
367 mi->bmi[6].mv.as_mv = *mv; | |
368 mi->bmi[7].mv.as_mv = *mv; | |
369 mi->bmi[8].mv.as_mv = *mv; | |
370 mi->bmi[9].mv.as_mv = *mv; | |
371 mi->bmi[10].mv.as_mv = *mv; | |
372 mi->bmi[11].mv.as_mv = *mv; | |
373 mi->bmi[12].mv.as_mv = *mv; | |
374 mi->bmi[13].mv.as_mv = *mv; | |
375 mi->bmi[14].mv.as_mv = *mv; | |
376 mi->bmi[15].mv.as_mv = *mv; | |
377 } | |
378 | |
379 break; | |
380 | |
381 default:; | |
382 #if CONFIG_DEBUG | |
383 assert(0); | |
384 #endif | |
385 } | 395 } |
386 } | 396 |
| 397 mbmi->need_to_clamp_mvs |= (mv->col < mb_to_left_edge) ? 1 : 0; |
| 398 mbmi->need_to_clamp_mvs |= (mv->col > mb_to_right_edge) ? 1 : 0; |
| 399 mbmi->need_to_clamp_mvs |= (mv->row < mb_to_top_edge) ? 1 : 0; |
| 400 mbmi->need_to_clamp_mvs |= (mv->row > mb_to_bottom_edge) ? 1 : 0; |
| 401 |
| 402 { |
| 403 /* Fill (uniform) modes, mvs of jth subset. |
| 404 Must do it here because ensuing subsets can |
| 405 refer back to us via "left" or "above". */ |
| 406 unsigned char *fill_offset; |
| 407 unsigned int fill_count = vp8_mbsplit_fill_count[s]; |
| 408 |
| 409 fill_offset = &vp8_mbsplit_fill_offset[s][(unsigned char)j * vp8_mbsplit_fill_count[s]]; |
| 410 |
| 411 do { |
| 412 mi->bmi[ *fill_offset] = bmi; |
| 413 fill_offset++; |
| 414 |
| 415 }while (--fill_count); |
| 416 } |
| 417 |
| 418 } |
| 419 while (++j < num_p); |
| 420 } |
| 421 |
| 422 *mv = mi->bmi[15].mv.as_mv; |
| 423 |
| 424 break; /* done with SPLITMV */ |
| 425 |
| 426 case NEARMV: |
| 427 *mv = nearby; |
| 428 // Clip "next_nearest" so that it does not extend too far out of image |
| 429 mv->col = (mv->col < mb_to_left_edge) ? mb_to_left_edge : mv->col; |
| 430 mv->col = (mv->col > mb_to_right_edge) ? mb_to_right_edge : mv->col; |
| 431 mv->row = (mv->row < mb_to_top_edge) ? mb_to_top_edge : mv->row; |
| 432 mv->row = (mv->row > mb_to_bottom_edge) ? mb_to_bottom_edge : mv->row; |
| 433 goto propagate_mv; |
| 434 |
| 435 case NEARESTMV: |
| 436 *mv = nearest; |
| 437 // Clip "next_nearest" so that it does not extend too far out of image |
| 438 mv->col = (mv->col < mb_to_left_edge) ? mb_to_left_edge : mv->col; |
| 439 mv->col = (mv->col > mb_to_right_edge) ? mb_to_right_edge : mv->col; |
| 440 mv->row = (mv->row < mb_to_top_edge) ? mb_to_top_edge : mv->row; |
| 441 mv->row = (mv->row > mb_to_bottom_edge) ? mb_to_bottom_edge : mv->row; |
| 442 goto propagate_mv; |
| 443 |
| 444 case ZEROMV: |
| 445 *mv = Zero; |
| 446 goto propagate_mv; |
| 447 |
| 448 case NEWMV: |
| 449 read_mv(bc, mv, (const MV_CONTEXT *) mvc); |
| 450 mv->row += best_mv.row; |
| 451 mv->col += best_mv.col; |
| 452 |
| 453 /* Don't need to check this on NEARMV and NEARESTMV modes |
| 454 * since those modes clamp the MV. The NEWMV mode does not, |
| 455 * so signal to the prediction stage whether special |
| 456 * handling may be required. |
| 457 */ |
| 458 mbmi->need_to_clamp_mvs = (mv->col < mb_to_left_edge) ? 1 : 0; |
| 459 mbmi->need_to_clamp_mvs |= (mv->col > mb_to_right_edge) ? 1 : 0; |
| 460 mbmi->need_to_clamp_mvs |= (mv->row < mb_to_top_edge) ? 1 : 0; |
| 461 mbmi->need_to_clamp_mvs |= (mv->row > mb_to_bottom_edge) ? 1 : 0; |
| 462 |
| 463 propagate_mv: /* same MV throughout */ |
| 464 { |
| 465 //int i=0; |
| 466 //do |
| 467 //{ |
| 468 // mi->bmi[i].mv.as_mv = *mv; |
| 469 //} |
| 470 //while( ++i < 16); |
| 471 |
| 472 mi->bmi[0].mv.as_mv = *mv; |
| 473 mi->bmi[1].mv.as_mv = *mv; |
| 474 mi->bmi[2].mv.as_mv = *mv; |
| 475 mi->bmi[3].mv.as_mv = *mv; |
| 476 mi->bmi[4].mv.as_mv = *mv; |
| 477 mi->bmi[5].mv.as_mv = *mv; |
| 478 mi->bmi[6].mv.as_mv = *mv; |
| 479 mi->bmi[7].mv.as_mv = *mv; |
| 480 mi->bmi[8].mv.as_mv = *mv; |
| 481 mi->bmi[9].mv.as_mv = *mv; |
| 482 mi->bmi[10].mv.as_mv = *mv; |
| 483 mi->bmi[11].mv.as_mv = *mv; |
| 484 mi->bmi[12].mv.as_mv = *mv; |
| 485 mi->bmi[13].mv.as_mv = *mv; |
| 486 mi->bmi[14].mv.as_mv = *mv; |
| 487 mi->bmi[15].mv.as_mv = *mv; |
| 488 } |
| 489 break; |
| 490 default:; |
| 491 #if CONFIG_DEBUG |
| 492 assert(0); |
| 493 #endif |
| 494 } |
| 495 } |
| 496 else |
| 497 { |
| 498 /* MB is intra coded */ |
| 499 int j = 0; |
| 500 do |
| 501 { |
| 502 mi->bmi[j].mv.as_mv = Zero; |
| 503 } |
| 504 while (++j < 16); |
| 505 |
| 506 if ((mbmi->mode = (MB_PREDICTION_MODE) vp8_read_ymode(bc, pbi->common.fc.ymode_prob)) == B_PRED) |
| 507 { |
| 508 j = 0; |
| 509 do |
| 510 { |
| 511 mi->bmi[j].mode = (B_PREDICTION_MODE)vp8_read_bmode(bc, pbi->common.fc.bmode_prob); |
| 512 } |
| 513 while (++j < 16); |
| 514 } |
| 515 |
| 516 mbmi->uv_mode = (MB_PREDICTION_MODE)vp8_read_uv_mode(bc, pbi->common.fc.uv_mode_prob); |
| 517 } |
| 518 |
| 519 } |
| 520 |
| 521 void vp8_decode_mode_mvs(VP8D_COMP *pbi) |
| 522 { |
| 523 MODE_INFO *mi = pbi->common.mi; |
| 524 int mb_row = -1; |
| 525 |
| 526 vp8_mb_mode_mv_init(pbi); |
| 527 |
| 528 while (++mb_row < pbi->common.mb_rows) |
| 529 { |
| 530 int mb_col = -1; |
| 531 int mb_to_top_edge; |
| 532 int mb_to_bottom_edge; |
| 533 |
| 534 pbi->mb.mb_to_top_edge = |
| 535 mb_to_top_edge = -((mb_row * 16)) << 3; |
| 536 mb_to_top_edge -= LEFT_TOP_MARGIN; |
| 537 |
| 538 pbi->mb.mb_to_bottom_edge = |
| 539 mb_to_bottom_edge = ((pbi->common.mb_rows - 1 - mb_row) * 16) << 3; |
| 540 mb_to_bottom_edge += RIGHT_BOTTOM_MARGIN; |
| 541 |
| 542 while (++mb_col < pbi->common.mb_cols) |
| 543 { |
| 544 // vp8_read_mb_modes_mv(pbi, xd->mode_info_context, &xd->mode_info_context->mbmi, mb_row, mb_col); |
| 545 if(pbi->common.frame_type == KEY_FRAME) |
| 546 vp8_kfread_modes(pbi, mi, mb_row, mb_col); |
387 else | 547 else |
388 { | 548 vp8_read_mb_modes_mv(pbi, mi, &mi->mbmi, mb_row, mb_col); |
389 /* MB is intra coded */ | |
390 | |
391 int j = 0; | |
392 | |
393 do | |
394 { | |
395 mi->bmi[j].mv.as_mv = Zero; | |
396 } | |
397 while (++j < 16); | |
398 | |
399 *mv = Zero; | |
400 | |
401 if ((mbmi->mode = (MB_PREDICTION_MODE) vp8_read_ymode(bc, pc->fc.ymode_prob)) == B_PRED) | |
402 { | |
403 int j = 0; | |
404 | |
405 do | |
406 { | |
407 mi->bmi[j].mode = (B_PREDICTION_MODE)vp8_read_bmode(bc, pc->fc.bmode_prob); | |
408 } | |
409 while (++j < 16); | |
410 } | |
411 | |
412 mbmi->uv_mode = (MB_PREDICTION_MODE)vp8_read_uv_mode(bc, pc->fc.uv_mode_prob); | |
413 } | |
414 | 549 |
415 mi++; // next macroblock | 550 mi++; // next macroblock |
416 } | 551 } |
417 | 552 |
418 mi++; // skip left predictor each row | 553 mi++; // skip left predictor each row |
419 } | 554 } |
420 } | 555 } |
| 556 |