Chromium Code Reviews

Unified Diff: source/libvpx/vp8/decoder/decodemv.c

Issue 7671004: Update libvpx snapshot to v0.9.7-p1 (Cayuga). (Closed) Base URL: svn://chrome-svn/chrome/trunk/deps/third_party/libvpx/
Patch Set: '' Created 9 years, 4 months ago
 /*
  *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
  *
  *  Use of this source code is governed by a BSD-style license
  *  that can be found in the LICENSE file in the root of the source
  *  tree. An additional intellectual property rights grant can be found
  *  in the file PATENTS. All contributing project authors may
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
(...skipping 76 matching lines...)
         y_mode = (MB_PREDICTION_MODE) vp8_kfread_ymode(bc, pbi->common.kf_ymode_prob);
 
         m->mbmi.ref_frame = INTRA_FRAME;
 
         if ((m->mbmi.mode = y_mode) == B_PRED)
         {
             int i = 0;
 
             do
             {
-                const B_PREDICTION_MODE A = vp8_above_bmi(m, i, mis)->mode;
-                const B_PREDICTION_MODE L = vp8_left_bmi(m, i)->mode;
+                const B_PREDICTION_MODE A = above_block_mode(m, i, mis);
+                const B_PREDICTION_MODE L = left_block_mode(m, i);
 
-                m->bmi[i].mode = (B_PREDICTION_MODE) vp8_read_bmode(bc, pbi->common.kf_bmode_prob [A] [L]);
-            }
-            while (++i < 16);
-        }
-        else
-        {
-            int BMode;
-            int i = 0;
-
-            switch (y_mode)
-            {
-            case DC_PRED:
-                BMode = B_DC_PRED;
-                break;
-            case V_PRED:
-                BMode = B_VE_PRED;
-                break;
-            case H_PRED:
-                BMode = B_HE_PRED;
-                break;
-            case TM_PRED:
-                BMode = B_TM_PRED;
-                break;
-            default:
-                BMode = B_DC_PRED;
-                break;
-            }
-
-            do
-            {
-                m->bmi[i].mode = (B_PREDICTION_MODE)BMode;
+                m->bmi[i].as_mode = (B_PREDICTION_MODE) vp8_read_bmode(bc, pbi->common.kf_bmode_prob [A] [L]);
             }
             while (++i < 16);
         }
 
         m->mbmi.uv_mode = (MB_PREDICTION_MODE)vp8_read_uv_mode(bc, pbi->common.kf_uv_mode_prob);
     }
 }
 
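Note on the hunk above: the per-block intra mode moves from m->bmi[i].mode to m->bmi[i].as_mode, and the else branch that pre-filled a uniform BMode for non-B_PRED macroblocks is dropped. That only makes sense if the sub-block descriptor was collapsed into a union that stores either an intra mode or a motion vector, never both. The definitions below are not part of this file; they are a minimal sketch of the layout the patch appears to assume, with names taken from identifiers visible in the diff (B_PREDICTION_MODE is the existing enum of 4x4 intra modes).

    /* Sketch only -- the real definitions would live in the common headers
     * (mv.h / blockd.h), not in decodemv.c.  Needs <stdint.h> for uint32_t. */
    typedef struct
    {
        short row;
        short col;
    } MV;

    typedef union int_mv
    {
        uint32_t as_int;  /* both 16-bit components viewed as one 32-bit word */
        MV       as_mv;   /* the same storage viewed as row/col fields        */
    } int_mv;

    typedef union b_mode_info
    {
        B_PREDICTION_MODE as_mode;  /* 4x4 intra sub-block mode (B_PRED path) */
        int_mv            mv;       /* 4x4 inter sub-block motion vector      */
    } b_mode_info;

With such a layout, a per-block copy like mi->bmi[k].mv.as_int = blockmv.as_int is a single word store, which is what the rewritten SPLITMV fill loop further down relies on.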
 static int read_mvcomponent(vp8_reader *r, const MV_CONTEXT *mvc)
 {
(...skipping 62 matching lines...)
 }
 
 
 static MB_PREDICTION_MODE read_mv_ref(vp8_reader *bc, const vp8_prob *p)
 {
     const int i = vp8_treed_read(bc, vp8_mv_ref_tree, p);
 
     return (MB_PREDICTION_MODE)i;
 }
 
-static MB_PREDICTION_MODE sub_mv_ref(vp8_reader *bc, const vp8_prob *p)
+static B_PREDICTION_MODE sub_mv_ref(vp8_reader *bc, const vp8_prob *p)
 {
     const int i = vp8_treed_read(bc, vp8_sub_mv_ref_tree, p);
 
-    return (MB_PREDICTION_MODE)i;
+    return (B_PREDICTION_MODE)i;
 }
 
 #ifdef VPX_MODE_COUNT
 unsigned int vp8_mv_cont_count[5][4] =
 {
     { 0, 0, 0, 0 },
     { 0, 0, 0, 0 },
     { 0, 0, 0, 0 },
     { 0, 0, 0, 0 },
     { 0, 0, 0, 0 }
 };
 #endif
 
 static const unsigned char mbsplit_fill_count[4] = {8, 8, 4, 1};
 static const unsigned char mbsplit_fill_offset[4][16] = {
     { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15},
     { 0, 1, 4, 5, 8, 9, 12, 13, 2, 3, 6, 7, 10, 11, 14, 15},
     { 0, 1, 4, 5, 2, 3, 6, 7, 8, 9, 12, 13, 10, 11, 14, 15},
     { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}
 };
 
 
 
 
 static void mb_mode_mv_init(VP8D_COMP *pbi)
 {
     vp8_reader *const bc = & pbi->bc;
     MV_CONTEXT *const mvc = pbi->common.fc.mvc;
 
+#if CONFIG_ERROR_CONCEALMENT
+    /* Default is that no macroblock is corrupt, therefore we initialize
+     * mvs_corrupt_from_mb to something very big, which we can be sure is
+     * outside the frame. */
+    pbi->mvs_corrupt_from_mb = UINT_MAX;
+#endif
     pbi->prob_skip_false = 0;
     if (pbi->common.mb_no_coeff_skip)
         pbi->prob_skip_false = (vp8_prob)vp8_read_literal(bc, 8);
 
     if(pbi->common.frame_type != KEY_FRAME)
     {
         pbi->prob_intra = (vp8_prob)vp8_read_literal(bc, 8);
         pbi->prob_last = (vp8_prob)vp8_read_literal(bc, 8);
         pbi->prob_gf = (vp8_prob)vp8_read_literal(bc, 8);
 
(...skipping 16 matching lines...)
             {
                 pbi->common.fc.uv_mode_prob[i] = (vp8_prob) vp8_read_literal(bc, 8);
             }
             while (++i < 3);
         }
 
         read_mvcontexts(bc, mvc);
     }
 }
 
+
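Most of the churn in read_mb_modes_mv below is mechanical fallout from handling motion vectors as int_mv unions rather than bare MV structs. The snippet below is purely illustrative (the helper name is made up, not part of the patch); it shows why the as_int view is attractive, assuming the union layout sketched earlier.

    /* Illustrative only: with the int_mv union, zeroing, copying and comparing
     * a motion vector are single 32-bit word operations. */
    static void mv_union_idiom(int_mv *dst, const int_mv *src)
    {
        dst->as_int = 0;                  /* was: dst->row = 0; dst->col = 0; */
        dst->as_int = src->as_int;        /* was a field-by-field MV copy     */

        if (dst->as_int == src->as_int)   /* one-word equality test           */
            dst->as_mv.row += 8;          /* field access is still available  */
    }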
 static void read_mb_modes_mv(VP8D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
                             int mb_row, int mb_col)
 {
-    const MV Zero = { 0, 0};
     vp8_reader *const bc = & pbi->bc;
     MV_CONTEXT *const mvc = pbi->common.fc.mvc;
     const int mis = pbi->common.mode_info_stride;
 
-    MV *const mv = & mbmi->mv.as_mv;
+    int_mv *const mv = & mbmi->mv;
     int mb_to_left_edge;
     int mb_to_right_edge;
     int mb_to_top_edge;
     int mb_to_bottom_edge;
 
     mb_to_top_edge = pbi->mb.mb_to_top_edge;
     mb_to_bottom_edge = pbi->mb.mb_to_bottom_edge;
     mb_to_top_edge -= LEFT_TOP_MARGIN;
     mb_to_bottom_edge += RIGHT_BOTTOM_MARGIN;
 
(...skipping 16 matching lines...)
     /* Read the macroblock coeff skip flag if this feature is in use, else default to 0 */
     if (pbi->common.mb_no_coeff_skip)
         mbmi->mb_skip_coeff = vp8_read(bc, pbi->prob_skip_false);
     else
         mbmi->mb_skip_coeff = 0;
 
     if ((mbmi->ref_frame = (MV_REFERENCE_FRAME) vp8_read(bc, pbi->prob_intra)))    /* inter MB */
     {
         int rct[4];
         vp8_prob mv_ref_p [VP8_MVREFS-1];
-        MV nearest, nearby, best_mv;
+        int_mv nearest, nearby, best_mv;
 
         if (vp8_read(bc, pbi->prob_last))
         {
             mbmi->ref_frame = (MV_REFERENCE_FRAME)((int)mbmi->ref_frame + (int)(1 + vp8_read(bc, pbi->prob_gf)));
         }
 
         vp8_find_near_mvs(&pbi->mb, mi, &nearest, &nearby, &best_mv, rct, mbmi->ref_frame, pbi->common.ref_frame_sign_bias);
 
         vp8_mv_ref_probs(mv_ref_p, rct);
 
         mbmi->uv_mode = DC_PRED;
         switch (mbmi->mode = read_mv_ref(bc, mv_ref_p))
         {
         case SPLITMV:
         {
             const int s = mbmi->partitioning =
                       vp8_treed_read(bc, vp8_mbsplit_tree, vp8_mbsplit_probs);
             const int num_p = vp8_mbsplit_count [s];
             int j = 0;
 
             do  /* for each subset j */
             {
-                B_MODE_INFO bmi;
-                MV *const mv = & bmi.mv.as_mv;
-
+                int_mv leftmv, abovemv;
+                int_mv blockmv;
                 int k;  /* first block in subset j */
                 int mv_contz;
                 k = vp8_mbsplit_offset[s][j];
 
-                mv_contz = vp8_mv_cont(&(vp8_left_bmi(mi, k)->mv.as_mv), &(vp8_above_bmi(mi, k, mis)->mv.as_mv));
+                leftmv.as_int = left_block_mv(mi, k);
+                abovemv.as_int = above_block_mv(mi, k, mis);
+                mv_contz = vp8_mv_cont(&leftmv, &abovemv);
 
-                switch (bmi.mode = (B_PREDICTION_MODE) sub_mv_ref(bc, vp8_sub_mv_ref_prob2 [mv_contz])) /*pc->fc.sub_mv_ref_prob))*/
+                switch (sub_mv_ref(bc, vp8_sub_mv_ref_prob2 [mv_contz])) /*pc->fc.sub_mv_ref_prob))*/
                 {
                 case NEW4X4:
-                    read_mv(bc, mv, (const MV_CONTEXT *) mvc);
-                    mv->row += best_mv.row;
-                    mv->col += best_mv.col;
+                    read_mv(bc, &blockmv.as_mv, (const MV_CONTEXT *) mvc);
+                    blockmv.as_mv.row += best_mv.as_mv.row;
+                    blockmv.as_mv.col += best_mv.as_mv.col;
 #ifdef VPX_MODE_COUNT
                     vp8_mv_cont_count[mv_contz][3]++;
 #endif
                     break;
                 case LEFT4X4:
-                    *mv = vp8_left_bmi(mi, k)->mv.as_mv;
+                    blockmv.as_int = leftmv.as_int;
 #ifdef VPX_MODE_COUNT
                     vp8_mv_cont_count[mv_contz][0]++;
 #endif
                     break;
                 case ABOVE4X4:
-                    *mv = vp8_above_bmi(mi, k, mis)->mv.as_mv;
+                    blockmv.as_int = abovemv.as_int;
 #ifdef VPX_MODE_COUNT
                     vp8_mv_cont_count[mv_contz][1]++;
 #endif
                     break;
                 case ZERO4X4:
-                    *mv = Zero;
+                    blockmv.as_int = 0;
 #ifdef VPX_MODE_COUNT
                     vp8_mv_cont_count[mv_contz][2]++;
 #endif
                     break;
                 default:
                     break;
                 }
 
-                mbmi->need_to_clamp_mvs |= (mv->col < mb_to_left_edge) ? 1 : 0;
-                mbmi->need_to_clamp_mvs |= (mv->col > mb_to_right_edge) ? 1 : 0;
-                mbmi->need_to_clamp_mvs |= (mv->row < mb_to_top_edge) ? 1 : 0;
-                mbmi->need_to_clamp_mvs |= (mv->row > mb_to_bottom_edge) ? 1 : 0;
+                mbmi->need_to_clamp_mvs = vp8_check_mv_bounds(&blockmv,
+                                                              mb_to_left_edge,
+                                                              mb_to_right_edge,
+                                                              mb_to_top_edge,
+                                                              mb_to_bottom_edge);
 
                 {
                     /* Fill (uniform) modes, mvs of jth subset.
                      Must do it here because ensuing subsets can
                      refer back to us via "left" or "above". */
                     const unsigned char *fill_offset;
                     unsigned int fill_count = mbsplit_fill_count[s];
 
                     fill_offset = &mbsplit_fill_offset[s][(unsigned char)j * mbsplit_fill_count[s]];
 
                     do {
-                        mi->bmi[ *fill_offset] = bmi;
+                        mi->bmi[ *fill_offset].mv.as_int = blockmv.as_int;
                         fill_offset++;
-
                     }while (--fill_count);
                 }
 
             }
             while (++j < num_p);
         }
 
-        *mv = mi->bmi[15].mv.as_mv;
+        mv->as_int = mi->bmi[15].mv.as_int;
 
         break;  /* done with SPLITMV */
 
         case NEARMV:
-            *mv = nearby;
+            mv->as_int = nearby.as_int;
             /* Clip "next_nearest" so that it does not extend to far out of image */
-            mv->col = (mv->col < mb_to_left_edge) ? mb_to_left_edge : mv->col;
-            mv->col = (mv->col > mb_to_right_edge) ? mb_to_right_edge : mv->col;
-            mv->row = (mv->row < mb_to_top_edge) ? mb_to_top_edge : mv->row;
-            mv->row = (mv->row > mb_to_bottom_edge) ? mb_to_bottom_edge : mv->row;
+            vp8_clamp_mv(mv, mb_to_left_edge, mb_to_right_edge,
+                         mb_to_top_edge, mb_to_bottom_edge);
             goto propagate_mv;
 
         case NEARESTMV:
-            *mv = nearest;
+            mv->as_int = nearest.as_int;
             /* Clip "next_nearest" so that it does not extend to far out of image */
-            mv->col = (mv->col < mb_to_left_edge) ? mb_to_left_edge : mv->col;
-            mv->col = (mv->col > mb_to_right_edge) ? mb_to_right_edge : mv->col;
-            mv->row = (mv->row < mb_to_top_edge) ? mb_to_top_edge : mv->row;
-            mv->row = (mv->row > mb_to_bottom_edge) ? mb_to_bottom_edge : mv->row;
+            vp8_clamp_mv(mv, mb_to_left_edge, mb_to_right_edge,
+                         mb_to_top_edge, mb_to_bottom_edge);
             goto propagate_mv;
 
         case ZEROMV:
-            *mv = Zero;
+            mv->as_int = 0;
             goto propagate_mv;
 
         case NEWMV:
-            read_mv(bc, mv, (const MV_CONTEXT *) mvc);
-            mv->row += best_mv.row;
-            mv->col += best_mv.col;
+            read_mv(bc, &mv->as_mv, (const MV_CONTEXT *) mvc);
+            mv->as_mv.row += best_mv.as_mv.row;
+            mv->as_mv.col += best_mv.as_mv.col;
 
             /* Don't need to check this on NEARMV and NEARESTMV modes
              * since those modes clamp the MV. The NEWMV mode does not,
              * so signal to the prediction stage whether special
              * handling may be required.
              */
-            mbmi->need_to_clamp_mvs = (mv->col < mb_to_left_edge) ? 1 : 0;
-            mbmi->need_to_clamp_mvs |= (mv->col > mb_to_right_edge) ? 1 : 0;
-            mbmi->need_to_clamp_mvs |= (mv->row < mb_to_top_edge) ? 1 : 0;
-            mbmi->need_to_clamp_mvs |= (mv->row > mb_to_bottom_edge) ? 1 : 0;
+            mbmi->need_to_clamp_mvs = vp8_check_mv_bounds(mv,
+                                                          mb_to_left_edge,
+                                                          mb_to_right_edge,
+                                                          mb_to_top_edge,
+                                                          mb_to_bottom_edge);
 
         propagate_mv:  /* same MV throughout */
+#if CONFIG_ERROR_CONCEALMENT
+            if(pbi->ec_enabled)
             {
-                /*int i=0;
-                do
-                {
-                    mi->bmi[i].mv.as_mv = *mv;
-                }
-                while( ++i < 16);*/
-
-                mi->bmi[0].mv.as_mv = *mv;
-                mi->bmi[1].mv.as_mv = *mv;
-                mi->bmi[2].mv.as_mv = *mv;
-                mi->bmi[3].mv.as_mv = *mv;
-                mi->bmi[4].mv.as_mv = *mv;
-                mi->bmi[5].mv.as_mv = *mv;
-                mi->bmi[6].mv.as_mv = *mv;
-                mi->bmi[7].mv.as_mv = *mv;
-                mi->bmi[8].mv.as_mv = *mv;
-                mi->bmi[9].mv.as_mv = *mv;
-                mi->bmi[10].mv.as_mv = *mv;
-                mi->bmi[11].mv.as_mv = *mv;
-                mi->bmi[12].mv.as_mv = *mv;
-                mi->bmi[13].mv.as_mv = *mv;
-                mi->bmi[14].mv.as_mv = *mv;
-                mi->bmi[15].mv.as_mv = *mv;
+                mi->bmi[ 0].mv.as_int =
+                mi->bmi[ 1].mv.as_int =
+                mi->bmi[ 2].mv.as_int =
+                mi->bmi[ 3].mv.as_int =
+                mi->bmi[ 4].mv.as_int =
+                mi->bmi[ 5].mv.as_int =
+                mi->bmi[ 6].mv.as_int =
+                mi->bmi[ 7].mv.as_int =
+                mi->bmi[ 8].mv.as_int =
+                mi->bmi[ 9].mv.as_int =
+                mi->bmi[10].mv.as_int =
+                mi->bmi[11].mv.as_int =
+                mi->bmi[12].mv.as_int =
+                mi->bmi[13].mv.as_int =
+                mi->bmi[14].mv.as_int =
+                mi->bmi[15].mv.as_int = mv->as_int;
             }
+#endif
             break;
         default:;
 #if CONFIG_DEBUG
             assert(0);
 #endif
         }
     }
     else
     {
+        /* required for left and above block mv */
+        mbmi->mv.as_int = 0;
+
         /* MB is intra coded */
-        int j = 0;
-        do
-        {
-            mi->bmi[j].mv.as_mv = Zero;
-        }
-        while (++j < 16);
-
         if ((mbmi->mode = (MB_PREDICTION_MODE) vp8_read_ymode(bc, pbi->common.fc.ymode_prob)) == B_PRED)
         {
-            j = 0;
+            int j = 0;
             do
             {
-                mi->bmi[j].mode = (B_PREDICTION_MODE)vp8_read_bmode(bc, pbi->common.fc.bmode_prob);
+                mi->bmi[j].as_mode = (B_PREDICTION_MODE)vp8_read_bmode(bc, pbi->common.fc.bmode_prob);
             }
             while (++j < 16);
         }
 
         mbmi->uv_mode = (MB_PREDICTION_MODE)vp8_read_uv_mode(bc, pbi->common.fc.uv_mode_prob);
     }
 
 }
 
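The repeated ternary clamping and need_to_clamp_mvs bookkeeping removed above are folded into vp8_clamp_mv() and vp8_check_mv_bounds(). Those helpers are defined elsewhere in the tree and are not shown in this change; the sketch below is reconstructed from the ternary expressions they replace in this diff, so read it as an approximation rather than the library's exact implementation.

    /* Approximation reconstructed from the removed ternaries in this diff. */
    static void clamp_mv_sketch(int_mv *mv, int mb_to_left_edge, int mb_to_right_edge,
                                int mb_to_top_edge, int mb_to_bottom_edge)
    {
        if (mv->as_mv.col < mb_to_left_edge)   mv->as_mv.col = mb_to_left_edge;
        if (mv->as_mv.col > mb_to_right_edge)  mv->as_mv.col = mb_to_right_edge;
        if (mv->as_mv.row < mb_to_top_edge)    mv->as_mv.row = mb_to_top_edge;
        if (mv->as_mv.row > mb_to_bottom_edge) mv->as_mv.row = mb_to_bottom_edge;
    }

    /* Returns nonzero when the MV lies outside the allowed range, i.e. the
     * same condition the old code accumulated into need_to_clamp_mvs. */
    static int check_mv_bounds_sketch(const int_mv *mv, int mb_to_left_edge,
                                      int mb_to_right_edge, int mb_to_top_edge,
                                      int mb_to_bottom_edge)
    {
        return (mv->as_mv.col < mb_to_left_edge) ||
               (mv->as_mv.col > mb_to_right_edge) ||
               (mv->as_mv.row < mb_to_top_edge) ||
               (mv->as_mv.row > mb_to_bottom_edge);
    }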
 void vp8_decode_mode_mvs(VP8D_COMP *pbi)
(...skipping 12 matching lines...)
         pbi->mb.mb_to_top_edge =
             mb_to_top_edge = -((mb_row * 16)) << 3;
         mb_to_top_edge -= LEFT_TOP_MARGIN;
 
         pbi->mb.mb_to_bottom_edge =
             mb_to_bottom_edge = ((pbi->common.mb_rows - 1 - mb_row) * 16) << 3;
         mb_to_bottom_edge += RIGHT_BOTTOM_MARGIN;
 
         while (++mb_col < pbi->common.mb_cols)
         {
+#if CONFIG_ERROR_CONCEALMENT
+            int mb_num = mb_row * pbi->common.mb_cols + mb_col;
+#endif
             /*read_mb_modes_mv(pbi, xd->mode_info_context, &xd->mode_info_context->mbmi, mb_row, mb_col);*/
             if(pbi->common.frame_type == KEY_FRAME)
                 vp8_kfread_modes(pbi, mi, mb_row, mb_col);
             else
                 read_mb_modes_mv(pbi, mi, &mi->mbmi, mb_row, mb_col);
 
+#if CONFIG_ERROR_CONCEALMENT
+            /* look for corruption. set mvs_corrupt_from_mb to the current
+             * mb_num if the frame is corrupt from this macroblock. */
+            if (vp8dx_bool_error(&pbi->bc) && mb_num < pbi->mvs_corrupt_from_mb)
+            {
+                pbi->mvs_corrupt_from_mb = mb_num;
+                /* no need to continue since the partition is corrupt from
+                 * here on.
+                 */
+                return;
+            }
+#endif
+
             mi++;       /* next macroblock */
         }
 
         mi++;       /* skip left predictor each row */
     }
 }
 
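Taken together, the CONFIG_ERROR_CONCEALMENT additions implement a "first corrupt macroblock" watermark: mb_mode_mv_init() seeds pbi->mvs_corrupt_from_mb with UINT_MAX (meaning no corruption), and the loop above lowers it to the first macroblock index at which the bool decoder reports an error before bailing out of the partition. A later stage can then treat every macroblock at or past that index as having unreliable motion vectors. A minimal, hypothetical consumer of that watermark (the function below is illustrative, not part of the patch; only the field and the sentinel come from this change):

    /* Hypothetical consumer of the watermark set above. */
    static int mb_has_trusted_mvs(const VP8D_COMP *pbi, unsigned int mb_num)
    {
        /* UINT_MAX means "no corruption detected", so every mb_num passes. */
        return mb_num < pbi->mvs_corrupt_from_mb;
    }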