OLD | NEW |
1 /* | 1 /* |
2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved. | 2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
(...skipping 11 matching lines...) |
22 | 22 |
23 | 23 |
24 static unsigned int do_16x16_motion_iteration(VP9_COMP *cpi, | 24 static unsigned int do_16x16_motion_iteration(VP9_COMP *cpi, |
25 const MV *ref_mv, | 25 const MV *ref_mv, |
26 MV *dst_mv, | 26 MV *dst_mv, |
27 int mb_row, | 27 int mb_row, |
28 int mb_col) { | 28 int mb_col) { |
29 MACROBLOCK *const x = &cpi->mb; | 29 MACROBLOCK *const x = &cpi->mb; |
30 MACROBLOCKD *const xd = &x->e_mbd; | 30 MACROBLOCKD *const xd = &x->e_mbd; |
31 vp9_variance_fn_ptr_t v_fn_ptr = cpi->fn_ptr[BLOCK_16X16]; | 31 vp9_variance_fn_ptr_t v_fn_ptr = cpi->fn_ptr[BLOCK_16X16]; |
32 unsigned int best_err; | |
33 | 32 |
34 const int tmp_col_min = x->mv_col_min; | 33 const int tmp_col_min = x->mv_col_min; |
35 const int tmp_col_max = x->mv_col_max; | 34 const int tmp_col_max = x->mv_col_max; |
36 const int tmp_row_min = x->mv_row_min; | 35 const int tmp_row_min = x->mv_row_min; |
37 const int tmp_row_max = x->mv_row_max; | 36 const int tmp_row_max = x->mv_row_max; |
38 MV ref_full; | 37 MV ref_full; |
39 | 38 |
40 // Further step/diamond searches as necessary | 39 // Further step/diamond searches as necessary |
41 int step_param = cpi->sf.reduce_first_step_size + | 40 int step_param = cpi->sf.reduce_first_step_size + |
42 (cpi->speed < 8 ? (cpi->speed > 5 ? 1 : 0) : 2); | 41 (cpi->speed < 8 ? (cpi->speed > 5 ? 1 : 0) : 2); |
43 step_param = MIN(step_param, (cpi->sf.max_step_search_steps - 2)); | 42 step_param = MIN(step_param, (cpi->sf.max_step_search_steps - 2)); |
44 | 43 |
45 vp9_set_mv_search_range(x, ref_mv); | 44 vp9_set_mv_search_range(x, ref_mv); |
46 | 45 |
47 ref_full.col = ref_mv->col >> 3; | 46 ref_full.col = ref_mv->col >> 3; |
48 ref_full.row = ref_mv->row >> 3; | 47 ref_full.row = ref_mv->row >> 3; |
49 | 48 |
50 /*cpi->sf.search_method == HEX*/ | 49 /*cpi->sf.search_method == HEX*/ |
51 best_err = vp9_hex_search(x, &ref_full, step_param, x->errorperbit, | 50 vp9_hex_search(x, &ref_full, step_param, x->errorperbit, 0, &v_fn_ptr, 0, |
52 0, &v_fn_ptr, 0, ref_mv, dst_mv); | 51 ref_mv, dst_mv); |
53 | 52 |
54 // Try sub-pixel MC | 53 // Try sub-pixel MC |
55 // if (bestsme > error_thresh && bestsme < INT_MAX) | 54 // if (bestsme > error_thresh && bestsme < INT_MAX) |
56 { | 55 { |
57 int distortion; | 56 int distortion; |
58 unsigned int sse; | 57 unsigned int sse; |
59 best_err = cpi->find_fractional_mv_step( | 58 cpi->find_fractional_mv_step( |
60 x, dst_mv, ref_mv, | 59 x, dst_mv, ref_mv, cpi->common.allow_high_precision_mv, x->errorperbit, |
61 cpi->common.allow_high_precision_mv, | 60 &v_fn_ptr, 0, cpi->sf.subpel_iters_per_step, NULL, NULL, &distortion, |
62 x->errorperbit, &v_fn_ptr, | 61 &sse); |
63 0, cpi->sf.subpel_iters_per_step, NULL, NULL, | |
64 & distortion, &sse); | |
65 } | 62 } |
66 | 63 |
67 vp9_set_mbmode_and_mvs(xd, NEWMV, dst_mv); | 64 vp9_set_mbmode_and_mvs(xd, NEWMV, dst_mv); |
68 vp9_build_inter_predictors_sby(xd, mb_row, mb_col, BLOCK_16X16); | 65 vp9_build_inter_predictors_sby(xd, mb_row, mb_col, BLOCK_16X16); |
69 best_err = vp9_sad16x16(x->plane[0].src.buf, x->plane[0].src.stride, | |
70 xd->plane[0].dst.buf, xd->plane[0].dst.stride, | |
71 INT_MAX); | |
72 | 66 |
73 /* restore UMV window */ | 67 /* restore UMV window */ |
74 x->mv_col_min = tmp_col_min; | 68 x->mv_col_min = tmp_col_min; |
75 x->mv_col_max = tmp_col_max; | 69 x->mv_col_max = tmp_col_max; |
76 x->mv_row_min = tmp_row_min; | 70 x->mv_row_min = tmp_row_min; |
77 x->mv_row_max = tmp_row_max; | 71 x->mv_row_max = tmp_row_max; |
78 | 72 |
79 return best_err; | 73 return vp9_sad16x16(x->plane[0].src.buf, x->plane[0].src.stride, |
| 74 xd->plane[0].dst.buf, xd->plane[0].dst.stride, |
| 75 INT_MAX); |
80 } | 76 } |
81 | 77 |
82 static int do_16x16_motion_search(VP9_COMP *cpi, const int_mv *ref_mv, | 78 static int do_16x16_motion_search(VP9_COMP *cpi, const int_mv *ref_mv, |
83 int_mv *dst_mv, int mb_row, int mb_col) { | 79 int_mv *dst_mv, int mb_row, int mb_col) { |
84 MACROBLOCK *const x = &cpi->mb; | 80 MACROBLOCK *const x = &cpi->mb; |
85 MACROBLOCKD *const xd = &x->e_mbd; | 81 MACROBLOCKD *const xd = &x->e_mbd; |
86 unsigned int err, tmp_err; | 82 unsigned int err, tmp_err; |
87 int_mv tmp_mv; | 83 int_mv tmp_mv; |
88 | 84 |
89 // Try zero MV first | 85 // Try zero MV first |
(...skipping 258 matching lines...) |
348 } | 344 } |
349 } | 345 } |
350 } | 346 } |
351 | 347 |
352 // arf_not_zz is indexed by MB, but this loop is indexed by MI to avoid out | 348 // arf_not_zz is indexed by MB, but this loop is indexed by MI to avoid out |
353 // of bound access in segmentation_map | 349 // of bound access in segmentation_map |
354 for (mi_row = 0; mi_row < cm->mi_rows; mi_row++) { | 350 for (mi_row = 0; mi_row < cm->mi_rows; mi_row++) { |
355 for (mi_col = 0; mi_col < cm->mi_cols; mi_col++) { | 351 for (mi_col = 0; mi_col < cm->mi_cols; mi_col++) { |
356 // If any of the blocks in the sequence failed then the MB | 352 // If any of the blocks in the sequence failed then the MB |
357 // goes in segment 0 | 353 // goes in segment 0 |
358 if (arf_not_zz[mi_row/2*cm->mb_cols + mi_col/2]) { | 354 if (arf_not_zz[mi_row / 2 * cm->mb_cols + mi_col / 2]) { |
359 ncnt[0]++; | 355 ncnt[0]++; |
360 cpi->segmentation_map[mi_row * cm->mi_cols + mi_col] = 0; | 356 cpi->segmentation_map[mi_row * cm->mi_cols + mi_col] = 0; |
361 } else { | 357 } else { |
362 cpi->segmentation_map[mi_row * cm->mi_cols + mi_col] = 1; | 358 cpi->segmentation_map[mi_row * cm->mi_cols + mi_col] = 1; |
363 ncnt[1]++; | 359 ncnt[1]++; |
364 } | 360 } |
365 } | 361 } |
366 } | 362 } |
367 | 363 |
368 // Only bother with segmentation if over 10% of the MBs in static segment | 364 // Only bother with segmentation if over 10% of the MBs in static segment |
(...skipping 51 matching lines...) |
420 assert(q_cur != NULL); | 416 assert(q_cur != NULL); |
421 | 417 |
422 update_mbgraph_frame_stats(cpi, frame_stats, &q_cur->img, | 418 update_mbgraph_frame_stats(cpi, frame_stats, &q_cur->img, |
423 golden_ref, cpi->Source); | 419 golden_ref, cpi->Source); |
424 } | 420 } |
425 | 421 |
426 vp9_clear_system_state(); // __asm emms; | 422 vp9_clear_system_state(); // __asm emms; |
427 | 423 |
428 separate_arf_mbs(cpi); | 424 separate_arf_mbs(cpi); |
429 } | 425 } |
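
Note on the index arithmetic in separate_arf_mbs: the lookup arf_not_zz[mi_row / 2 * cm->mb_cols + mi_col / 2] relies on VP9's unit sizes, where a mode-info (MI) unit covers 8x8 pixels and the arf_not_zz map is built per 16x16 macroblock, so halving the MI coordinates maps each MI position onto its enclosing macroblock. A minimal standalone sketch of that mapping, assuming a hypothetical 4x4 MI grid and a helper name not taken from the patch:

#include <stdio.h>

/* Illustrative only: MI units are 8x8 pixels and macroblocks are 16x16,
 * so one macroblock spans a 2x2 group of MI positions. */
static int mi_to_mb_index(int mi_row, int mi_col, int mb_cols) {
  return (mi_row / 2) * mb_cols + (mi_col / 2);
}

int main(void) {
  const int mi_rows = 4, mi_cols = 4;        /* hypothetical 32x32-pixel frame */
  const int mb_cols = (mi_cols + 1) / 2;     /* same rounding as cm->mb_cols */
  int mi_row, mi_col;

  /* Every MI position inside the same macroblock resolves to the same index,
   * mirroring how the segmentation loop reads the per-MB arf_not_zz map. */
  for (mi_row = 0; mi_row < mi_rows; mi_row++) {
    for (mi_col = 0; mi_col < mi_cols; mi_col++)
      printf("%d ", mi_to_mb_index(mi_row, mi_col, mb_cols));
    printf("\n");
  }
  return 0;
}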