| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved. | 2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
| 5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
| 6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
| 7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
| 8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
| 9 */ | 9 */ |
| 10 | 10 |
| (...skipping 43 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 54 // if (bestsme > error_thresh && bestsme < INT_MAX) | 54 // if (bestsme > error_thresh && bestsme < INT_MAX) |
| 55 { | 55 { |
| 56 int distortion; | 56 int distortion; |
| 57 unsigned int sse; | 57 unsigned int sse; |
| 58 cpi->find_fractional_mv_step( | 58 cpi->find_fractional_mv_step( |
| 59 x, dst_mv, ref_mv, cpi->common.allow_high_precision_mv, x->errorperbit, | 59 x, dst_mv, ref_mv, cpi->common.allow_high_precision_mv, x->errorperbit, |
| 60 &v_fn_ptr, 0, cpi->sf.subpel_iters_per_step, NULL, NULL, &distortion, | 60 &v_fn_ptr, 0, cpi->sf.subpel_iters_per_step, NULL, NULL, &distortion, |
| 61 &sse); | 61 &sse); |
| 62 } | 62 } |
| 63 | 63 |
| 64 vp9_set_mbmode_and_mvs(xd, NEWMV, dst_mv); | 64 xd->mi[0]->mbmi.mode = NEWMV; |
| 65 xd->mi[0]->mbmi.mv[0].as_mv = *dst_mv; |
| 66 |
| 65 vp9_build_inter_predictors_sby(xd, mb_row, mb_col, BLOCK_16X16); | 67 vp9_build_inter_predictors_sby(xd, mb_row, mb_col, BLOCK_16X16); |
| 66 | 68 |
| 67 /* restore UMV window */ | 69 /* restore UMV window */ |
| 68 x->mv_col_min = tmp_col_min; | 70 x->mv_col_min = tmp_col_min; |
| 69 x->mv_col_max = tmp_col_max; | 71 x->mv_col_max = tmp_col_max; |
| 70 x->mv_row_min = tmp_row_min; | 72 x->mv_row_min = tmp_row_min; |
| 71 x->mv_row_max = tmp_row_max; | 73 x->mv_row_max = tmp_row_max; |
| 72 | 74 |
| 73 return vp9_sad16x16(x->plane[0].src.buf, x->plane[0].src.stride, | 75 return vp9_sad16x16(x->plane[0].src.buf, x->plane[0].src.stride, |
| 74 xd->plane[0].dst.buf, xd->plane[0].dst.stride, | 76 xd->plane[0].dst.buf, xd->plane[0].dst.stride, |
| (...skipping 61 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 136 MACROBLOCK *const x = &cpi->mb; | 138 MACROBLOCK *const x = &cpi->mb; |
| 137 MACROBLOCKD *const xd = &x->e_mbd; | 139 MACROBLOCKD *const xd = &x->e_mbd; |
| 138 MB_PREDICTION_MODE best_mode = -1, mode; | 140 MB_PREDICTION_MODE best_mode = -1, mode; |
| 139 unsigned int best_err = INT_MAX; | 141 unsigned int best_err = INT_MAX; |
| 140 | 142 |
| 141 // calculate SATD for each intra prediction mode; | 143 // calculate SATD for each intra prediction mode; |
| 142 // we're intentionally not doing 4x4, we just want a rough estimate | 144 // we're intentionally not doing 4x4, we just want a rough estimate |
| 143 for (mode = DC_PRED; mode <= TM_PRED; mode++) { | 145 for (mode = DC_PRED; mode <= TM_PRED; mode++) { |
| 144 unsigned int err; | 146 unsigned int err; |
| 145 | 147 |
| 146 xd->mi_8x8[0]->mbmi.mode = mode; | 148 xd->mi[0]->mbmi.mode = mode; |
| 147 vp9_predict_intra_block(xd, 0, 2, TX_16X16, mode, | 149 vp9_predict_intra_block(xd, 0, 2, TX_16X16, mode, |
| 148 x->plane[0].src.buf, x->plane[0].src.stride, | 150 x->plane[0].src.buf, x->plane[0].src.stride, |
| 149 xd->plane[0].dst.buf, xd->plane[0].dst.stride, | 151 xd->plane[0].dst.buf, xd->plane[0].dst.stride, |
| 150 0, 0, 0); | 152 0, 0, 0); |
| 151 err = vp9_sad16x16(x->plane[0].src.buf, x->plane[0].src.stride, | 153 err = vp9_sad16x16(x->plane[0].src.buf, x->plane[0].src.stride, |
| 152 xd->plane[0].dst.buf, xd->plane[0].dst.stride, best_err); | 154 xd->plane[0].dst.buf, xd->plane[0].dst.stride, best_err); |
| 153 | 155 |
| 154 // find best | 156 // find best |
| 155 if (err < best_err) { | 157 if (err < best_err) { |
| 156 best_err = err; | 158 best_err = err; |
| (...skipping 86 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 243 // Set up limit values for motion vectors to prevent them extending outside | 245 // Set up limit values for motion vectors to prevent them extending outside |
| 244 // the UMV borders. | 246 // the UMV borders. |
| 245 arf_top_mv.as_int = 0; | 247 arf_top_mv.as_int = 0; |
| 246 gld_top_mv.as_int = 0; | 248 gld_top_mv.as_int = 0; |
| 247 x->mv_row_min = -BORDER_MV_PIXELS_B16; | 249 x->mv_row_min = -BORDER_MV_PIXELS_B16; |
| 248 x->mv_row_max = (cm->mb_rows - 1) * 8 + BORDER_MV_PIXELS_B16; | 250 x->mv_row_max = (cm->mb_rows - 1) * 8 + BORDER_MV_PIXELS_B16; |
| 249 xd->up_available = 0; | 251 xd->up_available = 0; |
| 250 xd->plane[0].dst.stride = buf->y_stride; | 252 xd->plane[0].dst.stride = buf->y_stride; |
| 251 xd->plane[0].pre[0].stride = buf->y_stride; | 253 xd->plane[0].pre[0].stride = buf->y_stride; |
| 252 xd->plane[1].dst.stride = buf->uv_stride; | 254 xd->plane[1].dst.stride = buf->uv_stride; |
| 253 xd->mi_8x8[0] = &mi_local; | 255 xd->mi[0] = &mi_local; |
| 254 mi_local.mbmi.sb_type = BLOCK_16X16; | 256 mi_local.mbmi.sb_type = BLOCK_16X16; |
| 255 mi_local.mbmi.ref_frame[0] = LAST_FRAME; | 257 mi_local.mbmi.ref_frame[0] = LAST_FRAME; |
| 256 mi_local.mbmi.ref_frame[1] = NONE; | 258 mi_local.mbmi.ref_frame[1] = NONE; |
| 257 | 259 |
| 258 for (mb_row = 0; mb_row < cm->mb_rows; mb_row++) { | 260 for (mb_row = 0; mb_row < cm->mb_rows; mb_row++) { |
| 259 int_mv arf_left_mv, gld_left_mv; | 261 int_mv arf_left_mv, gld_left_mv; |
| 260 int mb_y_in_offset = mb_y_offset; | 262 int mb_y_in_offset = mb_y_offset; |
| 261 int arf_y_in_offset = arf_y_offset; | 263 int arf_y_in_offset = arf_y_offset; |
| 262 int gld_y_in_offset = gld_y_offset; | 264 int gld_y_in_offset = gld_y_offset; |
| 263 | 265 |
| (...skipping 97 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 361 if (1) { | 363 if (1) { |
| 362 // Note % of blocks that are marked as static | 364 // Note % of blocks that are marked as static |
| 363 if (cm->MBs) | 365 if (cm->MBs) |
| 364 cpi->static_mb_pct = (ncnt[1] * 100) / (cm->mi_rows * cm->mi_cols); | 366 cpi->static_mb_pct = (ncnt[1] * 100) / (cm->mi_rows * cm->mi_cols); |
| 365 | 367 |
| 366 // This error case should not be reachable as this function should | 368 // This error case should not be reachable as this function should |
| 367 // never be called with the common data structure uninitialized. | 369 // never be called with the common data structure uninitialized. |
| 368 else | 370 else |
| 369 cpi->static_mb_pct = 0; | 371 cpi->static_mb_pct = 0; |
| 370 | 372 |
| 371 cpi->seg0_cnt = ncnt[0]; | |
| 372 vp9_enable_segmentation(&cm->seg); | 373 vp9_enable_segmentation(&cm->seg); |
| 373 } else { | 374 } else { |
| 374 cpi->static_mb_pct = 0; | 375 cpi->static_mb_pct = 0; |
| 375 vp9_disable_segmentation(&cm->seg); | 376 vp9_disable_segmentation(&cm->seg); |
| 376 } | 377 } |
| 377 | 378 |
| 378 // Free locally allocated storage | 379 // Free locally allocated storage |
| 379 vpx_free(arf_not_zz); | 380 vpx_free(arf_not_zz); |
| 380 } | 381 } |
| 381 | 382 |
| (...skipping 29 matching lines...) Expand all Loading... |
| 411 assert(q_cur != NULL); | 412 assert(q_cur != NULL); |
| 412 | 413 |
| 413 update_mbgraph_frame_stats(cpi, frame_stats, &q_cur->img, | 414 update_mbgraph_frame_stats(cpi, frame_stats, &q_cur->img, |
| 414 golden_ref, cpi->Source); | 415 golden_ref, cpi->Source); |
| 415 } | 416 } |
| 416 | 417 |
| 417 vp9_clear_system_state(); | 418 vp9_clear_system_state(); |
| 418 | 419 |
| 419 separate_arf_mbs(cpi); | 420 separate_arf_mbs(cpi); |
| 420 } | 421 } |
| OLD | NEW |