| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved. | 2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
| 5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
| 6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
| 7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
| 8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
| 9 */ | 9 */ |
| 10 | 10 |
| 11 | 11 |
| 12 #if !defined(WIN32) && CONFIG_OS_SUPPORT == 1 | 12 #if !defined(WIN32) && CONFIG_OS_SUPPORT == 1 |
| 13 # include <unistd.h> | 13 # include <unistd.h> |
| 14 #endif | 14 #endif |
| 15 #include "onyxd_int.h" | 15 #include "onyxd_int.h" |
| 16 #include "vpx_mem/vpx_mem.h" | 16 #include "vpx_mem/vpx_mem.h" |
| 17 #include "vp8/common/threading.h" | 17 #include "vp8/common/threading.h" |
| 18 | 18 |
| 19 #include "vp8/common/loopfilter.h" | 19 #include "vp8/common/loopfilter.h" |
| 20 #include "vp8/common/extend.h" | 20 #include "vp8/common/extend.h" |
| 21 #include "vpx_ports/vpx_timer.h" | 21 #include "vpx_ports/vpx_timer.h" |
| 22 #include "detokenize.h" | 22 #include "detokenize.h" |
| 23 #include "vp8/common/reconinter.h" | 23 #include "vp8/common/reconinter.h" |
| 24 #include "reconintra_mt.h" | 24 #include "reconintra_mt.h" |
| 25 #if CONFIG_ERROR_CONCEALMENT |
| 26 #include "error_concealment.h" |
| 27 #endif |
| 25 | 28 |
| 26 extern void mb_init_dequantizer(VP8D_COMP *pbi, MACROBLOCKD *xd); | 29 extern void mb_init_dequantizer(VP8D_COMP *pbi, MACROBLOCKD *xd); |
| 27 extern void clamp_mvs(MACROBLOCKD *xd); | 30 extern void clamp_mvs(MACROBLOCKD *xd); |
| 28 extern void vp8_build_uvmvs(MACROBLOCKD *x, int fullpixel); | 31 extern void vp8_build_uvmvs(MACROBLOCKD *x, int fullpixel); |
| 29 | 32 |
| 30 #if CONFIG_RUNTIME_CPU_DETECT | 33 #if CONFIG_RUNTIME_CPU_DETECT |
| 31 #define RTCD_VTABLE(x) (&(pbi)->common.rtcd.x) | 34 #define RTCD_VTABLE(x) (&(pbi)->common.rtcd.x) |
| 32 #else | 35 #else |
| 33 #define RTCD_VTABLE(x) NULL | 36 #define RTCD_VTABLE(x) NULL |
| 34 #endif | 37 #endif |
| (...skipping 48 matching lines...) |
| 83 } | 86 } |
| 84 | 87 |
| 85 for (i=0; i< pc->mb_rows; i++) | 88 for (i=0; i< pc->mb_rows; i++) |
| 86 pbi->mt_current_mb_col[i]=-1; | 89 pbi->mt_current_mb_col[i]=-1; |
| 87 } | 90 } |
| 88 | 91 |
| 89 | 92 |
| 90 static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd, int mb_row, int mb_col) | 93 static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd, int mb_row, int mb_col) |
| 91 { | 94 { |
| 92 int eobtotal = 0; | 95 int eobtotal = 0; |
| 96 int throw_residual = 0; |
| 93 int i, do_clamp = xd->mode_info_context->mbmi.need_to_clamp_mvs; | 97 int i, do_clamp = xd->mode_info_context->mbmi.need_to_clamp_mvs; |
| 94 VP8_COMMON *pc = &pbi->common; | |
| 95 | 98 |
| 96 if (xd->mode_info_context->mbmi.mb_skip_coeff) | 99 if (xd->mode_info_context->mbmi.mb_skip_coeff) |
| 97 { | 100 { |
| 98 vp8_reset_mb_tokens_context(xd); | 101 vp8_reset_mb_tokens_context(xd); |
| 99 } | 102 } |
| 100 else | 103 else |
| 101 { | 104 { |
| 102 eobtotal = vp8_decode_mb_tokens(pbi, xd); | 105 eobtotal = vp8_decode_mb_tokens(pbi, xd); |
| 103 } | 106 } |
| 104 | 107 |
| 105 /* Perform temporary clamping of the MV to be used for prediction */ | 108 /* Perform temporary clamping of the MV to be used for prediction */ |
| 106 if (do_clamp) | 109 if (do_clamp) |
| 107 { | 110 { |
| 108 clamp_mvs(xd); | 111 clamp_mvs(xd); |
| 109 } | 112 } |
| 110 | 113 |
| 111 xd->mode_info_context->mbmi.dc_diff = 1; | 114 eobtotal |= (xd->mode_info_context->mbmi.mode == B_PRED || |
| 112 | 115 xd->mode_info_context->mbmi.mode == SPLITMV); |
| 113 if (xd->mode_info_context->mbmi.mode != B_PRED && xd->mode_info_context->mbmi.mode != SPLITMV && eobtotal == 0) | 116 if (!eobtotal && !vp8dx_bool_error(xd->current_bc)) |
| 114 { | 117 { |
| 115 xd->mode_info_context->mbmi.dc_diff = 0; | 118 /* Special case: Force the loopfilter to skip when eobtotal and |
| 119 * mb_skip_coeff are zero. |
| 120 * */ |
| 121 xd->mode_info_context->mbmi.mb_skip_coeff = 1; |
| 116 | 122 |
| 117 /*mt_skip_recon_mb(pbi, xd, mb_row, mb_col);*/ | 123 /*mt_skip_recon_mb(pbi, xd, mb_row, mb_col);*/ |
| 118 if (xd->frame_type == KEY_FRAME || xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME) | 124 if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME) |
| 119 { | 125 { |
| 120 vp8mt_build_intra_predictors_mbuv_s(pbi, xd, mb_row, mb_col); | 126 vp8mt_build_intra_predictors_mbuv_s(pbi, xd, mb_row, mb_col); |
| 121 vp8mt_build_intra_predictors_mby_s(pbi, xd, mb_row, mb_col); | 127 vp8mt_build_intra_predictors_mby_s(pbi, xd, mb_row, mb_col); |
| 122 } | 128 } |
| 123 else | 129 else |
| 124 { | 130 { |
| 125 vp8_build_inter_predictors_mb_s(xd); | 131 vp8_build_inter16x16_predictors_mb(xd, xd->dst.y_buffer, |
| 132 xd->dst.u_buffer, xd->dst.v_buffer, |
| 133 xd->dst.y_stride, xd->dst.uv_stride); |
| 126 } | 134 } |
| 127 return; | 135 return; |
| 128 } | 136 } |
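
Note on the hunk above: the rewrite folds the mode check into eobtotal, so the copy-the-predictor shortcut is taken only for whole-macroblock modes with no coefficients, and only while the boolean decoder is still healthy (a corrupted partition can report eobtotal == 0 spuriously). A minimal compile-ready sketch of that gating, using hypothetical stand-in mode values rather than the decoder's real enums:

    #include <stdio.h>

    /* Hypothetical stand-ins; the real decoder uses its MB_PREDICTION_MODE enum. */
    enum { ZEROMV = 0, B_PRED = 1, SPLITMV = 2 };

    /* The skip path may be taken only when there are no coefficients to add,
     * the mode reconstructs the whole 16x16 at once, and no bitstream error
     * is pending (an errored partition can yield eobtotal == 0 by accident).
     */
    static int can_skip_residual(int eobtotal, int mode, int bool_error)
    {
        eobtotal |= (mode == B_PRED || mode == SPLITMV);
        return !eobtotal && !bool_error;
    }

    int main(void)
    {
        printf("%d\n", can_skip_residual(0, ZEROMV, 0));   /* 1: shortcut taken */
        printf("%d\n", can_skip_residual(0, B_PRED, 0));   /* 0: per-block mode */
        printf("%d\n", can_skip_residual(0, ZEROMV, 1));   /* 0: decoder errored */
        return 0;
    }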
| 129 | 137 |
| 130 if (xd->segmentation_enabled) | 138 if (xd->segmentation_enabled) |
| 131 mb_init_dequantizer(pbi, xd); | 139 mb_init_dequantizer(pbi, xd); |
| 132 | 140 |
| 133 /* do prediction */ | 141 /* do prediction */ |
| 134 if (xd->frame_type == KEY_FRAME || xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME) | 142 if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME) |
| 135 { | 143 { |
| 136 vp8mt_build_intra_predictors_mbuv(pbi, xd, mb_row, mb_col); | 144 vp8mt_build_intra_predictors_mbuv(pbi, xd, mb_row, mb_col); |
| 137 | 145 |
| 138 if (xd->mode_info_context->mbmi.mode != B_PRED) | 146 if (xd->mode_info_context->mbmi.mode != B_PRED) |
| 139 { | 147 { |
| 140 vp8mt_build_intra_predictors_mby(pbi, xd, mb_row, mb_col); | 148 vp8mt_build_intra_predictors_mby(pbi, xd, mb_row, mb_col); |
| 141 } else { | 149 } else { |
| 142 vp8mt_intra_prediction_down_copy(pbi, xd, mb_row, mb_col); | 150 vp8mt_intra_prediction_down_copy(pbi, xd, mb_row, mb_col); |
| 143 } | 151 } |
| 144 } | 152 } |
| 145 else | 153 else |
| 146 { | 154 { |
| 147 vp8_build_inter_predictors_mb(xd); | 155 vp8_build_inter_predictors_mb(xd); |
| 148 } | 156 } |
| 149 | 157 |
| 158 /* When we have independent partitions we can apply residual even |
| 159 * though other partitions within the frame are corrupt. |
| 160 */ |
| 161 throw_residual = (!pbi->independent_partitions && |
| 162 pbi->frame_corrupt_residual); |
| 163 throw_residual = (throw_residual || vp8dx_bool_error(xd->current_bc)); |
| 164 |
| 165 #if CONFIG_ERROR_CONCEALMENT |
| 166 if (pbi->ec_active && |
| 167 (mb_row * pbi->common.mb_cols + mb_col >= pbi->mvs_corrupt_from_mb || |
| 168 throw_residual)) |
| 169 { |
| 170 /* MB with corrupt residuals or corrupt mode/motion vectors. |
| 171 * Better to use the predictor as reconstruction. |
| 172 */ |
| 173 pbi->frame_corrupt_residual = 1; |
| 174 vpx_memset(xd->qcoeff, 0, sizeof(xd->qcoeff)); |
| 175 vp8_conceal_corrupt_mb(xd); |
| 176 return; |
| 177 } |
| 178 #endif |
| 179 |
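
The CONFIG_ERROR_CONCEALMENT hunk above discards the residual whenever this MB's coefficients cannot be trusted: if partitions are not independent and an earlier MB in the frame already had corrupt residual, or the boolean decoder reports an error, the predictor alone becomes the reconstruction. A compile-ready miniature of that gate, with invented struct fields standing in for the VP8D_COMP/MACROBLOCKD state:

    #include <string.h>
    #include <stdio.h>

    /* Invented miniature of the decoder state consulted by the gate above. */
    struct mb_state {
        int independent_partitions;
        int frame_corrupt_residual;
        int bool_error;              /* stand-in for vp8dx_bool_error() */
        short qcoeff[400];           /* stand-in for xd->qcoeff */
    };

    /* Returns 1 when the residual was discarded and the predictor should be
     * used as the reconstruction, mirroring the throw_residual logic above. */
    static int maybe_throw_residual(struct mb_state *s, int ec_active)
    {
        int throw_residual = (!s->independent_partitions &&
                              s->frame_corrupt_residual);
        throw_residual = (throw_residual || s->bool_error);

        if (ec_active && throw_residual)
        {
            s->frame_corrupt_residual = 1;           /* taint the rest of the frame */
            memset(s->qcoeff, 0, sizeof(s->qcoeff)); /* nothing gets added to the predictor */
            return 1;
        }
        return 0;
    }

    int main(void)
    {
        struct mb_state s = {0, 1, 0, {0}};
        printf("concealed: %d\n", maybe_throw_residual(&s, 1)); /* 1 */
        return 0;
    }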
| 150 /* dequantization and idct */ | 180 /* dequantization and idct */ |
| 151 if (xd->mode_info_context->mbmi.mode != B_PRED && xd->mode_info_context->mbmi.mode != SPLITMV) | 181 if (xd->mode_info_context->mbmi.mode != B_PRED && xd->mode_info_context->mbmi.mode != SPLITMV) |
| 152 { | 182 { |
| 153 BLOCKD *b = &xd->block[24]; | 183 BLOCKD *b = &xd->block[24]; |
| 154 DEQUANT_INVOKE(&pbi->dequant, block)(b); | 184 DEQUANT_INVOKE(&pbi->dequant, block)(b); |
| 155 | 185 |
| 156 /* do 2nd order transform on the dc block */ | 186 /* do 2nd order transform on the dc block */ |
| 157 if (xd->eobs[24] > 1) | 187 if (xd->eobs[24] > 1) |
| 158 { | 188 { |
| 159 IDCT_INVOKE(RTCD_VTABLE(idct), iwalsh16)(&b->dqcoeff[0], b->diff); | 189 IDCT_INVOKE(RTCD_VTABLE(idct), iwalsh16)(&b->dqcoeff[0], b->diff); |
| (...skipping 10 matching lines...) |
| 170 { | 200 { |
| 171 IDCT_INVOKE(RTCD_VTABLE(idct), iwalsh1)(&b->dqcoeff[0], b->diff); | 201 IDCT_INVOKE(RTCD_VTABLE(idct), iwalsh1)(&b->dqcoeff[0], b->diff); |
| 172 ((int *)b->qcoeff)[0] = 0; | 202 ((int *)b->qcoeff)[0] = 0; |
| 173 } | 203 } |
| 174 | 204 |
| 175 DEQUANT_INVOKE (&pbi->dequant, dc_idct_add_y_block) | 205 DEQUANT_INVOKE (&pbi->dequant, dc_idct_add_y_block) |
| 176 (xd->qcoeff, xd->block[0].dequant, | 206 (xd->qcoeff, xd->block[0].dequant, |
| 177 xd->predictor, xd->dst.y_buffer, | 207 xd->predictor, xd->dst.y_buffer, |
| 178 xd->dst.y_stride, xd->eobs, xd->block[24].diff); | 208 xd->dst.y_stride, xd->eobs, xd->block[24].diff); |
| 179 } | 209 } |
| 180 else if ((xd->frame_type == KEY_FRAME || xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME) && xd->mode_info_context->mbmi.mode == B_PRED) | 210 else if (xd->mode_info_context->mbmi.mode == B_PRED) |
| 181 { | 211 { |
| 182 for (i = 0; i < 16; i++) | 212 for (i = 0; i < 16; i++) |
| 183 { | 213 { |
| 184 BLOCKD *b = &xd->block[i]; | 214 BLOCKD *b = &xd->block[i]; |
| 185 vp8mt_predict_intra4x4(pbi, xd, b->bmi.mode, b->predictor, mb_row, mb_col, i); | 215 |
| 216 vp8mt_predict_intra4x4(pbi, xd, b->bmi.as_mode, b->predictor, mb_row, mb_col, i); |
| 186 | 217 |
| 187 if (xd->eobs[i] > 1) | 218 if (xd->eobs[i] > 1) |
| 188 { | 219 { |
| 189 DEQUANT_INVOKE(&pbi->dequant, idct_add) | 220 DEQUANT_INVOKE(&pbi->dequant, idct_add) |
| 190 (b->qcoeff, b->dequant, b->predictor, | 221 (b->qcoeff, b->dequant, b->predictor, |
| 191 *(b->base_dst) + b->dst, 16, b->dst_stride); | 222 *(b->base_dst) + b->dst, 16, b->dst_stride); |
| 192 } | 223 } |
| 193 else | 224 else |
| 194 { | 225 { |
| 195 IDCT_INVOKE(RTCD_VTABLE(idct), idct1_scalar_add) | 226 IDCT_INVOKE(RTCD_VTABLE(idct), idct1_scalar_add) |
| (...skipping 49 matching lines...) |
| 245 { | 276 { |
| 246 int i; | 277 int i; |
| 247 int recon_yoffset, recon_uvoffset; | 278 int recon_yoffset, recon_uvoffset; |
| 248 int mb_col; | 279 int mb_col; |
| 249 int ref_fb_idx = pc->lst_fb_idx; | 280 int ref_fb_idx = pc->lst_fb_idx; |
| 250 int dst_fb_idx = pc->new_fb_idx; | 281 int dst_fb_idx = pc->new_fb_idx; |
| 251 int recon_y_stride = pc->yv12_fb[ref_fb_idx].y_stride; | 282 int recon_y_stride = pc->yv12_fb[ref_fb_idx].y_stride; |
| 252 int recon_uv_stride = pc->yv12_fb[ref_fb_idx].uv_stride; | 283 int recon_uv_stride = pc->yv12_fb[ref_fb_idx].uv_stride; |
| 253 | 284 |
| 254 int filter_level; | 285 int filter_level; |
| 255 loop_filter_info *lfi = pc->lf_info; | 286 loop_filter_info_n *lfi_n = &pc->lf_info; |
| 256 int alt_flt_enabled = xd->segmentation_enabled; | |
| 257 int Segment; | |
| 258 | 287 |
| 259 pbi->mb_row_di[ithread].mb_row = mb_row; | 288 pbi->mb_row_di[ithread].mb_row = mb_row; |
| 260 pbi->mb_row_di[ithread].mbd.current_bc = &pbi->mbc[mb_row%num_part]; | 289 pbi->mb_row_di[ithread].mbd.current_bc = &pbi->mbc[mb_row%num_part]; |
| 261 | 290 |
| 262 last_row_current_mb_col = &pbi->mt_current_mb_col[mb_row -1]; | 291 last_row_current_mb_col = &pbi->mt_current_mb_col[mb_row -1]; |
| 263 | 292 |
| 264 recon_yoffset = mb_row * recon_y_stride * 16; | 293 recon_yoffset = mb_row * recon_y_stride * 16; |
| 265 recon_uvoffset = mb_row * recon_uv_stride * 8; | 294 recon_uvoffset = mb_row * recon_uv_stride * 8; |
| 266 /* reset above block coeffs */ | 295 /* reset above block coeffs */ |
| 267 | 296 |
| 268 xd->above_context = pc->above_context; | 297 xd->above_context = pc->above_context; |
| 269 xd->left_context = &mb_row_left_context; | 298 xd->left_context = &mb_row_left_context; |
| 270 vpx_memset(&mb_row_left_context, 0, sizeof(mb_row_left_context)); | 299 vpx_memset(&mb_row_left_context, 0, sizeof(mb_row_left_context)); |
| 271 xd->up_available = (mb_row != 0); | 300 xd->up_available = (mb_row != 0); |
| 272 | 301 |
| 273 xd->mb_to_top_edge = -((mb_row * 16)) << 3; | 302 xd->mb_to_top_edge = -((mb_row * 16)) << 3; |
| 274 xd->mb_to_bottom_edge = ((pc->mb_rows - 1 - mb_row) * 16) << 3; | 303 xd->mb_to_bottom_edge = ((pc->mb_rows - 1 - mb_row) * 16) << 3; |
| 275 | 304 |
| 276 for (mb_col = 0; mb_col < pc->mb_cols; mb_col++) | 305 for (mb_col = 0; mb_col < pc->mb_cols; mb_col++) |
| 277 { | 306 { |
| 278 if ((mb_col & (nsync-1)) == 0) | 307 if ((mb_col & (nsync-1)) == 0) |
| 279 { | 308 { |
| 280 while (mb_col > (*last_row_current_mb_col - nsync) && *last_row_current_mb_col != pc->mb_cols - 1) | 309 while (mb_col > (*last_row_current_mb_col - nsync) && *last_row_current_mb_col != pc->mb_cols - 1) |
| 281 { | 310 { |
| 282 x86_pause_hint(); | 311 x86_pause_hint(); |
| 283 thread_sleep(0); | 312 thread_sleep(0); |
| 284 } | 313 } |
| 285 } | 314 } |
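
The spin loop above is the decoder's inter-row synchronization: each row's thread publishes its progress in mt_current_mb_col[], and a thread may only decode macroblock (row, col) once the row above has advanced at least nsync columns past it, since intra prediction and the loop filter reach into that row. A sketch of the same wait, assuming a power-of-two nsync (which is why the cheap mask test works):

    #include <stdio.h>

    /* Progress of each row is published through a shared array; the reader of
     * the row above polls it. Checking only every nsync-th column keeps the
     * polling cost down; nsync must be a power of two for the mask test. */
    static void wait_for_row_above(volatile const int *above_progress,
                                   int mb_col, int nsync, int last_col)
    {
        if ((mb_col & (nsync - 1)) == 0)
        {
            while (mb_col > (*above_progress - nsync) &&
                   *above_progress != last_col)
            {
                /* the real loop calls x86_pause_hint() and thread_sleep(0) */
            }
        }
    }

    int main(void)
    {
        volatile int above = 15;               /* row above already at its last column */
        wait_for_row_above(&above, 8, 8, 15);  /* returns immediately */
        puts("safe to decode this macroblock");
        return 0;
    }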
| 286 | 315 |
| 287 if (xd->mode_info_context->mbmi.mode == SPLITMV || xd->mode_info_context->mbmi.mode == B_PRED) | 316 update_blockd_bmi(xd); |
| 288 { | |
| 289 for (i = 0; i < 16; i++) | |
| 290 { | |
| 291 BLOCKD *d = &xd->block[i]; | |
| 292 vpx_memcpy(&d->bmi, &xd->mode_info_context->bmi[i], sizeof(B_MODE_INFO)); | |
| 293 } | |
| 294 } | |
| 295 | 317 |
| 296 /* Distance of Mb to the various image edges. | 318 /* Distance of MB to the various image edges. |
| 297 * These are specified to 8th pel as they are always compared to values that are in 1/8th pel units | 319 * These are specified to 8th pel as they are always |
| 320 * compared to values that are in 1/8th pel units. |
| 298 */ | 321 */ |
| 299 xd->mb_to_left_edge = -((mb_col * 16) << 3); | 322 xd->mb_to_left_edge = -((mb_col * 16) << 3); |
| 300 xd->mb_to_right_edge = ((pc->mb_cols - 1 - mb_col) * 16) << 3; | 323 xd->mb_to_right_edge = ((pc->mb_cols - 1 - mb_col) * 16) << 3; |
| 301 | 324 |
| 325 #if CONFIG_ERROR_CONCEALMENT |
| 326 { |
| 327 int corrupt_residual = |
| 328 (!pbi->independent_partitions && |
| 329 pbi->frame_corrupt_residual) || |
| 330 vp8dx_bool_error(xd->current_bc); |
| 331 if (pbi->ec_active && |
| 332 (xd->mode_info_context->mbmi.ref_frame == |
| 333 INTRA_FRAME) && |
| 334 corrupt_residual) |
| 335 { |
| 336 /* We have an intra block with corrupt |
| 337 * coefficients, better to conceal with an inter |
| 338 * block. |
| 339 * Interpolate MVs from neighboring MBs |
| 340 * |
| 341 * Note that for the first mb with corrupt |
| 342 * residual in a frame, we might not discover |
| 343 * that before decoding the residual. That |
| 344 * happens after this check, and therefore no |
| 345 * inter concealment will be done. |
| 346 */ |
| 347 vp8_interpolate_motion(xd, |
| 348 mb_row, mb_col, |
| 349 pc->mb_rows, pc->mb_cols, |
| 350 pc->mode_info_stride); |
| 351 } |
| 352 } |
| 353 #endif |
| 354 |
| 355 |
| 302 xd->dst.y_buffer = pc->yv12_fb[dst_fb_idx].y_buffer + recon_yoffset; | 356 xd->dst.y_buffer = pc->yv12_fb[dst_fb_idx].y_buffer + recon_yoffset; |
| 303 xd->dst.u_buffer = pc->yv12_fb[dst_fb_idx].u_buffer + recon_uvoffset; | 357 xd->dst.u_buffer = pc->yv12_fb[dst_fb_idx].u_buffer + recon_uvoffset; |
| 304 xd->dst.v_buffer = pc->yv12_fb[dst_fb_idx].v_buffer + recon_uvoffset; | 358 xd->dst.v_buffer = pc->yv12_fb[dst_fb_idx].v_buffer + recon_uvoffset; |
| 305 | 359 |
| 306 xd->left_available = (mb_col != 0); | 360 xd->left_available = (mb_col != 0); |
| 307 | 361 |
| 308 /* Select the appropriate reference frame for this MB */ | 362 /* Select the appropriate reference frame for this MB */ |
| 309 if (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME) | 363 if (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME) |
| 310 ref_fb_idx = pc->lst_fb_idx; | 364 ref_fb_idx = pc->lst_fb_idx; |
| 311 else if (xd->mode_info_context->mbmi.ref_frame == GOLDEN_FRAME) | 365 else if (xd->mode_info_context->mbmi.ref_frame == GOLDEN_FRAME) |
| 312 ref_fb_idx = pc->gld_fb_idx; | 366 ref_fb_idx = pc->gld_fb_idx; |
| 313 else | 367 else |
| 314 ref_fb_idx = pc->alt_fb_idx; | 368 ref_fb_idx = pc->alt_fb_idx; |
| 315 | 369 |
| 316 xd->pre.y_buffer = pc->yv12_fb[ref_fb_idx].y_buffer + recon_yoffset; | 370 xd->pre.y_buffer = pc->yv12_fb[ref_fb_idx].y_buffer + recon_yoffset; |
| 317 xd->pre.u_buffer = pc->yv12_fb[ref_fb_idx].u_buffer + recon_uvoffset; | 371 xd->pre.u_buffer = pc->yv12_fb[ref_fb_idx].u_buffer + recon_uvoffset; |
| 318 xd->pre.v_buffer = pc->yv12_fb[ref_fb_idx].v_buffer + recon_uvoffset; | 372 xd->pre.v_buffer = pc->yv12_fb[ref_fb_idx].v_buffer + recon_uvoffset; |
| 319 | 373 |
| 374 if (xd->mode_info_context->mbmi.ref_frame != |
| 375 INTRA_FRAME) |
| 376 { |
| 377 /* propagate errors from reference frames */ |
| 378 xd->corrupted |= pc->yv12_fb[ref_fb_idx].corrupted; |
| 379 } |
| 380 |
| 320 vp8_build_uvmvs(xd, pc->full_pixel); | 381 vp8_build_uvmvs(xd, pc->full_pixel); |
| 321 decode_macroblock(pbi, xd, mb_row, mb_col); | 382 decode_macroblock(pbi, xd, mb_row, mb_col); |
| 322 | 383 |
| 384 /* check if the boolean decoder has suffered an error */ |
| 385 xd->corrupted |= vp8dx_bool_error(xd->current_bc); |
| 386 |
| 323 if (pbi->common.filter_level) | 387 if (pbi->common.filter_level) |
| 324 { | 388 { |
| 389 int skip_lf = (xd->mode_info_context->mbmi.mode != B_PRED && |
| 390 xd->mode_info_context->mbmi.mode != SPLITMV && |
| 391 xd->mode_info_context->mbmi.mb_skip_coeff); |
| 392 |
| 393 const int mode_index = lfi_n->mode_lf_lut[xd->mode_info_context->mbmi.mode]; |
| 394 const int seg = xd->mode_info_context->mbmi.segment_id; |
| 395 const int ref_frame = xd->mode_info_context->mbmi.ref_frame; |
| 396 |
| 397 filter_level = lfi_n->lvl[seg][ref_frame][mode_index]; |
| 398 |
| 325 if( mb_row != pc->mb_rows-1 ) | 399 if( mb_row != pc->mb_rows-1 ) |
| 326 { | 400 { |
| 327 /* Save decoded MB last row data for next-row decoding */ | 401 /* Save decoded MB last row data for next-row decoding */ |
| 328 vpx_memcpy((pbi->mt_yabove_row[mb_row + 1] + 32 + mb_col*16), (xd->dst.y_buffer + 15 * recon_y_stride), 16); | 402 vpx_memcpy((pbi->mt_yabove_row[mb_row + 1] + 32 + mb_col*16), (xd->dst.y_buffer + 15 * recon_y_stride), 16); |
| 329 vpx_memcpy((pbi->mt_uabove_row[mb_row + 1] + 16 + mb_col*8), (xd->dst.u_buffer + 7 * recon_uv_stride), 8); | 403 vpx_memcpy((pbi->mt_uabove_row[mb_row + 1] + 16 + mb_col*8), (xd->dst.u_buffer + 7 * recon_uv_stride), 8); |
| 330 vpx_memcpy((pbi->mt_vabove_row[mb_row + 1] + 16 + mb_col*8), (xd->dst.v_buffer + 7 * recon_uv_stride), 8); | 404 vpx_memcpy((pbi->mt_vabove_row[mb_row + 1] + 16 + mb_col*8), (xd->dst.v_buffer + 7 * recon_uv_stride), 8); |
| 331 } | 405 } |
| 332 | 406 |
| 333 /* save left_col for next MB decoding */ | 407 /* save left_col for next MB decoding */ |
| 334 if(mb_col != pc->mb_cols-1) | 408 if(mb_col != pc->mb_cols-1) |
| 335 { | 409 { |
| 336 MODE_INFO *next = xd->mode_info_context +1; | 410 MODE_INFO *next = xd->mode_info_context +1; |
| 337 | 411 |
| 338 if (xd->frame_type == KEY_FRAME || next->mbmi.ref_frame == INTRA_FRAME) | 412 if (next->mbmi.ref_frame == INTRA_FRAME) |
| 339 { | 413 { |
| 340 for (i = 0; i < 16; i++) | 414 for (i = 0; i < 16; i++) |
| 341 pbi->mt_yleft_col[mb_row][i] = xd->dst.y_buffer [i* recon_y_stride + 15]; | 415 pbi->mt_yleft_col[mb_row][i] = xd->dst.y_buffer [i* recon_y_stride + 15]; |
| 342 for (i = 0; i < 8; i++) | 416 for (i = 0; i < 8; i++) |
| 343 { | 417 { |
| 344 pbi->mt_uleft_col[mb_row][i] = xd->dst.u_buffer [i* recon_uv_stride + 7]; | 418 pbi->mt_uleft_col[mb_row][i] = xd->dst.u_buffer [i* recon_uv_stride + 7]; |
| 345 pbi->mt_vleft_col[mb_row][i] = xd->dst.v_buffer [i* recon_uv_stride + 7]; | 419 pbi->mt_vleft_col[mb_row][i] = xd->dst.v_buffer [i* recon_uv_stride + 7]; |
| 346 } | 420 } |
| 347 } | 421 } |
| 348 } | 422 } |
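
The copy blocks above exist because the loop filter destroys data the next row still needs: filtering rewrites the frame buffer in place, but intra prediction for row N+1 must see row N's unfiltered reconstruction, so the bottom pixel row of each macroblock (and the right column, for the MB alongside) is stashed in the mt_*above_row/mt_*left_col side buffers first. A minimal sketch of the luma copy, with hypothetical buffer names:

    #include <string.h>

    #define MB_SIZE 16

    /* Stash the bottom luma row of a just-decoded 16x16 block into the side
     * buffer the next MB row will use as its "above" context. Names are
     * hypothetical; in the patch the destination is mt_yabove_row[mb_row+1]
     * offset by the 32-pixel border. */
    static void save_unfiltered_above(unsigned char *above_next_row,
                                      const unsigned char *dst_y,
                                      int y_stride, int mb_col)
    {
        memcpy(above_next_row + mb_col * MB_SIZE,
               dst_y + (MB_SIZE - 1) * y_stride,  /* row 15 of the block */
               MB_SIZE);
    }

    int main(void)
    {
        unsigned char frame[MB_SIZE * MB_SIZE] = {0}, above[MB_SIZE * 4] = {0};
        frame[15 * MB_SIZE] = 42;                 /* mark a bottom-row pixel */
        save_unfiltered_above(above, frame, MB_SIZE, 0);
        return above[0] == 42 ? 0 : 1;
    }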
| 349 | 423 |
| 350 /* update loopfilter info */ | |
| 351 Segment = (alt_flt_enabled) ? xd->mode_info_context->mbmi.segment_id : 0; | |
| 352 filter_level = pbi->mt_baseline_filter_level[Segment]; | |
| 353 /* Distance of Mb to the various image edges. | |
| 354 * These are specified to 8th pel as they are always compared to values that are in 1/8th pel units | |
| 355 * Apply any context driven MB level adjustment | |
| 356 */ | |
| 357 filter_level = vp8_adjust_mb_lf_value(xd, filter_level); | |
| 358 | |
| 359 /* loopfilter on this macroblock. */ | 424 /* loopfilter on this macroblock. */ |
| 360 if (filter_level) | 425 if (filter_level) |
| 361 { | 426 { |
| 362 if (mb_col > 0) | 427 if(pc->filter_type == NORMAL_LOOPFILTER) |
| 363 pc->lf_mbv(xd->dst.y_buffer, xd->dst.u_buffer, xd->dst.v_buffer, recon_y_stride, recon_uv_stride, &lfi[filter_level], pc->simpler_lpf); | 428 { |
| 429 loop_filter_info lfi; |
| 430 FRAME_TYPE frame_type = pc->frame_type; |
| 431 const int hev_index = lfi_n->hev_thr_lut[frame_type][filter_level]; |
| 432 lfi.mblim = lfi_n->mblim[filter_level]; |
| 433 lfi.blim = lfi_n->blim[filter_level]; |
| 434 lfi.lim = lfi_n->lim[filter_level]; |
| 435 lfi.hev_thr = lfi_n->hev_thr[hev_index]; |
| 364 | 436 |
| 365 if (xd->mode_info_context->mbmi.dc_diff > 0) | 437 if (mb_col > 0) |
| 366 pc->lf_bv(xd->dst.y_buffer, xd->dst.u_buffer, xd->dst.v_buffer, recon_y_stride, recon_uv_stride, &lfi[filter_level], pc->simpler_lpf); | 438 LF_INVOKE(&pc->rtcd.loopfilter, normal_mb_v) |
| 439 (xd->dst.y_buffer, xd->dst.u_buffer, xd->dst.v_buffer, recon_y_stride, recon_uv_stride, &lfi); |
| 367 | 440 |
| 368 /* don't apply across umv border */ | 441 if (!skip_lf) |
| 369 if (mb_row > 0) | 442 LF_INVOKE(&pc->rtcd.loopfilter, normal_b_v) |
| 370 pc->lf_mbh(xd->dst.y_buffer, xd->dst.u_buffer, xd->dst.v_buffer, recon_y_stride, recon_uv_stride, &lfi[filter_level], pc->simpler_lpf); | 443 (xd->dst.y_buffer, xd->dst.u_buffer, xd->dst.v_buffer, recon_y_stride, recon_uv_stride, &lfi); |
| 371 | 444 |
| 372 if (xd->mode_info_context->mbmi.dc_diff > 0) | 445 /* don't apply across umv border */ |
| 373 pc->lf_bh(xd->dst.y_buffer, xd->dst.u_buffer, xd->dst.v_buffer, recon_y_stride, recon_uv_stride, &lfi[filter_level], pc->simpler_lpf); | 446 if (mb_row > 0) |
| 447 LF_INVOKE(&pc->rtcd.loopfilter, normal_mb_h) |
| 448 (xd->dst.y_buffer, xd->dst.u_buffer, xd->dst.v_buffer, recon_y_stride, recon_uv_stride, &lfi); |
| 449 |
| 450 if (!skip_lf) |
| 451 LF_INVOKE(&pc->rtcd.loopfilter, normal_b_h) |
| 452 (xd->dst.y_buffer, xd->dst.u_buffer, xd->dst.v_buffer, recon_y_stride, recon_uv_stride, &lfi); |
| 453 } |
| 454 else |
| 455 { |
| 456 if (mb_col > 0) |
| 457 LF_INVOKE(&pc->rtcd.loopfilter, simple_mb_v) |
| 458 (xd->dst.y_buffer, recon_y_stride, lfi_n->mblim[filter_level]); |
| 459 |
| 460 if (!skip_lf) |
| 461 LF_INVOKE(&pc->rtcd.loopfilter, simple_b_v) |
| 462 (xd->dst.y_buffer, recon_y_stride, lfi_n->blim[filter_level]); |
| 463 |
| 464 /* don't apply across umv border */ |
| 465 if (mb_row > 0) |
| 466 LF_INVOKE(&pc->rtcd.loopfilter, simple_mb_h) |
| 467 (xd->dst.y_buffer, recon_y_stride, lfi_n->mblim[filter_level]); |
| 468 |
| 469 if (!skip_lf) |
| 470 LF_INVOKE(&pc->rtcd.loopfilter, simple_b_h) |
| 471 (xd->dst.y_buffer, recon_y_stride, lfi_n->blim[filter_level]); |
| 472 } |
| 374 } | 473 } |
| 474 |
| 375 } | 475 } |
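
The replacement loop-filter code above trades the old vp8_adjust_mb_lf_value() call for table lookups: the frame-level init precomputes a level for every (segment, reference frame, mode class) triple, so each macroblock only indexes lfi_n->lvl and, for the normal filter, gathers its limit vectors into a small loop_filter_info. A sketch of the lookup shape; the array dimensions and fill values here are assumptions for illustration, not the library's definitions:

    #include <stdio.h>

    /* Assumed sizes for illustration only. */
    #define N_SEGMENTS     4
    #define N_REF_FRAMES   4   /* intra, last, golden, altref */
    #define N_MODE_CLASSES 4

    struct lf_levels {
        unsigned char lvl[N_SEGMENTS][N_REF_FRAMES][N_MODE_CLASSES];
    };

    int main(void)
    {
        struct lf_levels lfi_n = {{{{0}}}};
        int seg = 1, ref_frame = 2, mode_index = 3;

        /* frame init would fill the whole table once per frame */
        lfi_n.lvl[seg][ref_frame][mode_index] = 24;

        /* per-macroblock cost is now just the indexed load */
        printf("filter_level = %d\n", lfi_n.lvl[seg][ref_frame][mode_index]);
        return 0;
    }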
| 376 | 476 |
| 377 recon_yoffset += 16; | 477 recon_yoffset += 16; |
| 378 recon_uvoffset += 8; | 478 recon_uvoffset += 8; |
| 379 | 479 |
| 380 ++xd->mode_info_context; /* next mb */ | 480 ++xd->mode_info_context; /* next mb */ |
| 381 | 481 |
| 382 xd->above_context++; | 482 xd->above_context++; |
| 383 | 483 |
| 384 /*pbi->mb_row_di[ithread].current_mb_col = mb_col;*/ | 484 /*pbi->mb_row_di[ithread].current_mb_col = mb_col;*/ |
| (...skipping 37 matching lines...) |
| 422 } | 522 } |
| 423 | 523 |
| 424 | 524 |
| 425 void vp8_decoder_create_threads(VP8D_COMP *pbi) | 525 void vp8_decoder_create_threads(VP8D_COMP *pbi) |
| 426 { | 526 { |
| 427 int core_count = 0; | 527 int core_count = 0; |
| 428 int ithread; | 528 int ithread; |
| 429 | 529 |
| 430 pbi->b_multithreaded_rd = 0; | 530 pbi->b_multithreaded_rd = 0; |
| 431 pbi->allocated_decoding_thread_count = 0; | 531 pbi->allocated_decoding_thread_count = 0; |
| 432 core_count = (pbi->max_threads > 16) ? 16 : pbi->max_threads; | 532 |
| 533 /* limit decoding threads to the max number of token partitions */ |
| 534 core_count = (pbi->max_threads > 8) ? 8 : pbi->max_threads; |
| 535 |
| 536 /* limit decoding threads to the available cores */ |
| 537 if (core_count > pbi->common.processor_core_count) |
| 538 core_count = pbi->common.processor_core_count; |
| 433 | 539 |
| 434 if (core_count > 1) | 540 if (core_count > 1) |
| 435 { | 541 { |
| 436 pbi->b_multithreaded_rd = 1; | 542 pbi->b_multithreaded_rd = 1; |
| 437 pbi->decoding_thread_count = core_count -1; | 543 pbi->decoding_thread_count = core_count - 1; |
| 438 | 544 |
| 439 CHECK_MEM_ERROR(pbi->h_decoding_thread, vpx_malloc(sizeof(pthread_t) * pbi->decoding_thread_count)); | 545 CHECK_MEM_ERROR(pbi->h_decoding_thread, vpx_malloc(sizeof(pthread_t) * pbi->decoding_thread_count)); |
| 440 CHECK_MEM_ERROR(pbi->h_event_start_decoding, vpx_malloc(sizeof(sem_t) * pbi->decoding_thread_count)); | 546 CHECK_MEM_ERROR(pbi->h_event_start_decoding, vpx_malloc(sizeof(sem_t) * pbi->decoding_thread_count)); |
| 441 CHECK_MEM_ERROR(pbi->mb_row_di, vpx_memalign(32, sizeof(MB_ROW_DEC) * pbi->decoding_thread_count)); | 547 CHECK_MEM_ERROR(pbi->mb_row_di, vpx_memalign(32, sizeof(MB_ROW_DEC) * pbi->decoding_thread_count)); |
| 442 vpx_memset(pbi->mb_row_di, 0, sizeof(MB_ROW_DEC) * pbi->decoding_thread_count); | 548 vpx_memset(pbi->mb_row_di, 0, sizeof(MB_ROW_DEC) * pbi->decoding_thread_count); |
| 443 CHECK_MEM_ERROR(pbi->de_thread_data, vpx_malloc(sizeof(DECODETHREAD_DATA) * pbi->decoding_thread_count)); | 549 CHECK_MEM_ERROR(pbi->de_thread_data, vpx_malloc(sizeof(DECODETHREAD_DATA) * pbi->decoding_thread_count)); |
| 444 | 550 |
| 445 for (ithread = 0; ithread < pbi->decoding_thread_count; ithread++) | 551 for (ithread = 0; ithread < pbi->decoding_thread_count; ithread++) |
| 446 { | 552 { |
| 447 sem_init(&pbi->h_event_start_decoding[ithread], 0, 0); | 553 sem_init(&pbi->h_event_start_decoding[ithread], 0, 0); |
| 448 | 554 |
| 449 pbi->de_thread_data[ithread].ithread = ithread; | 555 pbi->de_thread_data[ithread].ithread = ithread; |
| 450 pbi->de_thread_data[ithread].ptr1 = (void *)pbi; | 556 pbi->de_thread_data[ithread].ptr1 = (void *)pbi; |
| 451 pbi->de_thread_data[ithread].ptr2 = (void *) &pbi->mb_row_di[ithread]; | 557 pbi->de_thread_data[ithread].ptr2 = (void *) &pbi->mb_row_di[ithread]; |
| 452 | 558 |
| 453 pthread_create(&pbi->h_decoding_thread[ithread], 0, thread_decoding_proc, (&pbi->de_thread_data[ithread])); | 559 pthread_create(&pbi->h_decoding_thread[ithread], 0, thread_decoding_proc, (&pbi->de_thread_data[ithread])); |
| 454 } | 560 } |
| 455 | 561 |
| 456 sem_init(&pbi->h_event_end_decoding, 0, 0); | 562 sem_init(&pbi->h_event_end_decoding, 0, 0); |
| 457 | 563 |
| 458 pbi->allocated_decoding_thread_count = pbi->decoding_thread_count; | 564 pbi->allocated_decoding_thread_count = pbi->decoding_thread_count; |
| 459 } | 565 } |
| 460 } | 566 } |
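
vp8_decoder_create_threads() above sets up a one-semaphore-per-worker start signal plus a single end-of-frame semaphore. A self-contained sketch of that style of handshake (simplified: here every worker posts the end semaphore and the main thread waits once per worker, which is not necessarily the patch's exact protocol); build with -lpthread:

    #include <pthread.h>
    #include <semaphore.h>
    #include <stdio.h>

    #define NWORKERS 2

    static sem_t start_sem[NWORKERS];   /* one "go" signal per worker */
    static sem_t end_sem;               /* completion signal back to main */

    static void *worker(void *arg)
    {
        int id = *(int *)arg;
        sem_wait(&start_sem[id]);       /* parked until a frame is ready */
        printf("worker %d: decoding its share of rows\n", id);
        sem_post(&end_sem);
        return NULL;
    }

    int main(void)
    {
        pthread_t tid[NWORKERS];
        int ids[NWORKERS];
        int i;

        sem_init(&end_sem, 0, 0);
        for (i = 0; i < NWORKERS; i++)
        {
            ids[i] = i;
            sem_init(&start_sem[i], 0, 0);
            pthread_create(&tid[i], NULL, worker, &ids[i]);
        }

        for (i = 0; i < NWORKERS; i++)  /* frame arrived: release everyone */
            sem_post(&start_sem[i]);
        for (i = 0; i < NWORKERS; i++)
            sem_wait(&end_sem);

        for (i = 0; i < NWORKERS; i++)
            pthread_join(tid[i], NULL);
        return 0;
    }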
| 461 | 567 |
| 462 | 568 |
| 463 void vp8mt_de_alloc_temp_buffers(VP8D_COMP *pbi, int mb_rows) | 569 void vp8mt_de_alloc_temp_buffers(VP8D_COMP *pbi, int mb_rows) |
| 464 { | 570 { |
| 465 VP8_COMMON *const pc = & pbi->common; | |
| 466 int i; | 571 int i; |
| 467 | 572 |
| 468 if (pbi->b_multithreaded_rd) | 573 if (pbi->b_multithreaded_rd) |
| 469 { | 574 { |
| 470 vpx_free(pbi->mt_current_mb_col); | 575 vpx_free(pbi->mt_current_mb_col); |
| 471 pbi->mt_current_mb_col = NULL ; | 576 pbi->mt_current_mb_col = NULL ; |
| 472 | 577 |
| 473 /* Free above_row buffers. */ | 578 /* Free above_row buffers. */ |
| 474 if (pbi->mt_yabove_row) | 579 if (pbi->mt_yabove_row) |
| 475 { | 580 { |
| (...skipping 148 matching lines...) |
| 624 pbi->h_event_start_decoding = NULL; | 729 pbi->h_event_start_decoding = NULL; |
| 625 | 730 |
| 626 vpx_free(pbi->mb_row_di); | 731 vpx_free(pbi->mb_row_di); |
| 627 pbi->mb_row_di = NULL ; | 732 pbi->mb_row_di = NULL ; |
| 628 | 733 |
| 629 vpx_free(pbi->de_thread_data); | 734 vpx_free(pbi->de_thread_data); |
| 630 pbi->de_thread_data = NULL; | 735 pbi->de_thread_data = NULL; |
| 631 } | 736 } |
| 632 } | 737 } |
| 633 | 738 |
| 634 | |
| 635 static void lpf_init( VP8D_COMP *pbi, int default_filt_lvl) | |
| 636 { | |
| 637 VP8_COMMON *cm = &pbi->common; | |
| 638 MACROBLOCKD *mbd = &pbi->mb; | |
| 639 /*YV12_BUFFER_CONFIG *post = &cm->new_frame;*/ /*frame_to_show;*/ | |
| 640 loop_filter_info *lfi = cm->lf_info; | |
| 641 FRAME_TYPE frame_type = cm->frame_type; | |
| 642 | |
| 643 /*int mb_row; | |
| 644 int mb_col; | |
| 645 int baseline_filter_level[MAX_MB_SEGMENTS];*/ | |
| 646 int alt_flt_enabled = mbd->segmentation_enabled; | |
| 647 | |
| 648 int i; | |
| 649 /*unsigned char *y_ptr, *u_ptr, *v_ptr;*/ | |
| 650 | |
| 651 /* Note the baseline filter values for each segment */ | |
| 652 if (alt_flt_enabled) | |
| 653 { | |
| 654 for (i = 0; i < MAX_MB_SEGMENTS; i++) | |
| 655 { | |
| 656 /* Abs value */ | |
| 657 if (mbd->mb_segement_abs_delta == SEGMENT_ABSDATA) | |
| 658 pbi->mt_baseline_filter_level[i] = mbd->segment_feature_data[MB_LVL_ALT_LF][i]; | |
| 659 /* Delta Value */ | |
| 660 else | |
| 661 { | |
| 662 pbi->mt_baseline_filter_level[i] = default_filt_lvl + mbd->segment_feature_data[MB_LVL_ALT_LF][i]; | |
| 663 pbi->mt_baseline_filter_level[i] = (pbi->mt_baseline_filter_level[i] >= 0) ? ((pbi->mt_baseline_filter_level[i] <= MAX_LOOP_FILTER) ? pbi->mt_baseline_filter_level[i] : MAX_LOOP_FILTER) : 0; /* Clamp to valid range */ | |
| 664 } | |
| 665 } | |
| 666 } | |
| 667 else | |
| 668 { | |
| 669 for (i = 0; i < MAX_MB_SEGMENTS; i++) | |
| 670 pbi->mt_baseline_filter_level[i] = default_filt_lvl; | |
| 671 } | |
| 672 | |
| 673 /* Initialize the loop filter for this frame. */ | |
| 674 if ((cm->last_filter_type != cm->filter_type) || (cm->last_sharpness_level != cm->sharpness_level)) | |
| 675 vp8_init_loop_filter(cm); | |
| 676 else if (frame_type != cm->last_frame_type) | |
| 677 vp8_frame_init_loop_filter(lfi, frame_type); | |
| 678 } | |
| 679 | |
| 680 | |
| 681 void vp8mt_decode_mb_rows( VP8D_COMP *pbi, MACROBLOCKD *xd) | 739 void vp8mt_decode_mb_rows( VP8D_COMP *pbi, MACROBLOCKD *xd) |
| 682 { | 740 { |
| 683 int mb_row; | 741 int mb_row; |
| 684 VP8_COMMON *pc = &pbi->common; | 742 VP8_COMMON *pc = &pbi->common; |
| 685 | 743 |
| 686 int ibc = 0; | |
| 687 int num_part = 1 << pbi->common.multi_token_partition; | 744 int num_part = 1 << pbi->common.multi_token_partition; |
| 688 int i; | 745 int i; |
| 689 volatile int *last_row_current_mb_col = NULL; | 746 volatile int *last_row_current_mb_col = NULL; |
| 690 int nsync = pbi->sync_range; | 747 int nsync = pbi->sync_range; |
| 691 | 748 |
| 692 int filter_level; | 749 int filter_level = pc->filter_level; |
| 693 loop_filter_info *lfi = pc->lf_info; | 750 loop_filter_info_n *lfi_n = &pc->lf_info; |
| 694 int alt_flt_enabled = xd->segmentation_enabled; | |
| 695 int Segment; | |
| 696 | 751 |
| 697 if(pbi->common.filter_level) | 752 if (filter_level) |
| 698 { | 753 { |
| 699 /* Set above_row buffer to 127 for decoding first MB row */ | 754 /* Set above_row buffer to 127 for decoding first MB row */ |
| 700 vpx_memset(pbi->mt_yabove_row[0] + VP8BORDERINPIXELS-1, 127, pc->yv12_fb[pc->lst_fb_idx].y_width + 5); | 755 vpx_memset(pbi->mt_yabove_row[0] + VP8BORDERINPIXELS-1, 127, pc->yv12_fb[pc->lst_fb_idx].y_width + 5); |
| 701 vpx_memset(pbi->mt_uabove_row[0] + (VP8BORDERINPIXELS>>1)-1, 127, (pc->yv12_fb[pc->lst_fb_idx].y_width>>1) +5); | 756 vpx_memset(pbi->mt_uabove_row[0] + (VP8BORDERINPIXELS>>1)-1, 127, (pc->yv12_fb[pc->lst_fb_idx].y_width>>1) +5); |
| 702 vpx_memset(pbi->mt_vabove_row[0] + (VP8BORDERINPIXELS>>1)-1, 127, (pc->yv12_fb[pc->lst_fb_idx].y_width>>1) +5); | 757 vpx_memset(pbi->mt_vabove_row[0] + (VP8BORDERINPIXELS>>1)-1, 127, (pc->yv12_fb[pc->lst_fb_idx].y_width>>1) +5); |
| 703 | 758 |
| 704 for (i=1; i<pc->mb_rows; i++) | 759 for (i=1; i<pc->mb_rows; i++) |
| 705 { | 760 { |
| 706 vpx_memset(pbi->mt_yabove_row[i] + VP8BORDERINPIXELS-1, (unsigned char)129, 1); | 761 vpx_memset(pbi->mt_yabove_row[i] + VP8BORDERINPIXELS-1, (unsigned char)129, 1); |
| 707 vpx_memset(pbi->mt_uabove_row[i] + (VP8BORDERINPIXELS>>1)-1, (unsigned char)129, 1); | 762 vpx_memset(pbi->mt_uabove_row[i] + (VP8BORDERINPIXELS>>1)-1, (unsigned char)129, 1); |
| 708 vpx_memset(pbi->mt_vabove_row[i] + (VP8BORDERINPIXELS>>1)-1, (unsigned char)129, 1); | 763 vpx_memset(pbi->mt_vabove_row[i] + (VP8BORDERINPIXELS>>1)-1, (unsigned char)129, 1); |
| 709 } | 764 } |
| 710 | 765 |
| 711 /* Set left_col to 129 initially */ | 766 /* Set left_col to 129 initially */ |
| 712 for (i=0; i<pc->mb_rows; i++) | 767 for (i=0; i<pc->mb_rows; i++) |
| 713 { | 768 { |
| 714 vpx_memset(pbi->mt_yleft_col[i], (unsigned char)129, 16); | 769 vpx_memset(pbi->mt_yleft_col[i], (unsigned char)129, 16); |
| 715 vpx_memset(pbi->mt_uleft_col[i], (unsigned char)129, 8); | 770 vpx_memset(pbi->mt_uleft_col[i], (unsigned char)129, 8); |
| 716 vpx_memset(pbi->mt_vleft_col[i], (unsigned char)129, 8); | 771 vpx_memset(pbi->mt_vleft_col[i], (unsigned char)129, 8); |
| 717 } | 772 } |
| 718 lpf_init(pbi, pc->filter_level); | 773 |
| 774 /* Initialize the loop filter for this frame. */ |
| 775 vp8_loop_filter_frame_init(pc, &pbi->mb, filter_level); |
| 719 } | 776 } |
| 720 | 777 |
| 721 setup_decoding_thread_data(pbi, xd, pbi->mb_row_di, pbi->decoding_thread_count); | 778 setup_decoding_thread_data(pbi, xd, pbi->mb_row_di, pbi->decoding_thread_count); |
| 722 | 779 |
| 723 for (i = 0; i < pbi->decoding_thread_count; i++) | 780 for (i = 0; i < pbi->decoding_thread_count; i++) |
| 724 sem_post(&pbi->h_event_start_decoding[i]); | 781 sem_post(&pbi->h_event_start_decoding[i]); |
| 725 | 782 |
| 726 for (mb_row = 0; mb_row < pc->mb_rows; mb_row += (pbi->decoding_thread_count + 1)) | 783 for (mb_row = 0; mb_row < pc->mb_rows; mb_row += (pbi->decoding_thread_count + 1)) |
| 727 { | 784 { |
| 728 | |
| 729 xd->current_bc = &pbi->mbc[mb_row%num_part]; | 785 xd->current_bc = &pbi->mbc[mb_row%num_part]; |
| 730 | 786 |
| 731 /* vp8_decode_mb_row(pbi, pc, mb_row, xd); */ | 787 /* vp8_decode_mb_row(pbi, pc, mb_row, xd); */ |
| 732 { | 788 { |
| 733 int i; | 789 int i; |
| 734 int recon_yoffset, recon_uvoffset; | 790 int recon_yoffset, recon_uvoffset; |
| 735 int mb_col; | 791 int mb_col; |
| 736 int ref_fb_idx = pc->lst_fb_idx; | 792 int ref_fb_idx = pc->lst_fb_idx; |
| 737 int dst_fb_idx = pc->new_fb_idx; | 793 int dst_fb_idx = pc->new_fb_idx; |
| 738 int recon_y_stride = pc->yv12_fb[ref_fb_idx].y_stride; | 794 int recon_y_stride = pc->yv12_fb[ref_fb_idx].y_stride; |
| (...skipping 17 matching lines...) |
| 756 for (mb_col = 0; mb_col < pc->mb_cols; mb_col++) | 812 for (mb_col = 0; mb_col < pc->mb_cols; mb_col++) |
| 757 { | 813 { |
| 758 if ( mb_row > 0 && (mb_col & (nsync-1)) == 0){ | 814 if ( mb_row > 0 && (mb_col & (nsync-1)) == 0){ |
| 759 while (mb_col > (*last_row_current_mb_col - nsync) && *last_row_current_mb_col != pc->mb_cols - 1) | 815 while (mb_col > (*last_row_current_mb_col - nsync) && *last_row_current_mb_col != pc->mb_cols - 1) |
| 760 { | 816 { |
| 761 x86_pause_hint(); | 817 x86_pause_hint(); |
| 762 thread_sleep(0); | 818 thread_sleep(0); |
| 763 } | 819 } |
| 764 } | 820 } |
| 765 | 821 |
| 766 if (xd->mode_info_context->mbmi.mode == SPLITMV || xd->mode_info_context->mbmi.mode == B_PRED) | 822 update_blockd_bmi(xd); |
| 767 { | |
| 768 for (i = 0; i < 16; i++) | |
| 769 { | |
| 770 BLOCKD *d = &xd->block[i]; | |
| 771 vpx_memcpy(&d->bmi, &xd->mode_info_context->bmi[i], sizeof(B_MODE_INFO)); | |
| 772 } | |
| 773 } | |
| 774 | 823 |
| 775 /* Distance of Mb to the various image edges. | 824 /* Distance of MB to the various image edges. |
| 776 * These are specified to 8th pel as they are always compared to values that are in 1/8th pel units | 825 * These are specified to 8th pel as they are always compared to |
| 826 * values that are in 1/8th pel units. |
| 777 */ | 827 */ |
| 778 xd->mb_to_left_edge = -((mb_col * 16) << 3); | 828 xd->mb_to_left_edge = -((mb_col * 16) << 3); |
| 779 xd->mb_to_right_edge = ((pc->mb_cols - 1 - mb_col) * 16) << 3; | 829 xd->mb_to_right_edge = ((pc->mb_cols - 1 - mb_col) * 16) << 3; |
| 780 | 830 |
| 831 #if CONFIG_ERROR_CONCEALMENT |
| 832 { |
| 833 int corrupt_residual = (!pbi->independent_partitions && |
| 834 pbi->frame_corrupt_residual) || |
| 835 vp8dx_bool_error(xd->current_bc); |
| 836 if (pbi->ec_active && |
| 837 (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME) && |
| 838 corrupt_residual) |
| 839 { |
| 840 /* We have an intra block with corrupt coefficients, |
| 841 * better to conceal with an inter block. Interpolate |
| 842 * MVs from neighboring MBs |
| 843 * |
| 844 * Note that for the first mb with corrupt residual in a |
| 845 * frame, we might not discover that before decoding the |
| 846 * residual. That happens after this check, and |
| 847 * therefore no inter concealment will be done. |
| 848 */ |
| 849 vp8_interpolate_motion(xd, |
| 850 mb_row, mb_col, |
| 851 pc->mb_rows, pc->mb_cols, |
| 852 pc->mode_info_stride); |
| 853 } |
| 854 } |
| 855 #endif |
| 856 |
| 857 |
| 781 xd->dst.y_buffer = pc->yv12_fb[dst_fb_idx].y_buffer + recon_yoff
set; | 858 xd->dst.y_buffer = pc->yv12_fb[dst_fb_idx].y_buffer + recon_yoff
set; |
| 782 xd->dst.u_buffer = pc->yv12_fb[dst_fb_idx].u_buffer + recon_uvof
fset; | 859 xd->dst.u_buffer = pc->yv12_fb[dst_fb_idx].u_buffer + recon_uvof
fset; |
| 783 xd->dst.v_buffer = pc->yv12_fb[dst_fb_idx].v_buffer + recon_uvof
fset; | 860 xd->dst.v_buffer = pc->yv12_fb[dst_fb_idx].v_buffer + recon_uvof
fset; |
| 784 | 861 |
| 785 xd->left_available = (mb_col != 0); | 862 xd->left_available = (mb_col != 0); |
| 786 | 863 |
| 787 /* Select the appropriate reference frame for this MB */ | 864 /* Select the appropriate reference frame for this MB */ |
| 788 if (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME) | 865 if (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME) |
| 789 ref_fb_idx = pc->lst_fb_idx; | 866 ref_fb_idx = pc->lst_fb_idx; |
| 790 else if (xd->mode_info_context->mbmi.ref_frame == GOLDEN_FRAME) | 867 else if (xd->mode_info_context->mbmi.ref_frame == GOLDEN_FRAME) |
| (...skipping 12 matching lines...) |
| 803 } | 880 } |
| 804 | 881 |
| 805 vp8_build_uvmvs(xd, pc->full_pixel); | 882 vp8_build_uvmvs(xd, pc->full_pixel); |
| 806 decode_macroblock(pbi, xd, mb_row, mb_col); | 883 decode_macroblock(pbi, xd, mb_row, mb_col); |
| 807 | 884 |
| 808 /* check if the boolean decoder has suffered an error */ | 885 /* check if the boolean decoder has suffered an error */ |
| 809 xd->corrupted |= vp8dx_bool_error(xd->current_bc); | 886 xd->corrupted |= vp8dx_bool_error(xd->current_bc); |
| 810 | 887 |
| 811 if (pbi->common.filter_level) | 888 if (pbi->common.filter_level) |
| 812 { | 889 { |
| 890 int skip_lf = (xd->mode_info_context->mbmi.mode != B_PRED && |
| 891 xd->mode_info_context->mbmi.mode != SPLITMV && |
| 892 xd->mode_info_context->mbmi.mb_skip_coeff); |
| 893 |
| 894 const int mode_index = lfi_n->mode_lf_lut[xd->mode_info_context->mbmi.mode]; |
| 895 const int seg = xd->mode_info_context->mbmi.segment_id; |
| 896 const int ref_frame = xd->mode_info_context->mbmi.ref_frame; |
| 897 |
| 898 filter_level = lfi_n->lvl[seg][ref_frame][mode_index]; |
| 899 |
| 813 /* Save decoded MB last row data for next-row decoding */ | 900 /* Save decoded MB last row data for next-row decoding */ |
| 814 if(mb_row != pc->mb_rows-1) | 901 if(mb_row != pc->mb_rows-1) |
| 815 { | 902 { |
| 816 vpx_memcpy((pbi->mt_yabove_row[mb_row +1] + 32 + mb_col*16), (xd->dst.y_buffer + 15 * recon_y_stride), 16); | 903 vpx_memcpy((pbi->mt_yabove_row[mb_row +1] + 32 + mb_col*16), (xd->dst.y_buffer + 15 * recon_y_stride), 16); |
| 817 vpx_memcpy((pbi->mt_uabove_row[mb_row +1] + 16 + mb_col*8), (xd->dst.u_buffer + 7 * recon_uv_stride), 8); | 904 vpx_memcpy((pbi->mt_uabove_row[mb_row +1] + 16 + mb_col*8), (xd->dst.u_buffer + 7 * recon_uv_stride), 8); |
| 818 vpx_memcpy((pbi->mt_vabove_row[mb_row +1] + 16 + mb_col*8), (xd->dst.v_buffer + 7 * recon_uv_stride), 8); | 905 vpx_memcpy((pbi->mt_vabove_row[mb_row +1] + 16 + mb_col*8), (xd->dst.v_buffer + 7 * recon_uv_stride), 8); |
| 819 } | 906 } |
| 820 | 907 |
| 821 /* save left_col for next MB decoding */ | 908 /* save left_col for next MB decoding */ |
| 822 if(mb_col != pc->mb_cols-1) | 909 if(mb_col != pc->mb_cols-1) |
| 823 { | 910 { |
| 824 MODE_INFO *next = xd->mode_info_context +1; | 911 MODE_INFO *next = xd->mode_info_context +1; |
| 825 | 912 |
| 826 if (xd->frame_type == KEY_FRAME || next->mbmi.ref_frame == INTRA_FRAME) | 913 if (next->mbmi.ref_frame == INTRA_FRAME) |
| 827 { | 914 { |
| 828 for (i = 0; i < 16; i++) | 915 for (i = 0; i < 16; i++) |
| 829 pbi->mt_yleft_col[mb_row][i] = xd->dst.y_buffer [i* recon_y_stride + 15]; | 916 pbi->mt_yleft_col[mb_row][i] = xd->dst.y_buffer [i* recon_y_stride + 15]; |
| 830 for (i = 0; i < 8; i++) | 917 for (i = 0; i < 8; i++) |
| 831 { | 918 { |
| 832 pbi->mt_uleft_col[mb_row][i] = xd->dst.u_buffer [i* recon_uv_stride + 7]; | 919 pbi->mt_uleft_col[mb_row][i] = xd->dst.u_buffer [i* recon_uv_stride + 7]; |
| 833 pbi->mt_vleft_col[mb_row][i] = xd->dst.v_buffer [i* recon_uv_stride + 7]; | 920 pbi->mt_vleft_col[mb_row][i] = xd->dst.v_buffer [i* recon_uv_stride + 7]; |
| 834 } | 921 } |
| 835 } | 922 } |
| 836 } | 923 } |
| 837 | 924 |
| 838 /* update loopfilter info */ | |
| 839 Segment = (alt_flt_enabled) ? xd->mode_info_context->mbmi.segment_id : 0; | |
| 840 filter_level = pbi->mt_baseline_filter_level[Segment]; | |
| 841 /* Distance of Mb to the various image edges. | |
| 842 * These are specified to 8th pel as they are always compared to values that are in 1/8th pel units | |
| 843 * Apply any context driven MB level adjustment | |
| 844 */ | |
| 845 filter_level = vp8_adjust_mb_lf_value(xd, filter_level); | |
| 846 | |
| 847 /* loopfilter on this macroblock. */ | 925 /* loopfilter on this macroblock. */ |
| 848 if (filter_level) | 926 if (filter_level) |
| 849 { | 927 { |
| 850 if (mb_col > 0) | 928 if(pc->filter_type == NORMAL_LOOPFILTER) |
| 851 pc->lf_mbv(xd->dst.y_buffer, xd->dst.u_buffer, xd->dst.v_buffer, recon_y_stride, recon_uv_stride, &lfi[filter_level], pc->simpler_lpf); | 929 { |
| 930 loop_filter_info lfi; |
| 931 FRAME_TYPE frame_type = pc->frame_type; |
| 932 const int hev_index = lfi_n->hev_thr_lut[frame_type][filter_level]; |
| 933 lfi.mblim = lfi_n->mblim[filter_level]; |
| 934 lfi.blim = lfi_n->blim[filter_level]; |
| 935 lfi.lim = lfi_n->lim[filter_level]; |
| 936 lfi.hev_thr = lfi_n->hev_thr[hev_index]; |
| 852 | 937 |
| 853 if (xd->mode_info_context->mbmi.dc_diff > 0) | 938 if (mb_col > 0) |
| 854 pc->lf_bv(xd->dst.y_buffer, xd->dst.u_buffer, xd->dst.v_buffer, recon_y_stride, recon_uv_stride, &lfi[filter_level], pc->simpler_lpf); | 939 LF_INVOKE(&pc->rtcd.loopfilter, normal_mb_v) |
| 940 (xd->dst.y_buffer, xd->dst.u_buffer, xd->dst.v_buffer, recon_y_stride, recon_uv_stride, &lfi); |
| 855 | 941 |
| 856 /* don't apply across umv border */ | 942 if (!skip_lf) |
| 857 if (mb_row > 0) | 943 LF_INVOKE(&pc->rtcd.loopfilter, normal_b_v) |
| 858 pc->lf_mbh(xd->dst.y_buffer, xd->dst.u_buffer, xd->dst.v_buffer, recon_y_stride, recon_uv_stride, &lfi[filter_level], pc->simpler_lpf); | 944 (xd->dst.y_buffer, xd->dst.u_buffer, xd->dst.v_buffer, recon_y_stride, recon_uv_stride, &lfi); |
| 859 | 945 |
| 860 if (xd->mode_info_context->mbmi.dc_diff > 0) | 946 /* don't apply across umv border */ |
| 861 pc->lf_bh(xd->dst.y_buffer, xd->dst.u_buffer, xd->dst.v_buffer, recon_y_stride, recon_uv_stride, &lfi[filter_level], pc->simpler_lpf); | 947 if (mb_row > 0) |
| 948 LF_INVOKE(&pc->rtcd.loopfilter, normal_mb_h) |
| 949 (xd->dst.y_buffer, xd->dst.u_buffer, xd->dst.v_buffer, recon_y_stride, recon_uv_stride, &lfi); |
| 950 |
| 951 if (!skip_lf) |
| 952 LF_INVOKE(&pc->rtcd.loopfilter, normal_b_h) |
| 953 (xd->dst.y_buffer, xd->dst.u_buffer, xd->dst.v_buffer, recon_y_stride, recon_uv_stride, &lfi); |
| 954 } |
| 955 else |
| 956 { |
| 957 if (mb_col > 0) |
| 958 LF_INVOKE(&pc->rtcd.loopfilter, simple_mb_v) |
| 959 (xd->dst.y_buffer, recon_y_stride, lfi_n->mblim[filter_level]); |
| 960 |
| 961 if (!skip_lf) |
| 962 LF_INVOKE(&pc->rtcd.loopfilter, simple_b_v) |
| 963 (xd->dst.y_buffer, recon_y_stride, lfi_n->blim[filter_level]); |
| 964 |
| 965 /* don't apply across umv border */ |
| 966 if (mb_row > 0) |
| 967 LF_INVOKE(&pc->rtcd.loopfilter, simple_mb_h) |
| 968 (xd->dst.y_buffer, recon_y_stride, lfi_n->mblim[filter_level]); |
| 969 |
| 970 if (!skip_lf) |
| 971 LF_INVOKE(&pc->rtcd.loopfilter, simple_b_h) |
| 972 (xd->dst.y_buffer, recon_y_stride, lfi_n->blim[filter_level]); |
| 973 } |
| 862 } | 974 } |
| 975 |
| 863 } | 976 } |
| 864 | |
| 865 recon_yoffset += 16; | 977 recon_yoffset += 16; |
| 866 recon_uvoffset += 8; | 978 recon_uvoffset += 8; |
| 867 | 979 |
| 868 ++xd->mode_info_context; /* next mb */ | 980 ++xd->mode_info_context; /* next mb */ |
| 869 | 981 |
| 870 xd->above_context++; | 982 xd->above_context++; |
| 871 | 983 |
| 872 pbi->mt_current_mb_col[mb_row] = mb_col; | 984 pbi->mt_current_mb_col[mb_row] = mb_col; |
| 873 } | 985 } |
| 874 | 986 |
| (...skipping 15 matching lines...) |
| 890 }else | 1002 }else |
| 891 vp8_extend_mb_row(&pc->yv12_fb[dst_fb_idx], xd->dst.y_buffer + 16, xd->dst.u_buffer + 8, xd->dst.v_buffer + 8); | 1003 vp8_extend_mb_row(&pc->yv12_fb[dst_fb_idx], xd->dst.y_buffer + 16, xd->dst.u_buffer + 8, xd->dst.v_buffer + 8); |
| 892 | 1004 |
| 893 ++xd->mode_info_context; /* skip prediction column */ | 1005 ++xd->mode_info_context; /* skip prediction column */ |
| 894 } | 1006 } |
| 895 xd->mode_info_context += xd->mode_info_stride * pbi->decoding_thread_count; | 1007 xd->mode_info_context += xd->mode_info_stride * pbi->decoding_thread_count; |
| 896 } | 1008 } |
| 897 | 1009 |
| 898 sem_wait(&pbi->h_event_end_decoding); /* add back for each frame */ | 1010 sem_wait(&pbi->h_event_end_decoding); /* add back for each frame */ |
| 899 } | 1011 } |